diff --git a/Gopkg.lock b/Gopkg.lock
index 9f1d1c1..0c9f231 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -2,25 +2,12 @@
[[projects]]
- name = "github.com/Luzifer/rconfig"
+ name = "github.com/fsnotify/fsnotify"
packages = ["."]
- revision = "7aef1d393c1e2d0758901853b59981c7adc67c7e"
- version = "v1.2.0"
+ revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
+ version = "v1.4.7"
[[projects]]
- name = "github.com/Sirupsen/logrus"
- packages = ["."]
- revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
- version = "v1.0.3"
-
-[[projects]]
- name = "github.com/fatih/structs"
- packages = ["."]
- revision = "a720dfa8df582c51dee1b36feabb906bde1588bd"
- version = "v1.0"
-
-[[projects]]
- branch = "master"
name = "github.com/golang/snappy"
packages = ["."]
revision = "553a641470496b2327abcac10b36396bd98e45c9"
@@ -35,13 +22,13 @@
branch = "master"
name = "github.com/hashicorp/go-cleanhttp"
packages = ["."]
- revision = "3573b8b52aa7b37b9358d966a898feb387f62437"
+ revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-multierror"
packages = ["."]
- revision = "83588e72410abfbe4df460eeb6f30841ae47d4c4"
+ revision = "b7773ae218740a7be65057fc60b366a49b538a44"
[[projects]]
branch = "master"
@@ -49,17 +36,53 @@
packages = ["."]
revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
+[[projects]]
+ name = "github.com/hashicorp/go-sockaddr"
+ packages = ["."]
+ revision = "7165ee14aff120ee3642aa2bcf2dea8eebef29c3"
+
[[projects]]
branch = "master"
name = "github.com/hashicorp/hcl"
- packages = [".","hcl/ast","hcl/parser","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"]
- revision = "42e33e2d55a0ff1d6263f738896ea8c13571a8d0"
+ packages = [
+ ".",
+ "hcl/ast",
+ "hcl/parser",
+ "hcl/printer",
+ "hcl/scanner",
+ "hcl/strconv",
+ "hcl/token",
+ "json/parser",
+ "json/scanner",
+ "json/token"
+ ]
+ revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"
[[projects]]
name = "github.com/hashicorp/vault"
- packages = ["api","helper/certutil","helper/compressutil","helper/errutil","helper/jsonutil","helper/parseutil"]
- revision = "6b29fb2b7f70ed538ee2b3c057335d706b6d4e36"
- version = "v0.8.3"
+ packages = [
+ "api",
+ "helper/certutil",
+ "helper/compressutil",
+ "helper/errutil",
+ "helper/jsonutil",
+ "helper/parseutil",
+ "helper/strutil"
+ ]
+ revision = "756fdc4587350daf1c65b93647b2cc31a6f119cd"
+ version = "v0.10.1"
+
+[[projects]]
+ name = "github.com/inconshreveable/mousetrap"
+ packages = ["."]
+ revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
+ version = "v1.0"
+
+[[projects]]
+ name = "github.com/magiconair/properties"
+ packages = ["."]
+ revision = "c2353362d570a7bfa228149c62842019201cfb71"
+ version = "v1.8.0"
[[projects]]
name = "github.com/mattn/go-runewidth"
@@ -71,71 +94,136 @@
branch = "master"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
- revision = "b8bc1bf767474819792c23f32d8286a45736f1c6"
+ revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66"
[[projects]]
- branch = "master"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
- revision = "d0303fe809921458f417bcf828397a65db30a7e4"
+ revision = "a4e142e9c047c904fa2f1e144d9a84e6133024bc"
[[projects]]
branch = "master"
name = "github.com/olekukonko/tablewriter"
packages = ["."]
- revision = "a7a4c189eb47ed33ce7b35f2880070a0c82a67d4"
+ revision = "d4647c9c7a84d847478d890b816b7d8b62b0b279"
+
+[[projects]]
+ name = "github.com/pelletier/go-toml"
+ packages = ["."]
+ revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8"
+ version = "v1.1.0"
[[projects]]
branch = "master"
+ name = "github.com/ryanuber/go-glob"
+ packages = ["."]
+ revision = "256dc444b735e061061cf46c809487313d5b0065"
+
+[[projects]]
name = "github.com/sethgrid/pester"
packages = ["."]
- revision = "0af5bab1e1ea2860c5aef8e77427bab011d774d8"
+ revision = "ed9870dad3170c0b25ab9b11830cc57c3a7798fb"
+
+[[projects]]
+ name = "github.com/sirupsen/logrus"
+ packages = ["."]
+ revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
+ version = "v1.0.5"
+
+[[projects]]
+ name = "github.com/spf13/afero"
+ packages = [
+ ".",
+ "mem"
+ ]
+ revision = "63644898a8da0bc22138abf860edaf5277b6102e"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "github.com/spf13/cast"
+ packages = ["."]
+ revision = "8965335b8c7107321228e3e3702cab9832751bac"
+ version = "v1.2.0"
+
+[[projects]]
+ name = "github.com/spf13/cobra"
+ packages = ["."]
+ revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
+ version = "v0.0.3"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/spf13/jwalterweatherman"
+ packages = ["."]
+ revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
- revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
- version = "v1.0.0"
+ revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
+ version = "v1.0.1"
+
+[[projects]]
+ name = "github.com/spf13/viper"
+ packages = ["."]
+ revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736"
+ version = "v1.0.2"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
- revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
+ revision = "ab813273cd59e1333f7ae7bff5d027d4aadf528c"
[[projects]]
- branch = "master"
name = "golang.org/x/net"
- packages = ["http2","http2/hpack","idna","lex/httplex"]
- revision = "a04bdaca5b32abe1c069418fb7088ae607de5bd0"
+ packages = [
+ "http2",
+ "http2/hpack",
+ "idna",
+ "lex/httplex"
+ ]
+ revision = "f5dfe339be1d06f81b22525fe34671ee7d2c8904"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
- packages = ["unix","windows"]
- revision = "ebfc5b4631820b793c9010c87fd8fef0f39eb082"
+ packages = [
+ "unix",
+ "windows"
+ ]
+ revision = "c11f84a56e43e20a78cee75a7c034031ecf57d1f"
[[projects]]
- branch = "master"
name = "golang.org/x/text"
- packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
- revision = "825fc78a2fd6fa0a5447e300189e3219e05e1f25"
+ packages = [
+ "collate",
+ "collate/build",
+ "internal/colltab",
+ "internal/gen",
+ "internal/tag",
+ "internal/triegen",
+ "internal/ucd",
+ "language",
+ "secure/bidirule",
+ "transform",
+ "unicode/bidi",
+ "unicode/cldr",
+ "unicode/norm",
+ "unicode/rangetable"
+ ]
+ revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
+ version = "v0.3.0"
[[projects]]
- branch = "v2"
- name = "gopkg.in/validator.v2"
- packages = ["."]
- revision = "460c83432a98c35224a6fe352acf8b23e067ad06"
-
-[[projects]]
- branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
- revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
+ revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
+ version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "338132dcce86eed0dc26d5c2a448092d59a299761c47da97aed0c6e98e8c355d"
+ inputs-digest = "d5c94eece84d41716e83088082c119c84736465b21834e66ed176c3ba5bbccf5"
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index 7cb6546..9b6826b 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -1,7 +1,6 @@
-
# Gopkg.toml example
#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
@@ -17,25 +16,39 @@
# source = "github.com/myfork/project2"
#
# [[override]]
-# name = "github.com/x/y"
-# version = "2.4.0"
+# name = "github.com/x/y"
+# version = "2.4.0"
+#
+# [prune]
+# non-go = false
+# go-tests = true
+# unused-packages = true
-[[constraint]]
- name = "github.com/Luzifer/rconfig"
- version = "1.1.0"
-
-[[constraint]]
- name = "github.com/Sirupsen/logrus"
-
[[constraint]]
name = "github.com/hashicorp/vault"
+ version = "0.10.1"
[[constraint]]
+ branch = "master"
name = "github.com/mitchellh/go-homedir"
[[constraint]]
+ branch = "master"
name = "github.com/olekukonko/tablewriter"
[[constraint]]
- name = "gopkg.in/yaml.v2"
+ name = "github.com/sirupsen/logrus"
+ version = "1.0.5"
+
+[[constraint]]
+ name = "github.com/spf13/cobra"
+ version = "0.0.3"
+
+[[constraint]]
+ name = "github.com/spf13/viper"
+ version = "1.0.2"
+
+[prune]
+ go-tests = true
+ unused-packages = true
diff --git a/vendor/github.com/Luzifer/rconfig/.travis.yml b/vendor/github.com/Luzifer/rconfig/.travis.yml
deleted file mode 100644
index b5c25ee..0000000
--- a/vendor/github.com/Luzifer/rconfig/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-
-go:
- - 1.6
- - 1.7
- - tip
-
-script: go test -v -race -cover ./...
diff --git a/vendor/github.com/Luzifer/rconfig/History.md b/vendor/github.com/Luzifer/rconfig/History.md
deleted file mode 100644
index 5adadd9..0000000
--- a/vendor/github.com/Luzifer/rconfig/History.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# 1.2.0 / 2017-06-19
-
- * Add ParseAndValidate method
-
-# 1.1.0 / 2016-06-28
-
- * Support time.Duration config parameters
- * Added goreportcard badge
- * Added testcase for using bool with ENV and default
diff --git a/vendor/github.com/Luzifer/rconfig/README.md b/vendor/github.com/Luzifer/rconfig/README.md
deleted file mode 100644
index f42a664..0000000
--- a/vendor/github.com/Luzifer/rconfig/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-[![Build Status](https://travis-ci.org/Luzifer/rconfig.svg?branch=master)](https://travis-ci.org/Luzifer/rconfig)
-[![License: Apache v2.0](https://badge.luzifer.io/v1/badge?color=5d79b5&title=license&text=Apache+v2.0)](http://www.apache.org/licenses/LICENSE-2.0)
-[![Documentation](https://badge.luzifer.io/v1/badge?title=godoc&text=reference)](https://godoc.org/github.com/Luzifer/rconfig)
-[![Go Report](http://goreportcard.com/badge/Luzifer/rconfig)](http://goreportcard.com/report/Luzifer/rconfig)
-
-## Description
-
-> Package rconfig implements a CLI configuration reader with struct-embedded defaults, environment variables and posix compatible flag parsing using the [pflag](https://github.com/spf13/pflag) library.
-
-## Installation
-
-Install by running:
-
-```
-go get -u github.com/Luzifer/rconfig
-```
-
-OR fetch a specific version:
-
-```
-go get -u gopkg.in/luzifer/rconfig.v1
-```
-
-Run tests by running:
-
-```
-go test -v -race -cover github.com/Luzifer/rconfig
-```
-
-## Usage
-
-A very simple usecase is to just configure a struct inside the vars section of your `main.go` and to parse the commandline flags from the `main()` function:
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/Luzifer/rconfig"
-)
-
-var (
- cfg = struct {
- Username string `default:"unknown" flag:"user" description:"Your name"`
- Details struct {
- Age int `default:"25" flag:"age" env:"age" description:"Your age"`
- }
- }{}
-)
-
-func main() {
- rconfig.Parse(&cfg)
-
- fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
- cfg.Username,
- cfg.Details.Age)
-}
-```
-
-### Provide variable defaults by using a file
-
-Given you have a file `~/.myapp.yml` containing some secrets or usernames (for the example below username is assumed to be "luzifer") as a default configuration for your application you can use this source code to load the defaults from that file using the `vardefault` tag in your configuration struct.
-
-The order of the directives (lower number = higher precedence):
-
-1. Flags provided in command line
-1. Environment variables
-1. Variable defaults (`vardefault` tag in the struct)
-1. `default` tag in the struct
-
-```go
-var cfg = struct {
- Username string `vardefault:"username" flag:"username" description:"Your username"`
-}
-
-func main() {
- rconfig.SetVariableDefaults(rconfig.VarDefaultsFromYAMLFile("~/.myapp.yml"))
- rconfig.Parse(&cfg)
-
- fmt.Printf("Username = %s", cfg.Username)
- // Output: Username = luzifer
-}
-```
-
-## More info
-
-You can see the full reference documentation of the rconfig package [at godoc.org](https://godoc.org/github.com/Luzifer/rconfig), or through go's standard documentation system by running `godoc -http=:6060` and browsing to [http://localhost:6060/pkg/github.com/Luzifer/rconfig](http://localhost:6060/pkg/github.com/Luzifer/rconfig) after installation.
diff --git a/vendor/github.com/Luzifer/rconfig/bool_test.go b/vendor/github.com/Luzifer/rconfig/bool_test.go
deleted file mode 100644
index 11a6f4b..0000000
--- a/vendor/github.com/Luzifer/rconfig/bool_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package rconfig
-
-import (
- "os"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing bool parsing", func() {
- type t struct {
- Test1 bool `default:"true"`
- Test2 bool `default:"false" flag:"test2"`
- Test3 bool `default:"true" flag:"test3,t"`
- Test4 bool `flag:"test4"`
- }
-
- var (
- err error
- args []string
- cfg t
- )
-
- BeforeEach(func() {
- cfg = t{}
- args = []string{
- "--test2",
- "-t",
- }
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.Test1).To(Equal(true))
- Expect(cfg.Test2).To(Equal(true))
- Expect(cfg.Test3).To(Equal(true))
- Expect(cfg.Test4).To(Equal(false))
- })
-})
-
-var _ = Describe("Testing to set bool from ENV with default", func() {
- type t struct {
- Test1 bool `default:"true" env:"TEST1"`
- }
-
- var (
- err error
- args []string
- cfg t
- )
-
- BeforeEach(func() {
- cfg = t{}
- args = []string{}
- })
-
- JustBeforeEach(func() {
- os.Unsetenv("TEST1")
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.Test1).To(Equal(true))
- })
-})
diff --git a/vendor/github.com/Luzifer/rconfig/config.go b/vendor/github.com/Luzifer/rconfig/config.go
deleted file mode 100644
index 251909d..0000000
--- a/vendor/github.com/Luzifer/rconfig/config.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Package rconfig implements a CLI configuration reader with struct-embedded
-// defaults, environment variables and posix compatible flag parsing using
-// the pflag library.
-package rconfig
-
-import (
- "errors"
- "fmt"
- "os"
- "reflect"
- "strconv"
- "strings"
- "time"
-
- "github.com/spf13/pflag"
- validator "gopkg.in/validator.v2"
-)
-
-var (
- fs *pflag.FlagSet
- variableDefaults map[string]string
-)
-
-func init() {
- variableDefaults = make(map[string]string)
-}
-
-// Parse takes the pointer to a struct filled with variables which should be read
-// from ENV, default or flag. The precedence in this is flag > ENV > default. So
-// if a flag is specified on the CLI it will overwrite the ENV and otherwise ENV
-// overwrites the default specified.
-//
-// For your configuration struct you can use the following struct-tags to control
-// the behavior of rconfig:
-//
-// default: Set a default value
-// vardefault: Read the default value from the variable defaults
-// env: Read the value from this environment variable
-// flag: Flag to read in format "long,short" (for example "listen,l")
-// description: A help text for Usage output to guide your users
-//
-// The format you need to specify those values you can see in the example to this
-// function.
-//
-func Parse(config interface{}) error {
- return parse(config, nil)
-}
-
-// ParseAndValidate works exactly like Parse but implements an additional run of
-// the go-validator package on the configuration struct. Therefore additonal struct
-// tags are supported like described in the readme file of the go-validator package:
-//
-// https://github.com/go-validator/validator/tree/v2#usage
-func ParseAndValidate(config interface{}) error {
- return parseAndValidate(config, nil)
-}
-
-// Args returns the non-flag command-line arguments.
-func Args() []string {
- return fs.Args()
-}
-
-// Usage prints a basic usage with the corresponding defaults for the flags to
-// os.Stdout. The defaults are derived from the `default` struct-tag and the ENV.
-func Usage() {
- if fs != nil && fs.Parsed() {
- fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
- fs.PrintDefaults()
- }
-}
-
-// SetVariableDefaults presets the parser with a map of default values to be used
-// when specifying the vardefault tag
-func SetVariableDefaults(defaults map[string]string) {
- variableDefaults = defaults
-}
-
-func parseAndValidate(in interface{}, args []string) error {
- if err := parse(in, args); err != nil {
- return err
- }
-
- return validator.Validate(in)
-}
-
-func parse(in interface{}, args []string) error {
- if args == nil {
- args = os.Args
- }
-
- fs = pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
- if err := execTags(in, fs); err != nil {
- return err
- }
-
- return fs.Parse(args)
-}
-
-func execTags(in interface{}, fs *pflag.FlagSet) error {
- if reflect.TypeOf(in).Kind() != reflect.Ptr {
- return errors.New("Calling parser with non-pointer")
- }
-
- if reflect.ValueOf(in).Elem().Kind() != reflect.Struct {
- return errors.New("Calling parser with pointer to non-struct")
- }
-
- st := reflect.ValueOf(in).Elem()
- for i := 0; i < st.NumField(); i++ {
- valField := st.Field(i)
- typeField := st.Type().Field(i)
-
- if typeField.Tag.Get("default") == "" && typeField.Tag.Get("env") == "" && typeField.Tag.Get("flag") == "" && typeField.Type.Kind() != reflect.Struct {
- // None of our supported tags is present and it's not a sub-struct
- continue
- }
-
- value := varDefault(typeField.Tag.Get("vardefault"), typeField.Tag.Get("default"))
- value = envDefault(typeField.Tag.Get("env"), value)
- parts := strings.Split(typeField.Tag.Get("flag"), ",")
-
- switch typeField.Type {
- case reflect.TypeOf(time.Duration(0)):
- v, err := time.ParseDuration(value)
- if err != nil {
- if value == "" {
- v = time.Duration(0)
- } else {
- return err
- }
- }
-
- if typeField.Tag.Get("flag") != "" {
- if len(parts) == 1 {
- fs.DurationVar(valField.Addr().Interface().(*time.Duration), parts[0], v, typeField.Tag.Get("description"))
- } else {
- fs.DurationVarP(valField.Addr().Interface().(*time.Duration), parts[0], parts[1], v, typeField.Tag.Get("description"))
- }
- } else {
- valField.Set(reflect.ValueOf(v))
- }
- continue
- }
-
- switch typeField.Type.Kind() {
- case reflect.String:
- if typeField.Tag.Get("flag") != "" {
- if len(parts) == 1 {
- fs.StringVar(valField.Addr().Interface().(*string), parts[0], value, typeField.Tag.Get("description"))
- } else {
- fs.StringVarP(valField.Addr().Interface().(*string), parts[0], parts[1], value, typeField.Tag.Get("description"))
- }
- } else {
- valField.SetString(value)
- }
-
- case reflect.Bool:
- v := value == "true"
- if typeField.Tag.Get("flag") != "" {
- if len(parts) == 1 {
- fs.BoolVar(valField.Addr().Interface().(*bool), parts[0], v, typeField.Tag.Get("description"))
- } else {
- fs.BoolVarP(valField.Addr().Interface().(*bool), parts[0], parts[1], v, typeField.Tag.Get("description"))
- }
- } else {
- valField.SetBool(v)
- }
-
- case reflect.Int, reflect.Int8, reflect.Int32, reflect.Int64:
- vt, err := strconv.ParseInt(value, 10, 64)
- if err != nil {
- if value == "" {
- vt = 0
- } else {
- return err
- }
- }
- if typeField.Tag.Get("flag") != "" {
- registerFlagInt(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
- } else {
- valField.SetInt(vt)
- }
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- vt, err := strconv.ParseUint(value, 10, 64)
- if err != nil {
- if value == "" {
- vt = 0
- } else {
- return err
- }
- }
- if typeField.Tag.Get("flag") != "" {
- registerFlagUint(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
- } else {
- valField.SetUint(vt)
- }
-
- case reflect.Float32, reflect.Float64:
- vt, err := strconv.ParseFloat(value, 64)
- if err != nil {
- if value == "" {
- vt = 0.0
- } else {
- return err
- }
- }
- if typeField.Tag.Get("flag") != "" {
- registerFlagFloat(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
- } else {
- valField.SetFloat(vt)
- }
-
- case reflect.Struct:
- if err := execTags(valField.Addr().Interface(), fs); err != nil {
- return err
- }
-
- case reflect.Slice:
- switch typeField.Type.Elem().Kind() {
- case reflect.Int:
- def := []int{}
- for _, v := range strings.Split(value, ",") {
- it, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64)
- if err != nil {
- return err
- }
- def = append(def, int(it))
- }
- if len(parts) == 1 {
- fs.IntSliceVar(valField.Addr().Interface().(*[]int), parts[0], def, typeField.Tag.Get("description"))
- } else {
- fs.IntSliceVarP(valField.Addr().Interface().(*[]int), parts[0], parts[1], def, typeField.Tag.Get("description"))
- }
- case reflect.String:
- del := typeField.Tag.Get("delimiter")
- if len(del) == 0 {
- del = ","
- }
- def := strings.Split(value, del)
- if len(parts) == 1 {
- fs.StringSliceVar(valField.Addr().Interface().(*[]string), parts[0], def, typeField.Tag.Get("description"))
- } else {
- fs.StringSliceVarP(valField.Addr().Interface().(*[]string), parts[0], parts[1], def, typeField.Tag.Get("description"))
- }
- }
- }
- }
-
- return nil
-}
-
-func registerFlagFloat(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt float64, desc string) {
- switch t {
- case reflect.Float32:
- if len(parts) == 1 {
- fs.Float32Var(field.(*float32), parts[0], float32(vt), desc)
- } else {
- fs.Float32VarP(field.(*float32), parts[0], parts[1], float32(vt), desc)
- }
- case reflect.Float64:
- if len(parts) == 1 {
- fs.Float64Var(field.(*float64), parts[0], float64(vt), desc)
- } else {
- fs.Float64VarP(field.(*float64), parts[0], parts[1], float64(vt), desc)
- }
- }
-}
-
-func registerFlagInt(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt int64, desc string) {
- switch t {
- case reflect.Int:
- if len(parts) == 1 {
- fs.IntVar(field.(*int), parts[0], int(vt), desc)
- } else {
- fs.IntVarP(field.(*int), parts[0], parts[1], int(vt), desc)
- }
- case reflect.Int8:
- if len(parts) == 1 {
- fs.Int8Var(field.(*int8), parts[0], int8(vt), desc)
- } else {
- fs.Int8VarP(field.(*int8), parts[0], parts[1], int8(vt), desc)
- }
- case reflect.Int32:
- if len(parts) == 1 {
- fs.Int32Var(field.(*int32), parts[0], int32(vt), desc)
- } else {
- fs.Int32VarP(field.(*int32), parts[0], parts[1], int32(vt), desc)
- }
- case reflect.Int64:
- if len(parts) == 1 {
- fs.Int64Var(field.(*int64), parts[0], int64(vt), desc)
- } else {
- fs.Int64VarP(field.(*int64), parts[0], parts[1], int64(vt), desc)
- }
- }
-}
-
-func registerFlagUint(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt uint64, desc string) {
- switch t {
- case reflect.Uint:
- if len(parts) == 1 {
- fs.UintVar(field.(*uint), parts[0], uint(vt), desc)
- } else {
- fs.UintVarP(field.(*uint), parts[0], parts[1], uint(vt), desc)
- }
- case reflect.Uint8:
- if len(parts) == 1 {
- fs.Uint8Var(field.(*uint8), parts[0], uint8(vt), desc)
- } else {
- fs.Uint8VarP(field.(*uint8), parts[0], parts[1], uint8(vt), desc)
- }
- case reflect.Uint16:
- if len(parts) == 1 {
- fs.Uint16Var(field.(*uint16), parts[0], uint16(vt), desc)
- } else {
- fs.Uint16VarP(field.(*uint16), parts[0], parts[1], uint16(vt), desc)
- }
- case reflect.Uint32:
- if len(parts) == 1 {
- fs.Uint32Var(field.(*uint32), parts[0], uint32(vt), desc)
- } else {
- fs.Uint32VarP(field.(*uint32), parts[0], parts[1], uint32(vt), desc)
- }
- case reflect.Uint64:
- if len(parts) == 1 {
- fs.Uint64Var(field.(*uint64), parts[0], uint64(vt), desc)
- } else {
- fs.Uint64VarP(field.(*uint64), parts[0], parts[1], uint64(vt), desc)
- }
- }
-}
-
-func envDefault(env, def string) string {
- value := def
-
- if env != "" {
- if e := os.Getenv(env); e != "" {
- value = e
- }
- }
-
- return value
-}
-
-func varDefault(name, def string) string {
- value := def
-
- if name != "" {
- if v, ok := variableDefaults[name]; ok {
- value = v
- }
- }
-
- return value
-}
diff --git a/vendor/github.com/Luzifer/rconfig/duration_test.go b/vendor/github.com/Luzifer/rconfig/duration_test.go
deleted file mode 100644
index 1ca95dc..0000000
--- a/vendor/github.com/Luzifer/rconfig/duration_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package rconfig
-
-import (
- "time"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Duration", func() {
- type t struct {
- Test time.Duration `flag:"duration"`
- TestS time.Duration `flag:"other-duration,o"`
- TestDef time.Duration `default:"30h"`
- }
-
- var (
- err error
- args []string
- cfg t
- )
-
- BeforeEach(func() {
- cfg = t{}
- args = []string{
- "--duration=23s", "-o", "45m",
- }
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.Test).To(Equal(23 * time.Second))
- Expect(cfg.TestS).To(Equal(45 * time.Minute))
-
- Expect(cfg.TestDef).To(Equal(30 * time.Hour))
- })
-})
diff --git a/vendor/github.com/Luzifer/rconfig/errors_test.go b/vendor/github.com/Luzifer/rconfig/errors_test.go
deleted file mode 100644
index 46db039..0000000
--- a/vendor/github.com/Luzifer/rconfig/errors_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package rconfig
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing errors", func() {
-
- It("should not accept string as int", func() {
- Expect(parse(&struct {
- A int `default:"a"`
- }{}, []string{})).To(HaveOccurred())
- })
-
- It("should not accept string as float", func() {
- Expect(parse(&struct {
- A float32 `default:"a"`
- }{}, []string{})).To(HaveOccurred())
- })
-
- It("should not accept string as uint", func() {
- Expect(parse(&struct {
- A uint `default:"a"`
- }{}, []string{})).To(HaveOccurred())
- })
-
- It("should not accept string as uint in sub-struct", func() {
- Expect(parse(&struct {
- B struct {
- A uint `default:"a"`
- }
- }{}, []string{})).To(HaveOccurred())
- })
-
- It("should not accept string slice as int slice", func() {
- Expect(parse(&struct {
- A []int `default:"a,bn"`
- }{}, []string{})).To(HaveOccurred())
- })
-
- It("should not accept variables not being pointers", func() {
- cfg := struct {
- A string `default:"a"`
- }{}
-
- Expect(parse(cfg, []string{})).To(HaveOccurred())
- })
-
- It("should not accept variables not being pointers to structs", func() {
- cfg := "test"
-
- Expect(parse(cfg, []string{})).To(HaveOccurred())
- })
-
-})
diff --git a/vendor/github.com/Luzifer/rconfig/example_test.go b/vendor/github.com/Luzifer/rconfig/example_test.go
deleted file mode 100644
index 0a65b2f..0000000
--- a/vendor/github.com/Luzifer/rconfig/example_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package rconfig
-
-import (
- "fmt"
- "os"
-)
-
-func ExampleParse() {
- // We're building an example configuration with a sub-struct to be filled
- // by the Parse command.
- config := struct {
- Username string `default:"unknown" flag:"user,u" description:"Your name"`
- Details struct {
- Age int `default:"25" flag:"age" description:"Your age"`
- }
- }{}
-
- // To have more relieable results we're setting os.Args to a known value.
- // In real-life use cases you wouldn't do this but parse the original
- // commandline arguments.
- os.Args = []string{
- "example",
- "--user=Luzifer",
- }
-
- Parse(&config)
-
- fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
- config.Username,
- config.Details.Age)
-
- // You can also show an usage message for your user
- Usage()
-
- // Output:
- // Hello Luzifer, happy birthday for your 25th birthday.
-}
diff --git a/vendor/github.com/Luzifer/rconfig/float_test.go b/vendor/github.com/Luzifer/rconfig/float_test.go
deleted file mode 100644
index 4ec8a1e..0000000
--- a/vendor/github.com/Luzifer/rconfig/float_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package rconfig
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing float parsing", func() {
- type t struct {
- Test32 float32 `flag:"float32"`
- Test32P float32 `flag:"float32p,3"`
- Test64 float64 `flag:"float64"`
- Test64P float64 `flag:"float64p,6"`
- TestDef float32 `default:"66.256"`
- }
-
- var (
- err error
- args []string
- cfg t
- )
-
- BeforeEach(func() {
- cfg = t{}
- args = []string{
- "--float32=5.5", "-3", "6.6",
- "--float64=7.7", "-6", "8.8",
- }
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.Test32).To(Equal(float32(5.5)))
- Expect(cfg.Test32P).To(Equal(float32(6.6)))
- Expect(cfg.Test64).To(Equal(float64(7.7)))
- Expect(cfg.Test64P).To(Equal(float64(8.8)))
-
- Expect(cfg.TestDef).To(Equal(float32(66.256)))
- })
-})
diff --git a/vendor/github.com/Luzifer/rconfig/general_test.go b/vendor/github.com/Luzifer/rconfig/general_test.go
deleted file mode 100644
index e7f29b7..0000000
--- a/vendor/github.com/Luzifer/rconfig/general_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package rconfig
-
-import (
- "os"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing general parsing", func() {
- type t struct {
- Test string `default:"foo" env:"shell" flag:"shell" description:"Test"`
- Test2 string `default:"blub" env:"testvar" flag:"testvar,t" description:"Test"`
- DefaultFlag string `default:"goo"`
- SadFlag string
- }
-
- type tValidated struct {
- Test string `flag:"test" default:"" validate:"nonzero"`
- }
-
- var (
- err error
- args []string
- cfg t
- )
-
- Context("with defined arguments", func() {
- BeforeEach(func() {
- cfg = t{}
- args = []string{
- "--shell=test23",
- "-t", "bla",
- }
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have parsed the expected values", func() {
- Expect(cfg.Test).To(Equal("test23"))
- Expect(cfg.Test2).To(Equal("bla"))
- Expect(cfg.SadFlag).To(Equal(""))
- Expect(cfg.DefaultFlag).To(Equal("goo"))
- })
- })
-
- Context("with no arguments", func() {
- BeforeEach(func() {
- cfg = t{}
- args = []string{}
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have used the default value", func() {
- Expect(cfg.Test).To(Equal("foo"))
- })
- })
-
- Context("with no arguments and set env", func() {
- BeforeEach(func() {
- cfg = t{}
- args = []string{}
- os.Setenv("shell", "test546")
- })
-
- AfterEach(func() {
- os.Unsetenv("shell")
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have used the value from env", func() {
- Expect(cfg.Test).To(Equal("test546"))
- })
- })
-
- Context("with additional arguments", func() {
- BeforeEach(func() {
- cfg = t{}
- args = []string{
- "--shell=test23",
- "-t", "bla",
- "positional1", "positional2",
- }
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have parsed the expected values", func() {
- Expect(cfg.Test).To(Equal("test23"))
- Expect(cfg.Test2).To(Equal("bla"))
- Expect(cfg.SadFlag).To(Equal(""))
- Expect(cfg.DefaultFlag).To(Equal("goo"))
- })
- It("should have detected the positional arguments", func() {
- Expect(Args()).To(Equal([]string{"positional1", "positional2"}))
- })
- })
-
- Context("making use of the validator package", func() {
- var cfgValidated tValidated
-
- BeforeEach(func() {
- cfgValidated = tValidated{}
- args = []string{}
- })
-
- JustBeforeEach(func() {
- err = parseAndValidate(&cfgValidated, args)
- })
-
- It("should have errored", func() { Expect(err).To(HaveOccurred()) })
- })
-
-})
diff --git a/vendor/github.com/Luzifer/rconfig/int_test.go b/vendor/github.com/Luzifer/rconfig/int_test.go
deleted file mode 100644
index 2cc0022..0000000
--- a/vendor/github.com/Luzifer/rconfig/int_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package rconfig
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing int parsing", func() {
- type t struct {
- Test int `flag:"int"`
- TestP int `flag:"intp,i"`
- Test8 int8 `flag:"int8"`
- Test8P int8 `flag:"int8p,8"`
- Test32 int32 `flag:"int32"`
- Test32P int32 `flag:"int32p,3"`
- Test64 int64 `flag:"int64"`
- Test64P int64 `flag:"int64p,6"`
- TestDef int8 `default:"66"`
- }
-
- var (
- err error
- args []string
- cfg t
- )
-
- BeforeEach(func() {
- cfg = t{}
- args = []string{
- "--int=1", "-i", "2",
- "--int8=3", "-8", "4",
- "--int32=5", "-3", "6",
- "--int64=7", "-6", "8",
- }
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.Test).To(Equal(1))
- Expect(cfg.TestP).To(Equal(2))
- Expect(cfg.Test8).To(Equal(int8(3)))
- Expect(cfg.Test8P).To(Equal(int8(4)))
- Expect(cfg.Test32).To(Equal(int32(5)))
- Expect(cfg.Test32P).To(Equal(int32(6)))
- Expect(cfg.Test64).To(Equal(int64(7)))
- Expect(cfg.Test64P).To(Equal(int64(8)))
-
- Expect(cfg.TestDef).To(Equal(int8(66)))
- })
-})
diff --git a/vendor/github.com/Luzifer/rconfig/os-args_test.go b/vendor/github.com/Luzifer/rconfig/os-args_test.go
deleted file mode 100644
index eacee71..0000000
--- a/vendor/github.com/Luzifer/rconfig/os-args_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package rconfig_test
-
-import (
- "os"
-
- . "github.com/Luzifer/rconfig"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing os.Args", func() {
- type t struct {
- A string `default:"a" flag:"a"`
- }
-
- var (
- err error
- cfg t
- )
-
- JustBeforeEach(func() {
- err = Parse(&cfg)
- })
-
- Context("With only valid arguments", func() {
-
- BeforeEach(func() {
- cfg = t{}
- os.Args = []string{"--a=bar"}
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.A).To(Equal("bar"))
- })
-
- })
-
-})
diff --git a/vendor/github.com/Luzifer/rconfig/precedence_test.go b/vendor/github.com/Luzifer/rconfig/precedence_test.go
deleted file mode 100644
index 6d87ca0..0000000
--- a/vendor/github.com/Luzifer/rconfig/precedence_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package rconfig
-
-import (
- "os"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Precedence", func() {
-
- type t struct {
- A int `default:"1" vardefault:"a" env:"a" flag:"avar,a" description:"a"`
- }
-
- var (
- err error
- cfg t
- args []string
- vardefaults map[string]string
- )
-
- JustBeforeEach(func() {
- cfg = t{}
- SetVariableDefaults(vardefaults)
- err = parse(&cfg, args)
- })
-
- Context("Provided: Flag, Env, Default, VarDefault", func() {
- BeforeEach(func() {
- args = []string{"-a", "5"}
- os.Setenv("a", "8")
- vardefaults = map[string]string{
- "a": "3",
- }
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have used the flag value", func() {
- Expect(cfg.A).To(Equal(5))
- })
- })
-
- Context("Provided: Env, Default, VarDefault", func() {
- BeforeEach(func() {
- args = []string{}
- os.Setenv("a", "8")
- vardefaults = map[string]string{
- "a": "3",
- }
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have used the env value", func() {
- Expect(cfg.A).To(Equal(8))
- })
- })
-
- Context("Provided: Default, VarDefault", func() {
- BeforeEach(func() {
- args = []string{}
- os.Unsetenv("a")
- vardefaults = map[string]string{
- "a": "3",
- }
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have used the vardefault value", func() {
- Expect(cfg.A).To(Equal(3))
- })
- })
-
- Context("Provided: Default", func() {
- BeforeEach(func() {
- args = []string{}
- os.Unsetenv("a")
- vardefaults = map[string]string{}
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have used the default value", func() {
- Expect(cfg.A).To(Equal(1))
- })
- })
-
-})
diff --git a/vendor/github.com/Luzifer/rconfig/rconfig_suite_test.go b/vendor/github.com/Luzifer/rconfig/rconfig_suite_test.go
deleted file mode 100644
index 72c9ce4..0000000
--- a/vendor/github.com/Luzifer/rconfig/rconfig_suite_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package rconfig_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "testing"
-)
-
-func TestRconfig(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "Rconfig Suite")
-}
diff --git a/vendor/github.com/Luzifer/rconfig/slice_test.go b/vendor/github.com/Luzifer/rconfig/slice_test.go
deleted file mode 100644
index 7d9524e..0000000
--- a/vendor/github.com/Luzifer/rconfig/slice_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package rconfig
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing slices", func() {
- type t struct {
- Int []int `default:"1,2,3" flag:"int"`
- String []string `default:"a,b,c" flag:"string"`
- IntP []int `default:"1,2,3" flag:"intp,i"`
- StringP []string `default:"a,b,c" flag:"stringp,s"`
- }
-
- var (
- err error
- args []string
- cfg t
- )
-
- BeforeEach(func() {
- cfg = t{}
- args = []string{
- "--int=4,5", "-s", "hallo,welt",
- }
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values for int-slice", func() {
- Expect(len(cfg.Int)).To(Equal(2))
- Expect(cfg.Int).To(Equal([]int{4, 5}))
- Expect(cfg.Int).NotTo(Equal([]int{5, 4}))
- })
- It("should have the expected values for int-shorthand-slice", func() {
- Expect(len(cfg.IntP)).To(Equal(3))
- Expect(cfg.IntP).To(Equal([]int{1, 2, 3}))
- })
- It("should have the expected values for string-slice", func() {
- Expect(len(cfg.String)).To(Equal(3))
- Expect(cfg.String).To(Equal([]string{"a", "b", "c"}))
- })
- It("should have the expected values for string-shorthand-slice", func() {
- Expect(len(cfg.StringP)).To(Equal(2))
- Expect(cfg.StringP).To(Equal([]string{"hallo", "welt"}))
- })
-})
diff --git a/vendor/github.com/Luzifer/rconfig/sub-struct_test.go b/vendor/github.com/Luzifer/rconfig/sub-struct_test.go
deleted file mode 100644
index cfbfbc2..0000000
--- a/vendor/github.com/Luzifer/rconfig/sub-struct_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package rconfig
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing sub-structs", func() {
- type t struct {
- Test string `default:"blubb"`
- Sub struct {
- Test string `default:"Hallo"`
- }
- }
-
- var (
- err error
- args []string
- cfg t
- )
-
- BeforeEach(func() {
- cfg = t{}
- args = []string{}
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.Test).To(Equal("blubb"))
- Expect(cfg.Sub.Test).To(Equal("Hallo"))
- })
-})
diff --git a/vendor/github.com/Luzifer/rconfig/uint_test.go b/vendor/github.com/Luzifer/rconfig/uint_test.go
deleted file mode 100644
index 886db1d..0000000
--- a/vendor/github.com/Luzifer/rconfig/uint_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package rconfig
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing uint parsing", func() {
- type t struct {
- Test uint `flag:"int"`
- TestP uint `flag:"intp,i"`
- Test8 uint8 `flag:"int8"`
- Test8P uint8 `flag:"int8p,8"`
- Test16 uint16 `flag:"int16"`
- Test16P uint16 `flag:"int16p,1"`
- Test32 uint32 `flag:"int32"`
- Test32P uint32 `flag:"int32p,3"`
- Test64 uint64 `flag:"int64"`
- Test64P uint64 `flag:"int64p,6"`
- TestDef uint8 `default:"66"`
- }
-
- var (
- err error
- args []string
- cfg t
- )
-
- BeforeEach(func() {
- cfg = t{}
- args = []string{
- "--int=1", "-i", "2",
- "--int8=3", "-8", "4",
- "--int32=5", "-3", "6",
- "--int64=7", "-6", "8",
- "--int16=9", "-1", "10",
- }
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.Test).To(Equal(uint(1)))
- Expect(cfg.TestP).To(Equal(uint(2)))
- Expect(cfg.Test8).To(Equal(uint8(3)))
- Expect(cfg.Test8P).To(Equal(uint8(4)))
- Expect(cfg.Test32).To(Equal(uint32(5)))
- Expect(cfg.Test32P).To(Equal(uint32(6)))
- Expect(cfg.Test64).To(Equal(uint64(7)))
- Expect(cfg.Test64P).To(Equal(uint64(8)))
- Expect(cfg.Test16).To(Equal(uint16(9)))
- Expect(cfg.Test16P).To(Equal(uint16(10)))
-
- Expect(cfg.TestDef).To(Equal(uint8(66)))
- })
-})
diff --git a/vendor/github.com/Luzifer/rconfig/vardefault_providers.go b/vendor/github.com/Luzifer/rconfig/vardefault_providers.go
deleted file mode 100644
index 2199cfa..0000000
--- a/vendor/github.com/Luzifer/rconfig/vardefault_providers.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package rconfig
-
-import (
- "io/ioutil"
-
- "gopkg.in/yaml.v2"
-)
-
-// VarDefaultsFromYAMLFile reads contents of a file and calls VarDefaultsFromYAML
-func VarDefaultsFromYAMLFile(filename string) map[string]string {
- data, err := ioutil.ReadFile(filename)
- if err != nil {
- return make(map[string]string)
- }
-
- return VarDefaultsFromYAML(data)
-}
-
-// VarDefaultsFromYAML creates a vardefaults map from YAML raw data
-func VarDefaultsFromYAML(in []byte) map[string]string {
- out := make(map[string]string)
- err := yaml.Unmarshal(in, &out)
- if err != nil {
- return make(map[string]string)
- }
- return out
-}
diff --git a/vendor/github.com/Luzifer/rconfig/vardefault_test.go b/vendor/github.com/Luzifer/rconfig/vardefault_test.go
deleted file mode 100644
index 8328919..0000000
--- a/vendor/github.com/Luzifer/rconfig/vardefault_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package rconfig
-
-import (
- "io/ioutil"
- "os"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing variable defaults", func() {
-
- type t struct {
- MySecretValue string `default:"secret" env:"foo" vardefault:"my_secret_value"`
- MyUsername string `default:"luzifer" vardefault:"username"`
- SomeVar string `flag:"var" description:"some variable"`
- IntVar int64 `vardefault:"int_var" default:"23"`
- }
-
- var (
- err error
- cfg t
- args = []string{}
- vardefaults = map[string]string{
- "my_secret_value": "veryverysecretkey",
- "unkownkey": "hi there",
- "int_var": "42",
- }
- )
-
- BeforeEach(func() {
- cfg = t{}
- })
-
- JustBeforeEach(func() {
- err = parse(&cfg, args)
- })
-
- Context("With manually provided variables", func() {
- BeforeEach(func() {
- SetVariableDefaults(vardefaults)
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.IntVar).To(Equal(int64(42)))
- Expect(cfg.MySecretValue).To(Equal("veryverysecretkey"))
- Expect(cfg.MyUsername).To(Equal("luzifer"))
- Expect(cfg.SomeVar).To(Equal(""))
- })
- })
-
- Context("With defaults from YAML data", func() {
- BeforeEach(func() {
- yamlData := []byte("---\nmy_secret_value: veryverysecretkey\nunknownkey: hi there\nint_var: 42\n")
- SetVariableDefaults(VarDefaultsFromYAML(yamlData))
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.IntVar).To(Equal(int64(42)))
- Expect(cfg.MySecretValue).To(Equal("veryverysecretkey"))
- Expect(cfg.MyUsername).To(Equal("luzifer"))
- Expect(cfg.SomeVar).To(Equal(""))
- })
- })
-
- Context("With defaults from YAML file", func() {
- var tmp *os.File
-
- BeforeEach(func() {
- tmp, _ = ioutil.TempFile("", "")
- yamlData := "---\nmy_secret_value: veryverysecretkey\nunknownkey: hi there\nint_var: 42\n"
- tmp.WriteString(yamlData)
- SetVariableDefaults(VarDefaultsFromYAMLFile(tmp.Name()))
- })
-
- AfterEach(func() {
- tmp.Close()
- os.Remove(tmp.Name())
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.IntVar).To(Equal(int64(42)))
- Expect(cfg.MySecretValue).To(Equal("veryverysecretkey"))
- Expect(cfg.MyUsername).To(Equal("luzifer"))
- Expect(cfg.SomeVar).To(Equal(""))
- })
- })
-
- Context("With defaults from invalid YAML data", func() {
- BeforeEach(func() {
- yamlData := []byte("---\nmy_secret_value = veryverysecretkey\nunknownkey = hi there\nint_var = 42\n")
- SetVariableDefaults(VarDefaultsFromYAML(yamlData))
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.IntVar).To(Equal(int64(23)))
- Expect(cfg.MySecretValue).To(Equal("secret"))
- Expect(cfg.MyUsername).To(Equal("luzifer"))
- Expect(cfg.SomeVar).To(Equal(""))
- })
- })
-
- Context("With defaults from non existent YAML file", func() {
- BeforeEach(func() {
- file := "/tmp/this_file_should_not_exist_146e26723r"
- SetVariableDefaults(VarDefaultsFromYAMLFile(file))
- })
-
- It("should not have errored", func() { Expect(err).NotTo(HaveOccurred()) })
- It("should have the expected values", func() {
- Expect(cfg.IntVar).To(Equal(int64(23)))
- Expect(cfg.MySecretValue).To(Equal("secret"))
- Expect(cfg.MyUsername).To(Equal("luzifer"))
- Expect(cfg.SomeVar).To(Equal(""))
- })
- })
-
-})
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit_test.go b/vendor/github.com/Sirupsen/logrus/alt_exit_test.go
deleted file mode 100644
index a08b1a8..0000000
--- a/vendor/github.com/Sirupsen/logrus/alt_exit_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package logrus
-
-import (
- "io/ioutil"
- "log"
- "os"
- "os/exec"
- "path/filepath"
- "testing"
- "time"
-)
-
-func TestRegister(t *testing.T) {
- current := len(handlers)
- RegisterExitHandler(func() {})
- if len(handlers) != current+1 {
- t.Fatalf("expected %d handlers, got %d", current+1, len(handlers))
- }
-}
-
-func TestHandler(t *testing.T) {
- tempDir, err := ioutil.TempDir("", "test_handler")
- if err != nil {
- log.Fatalf("can't create temp dir. %q", err)
- }
- defer os.RemoveAll(tempDir)
-
- gofile := filepath.Join(tempDir, "gofile.go")
- if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
- t.Fatalf("can't create go file. %q", err)
- }
-
- outfile := filepath.Join(tempDir, "outfile.out")
- arg := time.Now().UTC().String()
- err = exec.Command("go", "run", gofile, outfile, arg).Run()
- if err == nil {
- t.Fatalf("completed normally, should have failed")
- }
-
- data, err := ioutil.ReadFile(outfile)
- if err != nil {
- t.Fatalf("can't read output file %s. %q", outfile, err)
- }
-
- if string(data) != arg {
- t.Fatalf("bad data. Expected %q, got %q", data, arg)
- }
-}
-
-var testprog = []byte(`
-// Test program for atexit, gets output file and data as arguments and writes
-// data to output file in atexit handler.
-package main
-
-import (
- "github.com/sirupsen/logrus"
- "flag"
- "fmt"
- "io/ioutil"
-)
-
-var outfile = ""
-var data = ""
-
-func handler() {
- ioutil.WriteFile(outfile, []byte(data), 0666)
-}
-
-func badHandler() {
- n := 0
- fmt.Println(1/n)
-}
-
-func main() {
- flag.Parse()
- outfile = flag.Arg(0)
- data = flag.Arg(1)
-
- logrus.RegisterExitHandler(handler)
- logrus.RegisterExitHandler(badHandler)
- logrus.Fatal("Bye bye")
-}
-`)
diff --git a/vendor/github.com/Sirupsen/logrus/entry_test.go b/vendor/github.com/Sirupsen/logrus/entry_test.go
deleted file mode 100644
index 99c3b41..0000000
--- a/vendor/github.com/Sirupsen/logrus/entry_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestEntryWithError(t *testing.T) {
-
- assert := assert.New(t)
-
- defer func() {
- ErrorKey = "error"
- }()
-
- err := fmt.Errorf("kaboom at layer %d", 4711)
-
- assert.Equal(err, WithError(err).Data["error"])
-
- logger := New()
- logger.Out = &bytes.Buffer{}
- entry := NewEntry(logger)
-
- assert.Equal(err, entry.WithError(err).Data["error"])
-
- ErrorKey = "err"
-
- assert.Equal(err, entry.WithError(err).Data["err"])
-
-}
-
-func TestEntryPanicln(t *testing.T) {
- errBoom := fmt.Errorf("boom time")
-
- defer func() {
- p := recover()
- assert.NotNil(t, p)
-
- switch pVal := p.(type) {
- case *Entry:
- assert.Equal(t, "kaboom", pVal.Message)
- assert.Equal(t, errBoom, pVal.Data["err"])
- default:
- t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
- }
- }()
-
- logger := New()
- logger.Out = &bytes.Buffer{}
- entry := NewEntry(logger)
- entry.WithField("err", errBoom).Panicln("kaboom")
-}
-
-func TestEntryPanicf(t *testing.T) {
- errBoom := fmt.Errorf("boom again")
-
- defer func() {
- p := recover()
- assert.NotNil(t, p)
-
- switch pVal := p.(type) {
- case *Entry:
- assert.Equal(t, "kaboom true", pVal.Message)
- assert.Equal(t, errBoom, pVal.Data["err"])
- default:
- t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
- }
- }()
-
- logger := New()
- logger.Out = &bytes.Buffer{}
- entry := NewEntry(logger)
- entry.WithField("err", errBoom).Panicf("kaboom %v", true)
-}
diff --git a/vendor/github.com/Sirupsen/logrus/example_basic_test.go b/vendor/github.com/Sirupsen/logrus/example_basic_test.go
deleted file mode 100644
index a2acf55..0000000
--- a/vendor/github.com/Sirupsen/logrus/example_basic_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package logrus_test
-
-import (
- "github.com/sirupsen/logrus"
- "os"
-)
-
-func Example_basic() {
- var log = logrus.New()
- log.Formatter = new(logrus.JSONFormatter)
- log.Formatter = new(logrus.TextFormatter) //default
- log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
- log.Level = logrus.DebugLevel
- log.Out = os.Stdout
-
- // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
- // if err == nil {
- // log.Out = file
- // } else {
- // log.Info("Failed to log to file, using default stderr")
- // }
-
- defer func() {
- err := recover()
- if err != nil {
- entry := err.(*logrus.Entry)
- log.WithFields(logrus.Fields{
- "omg": true,
- "err_animal": entry.Data["animal"],
- "err_size": entry.Data["size"],
- "err_level": entry.Level,
- "err_message": entry.Message,
- "number": 100,
- }).Error("The ice breaks!") // or use Fatal() to force the process to exit with a nonzero code
- }
- }()
-
- log.WithFields(logrus.Fields{
- "animal": "walrus",
- "number": 8,
- }).Debug("Started observing beach")
-
- log.WithFields(logrus.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-
- log.WithFields(logrus.Fields{
- "omg": true,
- "number": 122,
- }).Warn("The group's number increased tremendously!")
-
- log.WithFields(logrus.Fields{
- "temperature": -4,
- }).Debug("Temperature changes")
-
- log.WithFields(logrus.Fields{
- "animal": "orca",
- "size": 9009,
- }).Panic("It's over 9000!")
-
- // Output:
- // level=debug msg="Started observing beach" animal=walrus number=8
- // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
- // level=warning msg="The group's number increased tremendously!" number=122 omg=true
- // level=debug msg="Temperature changes" temperature=-4
- // level=panic msg="It's over 9000!" animal=orca size=9009
- // level=error msg="The ice breaks!" err_animal=orca err_level=panic err_message="It's over 9000!" err_size=9009 number=100 omg=true
-}
diff --git a/vendor/github.com/Sirupsen/logrus/example_hook_test.go b/vendor/github.com/Sirupsen/logrus/example_hook_test.go
deleted file mode 100644
index d4ddffc..0000000
--- a/vendor/github.com/Sirupsen/logrus/example_hook_test.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package logrus_test
-
-import (
- "github.com/sirupsen/logrus"
- "gopkg.in/gemnasium/logrus-airbrake-hook.v2"
- "os"
-)
-
-func Example_hook() {
- var log = logrus.New()
- log.Formatter = new(logrus.TextFormatter) // default
- log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
- log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
- log.Out = os.Stdout
-
- log.WithFields(logrus.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-
- log.WithFields(logrus.Fields{
- "omg": true,
- "number": 122,
- }).Warn("The group's number increased tremendously!")
-
- log.WithFields(logrus.Fields{
- "omg": true,
- "number": 100,
- }).Error("The ice breaks!")
-
- // Output:
- // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
- // level=warning msg="The group's number increased tremendously!" number=122 omg=true
- // level=error msg="The ice breaks!" number=100 omg=true
-}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go
deleted file mode 100644
index d948158..0000000
--- a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package logrus
-
-import (
- "fmt"
- "testing"
- "time"
-)
-
-// smallFields is a small size data set for benchmarking
-var smallFields = Fields{
- "foo": "bar",
- "baz": "qux",
- "one": "two",
- "three": "four",
-}
-
-// largeFields is a large size data set for benchmarking
-var largeFields = Fields{
- "foo": "bar",
- "baz": "qux",
- "one": "two",
- "three": "four",
- "five": "six",
- "seven": "eight",
- "nine": "ten",
- "eleven": "twelve",
- "thirteen": "fourteen",
- "fifteen": "sixteen",
- "seventeen": "eighteen",
- "nineteen": "twenty",
- "a": "b",
- "c": "d",
- "e": "f",
- "g": "h",
- "i": "j",
- "k": "l",
- "m": "n",
- "o": "p",
- "q": "r",
- "s": "t",
- "u": "v",
- "w": "x",
- "y": "z",
- "this": "will",
- "make": "thirty",
- "entries": "yeah",
-}
-
-var errorFields = Fields{
- "foo": fmt.Errorf("bar"),
- "baz": fmt.Errorf("qux"),
-}
-
-func BenchmarkErrorTextFormatter(b *testing.B) {
- doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
-}
-
-func BenchmarkSmallTextFormatter(b *testing.B) {
- doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
-}
-
-func BenchmarkLargeTextFormatter(b *testing.B) {
- doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
-}
-
-func BenchmarkSmallColoredTextFormatter(b *testing.B) {
- doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
-}
-
-func BenchmarkLargeColoredTextFormatter(b *testing.B) {
- doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
-}
-
-func BenchmarkSmallJSONFormatter(b *testing.B) {
- doBenchmark(b, &JSONFormatter{}, smallFields)
-}
-
-func BenchmarkLargeJSONFormatter(b *testing.B) {
- doBenchmark(b, &JSONFormatter{}, largeFields)
-}
-
-func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
- logger := New()
-
- entry := &Entry{
- Time: time.Time{},
- Level: InfoLevel,
- Message: "message",
- Data: fields,
- Logger: logger,
- }
- var d []byte
- var err error
- for i := 0; i < b.N; i++ {
- d, err = formatter.Format(entry)
- if err != nil {
- b.Fatal(err)
- }
- b.SetBytes(int64(len(d)))
- }
-}
diff --git a/vendor/github.com/Sirupsen/logrus/hook_test.go b/vendor/github.com/Sirupsen/logrus/hook_test.go
deleted file mode 100644
index 13f34cb..0000000
--- a/vendor/github.com/Sirupsen/logrus/hook_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package logrus
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-type TestHook struct {
- Fired bool
-}
-
-func (hook *TestHook) Fire(entry *Entry) error {
- hook.Fired = true
- return nil
-}
-
-func (hook *TestHook) Levels() []Level {
- return []Level{
- DebugLevel,
- InfoLevel,
- WarnLevel,
- ErrorLevel,
- FatalLevel,
- PanicLevel,
- }
-}
-
-func TestHookFires(t *testing.T) {
- hook := new(TestHook)
-
- LogAndAssertJSON(t, func(log *Logger) {
- log.Hooks.Add(hook)
- assert.Equal(t, hook.Fired, false)
-
- log.Print("test")
- }, func(fields Fields) {
- assert.Equal(t, hook.Fired, true)
- })
-}
-
-type ModifyHook struct {
-}
-
-func (hook *ModifyHook) Fire(entry *Entry) error {
- entry.Data["wow"] = "whale"
- return nil
-}
-
-func (hook *ModifyHook) Levels() []Level {
- return []Level{
- DebugLevel,
- InfoLevel,
- WarnLevel,
- ErrorLevel,
- FatalLevel,
- PanicLevel,
- }
-}
-
-func TestHookCanModifyEntry(t *testing.T) {
- hook := new(ModifyHook)
-
- LogAndAssertJSON(t, func(log *Logger) {
- log.Hooks.Add(hook)
- log.WithField("wow", "elephant").Print("test")
- }, func(fields Fields) {
- assert.Equal(t, fields["wow"], "whale")
- })
-}
-
-func TestCanFireMultipleHooks(t *testing.T) {
- hook1 := new(ModifyHook)
- hook2 := new(TestHook)
-
- LogAndAssertJSON(t, func(log *Logger) {
- log.Hooks.Add(hook1)
- log.Hooks.Add(hook2)
-
- log.WithField("wow", "elephant").Print("test")
- }, func(fields Fields) {
- assert.Equal(t, fields["wow"], "whale")
- assert.Equal(t, hook2.Fired, true)
- })
-}
-
-type ErrorHook struct {
- Fired bool
-}
-
-func (hook *ErrorHook) Fire(entry *Entry) error {
- hook.Fired = true
- return nil
-}
-
-func (hook *ErrorHook) Levels() []Level {
- return []Level{
- ErrorLevel,
- }
-}
-
-func TestErrorHookShouldntFireOnInfo(t *testing.T) {
- hook := new(ErrorHook)
-
- LogAndAssertJSON(t, func(log *Logger) {
- log.Hooks.Add(hook)
- log.Info("test")
- }, func(fields Fields) {
- assert.Equal(t, hook.Fired, false)
- })
-}
-
-func TestErrorHookShouldFireOnError(t *testing.T) {
- hook := new(ErrorHook)
-
- LogAndAssertJSON(t, func(log *Logger) {
- log.Hooks.Add(hook)
- log.Error("test")
- }, func(fields Fields) {
- assert.Equal(t, hook.Fired, true)
- })
-}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md
deleted file mode 100644
index 1bbc0f7..0000000
--- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Syslog Hooks for Logrus
-
-## Usage
-
-```go
-import (
- "log/syslog"
- "github.com/sirupsen/logrus"
- lSyslog "github.com/sirupsen/logrus/hooks/syslog"
-)
-
-func main() {
- log := logrus.New()
- hook, err := lSyslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
-
- if err == nil {
- log.Hooks.Add(hook)
- }
-}
-```
-
-If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following.
-
-```go
-import (
- "log/syslog"
- "github.com/sirupsen/logrus"
- lSyslog "github.com/sirupsen/logrus/hooks/syslog"
-)
-
-func main() {
- log := logrus.New()
- hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
-
- if err == nil {
- log.Hooks.Add(hook)
- }
-}
-```
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
deleted file mode 100644
index 329ce0d..0000000
--- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// +build !windows,!nacl,!plan9
-
-package syslog
-
-import (
- "fmt"
- "log/syslog"
- "os"
-
- "github.com/sirupsen/logrus"
-)
-
-// SyslogHook to send logs via syslog.
-type SyslogHook struct {
- Writer *syslog.Writer
- SyslogNetwork string
- SyslogRaddr string
-}
-
-// Creates a hook to be added to an instance of logger. This is called with
-// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
-// `if err == nil { log.Hooks.Add(hook) }`
-func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
- w, err := syslog.Dial(network, raddr, priority, tag)
- return &SyslogHook{w, network, raddr}, err
-}
-
-func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
- line, err := entry.String()
- if err != nil {
- fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
- return err
- }
-
- switch entry.Level {
- case logrus.PanicLevel:
- return hook.Writer.Crit(line)
- case logrus.FatalLevel:
- return hook.Writer.Crit(line)
- case logrus.ErrorLevel:
- return hook.Writer.Err(line)
- case logrus.WarnLevel:
- return hook.Writer.Warning(line)
- case logrus.InfoLevel:
- return hook.Writer.Info(line)
- case logrus.DebugLevel:
- return hook.Writer.Debug(line)
- default:
- return nil
- }
-}
-
-func (hook *SyslogHook) Levels() []logrus.Level {
- return logrus.AllLevels
-}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
deleted file mode 100644
index 5ec3a44..0000000
--- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package syslog
-
-import (
- "log/syslog"
- "testing"
-
- "github.com/sirupsen/logrus"
-)
-
-func TestLocalhostAddAndPrint(t *testing.T) {
- log := logrus.New()
- hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
-
- if err != nil {
- t.Errorf("Unable to connect to local syslog.")
- }
-
- log.Hooks.Add(hook)
-
- for _, level := range hook.Levels() {
- if len(log.Hooks[level]) != 1 {
- t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
- }
- }
-
- log.Info("Congratulations!")
-}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go
deleted file mode 100644
index 62c4845..0000000
--- a/vendor/github.com/Sirupsen/logrus/hooks/test/test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// The Test package is used for testing logrus. It is here for backwards
-// compatibility from when logrus' organization was upper-case. Please use
-// lower-case logrus and the `null` package instead of this one.
-package test
-
-import (
- "io/ioutil"
- "sync"
-
- "github.com/sirupsen/logrus"
-)
-
-// Hook is a hook designed for dealing with logs in test scenarios.
-type Hook struct {
- // Entries is an array of all entries that have been received by this hook.
- // For safe access, use the AllEntries() method, rather than reading this
- // value directly.
- Entries []*logrus.Entry
- mu sync.RWMutex
-}
-
-// NewGlobal installs a test hook for the global logger.
-func NewGlobal() *Hook {
-
- hook := new(Hook)
- logrus.AddHook(hook)
-
- return hook
-
-}
-
-// NewLocal installs a test hook for a given local logger.
-func NewLocal(logger *logrus.Logger) *Hook {
-
- hook := new(Hook)
- logger.Hooks.Add(hook)
-
- return hook
-
-}
-
-// NewNullLogger creates a discarding logger and installs the test hook.
-func NewNullLogger() (*logrus.Logger, *Hook) {
-
- logger := logrus.New()
- logger.Out = ioutil.Discard
-
- return logger, NewLocal(logger)
-
-}
-
-func (t *Hook) Fire(e *logrus.Entry) error {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.Entries = append(t.Entries, e)
- return nil
-}
-
-func (t *Hook) Levels() []logrus.Level {
- return logrus.AllLevels
-}
-
-// LastEntry returns the last entry that was logged or nil.
-func (t *Hook) LastEntry() *logrus.Entry {
- t.mu.RLock()
- defer t.mu.RUnlock()
- i := len(t.Entries) - 1
- if i < 0 {
- return nil
- }
- // Make a copy, for safety
- e := *t.Entries[i]
- return &e
-}
-
-// AllEntries returns all entries that were logged.
-func (t *Hook) AllEntries() []*logrus.Entry {
- t.mu.RLock()
- defer t.mu.RUnlock()
- // Make a copy so the returned value won't race with future log requests
- entries := make([]*logrus.Entry, len(t.Entries))
- for i, entry := range t.Entries {
- // Make a copy, for safety
- e := *entry
- entries[i] = &e
- }
- return entries
-}
-
-// Reset removes all Entries from this test hook.
-func (t *Hook) Reset() {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.Entries = make([]*logrus.Entry, 0)
-}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go
deleted file mode 100644
index 3f55cfe..0000000
--- a/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package test
-
-import (
- "testing"
-
- "github.com/sirupsen/logrus"
- "github.com/stretchr/testify/assert"
-)
-
-func TestAllHooks(t *testing.T) {
-
- assert := assert.New(t)
-
- logger, hook := NewNullLogger()
- assert.Nil(hook.LastEntry())
- assert.Equal(0, len(hook.Entries))
-
- logger.Error("Hello error")
- assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
- assert.Equal("Hello error", hook.LastEntry().Message)
- assert.Equal(1, len(hook.Entries))
-
- logger.Warn("Hello warning")
- assert.Equal(logrus.WarnLevel, hook.LastEntry().Level)
- assert.Equal("Hello warning", hook.LastEntry().Message)
- assert.Equal(2, len(hook.Entries))
-
- hook.Reset()
- assert.Nil(hook.LastEntry())
- assert.Equal(0, len(hook.Entries))
-
- hook = NewGlobal()
-
- logrus.Error("Hello error")
- assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
- assert.Equal("Hello error", hook.LastEntry().Message)
- assert.Equal(1, len(hook.Entries))
-
-}
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go
deleted file mode 100644
index 51093a7..0000000
--- a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package logrus
-
-import (
- "encoding/json"
- "errors"
- "strings"
- "testing"
-)
-
-func TestErrorNotLost(t *testing.T) {
- formatter := &JSONFormatter{}
-
- b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
- if err != nil {
- t.Fatal("Unable to format entry: ", err)
- }
-
- entry := make(map[string]interface{})
- err = json.Unmarshal(b, &entry)
- if err != nil {
- t.Fatal("Unable to unmarshal formatted entry: ", err)
- }
-
- if entry["error"] != "wild walrus" {
- t.Fatal("Error field not set")
- }
-}
-
-func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
- formatter := &JSONFormatter{}
-
- b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
- if err != nil {
- t.Fatal("Unable to format entry: ", err)
- }
-
- entry := make(map[string]interface{})
- err = json.Unmarshal(b, &entry)
- if err != nil {
- t.Fatal("Unable to unmarshal formatted entry: ", err)
- }
-
- if entry["omg"] != "wild walrus" {
- t.Fatal("Error field not set")
- }
-}
-
-func TestFieldClashWithTime(t *testing.T) {
- formatter := &JSONFormatter{}
-
- b, err := formatter.Format(WithField("time", "right now!"))
- if err != nil {
- t.Fatal("Unable to format entry: ", err)
- }
-
- entry := make(map[string]interface{})
- err = json.Unmarshal(b, &entry)
- if err != nil {
- t.Fatal("Unable to unmarshal formatted entry: ", err)
- }
-
- if entry["fields.time"] != "right now!" {
- t.Fatal("fields.time not set to original time field")
- }
-
- if entry["time"] != "0001-01-01T00:00:00Z" {
- t.Fatal("time field not set to current time, was: ", entry["time"])
- }
-}
-
-func TestFieldClashWithMsg(t *testing.T) {
- formatter := &JSONFormatter{}
-
- b, err := formatter.Format(WithField("msg", "something"))
- if err != nil {
- t.Fatal("Unable to format entry: ", err)
- }
-
- entry := make(map[string]interface{})
- err = json.Unmarshal(b, &entry)
- if err != nil {
- t.Fatal("Unable to unmarshal formatted entry: ", err)
- }
-
- if entry["fields.msg"] != "something" {
- t.Fatal("fields.msg not set to original msg field")
- }
-}
-
-func TestFieldClashWithLevel(t *testing.T) {
- formatter := &JSONFormatter{}
-
- b, err := formatter.Format(WithField("level", "something"))
- if err != nil {
- t.Fatal("Unable to format entry: ", err)
- }
-
- entry := make(map[string]interface{})
- err = json.Unmarshal(b, &entry)
- if err != nil {
- t.Fatal("Unable to unmarshal formatted entry: ", err)
- }
-
- if entry["fields.level"] != "something" {
- t.Fatal("fields.level not set to original level field")
- }
-}
-
-func TestJSONEntryEndsWithNewline(t *testing.T) {
- formatter := &JSONFormatter{}
-
- b, err := formatter.Format(WithField("level", "something"))
- if err != nil {
- t.Fatal("Unable to format entry: ", err)
- }
-
- if b[len(b)-1] != '\n' {
- t.Fatal("Expected JSON log entry to end with a newline")
- }
-}
-
-func TestJSONMessageKey(t *testing.T) {
- formatter := &JSONFormatter{
- FieldMap: FieldMap{
- FieldKeyMsg: "message",
- },
- }
-
- b, err := formatter.Format(&Entry{Message: "oh hai"})
- if err != nil {
- t.Fatal("Unable to format entry: ", err)
- }
- s := string(b)
- if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) {
- t.Fatal("Expected JSON to format message key")
- }
-}
-
-func TestJSONLevelKey(t *testing.T) {
- formatter := &JSONFormatter{
- FieldMap: FieldMap{
- FieldKeyLevel: "somelevel",
- },
- }
-
- b, err := formatter.Format(WithField("level", "something"))
- if err != nil {
- t.Fatal("Unable to format entry: ", err)
- }
- s := string(b)
- if !strings.Contains(s, "somelevel") {
- t.Fatal("Expected JSON to format level key")
- }
-}
-
-func TestJSONTimeKey(t *testing.T) {
- formatter := &JSONFormatter{
- FieldMap: FieldMap{
- FieldKeyTime: "timeywimey",
- },
- }
-
- b, err := formatter.Format(WithField("level", "something"))
- if err != nil {
- t.Fatal("Unable to format entry: ", err)
- }
- s := string(b)
- if !strings.Contains(s, "timeywimey") {
- t.Fatal("Expected JSON to format time key")
- }
-}
-
-func TestJSONDisableTimestamp(t *testing.T) {
- formatter := &JSONFormatter{
- DisableTimestamp: true,
- }
-
- b, err := formatter.Format(WithField("level", "something"))
- if err != nil {
- t.Fatal("Unable to format entry: ", err)
- }
- s := string(b)
- if strings.Contains(s, FieldKeyTime) {
- t.Error("Did not prevent timestamp", s)
- }
-}
-
-func TestJSONEnableTimestamp(t *testing.T) {
- formatter := &JSONFormatter{}
-
- b, err := formatter.Format(WithField("level", "something"))
- if err != nil {
- t.Fatal("Unable to format entry: ", err)
- }
- s := string(b)
- if !strings.Contains(s, FieldKeyTime) {
- t.Error("Timestamp not present", s)
- }
-}
diff --git a/vendor/github.com/Sirupsen/logrus/logger_bench_test.go b/vendor/github.com/Sirupsen/logrus/logger_bench_test.go
deleted file mode 100644
index dd23a35..0000000
--- a/vendor/github.com/Sirupsen/logrus/logger_bench_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package logrus
-
-import (
- "os"
- "testing"
-)
-
-// smallFields is a small size data set for benchmarking
-var loggerFields = Fields{
- "foo": "bar",
- "baz": "qux",
- "one": "two",
- "three": "four",
-}
-
-func BenchmarkDummyLogger(b *testing.B) {
- nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666)
- if err != nil {
- b.Fatalf("%v", err)
- }
- defer nullf.Close()
- doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
-}
-
-func BenchmarkDummyLoggerNoLock(b *testing.B) {
- nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666)
- if err != nil {
- b.Fatalf("%v", err)
- }
- defer nullf.Close()
- doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
-}
-
-func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
- logger := Logger{
- Out: out,
- Level: InfoLevel,
- Formatter: formatter,
- }
- entry := logger.WithFields(fields)
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- entry.Info("aaa")
- }
- })
-}
-
-func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
- logger := Logger{
- Out: out,
- Level: InfoLevel,
- Formatter: formatter,
- }
- logger.SetNoLock()
- entry := logger.WithFields(fields)
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- entry.Info("aaa")
- }
- })
-}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go
deleted file mode 100644
index 78cbc28..0000000
--- a/vendor/github.com/Sirupsen/logrus/logrus_test.go
+++ /dev/null
@@ -1,386 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "encoding/json"
- "strconv"
- "strings"
- "sync"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
- var buffer bytes.Buffer
- var fields Fields
-
- logger := New()
- logger.Out = &buffer
- logger.Formatter = new(JSONFormatter)
-
- log(logger)
-
- err := json.Unmarshal(buffer.Bytes(), &fields)
- assert.Nil(t, err)
-
- assertions(fields)
-}
-
-func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
- var buffer bytes.Buffer
-
- logger := New()
- logger.Out = &buffer
- logger.Formatter = &TextFormatter{
- DisableColors: true,
- }
-
- log(logger)
-
- fields := make(map[string]string)
- for _, kv := range strings.Split(buffer.String(), " ") {
- if !strings.Contains(kv, "=") {
- continue
- }
- kvArr := strings.Split(kv, "=")
- key := strings.TrimSpace(kvArr[0])
- val := kvArr[1]
- if kvArr[1][0] == '"' {
- var err error
- val, err = strconv.Unquote(val)
- assert.NoError(t, err)
- }
- fields[key] = val
- }
- assertions(fields)
-}
-
-func TestPrint(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.Print("test")
- }, func(fields Fields) {
- assert.Equal(t, fields["msg"], "test")
- assert.Equal(t, fields["level"], "info")
- })
-}
-
-func TestInfo(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.Info("test")
- }, func(fields Fields) {
- assert.Equal(t, fields["msg"], "test")
- assert.Equal(t, fields["level"], "info")
- })
-}
-
-func TestWarn(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.Warn("test")
- }, func(fields Fields) {
- assert.Equal(t, fields["msg"], "test")
- assert.Equal(t, fields["level"], "warning")
- })
-}
-
-func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.Infoln("test", "test")
- }, func(fields Fields) {
- assert.Equal(t, fields["msg"], "test test")
- })
-}
-
-func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.Infoln("test", 10)
- }, func(fields Fields) {
- assert.Equal(t, fields["msg"], "test 10")
- })
-}
-
-func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.Infoln(10, 10)
- }, func(fields Fields) {
- assert.Equal(t, fields["msg"], "10 10")
- })
-}
-
-func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.Infoln(10, 10)
- }, func(fields Fields) {
- assert.Equal(t, fields["msg"], "10 10")
- })
-}
-
-func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.Info("test", 10)
- }, func(fields Fields) {
- assert.Equal(t, fields["msg"], "test10")
- })
-}
-
-func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.Info("test", "test")
- }, func(fields Fields) {
- assert.Equal(t, fields["msg"], "testtest")
- })
-}
-
-func TestWithFieldsShouldAllowAssignments(t *testing.T) {
- var buffer bytes.Buffer
- var fields Fields
-
- logger := New()
- logger.Out = &buffer
- logger.Formatter = new(JSONFormatter)
-
- localLog := logger.WithFields(Fields{
- "key1": "value1",
- })
-
- localLog.WithField("key2", "value2").Info("test")
- err := json.Unmarshal(buffer.Bytes(), &fields)
- assert.Nil(t, err)
-
- assert.Equal(t, "value2", fields["key2"])
- assert.Equal(t, "value1", fields["key1"])
-
- buffer = bytes.Buffer{}
- fields = Fields{}
- localLog.Info("test")
- err = json.Unmarshal(buffer.Bytes(), &fields)
- assert.Nil(t, err)
-
- _, ok := fields["key2"]
- assert.Equal(t, false, ok)
- assert.Equal(t, "value1", fields["key1"])
-}
-
-func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.WithField("msg", "hello").Info("test")
- }, func(fields Fields) {
- assert.Equal(t, fields["msg"], "test")
- })
-}
-
-func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.WithField("msg", "hello").Info("test")
- }, func(fields Fields) {
- assert.Equal(t, fields["msg"], "test")
- assert.Equal(t, fields["fields.msg"], "hello")
- })
-}
-
-func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.WithField("time", "hello").Info("test")
- }, func(fields Fields) {
- assert.Equal(t, fields["fields.time"], "hello")
- })
-}
-
-func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
- LogAndAssertJSON(t, func(log *Logger) {
- log.WithField("level", 1).Info("test")
- }, func(fields Fields) {
- assert.Equal(t, fields["level"], "info")
- assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
- })
-}
-
-func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
- LogAndAssertText(t, func(log *Logger) {
- ll := log.WithField("herp", "derp")
- ll.Info("hello")
- ll.Info("bye")
- }, func(fields map[string]string) {
- for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
- if _, ok := fields[fieldName]; ok {
- t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
- }
- }
- })
-}
-
-func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
-
- var buffer bytes.Buffer
- var fields Fields
-
- logger := New()
- logger.Out = &buffer
- logger.Formatter = new(JSONFormatter)
-
- llog := logger.WithField("context", "eating raw fish")
-
- llog.Info("looks delicious")
-
- err := json.Unmarshal(buffer.Bytes(), &fields)
- assert.NoError(t, err, "should have decoded first message")
- assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
- assert.Equal(t, fields["msg"], "looks delicious")
- assert.Equal(t, fields["context"], "eating raw fish")
-
- buffer.Reset()
-
- llog.Warn("omg it is!")
-
- err = json.Unmarshal(buffer.Bytes(), &fields)
- assert.NoError(t, err, "should have decoded second message")
- assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
- assert.Equal(t, fields["msg"], "omg it is!")
- assert.Equal(t, fields["context"], "eating raw fish")
- assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
-
-}
-
-func TestConvertLevelToString(t *testing.T) {
- assert.Equal(t, "debug", DebugLevel.String())
- assert.Equal(t, "info", InfoLevel.String())
- assert.Equal(t, "warning", WarnLevel.String())
- assert.Equal(t, "error", ErrorLevel.String())
- assert.Equal(t, "fatal", FatalLevel.String())
- assert.Equal(t, "panic", PanicLevel.String())
-}
-
-func TestParseLevel(t *testing.T) {
- l, err := ParseLevel("panic")
- assert.Nil(t, err)
- assert.Equal(t, PanicLevel, l)
-
- l, err = ParseLevel("PANIC")
- assert.Nil(t, err)
- assert.Equal(t, PanicLevel, l)
-
- l, err = ParseLevel("fatal")
- assert.Nil(t, err)
- assert.Equal(t, FatalLevel, l)
-
- l, err = ParseLevel("FATAL")
- assert.Nil(t, err)
- assert.Equal(t, FatalLevel, l)
-
- l, err = ParseLevel("error")
- assert.Nil(t, err)
- assert.Equal(t, ErrorLevel, l)
-
- l, err = ParseLevel("ERROR")
- assert.Nil(t, err)
- assert.Equal(t, ErrorLevel, l)
-
- l, err = ParseLevel("warn")
- assert.Nil(t, err)
- assert.Equal(t, WarnLevel, l)
-
- l, err = ParseLevel("WARN")
- assert.Nil(t, err)
- assert.Equal(t, WarnLevel, l)
-
- l, err = ParseLevel("warning")
- assert.Nil(t, err)
- assert.Equal(t, WarnLevel, l)
-
- l, err = ParseLevel("WARNING")
- assert.Nil(t, err)
- assert.Equal(t, WarnLevel, l)
-
- l, err = ParseLevel("info")
- assert.Nil(t, err)
- assert.Equal(t, InfoLevel, l)
-
- l, err = ParseLevel("INFO")
- assert.Nil(t, err)
- assert.Equal(t, InfoLevel, l)
-
- l, err = ParseLevel("debug")
- assert.Nil(t, err)
- assert.Equal(t, DebugLevel, l)
-
- l, err = ParseLevel("DEBUG")
- assert.Nil(t, err)
- assert.Equal(t, DebugLevel, l)
-
- l, err = ParseLevel("invalid")
- assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
-}
-
-func TestGetSetLevelRace(t *testing.T) {
- wg := sync.WaitGroup{}
- for i := 0; i < 100; i++ {
- wg.Add(1)
- go func(i int) {
- defer wg.Done()
- if i%2 == 0 {
- SetLevel(InfoLevel)
- } else {
- GetLevel()
- }
- }(i)
-
- }
- wg.Wait()
-}
-
-func TestLoggingRace(t *testing.T) {
- logger := New()
-
- var wg sync.WaitGroup
- wg.Add(100)
-
- for i := 0; i < 100; i++ {
- go func() {
- logger.Info("info")
- wg.Done()
- }()
- }
- wg.Wait()
-}
-
-// Compile test
-func TestLogrusInterface(t *testing.T) {
- var buffer bytes.Buffer
- fn := func(l FieldLogger) {
- b := l.WithField("key", "value")
- b.Debug("Test")
- }
- // test logger
- logger := New()
- logger.Out = &buffer
- fn(logger)
-
- // test Entry
- e := logger.WithField("another", "value")
- fn(e)
-}
-
-// Implements io.Writer using channels for synchronization, so we can wait on
-// the Entry.Writer goroutine to write in a non-racey way. This does assume that
-// there is a single call to Logger.Out for each message.
-type channelWriter chan []byte
-
-func (cw channelWriter) Write(p []byte) (int, error) {
- cw <- p
- return len(p), nil
-}
-
-func TestEntryWriter(t *testing.T) {
- cw := channelWriter(make(chan []byte, 1))
- log := New()
- log.Out = cw
- log.Formatter = new(JSONFormatter)
- log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n"))
-
- bs := <-cw
- var fields Fields
- err := json.Unmarshal(bs, &fields)
- assert.Nil(t, err)
- assert.Equal(t, fields["foo"], "bar")
- assert.Equal(t, fields["level"], "warning")
-}
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go
deleted file mode 100644
index d93b931..0000000
--- a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strings"
- "testing"
- "time"
-)
-
-func TestFormatting(t *testing.T) {
- tf := &TextFormatter{DisableColors: true}
-
- testCases := []struct {
- value string
- expected string
- }{
- {`foo`, "time=\"0001-01-01T00:00:00Z\" level=panic test=foo\n"},
- }
-
- for _, tc := range testCases {
- b, _ := tf.Format(WithField("test", tc.value))
-
- if string(b) != tc.expected {
- t.Errorf("formatting expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
- }
- }
-}
-
-func TestQuoting(t *testing.T) {
- tf := &TextFormatter{DisableColors: true}
-
- checkQuoting := func(q bool, value interface{}) {
- b, _ := tf.Format(WithField("test", value))
- idx := bytes.Index(b, ([]byte)("test="))
- cont := bytes.Contains(b[idx+5:], []byte("\""))
- if cont != q {
- if q {
- t.Errorf("quoting expected for: %#v", value)
- } else {
- t.Errorf("quoting not expected for: %#v", value)
- }
- }
- }
-
- checkQuoting(false, "")
- checkQuoting(false, "abcd")
- checkQuoting(false, "v1.0")
- checkQuoting(false, "1234567890")
- checkQuoting(false, "/foobar")
- checkQuoting(false, "foo_bar")
- checkQuoting(false, "foo@bar")
- checkQuoting(false, "foobar^")
- checkQuoting(false, "+/-_^@f.oobar")
- checkQuoting(true, "foobar$")
- checkQuoting(true, "&foobar")
- checkQuoting(true, "x y")
- checkQuoting(true, "x,y")
- checkQuoting(false, errors.New("invalid"))
- checkQuoting(true, errors.New("invalid argument"))
-
- // Test for quoting empty fields.
- tf.QuoteEmptyFields = true
- checkQuoting(true, "")
- checkQuoting(false, "abcd")
- checkQuoting(true, errors.New("invalid argument"))
-}
-
-func TestEscaping(t *testing.T) {
- tf := &TextFormatter{DisableColors: true}
-
- testCases := []struct {
- value string
- expected string
- }{
- {`ba"r`, `ba\"r`},
- {`ba'r`, `ba'r`},
- }
-
- for _, tc := range testCases {
- b, _ := tf.Format(WithField("test", tc.value))
- if !bytes.Contains(b, []byte(tc.expected)) {
- t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
- }
- }
-}
-
-func TestEscaping_Interface(t *testing.T) {
- tf := &TextFormatter{DisableColors: true}
-
- ts := time.Now()
-
- testCases := []struct {
- value interface{}
- expected string
- }{
- {ts, fmt.Sprintf("\"%s\"", ts.String())},
- {errors.New("error: something went wrong"), "\"error: something went wrong\""},
- }
-
- for _, tc := range testCases {
- b, _ := tf.Format(WithField("test", tc.value))
- if !bytes.Contains(b, []byte(tc.expected)) {
- t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
- }
- }
-}
-
-func TestTimestampFormat(t *testing.T) {
- checkTimeStr := func(format string) {
- customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
- customStr, _ := customFormatter.Format(WithField("test", "test"))
- timeStart := bytes.Index(customStr, ([]byte)("time="))
- timeEnd := bytes.Index(customStr, ([]byte)("level="))
- timeStr := customStr[timeStart+5+len("\"") : timeEnd-1-len("\"")]
- if format == "" {
- format = time.RFC3339
- }
- _, e := time.Parse(format, (string)(timeStr))
- if e != nil {
- t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
- }
- }
-
- checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
- checkTimeStr("Mon Jan _2 15:04:05 2006")
- checkTimeStr("")
-}
-
-func TestDisableTimestampWithColoredOutput(t *testing.T) {
- tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}
-
- b, _ := tf.Format(WithField("test", "test"))
- if strings.Contains(string(b), "[0000]") {
- t.Error("timestamp not expected when DisableTimestamp is true")
- }
-}
-
-// TODO add tests for sorting etc., this requires a parser for the text
-// formatter output.
diff --git a/vendor/github.com/fatih/structs/.travis.yml b/vendor/github.com/fatih/structs/.travis.yml
deleted file mode 100644
index cbf2ccc..0000000
--- a/vendor/github.com/fatih/structs/.travis.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-language: go
-go:
- - 1.7.x
- - tip
-sudo: false
-before_install:
-- go get github.com/axw/gocov/gocov
-- go get github.com/mattn/goveralls
-- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
-script:
-- $HOME/gopath/bin/goveralls -service=travis-ci
diff --git a/vendor/github.com/fatih/structs/README.md b/vendor/github.com/fatih/structs/README.md
deleted file mode 100644
index 44e0100..0000000
--- a/vendor/github.com/fatih/structs/README.md
+++ /dev/null
@@ -1,163 +0,0 @@
-# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs)
-
-Structs contains various utilities to work with Go (Golang) structs. It was
-initially used by me to convert a struct into a `map[string]interface{}`. With
-time I've added other utilities for structs. It's basically a high level
-package based on primitives from the reflect package. Feel free to add new
-functions or improve the existing code.
-
-## Install
-
-```bash
-go get github.com/fatih/structs
-```
-
-## Usage and Examples
-
-Just like the standard lib `strings`, `bytes` and co packages, `structs` has
-many global functions to manipulate or organize your struct data. Lets define
-and declare a struct:
-
-```go
-type Server struct {
- Name string `json:"name,omitempty"`
- ID int
- Enabled bool
- users []string // not exported
- http.Server // embedded
-}
-
-server := &Server{
- Name: "gopher",
- ID: 123456,
- Enabled: true,
-}
-```
-
-```go
-// Convert a struct to a map[string]interface{}
-// => {"Name":"gopher", "ID":123456, "Enabled":true}
-m := structs.Map(server)
-
-// Convert the values of a struct to a []interface{}
-// => ["gopher", 123456, true]
-v := structs.Values(server)
-
-// Convert the names of a struct to a []string
-// (see "Names methods" for more info about fields)
-n := structs.Names(server)
-
-// Convert the values of a struct to a []*Field
-// (see "Field methods" for more info about fields)
-f := structs.Fields(server)
-
-// Return the struct name => "Server"
-n := structs.Name(server)
-
-// Check if any field of a struct is initialized or not.
-h := structs.HasZero(server)
-
-// Check if all fields of a struct is initialized or not.
-z := structs.IsZero(server)
-
-// Check if server is a struct or a pointer to struct
-i := structs.IsStruct(server)
-```
-
-### Struct methods
-
-The structs functions can be also used as independent methods by creating a new
-`*structs.Struct`. This is handy if you want to have more control over the
-structs (such as retrieving a single Field).
-
-```go
-// Create a new struct type:
-s := structs.New(server)
-
-m := s.Map() // Get a map[string]interface{}
-v := s.Values() // Get a []interface{}
-f := s.Fields() // Get a []*Field
-n := s.Names() // Get a []string
-f := s.Field(name) // Get a *Field based on the given field name
-f, ok := s.FieldOk(name) // Get a *Field based on the given field name
-n := s.Name() // Get the struct name
-h := s.HasZero() // Check if any field is initialized
-z := s.IsZero() // Check if all fields are initialized
-```
-
-### Field methods
-
-We can easily examine a single Field for more detail. Below you can see how we
-get and interact with various field methods:
-
-
-```go
-s := structs.New(server)
-
-// Get the Field struct for the "Name" field
-name := s.Field("Name")
-
-// Get the underlying value, value => "gopher"
-value := name.Value().(string)
-
-// Set the field's value
-name.Set("another gopher")
-
-// Get the field's kind, kind => "string"
-name.Kind()
-
-// Check if the field is exported or not
-if name.IsExported() {
- fmt.Println("Name field is exported")
-}
-
-// Check if the value is a zero value, such as "" for string, 0 for int
-if !name.IsZero() {
- fmt.Println("Name is initialized")
-}
-
-// Check if the field is an anonymous (embedded) field
-if !name.IsEmbedded() {
- fmt.Println("Name is not an embedded field")
-}
-
-// Get the Field's tag value for tag name "json", tag value => "name,omitempty"
-tagValue := name.Tag("json")
-```
-
-Nested structs are supported too:
-
-```go
-addrField := s.Field("Server").Field("Addr")
-
-// Get the value for addr
-a := addrField.Value().(string)
-
-// Or get all fields
-httpServer := s.Field("Server").Fields()
-```
-
-We can also get a slice of Fields from the Struct type to iterate over all
-fields. This is handy if you wish to examine all fields:
-
-```go
-s := structs.New(server)
-
-for _, f := range s.Fields() {
- fmt.Printf("field name: %+v\n", f.Name())
-
- if f.IsExported() {
- fmt.Printf("value : %+v\n", f.Value())
- fmt.Printf("is zero : %+v\n", f.IsZero())
- }
-}
-```
-
-## Credits
-
- * [Fatih Arslan](https://github.com/fatih)
- * [Cihangir Savas](https://github.com/cihangir)
-
-## License
-
-The MIT License (MIT) - see LICENSE.md for more details
diff --git a/vendor/github.com/fatih/structs/field.go b/vendor/github.com/fatih/structs/field.go
deleted file mode 100644
index e697832..0000000
--- a/vendor/github.com/fatih/structs/field.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package structs
-
-import (
- "errors"
- "fmt"
- "reflect"
-)
-
-var (
- errNotExported = errors.New("field is not exported")
- errNotSettable = errors.New("field is not settable")
-)
-
-// Field represents a single struct field that encapsulates high level
-// functions around the field.
-type Field struct {
- value reflect.Value
- field reflect.StructField
- defaultTag string
-}
-
-// Tag returns the value associated with key in the tag string. If there is no
-// such key in the tag, Tag returns the empty string.
-func (f *Field) Tag(key string) string {
- return f.field.Tag.Get(key)
-}
-
-// Value returns the underlying value of the field. It panics if the field
-// is not exported.
-func (f *Field) Value() interface{} {
- return f.value.Interface()
-}
-
-// IsEmbedded returns true if the given field is an anonymous field (embedded)
-func (f *Field) IsEmbedded() bool {
- return f.field.Anonymous
-}
-
-// IsExported returns true if the given field is exported.
-func (f *Field) IsExported() bool {
- return f.field.PkgPath == ""
-}
-
-// IsZero returns true if the given field is not initialized (has a zero value).
-// It panics if the field is not exported.
-func (f *Field) IsZero() bool {
- zero := reflect.Zero(f.value.Type()).Interface()
- current := f.Value()
-
- return reflect.DeepEqual(current, zero)
-}
-
-// Name returns the name of the given field
-func (f *Field) Name() string {
- return f.field.Name
-}
-
-// Kind returns the fields kind, such as "string", "map", "bool", etc ..
-func (f *Field) Kind() reflect.Kind {
- return f.value.Kind()
-}
-
-// Set sets the field to given value v. It returns an error if the field is not
-// settable (not addressable or not exported) or if the given value's type
-// doesn't match the fields type.
-func (f *Field) Set(val interface{}) error {
- // we can't set unexported fields, so be sure this field is exported
- if !f.IsExported() {
- return errNotExported
- }
-
- // do we get here? not sure...
- if !f.value.CanSet() {
- return errNotSettable
- }
-
- given := reflect.ValueOf(val)
-
- if f.value.Kind() != given.Kind() {
- return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind())
- }
-
- f.value.Set(given)
- return nil
-}
-
-// Zero sets the field to its zero value. It returns an error if the field is not
-// settable (not addressable or not exported).
-func (f *Field) Zero() error {
- zero := reflect.Zero(f.value.Type()).Interface()
- return f.Set(zero)
-}
-
-// Fields returns a slice of Fields. This is particular handy to get the fields
-// of a nested struct . A struct tag with the content of "-" ignores the
-// checking of that particular field. Example:
-//
-// // Field is ignored by this package.
-// Field *http.Request `structs:"-"`
-//
-// It panics if field is not exported or if field's kind is not struct
-func (f *Field) Fields() []*Field {
- return getFields(f.value, f.defaultTag)
-}
-
-// Field returns the field from a nested struct. It panics if the nested struct
-// is not exported or if the field was not found.
-func (f *Field) Field(name string) *Field {
- field, ok := f.FieldOk(name)
- if !ok {
- panic("field not found")
- }
-
- return field
-}
-
-// FieldOk returns the field from a nested struct. The boolean returns whether
-// the field was found (true) or not (false).
-func (f *Field) FieldOk(name string) (*Field, bool) {
- value := &f.value
- // value must be settable so we need to make sure it holds the address of the
- // variable and not a copy, so we can pass the pointer to strctVal instead of a
- // copy (which is not assigned to any variable, hence not settable).
- // see "https://blog.golang.org/laws-of-reflection#TOC_8."
- if f.value.Kind() != reflect.Ptr {
- a := f.value.Addr()
- value = &a
- }
- v := strctVal(value.Interface())
- t := v.Type()
-
- field, ok := t.FieldByName(name)
- if !ok {
- return nil, false
- }
-
- return &Field{
- field: field,
- value: v.FieldByName(name),
- }, true
-}
diff --git a/vendor/github.com/fatih/structs/field_test.go b/vendor/github.com/fatih/structs/field_test.go
deleted file mode 100644
index de9dc3b..0000000
--- a/vendor/github.com/fatih/structs/field_test.go
+++ /dev/null
@@ -1,397 +0,0 @@
-package structs
-
-import (
- "reflect"
- "testing"
-)
-
-// A test struct that defines all cases
-type Foo struct {
- A string
- B int `structs:"y"`
- C bool `json:"c"`
- d string // not exported
- E *Baz
- x string `xml:"x"` // not exported, with tag
- Y []string
- Z map[string]interface{}
- *Bar // embedded
-}
-
-type Baz struct {
- A string
- B int
-}
-
-type Bar struct {
- E string
- F int
- g []string
-}
-
-func newStruct() *Struct {
- b := &Bar{
- E: "example",
- F: 2,
- g: []string{"zeynep", "fatih"},
- }
-
- // B and x is not initialized for testing
- f := &Foo{
- A: "gopher",
- C: true,
- d: "small",
- E: nil,
- Y: []string{"example"},
- Z: nil,
- }
- f.Bar = b
-
- return New(f)
-}
-
-func TestField_Set(t *testing.T) {
- s := newStruct()
-
- f := s.Field("A")
- err := f.Set("fatih")
- if err != nil {
- t.Error(err)
- }
-
- if f.Value().(string) != "fatih" {
- t.Errorf("Setted value is wrong: %s want: %s", f.Value().(string), "fatih")
- }
-
- f = s.Field("Y")
- err = f.Set([]string{"override", "with", "this"})
- if err != nil {
- t.Error(err)
- }
-
- sliceLen := len(f.Value().([]string))
- if sliceLen != 3 {
- t.Errorf("Setted values slice length is wrong: %d, want: %d", sliceLen, 3)
- }
-
- f = s.Field("C")
- err = f.Set(false)
- if err != nil {
- t.Error(err)
- }
-
- if f.Value().(bool) {
- t.Errorf("Setted value is wrong: %t want: %t", f.Value().(bool), false)
- }
-
- // let's pass a different type
- f = s.Field("A")
- err = f.Set(123) // Field A is of type string, but we are going to pass an integer
- if err == nil {
- t.Error("Setting a field's value with a different type than the field's type should return an error")
- }
-
- // old value should be still there :)
- if f.Value().(string) != "fatih" {
- t.Errorf("Setted value is wrong: %s want: %s", f.Value().(string), "fatih")
- }
-
- // let's access an unexported field, which should give an error
- f = s.Field("d")
- err = f.Set("large")
- if err != errNotExported {
- t.Error(err)
- }
-
- // let's set a pointer to struct
- b := &Bar{
- E: "gopher",
- F: 2,
- }
-
- f = s.Field("Bar")
- err = f.Set(b)
- if err != nil {
- t.Error(err)
- }
-
- baz := &Baz{
- A: "helloWorld",
- B: 42,
- }
-
- f = s.Field("E")
- err = f.Set(baz)
- if err != nil {
- t.Error(err)
- }
-
- ba := s.Field("E").Value().(*Baz)
-
- if ba.A != "helloWorld" {
- t.Errorf("could not set baz. Got: %s Want: helloWorld", ba.A)
- }
-}
-
-func TestField_NotSettable(t *testing.T) {
- a := map[int]Baz{
- 4: Baz{
- A: "value",
- },
- }
-
- s := New(a[4])
-
- if err := s.Field("A").Set("newValue"); err != errNotSettable {
- t.Errorf("Trying to set non-settable field should error with %q. Got %q instead.", errNotSettable, err)
- }
-}
-
-func TestField_Zero(t *testing.T) {
- s := newStruct()
-
- f := s.Field("A")
- err := f.Zero()
- if err != nil {
- t.Error(err)
- }
-
- if f.Value().(string) != "" {
- t.Errorf("Zeroed value is wrong: %s want: %s", f.Value().(string), "")
- }
-
- f = s.Field("Y")
- err = f.Zero()
- if err != nil {
- t.Error(err)
- }
-
- sliceLen := len(f.Value().([]string))
- if sliceLen != 0 {
- t.Errorf("Zeroed values slice length is wrong: %d, want: %d", sliceLen, 0)
- }
-
- f = s.Field("C")
- err = f.Zero()
- if err != nil {
- t.Error(err)
- }
-
- if f.Value().(bool) {
- t.Errorf("Zeroed value is wrong: %t want: %t", f.Value().(bool), false)
- }
-
- // let's access an unexported field, which should give an error
- f = s.Field("d")
- err = f.Zero()
- if err != errNotExported {
- t.Error(err)
- }
-
- f = s.Field("Bar")
- err = f.Zero()
- if err != nil {
- t.Error(err)
- }
-
- f = s.Field("E")
- err = f.Zero()
- if err != nil {
- t.Error(err)
- }
-
- v := s.Field("E").value
- if !v.IsNil() {
- t.Errorf("could not set baz. Got: %s Want: ", v.Interface())
- }
-}
-
-func TestField(t *testing.T) {
- s := newStruct()
-
- defer func() {
- err := recover()
- if err == nil {
- t.Error("Retrieveing a non existing field from the struct should panic")
- }
- }()
-
- _ = s.Field("no-field")
-}
-
-func TestField_Kind(t *testing.T) {
- s := newStruct()
-
- f := s.Field("A")
- if f.Kind() != reflect.String {
- t.Errorf("Field A has wrong kind: %s want: %s", f.Kind(), reflect.String)
- }
-
- f = s.Field("B")
- if f.Kind() != reflect.Int {
- t.Errorf("Field B has wrong kind: %s want: %s", f.Kind(), reflect.Int)
- }
-
- // unexported
- f = s.Field("d")
- if f.Kind() != reflect.String {
- t.Errorf("Field d has wrong kind: %s want: %s", f.Kind(), reflect.String)
- }
-}
-
-func TestField_Tag(t *testing.T) {
- s := newStruct()
-
- v := s.Field("B").Tag("json")
- if v != "" {
- t.Errorf("Field's tag value of a non existing tag should return empty, got: %s", v)
- }
-
- v = s.Field("C").Tag("json")
- if v != "c" {
- t.Errorf("Field's tag value of the existing field C should return 'c', got: %s", v)
- }
-
- v = s.Field("d").Tag("json")
- if v != "" {
- t.Errorf("Field's tag value of a non exported field should return empty, got: %s", v)
- }
-
- v = s.Field("x").Tag("xml")
- if v != "x" {
- t.Errorf("Field's tag value of a non exported field with a tag should return 'x', got: %s", v)
- }
-
- v = s.Field("A").Tag("json")
- if v != "" {
- t.Errorf("Field's tag value of a existing field without a tag should return empty, got: %s", v)
- }
-}
-
-func TestField_Value(t *testing.T) {
- s := newStruct()
-
- v := s.Field("A").Value()
- val, ok := v.(string)
- if !ok {
- t.Errorf("Field's value of a A should be string")
- }
-
- if val != "gopher" {
- t.Errorf("Field's value of a existing tag should return 'gopher', got: %s", val)
- }
-
- defer func() {
- err := recover()
- if err == nil {
- t.Error("Value of a non exported field from the field should panic")
- }
- }()
-
- // should panic
- _ = s.Field("d").Value()
-}
-
-func TestField_IsEmbedded(t *testing.T) {
- s := newStruct()
-
- if !s.Field("Bar").IsEmbedded() {
- t.Errorf("Fields 'Bar' field is an embedded field")
- }
-
- if s.Field("d").IsEmbedded() {
- t.Errorf("Fields 'd' field is not an embedded field")
- }
-}
-
-func TestField_IsExported(t *testing.T) {
- s := newStruct()
-
- if !s.Field("Bar").IsExported() {
- t.Errorf("Fields 'Bar' field is an exported field")
- }
-
- if !s.Field("A").IsExported() {
- t.Errorf("Fields 'A' field is an exported field")
- }
-
- if s.Field("d").IsExported() {
- t.Errorf("Fields 'd' field is not an exported field")
- }
-}
-
-func TestField_IsZero(t *testing.T) {
- s := newStruct()
-
- if s.Field("A").IsZero() {
- t.Errorf("Fields 'A' field is an initialized field")
- }
-
- if !s.Field("B").IsZero() {
- t.Errorf("Fields 'B' field is not an initialized field")
- }
-}
-
-func TestField_Name(t *testing.T) {
- s := newStruct()
-
- if s.Field("A").Name() != "A" {
- t.Errorf("Fields 'A' field should have the name 'A'")
- }
-}
-
-func TestField_Field(t *testing.T) {
- s := newStruct()
-
- e := s.Field("Bar").Field("E")
-
- val, ok := e.Value().(string)
- if !ok {
- t.Error("The value of the field 'e' inside 'Bar' struct should be string")
- }
-
- if val != "example" {
- t.Errorf("The value of 'e' should be 'example, got: %s", val)
- }
-
- defer func() {
- err := recover()
- if err == nil {
- t.Error("Field of a non existing nested struct should panic")
- }
- }()
-
- _ = s.Field("Bar").Field("e")
-}
-
-func TestField_Fields(t *testing.T) {
- s := newStruct()
- fields := s.Field("Bar").Fields()
-
- if len(fields) != 3 {
- t.Errorf("We expect 3 fields in embedded struct, was: %d", len(fields))
- }
-}
-
-func TestField_FieldOk(t *testing.T) {
- s := newStruct()
-
- b, ok := s.FieldOk("Bar")
- if !ok {
- t.Error("The field 'Bar' should exists.")
- }
-
- e, ok := b.FieldOk("E")
- if !ok {
- t.Error("The field 'E' should exists.")
- }
-
- val, ok := e.Value().(string)
- if !ok {
- t.Error("The value of the field 'e' inside 'Bar' struct should be string")
- }
-
- if val != "example" {
- t.Errorf("The value of 'e' should be 'example, got: %s", val)
- }
-}
diff --git a/vendor/github.com/fatih/structs/structs.go b/vendor/github.com/fatih/structs/structs.go
deleted file mode 100644
index be3816a..0000000
--- a/vendor/github.com/fatih/structs/structs.go
+++ /dev/null
@@ -1,586 +0,0 @@
-// Package structs contains various utilities functions to work with structs.
-package structs
-
-import (
- "fmt"
-
- "reflect"
-)
-
-var (
- // DefaultTagName is the default tag name for struct fields which provides
- // a more granular to tweak certain structs. Lookup the necessary functions
- // for more info.
- DefaultTagName = "structs" // struct's field default tag name
-)
-
-// Struct encapsulates a struct type to provide several high level functions
-// around the struct.
-type Struct struct {
- raw interface{}
- value reflect.Value
- TagName string
-}
-
-// New returns a new *Struct with the struct s. It panics if the s's kind is
-// not struct.
-func New(s interface{}) *Struct {
- return &Struct{
- raw: s,
- value: strctVal(s),
- TagName: DefaultTagName,
- }
-}
-
-// Map converts the given struct to a map[string]interface{}, where the keys
-// of the map are the field names and the values of the map the associated
-// values of the fields. The default key string is the struct field name but
-// can be changed in the struct field's tag value. The "structs" key in the
-// struct's field tag value is the key name. Example:
-//
-// // Field appears in map as key "myName".
-// Name string `structs:"myName"`
-//
-// A tag value with the content of "-" ignores that particular field. Example:
-//
-// // Field is ignored by this package.
-// Field bool `structs:"-"`
-//
-// A tag value with the content of "string" uses the stringer to get the value. Example:
-//
-// // The value will be output of Animal's String() func.
-// // Map will panic if Animal does not implement String().
-// Field *Animal `structs:"field,string"`
-//
-// A tag value with the option of "flatten" used in a struct field is to flatten its fields
-// in the output map. Example:
-//
-// // The FieldStruct's fields will be flattened into the output map.
-// FieldStruct time.Time `structs:",flatten"`
-//
-// A tag value with the option of "omitnested" stops iterating further if the type
-// is a struct. Example:
-//
-// // Field is not processed further by this package.
-// Field time.Time `structs:"myName,omitnested"`
-// Field *http.Request `structs:",omitnested"`
-//
-// A tag value with the option of "omitempty" ignores that particular field if
-// the field value is empty. Example:
-//
-// // Field appears in map as key "myName", but the field is
-// // skipped if empty.
-// Field string `structs:"myName,omitempty"`
-//
-// // Field appears in map as key "Field" (the default), but
-// // the field is skipped if empty.
-// Field string `structs:",omitempty"`
-//
-// Note that only exported fields of a struct can be accessed, non exported
-// fields will be neglected.
-func (s *Struct) Map() map[string]interface{} {
- out := make(map[string]interface{})
- s.FillMap(out)
- return out
-}
-
-// FillMap is the same as Map. Instead of returning the output, it fills the
-// given map.
-func (s *Struct) FillMap(out map[string]interface{}) {
- if out == nil {
- return
- }
-
- fields := s.structFields()
-
- for _, field := range fields {
- name := field.Name
- val := s.value.FieldByName(name)
- isSubStruct := false
- var finalVal interface{}
-
- tagName, tagOpts := parseTag(field.Tag.Get(s.TagName))
- if tagName != "" {
- name = tagName
- }
-
- // if the value is a zero value and the field is marked as omitempty do
- // not include
- if tagOpts.Has("omitempty") {
- zero := reflect.Zero(val.Type()).Interface()
- current := val.Interface()
-
- if reflect.DeepEqual(current, zero) {
- continue
- }
- }
-
- if !tagOpts.Has("omitnested") {
- finalVal = s.nested(val)
-
- v := reflect.ValueOf(val.Interface())
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
-
- switch v.Kind() {
- case reflect.Map, reflect.Struct:
- isSubStruct = true
- }
- } else {
- finalVal = val.Interface()
- }
-
- if tagOpts.Has("string") {
- s, ok := val.Interface().(fmt.Stringer)
- if ok {
- out[name] = s.String()
- }
- continue
- }
-
- if isSubStruct && (tagOpts.Has("flatten")) {
- for k := range finalVal.(map[string]interface{}) {
- out[k] = finalVal.(map[string]interface{})[k]
- }
- } else {
- out[name] = finalVal
- }
- }
-}
-
-// Values converts the given s struct's field values to a []interface{}. A
-// struct tag with the content of "-" ignores the that particular field.
-// Example:
-//
-// // Field is ignored by this package.
-// Field int `structs:"-"`
-//
-// A value with the option of "omitnested" stops iterating further if the type
-// is a struct. Example:
-//
-// // Fields is not processed further by this package.
-// Field time.Time `structs:",omitnested"`
-// Field *http.Request `structs:",omitnested"`
-//
-// A tag value with the option of "omitempty" ignores that particular field and
-// is not added to the values if the field value is empty. Example:
-//
-// // Field is skipped if empty
-// Field string `structs:",omitempty"`
-//
-// Note that only exported fields of a struct can be accessed, non exported
-// fields will be neglected.
-func (s *Struct) Values() []interface{} {
- fields := s.structFields()
-
- var t []interface{}
-
- for _, field := range fields {
- val := s.value.FieldByName(field.Name)
-
- _, tagOpts := parseTag(field.Tag.Get(s.TagName))
-
- // if the value is a zero value and the field is marked as omitempty do
- // not include
- if tagOpts.Has("omitempty") {
- zero := reflect.Zero(val.Type()).Interface()
- current := val.Interface()
-
- if reflect.DeepEqual(current, zero) {
- continue
- }
- }
-
- if tagOpts.Has("string") {
- s, ok := val.Interface().(fmt.Stringer)
- if ok {
- t = append(t, s.String())
- }
- continue
- }
-
- if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
- // look out for embedded structs, and convert them to a
- // []interface{} to be added to the final values slice
- for _, embeddedVal := range Values(val.Interface()) {
- t = append(t, embeddedVal)
- }
- } else {
- t = append(t, val.Interface())
- }
- }
-
- return t
-}
-
-// Fields returns a slice of Fields. A struct tag with the content of "-"
-// ignores the checking of that particular field. Example:
-//
-// // Field is ignored by this package.
-// Field bool `structs:"-"`
-//
-// It panics if s's kind is not struct.
-func (s *Struct) Fields() []*Field {
- return getFields(s.value, s.TagName)
-}
-
-// Names returns a slice of field names. A struct tag with the content of "-"
-// ignores the checking of that particular field. Example:
-//
-// // Field is ignored by this package.
-// Field bool `structs:"-"`
-//
-// It panics if s's kind is not struct.
-func (s *Struct) Names() []string {
- fields := getFields(s.value, s.TagName)
-
- names := make([]string, len(fields))
-
- for i, field := range fields {
- names[i] = field.Name()
- }
-
- return names
-}
-
-func getFields(v reflect.Value, tagName string) []*Field {
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
-
- t := v.Type()
-
- var fields []*Field
-
- for i := 0; i < t.NumField(); i++ {
- field := t.Field(i)
-
- if tag := field.Tag.Get(tagName); tag == "-" {
- continue
- }
-
- f := &Field{
- field: field,
- value: v.FieldByName(field.Name),
- }
-
- fields = append(fields, f)
-
- }
-
- return fields
-}
-
-// Field returns a new Field struct that provides several high level functions
-// around a single struct field entity. It panics if the field is not found.
-func (s *Struct) Field(name string) *Field {
- f, ok := s.FieldOk(name)
- if !ok {
- panic("field not found")
- }
-
- return f
-}
-
-// FieldOk returns a new Field struct that provides several high level functions
-// around a single struct field entity. The boolean returns true if the field
-// was found.
-func (s *Struct) FieldOk(name string) (*Field, bool) {
- t := s.value.Type()
-
- field, ok := t.FieldByName(name)
- if !ok {
- return nil, false
- }
-
- return &Field{
- field: field,
- value: s.value.FieldByName(name),
- defaultTag: s.TagName,
- }, true
-}
-
-// IsZero returns true if all fields in a struct is a zero value (not
-// initialized) A struct tag with the content of "-" ignores the checking of
-// that particular field. Example:
-//
-// // Field is ignored by this package.
-// Field bool `structs:"-"`
-//
-// A value with the option of "omitnested" stops iterating further if the type
-// is a struct. Example:
-//
-// // Field is not processed further by this package.
-// Field time.Time `structs:"myName,omitnested"`
-// Field *http.Request `structs:",omitnested"`
-//
-// Note that only exported fields of a struct can be accessed, non exported
-// fields will be neglected. It panics if s's kind is not struct.
-func (s *Struct) IsZero() bool {
- fields := s.structFields()
-
- for _, field := range fields {
- val := s.value.FieldByName(field.Name)
-
- _, tagOpts := parseTag(field.Tag.Get(s.TagName))
-
- if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
- ok := IsZero(val.Interface())
- if !ok {
- return false
- }
-
- continue
- }
-
- // zero value of the given field, such as "" for string, 0 for int
- zero := reflect.Zero(val.Type()).Interface()
-
- // current value of the given field
- current := val.Interface()
-
- if !reflect.DeepEqual(current, zero) {
- return false
- }
- }
-
- return true
-}
-
-// HasZero returns true if a field in a struct is not initialized (zero value).
-// A struct tag with the content of "-" ignores the checking of that particular
-// field. Example:
-//
-// // Field is ignored by this package.
-// Field bool `structs:"-"`
-//
-// A value with the option of "omitnested" stops iterating further if the type
-// is a struct. Example:
-//
-// // Field is not processed further by this package.
-// Field time.Time `structs:"myName,omitnested"`
-// Field *http.Request `structs:",omitnested"`
-//
-// Note that only exported fields of a struct can be accessed, non exported
-// fields will be neglected. It panics if s's kind is not struct.
-func (s *Struct) HasZero() bool {
- fields := s.structFields()
-
- for _, field := range fields {
- val := s.value.FieldByName(field.Name)
-
- _, tagOpts := parseTag(field.Tag.Get(s.TagName))
-
- if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
- ok := HasZero(val.Interface())
- if ok {
- return true
- }
-
- continue
- }
-
- // zero value of the given field, such as "" for string, 0 for int
- zero := reflect.Zero(val.Type()).Interface()
-
- // current value of the given field
- current := val.Interface()
-
- if reflect.DeepEqual(current, zero) {
- return true
- }
- }
-
- return false
-}
-
-// Name returns the structs's type name within its package. For more info refer
-// to Name() function.
-func (s *Struct) Name() string {
- return s.value.Type().Name()
-}
-
-// structFields returns the exported struct fields for a given s struct. This
-// is a convenient helper method to avoid duplicate code in some of the
-// functions.
-func (s *Struct) structFields() []reflect.StructField {
- t := s.value.Type()
-
- var f []reflect.StructField
-
- for i := 0; i < t.NumField(); i++ {
- field := t.Field(i)
- // we can't access the value of unexported fields
- if field.PkgPath != "" {
- continue
- }
-
- // don't check if it's omitted
- if tag := field.Tag.Get(s.TagName); tag == "-" {
- continue
- }
-
- f = append(f, field)
- }
-
- return f
-}
-
-func strctVal(s interface{}) reflect.Value {
- v := reflect.ValueOf(s)
-
- // if pointer get the underlying element≤
- for v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
-
- if v.Kind() != reflect.Struct {
- panic("not struct")
- }
-
- return v
-}
-
-// Map converts the given struct to a map[string]interface{}. For more info
-// refer to Struct types Map() method. It panics if s's kind is not struct.
-func Map(s interface{}) map[string]interface{} {
- return New(s).Map()
-}
-
-// FillMap is the same as Map. Instead of returning the output, it fills the
-// given map.
-func FillMap(s interface{}, out map[string]interface{}) {
- New(s).FillMap(out)
-}
-
-// Values converts the given struct to a []interface{}. For more info refer to
-// Struct types Values() method. It panics if s's kind is not struct.
-func Values(s interface{}) []interface{} {
- return New(s).Values()
-}
-
-// Fields returns a slice of *Field. For more info refer to Struct types
-// Fields() method. It panics if s's kind is not struct.
-func Fields(s interface{}) []*Field {
- return New(s).Fields()
-}
-
-// Names returns a slice of field names. For more info refer to Struct types
-// Names() method. It panics if s's kind is not struct.
-func Names(s interface{}) []string {
- return New(s).Names()
-}
-
-// IsZero returns true if all fields is equal to a zero value. For more info
-// refer to Struct types IsZero() method. It panics if s's kind is not struct.
-func IsZero(s interface{}) bool {
- return New(s).IsZero()
-}
-
-// HasZero returns true if any field is equal to a zero value. For more info
-// refer to Struct types HasZero() method. It panics if s's kind is not struct.
-func HasZero(s interface{}) bool {
- return New(s).HasZero()
-}
-
-// IsStruct returns true if the given variable is a struct or a pointer to
-// struct.
-func IsStruct(s interface{}) bool {
- v := reflect.ValueOf(s)
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
-
- // uninitialized zero value of a struct
- if v.Kind() == reflect.Invalid {
- return false
- }
-
- return v.Kind() == reflect.Struct
-}
-
-// Name returns the structs's type name within its package. It returns an
-// empty string for unnamed types. It panics if s's kind is not struct.
-func Name(s interface{}) string {
- return New(s).Name()
-}
-
-// nested retrieves recursively all types for the given value and returns the
-// nested value.
-func (s *Struct) nested(val reflect.Value) interface{} {
- var finalVal interface{}
-
- v := reflect.ValueOf(val.Interface())
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
-
- switch v.Kind() {
- case reflect.Struct:
- n := New(val.Interface())
- n.TagName = s.TagName
- m := n.Map()
-
- // do not add the converted value if there are no exported fields, ie:
- // time.Time
- if len(m) == 0 {
- finalVal = val.Interface()
- } else {
- finalVal = m
- }
- case reflect.Map:
- // get the element type of the map
- mapElem := val.Type()
- switch val.Type().Kind() {
- case reflect.Ptr, reflect.Array, reflect.Map,
- reflect.Slice, reflect.Chan:
- mapElem = val.Type().Elem()
- if mapElem.Kind() == reflect.Ptr {
- mapElem = mapElem.Elem()
- }
- }
-
- // only iterate over struct types, ie: map[string]StructType,
- // map[string][]StructType,
- if mapElem.Kind() == reflect.Struct ||
- (mapElem.Kind() == reflect.Slice &&
- mapElem.Elem().Kind() == reflect.Struct) {
- m := make(map[string]interface{}, val.Len())
- for _, k := range val.MapKeys() {
- m[k.String()] = s.nested(val.MapIndex(k))
- }
- finalVal = m
- break
- }
-
- // TODO(arslan): should this be optional?
- finalVal = val.Interface()
- case reflect.Slice, reflect.Array:
- if val.Type().Kind() == reflect.Interface {
- finalVal = val.Interface()
- break
- }
-
- // TODO(arslan): should this be optional?
- // do not iterate of non struct types, just pass the value. Ie: []int,
- // []string, co... We only iterate further if it's a struct.
- // i.e []foo or []*foo
- if val.Type().Elem().Kind() != reflect.Struct &&
- !(val.Type().Elem().Kind() == reflect.Ptr &&
- val.Type().Elem().Elem().Kind() == reflect.Struct) {
- finalVal = val.Interface()
- break
- }
-
- slices := make([]interface{}, val.Len(), val.Len())
- for x := 0; x < val.Len(); x++ {
- slices[x] = s.nested(val.Index(x))
- }
- finalVal = slices
- default:
- finalVal = val.Interface()
- }
-
- return finalVal
-}
diff --git a/vendor/github.com/fatih/structs/structs_example_test.go b/vendor/github.com/fatih/structs/structs_example_test.go
deleted file mode 100644
index 329c130..0000000
--- a/vendor/github.com/fatih/structs/structs_example_test.go
+++ /dev/null
@@ -1,351 +0,0 @@
-package structs
-
-import (
- "fmt"
- "time"
-)
-
-func ExampleNew() {
- type Server struct {
- Name string
- ID int32
- Enabled bool
- }
-
- server := &Server{
- Name: "Arslan",
- ID: 123456,
- Enabled: true,
- }
-
- s := New(server)
-
- fmt.Printf("Name : %v\n", s.Name())
- fmt.Printf("Values : %v\n", s.Values())
- fmt.Printf("Value of ID : %v\n", s.Field("ID").Value())
- // Output:
- // Name : Server
- // Values : [Arslan 123456 true]
- // Value of ID : 123456
-
-}
-
-func ExampleMap() {
- type Server struct {
- Name string
- ID int32
- Enabled bool
- }
-
- s := &Server{
- Name: "Arslan",
- ID: 123456,
- Enabled: true,
- }
-
- m := Map(s)
-
- fmt.Printf("%#v\n", m["Name"])
- fmt.Printf("%#v\n", m["ID"])
- fmt.Printf("%#v\n", m["Enabled"])
- // Output:
- // "Arslan"
- // 123456
- // true
-
-}
-
-func ExampleMap_tags() {
- // Custom tags can change the map keys instead of using the fields name
- type Server struct {
- Name string `structs:"server_name"`
- ID int32 `structs:"server_id"`
- Enabled bool `structs:"enabled"`
- }
-
- s := &Server{
- Name: "Zeynep",
- ID: 789012,
- }
-
- m := Map(s)
-
- // access them by the custom tags defined above
- fmt.Printf("%#v\n", m["server_name"])
- fmt.Printf("%#v\n", m["server_id"])
- fmt.Printf("%#v\n", m["enabled"])
- // Output:
- // "Zeynep"
- // 789012
- // false
-
-}
-
-func ExampleMap_omitNested() {
- // By default field with struct types are processed too. We can stop
- // processing them via "omitnested" tag option.
- type Server struct {
- Name string `structs:"server_name"`
- ID int32 `structs:"server_id"`
- Time time.Time `structs:"time,omitnested"` // do not convert to map[string]interface{}
- }
-
- const shortForm = "2006-Jan-02"
- t, _ := time.Parse("2006-Jan-02", "2013-Feb-03")
-
- s := &Server{
- Name: "Zeynep",
- ID: 789012,
- Time: t,
- }
-
- m := Map(s)
-
- // access them by the custom tags defined above
- fmt.Printf("%v\n", m["server_name"])
- fmt.Printf("%v\n", m["server_id"])
- fmt.Printf("%v\n", m["time"].(time.Time))
- // Output:
- // Zeynep
- // 789012
- // 2013-02-03 00:00:00 +0000 UTC
-}
-
-func ExampleMap_omitEmpty() {
- // By default field with struct types of zero values are processed too. We
- // can stop processing them via "omitempty" tag option.
- type Server struct {
- Name string `structs:",omitempty"`
- ID int32 `structs:"server_id,omitempty"`
- Location string
- }
-
- // Only add location
- s := &Server{
- Location: "Tokyo",
- }
-
- m := Map(s)
-
- // map contains only the Location field
- fmt.Printf("%v\n", m)
- // Output:
- // map[Location:Tokyo]
-}
-
-func ExampleValues() {
- type Server struct {
- Name string
- ID int32
- Enabled bool
- }
-
- s := &Server{
- Name: "Fatih",
- ID: 135790,
- Enabled: false,
- }
-
- m := Values(s)
-
- fmt.Printf("Values: %+v\n", m)
- // Output:
- // Values: [Fatih 135790 false]
-}
-
-func ExampleValues_omitEmpty() {
- // By default field with struct types of zero values are processed too. We
- // can stop processing them via "omitempty" tag option.
- type Server struct {
- Name string `structs:",omitempty"`
- ID int32 `structs:"server_id,omitempty"`
- Location string
- }
-
- // Only add location
- s := &Server{
- Location: "Ankara",
- }
-
- m := Values(s)
-
- // values contains only the Location field
- fmt.Printf("Values: %+v\n", m)
- // Output:
- // Values: [Ankara]
-}
-
-func ExampleValues_tags() {
- type Location struct {
- City string
- Country string
- }
-
- type Server struct {
- Name string
- ID int32
- Enabled bool
- Location Location `structs:"-"` // values from location are not included anymore
- }
-
- s := &Server{
- Name: "Fatih",
- ID: 135790,
- Enabled: false,
- Location: Location{City: "Ankara", Country: "Turkey"},
- }
-
- // Let get all values from the struct s. Note that we don't include values
- // from the Location field
- m := Values(s)
-
- fmt.Printf("Values: %+v\n", m)
- // Output:
- // Values: [Fatih 135790 false]
-}
-
-func ExampleFields() {
- type Access struct {
- Name string
- LastAccessed time.Time
- Number int
- }
-
- s := &Access{
- Name: "Fatih",
- LastAccessed: time.Now(),
- Number: 1234567,
- }
-
- fields := Fields(s)
-
- for i, field := range fields {
- fmt.Printf("[%d] %+v\n", i, field.Name())
- }
-
- // Output:
- // [0] Name
- // [1] LastAccessed
- // [2] Number
-}
-
-func ExampleFields_nested() {
- type Person struct {
- Name string
- Number int
- }
-
- type Access struct {
- Person Person
- HasPermission bool
- LastAccessed time.Time
- }
-
- s := &Access{
- Person: Person{Name: "fatih", Number: 1234567},
- LastAccessed: time.Now(),
- HasPermission: true,
- }
-
- // Let's get all fields from the struct s.
- fields := Fields(s)
-
- for _, field := range fields {
- if field.Name() == "Person" {
- fmt.Printf("Access.Person.Name: %+v\n", field.Field("Name").Value())
- }
- }
-
- // Output:
- // Access.Person.Name: fatih
-}
-
-func ExampleField() {
- type Person struct {
- Name string
- Number int
- }
-
- type Access struct {
- Person Person
- HasPermission bool
- LastAccessed time.Time
- }
-
- access := &Access{
- Person: Person{Name: "fatih", Number: 1234567},
- LastAccessed: time.Now(),
- HasPermission: true,
- }
-
- // Create a new Struct type
- s := New(access)
-
- // Get the Field type for "Person" field
- p := s.Field("Person")
-
- // Get the underlying "Name field" and print the value of it
- name := p.Field("Name")
-
- fmt.Printf("Value of Person.Access.Name: %+v\n", name.Value())
-
- // Output:
- // Value of Person.Access.Name: fatih
-
-}
-
-func ExampleIsZero() {
- type Server struct {
- Name string
- ID int32
- Enabled bool
- }
-
- // Nothing is initalized
- a := &Server{}
- isZeroA := IsZero(a)
-
- // Name and Enabled is initialized, but not ID
- b := &Server{
- Name: "Golang",
- Enabled: true,
- }
- isZeroB := IsZero(b)
-
- fmt.Printf("%#v\n", isZeroA)
- fmt.Printf("%#v\n", isZeroB)
- // Output:
- // true
- // false
-}
-
-func ExampleHasZero() {
- // Let's define an Access struct. Note that the "Enabled" field is not
- // going to be checked because we added the "structs" tag to the field.
- type Access struct {
- Name string
- LastAccessed time.Time
- Number int
- Enabled bool `structs:"-"`
- }
-
- // Name and Number is not initialized.
- a := &Access{
- LastAccessed: time.Now(),
- }
- hasZeroA := HasZero(a)
-
- // Name and Number is initialized.
- b := &Access{
- Name: "Fatih",
- LastAccessed: time.Now(),
- Number: 12345,
- }
- hasZeroB := HasZero(b)
-
- fmt.Printf("%#v\n", hasZeroA)
- fmt.Printf("%#v\n", hasZeroB)
- // Output:
- // true
- // false
-}
diff --git a/vendor/github.com/fatih/structs/structs_test.go b/vendor/github.com/fatih/structs/structs_test.go
deleted file mode 100644
index 8a18a07..0000000
--- a/vendor/github.com/fatih/structs/structs_test.go
+++ /dev/null
@@ -1,1453 +0,0 @@
-package structs
-
-import (
- "fmt"
- "reflect"
- "testing"
- "time"
-)
-
-func TestMapNonStruct(t *testing.T) {
- foo := []string{"foo"}
-
- defer func() {
- err := recover()
- if err == nil {
- t.Error("Passing a non struct into Map should panic")
- }
- }()
-
- // this should panic. We are going to recover and and test it
- _ = Map(foo)
-}
-
-func TestStructIndexes(t *testing.T) {
- type C struct {
- something int
- Props map[string]interface{}
- }
-
- defer func() {
- err := recover()
- if err != nil {
- fmt.Printf("err %+v\n", err)
- t.Error("Using mixed indexes should not panic")
- }
- }()
-
- // They should not panic
- _ = Map(&C{})
- _ = Fields(&C{})
- _ = Values(&C{})
- _ = IsZero(&C{})
- _ = HasZero(&C{})
-}
-
-func TestMap(t *testing.T) {
- var T = struct {
- A string
- B int
- C bool
- }{
- A: "a-value",
- B: 2,
- C: true,
- }
-
- a := Map(T)
-
- if typ := reflect.TypeOf(a).Kind(); typ != reflect.Map {
- t.Errorf("Map should return a map type, got: %v", typ)
- }
-
- // we have three fields
- if len(a) != 3 {
- t.Errorf("Map should return a map of len 3, got: %d", len(a))
- }
-
- inMap := func(val interface{}) bool {
- for _, v := range a {
- if reflect.DeepEqual(v, val) {
- return true
- }
- }
-
- return false
- }
-
- for _, val := range []interface{}{"a-value", 2, true} {
- if !inMap(val) {
- t.Errorf("Map should have the value %v", val)
- }
- }
-
-}
-
-func TestMap_Tag(t *testing.T) {
- var T = struct {
- A string `structs:"x"`
- B int `structs:"y"`
- C bool `structs:"z"`
- }{
- A: "a-value",
- B: 2,
- C: true,
- }
-
- a := Map(T)
-
- inMap := func(key interface{}) bool {
- for k := range a {
- if reflect.DeepEqual(k, key) {
- return true
- }
- }
- return false
- }
-
- for _, key := range []string{"x", "y", "z"} {
- if !inMap(key) {
- t.Errorf("Map should have the key %v", key)
- }
- }
-
-}
-
-func TestMap_CustomTag(t *testing.T) {
- var T = struct {
- A string `json:"x"`
- B int `json:"y"`
- C bool `json:"z"`
- D struct {
- E string `json:"jkl"`
- } `json:"nested"`
- }{
- A: "a-value",
- B: 2,
- C: true,
- }
- T.D.E = "e-value"
-
- s := New(T)
- s.TagName = "json"
-
- a := s.Map()
-
- inMap := func(key interface{}) bool {
- for k := range a {
- if reflect.DeepEqual(k, key) {
- return true
- }
- }
- return false
- }
-
- for _, key := range []string{"x", "y", "z"} {
- if !inMap(key) {
- t.Errorf("Map should have the key %v", key)
- }
- }
-
- nested, ok := a["nested"].(map[string]interface{})
- if !ok {
- t.Fatalf("Map should contain the D field that is tagged as 'nested'")
- }
-
- e, ok := nested["jkl"].(string)
- if !ok {
- t.Fatalf("Map should contain the D.E field that is tagged as 'jkl'")
- }
-
- if e != "e-value" {
- t.Errorf("D.E field should be equal to 'e-value', got: '%v'", e)
- }
-
-}
-
-func TestMap_MultipleCustomTag(t *testing.T) {
- var A = struct {
- X string `aa:"ax"`
- }{"a_value"}
-
- aStruct := New(A)
- aStruct.TagName = "aa"
-
- var B = struct {
- X string `bb:"bx"`
- }{"b_value"}
-
- bStruct := New(B)
- bStruct.TagName = "bb"
-
- a, b := aStruct.Map(), bStruct.Map()
- if !reflect.DeepEqual(a, map[string]interface{}{"ax": "a_value"}) {
- t.Error("Map should have field ax with value a_value")
- }
-
- if !reflect.DeepEqual(b, map[string]interface{}{"bx": "b_value"}) {
- t.Error("Map should have field bx with value b_value")
- }
-}
-
-func TestMap_OmitEmpty(t *testing.T) {
- type A struct {
- Name string
- Value string `structs:",omitempty"`
- Time time.Time `structs:",omitempty"`
- }
- a := A{}
-
- m := Map(a)
-
- _, ok := m["Value"].(map[string]interface{})
- if ok {
- t.Error("Map should not contain the Value field that is tagged as omitempty")
- }
-
- _, ok = m["Time"].(map[string]interface{})
- if ok {
- t.Error("Map should not contain the Time field that is tagged as omitempty")
- }
-}
-
-func TestMap_OmitNested(t *testing.T) {
- type A struct {
- Name string
- Value string
- Time time.Time `structs:",omitnested"`
- }
- a := A{Time: time.Now()}
-
- type B struct {
- Desc string
- A A
- }
- b := &B{A: a}
-
- m := Map(b)
-
- in, ok := m["A"].(map[string]interface{})
- if !ok {
- t.Error("Map nested structs is not available in the map")
- }
-
- // should not happen
- if _, ok := in["Time"].(map[string]interface{}); ok {
- t.Error("Map nested struct should omit recursiving parsing of Time")
- }
-
- if _, ok := in["Time"].(time.Time); !ok {
- t.Error("Map nested struct should stop parsing of Time at is current value")
- }
-}
-
-func TestMap_Nested(t *testing.T) {
- type A struct {
- Name string
- }
- a := &A{Name: "example"}
-
- type B struct {
- A *A
- }
- b := &B{A: a}
-
- m := Map(b)
-
- if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
- t.Errorf("Map should return a map type, got: %v", typ)
- }
-
- in, ok := m["A"].(map[string]interface{})
- if !ok {
- t.Error("Map nested structs is not available in the map")
- }
-
- if name := in["Name"].(string); name != "example" {
- t.Errorf("Map nested struct's name field should give example, got: %s", name)
- }
-}
-
-func TestMap_NestedMapWithStructValues(t *testing.T) {
- type A struct {
- Name string
- }
-
- type B struct {
- A map[string]*A
- }
-
- a := &A{Name: "example"}
-
- b := &B{
- A: map[string]*A{
- "example_key": a,
- },
- }
-
- m := Map(b)
-
- if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
- t.Errorf("Map should return a map type, got: %v", typ)
- }
-
- in, ok := m["A"].(map[string]interface{})
- if !ok {
- t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["A"])
- }
-
- example := in["example_key"].(map[string]interface{})
- if name := example["Name"].(string); name != "example" {
- t.Errorf("Map nested struct's name field should give example, got: %s", name)
- }
-}
-
-func TestMap_NestedMapWithStringValues(t *testing.T) {
- type B struct {
- Foo map[string]string
- }
-
- type A struct {
- B *B
- }
-
- b := &B{
- Foo: map[string]string{
- "example_key": "example",
- },
- }
-
- a := &A{B: b}
-
- m := Map(a)
-
- if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
- t.Errorf("Map should return a map type, got: %v", typ)
- }
-
- in, ok := m["B"].(map[string]interface{})
- if !ok {
- t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
- }
-
- foo := in["Foo"].(map[string]string)
- if name := foo["example_key"]; name != "example" {
- t.Errorf("Map nested struct's name field should give example, got: %s", name)
- }
-}
-func TestMap_NestedMapWithInterfaceValues(t *testing.T) {
- type B struct {
- Foo map[string]interface{}
- }
-
- type A struct {
- B *B
- }
-
- b := &B{
- Foo: map[string]interface{}{
- "example_key": "example",
- },
- }
-
- a := &A{B: b}
-
- m := Map(a)
-
- if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
- t.Errorf("Map should return a map type, got: %v", typ)
- }
-
- in, ok := m["B"].(map[string]interface{})
- if !ok {
- t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
- }
-
- foo := in["Foo"].(map[string]interface{})
- if name := foo["example_key"]; name != "example" {
- t.Errorf("Map nested struct's name field should give example, got: %s", name)
- }
-}
-
-func TestMap_NestedMapWithSliceIntValues(t *testing.T) {
- type B struct {
- Foo map[string][]int
- }
-
- type A struct {
- B *B
- }
-
- b := &B{
- Foo: map[string][]int{
- "example_key": []int{80},
- },
- }
-
- a := &A{B: b}
-
- m := Map(a)
-
- if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
- t.Errorf("Map should return a map type, got: %v", typ)
- }
-
- in, ok := m["B"].(map[string]interface{})
- if !ok {
- t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
- }
-
- foo := in["Foo"].(map[string][]int)
- if name := foo["example_key"]; name[0] != 80 {
- t.Errorf("Map nested struct's name field should give example, got: %s", name)
- }
-}
-
-func TestMap_NestedMapWithSliceStructValues(t *testing.T) {
- type address struct {
- Country string `structs:"country"`
- }
-
- type B struct {
- Foo map[string][]address
- }
-
- type A struct {
- B *B
- }
-
- b := &B{
- Foo: map[string][]address{
- "example_key": []address{
- {Country: "Turkey"},
- },
- },
- }
-
- a := &A{B: b}
- m := Map(a)
-
- if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
- t.Errorf("Map should return a map type, got: %v", typ)
- }
-
- in, ok := m["B"].(map[string]interface{})
- if !ok {
- t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
- }
-
- foo := in["Foo"].(map[string]interface{})
-
- addresses := foo["example_key"].([]interface{})
-
- addr, ok := addresses[0].(map[string]interface{})
- if !ok {
- t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
- }
-
- if _, exists := addr["country"]; !exists {
- t.Errorf("Expecting country, but found Country")
- }
-}
-
-func TestMap_NestedSliceWithStructValues(t *testing.T) {
- type address struct {
- Country string `structs:"customCountryName"`
- }
-
- type person struct {
- Name string `structs:"name"`
- Addresses []address `structs:"addresses"`
- }
-
- p := person{
- Name: "test",
- Addresses: []address{
- address{Country: "England"},
- address{Country: "Italy"},
- },
- }
- mp := Map(p)
-
- mpAddresses := mp["addresses"].([]interface{})
- if _, exists := mpAddresses[0].(map[string]interface{})["Country"]; exists {
- t.Errorf("Expecting customCountryName, but found Country")
- }
-
- if _, exists := mpAddresses[0].(map[string]interface{})["customCountryName"]; !exists {
- t.Errorf("customCountryName key not found")
- }
-}
-
-func TestMap_NestedSliceWithPointerOfStructValues(t *testing.T) {
- type address struct {
- Country string `structs:"customCountryName"`
- }
-
- type person struct {
- Name string `structs:"name"`
- Addresses []*address `structs:"addresses"`
- }
-
- p := person{
- Name: "test",
- Addresses: []*address{
- &address{Country: "England"},
- &address{Country: "Italy"},
- },
- }
- mp := Map(p)
-
- mpAddresses := mp["addresses"].([]interface{})
- if _, exists := mpAddresses[0].(map[string]interface{})["Country"]; exists {
- t.Errorf("Expecting customCountryName, but found Country")
- }
-
- if _, exists := mpAddresses[0].(map[string]interface{})["customCountryName"]; !exists {
- t.Errorf("customCountryName key not found")
- }
-}
-
-func TestMap_NestedSliceWithIntValues(t *testing.T) {
- type person struct {
- Name string `structs:"name"`
- Ports []int `structs:"ports"`
- }
-
- p := person{
- Name: "test",
- Ports: []int{80},
- }
- m := Map(p)
-
- ports, ok := m["ports"].([]int)
- if !ok {
- t.Errorf("Nested type of map should be of type []int, have %T", m["ports"])
- }
-
- if ports[0] != 80 {
- t.Errorf("Map nested struct's ports field should give 80, got: %v", ports)
- }
-}
-
-func TestMap_Anonymous(t *testing.T) {
- type A struct {
- Name string
- }
- a := &A{Name: "example"}
-
- type B struct {
- *A
- }
- b := &B{}
- b.A = a
-
- m := Map(b)
-
- if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
- t.Errorf("Map should return a map type, got: %v", typ)
- }
-
- in, ok := m["A"].(map[string]interface{})
- if !ok {
- t.Error("Embedded structs is not available in the map")
- }
-
- if name := in["Name"].(string); name != "example" {
- t.Errorf("Embedded A struct's Name field should give example, got: %s", name)
- }
-}
-
-func TestMap_Flatnested(t *testing.T) {
- type A struct {
- Name string
- }
- a := A{Name: "example"}
-
- type B struct {
- A `structs:",flatten"`
- C int
- }
- b := &B{C: 123}
- b.A = a
-
- m := Map(b)
-
- _, ok := m["A"].(map[string]interface{})
- if ok {
- t.Error("Embedded A struct with tag flatten has to be flat in the map")
- }
-
- expectedMap := map[string]interface{}{"Name": "example", "C": 123}
- if !reflect.DeepEqual(m, expectedMap) {
- t.Errorf("The exprected map %+v does't correspond to %+v", expectedMap, m)
- }
-
-}
-
-func TestMap_FlatnestedOverwrite(t *testing.T) {
- type A struct {
- Name string
- }
- a := A{Name: "example"}
-
- type B struct {
- A `structs:",flatten"`
- Name string
- C int
- }
- b := &B{C: 123, Name: "bName"}
- b.A = a
-
- m := Map(b)
-
- _, ok := m["A"].(map[string]interface{})
- if ok {
- t.Error("Embedded A struct with tag flatten has to be flat in the map")
- }
-
- expectedMap := map[string]interface{}{"Name": "bName", "C": 123}
- if !reflect.DeepEqual(m, expectedMap) {
- t.Errorf("The exprected map %+v does't correspond to %+v", expectedMap, m)
- }
-}
-
-func TestMap_TimeField(t *testing.T) {
- type A struct {
- CreatedAt time.Time
- }
-
- a := &A{CreatedAt: time.Now().UTC()}
- m := Map(a)
-
- _, ok := m["CreatedAt"].(time.Time)
- if !ok {
- t.Error("Time field must be final")
- }
-}
-
-func TestFillMap(t *testing.T) {
- var T = struct {
- A string
- B int
- C bool
- }{
- A: "a-value",
- B: 2,
- C: true,
- }
-
- a := make(map[string]interface{}, 0)
- FillMap(T, a)
-
- // we have three fields
- if len(a) != 3 {
- t.Errorf("FillMap should fill a map of len 3, got: %d", len(a))
- }
-
- inMap := func(val interface{}) bool {
- for _, v := range a {
- if reflect.DeepEqual(v, val) {
- return true
- }
- }
-
- return false
- }
-
- for _, val := range []interface{}{"a-value", 2, true} {
- if !inMap(val) {
- t.Errorf("FillMap should have the value %v", val)
- }
- }
-}
-
-func TestFillMap_Nil(t *testing.T) {
- var T = struct {
- A string
- B int
- C bool
- }{
- A: "a-value",
- B: 2,
- C: true,
- }
-
- defer func() {
- err := recover()
- if err != nil {
- t.Error("FillMap should not panic if a nil map is passed")
- }
- }()
-
- // nil should no
- FillMap(T, nil)
-}
-func TestStruct(t *testing.T) {
- var T = struct{}{}
-
- if !IsStruct(T) {
- t.Errorf("T should be a struct, got: %T", T)
- }
-
- if !IsStruct(&T) {
- t.Errorf("T should be a struct, got: %T", T)
- }
-
-}
-
-func TestValues(t *testing.T) {
- var T = struct {
- A string
- B int
- C bool
- }{
- A: "a-value",
- B: 2,
- C: true,
- }
-
- s := Values(T)
-
- if typ := reflect.TypeOf(s).Kind(); typ != reflect.Slice {
- t.Errorf("Values should return a slice type, got: %v", typ)
- }
-
- inSlice := func(val interface{}) bool {
- for _, v := range s {
- if reflect.DeepEqual(v, val) {
- return true
- }
- }
- return false
- }
-
- for _, val := range []interface{}{"a-value", 2, true} {
- if !inSlice(val) {
- t.Errorf("Values should have the value %v", val)
- }
- }
-}
-
-func TestValues_OmitEmpty(t *testing.T) {
- type A struct {
- Name string
- Value int `structs:",omitempty"`
- }
-
- a := A{Name: "example"}
- s := Values(a)
-
- if len(s) != 1 {
- t.Errorf("Values of omitted empty fields should be not counted")
- }
-
- if s[0].(string) != "example" {
- t.Errorf("Values of omitted empty fields should left the value example")
- }
-}
-
-func TestValues_OmitNested(t *testing.T) {
- type A struct {
- Name string
- Value int
- }
-
- a := A{
- Name: "example",
- Value: 123,
- }
-
- type B struct {
- A A `structs:",omitnested"`
- C int
- }
- b := &B{A: a, C: 123}
-
- s := Values(b)
-
- if len(s) != 2 {
- t.Errorf("Values of omitted nested struct should be not counted")
- }
-
- inSlice := func(val interface{}) bool {
- for _, v := range s {
- if reflect.DeepEqual(v, val) {
- return true
- }
- }
- return false
- }
-
- for _, val := range []interface{}{123, a} {
- if !inSlice(val) {
- t.Errorf("Values should have the value %v", val)
- }
- }
-}
-
-func TestValues_Nested(t *testing.T) {
- type A struct {
- Name string
- }
- a := A{Name: "example"}
-
- type B struct {
- A A
- C int
- }
- b := &B{A: a, C: 123}
-
- s := Values(b)
-
- inSlice := func(val interface{}) bool {
- for _, v := range s {
- if reflect.DeepEqual(v, val) {
- return true
- }
- }
- return false
- }
-
- for _, val := range []interface{}{"example", 123} {
- if !inSlice(val) {
- t.Errorf("Values should have the value %v", val)
- }
- }
-}
-
-func TestValues_Anonymous(t *testing.T) {
- type A struct {
- Name string
- }
- a := A{Name: "example"}
-
- type B struct {
- A
- C int
- }
- b := &B{C: 123}
- b.A = a
-
- s := Values(b)
-
- inSlice := func(val interface{}) bool {
- for _, v := range s {
- if reflect.DeepEqual(v, val) {
- return true
- }
- }
- return false
- }
-
- for _, val := range []interface{}{"example", 123} {
- if !inSlice(val) {
- t.Errorf("Values should have the value %v", val)
- }
- }
-}
-
-func TestNames(t *testing.T) {
- var T = struct {
- A string
- B int
- C bool
- }{
- A: "a-value",
- B: 2,
- C: true,
- }
-
- s := Names(T)
-
- if len(s) != 3 {
- t.Errorf("Names should return a slice of len 3, got: %d", len(s))
- }
-
- inSlice := func(val string) bool {
- for _, v := range s {
- if reflect.DeepEqual(v, val) {
- return true
- }
- }
- return false
- }
-
- for _, val := range []string{"A", "B", "C"} {
- if !inSlice(val) {
- t.Errorf("Names should have the value %v", val)
- }
- }
-}
-
-func TestFields(t *testing.T) {
- var T = struct {
- A string
- B int
- C bool
- }{
- A: "a-value",
- B: 2,
- C: true,
- }
-
- s := Fields(T)
-
- if len(s) != 3 {
- t.Errorf("Fields should return a slice of len 3, got: %d", len(s))
- }
-
- inSlice := func(val string) bool {
- for _, v := range s {
- if reflect.DeepEqual(v.Name(), val) {
- return true
- }
- }
- return false
- }
-
- for _, val := range []string{"A", "B", "C"} {
- if !inSlice(val) {
- t.Errorf("Fields should have the value %v", val)
- }
- }
-}
-
-func TestFields_OmitNested(t *testing.T) {
- type A struct {
- Name string
- Enabled bool
- }
- a := A{Name: "example"}
-
- type B struct {
- A A
- C int
- Value string `structs:"-"`
- Number int
- }
- b := &B{A: a, C: 123}
-
- s := Fields(b)
-
- if len(s) != 3 {
- t.Errorf("Fields should omit nested struct. Expecting 2 got: %d", len(s))
- }
-
- inSlice := func(val interface{}) bool {
- for _, v := range s {
- if reflect.DeepEqual(v.Name(), val) {
- return true
- }
- }
- return false
- }
-
- for _, val := range []interface{}{"A", "C"} {
- if !inSlice(val) {
- t.Errorf("Fields should have the value %v", val)
- }
- }
-}
-
-func TestFields_Anonymous(t *testing.T) {
- type A struct {
- Name string
- }
- a := A{Name: "example"}
-
- type B struct {
- A
- C int
- }
- b := &B{C: 123}
- b.A = a
-
- s := Fields(b)
-
- inSlice := func(val interface{}) bool {
- for _, v := range s {
- if reflect.DeepEqual(v.Name(), val) {
- return true
- }
- }
- return false
- }
-
- for _, val := range []interface{}{"A", "C"} {
- if !inSlice(val) {
- t.Errorf("Fields should have the value %v", val)
- }
- }
-}
-
-func TestIsZero(t *testing.T) {
- var T = struct {
- A string
- B int
- C bool `structs:"-"`
- D []string
- }{}
-
- ok := IsZero(T)
- if !ok {
- t.Error("IsZero should return true because none of the fields are initialized.")
- }
-
- var X = struct {
- A string
- F *bool
- }{
- A: "a-value",
- }
-
- ok = IsZero(X)
- if ok {
- t.Error("IsZero should return false because A is initialized")
- }
-
- var Y = struct {
- A string
- B int
- }{
- A: "a-value",
- B: 123,
- }
-
- ok = IsZero(Y)
- if ok {
- t.Error("IsZero should return false because A and B is initialized")
- }
-}
-
-func TestIsZero_OmitNested(t *testing.T) {
- type A struct {
- Name string
- D string
- }
- a := A{Name: "example"}
-
- type B struct {
- A A `structs:",omitnested"`
- C int
- }
- b := &B{A: a, C: 123}
-
- ok := IsZero(b)
- if ok {
- t.Error("IsZero should return false because A, B and C are initialized")
- }
-
- aZero := A{}
- bZero := &B{A: aZero}
-
- ok = IsZero(bZero)
- if !ok {
- t.Error("IsZero should return true because neither A nor B is initialized")
- }
-
-}
-
-func TestIsZero_Nested(t *testing.T) {
- type A struct {
- Name string
- D string
- }
- a := A{Name: "example"}
-
- type B struct {
- A A
- C int
- }
- b := &B{A: a, C: 123}
-
- ok := IsZero(b)
- if ok {
- t.Error("IsZero should return false because A, B and C are initialized")
- }
-
- aZero := A{}
- bZero := &B{A: aZero}
-
- ok = IsZero(bZero)
- if !ok {
- t.Error("IsZero should return true because neither A nor B is initialized")
- }
-
-}
-
-func TestIsZero_Anonymous(t *testing.T) {
- type A struct {
- Name string
- D string
- }
- a := A{Name: "example"}
-
- type B struct {
- A
- C int
- }
- b := &B{C: 123}
- b.A = a
-
- ok := IsZero(b)
- if ok {
- t.Error("IsZero should return false because A, B and C are initialized")
- }
-
- aZero := A{}
- bZero := &B{}
- bZero.A = aZero
-
- ok = IsZero(bZero)
- if !ok {
- t.Error("IsZero should return true because neither A nor B is initialized")
- }
-}
-
-func TestHasZero(t *testing.T) {
- var T = struct {
- A string
- B int
- C bool `structs:"-"`
- D []string
- }{
- A: "a-value",
- B: 2,
- }
-
- ok := HasZero(T)
- if !ok {
- t.Error("HasZero should return true because A and B are initialized.")
- }
-
- var X = struct {
- A string
- F *bool
- }{
- A: "a-value",
- }
-
- ok = HasZero(X)
- if !ok {
- t.Error("HasZero should return true because A is initialized")
- }
-
- var Y = struct {
- A string
- B int
- }{
- A: "a-value",
- B: 123,
- }
-
- ok = HasZero(Y)
- if ok {
- t.Error("HasZero should return false because A and B is initialized")
- }
-}
-
-func TestHasZero_OmitNested(t *testing.T) {
- type A struct {
- Name string
- D string
- }
- a := A{Name: "example"}
-
- type B struct {
- A A `structs:",omitnested"`
- C int
- }
- b := &B{A: a, C: 123}
-
- // Because the Field A inside B is omitted HasZero should return false
- // because it will stop iterating deeper andnot going to lookup for D
- ok := HasZero(b)
- if ok {
- t.Error("HasZero should return false because A and C are initialized")
- }
-}
-
-func TestHasZero_Nested(t *testing.T) {
- type A struct {
- Name string
- D string
- }
- a := A{Name: "example"}
-
- type B struct {
- A A
- C int
- }
- b := &B{A: a, C: 123}
-
- ok := HasZero(b)
- if !ok {
- t.Error("HasZero should return true because D is not initialized")
- }
-}
-
-func TestHasZero_Anonymous(t *testing.T) {
- type A struct {
- Name string
- D string
- }
- a := A{Name: "example"}
-
- type B struct {
- A
- C int
- }
- b := &B{C: 123}
- b.A = a
-
- ok := HasZero(b)
- if !ok {
- t.Error("HasZero should return false because D is not initialized")
- }
-}
-
-func TestName(t *testing.T) {
- type Foo struct {
- A string
- B bool
- }
- f := &Foo{}
-
- n := Name(f)
- if n != "Foo" {
- t.Errorf("Name should return Foo, got: %s", n)
- }
-
- unnamed := struct{ Name string }{Name: "Cihangir"}
- m := Name(unnamed)
- if m != "" {
- t.Errorf("Name should return empty string for unnamed struct, got: %s", n)
- }
-
- defer func() {
- err := recover()
- if err == nil {
- t.Error("Name should panic if a non struct is passed")
- }
- }()
-
- Name([]string{})
-}
-
-func TestNestedNilPointer(t *testing.T) {
- type Collar struct {
- Engraving string
- }
-
- type Dog struct {
- Name string
- Collar *Collar
- }
-
- type Person struct {
- Name string
- Dog *Dog
- }
-
- person := &Person{
- Name: "John",
- }
-
- personWithDog := &Person{
- Name: "Ron",
- Dog: &Dog{
- Name: "Rover",
- },
- }
-
- personWithDogWithCollar := &Person{
- Name: "Kon",
- Dog: &Dog{
- Name: "Ruffles",
- Collar: &Collar{
- Engraving: "If lost, call Kon",
- },
- },
- }
-
- defer func() {
- err := recover()
- if err != nil {
- fmt.Printf("err %+v\n", err)
- t.Error("Internal nil pointer should not panic")
- }
- }()
-
- _ = Map(person) // Panics
- _ = Map(personWithDog) // Panics
- _ = Map(personWithDogWithCollar) // Doesn't panic
-}
-
-func TestSetValueOnNestedField(t *testing.T) {
- type Base struct {
- ID int
- }
-
- type User struct {
- Base
- Name string
- }
-
- u := User{}
- s := New(&u)
- f := s.Field("Base").Field("ID")
- err := f.Set(10)
- if err != nil {
- t.Errorf("Error %v", err)
- }
- if f.Value().(int) != 10 {
- t.Errorf("Value should be equal to 10, got %v", f.Value())
- }
-}
-
-type Person struct {
- Name string
- Age int
-}
-
-func (p *Person) String() string {
- return fmt.Sprintf("%s(%d)", p.Name, p.Age)
-}
-
-func TestTagWithStringOption(t *testing.T) {
-
- type Address struct {
- Country string `json:"country"`
- Person *Person `json:"person,string"`
- }
-
- person := &Person{
- Name: "John",
- Age: 23,
- }
-
- address := &Address{
- Country: "EU",
- Person: person,
- }
-
- defer func() {
- err := recover()
- if err != nil {
- fmt.Printf("err %+v\n", err)
- t.Error("Internal nil pointer should not panic")
- }
- }()
-
- s := New(address)
-
- s.TagName = "json"
- m := s.Map()
-
- if m["person"] != person.String() {
- t.Errorf("Value for field person should be %s, got: %s", person.String(), m["person"])
- }
-
- vs := s.Values()
- if vs[1] != person.String() {
- t.Errorf("Value for 2nd field (person) should be %T, got: %T", person.String(), vs[1])
- }
-}
-
-type Animal struct {
- Name string
- Age int
-}
-
-type Dog struct {
- Animal *Animal `json:"animal,string"`
-}
-
-func TestNonStringerTagWithStringOption(t *testing.T) {
- a := &Animal{
- Name: "Fluff",
- Age: 4,
- }
-
- d := &Dog{
- Animal: a,
- }
-
- defer func() {
- err := recover()
- if err != nil {
- fmt.Printf("err %+v\n", err)
- t.Error("Internal nil pointer should not panic")
- }
- }()
-
- s := New(d)
-
- s.TagName = "json"
- m := s.Map()
-
- if _, exists := m["animal"]; exists {
- t.Errorf("Value for field Animal should not exist")
- }
-}
-
-func TestMap_InterfaceValue(t *testing.T) {
- type TestStruct struct {
- A interface{}
- }
-
- expected := []byte("test value")
-
- a := TestStruct{A: expected}
- s := Map(a)
- if !reflect.DeepEqual(s["A"], expected) {
- t.Errorf("Value does not match expected: %q != %q", s["A"], expected)
- }
-}
-
-func TestPointer2Pointer(t *testing.T) {
- defer func() {
- err := recover()
- if err != nil {
- fmt.Printf("err %+v\n", err)
- t.Error("Internal nil pointer should not panic")
- }
- }()
- a := &Animal{
- Name: "Fluff",
- Age: 4,
- }
- _ = Map(&a)
-
- b := &a
- _ = Map(&b)
-
- c := &b
- _ = Map(&c)
-}
-
-func TestMap_InterfaceTypeWithMapValue(t *testing.T) {
- type A struct {
- Name string `structs:"name"`
- Ip string `structs:"ip"`
- Query string `structs:"query"`
- Payload interface{} `structs:"payload"`
- }
-
- a := A{
- Name: "test",
- Ip: "127.0.0.1",
- Query: "",
- Payload: map[string]string{"test_param": "test_param"},
- }
-
- defer func() {
- err := recover()
- if err != nil {
- t.Error("Converting Map with an interface{} type with map value should not panic")
- }
- }()
-
- _ = Map(a)
-}
diff --git a/vendor/github.com/fatih/structs/tags.go b/vendor/github.com/fatih/structs/tags.go
deleted file mode 100644
index 8859341..0000000
--- a/vendor/github.com/fatih/structs/tags.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package structs
-
-import "strings"
-
-// tagOptions contains a slice of tag options
-type tagOptions []string
-
-// Has returns true if the given optiton is available in tagOptions
-func (t tagOptions) Has(opt string) bool {
- for _, tagOpt := range t {
- if tagOpt == opt {
- return true
- }
- }
-
- return false
-}
-
-// parseTag splits a struct field's tag into its name and a list of options
-// which comes after a name. A tag is in the form of: "name,option1,option2".
-// The name can be neglectected.
-func parseTag(tag string) (string, tagOptions) {
- // tag is one of followings:
- // ""
- // "name"
- // "name,opt"
- // "name,opt,opt2"
- // ",opt"
-
- res := strings.Split(tag, ",")
- return res[0], res[1:]
-}
diff --git a/vendor/github.com/fatih/structs/tags_test.go b/vendor/github.com/fatih/structs/tags_test.go
deleted file mode 100644
index 5d12724..0000000
--- a/vendor/github.com/fatih/structs/tags_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package structs
-
-import "testing"
-
-func TestParseTag_Name(t *testing.T) {
- tags := []struct {
- tag string
- has bool
- }{
- {"", false},
- {"name", true},
- {"name,opt", true},
- {"name , opt, opt2", false}, // has a single whitespace
- {", opt, opt2", false},
- }
-
- for _, tag := range tags {
- name, _ := parseTag(tag.tag)
-
- if (name != "name") && tag.has {
- t.Errorf("Parse tag should return name: %#v", tag)
- }
- }
-}
-
-func TestParseTag_Opts(t *testing.T) {
- tags := []struct {
- opts string
- has bool
- }{
- {"name", false},
- {"name,opt", true},
- {"name , opt, opt2", false}, // has a single whitespace
- {",opt, opt2", true},
- {", opt3, opt4", false},
- }
-
- // search for "opt"
- for _, tag := range tags {
- _, opts := parseTag(tag.opts)
-
- if opts.Has("opt") != tag.has {
- t.Errorf("Tag opts should have opt: %#v", tag)
- }
- }
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
new file mode 100644
index 0000000..ba49e3c
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig
@@ -0,0 +1,5 @@
+root = true
+
+[*]
+indent_style = tab
+indent_size = 4
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
new file mode 100644
index 0000000..4cd0cba
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml
new file mode 100644
index 0000000..981d1bb
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml
@@ -0,0 +1,30 @@
+sudo: false
+language: go
+
+go:
+ - 1.8.x
+ - 1.9.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+before_script:
+ - go get -u github.com/golang/lint/golint
+
+script:
+ - go test -v --race ./...
+
+after_script:
+ - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - go vet ./...
+
+os:
+ - linux
+ - osx
+
+notifications:
+ email: false
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 0000000..5ab5d41
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,52 @@
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Aaron L
+Adrien Bustany
+Amit Krishnan
+Anmol Sethi
+Bjørn Erik Pedersen
+Bruno Bigras
+Caleb Spare
+Case Nelson
+Chris Howey
+Christoffer Buchholz
+Daniel Wagner-Hall
+Dave Cheney
+Evan Phoenix
+Francisco Souza
+Hari haran
+John C Barstow
+Kelvin Fo
+Ken-ichirou MATSUZAWA
+Matt Layher
+Nathan Youngman
+Nickolai Zeldovich
+Patrick
+Paul Hammond
+Pawel Knap
+Pieter Droogendijk
+Pursuit92
+Riku Voipio
+Rob Figueiredo
+Rodrigo Chiossi
+Slawek Ligus
+Soge Zhang
+Tiffany Jernigan
+Tilak Sharma
+Tom Payne
+Travis Cline
+Tudor Golubenco
+Vahe Khachikyan
+Yukang
+bronze1man
+debrando
+henrikedwards
+铁哥
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
new file mode 100644
index 0000000..be4d7ea
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -0,0 +1,317 @@
+# Changelog
+
+## v1.4.7 / 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## v1.4.1 / 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## v1.4.0 / 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## v1.3.1 / 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## v1.3.0 / 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## v1.2.10 / 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## v1.2.9 / 2016-01-13
+
+kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## v1.2.8 / 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## v1.2.5 / 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## v1.2.1 / 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+ * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watch directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
new file mode 100644
index 0000000..828a60b
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE
new file mode 100644
index 0000000..f21e540
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
new file mode 100644
index 0000000..3993207
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -0,0 +1,79 @@
+# File system notifications for Go
+
+[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
+
+fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
+
+```console
+go get -u golang.org/x/sys/...
+```
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+|Adapter |OS |Status |
+|----------|----------|----------|
+|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
+|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
+|fanotify |Linux 2.6.37+ | |
+|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
+|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
+
+Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## Example
+
+See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
+
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
+* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+
diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go
new file mode 100644
index 0000000..ced39cb
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -0,0 +1,37 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package fsnotify
+
+import (
+ "errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 0000000..190bf0d
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,66 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+ Name string // Relative path to the file or directory.
+ Op Op // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+ Create Op = 1 << iota
+ Write
+ Remove
+ Rename
+ Chmod
+)
+
+func (op Op) String() string {
+ // Use a buffer for efficient string concatenation
+ var buffer bytes.Buffer
+
+ if op&Create == Create {
+ buffer.WriteString("|CREATE")
+ }
+ if op&Remove == Remove {
+ buffer.WriteString("|REMOVE")
+ }
+ if op&Write == Write {
+ buffer.WriteString("|WRITE")
+ }
+ if op&Rename == Rename {
+ buffer.WriteString("|RENAME")
+ }
+ if op&Chmod == Chmod {
+ buffer.WriteString("|CHMOD")
+ }
+ if buffer.Len() == 0 {
+ return ""
+ }
+ return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+ return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
new file mode 100644
index 0000000..d9fd1b8
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -0,0 +1,337 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ mu sync.Mutex // Map access
+ fd int
+ poller *fdPoller
+ watches map[string]*watch // Map of inotify watches (key: path)
+ paths map[int]string // Map of watched paths (key: watch descriptor)
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ // Create inotify fd
+ fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+ if fd == -1 {
+ return nil, errno
+ }
+ // Create epoll
+ poller, err := newFdPoller(fd)
+ if err != nil {
+ unix.Close(fd)
+ return nil, err
+ }
+ w := &Watcher{
+ fd: fd,
+ poller: poller,
+ watches: make(map[string]*watch),
+ paths: make(map[int]string),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ doneResp: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed() {
+ return nil
+ }
+
+ // Send 'close' signal to goroutine, and set the Watcher to closed.
+ close(w.done)
+
+ // Wake up goroutine
+ w.poller.wake()
+
+ // Wait for goroutine to close
+ <-w.doneResp
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ name = filepath.Clean(name)
+ if w.isClosed() {
+ return errors.New("inotify instance already closed")
+ }
+
+ const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+ unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+ unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+ var flags uint32 = agnosticEvents
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watchEntry := w.watches[name]
+ if watchEntry != nil {
+ flags |= watchEntry.flags | unix.IN_MASK_ADD
+ }
+ wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return errno
+ }
+
+ if watchEntry == nil {
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ } else {
+ watchEntry.wd = uint32(wd)
+ watchEntry.flags = flags
+ }
+
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+
+ // Fetch the watch.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watch, ok := w.watches[name]
+
+ // Remove it from inotify.
+ if !ok {
+ return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+ }
+
+ // We successfully removed the watch if InotifyRmWatch doesn't return an
+ // error, we need to clean up our internal state to ensure it matches
+ // inotify's kernel state.
+ delete(w.paths, int(watch.wd))
+ delete(w.watches, name)
+
+ // inotify_rm_watch will return EINVAL if the file has been deleted;
+ // the inotify will already have been removed.
+ // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
+ // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
+ // so that EINVAL means that the wd is being rm_watch()ed or its file removed
+ // by another thread and we have not received IN_IGNORE event.
+ success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+ if success == -1 {
+ // TODO: Perhaps it's not helpful to return an error here in every case.
+ // the only two possible errors are:
+ // EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+ // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+ // Watch descriptors are invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+ return errno
+ }
+
+ return nil
+}
+
+type watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+ var (
+ buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+ n int // Number of bytes read with read()
+ errno error // Syscall errno
+ ok bool // For poller.wait
+ )
+
+ defer close(w.doneResp)
+ defer close(w.Errors)
+ defer close(w.Events)
+ defer unix.Close(w.fd)
+ defer w.poller.close()
+
+ for {
+ // See if we have been closed.
+ if w.isClosed() {
+ return
+ }
+
+ ok, errno = w.poller.wait()
+ if errno != nil {
+ select {
+ case w.Errors <- errno:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ if !ok {
+ continue
+ }
+
+ n, errno = unix.Read(w.fd, buf[:])
+ // If a signal interrupted execution, see if we've been asked to close, and try again.
+ // http://man7.org/linux/man-pages/man7/signal.7.html :
+ // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+ if errno == unix.EINTR {
+ continue
+ }
+
+ // unix.Read might have been woken up by Close. If so, we're done.
+ if w.isClosed() {
+ return
+ }
+
+ if n < unix.SizeofInotifyEvent {
+ var err error
+ if n == 0 {
+ // If EOF is received. This should really never happen.
+ err = io.EOF
+ } else if n < 0 {
+ // If an error occurred while reading.
+ err = errno
+ } else {
+ // Read was too short.
+ err = errors.New("notify: short read in readEvents()")
+ }
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ var offset uint32
+ // We don't know how many events we just read into the buffer
+ // While the offset points to at least one whole event...
+ for offset <= uint32(n-unix.SizeofInotifyEvent) {
+ // Point "raw" to the event in the buffer
+ raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+ mask := uint32(raw.Mask)
+ nameLen := uint32(raw.Len)
+
+ if mask&unix.IN_Q_OVERFLOW != 0 {
+ select {
+ case w.Errors <- ErrEventOverflow:
+ case <-w.done:
+ return
+ }
+ }
+
+ // If the event happened to the watched directory or the watched file, the kernel
+ // doesn't append the filename to the event, but we would like to always fill the
+ // the "Name" field with a valid filename. We retrieve the path of the watch from
+ // the "paths" map.
+ w.mu.Lock()
+ name, ok := w.paths[int(raw.Wd)]
+ // IN_DELETE_SELF occurs when the file/directory being watched is removed.
+ // This is a sign to clean up the maps, otherwise we are no longer in sync
+ // with the inotify kernel state which has already deleted the watch
+ // automatically.
+ if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ delete(w.paths, int(raw.Wd))
+ delete(w.watches, name)
+ }
+ w.mu.Unlock()
+
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+
+ event := newEvent(name, mask)
+
+ // Send the events that are not ignored on the events channel
+ if !event.ignoreLinux(mask) {
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Move to the next event in the buffer
+ offset += unix.SizeofInotifyEvent + nameLen
+ }
+ }
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+ // Ignore anything the inotify API says to ignore
+ if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+ return true
+ }
+
+ // If the event is not a DELETE or RENAME, the file must exist.
+ // Otherwise the event is ignored.
+ // *Note*: this was put in place because it was seen that a MODIFY
+ // event was sent after the DELETE. This ignores that MODIFY and
+ // assumes a DELETE will come or has come if the file doesn't exist.
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+ _, statErr := os.Lstat(e.Name)
+ return os.IsNotExist(statErr)
+ }
+ return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+ e.Op |= Write
+ }
+ if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
new file mode 100644
index 0000000..cc7db4b
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+ fd int // File descriptor (as returned by the inotify_init() syscall)
+ epfd int // Epoll file descriptor
+ pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+ poller := new(fdPoller)
+ poller.fd = fd
+ poller.epfd = -1
+ poller.pipe[0] = -1
+ poller.pipe[1] = -1
+ return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+ var errno error
+ poller := emptyPoller(fd)
+ defer func() {
+ if errno != nil {
+ poller.close()
+ }
+ }()
+ poller.fd = fd
+
+ // Create epoll fd
+ poller.epfd, errno = unix.EpollCreate1(0)
+ if poller.epfd == -1 {
+ return nil, errno
+ }
+ // Create pipe; pipe[0] is the read end, pipe[1] the write end.
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register inotify fd with epoll
+ event := unix.EpollEvent{
+ Fd: int32(poller.fd),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register pipe fd with epoll
+ event = unix.EpollEvent{
+ Fd: int32(poller.pipe[0]),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+ // 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+ // I don't know whether epoll_wait returns the number of events returned,
+ // or the total number of events ready.
+ // I decided to catch both by making the buffer one larger than the maximum.
+ events := make([]unix.EpollEvent, 7)
+ for {
+ n, errno := unix.EpollWait(poller.epfd, events, -1)
+ if n == -1 {
+ if errno == unix.EINTR {
+ continue
+ }
+ return false, errno
+ }
+ if n == 0 {
+ // If there are no events, try again.
+ continue
+ }
+ if n > 6 {
+ // This should never happen. More events were returned than should be possible.
+ return false, errors.New("epoll_wait returned more events than I know what to do with")
+ }
+ ready := events[:n]
+ epollhup := false
+ epollerr := false
+ epollin := false
+ for _, event := range ready {
+ if event.Fd == int32(poller.fd) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // This should not happen, but if it does, treat it as a wakeup.
+ epollhup = true
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the file descriptor, we should pretend
+ // something is ready to read, and let unix.Read pick up the error.
+ epollerr = true
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // There is data to read.
+ epollin = true
+ }
+ }
+ if event.Fd == int32(poller.pipe[0]) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // Write pipe descriptor was closed, by us. This means we're closing down the
+ // watcher, and we should wake up.
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the pipe file descriptor.
+ // This is an absolute mystery, and should never ever happen.
+ return false, errors.New("Error on the pipe descriptor.")
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // This is a regular wakeup, so we have to clear the buffer.
+ err := poller.clearWake()
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ }
+
+ if epollhup || epollerr || epollin {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
+// Close the write end of the poller.
+func (poller *fdPoller) wake() error {
+ buf := make([]byte, 1)
+ n, errno := unix.Write(poller.pipe[1], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is full, poller will wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+ // You have to be woken up a LOT in order to get to 100!
+ buf := make([]byte, 100)
+ n, errno := unix.Read(poller.pipe[0], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is empty, someone else cleared our wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+ if poller.pipe[1] != -1 {
+ unix.Close(poller.pipe[1])
+ }
+ if poller.pipe[0] != -1 {
+ unix.Close(poller.pipe[0])
+ }
+ if poller.epfd != -1 {
+ unix.Close(poller.epfd)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
new file mode 100644
index 0000000..86e76a3
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -0,0 +1,521 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+
+ kq int // File descriptor (as returned by the kqueue() syscall).
+
+ mu sync.Mutex // Protects access to watcher data
+ watches map[string]int // Map of watched file descriptors (key: path).
+ externalWatches map[string]bool // Map of watches added by user of the library.
+ dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
+ paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+ name string
+ isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ kq, err := kqueue()
+ if err != nil {
+ return nil, err
+ }
+
+ w := &Watcher{
+ kq: kq,
+ watches: make(map[string]int),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]bool),
+ externalWatches: make(map[string]bool),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+
+ // copy paths to remove while locked
+ var pathsToRemove = make([]string, 0, len(w.watches))
+ for name := range w.watches {
+ pathsToRemove = append(pathsToRemove, name)
+ }
+ w.mu.Unlock()
+ // unlock before calling Remove, which also locks
+
+ for _, name := range pathsToRemove {
+ w.Remove(name)
+ }
+
+ // send a "quit" message to the reader goroutine
+ close(w.done)
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ w.externalWatches[name] = true
+ w.mu.Unlock()
+ _, err := w.addWatch(name, noteAllEvents)
+ return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
+ if !ok {
+ return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+ }
+
+ const registerRemove = unix.EV_DELETE
+ if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+ return err
+ }
+
+ unix.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ w.mu.Unlock()
+
+ // Find all watched paths that are in this directory that are not external.
+ if isDir {
+ var pathsToRemove []string
+ w.mu.Lock()
+ for _, path := range w.paths {
+ wdir, _ := filepath.Split(path.name)
+ if filepath.Clean(wdir) == name {
+ if !w.externalWatches[path.name] {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ }
+ w.mu.Unlock()
+ for _, name := range pathsToRemove {
+ // Since these are internal, not much sense in propagating error
+ // to the user, as that will just confuse them with an error about
+ // a path they did not explicitly watch themselves.
+ w.Remove(name)
+ }
+ }
+
+ return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+ var isDir bool
+ // Make ./name and name equivalent
+ name = filepath.Clean(name)
+
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return "", errors.New("kevent instance already closed")
+ }
+ watchfd, alreadyWatching := w.watches[name]
+ // We already have a watch, but we can still override flags.
+ if alreadyWatching {
+ isDir = w.paths[watchfd].isDir
+ }
+ w.mu.Unlock()
+
+ if !alreadyWatching {
+ fi, err := os.Lstat(name)
+ if err != nil {
+ return "", err
+ }
+
+ // Don't watch sockets.
+ if fi.Mode()&os.ModeSocket == os.ModeSocket {
+ return "", nil
+ }
+
+ // Don't watch named pipes.
+ if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+ return "", nil
+ }
+
+ // Follow Symlinks
+ // Unfortunately, Linux can add bogus symlinks to watch list without
+ // issue, and Windows can't do symlinks period (AFAIK). To maintain
+ // consistency, we will act like everything is fine. There will simply
+ // be no file events for broken symlinks.
+ // Hence the returns of nil on errors.
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ name, err = filepath.EvalSymlinks(name)
+ if err != nil {
+ return "", nil
+ }
+
+ w.mu.Lock()
+ _, alreadyWatching = w.watches[name]
+ w.mu.Unlock()
+
+ if alreadyWatching {
+ return name, nil
+ }
+
+ fi, err = os.Lstat(name)
+ if err != nil {
+ return "", nil
+ }
+ }
+
+ watchfd, err = unix.Open(name, openMode, 0700)
+ if watchfd == -1 {
+ return "", err
+ }
+
+ isDir = fi.IsDir()
+ }
+
+ const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+ if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+ unix.Close(watchfd)
+ return "", err
+ }
+
+ if !alreadyWatching {
+ w.mu.Lock()
+ w.watches[name] = watchfd
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+ w.mu.Unlock()
+ }
+
+ if isDir {
+ // Watch the directory if it has not been watched before,
+ // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ w.mu.Lock()
+
+ watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+ (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+ // Store flags so this watch can be updated later
+ w.dirFlags[name] = flags
+ w.mu.Unlock()
+
+ if watchDir {
+ if err := w.watchDirectoryFiles(name); err != nil {
+ return "", err
+ }
+ }
+ }
+ return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+ eventBuffer := make([]unix.Kevent_t, 10)
+
+loop:
+ for {
+ // See if there is a message on the "done" channel
+ select {
+ case <-w.done:
+ break loop
+ default:
+ }
+
+ // Get new events
+ kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+ // EINTR is okay, the syscall was interrupted before timeout expired.
+ if err != nil && err != unix.EINTR {
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ break loop
+ }
+ continue
+ }
+
+ // Flush the events we received to the Events channel
+ for len(kevents) > 0 {
+ kevent := &kevents[0]
+ watchfd := int(kevent.Ident)
+ mask := uint32(kevent.Fflags)
+ w.mu.Lock()
+ path := w.paths[watchfd]
+ w.mu.Unlock()
+ event := newEvent(path.name, mask)
+
+ if path.isDir && !(event.Op&Remove == Remove) {
+ // Double check to make sure the directory exists. This can happen when
+				// we do a rm -fr on a recursively watched folder and we receive a
+				// modification event first but the folder has been deleted and later
+ // receive the delete event
+ if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark it as a delete event
+ event.Op |= Remove
+ }
+ }
+
+ if event.Op&Rename == Rename || event.Op&Remove == Remove {
+ w.Remove(event.Name)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
+ }
+
+ if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ // Send the event on the Events channel.
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ break loop
+ }
+ }
+
+ if event.Op&Remove == Remove {
+ // Look for a file that may have overwritten this.
+ // For example, mv f1 f2 will delete f2, then create f2.
+ if path.isDir {
+ fileDir := filepath.Clean(event.Name)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
+ if found {
+ // make sure the directory exists before we watch for changes. When we
+ // do a recursive watch and perform rm -fr, the parent directory might
+ // have gone missing, ignore the missing directory and let the
+ // upcoming delete event remove the watch from the parent directory.
+ if _, err := os.Lstat(fileDir); err == nil {
+ w.sendDirectoryChangeEvents(fileDir)
+ }
+ }
+ } else {
+ filePath := filepath.Clean(event.Name)
+ if fileInfo, err := os.Lstat(filePath); err == nil {
+ w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ }
+ }
+ }
+
+ // Move to next event
+ kevents = kevents[1:]
+ }
+ }
+
+ // cleanup
+ err := unix.Close(w.kq)
+ if err != nil {
+ // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
+ select {
+ case w.Errors <- err:
+ default:
+ }
+ }
+ close(w.Events)
+ close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+func newCreateEvent(name string) Event {
+ return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+
+ return nil
+}
+
+// sendDirectoryEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Search for new files
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+ if err != nil {
+ return
+ }
+ }
+}
+
+// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ // Send create event
+ select {
+ case w.Events <- newCreateEvent(filePath):
+ case <-w.done:
+ return
+ }
+ }
+
+ // like watchDirectoryFiles (but without doing another ReadDir)
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+
+ return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+ if fileInfo.IsDir() {
+ // mimic Linux providing delete events for subdirectories
+ // but preserve the flags used if currently watching subdirectory
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+ return w.addWatch(name, flags)
+ }
+
+ // watch file to mimic Linux inotify
+ return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+ kq, err = unix.Kqueue()
+ if kq == -1 {
+ return kq, err
+ }
+ return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+ changes := make([]unix.Kevent_t, len(fds))
+
+ for i, fd := range fds {
+ // SetKevent converts int to the platform-specific types:
+ unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+ changes[i].Fflags = fflags
+ }
+
+ // register the events
+ success, err := unix.Kevent(kq, changes, nil, nil)
+ if success == -1 {
+ return err
+ }
+ return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+ n, err := unix.Kevent(kq, nil, events, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+ return unix.NsecToTimespec(d.Nanoseconds())
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_no_getwd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
similarity index 53%
rename from vendor/golang.org/x/sys/unix/syscall_no_getwd.go
rename to vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
index 530792e..7d8de14 100644
--- a/vendor/golang.org/x/sys/unix/syscall_no_getwd.go
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build dragonfly freebsd netbsd openbsd
+// +build freebsd openbsd netbsd dragonfly
-package unix
+package fsnotify
-const ImplementsGetwd = false
+import "golang.org/x/sys/unix"
-func Getwd() (string, error) { return "", ENOTSUP }
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
new file mode 100644
index 0000000..9139e17
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 0000000..09436f3
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Map access
+ port syscall.Handle // Handle to completion port
+ watches watchMap // Map of watches (key: i-number)
+ input chan *input // Inputs to the reader are sent on this channel
+ quit chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ w := &Watcher{
+ port: port,
+ watches: make(watchMap),
+ input: make(chan *input, 1),
+ Events: make(chan Event, 50),
+ Errors: make(chan error),
+ quit: make(chan chan<- error, 1),
+ }
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed {
+ return nil
+ }
+ w.isClosed = true
+
+ // Send "quit" message to the reader goroutine
+ ch := make(chan error)
+ w.quit <- ch
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ if w.isClosed {
+ return errors.New("watcher already closed")
+ }
+ in := &input{
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sysFSALLEVENTS,
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ in := &input{
+ op: opRemoveWatch,
+ path: filepath.Clean(name),
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+const (
+ // Options for AddWatch
+ sysFSONESHOT = 0x80000000
+ sysFSONLYDIR = 0x1000000
+
+ // Events
+ sysFSACCESS = 0x1
+ sysFSALLEVENTS = 0xfff
+ sysFSATTRIB = 0x4
+ sysFSCLOSE = 0x18
+ sysFSCREATE = 0x100
+ sysFSDELETE = 0x200
+ sysFSDELETESELF = 0x400
+ sysFSMODIFY = 0x2
+ sysFSMOVE = 0xc0
+ sysFSMOVEDFROM = 0x40
+ sysFSMOVEDTO = 0x80
+ sysFSMOVESELF = 0x800
+
+ // Special events
+ sysFSIGNORED = 0x8000
+ sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+ e.Op |= Create
+ }
+ if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+ e.Op |= Remove
+ }
+ if mask&sysFSMODIFY == sysFSMODIFY {
+ e.Op |= Write
+ }
+ if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+ e.Op |= Rename
+ }
+ if mask&sysFSATTRIB == sysFSATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+const (
+ opAddWatch = iota
+ opRemoveWatch
+)
+
+const (
+ provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+ op int
+ path string
+ flags uint32
+ reply chan error
+}
+
+type inode struct {
+ handle syscall.Handle
+ volume uint32
+ index uint64
+}
+
+type watch struct {
+ ov syscall.Overlapped
+ ino *inode // i-number
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+ e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+ if e != nil {
+ return os.NewSyscallError("PostQueuedCompletionStatus", e)
+ }
+ return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+ attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+ if e != nil {
+ return "", os.NewSyscallError("GetFileAttributes", e)
+ }
+ if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ dir = pathname
+ } else {
+ dir, _ = filepath.Split(pathname)
+ dir = filepath.Clean(dir)
+ }
+ return
+}
+
+func getIno(path string) (ino *inode, err error) {
+ h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+ syscall.FILE_LIST_DIRECTORY,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil, syscall.OPEN_EXISTING,
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateFile", e)
+ }
+ var fi syscall.ByHandleFileInformation
+ if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+ syscall.CloseHandle(h)
+ return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+ }
+ ino = &inode{
+ handle: h,
+ volume: fi.VolumeSerialNumber,
+ index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+ }
+ return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+ if i := m[ino.volume]; i != nil {
+ return i[ino.index]
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+ i := m[ino.volume]
+ if i == nil {
+ i = make(indexMap)
+ m[ino.volume] = i
+ }
+ i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ if flags&sysFSONLYDIR != 0 && pathname != dir {
+ return nil
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watchEntry := w.watches.get(ino)
+ w.mu.Unlock()
+ if watchEntry == nil {
+ if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+ syscall.CloseHandle(ino.handle)
+ return os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ watchEntry = &watch{
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ }
+ w.mu.Lock()
+ w.watches.set(ino, watchEntry)
+ w.mu.Unlock()
+ flags |= provisional
+ } else {
+ syscall.CloseHandle(ino.handle)
+ }
+ if pathname == dir {
+ watchEntry.mask |= flags
+ } else {
+ watchEntry.names[filepath.Base(pathname)] |= flags
+ }
+ if err = w.startRead(watchEntry); err != nil {
+ return err
+ }
+ if pathname == dir {
+ watchEntry.mask &= ^provisional
+ } else {
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watch := w.watches.get(ino)
+ w.mu.Unlock()
+ if watch == nil {
+ return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+ }
+ if pathname == dir {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ watch.mask = 0
+ } else {
+ name := filepath.Base(pathname)
+ w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+ for name, mask := range watch.names {
+ if mask&provisional == 0 {
+ w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+ }
+ delete(watch.names, name)
+ }
+ if watch.mask != 0 {
+ if watch.mask&provisional == 0 {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ }
+ watch.mask = 0
+ }
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+ if e := syscall.CancelIo(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CancelIo", e)
+ w.deleteWatch(watch)
+ }
+ mask := toWindowsFlags(watch.mask)
+ for _, m := range watch.names {
+ mask |= toWindowsFlags(m)
+ }
+ if mask == 0 {
+ if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CloseHandle", e)
+ }
+ w.mu.Lock()
+ delete(w.watches[watch.ino.volume], watch.ino.index)
+ w.mu.Unlock()
+ return nil
+ }
+ e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+ uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ if e != nil {
+ err := os.NewSyscallError("ReadDirectoryChanges", e)
+ if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+ // Watched directory was probably removed
+ if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ err = nil
+ }
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ return err
+ }
+ return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+ var (
+ n, key uint32
+ ov *syscall.Overlapped
+ )
+ runtime.LockOSThread()
+
+ for {
+ e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+ watch := (*watch)(unsafe.Pointer(ov))
+
+ if watch == nil {
+ select {
+ case ch := <-w.quit:
+ w.mu.Lock()
+ var indexes []indexMap
+ for _, index := range w.watches {
+ indexes = append(indexes, index)
+ }
+ w.mu.Unlock()
+ for _, index := range indexes {
+ for _, watch := range index {
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ }
+ }
+ var err error
+ if e := syscall.CloseHandle(w.port); e != nil {
+ err = os.NewSyscallError("CloseHandle", e)
+ }
+ close(w.Events)
+ close(w.Errors)
+ ch <- err
+ return
+ case in := <-w.input:
+ switch in.op {
+ case opAddWatch:
+ in.reply <- w.addWatch(in.path, uint64(in.flags))
+ case opRemoveWatch:
+ in.reply <- w.remWatch(in.path)
+ }
+ default:
+ }
+ continue
+ }
+
+ switch e {
+ case syscall.ERROR_MORE_DATA:
+ if watch == nil {
+ w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+ } else {
+ // The i/o succeeded but the buffer is full.
+ // In theory we should be building up a full packet.
+ // In practice we can get away with just carrying on.
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case syscall.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case syscall.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle
+ continue
+ default:
+ w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
+ continue
+ case nil:
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.Events <- newEvent("", sysFSQOVERFLOW)
+ w.Errors <- errors.New("short read in readEvents()")
+ break
+ }
+
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+ buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+ name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+ fullname := filepath.Join(watch.path, name)
+
+ var mask uint64
+ switch raw.Action {
+ case syscall.FILE_ACTION_REMOVED:
+ mask = sysFSDELETESELF
+ case syscall.FILE_ACTION_MODIFIED:
+ mask = sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ if watch.names[watch.rename] != 0 {
+ watch.names[name] |= watch.names[watch.rename]
+ delete(watch.names, watch.rename)
+ mask = sysFSMOVESELF
+ }
+ }
+
+ sendNameEvent := func() {
+ if w.sendEvent(fullname, watch.names[name]&mask) {
+ if watch.names[name]&sysFSONESHOT != 0 {
+ delete(watch.names, name)
+ }
+ }
+ }
+ if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ sendNameEvent()
+ }
+ if raw.Action == syscall.FILE_ACTION_REMOVED {
+ w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ fullname = filepath.Join(watch.path, watch.rename)
+ sendNameEvent()
+ }
+
+ // Move to the next event in the buffer
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ offset += raw.NextEntryOffset
+
+ // Error!
+ if offset >= n {
+ w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+ break
+ }
+ }
+
+ if err := w.startRead(watch); err != nil {
+ w.Errors <- err
+ }
+ }
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+ if mask == 0 {
+ return false
+ }
+ event := newEvent(name, uint32(mask))
+ select {
+ case ch := <-w.quit:
+ w.quit <- ch
+ case w.Events <- event:
+ }
+ return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+ var m uint32
+ if mask&sysFSACCESS != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+ }
+ if mask&sysFSMODIFY != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+ }
+ if mask&sysFSATTRIB != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+ }
+ if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+ }
+ return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+ switch action {
+ case syscall.FILE_ACTION_ADDED:
+ return sysFSCREATE
+ case syscall.FILE_ACTION_REMOVED:
+ return sysFSDELETE
+ case syscall.FILE_ACTION_MODIFIED:
+ return sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ return sysFSMOVEDFROM
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ return sysFSMOVEDTO
+ }
+ return 0
+}
diff --git a/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp b/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp
deleted file mode 100644
index fc31f51..0000000
--- a/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-To build the snappytool binary:
-g++ main.cpp /usr/lib/libsnappy.a -o snappytool
-or, if you have built the C++ snappy library from source:
-g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool
-after running "make" from your snappy checkout directory.
-*/
-
-#include
-#include
-#include
-#include
-
-#include "snappy.h"
-
-#define N 1000000
-
-char dst[N];
-char src[N];
-
-int main(int argc, char** argv) {
- // Parse args.
- if (argc != 2) {
- fprintf(stderr, "exactly one of -d or -e must be given\n");
- return 1;
- }
- bool decode = strcmp(argv[1], "-d") == 0;
- bool encode = strcmp(argv[1], "-e") == 0;
- if (decode == encode) {
- fprintf(stderr, "exactly one of -d or -e must be given\n");
- return 1;
- }
-
- // Read all of stdin into src[:s].
- size_t s = 0;
- while (1) {
- if (s == N) {
- fprintf(stderr, "input too large\n");
- return 1;
- }
- ssize_t n = read(0, src+s, N-s);
- if (n == 0) {
- break;
- }
- if (n < 0) {
- fprintf(stderr, "read error: %s\n", strerror(errno));
- // TODO: handle EAGAIN, EINTR?
- return 1;
- }
- s += n;
- }
-
- // Encode or decode src[:s] to dst[:d], and write to stdout.
- size_t d = 0;
- if (encode) {
- if (N < snappy::MaxCompressedLength(s)) {
- fprintf(stderr, "input too large after encoding\n");
- return 1;
- }
- snappy::RawCompress(src, s, dst, &d);
- } else {
- if (!snappy::GetUncompressedLength(src, s, &d)) {
- fprintf(stderr, "could not get uncompressed length\n");
- return 1;
- }
- if (N < d) {
- fprintf(stderr, "input too large after decoding\n");
- return 1;
- }
- if (!snappy::RawUncompress(src, s, dst)) {
- fprintf(stderr, "input was not valid Snappy-compressed data\n");
- return 1;
- }
- }
- write(1, dst, d);
- return 0;
-}
diff --git a/vendor/github.com/golang/snappy/golden_test.go b/vendor/github.com/golang/snappy/golden_test.go
deleted file mode 100644
index e4496f9..0000000
--- a/vendor/github.com/golang/snappy/golden_test.go
+++ /dev/null
@@ -1,1965 +0,0 @@
-// Copyright 2016 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snappy
-
-// extendMatchGoldenTestCases is the i and j arguments, and the returned value,
-// for every extendMatch call issued when encoding the
-// testdata/Mark.Twain-Tom.Sawyer.txt file. It is used to benchmark the
-// extendMatch implementation.
-//
-// It was generated manually by adding some print statements to the (pure Go)
-// extendMatch implementation:
-//
-// func extendMatch(src []byte, i, j int) int {
-// i0, j0 := i, j
-// for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
-// }
-// println("{", i0, ",", j0, ",", j, "},")
-// return j
-// }
-//
-// and running "go test -test.run=EncodeGoldenInput -tags=noasm".
-var extendMatchGoldenTestCases = []struct {
- i, j, want int
-}{
- {11, 61, 62},
- {80, 81, 82},
- {86, 87, 101},
- {85, 133, 149},
- {152, 153, 162},
- {133, 168, 193},
- {168, 207, 225},
- {81, 255, 275},
- {278, 279, 283},
- {306, 417, 417},
- {373, 428, 430},
- {389, 444, 447},
- {474, 510, 512},
- {465, 533, 533},
- {47, 547, 547},
- {307, 551, 554},
- {420, 582, 587},
- {309, 604, 604},
- {604, 625, 625},
- {538, 629, 629},
- {328, 640, 640},
- {573, 645, 645},
- {319, 657, 657},
- {30, 664, 664},
- {45, 679, 680},
- {621, 684, 684},
- {376, 700, 700},
- {33, 707, 708},
- {601, 733, 733},
- {334, 744, 745},
- {625, 758, 759},
- {382, 763, 763},
- {550, 769, 771},
- {533, 789, 789},
- {804, 813, 813},
- {342, 841, 842},
- {742, 847, 847},
- {74, 852, 852},
- {810, 864, 864},
- {758, 868, 869},
- {714, 883, 883},
- {582, 889, 891},
- {61, 934, 935},
- {894, 942, 942},
- {939, 949, 949},
- {785, 956, 957},
- {886, 978, 978},
- {792, 998, 998},
- {998, 1005, 1005},
- {572, 1032, 1032},
- {698, 1051, 1053},
- {599, 1067, 1069},
- {1056, 1079, 1079},
- {942, 1089, 1090},
- {831, 1094, 1096},
- {1088, 1100, 1103},
- {732, 1113, 1114},
- {1037, 1118, 1118},
- {872, 1128, 1130},
- {1079, 1140, 1142},
- {332, 1162, 1162},
- {207, 1168, 1186},
- {1189, 1190, 1225},
- {105, 1229, 1230},
- {79, 1256, 1257},
- {1190, 1261, 1283},
- {255, 1306, 1306},
- {1319, 1339, 1358},
- {364, 1370, 1370},
- {955, 1378, 1380},
- {122, 1403, 1403},
- {1325, 1407, 1419},
- {664, 1423, 1424},
- {941, 1461, 1463},
- {867, 1477, 1478},
- {757, 1488, 1489},
- {1140, 1499, 1499},
- {31, 1506, 1506},
- {1487, 1510, 1512},
- {1089, 1520, 1521},
- {1467, 1525, 1529},
- {1394, 1537, 1537},
- {1499, 1541, 1541},
- {367, 1558, 1558},
- {1475, 1564, 1564},
- {1525, 1568, 1571},
- {1541, 1582, 1583},
- {864, 1587, 1588},
- {704, 1597, 1597},
- {336, 1602, 1602},
- {1383, 1613, 1613},
- {1498, 1617, 1618},
- {1051, 1623, 1625},
- {401, 1643, 1645},
- {1072, 1654, 1655},
- {1067, 1667, 1669},
- {699, 1673, 1674},
- {1587, 1683, 1684},
- {920, 1696, 1696},
- {1505, 1710, 1710},
- {1550, 1723, 1723},
- {996, 1727, 1727},
- {833, 1733, 1734},
- {1638, 1739, 1740},
- {1654, 1744, 1744},
- {753, 1761, 1761},
- {1548, 1773, 1773},
- {1568, 1777, 1780},
- {1683, 1793, 1794},
- {948, 1801, 1801},
- {1666, 1805, 1808},
- {1502, 1814, 1814},
- {1696, 1822, 1822},
- {502, 1836, 1837},
- {917, 1843, 1843},
- {1733, 1854, 1855},
- {970, 1859, 1859},
- {310, 1863, 1863},
- {657, 1872, 1872},
- {1005, 1876, 1876},
- {1662, 1880, 1880},
- {904, 1892, 1892},
- {1427, 1910, 1910},
- {1772, 1929, 1930},
- {1822, 1937, 1940},
- {1858, 1949, 1950},
- {1602, 1956, 1956},
- {1150, 1962, 1962},
- {1504, 1966, 1967},
- {51, 1971, 1971},
- {1605, 1979, 1979},
- {1458, 1983, 1988},
- {1536, 2001, 2006},
- {1373, 2014, 2018},
- {1494, 2025, 2025},
- {1667, 2029, 2031},
- {1592, 2035, 2035},
- {330, 2045, 2045},
- {1376, 2053, 2053},
- {1991, 2058, 2059},
- {1635, 2065, 2065},
- {1992, 2073, 2074},
- {2014, 2080, 2081},
- {1546, 2085, 2087},
- {59, 2099, 2099},
- {1996, 2106, 2106},
- {1836, 2110, 2110},
- {2068, 2114, 2114},
- {1338, 2122, 2122},
- {1562, 2128, 2130},
- {1934, 2134, 2134},
- {2114, 2141, 2142},
- {977, 2149, 2150},
- {956, 2154, 2155},
- {1407, 2162, 2162},
- {1773, 2166, 2166},
- {883, 2171, 2171},
- {623, 2175, 2178},
- {1520, 2191, 2192},
- {1162, 2200, 2200},
- {912, 2204, 2204},
- {733, 2208, 2208},
- {1777, 2212, 2215},
- {1532, 2219, 2219},
- {718, 2223, 2225},
- {2069, 2229, 2229},
- {2207, 2245, 2246},
- {1139, 2264, 2264},
- {677, 2274, 2274},
- {2099, 2279, 2279},
- {1863, 2283, 2283},
- {1966, 2305, 2306},
- {2279, 2313, 2313},
- {1628, 2319, 2319},
- {755, 2329, 2329},
- {1461, 2334, 2334},
- {2117, 2340, 2340},
- {2313, 2349, 2349},
- {1859, 2353, 2353},
- {1048, 2362, 2362},
- {895, 2366, 2366},
- {2278, 2373, 2373},
- {1884, 2377, 2377},
- {1402, 2387, 2392},
- {700, 2398, 2398},
- {1971, 2402, 2402},
- {2009, 2419, 2419},
- {1441, 2426, 2428},
- {2208, 2432, 2432},
- {2038, 2436, 2436},
- {932, 2443, 2443},
- {1759, 2447, 2448},
- {744, 2452, 2452},
- {1875, 2458, 2458},
- {2405, 2468, 2468},
- {1596, 2472, 2473},
- {1953, 2480, 2482},
- {736, 2487, 2487},
- {1913, 2493, 2493},
- {774, 2497, 2497},
- {1484, 2506, 2508},
- {2432, 2512, 2512},
- {752, 2519, 2519},
- {2497, 2523, 2523},
- {2409, 2528, 2529},
- {2122, 2533, 2533},
- {2396, 2537, 2538},
- {2410, 2547, 2548},
- {1093, 2555, 2560},
- {551, 2564, 2565},
- {2268, 2569, 2569},
- {1362, 2580, 2580},
- {1916, 2584, 2585},
- {994, 2589, 2590},
- {1979, 2596, 2596},
- {1041, 2602, 2602},
- {2104, 2614, 2616},
- {2609, 2621, 2628},
- {2329, 2638, 2638},
- {2211, 2657, 2658},
- {2638, 2662, 2667},
- {2578, 2676, 2679},
- {2153, 2685, 2686},
- {2608, 2696, 2697},
- {598, 2712, 2712},
- {2620, 2719, 2720},
- {1888, 2724, 2728},
- {2709, 2732, 2732},
- {1365, 2739, 2739},
- {784, 2747, 2748},
- {424, 2753, 2753},
- {2204, 2759, 2759},
- {812, 2768, 2769},
- {2455, 2773, 2773},
- {1722, 2781, 2781},
- {1917, 2792, 2792},
- {2705, 2799, 2799},
- {2685, 2806, 2807},
- {2742, 2811, 2811},
- {1370, 2818, 2818},
- {2641, 2830, 2830},
- {2512, 2837, 2837},
- {2457, 2841, 2841},
- {2756, 2845, 2845},
- {2719, 2855, 2855},
- {1423, 2859, 2859},
- {2849, 2863, 2865},
- {1474, 2871, 2871},
- {1161, 2875, 2876},
- {2282, 2880, 2881},
- {2746, 2888, 2888},
- {1783, 2893, 2893},
- {2401, 2899, 2900},
- {2632, 2920, 2923},
- {2422, 2928, 2930},
- {2715, 2939, 2939},
- {2162, 2943, 2943},
- {2859, 2947, 2947},
- {1910, 2951, 2951},
- {1431, 2955, 2956},
- {1439, 2964, 2964},
- {2501, 2968, 2969},
- {2029, 2973, 2976},
- {689, 2983, 2984},
- {1658, 2988, 2988},
- {1031, 2996, 2996},
- {2149, 3001, 3002},
- {25, 3009, 3013},
- {2964, 3023, 3023},
- {953, 3027, 3028},
- {2359, 3036, 3036},
- {3023, 3049, 3049},
- {2880, 3055, 3056},
- {2973, 3076, 3077},
- {2874, 3090, 3090},
- {2871, 3094, 3094},
- {2532, 3100, 3100},
- {2938, 3107, 3108},
- {350, 3115, 3115},
- {2196, 3119, 3121},
- {1133, 3127, 3129},
- {1797, 3134, 3150},
- {3032, 3158, 3158},
- {3016, 3172, 3172},
- {2533, 3179, 3179},
- {3055, 3187, 3188},
- {1384, 3192, 3193},
- {2799, 3199, 3199},
- {2126, 3203, 3207},
- {2334, 3215, 3215},
- {2105, 3220, 3221},
- {3199, 3229, 3229},
- {2891, 3233, 3233},
- {855, 3240, 3240},
- {1852, 3253, 3256},
- {2140, 3263, 3263},
- {1682, 3268, 3270},
- {3243, 3274, 3274},
- {924, 3279, 3279},
- {2212, 3283, 3283},
- {2596, 3287, 3287},
- {2999, 3291, 3291},
- {2353, 3295, 3295},
- {2480, 3302, 3304},
- {1959, 3308, 3311},
- {3000, 3318, 3318},
- {845, 3330, 3330},
- {2283, 3334, 3334},
- {2519, 3342, 3342},
- {3325, 3346, 3348},
- {2397, 3353, 3354},
- {2763, 3358, 3358},
- {3198, 3363, 3364},
- {3211, 3368, 3372},
- {2950, 3376, 3377},
- {3245, 3388, 3391},
- {2264, 3398, 3398},
- {795, 3403, 3403},
- {3287, 3407, 3407},
- {3358, 3411, 3411},
- {3317, 3415, 3415},
- {3232, 3431, 3431},
- {2128, 3435, 3437},
- {3236, 3441, 3441},
- {3398, 3445, 3446},
- {2814, 3450, 3450},
- {3394, 3466, 3466},
- {2425, 3470, 3470},
- {3330, 3476, 3476},
- {1612, 3480, 3480},
- {1004, 3485, 3486},
- {2732, 3490, 3490},
- {1117, 3494, 3495},
- {629, 3501, 3501},
- {3087, 3514, 3514},
- {684, 3518, 3518},
- {3489, 3522, 3524},
- {1760, 3529, 3529},
- {617, 3537, 3537},
- {3431, 3541, 3541},
- {997, 3547, 3547},
- {882, 3552, 3553},
- {2419, 3558, 3558},
- {610, 3562, 3563},
- {1903, 3567, 3569},
- {3005, 3575, 3575},
- {3076, 3585, 3586},
- {3541, 3590, 3590},
- {3490, 3594, 3594},
- {1899, 3599, 3599},
- {3545, 3606, 3606},
- {3290, 3614, 3615},
- {2056, 3619, 3620},
- {3556, 3625, 3625},
- {3294, 3632, 3633},
- {637, 3643, 3644},
- {3609, 3648, 3650},
- {3175, 3658, 3658},
- {3498, 3665, 3665},
- {1597, 3669, 3669},
- {1983, 3673, 3673},
- {3215, 3682, 3682},
- {3544, 3689, 3689},
- {3694, 3698, 3698},
- {3228, 3715, 3716},
- {2594, 3720, 3722},
- {3573, 3726, 3726},
- {2479, 3732, 3735},
- {3191, 3741, 3742},
- {1113, 3746, 3747},
- {2844, 3751, 3751},
- {3445, 3756, 3757},
- {3755, 3766, 3766},
- {3421, 3775, 3780},
- {3593, 3784, 3786},
- {3263, 3796, 3796},
- {3469, 3806, 3806},
- {2602, 3815, 3815},
- {723, 3819, 3821},
- {1608, 3826, 3826},
- {3334, 3830, 3830},
- {2198, 3835, 3835},
- {2635, 3840, 3840},
- {3702, 3852, 3853},
- {3406, 3858, 3859},
- {3681, 3867, 3870},
- {3407, 3880, 3880},
- {340, 3889, 3889},
- {3772, 3893, 3893},
- {593, 3897, 3897},
- {2563, 3914, 3916},
- {2981, 3929, 3929},
- {1835, 3933, 3934},
- {3906, 3951, 3951},
- {1459, 3958, 3958},
- {3889, 3974, 3974},
- {2188, 3982, 3982},
- {3220, 3986, 3987},
- {3585, 3991, 3993},
- {3712, 3997, 4001},
- {2805, 4007, 4007},
- {1879, 4012, 4013},
- {3618, 4018, 4018},
- {1145, 4031, 4032},
- {3901, 4037, 4037},
- {2772, 4046, 4047},
- {2802, 4053, 4054},
- {3299, 4058, 4058},
- {3725, 4066, 4066},
- {2271, 4070, 4070},
- {385, 4075, 4076},
- {3624, 4089, 4090},
- {3745, 4096, 4098},
- {1563, 4102, 4102},
- {4045, 4106, 4111},
- {3696, 4115, 4119},
- {3376, 4125, 4126},
- {1880, 4130, 4130},
- {2048, 4140, 4141},
- {2724, 4149, 4149},
- {1767, 4156, 4156},
- {2601, 4164, 4164},
- {2757, 4168, 4168},
- {3974, 4172, 4172},
- {3914, 4178, 4178},
- {516, 4185, 4185},
- {1032, 4189, 4190},
- {3462, 4197, 4198},
- {3805, 4202, 4203},
- {3910, 4207, 4212},
- {3075, 4221, 4221},
- {3756, 4225, 4226},
- {1872, 4236, 4237},
- {3844, 4241, 4241},
- {3991, 4245, 4249},
- {2203, 4258, 4258},
- {3903, 4267, 4268},
- {705, 4272, 4272},
- {1896, 4276, 4276},
- {1955, 4285, 4288},
- {3746, 4302, 4303},
- {2672, 4311, 4311},
- {3969, 4317, 4317},
- {3883, 4322, 4322},
- {1920, 4339, 4340},
- {3527, 4344, 4346},
- {1160, 4358, 4358},
- {3648, 4364, 4366},
- {2711, 4387, 4387},
- {3619, 4391, 4392},
- {1944, 4396, 4396},
- {4369, 4400, 4400},
- {2736, 4404, 4407},
- {2546, 4411, 4412},
- {4390, 4422, 4422},
- {3610, 4426, 4427},
- {4058, 4431, 4431},
- {4374, 4435, 4435},
- {3463, 4445, 4446},
- {1813, 4452, 4452},
- {3669, 4456, 4456},
- {3830, 4460, 4460},
- {421, 4464, 4465},
- {1719, 4471, 4471},
- {3880, 4475, 4475},
- {1834, 4485, 4487},
- {3590, 4491, 4491},
- {442, 4496, 4497},
- {4435, 4501, 4501},
- {3814, 4509, 4509},
- {987, 4513, 4513},
- {4494, 4518, 4521},
- {3218, 4526, 4529},
- {4221, 4537, 4537},
- {2778, 4543, 4545},
- {4422, 4552, 4552},
- {4031, 4558, 4559},
- {4178, 4563, 4563},
- {3726, 4567, 4574},
- {4027, 4578, 4578},
- {4339, 4585, 4587},
- {3796, 4592, 4595},
- {543, 4600, 4613},
- {2855, 4620, 4621},
- {2795, 4627, 4627},
- {3440, 4631, 4632},
- {4279, 4636, 4639},
- {4245, 4643, 4645},
- {4516, 4649, 4650},
- {3133, 4654, 4654},
- {4042, 4658, 4659},
- {3422, 4663, 4663},
- {4046, 4667, 4668},
- {4267, 4672, 4672},
- {4004, 4676, 4677},
- {2490, 4682, 4682},
- {2451, 4697, 4697},
- {3027, 4705, 4705},
- {4028, 4717, 4717},
- {4460, 4721, 4721},
- {2471, 4725, 4727},
- {3090, 4735, 4735},
- {3192, 4739, 4740},
- {3835, 4760, 4760},
- {4540, 4764, 4764},
- {4007, 4772, 4774},
- {619, 4784, 4784},
- {3561, 4789, 4791},
- {3367, 4805, 4805},
- {4490, 4810, 4811},
- {2402, 4815, 4815},
- {3352, 4819, 4822},
- {2773, 4828, 4828},
- {4552, 4832, 4832},
- {2522, 4840, 4841},
- {316, 4847, 4852},
- {4715, 4858, 4858},
- {2959, 4862, 4862},
- {4858, 4868, 4869},
- {2134, 4873, 4873},
- {578, 4878, 4878},
- {4189, 4889, 4890},
- {2229, 4894, 4894},
- {4501, 4898, 4898},
- {2297, 4903, 4903},
- {2933, 4909, 4909},
- {3008, 4913, 4913},
- {3153, 4917, 4917},
- {4819, 4921, 4921},
- {4921, 4932, 4933},
- {4920, 4944, 4945},
- {4814, 4954, 4955},
- {576, 4966, 4966},
- {1854, 4970, 4971},
- {1374, 4975, 4976},
- {3307, 4980, 4980},
- {974, 4984, 4988},
- {4721, 4992, 4992},
- {4898, 4996, 4996},
- {4475, 5006, 5006},
- {3819, 5012, 5012},
- {1948, 5019, 5021},
- {4954, 5027, 5029},
- {3740, 5038, 5040},
- {4763, 5044, 5045},
- {1936, 5051, 5051},
- {4844, 5055, 5060},
- {4215, 5069, 5072},
- {1146, 5076, 5076},
- {3845, 5082, 5082},
- {4865, 5090, 5090},
- {4624, 5094, 5094},
- {4815, 5098, 5098},
- {5006, 5105, 5105},
- {4980, 5109, 5109},
- {4795, 5113, 5115},
- {5043, 5119, 5121},
- {4782, 5129, 5129},
- {3826, 5139, 5139},
- {3876, 5156, 5156},
- {3111, 5167, 5171},
- {1470, 5177, 5177},
- {4431, 5181, 5181},
- {546, 5189, 5189},
- {4225, 5193, 5193},
- {1672, 5199, 5201},
- {4207, 5205, 5209},
- {4220, 5216, 5217},
- {4658, 5224, 5225},
- {3295, 5235, 5235},
- {2436, 5239, 5239},
- {2349, 5246, 5246},
- {2175, 5250, 5250},
- {5180, 5257, 5258},
- {3161, 5263, 5263},
- {5105, 5272, 5272},
- {3552, 5282, 5282},
- {4944, 5299, 5300},
- {4130, 5312, 5313},
- {902, 5323, 5323},
- {913, 5327, 5327},
- {2987, 5333, 5334},
- {5150, 5344, 5344},
- {5249, 5348, 5348},
- {1965, 5358, 5359},
- {5330, 5364, 5364},
- {2012, 5373, 5377},
- {712, 5384, 5386},
- {5235, 5390, 5390},
- {5044, 5398, 5399},
- {564, 5406, 5406},
- {39, 5410, 5410},
- {4642, 5422, 5425},
- {4421, 5437, 5438},
- {2347, 5449, 5449},
- {5333, 5453, 5454},
- {4136, 5458, 5459},
- {3793, 5468, 5468},
- {2243, 5480, 5480},
- {4889, 5492, 5493},
- {4295, 5504, 5504},
- {2785, 5511, 5511},
- {2377, 5518, 5518},
- {3662, 5525, 5525},
- {5097, 5529, 5530},
- {4781, 5537, 5538},
- {4697, 5547, 5548},
- {436, 5552, 5553},
- {5542, 5558, 5558},
- {3692, 5562, 5562},
- {2696, 5568, 5569},
- {4620, 5578, 5578},
- {2898, 5590, 5590},
- {5557, 5596, 5618},
- {2797, 5623, 5625},
- {2792, 5629, 5629},
- {5243, 5633, 5633},
- {5348, 5637, 5637},
- {5547, 5643, 5643},
- {4296, 5654, 5655},
- {5568, 5662, 5662},
- {3001, 5670, 5671},
- {3794, 5679, 5679},
- {4006, 5685, 5686},
- {4969, 5690, 5692},
- {687, 5704, 5704},
- {4563, 5708, 5708},
- {1723, 5738, 5738},
- {649, 5742, 5742},
- {5163, 5748, 5755},
- {3907, 5759, 5759},
- {3074, 5764, 5764},
- {5326, 5771, 5771},
- {2951, 5776, 5776},
- {5181, 5780, 5780},
- {2614, 5785, 5788},
- {4709, 5794, 5794},
- {2784, 5799, 5799},
- {5518, 5803, 5803},
- {4155, 5812, 5815},
- {921, 5819, 5819},
- {5224, 5823, 5824},
- {2853, 5830, 5836},
- {5776, 5840, 5840},
- {2955, 5844, 5845},
- {5745, 5853, 5853},
- {3291, 5857, 5857},
- {2988, 5861, 5861},
- {2647, 5865, 5865},
- {5398, 5869, 5870},
- {1085, 5874, 5875},
- {4906, 5881, 5881},
- {802, 5886, 5886},
- {5119, 5890, 5893},
- {5802, 5899, 5900},
- {3415, 5904, 5904},
- {5629, 5908, 5908},
- {3714, 5912, 5914},
- {5558, 5921, 5921},
- {2710, 5927, 5928},
- {1094, 5932, 5934},
- {2653, 5940, 5941},
- {4735, 5954, 5954},
- {5861, 5958, 5958},
- {1040, 5971, 5971},
- {5514, 5977, 5977},
- {5048, 5981, 5982},
- {5953, 5992, 5993},
- {3751, 5997, 5997},
- {4991, 6001, 6002},
- {5885, 6006, 6007},
- {5529, 6011, 6012},
- {4974, 6019, 6020},
- {5857, 6024, 6024},
- {3483, 6032, 6032},
- {3594, 6036, 6036},
- {1997, 6040, 6040},
- {5997, 6044, 6047},
- {5197, 6051, 6051},
- {1764, 6055, 6055},
- {6050, 6059, 6059},
- {5239, 6063, 6063},
- {5049, 6067, 6067},
- {5957, 6073, 6074},
- {1022, 6078, 6078},
- {3414, 6083, 6084},
- {3809, 6090, 6090},
- {4562, 6095, 6096},
- {5878, 6104, 6104},
- {594, 6108, 6109},
- {3353, 6115, 6116},
- {4992, 6120, 6121},
- {2424, 6125, 6125},
- {4484, 6130, 6130},
- {3900, 6134, 6135},
- {5793, 6139, 6141},
- {3562, 6145, 6145},
- {1438, 6152, 6153},
- {6058, 6157, 6158},
- {4411, 6162, 6163},
- {4590, 6167, 6171},
- {4748, 6175, 6175},
- {5517, 6183, 6184},
- {6095, 6191, 6192},
- {1471, 6203, 6203},
- {2643, 6209, 6210},
- {450, 6220, 6220},
- {5266, 6226, 6226},
- {2576, 6233, 6233},
- {2607, 6239, 6240},
- {5164, 6244, 6251},
- {6054, 6255, 6255},
- {1789, 6260, 6261},
- {5250, 6265, 6265},
- {6062, 6273, 6278},
- {5990, 6282, 6282},
- {3283, 6286, 6286},
- {5436, 6290, 6290},
- {6059, 6294, 6294},
- {5668, 6298, 6300},
- {3072, 6324, 6329},
- {3132, 6338, 6339},
- {3246, 6343, 6344},
- {28, 6348, 6349},
- {1503, 6353, 6355},
- {6067, 6359, 6359},
- {3384, 6364, 6364},
- {545, 6375, 6376},
- {5803, 6380, 6380},
- {5522, 6384, 6385},
- {5908, 6389, 6389},
- {2796, 6393, 6396},
- {4831, 6403, 6404},
- {6388, 6412, 6412},
- {6005, 6417, 6420},
- {4450, 6430, 6430},
- {4050, 6435, 6435},
- {5372, 6441, 6441},
- {4378, 6447, 6447},
- {6199, 6452, 6452},
- {3026, 6456, 6456},
- {2642, 6460, 6462},
- {6392, 6470, 6470},
- {6459, 6474, 6474},
- {2829, 6487, 6488},
- {2942, 6499, 6504},
- {5069, 6508, 6511},
- {5341, 6515, 6516},
- {5853, 6521, 6525},
- {6104, 6531, 6531},
- {5759, 6535, 6538},
- {4672, 6542, 6543},
- {2443, 6550, 6550},
- {5109, 6554, 6554},
- {6494, 6558, 6560},
- {6006, 6570, 6572},
- {6424, 6576, 6580},
- {4693, 6591, 6592},
- {6439, 6596, 6597},
- {3179, 6601, 6601},
- {5299, 6606, 6607},
- {4148, 6612, 6613},
- {3774, 6617, 6617},
- {3537, 6623, 6624},
- {4975, 6628, 6629},
- {3848, 6636, 6636},
- {856, 6640, 6640},
- {5724, 6645, 6645},
- {6632, 6651, 6651},
- {4630, 6656, 6658},
- {1440, 6662, 6662},
- {4281, 6666, 6667},
- {4302, 6671, 6672},
- {2589, 6676, 6677},
- {5647, 6681, 6687},
- {6082, 6691, 6693},
- {6144, 6698, 6698},
- {6103, 6709, 6710},
- {3710, 6714, 6714},
- {4253, 6718, 6721},
- {2467, 6730, 6730},
- {4778, 6734, 6734},
- {6528, 6738, 6738},
- {4358, 6747, 6747},
- {5889, 6753, 6753},
- {5193, 6757, 6757},
- {5797, 6761, 6761},
- {3858, 6765, 6766},
- {5951, 6776, 6776},
- {6487, 6781, 6782},
- {3282, 6786, 6787},
- {4667, 6797, 6799},
- {1927, 6803, 6806},
- {6583, 6810, 6810},
- {4937, 6814, 6814},
- {6099, 6824, 6824},
- {4415, 6835, 6836},
- {6332, 6840, 6841},
- {5160, 6850, 6850},
- {4764, 6854, 6854},
- {6814, 6858, 6859},
- {3018, 6864, 6864},
- {6293, 6868, 6869},
- {6359, 6877, 6877},
- {3047, 6884, 6886},
- {5262, 6890, 6891},
- {5471, 6900, 6900},
- {3268, 6910, 6912},
- {1047, 6916, 6916},
- {5904, 6923, 6923},
- {5798, 6933, 6938},
- {4149, 6942, 6942},
- {1821, 6946, 6946},
- {3599, 6952, 6952},
- {6470, 6957, 6957},
- {5562, 6961, 6961},
- {6268, 6965, 6967},
- {6389, 6971, 6971},
- {6596, 6975, 6976},
- {6553, 6980, 6981},
- {6576, 6985, 6989},
- {1375, 6993, 6993},
- {652, 6998, 6998},
- {4876, 7002, 7003},
- {5768, 7011, 7013},
- {3973, 7017, 7017},
- {6802, 7025, 7025},
- {6955, 7034, 7036},
- {6974, 7040, 7040},
- {5944, 7044, 7044},
- {6992, 7048, 7054},
- {6872, 7059, 7059},
- {2943, 7063, 7063},
- {6923, 7067, 7067},
- {5094, 7071, 7071},
- {4873, 7075, 7075},
- {5819, 7079, 7079},
- {5945, 7085, 7085},
- {1540, 7090, 7091},
- {2090, 7095, 7095},
- {5024, 7104, 7105},
- {6900, 7109, 7109},
- {6024, 7113, 7114},
- {6000, 7118, 7120},
- {2187, 7124, 7125},
- {6760, 7129, 7130},
- {5898, 7134, 7136},
- {7032, 7144, 7144},
- {4271, 7148, 7148},
- {3706, 7152, 7152},
- {6970, 7156, 7157},
- {7088, 7161, 7163},
- {2718, 7168, 7169},
- {5674, 7175, 7175},
- {4631, 7182, 7182},
- {7070, 7188, 7189},
- {6220, 7196, 7196},
- {3458, 7201, 7202},
- {2041, 7211, 7212},
- {1454, 7216, 7216},
- {5199, 7225, 7227},
- {3529, 7234, 7234},
- {6890, 7238, 7238},
- {3815, 7242, 7243},
- {5490, 7250, 7253},
- {6554, 7257, 7263},
- {5890, 7267, 7269},
- {6877, 7273, 7273},
- {4877, 7277, 7277},
- {2502, 7285, 7285},
- {1483, 7289, 7295},
- {7210, 7304, 7308},
- {6845, 7313, 7316},
- {7219, 7320, 7320},
- {7001, 7325, 7329},
- {6853, 7333, 7334},
- {6120, 7338, 7338},
- {6606, 7342, 7343},
- {7020, 7348, 7350},
- {3509, 7354, 7354},
- {7133, 7359, 7363},
- {3434, 7371, 7374},
- {2787, 7384, 7384},
- {7044, 7388, 7388},
- {6960, 7394, 7395},
- {6676, 7399, 7400},
- {7161, 7404, 7404},
- {7285, 7417, 7418},
- {4558, 7425, 7426},
- {4828, 7430, 7430},
- {6063, 7436, 7436},
- {3597, 7442, 7442},
- {914, 7446, 7446},
- {7320, 7452, 7454},
- {7267, 7458, 7460},
- {5076, 7464, 7464},
- {7430, 7468, 7469},
- {6273, 7473, 7474},
- {7440, 7478, 7487},
- {7348, 7491, 7494},
- {1021, 7510, 7510},
- {7473, 7515, 7515},
- {2823, 7519, 7519},
- {6264, 7527, 7527},
- {7302, 7531, 7531},
- {7089, 7535, 7535},
- {7342, 7540, 7541},
- {3688, 7547, 7551},
- {3054, 7558, 7560},
- {4177, 7566, 7567},
- {6691, 7574, 7575},
- {7156, 7585, 7586},
- {7147, 7590, 7592},
- {7407, 7598, 7598},
- {7403, 7602, 7603},
- {6868, 7607, 7607},
- {6636, 7611, 7611},
- {4805, 7617, 7617},
- {5779, 7623, 7623},
- {7063, 7627, 7627},
- {5079, 7632, 7632},
- {7377, 7637, 7637},
- {7337, 7641, 7642},
- {6738, 7655, 7655},
- {7338, 7659, 7659},
- {6541, 7669, 7671},
- {595, 7675, 7675},
- {7658, 7679, 7680},
- {7647, 7685, 7686},
- {2477, 7690, 7690},
- {5823, 7694, 7694},
- {4156, 7699, 7699},
- {5931, 7703, 7706},
- {6854, 7712, 7712},
- {4931, 7718, 7718},
- {6979, 7722, 7722},
- {5085, 7727, 7727},
- {6965, 7732, 7732},
- {7201, 7736, 7737},
- {3639, 7741, 7743},
- {7534, 7749, 7749},
- {4292, 7753, 7753},
- {3427, 7759, 7763},
- {7273, 7767, 7767},
- {940, 7778, 7778},
- {4838, 7782, 7785},
- {4216, 7790, 7792},
- {922, 7800, 7801},
- {7256, 7810, 7811},
- {7789, 7815, 7819},
- {7225, 7823, 7825},
- {7531, 7829, 7829},
- {6997, 7833, 7833},
- {7757, 7837, 7838},
- {4129, 7842, 7842},
- {7333, 7848, 7849},
- {6776, 7855, 7855},
- {7527, 7859, 7859},
- {4370, 7863, 7863},
- {4512, 7868, 7868},
- {5679, 7880, 7880},
- {3162, 7884, 7885},
- {3933, 7892, 7894},
- {7804, 7899, 7902},
- {6363, 7906, 7907},
- {7848, 7911, 7912},
- {5584, 7917, 7921},
- {874, 7926, 7926},
- {3342, 7930, 7930},
- {4507, 7935, 7937},
- {3672, 7943, 7944},
- {7911, 7948, 7949},
- {6402, 7956, 7956},
- {7940, 7960, 7960},
- {7113, 7964, 7964},
- {1073, 7968, 7968},
- {7740, 7974, 7974},
- {7601, 7978, 7982},
- {6797, 7987, 7988},
- {3528, 7994, 7995},
- {5483, 7999, 7999},
- {5717, 8011, 8011},
- {5480, 8017, 8017},
- {7770, 8023, 8030},
- {2452, 8034, 8034},
- {5282, 8047, 8047},
- {7967, 8051, 8051},
- {1128, 8058, 8066},
- {6348, 8070, 8070},
- {8055, 8077, 8077},
- {7925, 8081, 8086},
- {6810, 8090, 8090},
- {5051, 8101, 8101},
- {4696, 8109, 8110},
- {5129, 8119, 8119},
- {4449, 8123, 8123},
- {7222, 8127, 8127},
- {4649, 8131, 8134},
- {7994, 8138, 8138},
- {5954, 8148, 8148},
- {475, 8152, 8153},
- {7906, 8157, 8157},
- {7458, 8164, 8166},
- {7632, 8171, 8173},
- {3874, 8177, 8183},
- {4391, 8187, 8187},
- {561, 8191, 8191},
- {2417, 8195, 8195},
- {2357, 8204, 8204},
- {2269, 8216, 8218},
- {3968, 8222, 8222},
- {2200, 8226, 8227},
- {3453, 8247, 8247},
- {2439, 8251, 8252},
- {7175, 8257, 8257},
- {976, 8262, 8264},
- {4953, 8273, 8273},
- {4219, 8278, 8278},
- {6, 8285, 8291},
- {5703, 8295, 8296},
- {5272, 8300, 8300},
- {8037, 8304, 8304},
- {8186, 8314, 8314},
- {8304, 8318, 8318},
- {8051, 8326, 8326},
- {8318, 8330, 8330},
- {2671, 8334, 8335},
- {2662, 8339, 8339},
- {8081, 8349, 8350},
- {3328, 8356, 8356},
- {2879, 8360, 8362},
- {8050, 8370, 8371},
- {8330, 8375, 8376},
- {8375, 8386, 8386},
- {4961, 8390, 8390},
- {1017, 8403, 8405},
- {3533, 8416, 8416},
- {4555, 8422, 8422},
- {6445, 8426, 8426},
- {8169, 8432, 8432},
- {990, 8436, 8436},
- {4102, 8440, 8440},
- {7398, 8444, 8446},
- {3480, 8450, 8450},
- {6324, 8462, 8462},
- {7948, 8466, 8467},
- {5950, 8471, 8471},
- {5189, 8476, 8476},
- {4026, 8490, 8490},
- {8374, 8494, 8495},
- {4682, 8501, 8501},
- {7387, 8506, 8506},
- {8164, 8510, 8515},
- {4079, 8524, 8524},
- {8360, 8529, 8531},
- {7446, 8540, 8543},
- {7971, 8547, 8548},
- {4311, 8552, 8552},
- {5204, 8556, 8557},
- {7968, 8562, 8562},
- {7847, 8571, 8573},
- {8547, 8577, 8577},
- {5320, 8581, 8581},
- {8556, 8585, 8586},
- {8504, 8590, 8590},
- {7669, 8602, 8604},
- {5874, 8608, 8609},
- {5828, 8613, 8613},
- {7998, 8617, 8617},
- {8519, 8625, 8625},
- {7250, 8637, 8637},
- {426, 8641, 8641},
- {8436, 8645, 8645},
- {5986, 8649, 8656},
- {8157, 8660, 8660},
- {7182, 8665, 8665},
- {8421, 8675, 8675},
- {8509, 8681, 8681},
- {5137, 8688, 8689},
- {8625, 8694, 8695},
- {5228, 8701, 8702},
- {6661, 8714, 8714},
- {1010, 8719, 8719},
- {6648, 8723, 8723},
- {3500, 8728, 8728},
- {2442, 8735, 8735},
- {8494, 8740, 8741},
- {8171, 8753, 8755},
- {7242, 8763, 8764},
- {4739, 8768, 8769},
- {7079, 8773, 8773},
- {8386, 8777, 8777},
- {8624, 8781, 8787},
- {661, 8791, 8794},
- {8631, 8801, 8801},
- {7753, 8805, 8805},
- {4783, 8809, 8810},
- {1673, 8814, 8815},
- {6623, 8819, 8819},
- {4404, 8823, 8823},
- {8089, 8827, 8828},
- {8773, 8832, 8832},
- {5394, 8836, 8836},
- {6231, 8841, 8843},
- {1015, 8852, 8853},
- {6873, 8857, 8857},
- {6289, 8865, 8865},
- {8577, 8869, 8869},
- {8114, 8873, 8875},
- {8534, 8883, 8883},
- {3007, 8887, 8888},
- {8827, 8892, 8893},
- {4788, 8897, 8900},
- {5698, 8906, 8907},
- {7690, 8911, 8911},
- {6643, 8919, 8919},
- {7206, 8923, 8924},
- {7866, 8929, 8931},
- {8880, 8942, 8942},
- {8630, 8951, 8952},
- {6027, 8958, 8958},
- {7749, 8966, 8967},
- {4932, 8972, 8973},
- {8892, 8980, 8981},
- {634, 9003, 9003},
- {8109, 9007, 9008},
- {8777, 9012, 9012},
- {3981, 9016, 9017},
- {5723, 9025, 9025},
- {7662, 9034, 9038},
- {8955, 9042, 9042},
- {8070, 9060, 9062},
- {8910, 9066, 9066},
- {5363, 9070, 9071},
- {7699, 9075, 9076},
- {8991, 9081, 9081},
- {6850, 9085, 9085},
- {5811, 9092, 9094},
- {9079, 9098, 9102},
- {6456, 9106, 9106},
- {2259, 9111, 9111},
- {4752, 9116, 9116},
- {9060, 9120, 9123},
- {8090, 9127, 9127},
- {5305, 9131, 9132},
- {8623, 9137, 9137},
- {7417, 9141, 9141},
- {6564, 9148, 9149},
- {9126, 9157, 9158},
- {4285, 9169, 9170},
- {8698, 9174, 9174},
- {8869, 9178, 9178},
- {2572, 9182, 9183},
- {6482, 9188, 9190},
- {9181, 9201, 9201},
- {2968, 9208, 9209},
- {2506, 9213, 9215},
- {9127, 9219, 9219},
- {7910, 9225, 9227},
- {5422, 9235, 9239},
- {8813, 9244, 9246},
- {9178, 9250, 9250},
- {8748, 9255, 9255},
- {7354, 9265, 9265},
- {7767, 9269, 9269},
- {7710, 9281, 9283},
- {8826, 9288, 9290},
- {861, 9295, 9295},
- {4482, 9301, 9301},
- {9264, 9305, 9306},
- {8805, 9310, 9310},
- {4995, 9314, 9314},
- {6730, 9318, 9318},
- {7457, 9328, 9328},
- {2547, 9335, 9336},
- {6298, 9340, 9343},
- {9305, 9353, 9354},
- {9269, 9358, 9358},
- {6338, 9370, 9370},
- {7289, 9376, 9379},
- {5780, 9383, 9383},
- {7607, 9387, 9387},
- {2065, 9392, 9392},
- {7238, 9396, 9396},
- {8856, 9400, 9400},
- {8069, 9412, 9413},
- {611, 9420, 9420},
- {7071, 9424, 9424},
- {3089, 9430, 9431},
- {7117, 9435, 9438},
- {1976, 9445, 9445},
- {6640, 9449, 9449},
- {5488, 9453, 9453},
- {8739, 9457, 9459},
- {5958, 9466, 9466},
- {7985, 9470, 9470},
- {8735, 9475, 9475},
- {5009, 9479, 9479},
- {8073, 9483, 9484},
- {2328, 9490, 9491},
- {9250, 9495, 9495},
- {4043, 9502, 9502},
- {7712, 9506, 9506},
- {9012, 9510, 9510},
- {9028, 9514, 9515},
- {2190, 9521, 9524},
- {9029, 9528, 9528},
- {9519, 9532, 9532},
- {9495, 9536, 9536},
- {8527, 9540, 9540},
- {2137, 9550, 9550},
- {8419, 9557, 9557},
- {9383, 9561, 9562},
- {8970, 9575, 9578},
- {8911, 9582, 9582},
- {7828, 9595, 9596},
- {6180, 9600, 9600},
- {8738, 9604, 9607},
- {7540, 9611, 9612},
- {9599, 9616, 9618},
- {9187, 9623, 9623},
- {9294, 9628, 9629},
- {4536, 9639, 9639},
- {3867, 9643, 9643},
- {6305, 9648, 9648},
- {1617, 9654, 9657},
- {5762, 9666, 9666},
- {8314, 9670, 9670},
- {9666, 9674, 9675},
- {9506, 9679, 9679},
- {9669, 9685, 9686},
- {9683, 9690, 9690},
- {8763, 9697, 9698},
- {7468, 9702, 9702},
- {460, 9707, 9707},
- {3115, 9712, 9712},
- {9424, 9716, 9717},
- {7359, 9721, 9724},
- {7547, 9728, 9729},
- {7151, 9733, 9738},
- {7627, 9742, 9742},
- {2822, 9747, 9747},
- {8247, 9751, 9753},
- {9550, 9758, 9758},
- {7585, 9762, 9763},
- {1002, 9767, 9767},
- {7168, 9772, 9773},
- {6941, 9777, 9780},
- {9728, 9784, 9786},
- {9770, 9792, 9796},
- {6411, 9801, 9802},
- {3689, 9806, 9808},
- {9575, 9814, 9816},
- {7025, 9820, 9821},
- {2776, 9826, 9826},
- {9806, 9830, 9830},
- {9820, 9834, 9835},
- {9800, 9839, 9847},
- {9834, 9851, 9852},
- {9829, 9856, 9862},
- {1400, 9866, 9866},
- {3197, 9870, 9871},
- {9851, 9875, 9876},
- {9742, 9883, 9884},
- {3362, 9888, 9889},
- {9883, 9893, 9893},
- {5711, 9899, 9910},
- {7806, 9915, 9915},
- {9120, 9919, 9919},
- {9715, 9925, 9934},
- {2580, 9938, 9938},
- {4907, 9942, 9944},
- {6239, 9953, 9954},
- {6961, 9963, 9963},
- {5295, 9967, 9968},
- {1915, 9972, 9973},
- {3426, 9983, 9985},
- {9875, 9994, 9995},
- {6942, 9999, 9999},
- {6621, 10005, 10005},
- {7589, 10010, 10012},
- {9286, 10020, 10020},
- {838, 10024, 10024},
- {9980, 10028, 10031},
- {9994, 10035, 10041},
- {2702, 10048, 10051},
- {2621, 10059, 10059},
- {10054, 10065, 10065},
- {8612, 10073, 10074},
- {7033, 10078, 10078},
- {916, 10082, 10082},
- {10035, 10086, 10087},
- {8613, 10097, 10097},
- {9919, 10107, 10108},
- {6133, 10114, 10115},
- {10059, 10119, 10119},
- {10065, 10126, 10127},
- {7732, 10131, 10131},
- {7155, 10135, 10136},
- {6728, 10140, 10140},
- {6162, 10144, 10145},
- {4724, 10150, 10150},
- {1665, 10154, 10154},
- {10126, 10163, 10163},
- {9783, 10168, 10168},
- {1715, 10172, 10173},
- {7152, 10177, 10182},
- {8760, 10187, 10187},
- {7829, 10191, 10191},
- {9679, 10196, 10196},
- {9369, 10201, 10201},
- {2928, 10206, 10208},
- {6951, 10214, 10217},
- {5633, 10221, 10221},
- {7199, 10225, 10225},
- {10118, 10230, 10231},
- {9999, 10235, 10236},
- {10045, 10240, 10249},
- {5565, 10256, 10256},
- {9866, 10261, 10261},
- {10163, 10268, 10268},
- {9869, 10272, 10272},
- {9789, 10276, 10283},
- {10235, 10287, 10288},
- {10214, 10298, 10299},
- {6971, 10303, 10303},
- {3346, 10307, 10307},
- {10185, 10311, 10312},
- {9993, 10318, 10320},
- {2779, 10332, 10334},
- {1726, 10338, 10338},
- {741, 10354, 10360},
- {10230, 10372, 10373},
- {10260, 10384, 10385},
- {10131, 10389, 10398},
- {6946, 10406, 10409},
- {10158, 10413, 10420},
- {10123, 10424, 10424},
- {6157, 10428, 10429},
- {4518, 10434, 10434},
- {9893, 10438, 10438},
- {9865, 10442, 10446},
- {7558, 10454, 10454},
- {10434, 10460, 10460},
- {10064, 10466, 10468},
- {2703, 10472, 10474},
- {9751, 10478, 10479},
- {6714, 10485, 10485},
- {8020, 10490, 10490},
- {10303, 10494, 10494},
- {3521, 10499, 10500},
- {9281, 10513, 10515},
- {6028, 10519, 10523},
- {9387, 10527, 10527},
- {7614, 10531, 10531},
- {3611, 10536, 10536},
- {9162, 10540, 10540},
- {10081, 10546, 10547},
- {10034, 10560, 10562},
- {6726, 10567, 10571},
- {8237, 10575, 10575},
- {10438, 10579, 10583},
- {10140, 10587, 10587},
- {5784, 10592, 10592},
- {9819, 10597, 10600},
- {10567, 10604, 10608},
- {9335, 10613, 10613},
- {8300, 10617, 10617},
- {10575, 10621, 10621},
- {9678, 10625, 10626},
- {9962, 10632, 10633},
- {10535, 10637, 10638},
- {8199, 10642, 10642},
- {10372, 10647, 10648},
- {10637, 10656, 10657},
- {10579, 10667, 10668},
- {10465, 10677, 10680},
- {6702, 10684, 10685},
- {10073, 10691, 10692},
- {4505, 10696, 10697},
- {9042, 10701, 10701},
- {6460, 10705, 10706},
- {10010, 10714, 10716},
- {10656, 10720, 10722},
- {7282, 10727, 10729},
- {2327, 10733, 10733},
- {2491, 10740, 10741},
- {10704, 10748, 10750},
- {6465, 10754, 10754},
- {10647, 10758, 10759},
- {10424, 10763, 10763},
- {10748, 10776, 10776},
- {10546, 10780, 10781},
- {10758, 10785, 10786},
- {10287, 10790, 10797},
- {10785, 10801, 10807},
- {10240, 10811, 10826},
- {9509, 10830, 10830},
- {2579, 10836, 10838},
- {9801, 10843, 10845},
- {7555, 10849, 10850},
- {10776, 10860, 10865},
- {8023, 10869, 10869},
- {10046, 10876, 10884},
- {10253, 10888, 10892},
- {9941, 10897, 10897},
- {7898, 10901, 10905},
- {6725, 10909, 10913},
- {10757, 10921, 10923},
- {10160, 10931, 10931},
- {10916, 10935, 10942},
- {10261, 10946, 10946},
- {10318, 10952, 10954},
- {5911, 10959, 10961},
- {10801, 10965, 10966},
- {10946, 10970, 10977},
- {10592, 10982, 10984},
- {9913, 10988, 10990},
- {8510, 10994, 10996},
- {9419, 11000, 11001},
- {6765, 11006, 11007},
- {10725, 11011, 11011},
- {5537, 11017, 11019},
- {9208, 11024, 11025},
- {5850, 11030, 11030},
- {9610, 11034, 11036},
- {8846, 11041, 11047},
- {9697, 11051, 11051},
- {1622, 11055, 11058},
- {2370, 11062, 11062},
- {8393, 11067, 11067},
- {9756, 11071, 11071},
- {10172, 11076, 11076},
- {27, 11081, 11081},
- {7357, 11087, 11092},
- {8151, 11104, 11106},
- {6115, 11110, 11110},
- {10667, 11114, 11115},
- {11099, 11121, 11123},
- {10705, 11127, 11127},
- {8938, 11131, 11131},
- {11114, 11135, 11136},
- {1390, 11140, 11141},
- {10964, 11146, 11148},
- {11140, 11152, 11155},
- {9813, 11159, 11166},
- {624, 11171, 11172},
- {3118, 11177, 11179},
- {11029, 11184, 11186},
- {10186, 11190, 11190},
- {10306, 11196, 11196},
- {8665, 11201, 11201},
- {7382, 11205, 11205},
- {1100, 11210, 11210},
- {2337, 11216, 11217},
- {1609, 11221, 11223},
- {5763, 11228, 11229},
- {5220, 11233, 11233},
- {11061, 11241, 11241},
- {10617, 11246, 11246},
- {11190, 11250, 11251},
- {10144, 11255, 11256},
- {11232, 11260, 11260},
- {857, 11264, 11265},
- {10994, 11269, 11271},
- {3879, 11280, 11281},
- {11184, 11287, 11289},
- {9611, 11293, 11295},
- {11250, 11299, 11299},
- {4495, 11304, 11304},
- {7574, 11308, 11309},
- {9814, 11315, 11317},
- {1713, 11321, 11324},
- {1905, 11328, 11328},
- {8745, 11335, 11340},
- {8883, 11351, 11351},
- {8119, 11358, 11358},
- {1842, 11363, 11364},
- {11237, 11368, 11368},
- {8814, 11373, 11374},
- {5684, 11378, 11378},
- {11011, 11382, 11382},
- {6520, 11389, 11389},
- {11183, 11393, 11396},
- {1790, 11404, 11404},
- {9536, 11408, 11408},
- {11298, 11418, 11419},
- {3929, 11425, 11425},
- {5588, 11429, 11429},
- {8476, 11436, 11436},
- {4096, 11440, 11442},
- {11084, 11446, 11454},
- {10603, 11458, 11463},
- {7332, 11472, 11474},
- {7611, 11483, 11486},
- {4836, 11490, 11491},
- {10024, 11495, 11495},
- {4917, 11501, 11506},
- {6486, 11510, 11512},
- {11269, 11516, 11518},
- {3603, 11522, 11525},
- {11126, 11535, 11535},
- {11418, 11539, 11541},
- {11408, 11545, 11545},
- {9021, 11549, 11552},
- {6745, 11557, 11557},
- {5118, 11561, 11564},
- {7590, 11568, 11569},
- {4426, 11573, 11578},
- {9790, 11582, 11583},
- {6447, 11587, 11587},
- {10229, 11591, 11594},
- {10457, 11598, 11598},
- {10168, 11604, 11604},
- {10543, 11608, 11608},
- {7404, 11612, 11612},
- {11127, 11616, 11616},
- {3337, 11620, 11620},
- {11501, 11624, 11628},
- {4543, 11633, 11635},
- {8449, 11642, 11642},
- {4943, 11646, 11648},
- {10526, 11652, 11654},
- {11620, 11659, 11659},
- {8927, 11664, 11669},
- {532, 11673, 11673},
- {10513, 11677, 11679},
- {10428, 11683, 11683},
- {10999, 11689, 11690},
- {9469, 11695, 11695},
- {3606, 11699, 11699},
- {9560, 11708, 11709},
- {1564, 11714, 11714},
- {10527, 11718, 11718},
- {3071, 11723, 11726},
- {11590, 11731, 11732},
- {6605, 11737, 11737},
- {11624, 11741, 11745},
- {7822, 11749, 11752},
- {5269, 11757, 11758},
- {1339, 11767, 11767},
- {1363, 11771, 11773},
- {3704, 11777, 11777},
- {10952, 11781, 11783},
- {6764, 11793, 11795},
- {8675, 11800, 11800},
- {9963, 11804, 11804},
- {11573, 11808, 11809},
- {9548, 11813, 11813},
- {11591, 11817, 11818},
- {11446, 11822, 11822},
- {9224, 11828, 11828},
- {3158, 11836, 11836},
- {10830, 11840, 11840},
- {7234, 11846, 11846},
- {11299, 11850, 11850},
- {11544, 11854, 11855},
- {11498, 11859, 11859},
- {10993, 11865, 11868},
- {9720, 11872, 11878},
- {10489, 11882, 11890},
- {11712, 11898, 11904},
- {11516, 11908, 11910},
- {11568, 11914, 11915},
- {10177, 11919, 11924},
- {11363, 11928, 11929},
- {10494, 11933, 11933},
- {9870, 11937, 11938},
- {9427, 11942, 11942},
- {11481, 11949, 11949},
- {6030, 11955, 11957},
- {11718, 11961, 11961},
- {10531, 11965, 11983},
- {5126, 11987, 11987},
- {7515, 11991, 11991},
- {10646, 11996, 11997},
- {2947, 12001, 12001},
- {9582, 12009, 12010},
- {6202, 12017, 12018},
- {11714, 12022, 12022},
- {9235, 12033, 12037},
- {9721, 12041, 12044},
- {11932, 12051, 12052},
- {12040, 12056, 12056},
- {12051, 12060, 12060},
- {11601, 12066, 12066},
- {8426, 12070, 12070},
- {4053, 12077, 12077},
- {4262, 12081, 12081},
- {9761, 12086, 12088},
- {11582, 12092, 12093},
- {10965, 12097, 12098},
- {11803, 12103, 12104},
- {11933, 12108, 12109},
- {10688, 12117, 12117},
- {12107, 12125, 12126},
- {6774, 12130, 12132},
- {6286, 12137, 12137},
- {9543, 12141, 12141},
- {12097, 12145, 12146},
- {10790, 12150, 12150},
- {10125, 12154, 12156},
- {12125, 12164, 12164},
- {12064, 12168, 12172},
- {10811, 12178, 12188},
- {12092, 12192, 12193},
- {10058, 12197, 12198},
- {11611, 12211, 12212},
- {3459, 12216, 12216},
- {10291, 12225, 12228},
- {12191, 12232, 12234},
- {12145, 12238, 12238},
- {12001, 12242, 12250},
- {3840, 12255, 12255},
- {12216, 12259, 12259},
- {674, 12272, 12272},
- {12141, 12276, 12276},
- {10766, 12280, 12280},
- {11545, 12284, 12284},
- {6496, 12290, 12290},
- {11381, 12294, 12295},
- {603, 12302, 12303},
- {12276, 12308, 12308},
- {11850, 12313, 12314},
- {565, 12319, 12319},
- {9351, 12324, 12324},
- {11822, 12328, 12328},
- {2691, 12333, 12334},
- {11840, 12338, 12338},
- {11070, 12343, 12343},
- {9510, 12347, 12347},
- {11024, 12352, 12353},
- {7173, 12359, 12359},
- {517, 12363, 12363},
- {6311, 12367, 12368},
- {11367, 12372, 12373},
- {12008, 12377, 12377},
- {11372, 12382, 12384},
- {11358, 12391, 12392},
- {11382, 12396, 12396},
- {6882, 12400, 12401},
- {11246, 12405, 12405},
- {8359, 12409, 12412},
- {10154, 12418, 12418},
- {12016, 12425, 12426},
- {8972, 12434, 12435},
- {10478, 12439, 12440},
- {12395, 12449, 12449},
- {11612, 12454, 12454},
- {12347, 12458, 12458},
- {10700, 12466, 12467},
- {3637, 12471, 12476},
- {1042, 12480, 12481},
- {6747, 12488, 12488},
- {12396, 12492, 12493},
- {9420, 12497, 12497},
- {11285, 12501, 12510},
- {4470, 12515, 12515},
- {9374, 12519, 12519},
- {11293, 12528, 12528},
- {2058, 12534, 12535},
- {6521, 12539, 12539},
- {12492, 12543, 12543},
- {3043, 12547, 12547},
- {2982, 12551, 12553},
- {11030, 12557, 12563},
- {7636, 12568, 12568},
- {9639, 12572, 12572},
- {12543, 12576, 12576},
- {5989, 12580, 12583},
- {11051, 12587, 12587},
- {1061, 12592, 12594},
- {12313, 12599, 12601},
- {11846, 12605, 12605},
- {12576, 12609, 12609},
- {11040, 12618, 12625},
- {12479, 12629, 12629},
- {6903, 12633, 12633},
- {12322, 12639, 12639},
- {12253, 12643, 12645},
- {5594, 12651, 12651},
- {12522, 12655, 12655},
- {11703, 12659, 12659},
- {1377, 12665, 12665},
- {8022, 12669, 12669},
- {12280, 12674, 12674},
- {9023, 12680, 12681},
- {12328, 12685, 12685},
- {3085, 12689, 12693},
- {4700, 12698, 12698},
- {10224, 12702, 12702},
- {8781, 12706, 12706},
- {1651, 12710, 12710},
- {12458, 12714, 12714},
- {12005, 12718, 12721},
- {11908, 12725, 12726},
- {8202, 12733, 12733},
- {11708, 12739, 12740},
- {12599, 12744, 12745},
- {12284, 12749, 12749},
- {5285, 12756, 12756},
- {12055, 12775, 12777},
- {6919, 12782, 12782},
- {12242, 12786, 12786},
- {12009, 12790, 12790},
- {9628, 12794, 12796},
- {11354, 12801, 12802},
- {10225, 12806, 12807},
- {579, 12813, 12813},
- {8935, 12817, 12822},
- {8753, 12827, 12829},
- {11006, 12835, 12835},
- {858, 12841, 12845},
- {476, 12849, 12849},
- {7667, 12854, 12854},
- {12760, 12860, 12871},
- {11677, 12875, 12877},
- {12714, 12881, 12881},
- {12731, 12885, 12890},
- {7108, 12894, 12896},
- {1165, 12900, 12900},
- {4021, 12906, 12906},
- {10829, 12910, 12911},
- {12331, 12915, 12915},
- {8887, 12919, 12921},
- {11639, 12925, 12925},
- {7964, 12929, 12929},
- {12528, 12937, 12937},
- {8148, 12941, 12941},
- {12770, 12948, 12950},
- {12609, 12954, 12954},
- {12685, 12958, 12958},
- {2803, 12962, 12962},
- {9561, 12966, 12966},
- {6671, 12972, 12973},
- {12056, 12977, 12977},
- {6380, 12981, 12981},
- {12048, 12985, 12985},
- {11961, 12989, 12993},
- {3368, 12997, 12999},
- {6634, 13004, 13004},
- {6775, 13009, 13010},
- {12136, 13014, 13019},
- {10341, 13023, 13023},
- {13002, 13027, 13027},
- {10587, 13031, 13031},
- {10307, 13035, 13035},
- {12736, 13039, 13039},
- {12744, 13043, 13044},
- {6175, 13048, 13048},
- {9702, 13053, 13054},
- {662, 13059, 13061},
- {12718, 13065, 13068},
- {12893, 13072, 13075},
- {8299, 13086, 13091},
- {12604, 13095, 13096},
- {12848, 13100, 13101},
- {12749, 13105, 13105},
- {12526, 13109, 13114},
- {9173, 13122, 13122},
- {12769, 13128, 13128},
- {13038, 13132, 13132},
- {12725, 13136, 13137},
- {12639, 13146, 13146},
- {9711, 13150, 13151},
- {12137, 13155, 13155},
- {13039, 13159, 13159},
- {4681, 13163, 13164},
- {12954, 13168, 13168},
- {13158, 13175, 13176},
- {13105, 13180, 13180},
- {10754, 13184, 13184},
- {13167, 13188, 13188},
- {12658, 13192, 13192},
- {4294, 13199, 13200},
- {11682, 13204, 13205},
- {11695, 13209, 13209},
- {11076, 13214, 13214},
- {12232, 13218, 13218},
- {9399, 13223, 13224},
- {12880, 13228, 13229},
- {13048, 13234, 13234},
- {9701, 13238, 13239},
- {13209, 13243, 13243},
- {3658, 13248, 13248},
- {3698, 13252, 13254},
- {12237, 13260, 13260},
- {8872, 13266, 13266},
- {12957, 13272, 13273},
- {1393, 13281, 13281},
- {2013, 13285, 13288},
- {4244, 13296, 13299},
- {9428, 13303, 13303},
- {12702, 13307, 13307},
- {13078, 13311, 13311},
- {6071, 13315, 13315},
- {3061, 13319, 13319},
- {2051, 13324, 13324},
- {11560, 13328, 13331},
- {6584, 13336, 13336},
- {8482, 13340, 13340},
- {5331, 13344, 13344},
- {4171, 13348, 13348},
- {8501, 13352, 13352},
- {9219, 13356, 13356},
- {9473, 13360, 13363},
- {12881, 13367, 13367},
- {13065, 13371, 13375},
- {2979, 13379, 13384},
- {1518, 13388, 13388},
- {11177, 13392, 13392},
- {9457, 13398, 13398},
- {12293, 13407, 13410},
- {3697, 13414, 13417},
- {10338, 13425, 13425},
- {13367, 13429, 13429},
- {11074, 13433, 13437},
- {4201, 13441, 13443},
- {1812, 13447, 13448},
- {13360, 13452, 13456},
- {13188, 13463, 13463},
- {9732, 13470, 13470},
- {11332, 13477, 13477},
- {9918, 13487, 13487},
- {6337, 13497, 13497},
- {13429, 13501, 13501},
- {11413, 13505, 13505},
- {4685, 13512, 13513},
- {13136, 13517, 13519},
- {7416, 13528, 13530},
- {12929, 13534, 13534},
- {11110, 13539, 13539},
- {11521, 13543, 13543},
- {12825, 13553, 13553},
- {13447, 13557, 13558},
- {12299, 13562, 13563},
- {9003, 13570, 13570},
- {12500, 13577, 13577},
- {13501, 13581, 13581},
- {9392, 13586, 13586},
- {12454, 13590, 13590},
- {6189, 13595, 13595},
- {13053, 13599, 13599},
- {11881, 13604, 13604},
- {13159, 13608, 13608},
- {4894, 13612, 13612},
- {13221, 13621, 13621},
- {8950, 13625, 13625},
- {13533, 13629, 13629},
- {9633, 13633, 13633},
- {7892, 13637, 13639},
- {13581, 13643, 13643},
- {13616, 13647, 13649},
- {12794, 13653, 13654},
- {8919, 13659, 13659},
- {9674, 13663, 13663},
- {13577, 13668, 13668},
- {12966, 13672, 13672},
- {12659, 13676, 13683},
- {6124, 13688, 13688},
- {9225, 13693, 13695},
- {11833, 13702, 13702},
- {12904, 13709, 13717},
- {13647, 13721, 13722},
- {11687, 13726, 13727},
- {12434, 13731, 13732},
- {12689, 13736, 13742},
- {13168, 13746, 13746},
- {6151, 13751, 13752},
- {11821, 13756, 13757},
- {6467, 13764, 13764},
- {5730, 13769, 13769},
- {5136, 13780, 13780},
- {724, 13784, 13785},
- {13517, 13789, 13791},
- {640, 13795, 13796},
- {7721, 13800, 13802},
- {11121, 13806, 13807},
- {5791, 13811, 13815},
- {12894, 13819, 13819},
- {11100, 13824, 13824},
- {7011, 13830, 13830},
- {7129, 13834, 13837},
- {13833, 13841, 13841},
- {11276, 13847, 13847},
- {13621, 13853, 13853},
- {13589, 13862, 13863},
- {12989, 13867, 13867},
- {12789, 13871, 13871},
- {1239, 13875, 13875},
- {4675, 13879, 13881},
- {4686, 13885, 13885},
- {707, 13889, 13889},
- {5449, 13897, 13898},
- {13867, 13902, 13903},
- {10613, 13908, 13908},
- {13789, 13912, 13914},
- {4451, 13918, 13919},
- {9200, 13924, 13924},
- {2011, 13930, 13930},
- {11433, 13934, 13936},
- {4695, 13942, 13943},
- {9435, 13948, 13951},
- {13688, 13955, 13957},
- {11694, 13961, 13962},
- {5712, 13966, 13966},
- {5991, 13970, 13972},
- {13477, 13976, 13976},
- {10213, 13987, 13987},
- {11839, 13991, 13993},
- {12272, 13997, 13997},
- {6206, 14001, 14001},
- {13179, 14006, 14007},
- {2939, 14011, 14011},
- {12972, 14016, 14017},
- {13918, 14021, 14022},
- {7436, 14026, 14027},
- {7678, 14032, 14034},
- {13586, 14040, 14040},
- {13347, 14044, 14044},
- {13109, 14048, 14051},
- {9244, 14055, 14057},
- {13315, 14061, 14061},
- {13276, 14067, 14067},
- {11435, 14073, 14074},
- {13853, 14078, 14078},
- {13452, 14082, 14082},
- {14044, 14087, 14087},
- {4440, 14091, 14095},
- {4479, 14100, 14103},
- {9395, 14107, 14109},
- {6834, 14119, 14119},
- {10458, 14123, 14124},
- {1429, 14129, 14129},
- {8443, 14135, 14135},
- {10365, 14140, 14140},
- {5267, 14145, 14145},
- {11834, 14151, 14153},
-}
diff --git a/vendor/github.com/golang/snappy/snappy_test.go b/vendor/github.com/golang/snappy/snappy_test.go
deleted file mode 100644
index 2712710..0000000
--- a/vendor/github.com/golang/snappy/snappy_test.go
+++ /dev/null
@@ -1,1353 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snappy
-
-import (
- "bytes"
- "encoding/binary"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "math/rand"
- "net/http"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "strings"
- "testing"
-)
-
-var (
- download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
- testdataDir = flag.String("testdataDir", "testdata", "Directory containing the test data")
- benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data")
-)
-
-// goEncoderShouldMatchCppEncoder is whether to test that the algorithm used by
-// Go's encoder matches byte-for-byte what the C++ snappy encoder produces, on
-// this GOARCH. There is more than one valid encoding of any given input, and
-// there is more than one good algorithm along the frontier of trading off
-// throughput for output size. Nonetheless, we presume that the C++ encoder's
-// algorithm is a good one and has been tested on a wide range of inputs, so
-// matching that exactly should mean that the Go encoder's algorithm is also
-// good, without needing to gather our own corpus of test data.
-//
-// The exact algorithm used by the C++ code is potentially endian dependent, as
-// it puns a byte pointer to a uint32 pointer to load, hash and compare 4 bytes
-// at a time. The Go implementation is endian agnostic, in that its output is
-// the same (as little-endian C++ code), regardless of the CPU's endianness.
-//
-// Thus, when comparing Go's output to C++ output generated beforehand, such as
-// the "testdata/pi.txt.rawsnappy" file generated by C++ code on a little-
-// endian system, we can run that test regardless of the runtime.GOARCH value.
-//
-// When comparing Go's output to dynamically generated C++ output, i.e. the
-// result of fork/exec'ing a C++ program, we can run that test only on
-// little-endian systems, because the C++ output might be different on
-// big-endian systems. The runtime package doesn't export endianness per se,
-// but we can restrict this match-C++ test to common little-endian systems.
-const goEncoderShouldMatchCppEncoder = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm"
-
-func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) {
- got := maxEncodedLenOfMaxBlockSize
- want := MaxEncodedLen(maxBlockSize)
- if got != want {
- t.Fatalf("got %d, want %d", got, want)
- }
-}
-
-func cmp(a, b []byte) error {
- if bytes.Equal(a, b) {
- return nil
- }
- if len(a) != len(b) {
- return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
- }
- for i := range a {
- if a[i] != b[i] {
- return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
- }
- }
- return nil
-}
-
-func roundtrip(b, ebuf, dbuf []byte) error {
- d, err := Decode(dbuf, Encode(ebuf, b))
- if err != nil {
- return fmt.Errorf("decoding error: %v", err)
- }
- if err := cmp(d, b); err != nil {
- return fmt.Errorf("roundtrip mismatch: %v", err)
- }
- return nil
-}
-
-func TestEmpty(t *testing.T) {
- if err := roundtrip(nil, nil, nil); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestSmallCopy(t *testing.T) {
- for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
- for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
- for i := 0; i < 32; i++ {
- s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
- if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
- t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
- }
- }
- }
- }
-}
-
-func TestSmallRand(t *testing.T) {
- rng := rand.New(rand.NewSource(1))
- for n := 1; n < 20000; n += 23 {
- b := make([]byte, n)
- for i := range b {
- b[i] = uint8(rng.Intn(256))
- }
- if err := roundtrip(b, nil, nil); err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestSmallRegular(t *testing.T) {
- for n := 1; n < 20000; n += 23 {
- b := make([]byte, n)
- for i := range b {
- b[i] = uint8(i%10 + 'a')
- }
- if err := roundtrip(b, nil, nil); err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestInvalidVarint(t *testing.T) {
- testCases := []struct {
- desc string
- input string
- }{{
- "invalid varint, final byte has continuation bit set",
- "\xff",
- }, {
- "invalid varint, value overflows uint64",
- "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00",
- }, {
- // https://github.com/google/snappy/blob/master/format_description.txt
- // says that "the stream starts with the uncompressed length [as a
- // varint] (up to a maximum of 2^32 - 1)".
- "valid varint (as uint64), but value overflows uint32",
- "\x80\x80\x80\x80\x10",
- }}
-
- for _, tc := range testCases {
- input := []byte(tc.input)
- if _, err := DecodedLen(input); err != ErrCorrupt {
- t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err)
- }
- if _, err := Decode(nil, input); err != ErrCorrupt {
- t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err)
- }
- }
-}
-
-func TestDecode(t *testing.T) {
- lit40Bytes := make([]byte, 40)
- for i := range lit40Bytes {
- lit40Bytes[i] = byte(i)
- }
- lit40 := string(lit40Bytes)
-
- testCases := []struct {
- desc string
- input string
- want string
- wantErr error
- }{{
- `decodedLen=0; valid input`,
- "\x00",
- "",
- nil,
- }, {
- `decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`,
- "\x03" + "\x08\xff\xff\xff",
- "\xff\xff\xff",
- nil,
- }, {
- `decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`,
- "\x02" + "\x08\xff\xff\xff",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`,
- "\x03" + "\x08\xff\xff",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`,
- "\x28" + "\x9c" + lit40,
- lit40,
- nil,
- }, {
- `decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`,
- "\x01" + "\xf0",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`,
- "\x03" + "\xf0\x02\xff\xff\xff",
- "\xff\xff\xff",
- nil,
- }, {
- `decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`,
- "\x01" + "\xf4\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=3; tagLiteral, 2-byte length; length=3; valid input`,
- "\x03" + "\xf4\x02\x00\xff\xff\xff",
- "\xff\xff\xff",
- nil,
- }, {
- `decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`,
- "\x01" + "\xf8\x00\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`,
- "\x03" + "\xf8\x02\x00\x00\xff\xff\xff",
- "\xff\xff\xff",
- nil,
- }, {
- `decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`,
- "\x01" + "\xfc\x00\x00\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`,
- "\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`,
- "\x04" + "\xfc\x02\x00\x00\x00\xff",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`,
- "\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff",
- "\xff\xff\xff",
- nil,
- }, {
- `decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`,
- "\x04" + "\x01",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`,
- "\x04" + "\x02\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`,
- "\x04" + "\x03\x00\x00\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`,
- "\x04" + "\x0cabcd",
- "abcd",
- nil,
- }, {
- `decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`,
- "\x0d" + "\x0cabcd" + "\x15\x04",
- "abcdabcdabcda",
- nil,
- }, {
- `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`,
- "\x08" + "\x0cabcd" + "\x01\x04",
- "abcdabcd",
- nil,
- }, {
- `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`,
- "\x08" + "\x0cabcd" + "\x01\x02",
- "abcdcdcd",
- nil,
- }, {
- `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; valid input`,
- "\x08" + "\x0cabcd" + "\x01\x01",
- "abcddddd",
- nil,
- }, {
- `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`,
- "\x08" + "\x0cabcd" + "\x01\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`,
- "\x09" + "\x0cabcd" + "\x01\x04",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`,
- "\x08" + "\x0cabcd" + "\x01\x05",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`,
- "\x07" + "\x0cabcd" + "\x01\x04",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`,
- "\x06" + "\x0cabcd" + "\x06\x03\x00",
- "abcdbc",
- nil,
- }, {
- `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`,
- "\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00",
- "abcdbc",
- nil,
- }}
-
- const (
- // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are
- // not present in either the input or the output. It is written to dBuf
- // to check that Decode does not write bytes past the end of
- // dBuf[:dLen].
- //
- // The magic number 37 was chosen because it is prime. A more 'natural'
- // number like 32 might lead to a false negative if, for example, a
- // byte was incorrectly copied 4*8 bytes later.
- notPresentBase = 0xa0
- notPresentLen = 37
- )
-
- var dBuf [100]byte
-loop:
- for i, tc := range testCases {
- input := []byte(tc.input)
- for _, x := range input {
- if notPresentBase <= x && x < notPresentBase+notPresentLen {
- t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input)
- continue loop
- }
- }
-
- dLen, n := binary.Uvarint(input)
- if n <= 0 {
- t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc)
- continue
- }
- if dLen > uint64(len(dBuf)) {
- t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen)
- continue
- }
-
- for j := range dBuf {
- dBuf[j] = byte(notPresentBase + j%notPresentLen)
- }
- g, gotErr := Decode(dBuf[:], input)
- if got := string(g); got != tc.want || gotErr != tc.wantErr {
- t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v",
- i, tc.desc, got, gotErr, tc.want, tc.wantErr)
- continue
- }
- for j, x := range dBuf {
- if uint64(j) < dLen {
- continue
- }
- if w := byte(notPresentBase + j%notPresentLen); x != w {
- t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x",
- i, tc.desc, j, x, w, dBuf)
- continue loop
- }
- }
- }
-}
-
-func TestDecodeCopy4(t *testing.T) {
- dots := strings.Repeat(".", 65536)
-
- input := strings.Join([]string{
- "\x89\x80\x04", // decodedLen = 65545.
- "\x0cpqrs", // 4-byte literal "pqrs".
- "\xf4\xff\xff" + dots, // 65536-byte literal dots.
- "\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540.
- }, "")
-
- gotBytes, err := Decode(nil, []byte(input))
- if err != nil {
- t.Fatal(err)
- }
- got := string(gotBytes)
- want := "pqrs" + dots + "pqrs."
- if len(got) != len(want) {
- t.Fatalf("got %d bytes, want %d", len(got), len(want))
- }
- if got != want {
- for i := 0; i < len(got); i++ {
- if g, w := got[i], want[i]; g != w {
- t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w)
- }
- }
- }
-}
-
-// TestDecodeLengthOffset tests decoding an encoding of the form literal +
-// copy-length-offset + literal. For example: "abcdefghijkl" + "efghij" + "AB".
-func TestDecodeLengthOffset(t *testing.T) {
- const (
- prefix = "abcdefghijklmnopqr"
- suffix = "ABCDEFGHIJKLMNOPQR"
-
- // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are
- // not present in either the input or the output. It is written to
- // gotBuf to check that Decode does not write bytes past the end of
- // gotBuf[:totalLen].
- //
- // The magic number 37 was chosen because it is prime. A more 'natural'
- // number like 32 might lead to a false negative if, for example, a
- // byte was incorrectly copied 4*8 bytes later.
- notPresentBase = 0xa0
- notPresentLen = 37
- )
- var gotBuf, wantBuf, inputBuf [128]byte
- for length := 1; length <= 18; length++ {
- for offset := 1; offset <= 18; offset++ {
- loop:
- for suffixLen := 0; suffixLen <= 18; suffixLen++ {
- totalLen := len(prefix) + length + suffixLen
-
- inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen))
- inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1)
- inputLen++
- inputLen += copy(inputBuf[inputLen:], prefix)
- inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1)
- inputBuf[inputLen+1] = byte(offset)
- inputBuf[inputLen+2] = 0x00
- inputLen += 3
- if suffixLen > 0 {
- inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1)
- inputLen++
- inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen])
- }
- input := inputBuf[:inputLen]
-
- for i := range gotBuf {
- gotBuf[i] = byte(notPresentBase + i%notPresentLen)
- }
- got, err := Decode(gotBuf[:], input)
- if err != nil {
- t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err)
- continue
- }
-
- wantLen := 0
- wantLen += copy(wantBuf[wantLen:], prefix)
- for i := 0; i < length; i++ {
- wantBuf[wantLen] = wantBuf[wantLen-offset]
- wantLen++
- }
- wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen])
- want := wantBuf[:wantLen]
-
- for _, x := range input {
- if notPresentBase <= x && x < notPresentBase+notPresentLen {
- t.Errorf("length=%d, offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x",
- length, offset, suffixLen, x, input)
- continue loop
- }
- }
- for i, x := range gotBuf {
- if i < totalLen {
- continue
- }
- if w := byte(notPresentBase + i%notPresentLen); x != w {
- t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+
- "Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x",
- length, offset, suffixLen, totalLen, i, x, w, gotBuf)
- continue loop
- }
- }
- for _, x := range want {
- if notPresentBase <= x && x < notPresentBase+notPresentLen {
- t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x",
- length, offset, suffixLen, x, want)
- continue loop
- }
- }
-
- if !bytes.Equal(got, want) {
- t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x",
- length, offset, suffixLen, input, got, want)
- continue
- }
- }
- }
- }
-}
-
-const (
- goldenText = "Mark.Twain-Tom.Sawyer.txt"
- goldenCompressed = goldenText + ".rawsnappy"
-)
-
-func TestDecodeGoldenInput(t *testing.T) {
- tDir := filepath.FromSlash(*testdataDir)
- src, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed))
- if err != nil {
- t.Fatalf("ReadFile: %v", err)
- }
- got, err := Decode(nil, src)
- if err != nil {
- t.Fatalf("Decode: %v", err)
- }
- want, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
- if err != nil {
- t.Fatalf("ReadFile: %v", err)
- }
- if err := cmp(got, want); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestEncodeGoldenInput(t *testing.T) {
- tDir := filepath.FromSlash(*testdataDir)
- src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
- if err != nil {
- t.Fatalf("ReadFile: %v", err)
- }
- got := Encode(nil, src)
- want, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed))
- if err != nil {
- t.Fatalf("ReadFile: %v", err)
- }
- if err := cmp(got, want); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestExtendMatchGoldenInput(t *testing.T) {
- tDir := filepath.FromSlash(*testdataDir)
- src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
- if err != nil {
- t.Fatalf("ReadFile: %v", err)
- }
- for i, tc := range extendMatchGoldenTestCases {
- got := extendMatch(src, tc.i, tc.j)
- if got != tc.want {
- t.Errorf("test #%d: i, j = %5d, %5d: got %5d (= j + %6d), want %5d (= j + %6d)",
- i, tc.i, tc.j, got, got-tc.j, tc.want, tc.want-tc.j)
- }
- }
-}
-
-func TestExtendMatch(t *testing.T) {
- // ref is a simple, reference implementation of extendMatch.
- ref := func(src []byte, i, j int) int {
- for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
- }
- return j
- }
-
- nums := []int{0, 1, 2, 7, 8, 9, 29, 30, 31, 32, 33, 34, 38, 39, 40}
- for yIndex := 40; yIndex > 30; yIndex-- {
- xxx := bytes.Repeat([]byte("x"), 40)
- if yIndex < len(xxx) {
- xxx[yIndex] = 'y'
- }
- for _, i := range nums {
- for _, j := range nums {
- if i >= j {
- continue
- }
- got := extendMatch(xxx, i, j)
- want := ref(xxx, i, j)
- if got != want {
- t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want)
- }
- }
- }
- }
-}
-
-const snappytoolCmdName = "cmd/snappytool/snappytool"
-
-func skipTestSameEncodingAsCpp() (msg string) {
- if !goEncoderShouldMatchCppEncoder {
- return fmt.Sprintf("skipping testing that the encoding is byte-for-byte identical to C++: GOARCH=%s", runtime.GOARCH)
- }
- if _, err := os.Stat(snappytoolCmdName); err != nil {
- return fmt.Sprintf("could not find snappytool: %v", err)
- }
- return ""
-}
-
-func runTestSameEncodingAsCpp(src []byte) error {
- got := Encode(nil, src)
-
- cmd := exec.Command(snappytoolCmdName, "-e")
- cmd.Stdin = bytes.NewReader(src)
- want, err := cmd.Output()
- if err != nil {
- return fmt.Errorf("could not run snappytool: %v", err)
- }
- return cmp(got, want)
-}
-
-func TestSameEncodingAsCppShortCopies(t *testing.T) {
- if msg := skipTestSameEncodingAsCpp(); msg != "" {
- t.Skip(msg)
- }
- src := bytes.Repeat([]byte{'a'}, 20)
- for i := 0; i <= len(src); i++ {
- if err := runTestSameEncodingAsCpp(src[:i]); err != nil {
- t.Errorf("i=%d: %v", i, err)
- }
- }
-}
-
-func TestSameEncodingAsCppLongFiles(t *testing.T) {
- if msg := skipTestSameEncodingAsCpp(); msg != "" {
- t.Skip(msg)
- }
- bDir := filepath.FromSlash(*benchdataDir)
- failed := false
- for i, tf := range testFiles {
- if err := downloadBenchmarkFiles(t, tf.filename); err != nil {
- t.Fatalf("failed to download testdata: %s", err)
- }
- data := readFile(t, filepath.Join(bDir, tf.filename))
- if n := tf.sizeLimit; 0 < n && n < len(data) {
- data = data[:n]
- }
- if err := runTestSameEncodingAsCpp(data); err != nil {
- t.Errorf("i=%d: %v", i, err)
- failed = true
- }
- }
- if failed {
- t.Errorf("was the snappytool program built against the C++ snappy library version " +
- "d53de187 or later, commited on 2016-04-05? See " +
- "https://github.com/google/snappy/commit/d53de18799418e113e44444252a39b12a0e4e0cc")
- }
-}
-
-// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm
-// described in decode_amd64.s and its claim of a 10 byte overrun worst case.
-func TestSlowForwardCopyOverrun(t *testing.T) {
- const base = 100
-
- for length := 1; length < 18; length++ {
- for offset := 1; offset < 18; offset++ {
- highWaterMark := base
- d := base
- l := length
- o := offset
-
- // makeOffsetAtLeast8
- for o < 8 {
- if end := d + 8; highWaterMark < end {
- highWaterMark = end
- }
- l -= o
- d += o
- o += o
- }
-
- // fixUpSlowForwardCopy
- a := d
- d += l
-
- // finishSlowForwardCopy
- for l > 0 {
- if end := a + 8; highWaterMark < end {
- highWaterMark = end
- }
- a += 8
- l -= 8
- }
-
- dWant := base + length
- overrun := highWaterMark - dWant
- if d != dWant || overrun < 0 || 10 < overrun {
- t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])",
- length, offset, d, overrun, dWant)
- }
- }
- }
-}
-
-// TestEncodeNoiseThenRepeats encodes input for which the first half is very
-// incompressible and the second half is very compressible. The encoded form's
-// length should be closer to 50% of the original length than 100%.
-func TestEncodeNoiseThenRepeats(t *testing.T) {
- for _, origLen := range []int{256 * 1024, 2048 * 1024} {
- src := make([]byte, origLen)
- rng := rand.New(rand.NewSource(1))
- firstHalf, secondHalf := src[:origLen/2], src[origLen/2:]
- for i := range firstHalf {
- firstHalf[i] = uint8(rng.Intn(256))
- }
- for i := range secondHalf {
- secondHalf[i] = uint8(i >> 8)
- }
- dst := Encode(nil, src)
- if got, want := len(dst), origLen*3/4; got >= want {
- t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want)
- }
- }
-}
-
-func TestFramingFormat(t *testing.T) {
- // src is comprised of alternating 1e5-sized sequences of random
- // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
- // because it is larger than maxBlockSize (64k).
- src := make([]byte, 1e6)
- rng := rand.New(rand.NewSource(1))
- for i := 0; i < 10; i++ {
- if i%2 == 0 {
- for j := 0; j < 1e5; j++ {
- src[1e5*i+j] = uint8(rng.Intn(256))
- }
- } else {
- for j := 0; j < 1e5; j++ {
- src[1e5*i+j] = uint8(i)
- }
- }
- }
-
- buf := new(bytes.Buffer)
- if _, err := NewWriter(buf).Write(src); err != nil {
- t.Fatalf("Write: encoding: %v", err)
- }
- dst, err := ioutil.ReadAll(NewReader(buf))
- if err != nil {
- t.Fatalf("ReadAll: decoding: %v", err)
- }
- if err := cmp(dst, src); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestWriterGoldenOutput(t *testing.T) {
- buf := new(bytes.Buffer)
- w := NewBufferedWriter(buf)
- defer w.Close()
- w.Write([]byte("abcd")) // Not compressible.
- w.Flush()
- w.Write(bytes.Repeat([]byte{'A'}, 150)) // Compressible.
- w.Flush()
- // The next chunk is also compressible, but a naive, greedy encoding of the
- // overall length 67 copy as a length 64 copy (the longest expressible as a
- // tagCopy1 or tagCopy2) plus a length 3 remainder would be two 3-byte
- // tagCopy2 tags (6 bytes), since the minimum length for a tagCopy1 is 4
- // bytes. Instead, we could do it shorter, in 5 bytes: a 3-byte tagCopy2
- // (of length 60) and a 2-byte tagCopy1 (of length 7).
- w.Write(bytes.Repeat([]byte{'B'}, 68))
- w.Write([]byte("efC")) // Not compressible.
- w.Write(bytes.Repeat([]byte{'C'}, 20)) // Compressible.
- w.Write(bytes.Repeat([]byte{'B'}, 20)) // Compressible.
- w.Write([]byte("g")) // Not compressible.
- w.Flush()
-
- got := buf.String()
- want := strings.Join([]string{
- magicChunk,
- "\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum).
- "\x68\x10\xe6\xb6", // Checksum.
- "\x61\x62\x63\x64", // Uncompressed payload: "abcd".
- "\x00\x11\x00\x00", // Compressed chunk, 17 bytes long (including 4 byte checksum).
- "\x5f\xeb\xf2\x10", // Checksum.
- "\x96\x01", // Compressed payload: Uncompressed length (varint encoded): 150.
- "\x00\x41", // Compressed payload: tagLiteral, length=1, "A".
- "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1.
- "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1.
- "\x52\x01\x00", // Compressed payload: tagCopy2, length=21, offset=1.
- "\x00\x18\x00\x00", // Compressed chunk, 24 bytes long (including 4 byte checksum).
- "\x30\x85\x69\xeb", // Checksum.
- "\x70", // Compressed payload: Uncompressed length (varint encoded): 112.
- "\x00\x42", // Compressed payload: tagLiteral, length=1, "B".
- "\xee\x01\x00", // Compressed payload: tagCopy2, length=60, offset=1.
- "\x0d\x01", // Compressed payload: tagCopy1, length=7, offset=1.
- "\x08\x65\x66\x43", // Compressed payload: tagLiteral, length=3, "efC".
- "\x4e\x01\x00", // Compressed payload: tagCopy2, length=20, offset=1.
- "\x4e\x5a\x00", // Compressed payload: tagCopy2, length=20, offset=90.
- "\x00\x67", // Compressed payload: tagLiteral, length=1, "g".
- }, "")
- if got != want {
- t.Fatalf("\ngot: % x\nwant: % x", got, want)
- }
-}
-
-func TestEmitLiteral(t *testing.T) {
- testCases := []struct {
- length int
- want string
- }{
- {1, "\x00"},
- {2, "\x04"},
- {59, "\xe8"},
- {60, "\xec"},
- {61, "\xf0\x3c"},
- {62, "\xf0\x3d"},
- {254, "\xf0\xfd"},
- {255, "\xf0\xfe"},
- {256, "\xf0\xff"},
- {257, "\xf4\x00\x01"},
- {65534, "\xf4\xfd\xff"},
- {65535, "\xf4\xfe\xff"},
- {65536, "\xf4\xff\xff"},
- }
-
- dst := make([]byte, 70000)
- nines := bytes.Repeat([]byte{0x99}, 65536)
- for _, tc := range testCases {
- lit := nines[:tc.length]
- n := emitLiteral(dst, lit)
- if !bytes.HasSuffix(dst[:n], lit) {
- t.Errorf("length=%d: did not end with that many literal bytes", tc.length)
- continue
- }
- got := string(dst[:n-tc.length])
- if got != tc.want {
- t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want)
- continue
- }
- }
-}
-
-func TestEmitCopy(t *testing.T) {
- testCases := []struct {
- offset int
- length int
- want string
- }{
- {8, 04, "\x01\x08"},
- {8, 11, "\x1d\x08"},
- {8, 12, "\x2e\x08\x00"},
- {8, 13, "\x32\x08\x00"},
- {8, 59, "\xea\x08\x00"},
- {8, 60, "\xee\x08\x00"},
- {8, 61, "\xf2\x08\x00"},
- {8, 62, "\xf6\x08\x00"},
- {8, 63, "\xfa\x08\x00"},
- {8, 64, "\xfe\x08\x00"},
- {8, 65, "\xee\x08\x00\x05\x08"},
- {8, 66, "\xee\x08\x00\x09\x08"},
- {8, 67, "\xee\x08\x00\x0d\x08"},
- {8, 68, "\xfe\x08\x00\x01\x08"},
- {8, 69, "\xfe\x08\x00\x05\x08"},
- {8, 80, "\xfe\x08\x00\x3e\x08\x00"},
-
- {256, 04, "\x21\x00"},
- {256, 11, "\x3d\x00"},
- {256, 12, "\x2e\x00\x01"},
- {256, 13, "\x32\x00\x01"},
- {256, 59, "\xea\x00\x01"},
- {256, 60, "\xee\x00\x01"},
- {256, 61, "\xf2\x00\x01"},
- {256, 62, "\xf6\x00\x01"},
- {256, 63, "\xfa\x00\x01"},
- {256, 64, "\xfe\x00\x01"},
- {256, 65, "\xee\x00\x01\x25\x00"},
- {256, 66, "\xee\x00\x01\x29\x00"},
- {256, 67, "\xee\x00\x01\x2d\x00"},
- {256, 68, "\xfe\x00\x01\x21\x00"},
- {256, 69, "\xfe\x00\x01\x25\x00"},
- {256, 80, "\xfe\x00\x01\x3e\x00\x01"},
-
- {2048, 04, "\x0e\x00\x08"},
- {2048, 11, "\x2a\x00\x08"},
- {2048, 12, "\x2e\x00\x08"},
- {2048, 13, "\x32\x00\x08"},
- {2048, 59, "\xea\x00\x08"},
- {2048, 60, "\xee\x00\x08"},
- {2048, 61, "\xf2\x00\x08"},
- {2048, 62, "\xf6\x00\x08"},
- {2048, 63, "\xfa\x00\x08"},
- {2048, 64, "\xfe\x00\x08"},
- {2048, 65, "\xee\x00\x08\x12\x00\x08"},
- {2048, 66, "\xee\x00\x08\x16\x00\x08"},
- {2048, 67, "\xee\x00\x08\x1a\x00\x08"},
- {2048, 68, "\xfe\x00\x08\x0e\x00\x08"},
- {2048, 69, "\xfe\x00\x08\x12\x00\x08"},
- {2048, 80, "\xfe\x00\x08\x3e\x00\x08"},
- }
-
- dst := make([]byte, 1024)
- for _, tc := range testCases {
- n := emitCopy(dst, tc.offset, tc.length)
- got := string(dst[:n])
- if got != tc.want {
- t.Errorf("offset=%d, length=%d:\ngot % x\nwant % x", tc.offset, tc.length, got, tc.want)
- }
- }
-}
-
-func TestNewBufferedWriter(t *testing.T) {
- // Test all 32 possible sub-sequences of these 5 input slices.
- //
- // Their lengths sum to 400,000, which is over 6 times the Writer ibuf
- // capacity: 6 * maxBlockSize is 393,216.
- inputs := [][]byte{
- bytes.Repeat([]byte{'a'}, 40000),
- bytes.Repeat([]byte{'b'}, 150000),
- bytes.Repeat([]byte{'c'}, 60000),
- bytes.Repeat([]byte{'d'}, 120000),
- bytes.Repeat([]byte{'e'}, 30000),
- }
-loop:
- for i := 0; i < 1< 0; {
- i := copy(x, src)
- x = x[i:]
- }
- return dst
-}
-
-func benchWords(b *testing.B, n int, decode bool) {
- // Note: the file is OS-language dependent so the resulting values are not
- // directly comparable for non-US-English OS installations.
- data := expand(readFile(b, "/usr/share/dict/words"), n)
- if decode {
- benchDecode(b, data)
- } else {
- benchEncode(b, data)
- }
-}
-
-func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) }
-func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) }
-func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
-func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
-func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
-func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
-func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) }
-func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) }
-func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
-func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
-func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
-func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
-
-func BenchmarkRandomEncode(b *testing.B) {
- rng := rand.New(rand.NewSource(1))
- data := make([]byte, 1<<20)
- for i := range data {
- data[i] = uint8(rng.Intn(256))
- }
- benchEncode(b, data)
-}
-
-// testFiles' values are copied directly from
-// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
-// The label field is unused in snappy-go.
-var testFiles = []struct {
- label string
- filename string
- sizeLimit int
-}{
- {"html", "html", 0},
- {"urls", "urls.10K", 0},
- {"jpg", "fireworks.jpeg", 0},
- {"jpg_200", "fireworks.jpeg", 200},
- {"pdf", "paper-100k.pdf", 0},
- {"html4", "html_x_4", 0},
- {"txt1", "alice29.txt", 0},
- {"txt2", "asyoulik.txt", 0},
- {"txt3", "lcet10.txt", 0},
- {"txt4", "plrabn12.txt", 0},
- {"pb", "geo.protodata", 0},
- {"gaviota", "kppkn.gtb", 0},
-}
-
-const (
- // The benchmark data files are at this canonical URL.
- benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
-)
-
-func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) {
- bDir := filepath.FromSlash(*benchdataDir)
- filename := filepath.Join(bDir, basename)
- if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
- return nil
- }
-
- if !*download {
- b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b))
- }
- // Download the official snappy C++ implementation reference test data
- // files for benchmarking.
- if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) {
- return fmt.Errorf("failed to create %s: %s", bDir, err)
- }
-
- f, err := os.Create(filename)
- if err != nil {
- return fmt.Errorf("failed to create %s: %s", filename, err)
- }
- defer f.Close()
- defer func() {
- if errRet != nil {
- os.Remove(filename)
- }
- }()
- url := benchURL + basename
- resp, err := http.Get(url)
- if err != nil {
- return fmt.Errorf("failed to download %s: %s", url, err)
- }
- defer resp.Body.Close()
- if s := resp.StatusCode; s != http.StatusOK {
- return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
- }
- _, err = io.Copy(f, resp.Body)
- if err != nil {
- return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
- }
- return nil
-}
-
-func benchFile(b *testing.B, i int, decode bool) {
- if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil {
- b.Fatalf("failed to download testdata: %s", err)
- }
- bDir := filepath.FromSlash(*benchdataDir)
- data := readFile(b, filepath.Join(bDir, testFiles[i].filename))
- if n := testFiles[i].sizeLimit; 0 < n && n < len(data) {
- data = data[:n]
- }
- if decode {
- benchDecode(b, data)
- } else {
- benchEncode(b, data)
- }
-}
-
-// Naming convention is kept similar to what snappy's C++ implementation uses.
-func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
-func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
-func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
-func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
-func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
-func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
-func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
-func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
-func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
-func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
-func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
-func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
-func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
-func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
-func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
-func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
-func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
-func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
-func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
-func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
-func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
-func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
-func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
-func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
-
-func BenchmarkExtendMatch(b *testing.B) {
- tDir := filepath.FromSlash(*testdataDir)
- src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
- if err != nil {
- b.Fatalf("ReadFile: %v", err)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- for _, tc := range extendMatchGoldenTestCases {
- extendMatch(src, tc.i, tc.j)
- }
- }
-}
diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt
deleted file mode 100644
index 86a1875..0000000
--- a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt
+++ /dev/null
@@ -1,396 +0,0 @@
-Produced by David Widger. The previous edition was updated by Jose
-Menendez.
-
-
-
-
-
- THE ADVENTURES OF TOM SAWYER
- BY
- MARK TWAIN
- (Samuel Langhorne Clemens)
-
-
-
-
- P R E F A C E
-
-MOST of the adventures recorded in this book really occurred; one or
-two were experiences of my own, the rest those of boys who were
-schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but
-not from an individual--he is a combination of the characteristics of
-three boys whom I knew, and therefore belongs to the composite order of
-architecture.
-
-The odd superstitions touched upon were all prevalent among children
-and slaves in the West at the period of this story--that is to say,
-thirty or forty years ago.
-
-Although my book is intended mainly for the entertainment of boys and
-girls, I hope it will not be shunned by men and women on that account,
-for part of my plan has been to try to pleasantly remind adults of what
-they once were themselves, and of how they felt and thought and talked,
-and what queer enterprises they sometimes engaged in.
-
- THE AUTHOR.
-
-HARTFORD, 1876.
-
-
-
- T O M S A W Y E R
-
-
-
-CHAPTER I
-
-"TOM!"
-
-No answer.
-
-"TOM!"
-
-No answer.
-
-"What's gone with that boy, I wonder? You TOM!"
-
-No answer.
-
-The old lady pulled her spectacles down and looked over them about the
-room; then she put them up and looked out under them. She seldom or
-never looked THROUGH them for so small a thing as a boy; they were her
-state pair, the pride of her heart, and were built for "style," not
-service--she could have seen through a pair of stove-lids just as well.
-She looked perplexed for a moment, and then said, not fiercely, but
-still loud enough for the furniture to hear:
-
-"Well, I lay if I get hold of you I'll--"
-
-She did not finish, for by this time she was bending down and punching
-under the bed with the broom, and so she needed breath to punctuate the
-punches with. She resurrected nothing but the cat.
-
-"I never did see the beat of that boy!"
-
-She went to the open door and stood in it and looked out among the
-tomato vines and "jimpson" weeds that constituted the garden. No Tom.
-So she lifted up her voice at an angle calculated for distance and
-shouted:
-
-"Y-o-u-u TOM!"
-
-There was a slight noise behind her and she turned just in time to
-seize a small boy by the slack of his roundabout and arrest his flight.
-
-"There! I might 'a' thought of that closet. What you been doing in
-there?"
-
-"Nothing."
-
-"Nothing! Look at your hands. And look at your mouth. What IS that
-truck?"
-
-"I don't know, aunt."
-
-"Well, I know. It's jam--that's what it is. Forty times I've said if
-you didn't let that jam alone I'd skin you. Hand me that switch."
-
-The switch hovered in the air--the peril was desperate--
-
-"My! Look behind you, aunt!"
-
-The old lady whirled round, and snatched her skirts out of danger. The
-lad fled on the instant, scrambled up the high board-fence, and
-disappeared over it.
-
-His aunt Polly stood surprised a moment, and then broke into a gentle
-laugh.
-
-"Hang the boy, can't I never learn anything? Ain't he played me tricks
-enough like that for me to be looking out for him by this time? But old
-fools is the biggest fools there is. Can't learn an old dog new tricks,
-as the saying is. But my goodness, he never plays them alike, two days,
-and how is a body to know what's coming? He 'pears to know just how
-long he can torment me before I get my dander up, and he knows if he
-can make out to put me off for a minute or make me laugh, it's all down
-again and I can't hit him a lick. I ain't doing my duty by that boy,
-and that's the Lord's truth, goodness knows. Spare the rod and spile
-the child, as the Good Book says. I'm a laying up sin and suffering for
-us both, I know. He's full of the Old Scratch, but laws-a-me! he's my
-own dead sister's boy, poor thing, and I ain't got the heart to lash
-him, somehow. Every time I let him off, my conscience does hurt me so,
-and every time I hit him my old heart most breaks. Well-a-well, man
-that is born of woman is of few days and full of trouble, as the
-Scripture says, and I reckon it's so. He'll play hookey this evening, *
-and [* Southwestern for "afternoon"] I'll just be obleeged to make him
-work, to-morrow, to punish him. It's mighty hard to make him work
-Saturdays, when all the boys is having holiday, but he hates work more
-than he hates anything else, and I've GOT to do some of my duty by him,
-or I'll be the ruination of the child."
-
-Tom did play hookey, and he had a very good time. He got back home
-barely in season to help Jim, the small colored boy, saw next-day's
-wood and split the kindlings before supper--at least he was there in
-time to tell his adventures to Jim while Jim did three-fourths of the
-work. Tom's younger brother (or rather half-brother) Sid was already
-through with his part of the work (picking up chips), for he was a
-quiet boy, and had no adventurous, troublesome ways.
-
-While Tom was eating his supper, and stealing sugar as opportunity
-offered, Aunt Polly asked him questions that were full of guile, and
-very deep--for she wanted to trap him into damaging revealments. Like
-many other simple-hearted souls, it was her pet vanity to believe she
-was endowed with a talent for dark and mysterious diplomacy, and she
-loved to contemplate her most transparent devices as marvels of low
-cunning. Said she:
-
-"Tom, it was middling warm in school, warn't it?"
-
-"Yes'm."
-
-"Powerful warm, warn't it?"
-
-"Yes'm."
-
-"Didn't you want to go in a-swimming, Tom?"
-
-A bit of a scare shot through Tom--a touch of uncomfortable suspicion.
-He searched Aunt Polly's face, but it told him nothing. So he said:
-
-"No'm--well, not very much."
-
-The old lady reached out her hand and felt Tom's shirt, and said:
-
-"But you ain't too warm now, though." And it flattered her to reflect
-that she had discovered that the shirt was dry without anybody knowing
-that that was what she had in her mind. But in spite of her, Tom knew
-where the wind lay, now. So he forestalled what might be the next move:
-
-"Some of us pumped on our heads--mine's damp yet. See?"
-
-Aunt Polly was vexed to think she had overlooked that bit of
-circumstantial evidence, and missed a trick. Then she had a new
-inspiration:
-
-"Tom, you didn't have to undo your shirt collar where I sewed it, to
-pump on your head, did you? Unbutton your jacket!"
-
-The trouble vanished out of Tom's face. He opened his jacket. His
-shirt collar was securely sewed.
-
-"Bother! Well, go 'long with you. I'd made sure you'd played hookey
-and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a
-singed cat, as the saying is--better'n you look. THIS time."
-
-She was half sorry her sagacity had miscarried, and half glad that Tom
-had stumbled into obedient conduct for once.
-
-But Sidney said:
-
-"Well, now, if I didn't think you sewed his collar with white thread,
-but it's black."
-
-"Why, I did sew it with white! Tom!"
-
-But Tom did not wait for the rest. As he went out at the door he said:
-
-"Siddy, I'll lick you for that."
-
-In a safe place Tom examined two large needles which were thrust into
-the lapels of his jacket, and had thread bound about them--one needle
-carried white thread and the other black. He said:
-
-"She'd never noticed if it hadn't been for Sid. Confound it! sometimes
-she sews it with white, and sometimes she sews it with black. I wish to
-geeminy she'd stick to one or t'other--I can't keep the run of 'em. But
-I bet you I'll lam Sid for that. I'll learn him!"
-
-He was not the Model Boy of the village. He knew the model boy very
-well though--and loathed him.
-
-Within two minutes, or even less, he had forgotten all his troubles.
-Not because his troubles were one whit less heavy and bitter to him
-than a man's are to a man, but because a new and powerful interest bore
-them down and drove them out of his mind for the time--just as men's
-misfortunes are forgotten in the excitement of new enterprises. This
-new interest was a valued novelty in whistling, which he had just
-acquired from a negro, and he was suffering to practise it undisturbed.
-It consisted in a peculiar bird-like turn, a sort of liquid warble,
-produced by touching the tongue to the roof of the mouth at short
-intervals in the midst of the music--the reader probably remembers how
-to do it, if he has ever been a boy. Diligence and attention soon gave
-him the knack of it, and he strode down the street with his mouth full
-of harmony and his soul full of gratitude. He felt much as an
-astronomer feels who has discovered a new planet--no doubt, as far as
-strong, deep, unalloyed pleasure is concerned, the advantage was with
-the boy, not the astronomer.
-
-The summer evenings were long. It was not dark, yet. Presently Tom
-checked his whistle. A stranger was before him--a boy a shade larger
-than himself. A new-comer of any age or either sex was an impressive
-curiosity in the poor little shabby village of St. Petersburg. This boy
-was well dressed, too--well dressed on a week-day. This was simply
-astounding. His cap was a dainty thing, his close-buttoned blue cloth
-roundabout was new and natty, and so were his pantaloons. He had shoes
-on--and it was only Friday. He even wore a necktie, a bright bit of
-ribbon. He had a citified air about him that ate into Tom's vitals. The
-more Tom stared at the splendid marvel, the higher he turned up his
-nose at his finery and the shabbier and shabbier his own outfit seemed
-to him to grow. Neither boy spoke. If one moved, the other moved--but
-only sidewise, in a circle; they kept face to face and eye to eye all
-the time. Finally Tom said:
-
-"I can lick you!"
-
-"I'd like to see you try it."
-
-"Well, I can do it."
-
-"No you can't, either."
-
-"Yes I can."
-
-"No you can't."
-
-"I can."
-
-"You can't."
-
-"Can!"
-
-"Can't!"
-
-An uncomfortable pause. Then Tom said:
-
-"What's your name?"
-
-"'Tisn't any of your business, maybe."
-
-"Well I 'low I'll MAKE it my business."
-
-"Well why don't you?"
-
-"If you say much, I will."
-
-"Much--much--MUCH. There now."
-
-"Oh, you think you're mighty smart, DON'T you? I could lick you with
-one hand tied behind me, if I wanted to."
-
-"Well why don't you DO it? You SAY you can do it."
-
-"Well I WILL, if you fool with me."
-
-"Oh yes--I've seen whole families in the same fix."
-
-"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!"
-
-"You can lump that hat if you don't like it. I dare you to knock it
-off--and anybody that'll take a dare will suck eggs."
-
-"You're a liar!"
-
-"You're another."
-
-"You're a fighting liar and dasn't take it up."
-
-"Aw--take a walk!"
-
-"Say--if you give me much more of your sass I'll take and bounce a
-rock off'n your head."
-
-"Oh, of COURSE you will."
-
-"Well I WILL."
-
-"Well why don't you DO it then? What do you keep SAYING you will for?
-Why don't you DO it? It's because you're afraid."
-
-"I AIN'T afraid."
-
-"You are."
-
-"I ain't."
-
-"You are."
-
-Another pause, and more eying and sidling around each other. Presently
-they were shoulder to shoulder. Tom said:
-
-"Get away from here!"
-
-"Go away yourself!"
-
-"I won't."
-
-"I won't either."
-
-So they stood, each with a foot placed at an angle as a brace, and
-both shoving with might and main, and glowering at each other with
-hate. But neither could get an advantage. After struggling till both
-were hot and flushed, each relaxed his strain with watchful caution,
-and Tom said:
-
-"You're a coward and a pup. I'll tell my big brother on you, and he
-can thrash you with his little finger, and I'll make him do it, too."
-
-"What do I care for your big brother? I've got a brother that's bigger
-than he is--and what's more, he can throw him over that fence, too."
-[Both brothers were imaginary.]
-
-"That's a lie."
-
-"YOUR saying so don't make it so."
-
-Tom drew a line in the dust with his big toe, and said:
-
-"I dare you to step over that, and I'll lick you till you can't stand
-up. Anybody that'll take a dare will steal sheep."
-
-The new boy stepped over promptly, and said:
-
-"Now you said you'd do it, now let's see you do it."
-
-"Don't you crowd me now; you better look out."
-
-"Well, you SAID you'd do it--why don't you do it?"
-
-"By jingo! for two cents I WILL do it."
-
-The new boy took two broad coppers out of his pocket and held them out
-with derision. Tom struck them to the ground. In an instant both boys
-were rolling and tumbling in the dirt, gripped together like cats; and
-for the space of a minute they tugged and tore at each other's hair and
-clothes, punched and scratched each other's nose, and covered
-themselves with dust and glory. Presently the confusion took form, and
-through the fog of battle Tom appeared, seated astride the new boy, and
-pounding him with his fists. "Holler 'nuff!" said he.
-
-The boy only struggled to free himself. He was crying--mainly from rage.
-
-"Holler 'nuff!"--and the pounding went on.
-
-At last the stranger got out a smothered "'Nuff!" and Tom let him up
-and said:
-
-"Now that'll learn you. Better look out who you're fooling with next
-time."
-
-The new boy went off brushing the dust from his clothes, sobbing,
-snuffling, and occasionally looking back and shaking his head and
-threatening what he would do to Tom the "next time he caught him out."
-To which Tom responded with jeers, and started off in high feather, and
-as soon as his back was turned the new boy snatched up a stone, threw
-it and hit him between the shoulders and then turned tail and ran like
-an antelope. Tom chased the traitor home, and thus found out where he
-lived. He then held a position at the gate for some time, daring the
-enemy to come outside, but the enemy only made faces at him through the
-window and declined. At last the enemy's mother appeared, and called
-Tom a bad, vicious, vulgar child, and ordered him away. So he went
-away; but he said he "'lowed" to "lay" for that boy.
-
-He got home pretty late that night, and when he climbed cautiously in
-at the window, he uncovered an ambuscade, in the person of his aunt;
-and when she saw the state his clothes were in her resolution to turn
-his Saturday holiday into captivity at hard labor became adamantine in
-its firmness.
diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy
deleted file mode 100644
index 9c56d98..0000000
Binary files a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy and /dev/null differ
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap_test.go b/vendor/github.com/hashicorp/errwrap/errwrap_test.go
deleted file mode 100644
index 5ae5f8e..0000000
--- a/vendor/github.com/hashicorp/errwrap/errwrap_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package errwrap
-
-import (
- "fmt"
- "testing"
-)
-
-func TestWrappedError_impl(t *testing.T) {
- var _ error = new(wrappedError)
-}
-
-func TestGetAll(t *testing.T) {
- cases := []struct {
- Err error
- Msg string
- Len int
- }{
- {},
- {
- fmt.Errorf("foo"),
- "foo",
- 1,
- },
- {
- fmt.Errorf("bar"),
- "foo",
- 0,
- },
- {
- Wrapf("bar", fmt.Errorf("foo")),
- "foo",
- 1,
- },
- {
- Wrapf("{{err}}", fmt.Errorf("foo")),
- "foo",
- 2,
- },
- {
- Wrapf("bar", Wrapf("baz", fmt.Errorf("foo"))),
- "foo",
- 1,
- },
- }
-
- for i, tc := range cases {
- actual := GetAll(tc.Err, tc.Msg)
- if len(actual) != tc.Len {
- t.Fatalf("%d: bad: %#v", i, actual)
- }
- for _, v := range actual {
- if v.Error() != tc.Msg {
- t.Fatalf("%d: bad: %#v", i, actual)
- }
- }
- }
-}
-
-func TestGetAllType(t *testing.T) {
- cases := []struct {
- Err error
- Type interface{}
- Len int
- }{
- {},
- {
- fmt.Errorf("foo"),
- "foo",
- 0,
- },
- {
- fmt.Errorf("bar"),
- fmt.Errorf("foo"),
- 1,
- },
- {
- Wrapf("bar", fmt.Errorf("foo")),
- fmt.Errorf("baz"),
- 2,
- },
- {
- Wrapf("bar", Wrapf("baz", fmt.Errorf("foo"))),
- Wrapf("", nil),
- 0,
- },
- }
-
- for i, tc := range cases {
- actual := GetAllType(tc.Err, tc.Type)
- if len(actual) != tc.Len {
- t.Fatalf("%d: bad: %#v", i, actual)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
index 7d8a57c..8d306bf 100644
--- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
+++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
@@ -26,6 +26,7 @@ func DefaultPooledTransport() *http.Transport {
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
+ DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
new file mode 100644
index 0000000..7eda377
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
@@ -0,0 +1,43 @@
+package cleanhttp
+
+import (
+ "net/http"
+ "strings"
+ "unicode"
+)
+
+// HandlerInput provides input options to cleanhttp's handlers
+type HandlerInput struct {
+ ErrStatus int
+}
+
+// PrintablePathCheckHandler is a middleware that ensures the request path
+// contains only printable runes.
+func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
+ // Nil-check on input to make it optional
+ if input == nil {
+ input = &HandlerInput{
+ ErrStatus: http.StatusBadRequest,
+ }
+ }
+
+ // Default to http.StatusBadRequest on error
+ if input.ErrStatus == 0 {
+ input.ErrStatus = http.StatusBadRequest
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Check URL path for non-printable characters
+ idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
+ return !unicode.IsPrint(c)
+ })
+
+ if idx != -1 {
+ w.WriteHeader(input.ErrStatus)
+ return
+ }
+
+ next.ServeHTTP(w, r)
+ return
+ })
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml
index 4b865d1..304a835 100644
--- a/vendor/github.com/hashicorp/go-multierror/.travis.yml
+++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml
@@ -3,7 +3,7 @@ sudo: false
language: go
go:
- - 1.6
+ - 1.x
branches:
only:
diff --git a/vendor/github.com/hashicorp/go-multierror/append_test.go b/vendor/github.com/hashicorp/go-multierror/append_test.go
deleted file mode 100644
index 58ddafa..0000000
--- a/vendor/github.com/hashicorp/go-multierror/append_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package multierror
-
-import (
- "errors"
- "testing"
-)
-
-func TestAppend_Error(t *testing.T) {
- original := &Error{
- Errors: []error{errors.New("foo")},
- }
-
- result := Append(original, errors.New("bar"))
- if len(result.Errors) != 2 {
- t.Fatalf("wrong len: %d", len(result.Errors))
- }
-
- original = &Error{}
- result = Append(original, errors.New("bar"))
- if len(result.Errors) != 1 {
- t.Fatalf("wrong len: %d", len(result.Errors))
- }
-
- // Test when a typed nil is passed
- var e *Error
- result = Append(e, errors.New("baz"))
- if len(result.Errors) != 1 {
- t.Fatalf("wrong len: %d", len(result.Errors))
- }
-
- // Test flattening
- original = &Error{
- Errors: []error{errors.New("foo")},
- }
-
- result = Append(original, Append(nil, errors.New("foo"), errors.New("bar")))
- if len(result.Errors) != 3 {
- t.Fatalf("wrong len: %d", len(result.Errors))
- }
-}
-
-func TestAppend_NilError(t *testing.T) {
- var err error
- result := Append(err, errors.New("bar"))
- if len(result.Errors) != 1 {
- t.Fatalf("wrong len: %d", len(result.Errors))
- }
-}
-
-func TestAppend_NilErrorArg(t *testing.T) {
- var err error
- var nilErr *Error
- result := Append(err, nilErr)
- if len(result.Errors) != 0 {
- t.Fatalf("wrong len: %d", len(result.Errors))
- }
-}
-
-func TestAppend_NilErrorIfaceArg(t *testing.T) {
- var err error
- var nilErr error
- result := Append(err, nilErr)
- if len(result.Errors) != 0 {
- t.Fatalf("wrong len: %d", len(result.Errors))
- }
-}
-
-func TestAppend_NonError(t *testing.T) {
- original := errors.New("foo")
- result := Append(original, errors.New("bar"))
- if len(result.Errors) != 2 {
- t.Fatalf("wrong len: %d", len(result.Errors))
- }
-}
-
-func TestAppend_NonError_Error(t *testing.T) {
- original := errors.New("foo")
- result := Append(original, Append(nil, errors.New("bar")))
- if len(result.Errors) != 2 {
- t.Fatalf("wrong len: %d", len(result.Errors))
- }
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten_test.go b/vendor/github.com/hashicorp/go-multierror/flatten_test.go
deleted file mode 100644
index 9fbacad..0000000
--- a/vendor/github.com/hashicorp/go-multierror/flatten_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package multierror
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strings"
- "testing"
-)
-
-func TestFlatten(t *testing.T) {
- original := &Error{
- Errors: []error{
- errors.New("one"),
- &Error{
- Errors: []error{
- errors.New("two"),
- &Error{
- Errors: []error{
- errors.New("three"),
- },
- },
- },
- },
- },
- }
-
- expected := strings.TrimSpace(`
-3 errors occurred:
-
-* one
-* two
-* three
- `)
- actual := fmt.Sprintf("%s", Flatten(original))
-
- if expected != actual {
- t.Fatalf("expected: %s, got: %s", expected, actual)
- }
-}
-
-func TestFlatten_nonError(t *testing.T) {
- err := errors.New("foo")
- actual := Flatten(err)
- if !reflect.DeepEqual(actual, err) {
- t.Fatalf("bad: %#v", actual)
- }
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/format_test.go b/vendor/github.com/hashicorp/go-multierror/format_test.go
deleted file mode 100644
index 3359e02..0000000
--- a/vendor/github.com/hashicorp/go-multierror/format_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package multierror
-
-import (
- "errors"
- "testing"
-)
-
-func TestListFormatFuncSingle(t *testing.T) {
- expected := `1 error occurred:
-
-* foo`
-
- errors := []error{
- errors.New("foo"),
- }
-
- actual := ListFormatFunc(errors)
- if actual != expected {
- t.Fatalf("bad: %#v", actual)
- }
-}
-
-func TestListFormatFuncMultiple(t *testing.T) {
- expected := `2 errors occurred:
-
-* foo
-* bar`
-
- errors := []error{
- errors.New("foo"),
- errors.New("bar"),
- }
-
- actual := ListFormatFunc(errors)
- if actual != expected {
- t.Fatalf("bad: %#v", actual)
- }
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror_test.go b/vendor/github.com/hashicorp/go-multierror/multierror_test.go
deleted file mode 100644
index 5567d1c..0000000
--- a/vendor/github.com/hashicorp/go-multierror/multierror_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package multierror
-
-import (
- "errors"
- "reflect"
- "testing"
-)
-
-func TestError_Impl(t *testing.T) {
- var _ error = new(Error)
-}
-
-func TestErrorError_custom(t *testing.T) {
- errors := []error{
- errors.New("foo"),
- errors.New("bar"),
- }
-
- fn := func(es []error) string {
- return "foo"
- }
-
- multi := &Error{Errors: errors, ErrorFormat: fn}
- if multi.Error() != "foo" {
- t.Fatalf("bad: %s", multi.Error())
- }
-}
-
-func TestErrorError_default(t *testing.T) {
- expected := `2 errors occurred:
-
-* foo
-* bar`
-
- errors := []error{
- errors.New("foo"),
- errors.New("bar"),
- }
-
- multi := &Error{Errors: errors}
- if multi.Error() != expected {
- t.Fatalf("bad: %s", multi.Error())
- }
-}
-
-func TestErrorErrorOrNil(t *testing.T) {
- err := new(Error)
- if err.ErrorOrNil() != nil {
- t.Fatalf("bad: %#v", err.ErrorOrNil())
- }
-
- err.Errors = []error{errors.New("foo")}
- if v := err.ErrorOrNil(); v == nil {
- t.Fatal("should not be nil")
- } else if !reflect.DeepEqual(v, err) {
- t.Fatalf("bad: %#v", v)
- }
-}
-
-func TestErrorWrappedErrors(t *testing.T) {
- errors := []error{
- errors.New("foo"),
- errors.New("bar"),
- }
-
- multi := &Error{Errors: errors}
- if !reflect.DeepEqual(multi.Errors, multi.WrappedErrors()) {
- t.Fatalf("bad: %s", multi.WrappedErrors())
- }
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix_test.go b/vendor/github.com/hashicorp/go-multierror/prefix_test.go
deleted file mode 100644
index 1d4a6f6..0000000
--- a/vendor/github.com/hashicorp/go-multierror/prefix_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package multierror
-
-import (
- "errors"
- "testing"
-)
-
-func TestPrefix_Error(t *testing.T) {
- original := &Error{
- Errors: []error{errors.New("foo")},
- }
-
- result := Prefix(original, "bar")
- if result.(*Error).Errors[0].Error() != "bar foo" {
- t.Fatalf("bad: %s", result)
- }
-}
-
-func TestPrefix_NilError(t *testing.T) {
- var err error
- result := Prefix(err, "bar")
- if result != nil {
- t.Fatalf("bad: %#v", result)
- }
-}
-
-func TestPrefix_NonError(t *testing.T) {
- original := errors.New("foo")
- result := Prefix(original, "bar")
- if result.Error() != "bar foo" {
- t.Fatalf("bad: %s", result)
- }
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/scripts/deps.sh b/vendor/github.com/hashicorp/go-multierror/scripts/deps.sh
deleted file mode 100755
index 1d2fcf9..0000000
--- a/vendor/github.com/hashicorp/go-multierror/scripts/deps.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bash
-#
-# This script updates dependencies using a temporary directory. This is required
-# to avoid any auxillary dependencies that sneak into GOPATH.
-set -e
-
-# Get the parent directory of where this script is.
-SOURCE="${BASH_SOURCE[0]}"
-while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
-DIR="$(cd -P "$(dirname "$SOURCE")/.." && pwd)"
-
-# Change into that directory
-cd "$DIR"
-
-# Get the name from the directory
-NAME=${NAME:-"$(basename $(pwd))"}
-
-# Announce
-echo "==> Updating dependencies..."
-
-echo "--> Making tmpdir..."
-tmpdir=$(mktemp -d)
-function cleanup {
- rm -rf "${tmpdir}"
-}
-trap cleanup EXIT
-
-export GOPATH="${tmpdir}"
-export PATH="${tmpdir}/bin:$PATH"
-
-mkdir -p "${tmpdir}/src/github.com/hashicorp"
-pushd "${tmpdir}/src/github.com/hashicorp" &>/dev/null
-
-echo "--> Copying ${NAME}..."
-cp -R "$DIR" "${tmpdir}/src/github.com/hashicorp/${NAME}"
-pushd "${tmpdir}/src/github.com/hashicorp/${NAME}" &>/dev/null
-rm -rf vendor/
-
-echo "--> Installing dependency manager..."
-go get -u github.com/kardianos/govendor
-govendor init
-
-echo "--> Installing all dependencies (may take some time)..."
-govendor fetch -v +outside
-
-echo "--> Vendoring..."
-govendor add +external
-
-echo "--> Moving into place..."
-vpath="${tmpdir}/src/github.com/hashicorp/${NAME}/vendor"
-popd &>/dev/null
-popd &>/dev/null
-rm -rf vendor/
-cp -R "${vpath}" .
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin_test.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin_test.go
deleted file mode 100644
index 2129c15..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package rootcerts
-
-import "testing"
-
-func TestSystemCAsOnDarwin(t *testing.T) {
- _, err := LoadSystemCAs()
- if err != nil {
- t.Fatalf("Got error: %s", err)
- }
-}
-
-func TestCertKeychains(t *testing.T) {
- keychains := certKeychains()
- if len(keychains) != 3 {
- t.Fatalf("Expected 3 keychains, got %#v", keychains)
- }
-}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_test.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_test.go
deleted file mode 100644
index 9634385..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package rootcerts
-
-import (
- "path/filepath"
- "testing"
-)
-
-const fixturesDir = "./test-fixtures"
-
-func TestConfigureTLSHandlesNil(t *testing.T) {
- err := ConfigureTLS(nil, nil)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestLoadCACertsHandlesNil(t *testing.T) {
- _, err := LoadCACerts(nil)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestLoadCACertsFromFile(t *testing.T) {
- path := testFixture("cafile", "cacert.pem")
- _, err := LoadCACerts(&Config{CAFile: path})
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestLoadCACertsFromDir(t *testing.T) {
- path := testFixture("capath")
- _, err := LoadCACerts(&Config{CAPath: path})
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestLoadCACertsFromDirWithSymlinks(t *testing.T) {
- path := testFixture("capath-with-symlinks")
- _, err := LoadCACerts(&Config{CAPath: path})
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func testFixture(n ...string) string {
- parts := []string{fixturesDir}
- parts = append(parts, n...)
- return filepath.Join(parts...)
-}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/cafile/cacert.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/cafile/cacert.pem
deleted file mode 100644
index 86d732f..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/cafile/cacert.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIExDCCA6ygAwIBAgIJAJ7PV+3kJZqZMA0GCSqGSIb3DQEBBQUAMIGcMQswCQYD
-VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xEjAQ
-BgNVBAoTCUhhc2hpQ29ycDEUMBIGA1UECxMLRW5naW5lZXJpbmcxGzAZBgNVBAMU
-EiouYXRsYXMucGhpbnplLmNvbTEhMB8GCSqGSIb3DQEJARYScGF1bEBoYXNoaWNv
-cnAuY29tMB4XDTE2MDQyNzE1MjYyMVoXDTE3MDQyNzE1MjYyMVowgZwxCzAJBgNV
-BAYTAlVTMREwDwYDVQQIEwhJbGxpbm9pczEQMA4GA1UEBxMHQ2hpY2FnbzESMBAG
-A1UEChMJSGFzaGlDb3JwMRQwEgYDVQQLEwtFbmdpbmVlcmluZzEbMBkGA1UEAxQS
-Ki5hdGxhcy5waGluemUuY29tMSEwHwYJKoZIhvcNAQkBFhJwYXVsQGhhc2hpY29y
-cC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDWRXdMnsTpxpwZ
-D2olsun9WO7SnMQ/SIR3DV/fttPIDHSQm2ad4r2pKEuiV+TKEFUgj/Id9bCAfQYs
-jsa1qX1GmieXz+83OnK3MDEcDczpjGhSplTYGOjlxKLMhMBAOtdV5hJAYz3nwV3c
-R+IQu/4213+em40shZAQRNZ2apnyE3+QB+gPlEs9Nw0OcbSKLmAiuKPbJpO+94ou
-n1h0/w/+DPz6yO/fFPoA3vlisGM6B4R9U2JVwWjXrU71fU1i82ulFQdApdfUs1FP
-wRrZxgX5ldUrRvFr8lJiMehdX8khO7Ue4rT6yxbI6KVM04Q5mNt1ARRLI69rN9My
-pGXiItcxAgMBAAGjggEFMIIBATAdBgNVHQ4EFgQUjwsj8l0Y9HFQLH0GaJAsOHof
-PhwwgdEGA1UdIwSByTCBxoAUjwsj8l0Y9HFQLH0GaJAsOHofPhyhgaKkgZ8wgZwx
-CzAJBgNVBAYTAlVTMREwDwYDVQQIEwhJbGxpbm9pczEQMA4GA1UEBxMHQ2hpY2Fn
-bzESMBAGA1UEChMJSGFzaGlDb3JwMRQwEgYDVQQLEwtFbmdpbmVlcmluZzEbMBkG
-A1UEAxQSKi5hdGxhcy5waGluemUuY29tMSEwHwYJKoZIhvcNAQkBFhJwYXVsQGhh
-c2hpY29ycC5jb22CCQCez1ft5CWamTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEB
-BQUAA4IBAQC4tFfxpB8xEk9ewb5CNhhac4oKwGths+oq45DjoNtlagDMmIs2bl18
-q45PIB7fuFkAz/YHcOL0UEOAiw4jbuROp9jacHxBV21lRLLmNlK1Llc3eNVvLJ38
-ud6/Skilv9XyC4JNk0P5KrghxR6SOGwRuYZNqF+tthf+Bp9wJvLyfqDuJfGBal7C
-ezobMoh4tp8Dh1JeQlwvJcVt2k0UFJpa57MNr78c684Bq55ow+jd6wFG0XM0MMmy
-u+QRgJEGfYuYDPFEO8C8IfRyrHuV7Ll9P6eyEEFCneznXY0yJc/Gn3ZcX7ANqJsc
-ueMOWw/vUnonzxAFKW+I9U9ptyVSNMLY
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/securetrust.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/securetrust.pem
deleted file mode 100644
index 3740092..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/securetrust.pem
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
-MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
-FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
-MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
-cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
-Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
-0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
-wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
-7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
-8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
-BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
-/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
-JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
-NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
-6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
-3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
-D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
-CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
-3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/thawte.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/thawte.pem
deleted file mode 100644
index 998460f..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/thawte.pem
+++ /dev/null
@@ -1,25 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
-qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
-Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
-MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
-BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
-NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
-LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
-A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
-IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
-SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
-W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
-3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
-6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
-Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
-NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
-MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
-r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
-DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
-YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
-xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
-/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
-LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
-jVaMaA==
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/go-sockaddr/.gitignore b/vendor/github.com/hashicorp/go-sockaddr/.gitignore
new file mode 100644
index 0000000..41720b8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+.cover.out*
+coverage.html
diff --git a/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile b/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile
new file mode 100644
index 0000000..f3dfd24
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile
@@ -0,0 +1,65 @@
+TOOLS= golang.org/x/tools/cover
+GOCOVER_TMPFILE?= $(GOCOVER_FILE).tmp
+GOCOVER_FILE?= .cover.out
+GOCOVERHTML?= coverage.html
+FIND=`/usr/bin/which 2> /dev/null gfind find | /usr/bin/grep -v ^no | /usr/bin/head -n 1`
+XARGS=`/usr/bin/which 2> /dev/null gxargs xargs | /usr/bin/grep -v ^no | /usr/bin/head -n 1`
+
+test:: $(GOCOVER_FILE)
+ @$(MAKE) -C cmd/sockaddr test
+
+cover:: coverage_report
+
+$(GOCOVER_FILE)::
+ @${FIND} . -type d ! -path '*cmd*' ! -path '*.git*' -print0 | ${XARGS} -0 -I % sh -ec "cd % && rm -f $(GOCOVER_TMPFILE) && go test -coverprofile=$(GOCOVER_TMPFILE)"
+
+ @echo 'mode: set' > $(GOCOVER_FILE)
+ @${FIND} . -type f ! -path '*cmd*' ! -path '*.git*' -name "$(GOCOVER_TMPFILE)" -print0 | ${XARGS} -0 -n1 cat $(GOCOVER_TMPFILE) | grep -v '^mode: ' >> ${PWD}/$(GOCOVER_FILE)
+
+$(GOCOVERHTML): $(GOCOVER_FILE)
+ go tool cover -html=$(GOCOVER_FILE) -o $(GOCOVERHTML)
+
+coverage_report:: $(GOCOVER_FILE)
+ go tool cover -html=$(GOCOVER_FILE)
+
+audit_tools::
+ @go get -u github.com/golang/lint/golint && echo "Installed golint:"
+ @go get -u github.com/fzipp/gocyclo && echo "Installed gocyclo:"
+ @go get -u github.com/remyoudompheng/go-misc/deadcode && echo "Installed deadcode:"
+ @go get -u github.com/client9/misspell/cmd/misspell && echo "Installed misspell:"
+ @go get -u github.com/gordonklaus/ineffassign && echo "Installed ineffassign:"
+
+audit::
+ deadcode
+ go tool vet -all *.go
+ go tool vet -shadow=true *.go
+ golint *.go
+ ineffassign .
+ gocyclo -over 65 *.go
+ misspell *.go
+
+clean::
+ rm -f $(GOCOVER_FILE) $(GOCOVERHTML)
+
+dev::
+ @go build
+ @$(MAKE) -B -C cmd/sockaddr sockaddr
+
+install::
+ @go install
+ @$(MAKE) -C cmd/sockaddr install
+
+doc::
+ @echo Visit: http://127.0.0.1:6161/pkg/github.com/hashicorp/go-sockaddr/
+ godoc -http=:6161 -goroot $GOROOT
+
+world::
+ @set -e; \
+ for os in solaris darwin freebsd linux windows; do \
+ for arch in amd64; do \
+ printf "Building on %s-%s\n" "$${os}" "$${arch}" ; \
+ env GOOS="$${os}" GOARCH="$${arch}" go build -o /dev/null; \
+ done; \
+ done
+
+ $(MAKE) -C cmd/sockaddr world
diff --git a/vendor/github.com/hashicorp/go-sockaddr/LICENSE b/vendor/github.com/hashicorp/go-sockaddr/LICENSE
new file mode 100644
index 0000000..a612ad9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-sockaddr/README.md b/vendor/github.com/hashicorp/go-sockaddr/README.md
new file mode 100644
index 0000000..a2e170a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/README.md
@@ -0,0 +1,118 @@
+# go-sockaddr
+
+## `sockaddr` Library
+
+Socket address convenience functions for Go. `go-sockaddr` is a convenience
+library that makes doing the right thing with IP addresses easy. `go-sockaddr`
+is loosely modeled after the UNIX `sockaddr_t` and creates a union of the family
+of `sockaddr_t` types (see below for an ascii diagram). Library documentation
+is available
+at
+[https://godoc.org/github.com/hashicorp/go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr).
+The primary intent of the library was to make it possible to define heuristics
+for selecting the correct IP addresses when a configuration is evaluated at
+runtime. See
+the
+[docs](https://godoc.org/github.com/hashicorp/go-sockaddr),
+[`template` package](https://godoc.org/github.com/hashicorp/go-sockaddr/template),
+tests,
+and
+[CLI utility](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr)
+for details and hints as to how to use this library.
+
+For example, with this library it is possible to find an IP address that:
+
+* is attached to a default route
+ ([`GetDefaultInterfaces()`](https://godoc.org/github.com/hashicorp/go-sockaddr#GetDefaultInterfaces))
+* is contained within a CIDR block ([`IfByNetwork()`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByNetwork))
+* is an RFC1918 address
+ ([`IfByRFC("1918")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC))
+* is ordered
+ ([`OrderedIfAddrBy(args)`](https://godoc.org/github.com/hashicorp/go-sockaddr#OrderedIfAddrBy) where
+ `args` includes, but is not limited
+ to,
+ [`AscIfType`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscIfType),
+ [`AscNetworkSize`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscNetworkSize))
+* excludes all IPv6 addresses
+ ([`IfByType("^(IPv4)$")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByType))
+* is larger than a `/32`
+ ([`IfByMaskSize(32)`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByMaskSize))
+* is not on a `down` interface
+ ([`ExcludeIfs("flags", "down")`](https://godoc.org/github.com/hashicorp/go-sockaddr#ExcludeIfs))
+* preferences an IPv6 address over an IPv4 address
+ ([`SortIfByType()`](https://godoc.org/github.com/hashicorp/go-sockaddr#SortIfByType) +
+ [`ReverseIfAddrs()`](https://godoc.org/github.com/hashicorp/go-sockaddr#ReverseIfAddrs)); and
+* excludes any IP in RFC6890 address
+ ([`IfByRFC("6890")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC))
+
+Or any combination or variation therein.
+
+There are also a few simple helper functions such as `GetPublicIP` and
+`GetPrivateIP` which both return strings and select the first public or private
+IP address on the default interface, respectively. Similarly, there is also a
+helper function called `GetInterfaceIP` which returns the first usable IP
+address on the named interface.
+
+## `sockaddr` CLI
+
+Given the possible complexity of the `sockaddr` library, there is a CLI utility
+that accompanies the library, also
+called
+[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr).
+The
+[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr)
+utility exposes nearly all of the functionality of the library and can be used
+either as an administrative tool or testing tool. To install
+the
+[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr),
+run:
+
+```text
+$ go get -u github.com/hashicorp/go-sockaddr/cmd/sockaddr
+```
+
+If you're familiar with UNIX's `sockaddr` structs, the following diagram
+mapping the C `sockaddr` (top) to `go-sockaddr` structs (bottom) and
+interfaces will be helpful:
+
+```
++-------------------------------------------------------+
+| |
+| sockaddr |
+| SockAddr |
+| |
+| +--------------+ +----------------------------------+ |
+| | sockaddr_un | | | |
+| | SockAddrUnix | | sockaddr_in{,6} | |
+| +--------------+ | IPAddr | |
+| | | |
+| | +-------------+ +--------------+ | |
+| | | sockaddr_in | | sockaddr_in6 | | |
+| | | IPv4Addr | | IPv6Addr | | |
+| | +-------------+ +--------------+ | |
+| | | |
+| +----------------------------------+ |
+| |
++-------------------------------------------------------+
+```
+
+## Inspiration and Design
+
+There were many subtle inspirations that led to this design, but the most direct
+inspiration for the filtering syntax was
+OpenBSD's
+[`pf.conf(5)`](https://www.freebsd.org/cgi/man.cgi?query=pf.conf&apropos=0&sektion=0&arch=default&format=html#PARAMETERS) firewall
+syntax that lets you select the first IP address on a given named interface.
+The original problem stemmed from:
+
+* needing to create immutable images using [Packer](https://www.packer.io) that
+ ran the [Consul](https://www.consul.io) process (Consul can only use one IP
+ address at a time);
+* images that may or may not have multiple interfaces or IP addresses at
+ runtime; and
+* we didn't want to rely on configuration management to render out the correct
+ IP address if the VM image was being used in an auto-scaling group.
+
+Instead we needed some way to codify a heuristic that would correctly select the
+right IP address but the input parameters were not known when the image was
+created.
diff --git a/vendor/github.com/hashicorp/go-sockaddr/doc.go b/vendor/github.com/hashicorp/go-sockaddr/doc.go
new file mode 100644
index 0000000..90671de
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/doc.go
@@ -0,0 +1,5 @@
+/*
+Package sockaddr is a Go implementation of the UNIX socket family data types and
+related helper functions.
+*/
+package sockaddr
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go
new file mode 100644
index 0000000..0811b27
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go
@@ -0,0 +1,254 @@
+package sockaddr
+
+import "strings"
+
+// ifAddrAttrMap is a map of the IfAddr type-specific attributes.
+var ifAddrAttrMap map[AttrName]func(IfAddr) string
+var ifAddrAttrs []AttrName
+
+func init() {
+ ifAddrAttrInit()
+}
+
+// GetPrivateIP returns a string with a single IP address that is part of RFC
+// 6890 and has a default route. If the system can't determine its IP address
+// or find an RFC 6890 IP address, an empty string will be returned instead.
+// This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetPrivateInterfaces | attr "address"}}'
+/// ```
+func GetPrivateIP() (string, error) {
+ privateIfs, err := GetPrivateInterfaces()
+ if err != nil {
+ return "", err
+ }
+ if len(privateIfs) < 1 {
+ return "", nil
+ }
+
+ ifAddr := privateIfs[0]
+ ip := *ToIPAddr(ifAddr.SockAddr)
+ return ip.NetIP().String(), nil
+}
+
+// GetPrivateIPs returns a string with all IP addresses that are part of RFC
+// 6890 (regardless of whether or not there is a default route, unlike
+// GetPublicIP). If the system can't find any RFC 6890 IP addresses, an empty
+// string will be returned instead. This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "RFC" "6890" | join "address" " "}}'
+/// ```
+func GetPrivateIPs() (string, error) {
+ ifAddrs, err := GetAllInterfaces()
+ if err != nil {
+ return "", err
+ } else if len(ifAddrs) < 1 {
+ return "", nil
+ }
+
+ ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP)
+ if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs)
+
+ ifAddrs, _, err = IfByRFC("6890", ifAddrs)
+ if err != nil {
+ return "", err
+ } else if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ _, ifAddrs, err = IfByRFC(ForwardingBlacklistRFC, ifAddrs)
+ if err != nil {
+ return "", err
+ } else if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ ips := make([]string, 0, len(ifAddrs))
+ for _, ifAddr := range ifAddrs {
+ ip := *ToIPAddr(ifAddr.SockAddr)
+ s := ip.NetIP().String()
+ ips = append(ips, s)
+ }
+
+ return strings.Join(ips, " "), nil
+}
+
+// GetPublicIP returns a string with a single IP address that is NOT part of RFC
+// 6890 and has a default route. If the system can't determine its IP address
+// or find a non RFC 6890 IP address, an empty string will be returned instead.
+// This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetPublicInterfaces | attr "address"}}'
+/// ```
+func GetPublicIP() (string, error) {
+ publicIfs, err := GetPublicInterfaces()
+ if err != nil {
+ return "", err
+ } else if len(publicIfs) < 1 {
+ return "", nil
+ }
+
+ ifAddr := publicIfs[0]
+ ip := *ToIPAddr(ifAddr.SockAddr)
+ return ip.NetIP().String(), nil
+}
+
+// GetPublicIPs returns a string with all IP addresses that are NOT part of RFC
+// 6890 (regardless of whether or not there is a default route, unlike
+// GetPublicIP). If the system can't find any non RFC 6890 IP addresses, an
+// empty string will be returned instead. This function is the `eval`
+// equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | exclude "RFC" "6890" | join "address" " "}}'
+/// ```
+func GetPublicIPs() (string, error) {
+ ifAddrs, err := GetAllInterfaces()
+ if err != nil {
+ return "", err
+ } else if len(ifAddrs) < 1 {
+ return "", nil
+ }
+
+ ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP)
+ if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs)
+
+ _, ifAddrs, err = IfByRFC("6890", ifAddrs)
+ if err != nil {
+ return "", err
+ } else if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ ips := make([]string, 0, len(ifAddrs))
+ for _, ifAddr := range ifAddrs {
+ ip := *ToIPAddr(ifAddr.SockAddr)
+ s := ip.NetIP().String()
+ ips = append(ips, s)
+ }
+
+ return strings.Join(ips, " "), nil
+}
+
+// GetInterfaceIP returns a string with a single IP address sorted by the size
+// of the network (i.e. IP addresses with a smaller netmask, larger network
+// size, are sorted first). This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <> | sort "type,size" | include "flag" "forwardable" | attr "address" }}'
+/// ```
+func GetInterfaceIP(namedIfRE string) (string, error) {
+ ifAddrs, err := GetAllInterfaces()
+ if err != nil {
+ return "", err
+ }
+
+ ifAddrs, _, err = IfByName(namedIfRE, ifAddrs)
+ if err != nil {
+ return "", err
+ }
+
+ ifAddrs, _, err = IfByFlag("forwardable", ifAddrs)
+ if err != nil {
+ return "", err
+ }
+
+ ifAddrs, err = SortIfBy("+type,+size", ifAddrs)
+ if err != nil {
+ return "", err
+ }
+
+ if len(ifAddrs) == 0 {
+ return "", err
+ }
+
+ ip := ToIPAddr(ifAddrs[0].SockAddr)
+ if ip == nil {
+ return "", err
+ }
+
+ return IPAddrAttr(*ip, "address"), nil
+}
+
+// GetInterfaceIPs returns a string with all IPs, sorted by the size of the
+// network (i.e. IP addresses with a smaller netmask, larger network size, are
+// sorted first), on a named interface. This function is the `eval` equivalent
+// of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <> | sort "type,size" | join "address" " "}}'
+/// ```
+func GetInterfaceIPs(namedIfRE string) (string, error) {
+ ifAddrs, err := GetAllInterfaces()
+ if err != nil {
+ return "", err
+ }
+
+ ifAddrs, _, err = IfByName(namedIfRE, ifAddrs)
+ if err != nil {
+ return "", err
+ }
+
+ ifAddrs, err = SortIfBy("+type,+size", ifAddrs)
+ if err != nil {
+ return "", err
+ }
+
+ if len(ifAddrs) == 0 {
+ return "", err
+ }
+
+ ips := make([]string, 0, len(ifAddrs))
+ for _, ifAddr := range ifAddrs {
+ ip := *ToIPAddr(ifAddr.SockAddr)
+ s := ip.NetIP().String()
+ ips = append(ips, s)
+ }
+
+ return strings.Join(ips, " "), nil
+}
+
+// IfAddrAttrs returns a list of attributes supported by the IfAddr type
+func IfAddrAttrs() []AttrName {
+ return ifAddrAttrs
+}
+
+// IfAddrAttr returns a string representation of an attribute for the given
+// IfAddr.
+func IfAddrAttr(ifAddr IfAddr, attrName AttrName) string {
+ fn, found := ifAddrAttrMap[attrName]
+ if !found {
+ return ""
+ }
+
+ return fn(ifAddr)
+}
+
+// ifAddrAttrInit is called once at init()
+func ifAddrAttrInit() {
+ // Sorted for human readability
+ ifAddrAttrs = []AttrName{
+ "flags",
+ "name",
+ }
+
+ ifAddrAttrMap = map[AttrName]func(ifAddr IfAddr) string{
+ "flags": func(ifAddr IfAddr) string {
+ return ifAddr.Interface.Flags.String()
+ },
+ "name": func(ifAddr IfAddr) string {
+ return ifAddr.Interface.Name
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go
new file mode 100644
index 0000000..90b6576
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go
@@ -0,0 +1,1281 @@
+package sockaddr
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var (
+ // Centralize all regexps and regexp.Copy() where necessary.
+ signRE *regexp.Regexp = regexp.MustCompile(`^[\s]*[+-]`)
+ whitespaceRE *regexp.Regexp = regexp.MustCompile(`[\s]+`)
+ ifNameRE *regexp.Regexp = regexp.MustCompile(`^Ethernet adapter ([^:]+):`)
+ ipAddrRE *regexp.Regexp = regexp.MustCompile(`^ IPv[46] Address\. \. \. \. \. \. \. \. \. \. \. : ([^\s]+)`)
+)
+
+// IfAddrs is a slice of IfAddr
+type IfAddrs []IfAddr
+
+func (ifs IfAddrs) Len() int { return len(ifs) }
+
+// CmpIfFunc is the function signature that must be met to be used in the
+// OrderedIfAddrBy multiIfAddrSorter
+type CmpIfAddrFunc func(p1, p2 *IfAddr) int
+
+// multiIfAddrSorter implements the Sort interface, sorting the IfAddrs within.
+type multiIfAddrSorter struct {
+ ifAddrs IfAddrs
+ cmp []CmpIfAddrFunc
+}
+
+// Sort sorts the argument slice according to the Cmp functions passed to
+// OrderedIfAddrBy.
+func (ms *multiIfAddrSorter) Sort(ifAddrs IfAddrs) {
+ ms.ifAddrs = ifAddrs
+ sort.Sort(ms)
+}
+
+// OrderedIfAddrBy sorts SockAddr by the list of sort function pointers.
+func OrderedIfAddrBy(cmpFuncs ...CmpIfAddrFunc) *multiIfAddrSorter {
+ return &multiIfAddrSorter{
+ cmp: cmpFuncs,
+ }
+}
+
+// Len is part of sort.Interface.
+func (ms *multiIfAddrSorter) Len() int {
+ return len(ms.ifAddrs)
+}
+
+// Less is part of sort.Interface. It is implemented by looping along the Cmp()
+// functions until it finds a comparison that is either less than or greater
+// than. A return value of 0 defers sorting to the next function in the
+// multisorter (which means the results of sorting may leave the resutls in a
+// non-deterministic order).
+func (ms *multiIfAddrSorter) Less(i, j int) bool {
+ p, q := &ms.ifAddrs[i], &ms.ifAddrs[j]
+ // Try all but the last comparison.
+ var k int
+ for k = 0; k < len(ms.cmp)-1; k++ {
+ cmp := ms.cmp[k]
+ x := cmp(p, q)
+ switch x {
+ case -1:
+ // p < q, so we have a decision.
+ return true
+ case 1:
+ // p > q, so we have a decision.
+ return false
+ }
+ // p == q; try the next comparison.
+ }
+ // All comparisons to here said "equal", so just return whatever the
+ // final comparison reports.
+ switch ms.cmp[k](p, q) {
+ case -1:
+ return true
+ case 1:
+ return false
+ default:
+ // Still a tie! Now what?
+ return false
+ panic("undefined sort order for remaining items in the list")
+ }
+}
+
+// Swap is part of sort.Interface.
+func (ms *multiIfAddrSorter) Swap(i, j int) {
+ ms.ifAddrs[i], ms.ifAddrs[j] = ms.ifAddrs[j], ms.ifAddrs[i]
+}
+
+// AscIfAddress is a sorting function to sort IfAddrs by their respective
+// address type. Non-equal types are deferred in the sort.
+func AscIfAddress(p1Ptr, p2Ptr *IfAddr) int {
+ return AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfDefault is a sorting function to sort IfAddrs by whether or not they
+// have a default route or not. Non-equal types are deferred in the sort.
+//
+// FIXME: This is a particularly expensive sorting operation because of the
+// non-memoized calls to NewRouteInfo(). In an ideal world the routeInfo data
+// once at the start of the sort and pass it along as a context or by wrapping
+// the IfAddr type with this information (this would also solve the inability to
+// return errors and the possibility of failing silently). Fortunately,
+// N*log(N) where N = 3 is only ~6.2 invocations. Not ideal, but not worth
+// optimizing today. The common case is this gets called once or twice.
+// Patches welcome.
+func AscIfDefault(p1Ptr, p2Ptr *IfAddr) int {
+ ri, err := NewRouteInfo()
+ if err != nil {
+ return sortDeferDecision
+ }
+
+ defaultIfName, err := ri.GetDefaultInterfaceName()
+ if err != nil {
+ return sortDeferDecision
+ }
+
+ switch {
+ case p1Ptr.Interface.Name == defaultIfName && p2Ptr.Interface.Name == defaultIfName:
+ return sortDeferDecision
+ case p1Ptr.Interface.Name == defaultIfName:
+ return sortReceiverBeforeArg
+ case p2Ptr.Interface.Name == defaultIfName:
+ return sortArgBeforeReceiver
+ default:
+ return sortDeferDecision
+ }
+}
+
+// AscIfName is a sorting function to sort IfAddrs by their interface names.
+func AscIfName(p1Ptr, p2Ptr *IfAddr) int {
+ return strings.Compare(p1Ptr.Name, p2Ptr.Name)
+}
+
+// AscIfNetworkSize is a sorting function to sort IfAddrs by their respective
+// network mask size.
+func AscIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int {
+ return AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfPort is a sorting function to sort IfAddrs by their respective
+// port type. Non-equal types are deferred in the sort.
+func AscIfPort(p1Ptr, p2Ptr *IfAddr) int {
+ return AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfPrivate is a sorting function to sort IfAddrs by "private" values before
+// "public" values. Both IPv4 and IPv6 are compared against RFC6890 (RFC6890
+// includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and IPv6
+// includes RFC4193).
+func AscIfPrivate(p1Ptr, p2Ptr *IfAddr) int {
+ return AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfType is a sorting function to sort IfAddrs by their respective address
+// type. Non-equal types are deferred in the sort.
+func AscIfType(p1Ptr, p2Ptr *IfAddr) int {
+ return AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfAddress is identical to AscIfAddress but reverse ordered.
+func DescIfAddress(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfDefault is identical to AscIfDefault but reverse ordered.
+func DescIfDefault(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscIfDefault(p1Ptr, p2Ptr)
+}
+
+// DescIfName is identical to AscIfName but reverse ordered.
+func DescIfName(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * strings.Compare(p1Ptr.Name, p2Ptr.Name)
+}
+
+// DescIfNetworkSize is identical to AscIfNetworkSize but reverse ordered.
+func DescIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfPort is identical to AscIfPort but reverse ordered.
+func DescIfPort(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfPrivate is identical to AscIfPrivate but reverse ordered.
+func DescIfPrivate(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfType is identical to AscIfType but reverse ordered.
+func DescIfType(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// FilterIfByType filters IfAddrs and returns a list of the matching type
+func FilterIfByType(ifAddrs IfAddrs, type_ SockAddrType) (matchedIfs, excludedIfs IfAddrs) {
+ excludedIfs = make(IfAddrs, 0, len(ifAddrs))
+ matchedIfs = make(IfAddrs, 0, len(ifAddrs))
+
+ for _, ifAddr := range ifAddrs {
+ if ifAddr.SockAddr.Type()&type_ != 0 {
+ matchedIfs = append(matchedIfs, ifAddr)
+ } else {
+ excludedIfs = append(excludedIfs, ifAddr)
+ }
+ }
+ return matchedIfs, excludedIfs
+}
+
+// IfAttr forwards the selector to IfAttr.Attr() for resolution. If there is
+// more than one IfAddr, only the first IfAddr is used.
+func IfAttr(selectorName string, ifAddr IfAddr) (string, error) {
+ attrName := AttrName(strings.ToLower(selectorName))
+ attrVal, err := ifAddr.Attr(attrName)
+ return attrVal, err
+}
+
+// IfAttrs forwards the selector to IfAttrs.Attr() for resolution. If there is
+// more than one IfAddr, only the first IfAddr is used.
+func IfAttrs(selectorName string, ifAddrs IfAddrs) (string, error) {
+ if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ attrName := AttrName(strings.ToLower(selectorName))
+ attrVal, err := ifAddrs[0].Attr(attrName)
+ return attrVal, err
+}
+
+// GetAllInterfaces iterates over all available network interfaces and finds all
+// available IP addresses on each interface and converts them to
+// sockaddr.IPAddrs, and returning the result as an array of IfAddr.
+func GetAllInterfaces() (IfAddrs, error) {
+ ifs, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+
+ ifAddrs := make(IfAddrs, 0, len(ifs))
+ for _, intf := range ifs {
+ addrs, err := intf.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, addr := range addrs {
+ var ipAddr IPAddr
+ ipAddr, err = NewIPAddr(addr.String())
+ if err != nil {
+ return IfAddrs{}, fmt.Errorf("unable to create an IP address from %q", addr.String())
+ }
+
+ ifAddr := IfAddr{
+ SockAddr: ipAddr,
+ Interface: intf,
+ }
+ ifAddrs = append(ifAddrs, ifAddr)
+ }
+ }
+
+ return ifAddrs, nil
+}
+
+// GetDefaultInterfaces returns IfAddrs of the addresses attached to the default
+// route.
+func GetDefaultInterfaces() (IfAddrs, error) {
+ ri, err := NewRouteInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ defaultIfName, err := ri.GetDefaultInterfaceName()
+ if err != nil {
+ return nil, err
+ }
+
+ var defaultIfs, ifAddrs IfAddrs
+ ifAddrs, err = GetAllInterfaces()
+ for _, ifAddr := range ifAddrs {
+ if ifAddr.Name == defaultIfName {
+ defaultIfs = append(defaultIfs, ifAddr)
+ }
+ }
+
+ return defaultIfs, nil
+}
+
+// GetPrivateInterfaces returns an IfAddrs that are part of RFC 6890 and have a
+// default route. If the system can't determine its IP address or find an RFC
+// 6890 IP address, an empty IfAddrs will be returned instead. This function is
+// the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | include "RFC" "6890" }}'
+/// ```
+func GetPrivateInterfaces() (IfAddrs, error) {
+ privateIfs, err := GetAllInterfaces()
+ if err != nil {
+ return IfAddrs{}, err
+ }
+ if len(privateIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ privateIfs, _ = FilterIfByType(privateIfs, TypeIP)
+ if len(privateIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ privateIfs, _, err = IfByFlag("forwardable", privateIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ privateIfs, _, err = IfByFlag("up", privateIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ if len(privateIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(privateIfs)
+
+ privateIfs, _, err = IfByRFC("6890", privateIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ } else if len(privateIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ return privateIfs, nil
+}
+
+// GetPublicInterfaces returns an IfAddrs that are NOT part of RFC 6890 and has a
+// default route. If the system can't determine its IP address or find a non
+// RFC 6890 IP address, an empty IfAddrs will be returned instead. This
+// function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | exclude "RFC" "6890" }}'
+/// ```
+func GetPublicInterfaces() (IfAddrs, error) {
+ publicIfs, err := GetAllInterfaces()
+ if err != nil {
+ return IfAddrs{}, err
+ }
+ if len(publicIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ publicIfs, _ = FilterIfByType(publicIfs, TypeIP)
+ if len(publicIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ publicIfs, _, err = IfByFlag("forwardable", publicIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ publicIfs, _, err = IfByFlag("up", publicIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ if len(publicIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(publicIfs)
+
+ _, publicIfs, err = IfByRFC("6890", publicIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ } else if len(publicIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ return publicIfs, nil
+}
+
+// IfByAddress returns a list of matched and non-matched IfAddrs, or an error if
+// the regexp fails to compile.
+func IfByAddress(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+ re, err := regexp.Compile(inputRe)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Unable to compile address regexp %+q: %v", inputRe, err)
+ }
+
+ matchedAddrs := make(IfAddrs, 0, len(ifAddrs))
+ excludedAddrs := make(IfAddrs, 0, len(ifAddrs))
+ for _, addr := range ifAddrs {
+ if re.MatchString(addr.SockAddr.String()) {
+ matchedAddrs = append(matchedAddrs, addr)
+ } else {
+ excludedAddrs = append(excludedAddrs, addr)
+ }
+ }
+
+ return matchedAddrs, excludedAddrs, nil
+}
+
+// IfByName returns a list of matched and non-matched IfAddrs, or an error if
+// the regexp fails to compile.
+func IfByName(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+ re, err := regexp.Compile(inputRe)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Unable to compile name regexp %+q: %v", inputRe, err)
+ }
+
+ matchedAddrs := make(IfAddrs, 0, len(ifAddrs))
+ excludedAddrs := make(IfAddrs, 0, len(ifAddrs))
+ for _, addr := range ifAddrs {
+ if re.MatchString(addr.Name) {
+ matchedAddrs = append(matchedAddrs, addr)
+ } else {
+ excludedAddrs = append(excludedAddrs, addr)
+ }
+ }
+
+ return matchedAddrs, excludedAddrs, nil
+}
+
+// IfByPort returns a list of matched and non-matched IfAddrs, or an error if
+// the regexp fails to compile.
+func IfByPort(inputRe string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) {
+ re, err := regexp.Compile(inputRe)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Unable to compile port regexp %+q: %v", inputRe, err)
+ }
+
+ ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP)
+ matchedIfs = make(IfAddrs, 0, len(ipIfs))
+ excludedIfs = append(IfAddrs(nil), nonIfs...)
+ for _, addr := range ipIfs {
+ ipAddr := ToIPAddr(addr.SockAddr)
+ if ipAddr == nil {
+ continue
+ }
+
+ port := strconv.FormatInt(int64((*ipAddr).IPPort()), 10)
+ if re.MatchString(port) {
+ matchedIfs = append(matchedIfs, addr)
+ } else {
+ excludedIfs = append(excludedIfs, addr)
+ }
+ }
+
+ return matchedIfs, excludedIfs, nil
+}
+
+// IfByRFC returns a list of matched and non-matched IfAddrs that contain the
+// relevant RFC-specified traits.
+func IfByRFC(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+ inputRFC, err := strconv.ParseUint(selectorParam, 10, 64)
+ if err != nil {
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to parse RFC number %q: %v", selectorParam, err)
+ }
+
+ matchedIfAddrs := make(IfAddrs, 0, len(ifAddrs))
+ remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs))
+
+ rfcNetMap := KnownRFCs()
+ rfcNets, ok := rfcNetMap[uint(inputRFC)]
+ if !ok {
+ return nil, nil, fmt.Errorf("unsupported RFC %d", inputRFC)
+ }
+
+ for _, ifAddr := range ifAddrs {
+ var contained bool
+ for _, rfcNet := range rfcNets {
+ if rfcNet.Contains(ifAddr.SockAddr) {
+ matchedIfAddrs = append(matchedIfAddrs, ifAddr)
+ contained = true
+ break
+ }
+ }
+ if !contained {
+ remainingIfAddrs = append(remainingIfAddrs, ifAddr)
+ }
+ }
+
+ return matchedIfAddrs, remainingIfAddrs, nil
+}
+
+// IfByRFCs returns a list of matched and non-matched IfAddrs that contain the
+// relevant RFC-specified traits. Multiple RFCs can be specified and separated
+// by the `|` symbol. No protection is taken to ensure an IfAddr does not end
+// up in both the included and excluded list.
+func IfByRFCs(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+ var includedIfs, excludedIfs IfAddrs
+ for _, rfcStr := range strings.Split(selectorParam, "|") {
+ includedRFCIfs, excludedRFCIfs, err := IfByRFC(rfcStr, ifAddrs)
+ if err != nil {
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to lookup RFC number %q: %v", rfcStr, err)
+ }
+ includedIfs = append(includedIfs, includedRFCIfs...)
+ excludedIfs = append(excludedIfs, excludedRFCIfs...)
+ }
+
+ return includedIfs, excludedIfs, nil
+}
+
+// IfByMaskSize returns a list of matched and non-matched IfAddrs that have the
+// matching mask size.
+func IfByMaskSize(selectorParam string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) {
+ maskSize, err := strconv.ParseUint(selectorParam, 10, 64)
+ if err != nil {
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("invalid exclude size argument (%q): %v", selectorParam, err)
+ }
+
+ ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP)
+ matchedIfs = make(IfAddrs, 0, len(ipIfs))
+ excludedIfs = append(IfAddrs(nil), nonIfs...)
+ for _, addr := range ipIfs {
+ ipAddr := ToIPAddr(addr.SockAddr)
+ if ipAddr == nil {
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to filter mask sizes on non-IP type %s: %v", addr.SockAddr.Type().String(), addr.SockAddr.String())
+ }
+
+ switch {
+ case (*ipAddr).Type()&TypeIPv4 != 0 && maskSize > 32:
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv4 address: %d", maskSize)
+ case (*ipAddr).Type()&TypeIPv6 != 0 && maskSize > 128:
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv6 address: %d", maskSize)
+ }
+
+ if (*ipAddr).Maskbits() == int(maskSize) {
+ matchedIfs = append(matchedIfs, addr)
+ } else {
+ excludedIfs = append(excludedIfs, addr)
+ }
+ }
+
+ return matchedIfs, excludedIfs, nil
+}
+
+// IfByType returns a list of matching and non-matching IfAddr that match the
+// specified type. For instance:
+//
+// include "type" "IPv4,IPv6"
+//
+// will include any IfAddrs that is either an IPv4 or IPv6 address. Any
+// addresses on those interfaces that don't match will be included in the
+// remainder results.
+func IfByType(inputTypes string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+ matchingIfAddrs := make(IfAddrs, 0, len(ifAddrs))
+ remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs))
+
+ ifTypes := strings.Split(strings.ToLower(inputTypes), "|")
+ for _, ifType := range ifTypes {
+ switch ifType {
+ case "ip", "ipv4", "ipv6", "unix":
+ // Valid types
+ default:
+ return nil, nil, fmt.Errorf("unsupported type %q %q", ifType, inputTypes)
+ }
+ }
+
+ for _, ifAddr := range ifAddrs {
+ for _, ifType := range ifTypes {
+ var matched bool
+ switch {
+ case ifType == "ip" && ifAddr.SockAddr.Type()&TypeIP != 0:
+ matched = true
+ case ifType == "ipv4" && ifAddr.SockAddr.Type()&TypeIPv4 != 0:
+ matched = true
+ case ifType == "ipv6" && ifAddr.SockAddr.Type()&TypeIPv6 != 0:
+ matched = true
+ case ifType == "unix" && ifAddr.SockAddr.Type()&TypeUnix != 0:
+ matched = true
+ }
+
+ if matched {
+ matchingIfAddrs = append(matchingIfAddrs, ifAddr)
+ } else {
+ remainingIfAddrs = append(remainingIfAddrs, ifAddr)
+ }
+ }
+ }
+
+ return matchingIfAddrs, remainingIfAddrs, nil
+}
+
// IfByFlag returns a list of matching and non-matching IfAddrs that match the
// specified type. For instance:
//
// include "flag" "up,broadcast"
//
// will include any IfAddrs that have both the "up" and "broadcast" flags set.
// Any addresses on those interfaces that don't match will be omitted from the
// results.
func IfByFlag(inputFlags string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
	matchedAddrs := make(IfAddrs, 0, len(ifAddrs))
	excludedAddrs := make(IfAddrs, 0, len(ifAddrs))

	// want* booleans record which net.IP attribute predicates were requested.
	var wantForwardable,
		wantGlobalUnicast,
		wantInterfaceLocalMulticast,
		wantLinkLocalMulticast,
		wantLinkLocalUnicast,
		wantLoopback,
		wantMulticast,
		wantUnspecified bool
	// ifFlags accumulates the net.Flags bits every matching interface must
	// have set.  checkFlags/checkAttrs record whether flag-bit and/or IP
	// attribute checks were requested at all.
	var ifFlags net.Flags
	var checkFlags, checkAttrs bool
	// NOTE: flags are separated by `|` here, despite the "up,broadcast"
	// example in the doc comment above.
	for _, flagName := range strings.Split(strings.ToLower(inputFlags), "|") {
		switch flagName {
		case "broadcast":
			checkFlags = true
			ifFlags = ifFlags | net.FlagBroadcast
		case "down":
			// "down" is expressed by clearing the Up bit from the required
			// mask rather than setting a bit of its own.
			checkFlags = true
			ifFlags = (ifFlags &^ net.FlagUp)
		case "forwardable":
			checkAttrs = true
			wantForwardable = true
		case "global unicast":
			checkAttrs = true
			wantGlobalUnicast = true
		case "interface-local multicast":
			checkAttrs = true
			wantInterfaceLocalMulticast = true
		case "link-local multicast":
			checkAttrs = true
			wantLinkLocalMulticast = true
		case "link-local unicast":
			checkAttrs = true
			wantLinkLocalUnicast = true
		case "loopback":
			// "loopback" checks both the interface flag and the IP attribute.
			checkAttrs = true
			checkFlags = true
			ifFlags = ifFlags | net.FlagLoopback
			wantLoopback = true
		case "multicast":
			// "multicast" likewise checks both the flag and the attribute.
			checkAttrs = true
			checkFlags = true
			ifFlags = ifFlags | net.FlagMulticast
			wantMulticast = true
		case "point-to-point":
			checkFlags = true
			ifFlags = ifFlags | net.FlagPointToPoint
		case "unspecified":
			checkAttrs = true
			wantUnspecified = true
		case "up":
			checkFlags = true
			ifFlags = ifFlags | net.FlagUp
		default:
			return nil, nil, fmt.Errorf("Unknown interface flag: %+q", flagName)
		}
	}

	for _, ifAddr := range ifAddrs {
		var matched bool
		// Flag matching requires ALL requested flag bits to be present;
		// attribute matching (below) is satisfied by ANY one requested
		// attribute.  Either route alone is sufficient to match.
		if checkFlags && ifAddr.Interface.Flags&ifFlags == ifFlags {
			matched = true
		}
		if checkAttrs {
			if ip := ToIPAddr(ifAddr.SockAddr); ip != nil {
				netIP := (*ip).NetIP()
				switch {
				case wantGlobalUnicast && netIP.IsGlobalUnicast():
					matched = true
				case wantInterfaceLocalMulticast && netIP.IsInterfaceLocalMulticast():
					matched = true
				case wantLinkLocalMulticast && netIP.IsLinkLocalMulticast():
					matched = true
				case wantLinkLocalUnicast && netIP.IsLinkLocalUnicast():
					matched = true
				case wantLoopback && netIP.IsLoopback():
					matched = true
				case wantMulticast && netIP.IsMulticast():
					matched = true
				case wantUnspecified && netIP.IsUnspecified():
					matched = true
				case wantForwardable && !IsRFC(ForwardingBlacklist, ifAddr.SockAddr):
					matched = true
				}
			}
		}
		if matched {
			matchedAddrs = append(matchedAddrs, ifAddr)
		} else {
			excludedAddrs = append(excludedAddrs, ifAddr)
		}
	}
	return matchedAddrs, excludedAddrs, nil
}
+
+// IfByNetwork returns an IfAddrs that are equal to or included within the
+// network passed in by selector.
+func IfByNetwork(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, IfAddrs, error) {
+ var includedIfs, excludedIfs IfAddrs
+ for _, netStr := range strings.Split(selectorParam, "|") {
+ netAddr, err := NewIPAddr(netStr)
+ if err != nil {
+ return nil, nil, fmt.Errorf("unable to create an IP address from %+q: %v", netStr, err)
+ }
+
+ for _, ifAddr := range inputIfAddrs {
+ if netAddr.Contains(ifAddr.SockAddr) {
+ includedIfs = append(includedIfs, ifAddr)
+ } else {
+ excludedIfs = append(excludedIfs, ifAddr)
+ }
+ }
+ }
+
+ return includedIfs, excludedIfs, nil
+}
+
+// IfAddrMath will return a new IfAddr struct with a mutated value.
+func IfAddrMath(operation, value string, inputIfAddr IfAddr) (IfAddr, error) {
+ // Regexp used to enforce the sign being a required part of the grammar for
+ // some values.
+ signRe := signRE.Copy()
+
+ switch strings.ToLower(operation) {
+ case "address":
+ // "address" operates on the IP address and is allowed to overflow or
+ // underflow networks, however it will wrap along the underlying address's
+ // underlying type.
+
+ if !signRe.MatchString(value) {
+ return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation)
+ }
+
+ switch sockType := inputIfAddr.SockAddr.Type(); sockType {
+ case TypeIPv4:
+ // 33 == Accept any uint32 value
+ // TODO(seanc@): Add the ability to parse hex
+ i, err := strconv.ParseInt(value, 10, 33)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr)
+ ipv4Uint32 := uint32(ipv4.Address)
+ ipv4Uint32 += uint32(i)
+ return IfAddr{
+ SockAddr: IPv4Addr{
+ Address: IPv4Address(ipv4Uint32),
+ Mask: ipv4.Mask,
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ case TypeIPv6:
+ // 64 == Accept any int32 value
+ // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int.
+ i, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr)
+ ipv6BigIntA := new(big.Int)
+ ipv6BigIntA.Set(ipv6.Address)
+ ipv6BigIntB := big.NewInt(i)
+
+ ipv6Addr := ipv6BigIntA.Add(ipv6BigIntA, ipv6BigIntB)
+ ipv6Addr.And(ipv6Addr, ipv6HostMask)
+
+ return IfAddr{
+ SockAddr: IPv6Addr{
+ Address: IPv6Address(ipv6Addr),
+ Mask: ipv6.Mask,
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ default:
+ return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType)
+ }
+ case "network":
+ // "network" operates on the network address. Positive values start at the
+ // network address and negative values wrap at the network address, which
+ // means a "-1" value on a network will be the broadcast address after
+ // wrapping is applied.
+
+ if !signRe.MatchString(value) {
+ return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation)
+ }
+
+ switch sockType := inputIfAddr.SockAddr.Type(); sockType {
+ case TypeIPv4:
+ // 33 == Accept any uint32 value
+ // TODO(seanc@): Add the ability to parse hex
+ i, err := strconv.ParseInt(value, 10, 33)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr)
+ ipv4Uint32 := uint32(ipv4.NetworkAddress())
+
+ // Wrap along network mask boundaries. EZ-mode wrapping made possible by
+ // use of int64 vs a uint.
+ var wrappedMask int64
+ if i >= 0 {
+ wrappedMask = i
+ } else {
+ wrappedMask = 1 + i + int64(^uint32(ipv4.Mask))
+ }
+
+ ipv4Uint32 = ipv4Uint32 + (uint32(wrappedMask) &^ uint32(ipv4.Mask))
+
+ return IfAddr{
+ SockAddr: IPv4Addr{
+ Address: IPv4Address(ipv4Uint32),
+ Mask: ipv4.Mask,
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ case TypeIPv6:
+ // 64 == Accept any int32 value
+ // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int.
+ i, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr)
+ ipv6BigInt := new(big.Int)
+ ipv6BigInt.Set(ipv6.NetworkAddress())
+
+ mask := new(big.Int)
+ mask.Set(ipv6.Mask)
+ if i > 0 {
+ wrappedMask := new(big.Int)
+ wrappedMask.SetInt64(i)
+
+ wrappedMask.AndNot(wrappedMask, mask)
+ ipv6BigInt.Add(ipv6BigInt, wrappedMask)
+ } else {
+ // Mask off any bits that exceed the network size. Subtract the
+ // wrappedMask from the last usable - 1
+ wrappedMask := new(big.Int)
+ wrappedMask.SetInt64(-1 * i)
+ wrappedMask.Sub(wrappedMask, big.NewInt(1))
+
+ wrappedMask.AndNot(wrappedMask, mask)
+
+ lastUsable := new(big.Int)
+ lastUsable.Set(ipv6.LastUsable().(IPv6Addr).Address)
+
+ ipv6BigInt = lastUsable.Sub(lastUsable, wrappedMask)
+ }
+
+ return IfAddr{
+ SockAddr: IPv6Addr{
+ Address: IPv6Address(ipv6BigInt),
+ Mask: ipv6.Mask,
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ default:
+ return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType)
+ }
+ case "mask":
+ // "mask" operates on the IP address and returns the IP address on
+ // which the given integer mask has been applied. If the applied mask
+ // corresponds to a larger network than the mask of the IP address,
+ // the latter will be replaced by the former.
+ switch sockType := inputIfAddr.SockAddr.Type(); sockType {
+ case TypeIPv4:
+ i, err := strconv.ParseUint(value, 10, 32)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ if i > 32 {
+ return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv4 addresses must be between 0 and 32", operation)
+ }
+
+ ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr)
+
+ ipv4Mask := net.CIDRMask(int(i), 32)
+ ipv4MaskUint32 := binary.BigEndian.Uint32(ipv4Mask)
+
+ maskedIpv4 := ipv4.NetIP().Mask(ipv4Mask)
+ maskedIpv4Uint32 := binary.BigEndian.Uint32(maskedIpv4)
+
+ maskedIpv4MaskUint32 := uint32(ipv4.Mask)
+
+ if ipv4MaskUint32 < maskedIpv4MaskUint32 {
+ maskedIpv4MaskUint32 = ipv4MaskUint32
+ }
+
+ return IfAddr{
+ SockAddr: IPv4Addr{
+ Address: IPv4Address(maskedIpv4Uint32),
+ Mask: IPv4Mask(maskedIpv4MaskUint32),
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ case TypeIPv6:
+ i, err := strconv.ParseUint(value, 10, 32)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ if i > 128 {
+ return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv6 addresses must be between 0 and 64", operation)
+ }
+
+ ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr)
+
+ ipv6Mask := net.CIDRMask(int(i), 128)
+ ipv6MaskBigInt := new(big.Int)
+ ipv6MaskBigInt.SetBytes(ipv6Mask)
+
+ maskedIpv6 := ipv6.NetIP().Mask(ipv6Mask)
+ maskedIpv6BigInt := new(big.Int)
+ maskedIpv6BigInt.SetBytes(maskedIpv6)
+
+ maskedIpv6MaskBigInt := new(big.Int)
+ maskedIpv6MaskBigInt.Set(ipv6.Mask)
+
+ if ipv6MaskBigInt.Cmp(maskedIpv6MaskBigInt) == -1 {
+ maskedIpv6MaskBigInt = ipv6MaskBigInt
+ }
+
+ return IfAddr{
+ SockAddr: IPv6Addr{
+ Address: IPv6Address(maskedIpv6BigInt),
+ Mask: IPv6Mask(maskedIpv6MaskBigInt),
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ default:
+ return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType)
+ }
+ default:
+ return IfAddr{}, fmt.Errorf("unsupported math operation: %q", operation)
+ }
+}
+
+// IfAddrsMath will apply an IfAddrMath operation each IfAddr struct. Any
+// failure will result in zero results.
+func IfAddrsMath(operation, value string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+ outputAddrs := make(IfAddrs, 0, len(inputIfAddrs))
+ for _, ifAddr := range inputIfAddrs {
+ result, err := IfAddrMath(operation, value, ifAddr)
+ if err != nil {
+ return IfAddrs{}, fmt.Errorf("unable to perform an IPMath operation on %s: %v", ifAddr, err)
+ }
+ outputAddrs = append(outputAddrs, result)
+ }
+ return outputAddrs, nil
+}
+
+// IncludeIfs returns an IfAddrs based on the passed in selector.
+func IncludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+ var includedIfs IfAddrs
+ var err error
+
+ switch strings.ToLower(selectorName) {
+ case "address":
+ includedIfs, _, err = IfByAddress(selectorParam, inputIfAddrs)
+ case "flag", "flags":
+ includedIfs, _, err = IfByFlag(selectorParam, inputIfAddrs)
+ case "name":
+ includedIfs, _, err = IfByName(selectorParam, inputIfAddrs)
+ case "network":
+ includedIfs, _, err = IfByNetwork(selectorParam, inputIfAddrs)
+ case "port":
+ includedIfs, _, err = IfByPort(selectorParam, inputIfAddrs)
+ case "rfc", "rfcs":
+ includedIfs, _, err = IfByRFCs(selectorParam, inputIfAddrs)
+ case "size":
+ includedIfs, _, err = IfByMaskSize(selectorParam, inputIfAddrs)
+ case "type":
+ includedIfs, _, err = IfByType(selectorParam, inputIfAddrs)
+ default:
+ return IfAddrs{}, fmt.Errorf("invalid include selector %q", selectorName)
+ }
+
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ return includedIfs, nil
+}
+
+// ExcludeIfs returns an IfAddrs based on the passed in selector.
+func ExcludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+ var excludedIfs IfAddrs
+ var err error
+
+ switch strings.ToLower(selectorName) {
+ case "address":
+ _, excludedIfs, err = IfByAddress(selectorParam, inputIfAddrs)
+ case "flag", "flags":
+ _, excludedIfs, err = IfByFlag(selectorParam, inputIfAddrs)
+ case "name":
+ _, excludedIfs, err = IfByName(selectorParam, inputIfAddrs)
+ case "network":
+ _, excludedIfs, err = IfByNetwork(selectorParam, inputIfAddrs)
+ case "port":
+ _, excludedIfs, err = IfByPort(selectorParam, inputIfAddrs)
+ case "rfc", "rfcs":
+ _, excludedIfs, err = IfByRFCs(selectorParam, inputIfAddrs)
+ case "size":
+ _, excludedIfs, err = IfByMaskSize(selectorParam, inputIfAddrs)
+ case "type":
+ _, excludedIfs, err = IfByType(selectorParam, inputIfAddrs)
+ default:
+ return IfAddrs{}, fmt.Errorf("invalid exclude selector %q", selectorName)
+ }
+
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ return excludedIfs, nil
+}
+
+// SortIfBy returns an IfAddrs sorted based on the passed in selector. Multiple
+// sort clauses can be passed in as a comma delimited list without whitespace.
+func SortIfBy(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+ sortedIfs := append(IfAddrs(nil), inputIfAddrs...)
+
+ clauses := strings.Split(selectorParam, ",")
+ sortFuncs := make([]CmpIfAddrFunc, len(clauses))
+
+ for i, clause := range clauses {
+ switch strings.TrimSpace(strings.ToLower(clause)) {
+ case "+address", "address":
+ // The "address" selector returns an array of IfAddrs
+ // ordered by the network address. IfAddrs that are not
+ // comparable will be at the end of the list and in a
+ // non-deterministic order.
+ sortFuncs[i] = AscIfAddress
+ case "-address":
+ sortFuncs[i] = DescIfAddress
+ case "+default", "default":
+ sortFuncs[i] = AscIfDefault
+ case "-default":
+ sortFuncs[i] = DescIfDefault
+ case "+name", "name":
+ // The "name" selector returns an array of IfAddrs
+ // ordered by the interface name.
+ sortFuncs[i] = AscIfName
+ case "-name":
+ sortFuncs[i] = DescIfName
+ case "+port", "port":
+ // The "port" selector returns an array of IfAddrs
+ // ordered by the port, if included in the IfAddr.
+ // IfAddrs that are not comparable will be at the end of
+ // the list and in a non-deterministic order.
+ sortFuncs[i] = AscIfPort
+ case "-port":
+ sortFuncs[i] = DescIfPort
+ case "+private", "private":
+ // The "private" selector returns an array of IfAddrs
+ // ordered by private addresses first. IfAddrs that are
+ // not comparable will be at the end of the list and in
+ // a non-deterministic order.
+ sortFuncs[i] = AscIfPrivate
+ case "-private":
+ sortFuncs[i] = DescIfPrivate
+ case "+size", "size":
+ // The "size" selector returns an array of IfAddrs
+ // ordered by the size of the network mask, smaller mask
+ // (larger number of hosts per network) to largest
+ // (e.g. a /24 sorts before a /32).
+ sortFuncs[i] = AscIfNetworkSize
+ case "-size":
+ sortFuncs[i] = DescIfNetworkSize
+ case "+type", "type":
+ // The "type" selector returns an array of IfAddrs
+ // ordered by the type of the IfAddr. The sort order is
+ // Unix, IPv4, then IPv6.
+ sortFuncs[i] = AscIfType
+ case "-type":
+ sortFuncs[i] = DescIfType
+ default:
+ // Return an empty list for invalid sort types.
+ return IfAddrs{}, fmt.Errorf("unknown sort type: %q", clause)
+ }
+ }
+
+ OrderedIfAddrBy(sortFuncs...).Sort(sortedIfs)
+
+ return sortedIfs, nil
+}
+
+// UniqueIfAddrsBy creates a unique set of IfAddrs based on the matching
+// selector. UniqueIfAddrsBy assumes the input has already been sorted.
+func UniqueIfAddrsBy(selectorName string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+ attrName := strings.ToLower(selectorName)
+
+ ifs := make(IfAddrs, 0, len(inputIfAddrs))
+ var lastMatch string
+ for _, ifAddr := range inputIfAddrs {
+ var out string
+ switch attrName {
+ case "address":
+ out = ifAddr.SockAddr.String()
+ case "name":
+ out = ifAddr.Name
+ default:
+ return nil, fmt.Errorf("unsupported unique constraint %+q", selectorName)
+ }
+
+ switch {
+ case lastMatch == "", lastMatch != out:
+ lastMatch = out
+ ifs = append(ifs, ifAddr)
+ case lastMatch == out:
+ continue
+ }
+ }
+
+ return ifs, nil
+}
+
+// JoinIfAddrs joins an IfAddrs and returns a string
+func JoinIfAddrs(selectorName string, joinStr string, inputIfAddrs IfAddrs) (string, error) {
+ outputs := make([]string, 0, len(inputIfAddrs))
+ attrName := AttrName(strings.ToLower(selectorName))
+
+ for _, ifAddr := range inputIfAddrs {
+ var attrVal string
+ var err error
+ attrVal, err = ifAddr.Attr(attrName)
+ if err != nil {
+ return "", err
+ }
+ outputs = append(outputs, attrVal)
+ }
+ return strings.Join(outputs, joinStr), nil
+}
+
+// LimitIfAddrs returns a slice of IfAddrs based on the specified limit.
+func LimitIfAddrs(lim uint, in IfAddrs) (IfAddrs, error) {
+ // Clamp the limit to the length of the array
+ if int(lim) > len(in) {
+ lim = uint(len(in))
+ }
+
+ return in[0:lim], nil
+}
+
+// OffsetIfAddrs returns a slice of IfAddrs based on the specified offset.
+func OffsetIfAddrs(off int, in IfAddrs) (IfAddrs, error) {
+ var end bool
+ if off < 0 {
+ end = true
+ off = off * -1
+ }
+
+ if off > len(in) {
+ return IfAddrs{}, fmt.Errorf("unable to seek past the end of the interface array: offset (%d) exceeds the number of interfaces (%d)", off, len(in))
+ }
+
+ if end {
+ return in[len(in)-off:], nil
+ }
+ return in[off:], nil
+}
+
+func (ifAddr IfAddr) String() string {
+ return fmt.Sprintf("%s %v", ifAddr.SockAddr, ifAddr.Interface)
+}
+
// parseDefaultIfNameFromRoute parses standard route(8)'s output for the *BSDs
// and Solaris and returns the name found on the "interface:" line, or an
// error when no such line exists.
func parseDefaultIfNameFromRoute(routeOut string) (string, error) {
	for _, line := range strings.Split(routeOut, "\n") {
		cols := strings.SplitN(line, ":", 2)
		if len(cols) != 2 || strings.TrimSpace(cols[0]) != "interface" {
			continue
		}

		return strings.TrimSpace(cols[1]), nil
	}

	return "", errors.New("No default interface found")
}
+
+// parseDefaultIfNameFromIPCmd parses the default interface from ip(8) for
+// Linux.
+func parseDefaultIfNameFromIPCmd(routeOut string) (string, error) {
+ lines := strings.Split(routeOut, "\n")
+ re := whitespaceRE.Copy()
+ for _, line := range lines {
+ kvs := re.Split(line, -1)
+ if len(kvs) < 5 {
+ continue
+ }
+
+ if kvs[0] == "default" &&
+ kvs[1] == "via" &&
+ kvs[3] == "dev" {
+ ifName := strings.TrimSpace(kvs[4])
+ return ifName, nil
+ }
+ }
+
+ return "", errors.New("No default interface found")
+}
+
+// parseDefaultIfNameWindows parses the default interface from `netstat -rn` and
+// `ipconfig` on Windows.
+func parseDefaultIfNameWindows(routeOut, ipconfigOut string) (string, error) {
+ defaultIPAddr, err := parseDefaultIPAddrWindowsRoute(routeOut)
+ if err != nil {
+ return "", err
+ }
+
+ ifName, err := parseDefaultIfNameWindowsIPConfig(defaultIPAddr, ipconfigOut)
+ if err != nil {
+ return "", err
+ }
+
+ return ifName, nil
+}
+
+// parseDefaultIPAddrWindowsRoute parses the IP address on the default interface
+// `netstat -rn`.
+//
+// NOTES(sean): Only IPv4 addresses are parsed at this time. If you have an
+// IPv6 connected host, submit an issue on github.com/hashicorp/go-sockaddr with
+// the output from `netstat -rn`, `ipconfig`, and version of Windows to see IPv6
+// support added.
+func parseDefaultIPAddrWindowsRoute(routeOut string) (string, error) {
+ lines := strings.Split(routeOut, "\n")
+ re := whitespaceRE.Copy()
+ for _, line := range lines {
+ kvs := re.Split(strings.TrimSpace(line), -1)
+ if len(kvs) < 3 {
+ continue
+ }
+
+ if kvs[0] == "0.0.0.0" && kvs[1] == "0.0.0.0" {
+ defaultIPAddr := strings.TrimSpace(kvs[3])
+ return defaultIPAddr, nil
+ }
+ }
+
+ return "", errors.New("No IP on default interface found")
+}
+
+// parseDefaultIfNameWindowsIPConfig parses the output of `ipconfig` to find the
+// interface name forwarding traffic to the default gateway.
+func parseDefaultIfNameWindowsIPConfig(defaultIPAddr, routeOut string) (string, error) {
+ lines := strings.Split(routeOut, "\n")
+ ifNameRe := ifNameRE.Copy()
+ ipAddrRe := ipAddrRE.Copy()
+ var ifName string
+ for _, line := range lines {
+ switch ifNameMatches := ifNameRe.FindStringSubmatch(line); {
+ case len(ifNameMatches) > 1:
+ ifName = ifNameMatches[1]
+ continue
+ }
+
+ switch ipAddrMatches := ipAddrRe.FindStringSubmatch(line); {
+ case len(ipAddrMatches) > 1 && ipAddrMatches[1] == defaultIPAddr:
+ return ifName, nil
+ }
+ }
+
+ return "", errors.New("No default interface found with matching IP")
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifattr.go b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go
new file mode 100644
index 0000000..6984cb4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go
@@ -0,0 +1,65 @@
+package sockaddr
+
+import (
+ "fmt"
+ "net"
+)
+
// IfAddr is a union of a SockAddr and a net.Interface. Both embedded types'
// methods and fields (e.g. Name, Flags, String()) are promoted onto IfAddr.
type IfAddr struct {
	SockAddr
	net.Interface
}
+
+// Attr returns the named attribute as a string
+func (ifAddr IfAddr) Attr(attrName AttrName) (string, error) {
+ val := IfAddrAttr(ifAddr, attrName)
+ if val != "" {
+ return val, nil
+ }
+
+ return Attr(ifAddr.SockAddr, attrName)
+}
+
+// Attr returns the named attribute as a string
+func Attr(sa SockAddr, attrName AttrName) (string, error) {
+ switch sockType := sa.Type(); {
+ case sockType&TypeIP != 0:
+ ip := *ToIPAddr(sa)
+ attrVal := IPAddrAttr(ip, attrName)
+ if attrVal != "" {
+ return attrVal, nil
+ }
+
+ if sockType == TypeIPv4 {
+ ipv4 := *ToIPv4Addr(sa)
+ attrVal := IPv4AddrAttr(ipv4, attrName)
+ if attrVal != "" {
+ return attrVal, nil
+ }
+ } else if sockType == TypeIPv6 {
+ ipv6 := *ToIPv6Addr(sa)
+ attrVal := IPv6AddrAttr(ipv6, attrName)
+ if attrVal != "" {
+ return attrVal, nil
+ }
+ }
+
+ case sockType == TypeUnix:
+ us := *ToUnixSock(sa)
+ attrVal := UnixSockAttr(us, attrName)
+ if attrVal != "" {
+ return attrVal, nil
+ }
+ }
+
+ // Non type-specific attributes
+ switch attrName {
+ case "string":
+ return sa.String(), nil
+ case "type":
+ return sa.Type().String(), nil
+ }
+
+ return "", fmt.Errorf("unsupported attribute name %q", attrName)
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go
new file mode 100644
index 0000000..b47d15c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go
@@ -0,0 +1,169 @@
+package sockaddr
+
+import (
+ "fmt"
+ "math/big"
+ "net"
+ "strings"
+)
+
// Constants for the sizes of IPv3, IPv4, and IPv6 address types.
const (
	// NOTE(review): "IPv3" with length 6 is unusual (6 bytes matches an
	// EUI-48/MAC address, not any IP version) — confirm intent upstream.
	IPv3len = 6
	IPv4len = 4  // bytes in an IPv4 address
	IPv6len = 16 // bytes in an IPv6 address
)
+
// IPAddr is a generic IP address interface for IPv4 and IPv6 addresses,
// networks, and socket endpoints.
type IPAddr interface {
	SockAddr
	// Presentation helpers for the raw address bits.
	AddressBinString() string
	AddressHexString() string
	// Comparison helpers; each returns an ordering integer.
	Cmp(SockAddr) int
	CmpAddress(SockAddr) int
	CmpPort(SockAddr) int
	// Addresses derived from this address and its network.
	FirstUsable() IPAddr
	Host() IPAddr
	IPPort() IPPort
	LastUsable() IPAddr
	Maskbits() int
	// Bridges to the standard library's net types.
	NetIP() *net.IP
	NetIPMask() *net.IPMask
	NetIPNet() *net.IPNet
	Network() IPAddr
	Octets() []int
}
+
// IPPort is the type for an IP port number for the TCP and UDP IP transports.
type IPPort uint16

// IPPrefixLen is a typed integer representing the prefix length for a given
// IPAddr.
type IPPrefixLen byte

// ipAddrAttrMap is a map of the IPAddr type-specific attributes; populated by
// ipAddrInit() at package load time.
var ipAddrAttrMap map[AttrName]func(IPAddr) string

// ipAddrAttrs lists the supported attribute names; populated by ipAddrInit().
var ipAddrAttrs []AttrName
+
// init populates the package-level IPAddr attribute tables at load time.
func init() {
	ipAddrInit()
}
+
+// NewIPAddr creates a new IPAddr from a string. Returns nil if the string is
+// not an IPv4 or an IPv6 address.
+func NewIPAddr(addr string) (IPAddr, error) {
+ ipv4Addr, err := NewIPv4Addr(addr)
+ if err == nil {
+ return ipv4Addr, nil
+ }
+
+ ipv6Addr, err := NewIPv6Addr(addr)
+ if err == nil {
+ return ipv6Addr, nil
+ }
+
+ return nil, fmt.Errorf("invalid IPAddr %v", addr)
+}
+
+// IPAddrAttr returns a string representation of an attribute for the given
+// IPAddr.
+func IPAddrAttr(ip IPAddr, selector AttrName) string {
+ fn, found := ipAddrAttrMap[selector]
+ if !found {
+ return ""
+ }
+
+ return fn(ip)
+}
+
// IPAttrs returns a list of attributes supported by the IPAddr type.
// NOTE: the returned slice is the package-level list itself, not a copy;
// callers must not mutate it.
func IPAttrs() []AttrName {
	return ipAddrAttrs
}
+
+// MustIPAddr is a helper method that must return an IPAddr or panic on invalid
+// input.
+func MustIPAddr(addr string) IPAddr {
+ ip, err := NewIPAddr(addr)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create an IPAddr from %+q: %v", addr, err))
+ }
+ return ip
+}
+
+// ipAddrInit is called once at init()
+func ipAddrInit() {
+ // Sorted for human readability
+ ipAddrAttrs = []AttrName{
+ "host",
+ "address",
+ "port",
+ "netmask",
+ "network",
+ "mask_bits",
+ "binary",
+ "hex",
+ "first_usable",
+ "last_usable",
+ "octets",
+ }
+
+ ipAddrAttrMap = map[AttrName]func(ip IPAddr) string{
+ "address": func(ip IPAddr) string {
+ return ip.NetIP().String()
+ },
+ "binary": func(ip IPAddr) string {
+ return ip.AddressBinString()
+ },
+ "first_usable": func(ip IPAddr) string {
+ return ip.FirstUsable().String()
+ },
+ "hex": func(ip IPAddr) string {
+ return ip.AddressHexString()
+ },
+ "host": func(ip IPAddr) string {
+ return ip.Host().String()
+ },
+ "last_usable": func(ip IPAddr) string {
+ return ip.LastUsable().String()
+ },
+ "mask_bits": func(ip IPAddr) string {
+ return fmt.Sprintf("%d", ip.Maskbits())
+ },
+ "netmask": func(ip IPAddr) string {
+ switch v := ip.(type) {
+ case IPv4Addr:
+ ipv4Mask := IPv4Addr{
+ Address: IPv4Address(v.Mask),
+ Mask: IPv4HostMask,
+ }
+ return ipv4Mask.String()
+ case IPv6Addr:
+ ipv6Mask := new(big.Int)
+ ipv6Mask.Set(v.Mask)
+ ipv6MaskAddr := IPv6Addr{
+ Address: IPv6Address(ipv6Mask),
+ Mask: ipv6HostMask,
+ }
+ return ipv6MaskAddr.String()
+ default:
+ return fmt.Sprintf("", ip)
+ }
+ },
+ "network": func(ip IPAddr) string {
+ return ip.Network().NetIP().String()
+ },
+ "octets": func(ip IPAddr) string {
+ octets := ip.Octets()
+ octetStrs := make([]string, 0, len(octets))
+ for _, octet := range octets {
+ octetStrs = append(octetStrs, fmt.Sprintf("%d", octet))
+ }
+ return strings.Join(octetStrs, " ")
+ },
+ "port": func(ip IPAddr) string {
+ return fmt.Sprintf("%d", ip.IPPort())
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go
new file mode 100644
index 0000000..6eeb7dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go
@@ -0,0 +1,98 @@
+package sockaddr
+
+import "bytes"
+
+type IPAddrs []IPAddr
+
+func (s IPAddrs) Len() int { return len(s) }
+func (s IPAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// // SortIPAddrsByCmp is a type that satisfies sort.Interface and can be used
+// // by the routines in this package. The SortIPAddrsByCmp type is used to
+// // sort IPAddrs by Cmp()
+// type SortIPAddrsByCmp struct{ IPAddrs }
+
+// // Less reports whether the element with index i should sort before the
+// // element with index j.
+// func (s SortIPAddrsByCmp) Less(i, j int) bool {
+// // Sort by Type, then address, then port number.
+// return Less(s.IPAddrs[i], s.IPAddrs[j])
+// }
+
+// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and
+// can be used by the routines in this package. The
+// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest
+// network (most specific to largest network).
+type SortIPAddrsByNetworkSize struct{ IPAddrs }
+
+// Less reports whether the element with index i should sort before the
+// element with index j.
+func (s SortIPAddrsByNetworkSize) Less(i, j int) bool {
+ // Sort masks with a larger binary value (i.e. fewer hosts per network
+ // prefix) after masks with a smaller value (larger number of hosts per
+ // prefix).
+ switch bytes.Compare([]byte(*s.IPAddrs[i].NetIPMask()), []byte(*s.IPAddrs[j].NetIPMask())) {
+ case 0:
+ // Fall through to the second test if the net.IPMasks are the
+ // same.
+ break
+ case 1:
+ return true
+ case -1:
+ return false
+ default:
+ panic("bad, m'kay?")
+ }
+
+ // Sort IPs based on the length (i.e. prefer IPv4 over IPv6).
+ iLen := len(*s.IPAddrs[i].NetIP())
+ jLen := len(*s.IPAddrs[j].NetIP())
+ if iLen != jLen {
+ return iLen > jLen
+ }
+
+ // Sort IPs based on their network address from lowest to highest.
+ switch bytes.Compare(s.IPAddrs[i].NetIPNet().IP, s.IPAddrs[j].NetIPNet().IP) {
+ case 0:
+ break
+ case 1:
+ return false
+ case -1:
+ return true
+ default:
+ panic("lol wut?")
+ }
+
+ // If a host does not have a port set, it always sorts after hosts
+ // that have a port (e.g. a host with a /32 and port number is more
+ // specific and should sort first over a host with a /32 but no port
+ // set).
+ if s.IPAddrs[i].IPPort() == 0 || s.IPAddrs[j].IPPort() == 0 {
+ return false
+ }
+ return s.IPAddrs[i].IPPort() < s.IPAddrs[j].IPPort()
+}
+
+// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and
+// can be used by the routines in this package. The
+// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest
+// network (most specific to largest network).
+type SortIPAddrsBySpecificMaskLen struct{ IPAddrs }
+
+// Less reports whether the element with index i should sort before the
+// element with index j.
+func (s SortIPAddrsBySpecificMaskLen) Less(i, j int) bool {
+ return s.IPAddrs[i].Maskbits() > s.IPAddrs[j].Maskbits()
+}
+
+// SortIPAddrsByBroadMaskLen is a type that satisfies sort.Interface and can
+// be used by the routines in this package. The SortIPAddrsByBroadMaskLen
+// type is used to sort IPAddrs by largest network (i.e. largest subnets
+// first).
+type SortIPAddrsByBroadMaskLen struct{ IPAddrs }
+
+// Less reports whether the element with index i should sort before the
+// element with index j.
+func (s SortIPAddrsByBroadMaskLen) Less(i, j int) bool {
+ return s.IPAddrs[i].Maskbits() < s.IPAddrs[j].Maskbits()
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go
new file mode 100644
index 0000000..4d395dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go
@@ -0,0 +1,516 @@
+package sockaddr
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type (
+ // IPv4Address is a named type representing an IPv4 address.
+ IPv4Address uint32
+
+ // IPv4Network is a named type representing an IPv4 network.
+ IPv4Network uint32
+
+ // IPv4Mask is a named type representing an IPv4 network mask.
+ IPv4Mask uint32
+)
+
+// IPv4HostMask is a constant represents a /32 IPv4 Address
+// (i.e. 255.255.255.255).
+const IPv4HostMask = IPv4Mask(0xffffffff)
+
+// ipv4AddrAttrMap is a map of the IPv4Addr type-specific attributes.
+var ipv4AddrAttrMap map[AttrName]func(IPv4Addr) string
+var ipv4AddrAttrs []AttrName
+var trailingHexNetmaskRE *regexp.Regexp
+
+// IPv4Addr implements a convenience wrapper around the union of Go's
+// built-in net.IP and net.IPNet types. In UNIX-speak, IPv4Addr implements
+// `sockaddr` when the the address family is set to AF_INET
+// (i.e. `sockaddr_in`).
+type IPv4Addr struct {
+ IPAddr
+ Address IPv4Address
+ Mask IPv4Mask
+ Port IPPort
+}
+
+func init() {
+ ipv4AddrInit()
+ trailingHexNetmaskRE = regexp.MustCompile(`/([0f]{8})$`)
+}
+
+// NewIPv4Addr creates an IPv4Addr from a string. String can be in the form
+// of either an IPv4:port (e.g. `1.2.3.4:80`, in which case the mask is
+// assumed to be a `/32`), an IPv4 address (e.g. `1.2.3.4`, also with a `/32`
+// mask), or an IPv4 CIDR (e.g. `1.2.3.4/24`, which has its IP port
+// initialized to zero). ipv4Str can not be a hostname.
+//
+// NOTE: Many net.*() routines will initialize and return an IPv6 address.
+// To create uint32 values from net.IP, always test to make sure the address
+// returned can be converted to a 4 byte array using To4().
+func NewIPv4Addr(ipv4Str string) (IPv4Addr, error) {
+ // Strip off any bogus hex-encoded netmasks that will be mis-parsed by Go. In
+ // particular, clients with the Barracuda VPN client will see something like:
+ // `192.168.3.51/00ffffff` as their IP address.
+ trailingHexNetmaskRe := trailingHexNetmaskRE.Copy()
+ if match := trailingHexNetmaskRe.FindStringIndex(ipv4Str); match != nil {
+ ipv4Str = ipv4Str[:match[0]]
+ }
+
+ // Parse as an IPv4 CIDR
+ ipAddr, network, err := net.ParseCIDR(ipv4Str)
+ if err == nil {
+ ipv4 := ipAddr.To4()
+ if ipv4 == nil {
+ return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address", ipv4Str)
+ }
+
+ // If we see an IPv6 netmask, convert it to an IPv4 mask.
+ netmaskSepPos := strings.LastIndexByte(ipv4Str, '/')
+ if netmaskSepPos != -1 && netmaskSepPos+1 < len(ipv4Str) {
+ netMask, err := strconv.ParseUint(ipv4Str[netmaskSepPos+1:], 10, 8)
+ if err != nil {
+ return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: unable to parse CIDR netmask: %v", ipv4Str, err)
+ } else if netMask > 128 {
+ return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: invalid CIDR netmask", ipv4Str)
+ }
+
+ if netMask >= 96 {
+ // Convert the IPv6 netmask to an IPv4 netmask
+ network.Mask = net.CIDRMask(int(netMask-96), IPv4len*8)
+ }
+ }
+ ipv4Addr := IPv4Addr{
+ Address: IPv4Address(binary.BigEndian.Uint32(ipv4)),
+ Mask: IPv4Mask(binary.BigEndian.Uint32(network.Mask)),
+ }
+ return ipv4Addr, nil
+ }
+
+ // Attempt to parse ipv4Str as a /32 host with a port number.
+ tcpAddr, err := net.ResolveTCPAddr("tcp4", ipv4Str)
+ if err == nil {
+ ipv4 := tcpAddr.IP.To4()
+ if ipv4 == nil {
+ return IPv4Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv4 address", ipv4Str)
+ }
+
+ ipv4Uint32 := binary.BigEndian.Uint32(ipv4)
+ ipv4Addr := IPv4Addr{
+ Address: IPv4Address(ipv4Uint32),
+ Mask: IPv4HostMask,
+ Port: IPPort(tcpAddr.Port),
+ }
+
+ return ipv4Addr, nil
+ }
+
+ // Parse as a naked IPv4 address
+ ip := net.ParseIP(ipv4Str)
+ if ip != nil {
+ ipv4 := ip.To4()
+ if ipv4 == nil {
+ return IPv4Addr{}, fmt.Errorf("Unable to string convert %+q to an IPv4 address", ipv4Str)
+ }
+
+ ipv4Uint32 := binary.BigEndian.Uint32(ipv4)
+ ipv4Addr := IPv4Addr{
+ Address: IPv4Address(ipv4Uint32),
+ Mask: IPv4HostMask,
+ }
+ return ipv4Addr, nil
+ }
+
+ return IPv4Addr{}, fmt.Errorf("Unable to parse %+q to an IPv4 address: %v", ipv4Str, err)
+}
+
+// AddressBinString returns a string with the IPv4Addr's Address represented
+// as a sequence of '0' and '1' characters. This method is useful for
+// debugging or by operators who want to inspect an address.
+func (ipv4 IPv4Addr) AddressBinString() string {
+ return fmt.Sprintf("%032s", strconv.FormatUint(uint64(ipv4.Address), 2))
+}
+
+// AddressHexString returns a string with the IPv4Addr address represented as
+// a sequence of hex characters. This method is useful for debugging or by
+// operators who want to inspect an address.
+func (ipv4 IPv4Addr) AddressHexString() string {
+ return fmt.Sprintf("%08s", strconv.FormatUint(uint64(ipv4.Address), 16))
+}
+
+// Broadcast is an IPv4Addr-only method that returns the broadcast address of
+// the network.
+//
+// NOTE: IPv6 only supports multicast, so this method only exists for
+// IPv4Addr.
+func (ipv4 IPv4Addr) Broadcast() IPAddr {
+ // Nothing should listen on a broadcast address.
+ return IPv4Addr{
+ Address: IPv4Address(ipv4.BroadcastAddress()),
+ Mask: IPv4HostMask,
+ }
+}
+
+// BroadcastAddress returns a IPv4Network of the IPv4Addr's broadcast
+// address.
+func (ipv4 IPv4Addr) BroadcastAddress() IPv4Network {
+ return IPv4Network(uint32(ipv4.Address)&uint32(ipv4.Mask) | ^uint32(ipv4.Mask))
+}
+
+// CmpAddress follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its address is lower than arg
+// - 0 if the SockAddr arg is equal to the receiving IPv4Addr or the argument is
+// of a different type.
+// - 1 If the argument should sort first.
+func (ipv4 IPv4Addr) CmpAddress(sa SockAddr) int {
+ ipv4b, ok := sa.(IPv4Addr)
+ if !ok {
+ return sortDeferDecision
+ }
+
+ switch {
+ case ipv4.Address == ipv4b.Address:
+ return sortDeferDecision
+ case ipv4.Address < ipv4b.Address:
+ return sortReceiverBeforeArg
+ default:
+ return sortArgBeforeReceiver
+ }
+}
+
+// CmpPort follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its port is lower than arg
+// - 0 if the SockAddr arg's port number is equal to the receiving IPv4Addr,
+// regardless of type.
+// - 1 If the argument should sort first.
+func (ipv4 IPv4Addr) CmpPort(sa SockAddr) int {
+ var saPort IPPort
+ switch v := sa.(type) {
+ case IPv4Addr:
+ saPort = v.Port
+ case IPv6Addr:
+ saPort = v.Port
+ default:
+ return sortDeferDecision
+ }
+
+ switch {
+ case ipv4.Port == saPort:
+ return sortDeferDecision
+ case ipv4.Port < saPort:
+ return sortReceiverBeforeArg
+ default:
+ return sortArgBeforeReceiver
+ }
+}
+
+// CmpRFC follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because it belongs to the RFC and its
+// arg does not
+// - 0 if the receiver and arg both belong to the same RFC or neither do.
+// - 1 If the arg belongs to the RFC but receiver does not.
+func (ipv4 IPv4Addr) CmpRFC(rfcNum uint, sa SockAddr) int {
+ recvInRFC := IsRFC(rfcNum, ipv4)
+ ipv4b, ok := sa.(IPv4Addr)
+ if !ok {
+ // If the receiver is part of the desired RFC and the SockAddr
+ // argument is not, return -1 so that the receiver sorts before
+ // the non-IPv4 SockAddr. Conversely, if the receiver is not
+ // part of the RFC, punt on sorting and leave it for the next
+ // sorter.
+ if recvInRFC {
+ return sortReceiverBeforeArg
+ } else {
+ return sortDeferDecision
+ }
+ }
+
+ argInRFC := IsRFC(rfcNum, ipv4b)
+ switch {
+ case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC):
+ // If a and b both belong to the RFC, or neither belong to
+ // rfcNum, defer sorting to the next sorter.
+ return sortDeferDecision
+ case recvInRFC && !argInRFC:
+ return sortReceiverBeforeArg
+ default:
+ return sortArgBeforeReceiver
+ }
+}
+
+// Contains returns true if the SockAddr is contained within the receiver.
+func (ipv4 IPv4Addr) Contains(sa SockAddr) bool {
+ ipv4b, ok := sa.(IPv4Addr)
+ if !ok {
+ return false
+ }
+
+ return ipv4.ContainsNetwork(ipv4b)
+}
+
+// ContainsAddress returns true if the IPv4Address is contained within the
+// receiver.
+func (ipv4 IPv4Addr) ContainsAddress(x IPv4Address) bool {
+ return IPv4Address(ipv4.NetworkAddress()) <= x &&
+ IPv4Address(ipv4.BroadcastAddress()) >= x
+}
+
+// ContainsNetwork returns true if the network from IPv4Addr is contained
+// within the receiver.
+func (ipv4 IPv4Addr) ContainsNetwork(x IPv4Addr) bool {
+ return ipv4.NetworkAddress() <= x.NetworkAddress() &&
+ ipv4.BroadcastAddress() >= x.BroadcastAddress()
+}
+
+// DialPacketArgs returns the arguments required to be passed to
+// net.DialUDP(). If the Mask of ipv4 is not a /32 or the Port is 0,
+// DialPacketArgs() will fail. See Host() to create an IPv4Addr with its
+// mask set to /32.
+func (ipv4 IPv4Addr) DialPacketArgs() (network, dialArgs string) {
+ if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 {
+ return "udp4", ""
+ }
+ return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
+}
+
+// DialStreamArgs returns the arguments required to be passed to
+// net.DialTCP(). If the Mask of ipv4 is not a /32 or the Port is 0,
+// DialStreamArgs() will fail. See Host() to create an IPv4Addr with its
+// mask set to /32.
+func (ipv4 IPv4Addr) DialStreamArgs() (network, dialArgs string) {
+ if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 {
+ return "tcp4", ""
+ }
+ return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
+}
+
+// Equal returns true if a SockAddr is equal to the receiving IPv4Addr.
+func (ipv4 IPv4Addr) Equal(sa SockAddr) bool {
+ ipv4b, ok := sa.(IPv4Addr)
+ if !ok {
+ return false
+ }
+
+ if ipv4.Port != ipv4b.Port {
+ return false
+ }
+
+ if ipv4.Address != ipv4b.Address {
+ return false
+ }
+
+ if ipv4.NetIPNet().String() != ipv4b.NetIPNet().String() {
+ return false
+ }
+
+ return true
+}
+
+// FirstUsable returns an IPv4Addr set to the first address following the
+// network prefix. The first usable address in a network is normally the
+// gateway and should not be used except by devices forwarding packets
+// between two administratively distinct networks (i.e. a router). This
+// function does not discriminate against first usable vs "first address that
+// should be used." For example, FirstUsable() on "192.168.1.10/24" would
+// return the address "192.168.1.1/24".
+func (ipv4 IPv4Addr) FirstUsable() IPAddr {
+ addr := ipv4.NetworkAddress()
+
+ // If /32, return the address itself. If /31 assume a point-to-point
+ // link and return the lower address.
+ if ipv4.Maskbits() < 31 {
+ addr++
+ }
+
+ return IPv4Addr{
+ Address: IPv4Address(addr),
+ Mask: IPv4HostMask,
+ }
+}
+
+// Host returns a copy of ipv4 with its mask set to /32 so that it can be
+// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or
+// ListenStreamArgs().
+func (ipv4 IPv4Addr) Host() IPAddr {
+ // Nothing should listen on a broadcast address.
+ return IPv4Addr{
+ Address: ipv4.Address,
+ Mask: IPv4HostMask,
+ Port: ipv4.Port,
+ }
+}
+
+// IPPort returns the Port number attached to the IPv4Addr
+func (ipv4 IPv4Addr) IPPort() IPPort {
+ return ipv4.Port
+}
+
+// LastUsable returns the last address before the broadcast address in a
+// given network.
+func (ipv4 IPv4Addr) LastUsable() IPAddr {
+ addr := ipv4.BroadcastAddress()
+
+ // If /32, return the address itself. If /31 assume a point-to-point
+ // link and return the upper address.
+ if ipv4.Maskbits() < 31 {
+ addr--
+ }
+
+ return IPv4Addr{
+ Address: IPv4Address(addr),
+ Mask: IPv4HostMask,
+ }
+}
+
+// ListenPacketArgs returns the arguments required to be passed to
+// net.ListenUDP(). If the Mask of ipv4 is not a /32, ListenPacketArgs()
+// will fail. See Host() to create an IPv4Addr with its mask set to /32.
+func (ipv4 IPv4Addr) ListenPacketArgs() (network, listenArgs string) {
+ if ipv4.Mask != IPv4HostMask {
+ return "udp4", ""
+ }
+ return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
+}
+
+// ListenStreamArgs returns the arguments required to be passed to
+// net.ListenTCP(). If the Mask of ipv4 is not a /32, ListenStreamArgs()
+// will fail. See Host() to create an IPv4Addr with its mask set to /32.
+func (ipv4 IPv4Addr) ListenStreamArgs() (network, listenArgs string) {
+ if ipv4.Mask != IPv4HostMask {
+ return "tcp4", ""
+ }
+ return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
+}
+
+// Maskbits returns the number of network mask bits in a given IPv4Addr. For
+// example, the Maskbits() of "192.168.1.1/24" would return 24.
+func (ipv4 IPv4Addr) Maskbits() int {
+ mask := make(net.IPMask, IPv4len)
+ binary.BigEndian.PutUint32(mask, uint32(ipv4.Mask))
+ maskOnes, _ := mask.Size()
+ return maskOnes
+}
+
+// MustIPv4Addr is a helper method that must return an IPv4Addr or panic on
+// invalid input.
+func MustIPv4Addr(addr string) IPv4Addr {
+ ipv4, err := NewIPv4Addr(addr)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create an IPv4Addr from %+q: %v", addr, err))
+ }
+ return ipv4
+}
+
+// NetIP returns the address as a net.IP (address is always presized to
+// IPv4).
+func (ipv4 IPv4Addr) NetIP() *net.IP {
+ x := make(net.IP, IPv4len)
+ binary.BigEndian.PutUint32(x, uint32(ipv4.Address))
+ return &x
+}
+
+// NetIPMask create a new net.IPMask from the IPv4Addr.
+func (ipv4 IPv4Addr) NetIPMask() *net.IPMask {
+ ipv4Mask := net.IPMask{}
+ ipv4Mask = make(net.IPMask, IPv4len)
+ binary.BigEndian.PutUint32(ipv4Mask, uint32(ipv4.Mask))
+ return &ipv4Mask
+}
+
+// NetIPNet create a new net.IPNet from the IPv4Addr.
+func (ipv4 IPv4Addr) NetIPNet() *net.IPNet {
+ ipv4net := &net.IPNet{}
+ ipv4net.IP = make(net.IP, IPv4len)
+ binary.BigEndian.PutUint32(ipv4net.IP, uint32(ipv4.NetworkAddress()))
+ ipv4net.Mask = *ipv4.NetIPMask()
+ return ipv4net
+}
+
+// Network returns the network prefix or network address for a given network.
+func (ipv4 IPv4Addr) Network() IPAddr {
+ return IPv4Addr{
+ Address: IPv4Address(ipv4.NetworkAddress()),
+ Mask: ipv4.Mask,
+ }
+}
+
+// NetworkAddress returns an IPv4Network of the IPv4Addr's network address.
+func (ipv4 IPv4Addr) NetworkAddress() IPv4Network {
+ return IPv4Network(uint32(ipv4.Address) & uint32(ipv4.Mask))
+}
+
+// Octets returns a slice of the four octets in an IPv4Addr's Address. The
+// order of the bytes is big endian.
+func (ipv4 IPv4Addr) Octets() []int {
+ return []int{
+ int(ipv4.Address >> 24),
+ int((ipv4.Address >> 16) & 0xff),
+ int((ipv4.Address >> 8) & 0xff),
+ int(ipv4.Address & 0xff),
+ }
+}
+
+// String returns a string representation of the IPv4Addr
+func (ipv4 IPv4Addr) String() string {
+ if ipv4.Port != 0 {
+ return fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
+ }
+
+ if ipv4.Maskbits() == 32 {
+ return ipv4.NetIP().String()
+ }
+
+ return fmt.Sprintf("%s/%d", ipv4.NetIP().String(), ipv4.Maskbits())
+}
+
+// Type is used as a type switch and returns TypeIPv4
+func (IPv4Addr) Type() SockAddrType {
+ return TypeIPv4
+}
+
+// IPv4AddrAttr returns a string representation of an attribute for the given
+// IPv4Addr.
+func IPv4AddrAttr(ipv4 IPv4Addr, selector AttrName) string {
+ fn, found := ipv4AddrAttrMap[selector]
+ if !found {
+ return ""
+ }
+
+ return fn(ipv4)
+}
+
+// IPv4Attrs returns a list of attributes supported by the IPv4Addr type
+func IPv4Attrs() []AttrName {
+ return ipv4AddrAttrs
+}
+
+// ipv4AddrInit is called once at init()
+func ipv4AddrInit() {
+ // Sorted for human readability
+ ipv4AddrAttrs = []AttrName{
+ "size", // Same position as in IPv6 for output consistency
+ "broadcast",
+ "uint32",
+ }
+
+ ipv4AddrAttrMap = map[AttrName]func(ipv4 IPv4Addr) string{
+ "broadcast": func(ipv4 IPv4Addr) string {
+ return ipv4.Broadcast().String()
+ },
+		"size": func(ipv4 IPv4Addr) string {
+			return fmt.Sprintf("%d", 1<<uint(IPv4len*8-ipv4.Maskbits()))
+		},
+		"uint32": func(ipv4 IPv4Addr) string {
+			return fmt.Sprintf("%d", uint32(ipv4.Address))
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go b/vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go
new file mode 100644
index 0000000..0b4e5b3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go
@@ -0,0 +1,569 @@
+package sockaddr
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"math/big"
+	"net"
+)
+
+type (
+	// IPv6Address is a named type representing an IPv6 address.
+	IPv6Address *big.Int
+
+	// IPv6Network is a named type representing an IPv6 network.
+	IPv6Network *big.Int
+
+	// IPv6Mask is a named type representing an IPv6 network mask.
+	IPv6Mask *big.Int
+)
+
+// ipv6HostMask is an unexported big.Int representing a /128 IPv6 address.
+// This value must be a constant and always set to all ones.
+var ipv6HostMask IPv6Mask
+
+// ipv6AddrAttrMap is a map of the IPv6Addr type-specific attributes.
+var ipv6AddrAttrMap map[AttrName]func(IPv6Addr) string
+var ipv6AddrAttrs []AttrName
+
+func init() {
+	biMask := new(big.Int)
+	biMask = biMask.SetBytes([]byte{
+		0xff, 0xff,
+		0xff, 0xff,
+		0xff, 0xff,
+		0xff, 0xff,
+		0xff, 0xff,
+		0xff, 0xff,
+		0xff, 0xff,
+		0xff, 0xff,
+	})
+	ipv6HostMask = IPv6Mask(biMask)
+
+	ipv6AddrInit()
+}
+
+// IPv6Addr implements a convenience wrapper around the union of Go's
+// built-in net.IP and net.IPNet types.  In UNIX-speak, IPv6Addr implements
+// `sockaddr` when the the address family is set to AF_INET6
+// (i.e. `sockaddr_in6`).
+type IPv6Addr struct {
+	IPAddr
+	Address IPv6Address
+	Mask    IPv6Mask
+	Port    IPPort
+}
+
+// NewIPv6Addr creates an IPv6Addr from a string.  String can be in the form
+// of either an IPv6:port (e.g. `[2001:4860:0:2001::68]:80`, in which case
+// the mask is assumed to be a /128), an IPv6 address
+// (e.g. `2001:4860:0:2001::68`, also with a /128 mask), or an IPv6 CIDR
+// (e.g. `2001:4860:0:2001::68/64`, which has its IP port initialized to
+// zero).  ipv6Str can not be a hostname.
+//
+// NOTE: Many net.*() routines will initialize and return an IPv4 address.
+// Always test to make sure the address returned cannot be converted to a 4
+// byte array using To4().
+func NewIPv6Addr(ipv6Str string) (IPv6Addr, error) {
+	// Attempt to parse ipv6Str as a /128 host with a port number.
+	tcpAddr, err := net.ResolveTCPAddr("tcp6", ipv6Str)
+	if err == nil {
+		ipv6 := tcpAddr.IP.To16()
+		if ipv6 == nil {
+			return IPv6Addr{}, fmt.Errorf("Unable to resolve %+q as a 16byte IPv6 address", ipv6Str)
+		}
+
+		ipv6BigIntAddr := new(big.Int)
+		ipv6BigIntAddr.SetBytes(ipv6)
+
+		ipv6BigIntMask := new(big.Int)
+		ipv6BigIntMask.Set(ipv6HostMask)
+
+		return IPv6Addr{
+			Address: IPv6Address(ipv6BigIntAddr),
+			Mask:    IPv6Mask(ipv6BigIntMask),
+			Port:    IPPort(tcpAddr.Port),
+		}, nil
+	}
+
+	// Parse as a naked IPv6 address.  Strip off any surrounding brackets.
+	if len(ipv6Str) > 2 && ipv6Str[0] == '[' && ipv6Str[len(ipv6Str)-1] == ']' {
+ ipv6Str = ipv6Str[1 : len(ipv6Str)-1]
+ }
+ ip := net.ParseIP(ipv6Str)
+ if ip != nil {
+ ipv6 := ip.To16()
+ if ipv6 == nil {
+ return IPv6Addr{}, fmt.Errorf("Unable to string convert %+q to a 16byte IPv6 address", ipv6Str)
+ }
+
+ ipv6BigIntAddr := new(big.Int)
+ ipv6BigIntAddr.SetBytes(ipv6)
+
+ ipv6BigIntMask := new(big.Int)
+ ipv6BigIntMask.Set(ipv6HostMask)
+
+ return IPv6Addr{
+ Address: IPv6Address(ipv6BigIntAddr),
+ Mask: IPv6Mask(ipv6BigIntMask),
+ }, nil
+ }
+
+ // Parse as an IPv6 CIDR
+ ipAddr, network, err := net.ParseCIDR(ipv6Str)
+ if err == nil {
+ ipv6 := ipAddr.To16()
+ if ipv6 == nil {
+ return IPv6Addr{}, fmt.Errorf("Unable to convert %+q to a 16byte IPv6 address", ipv6Str)
+ }
+
+ ipv6BigIntAddr := new(big.Int)
+ ipv6BigIntAddr.SetBytes(ipv6)
+
+ ipv6BigIntMask := new(big.Int)
+ ipv6BigIntMask.SetBytes(network.Mask)
+
+ ipv6Addr := IPv6Addr{
+ Address: IPv6Address(ipv6BigIntAddr),
+ Mask: IPv6Mask(ipv6BigIntMask),
+ }
+ return ipv6Addr, nil
+ }
+
+ return IPv6Addr{}, fmt.Errorf("Unable to parse %+q to an IPv6 address: %v", ipv6Str, err)
+}
+
+// AddressBinString returns a string with the IPv6Addr's Address represented
+// as a sequence of '0' and '1' characters. This method is useful for
+// debugging or by operators who want to inspect an address.
+func (ipv6 IPv6Addr) AddressBinString() string {
+ bi := big.Int(*ipv6.Address)
+ return fmt.Sprintf("%0128s", bi.Text(2))
+}
+
+// AddressHexString returns a string with the IPv6Addr address represented as
+// a sequence of hex characters. This method is useful for debugging or by
+// operators who want to inspect an address.
+func (ipv6 IPv6Addr) AddressHexString() string {
+ bi := big.Int(*ipv6.Address)
+ return fmt.Sprintf("%032s", bi.Text(16))
+}
+
+// CmpAddress follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its address is lower than arg
+// - 0 if the SockAddr arg equal to the receiving IPv6Addr or the argument is of a
+// different type.
+// - 1 If the argument should sort first.
+func (ipv6 IPv6Addr) CmpAddress(sa SockAddr) int {
+ ipv6b, ok := sa.(IPv6Addr)
+ if !ok {
+ return sortDeferDecision
+ }
+
+ ipv6aBigInt := new(big.Int)
+ ipv6aBigInt.Set(ipv6.Address)
+ ipv6bBigInt := new(big.Int)
+ ipv6bBigInt.Set(ipv6b.Address)
+
+ return ipv6aBigInt.Cmp(ipv6bBigInt)
+}
+
+// CmpPort follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its port is lower than arg
+// - 0 if the SockAddr arg's port number is equal to the receiving IPv6Addr,
+// regardless of type.
+// - 1 If the argument should sort first.
+func (ipv6 IPv6Addr) CmpPort(sa SockAddr) int {
+ var saPort IPPort
+ switch v := sa.(type) {
+ case IPv4Addr:
+ saPort = v.Port
+ case IPv6Addr:
+ saPort = v.Port
+ default:
+ return sortDeferDecision
+ }
+
+ switch {
+ case ipv6.Port == saPort:
+ return sortDeferDecision
+ case ipv6.Port < saPort:
+ return sortReceiverBeforeArg
+ default:
+ return sortArgBeforeReceiver
+ }
+}
+
+// CmpRFC follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because it belongs to the RFC and its
+// arg does not
+// - 0 if the receiver and arg both belong to the same RFC or neither do.
+// - 1 If the arg belongs to the RFC but receiver does not.
+func (ipv6 IPv6Addr) CmpRFC(rfcNum uint, sa SockAddr) int {
+ recvInRFC := IsRFC(rfcNum, ipv6)
+ ipv6b, ok := sa.(IPv6Addr)
+ if !ok {
+ // If the receiver is part of the desired RFC and the SockAddr
+ // argument is not, sort receiver before the non-IPv6 SockAddr.
+ // Conversely, if the receiver is not part of the RFC, punt on
+ // sorting and leave it for the next sorter.
+ if recvInRFC {
+ return sortReceiverBeforeArg
+ } else {
+ return sortDeferDecision
+ }
+ }
+
+ argInRFC := IsRFC(rfcNum, ipv6b)
+ switch {
+ case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC):
+ // If a and b both belong to the RFC, or neither belong to
+ // rfcNum, defer sorting to the next sorter.
+ return sortDeferDecision
+ case recvInRFC && !argInRFC:
+ return sortReceiverBeforeArg
+ default:
+ return sortArgBeforeReceiver
+ }
+}
+
+// Contains returns true if the SockAddr is contained within the receiver.
+func (ipv6 IPv6Addr) Contains(sa SockAddr) bool {
+ ipv6b, ok := sa.(IPv6Addr)
+ if !ok {
+ return false
+ }
+
+ return ipv6.ContainsNetwork(ipv6b)
+}
+
+// ContainsAddress returns true if the IPv6Address is contained within the
+// receiver.
+func (ipv6 IPv6Addr) ContainsAddress(x IPv6Address) bool {
+ xAddr := IPv6Addr{
+ Address: x,
+ Mask: ipv6HostMask,
+ }
+
+ {
+ xIPv6 := xAddr.FirstUsable().(IPv6Addr)
+ yIPv6 := ipv6.FirstUsable().(IPv6Addr)
+ if xIPv6.CmpAddress(yIPv6) >= 1 {
+ return false
+ }
+ }
+
+ {
+ xIPv6 := xAddr.LastUsable().(IPv6Addr)
+ yIPv6 := ipv6.LastUsable().(IPv6Addr)
+ if xIPv6.CmpAddress(yIPv6) <= -1 {
+ return false
+ }
+ }
+ return true
+}
+
+// ContainsNetwork returns true if the network from IPv6Addr is contained within
+// the receiver.
+func (x IPv6Addr) ContainsNetwork(y IPv6Addr) bool {
+ {
+ xIPv6 := x.FirstUsable().(IPv6Addr)
+ yIPv6 := y.FirstUsable().(IPv6Addr)
+ if ret := xIPv6.CmpAddress(yIPv6); ret >= 1 {
+ return false
+ }
+ }
+
+ {
+ xIPv6 := x.LastUsable().(IPv6Addr)
+ yIPv6 := y.LastUsable().(IPv6Addr)
+ if ret := xIPv6.CmpAddress(yIPv6); ret <= -1 {
+ return false
+ }
+ }
+ return true
+}
+
+// DialPacketArgs returns the arguments required to be passed to
+// net.DialUDP(). If the Mask of ipv6 is not a /128 or the Port is 0,
+// DialPacketArgs() will fail. See Host() to create an IPv6Addr with its
+// mask set to /128.
+func (ipv6 IPv6Addr) DialPacketArgs() (network, dialArgs string) {
+ ipv6Mask := big.Int(*ipv6.Mask)
+ if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 {
+ return "udp6", ""
+ }
+ return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+}
+
+// DialStreamArgs returns the arguments required to be passed to
+// net.DialTCP(). If the Mask of ipv6 is not a /128 or the Port is 0,
+// DialStreamArgs() will fail. See Host() to create an IPv6Addr with its
+// mask set to /128.
+func (ipv6 IPv6Addr) DialStreamArgs() (network, dialArgs string) {
+ ipv6Mask := big.Int(*ipv6.Mask)
+ if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 {
+ return "tcp6", ""
+ }
+ return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+}
+
+// Equal returns true if a SockAddr is equal to the receiving IPv4Addr.
+func (ipv6a IPv6Addr) Equal(sa SockAddr) bool {
+ ipv6b, ok := sa.(IPv6Addr)
+ if !ok {
+ return false
+ }
+
+ if ipv6a.NetIP().String() != ipv6b.NetIP().String() {
+ return false
+ }
+
+ if ipv6a.NetIPNet().String() != ipv6b.NetIPNet().String() {
+ return false
+ }
+
+ if ipv6a.Port != ipv6b.Port {
+ return false
+ }
+
+ return true
+}
+
+// FirstUsable returns an IPv6Addr set to the first address following the
+// network prefix. The first usable address in a network is normally the
+// gateway and should not be used except by devices forwarding packets
+// between two administratively distinct networks (i.e. a router). This
+// function does not discriminate against first usable vs "first address that
+// should be used." For example, FirstUsable() on "2001:0db8::0003/64" would
+// return "2001:0db8::00011".
+func (ipv6 IPv6Addr) FirstUsable() IPAddr {
+ return IPv6Addr{
+ Address: IPv6Address(ipv6.NetworkAddress()),
+ Mask: ipv6HostMask,
+ }
+}
+
+// Host returns a copy of ipv6 with its mask set to /128 so that it can be
+// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or
+// ListenStreamArgs().
+func (ipv6 IPv6Addr) Host() IPAddr {
+ // Nothing should listen on a broadcast address.
+ return IPv6Addr{
+ Address: ipv6.Address,
+ Mask: ipv6HostMask,
+ Port: ipv6.Port,
+ }
+}
+
+// IPPort returns the Port number attached to the IPv6Addr
+func (ipv6 IPv6Addr) IPPort() IPPort {
+ return ipv6.Port
+}
+
+// LastUsable returns the last address in a given network.
+func (ipv6 IPv6Addr) LastUsable() IPAddr {
+ addr := new(big.Int)
+ addr.Set(ipv6.Address)
+
+ mask := new(big.Int)
+ mask.Set(ipv6.Mask)
+
+ negMask := new(big.Int)
+ negMask.Xor(ipv6HostMask, mask)
+
+ lastAddr := new(big.Int)
+ lastAddr.And(addr, mask)
+ lastAddr.Or(lastAddr, negMask)
+
+ return IPv6Addr{
+ Address: IPv6Address(lastAddr),
+ Mask: ipv6HostMask,
+ }
+}
+
+// ListenPacketArgs returns the arguments required to be passed to
+// net.ListenUDP(). If the Mask of ipv6 is not a /128, ListenPacketArgs()
+// will fail. See Host() to create an IPv6Addr with its mask set to /128.
+func (ipv6 IPv6Addr) ListenPacketArgs() (network, listenArgs string) {
+ ipv6Mask := big.Int(*ipv6.Mask)
+ if ipv6Mask.Cmp(ipv6HostMask) != 0 {
+ return "udp6", ""
+ }
+ return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+}
+
+// ListenStreamArgs returns the arguments required to be passed to
+// net.ListenTCP(). If the Mask of ipv6 is not a /128, ListenStreamArgs()
+// will fail. See Host() to create an IPv6Addr with its mask set to /128.
+func (ipv6 IPv6Addr) ListenStreamArgs() (network, listenArgs string) {
+ ipv6Mask := big.Int(*ipv6.Mask)
+ if ipv6Mask.Cmp(ipv6HostMask) != 0 {
+ return "tcp6", ""
+ }
+ return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+}
+
+// Maskbits returns the number of network mask bits in a given IPv6Addr. For
+// example, the Maskbits() of "2001:0db8::0003/64" would return 64.
+func (ipv6 IPv6Addr) Maskbits() int {
+ maskOnes, _ := ipv6.NetIPNet().Mask.Size()
+
+ return maskOnes
+}
+
+// MustIPv6Addr is a helper method that must return an IPv6Addr or panic on
+// invalid input.
+func MustIPv6Addr(addr string) IPv6Addr {
+ ipv6, err := NewIPv6Addr(addr)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create an IPv6Addr from %+q: %v", addr, err))
+ }
+ return ipv6
+}
+
+// NetIP returns the address as a net.IP.
+func (ipv6 IPv6Addr) NetIP() *net.IP {
+ return bigIntToNetIPv6(ipv6.Address)
+}
+
+// NetIPMask create a new net.IPMask from the IPv6Addr.
+func (ipv6 IPv6Addr) NetIPMask() *net.IPMask {
+ ipv6Mask := make(net.IPMask, IPv6len)
+ m := big.Int(*ipv6.Mask)
+ copy(ipv6Mask, m.Bytes())
+ return &ipv6Mask
+}
+
+// Network returns a pointer to the net.IPNet within IPv4Addr receiver.
+func (ipv6 IPv6Addr) NetIPNet() *net.IPNet {
+ ipv6net := &net.IPNet{}
+ ipv6net.IP = make(net.IP, IPv6len)
+ copy(ipv6net.IP, *ipv6.NetIP())
+ ipv6net.Mask = *ipv6.NetIPMask()
+ return ipv6net
+}
+
+// Network returns the network prefix or network address for a given network.
+func (ipv6 IPv6Addr) Network() IPAddr {
+ return IPv6Addr{
+ Address: IPv6Address(ipv6.NetworkAddress()),
+ Mask: ipv6.Mask,
+ }
+}
+
+// NetworkAddress returns an IPv6Network of the IPv6Addr's network address.
+func (ipv6 IPv6Addr) NetworkAddress() IPv6Network {
+ addr := new(big.Int)
+ addr.SetBytes((*ipv6.Address).Bytes())
+
+ mask := new(big.Int)
+ mask.SetBytes(*ipv6.NetIPMask())
+
+ netAddr := new(big.Int)
+ netAddr.And(addr, mask)
+
+ return IPv6Network(netAddr)
+}
+
+// Octets returns a slice of the 16 octets in an IPv6Addr's Address. The
+// order of the bytes is big endian.
+func (ipv6 IPv6Addr) Octets() []int {
+ x := make([]int, IPv6len)
+ for i, b := range *bigIntToNetIPv6(ipv6.Address) {
+ x[i] = int(b)
+ }
+
+ return x
+}
+
+// String returns a string representation of the IPv6Addr
+func (ipv6 IPv6Addr) String() string {
+ if ipv6.Port != 0 {
+ return fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+ }
+
+ if ipv6.Maskbits() == 128 {
+ return ipv6.NetIP().String()
+ }
+
+ return fmt.Sprintf("%s/%d", ipv6.NetIP().String(), ipv6.Maskbits())
+}
+
+// Type is used as a type switch and returns TypeIPv6
+func (IPv6Addr) Type() SockAddrType {
+ return TypeIPv6
+}
+
+// IPv6Attrs returns a list of attributes supported by the IPv6Addr type
+func IPv6Attrs() []AttrName {
+ return ipv6AddrAttrs
+}
+
+// IPv6AddrAttr returns a string representation of an attribute for the given
+// IPv6Addr.
+func IPv6AddrAttr(ipv6 IPv6Addr, selector AttrName) string {
+ fn, found := ipv6AddrAttrMap[selector]
+ if !found {
+ return ""
+ }
+
+ return fn(ipv6)
+}
+
+// ipv6AddrInit is called once at init()
+func ipv6AddrInit() {
+ // Sorted for human readability
+ ipv6AddrAttrs = []AttrName{
+ "size", // Same position as in IPv6 for output consistency
+ "uint128",
+ }
+
+ ipv6AddrAttrMap = map[AttrName]func(ipv6 IPv6Addr) string{
+ "size": func(ipv6 IPv6Addr) string {
+ netSize := big.NewInt(1)
+ netSize = netSize.Lsh(netSize, uint(IPv6len*8-ipv6.Maskbits()))
+ return netSize.Text(10)
+ },
+ "uint128": func(ipv6 IPv6Addr) string {
+ b := big.Int(*ipv6.Address)
+ return b.Text(10)
+ },
+ }
+}
+
+// bigIntToNetIPv6 is a helper function that correctly returns a net.IP with the
+// correctly padded values.
+func bigIntToNetIPv6(bi *big.Int) *net.IP {
+ x := make(net.IP, IPv6len)
+ ipv6Bytes := bi.Bytes()
+
+ // It's possibe for ipv6Bytes to be less than IPv6len bytes in size. If
+ // they are different sizes we to pad the size of response.
+ if len(ipv6Bytes) < IPv6len {
+ buf := new(bytes.Buffer)
+ buf.Grow(IPv6len)
+
+ for i := len(ipv6Bytes); i < IPv6len; i++ {
+ if err := binary.Write(buf, binary.BigEndian, byte(0)); err != nil {
+ panic(fmt.Sprintf("Unable to pad byte %d of input %v: %v", i, bi, err))
+ }
+ }
+
+ for _, b := range ipv6Bytes {
+ if err := binary.Write(buf, binary.BigEndian, b); err != nil {
+ panic(fmt.Sprintf("Unable to preserve endianness of input %v: %v", bi, err))
+ }
+ }
+
+ ipv6Bytes = buf.Bytes()
+ }
+ i := copy(x, ipv6Bytes)
+ if i != IPv6len {
+ panic("IPv6 wrong size")
+ }
+ return &x
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/rfc.go b/vendor/github.com/hashicorp/go-sockaddr/rfc.go
new file mode 100644
index 0000000..02e188f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/rfc.go
@@ -0,0 +1,948 @@
+package sockaddr
+
+// ForwardingBlacklist is a faux RFC that includes a list of non-forwardable IP
+// blocks.
+const ForwardingBlacklist = 4294967295
+const ForwardingBlacklistRFC = "4294967295"
+
+// IsRFC tests to see if an SockAddr matches the specified RFC
+func IsRFC(rfcNum uint, sa SockAddr) bool {
+ rfcNetMap := KnownRFCs()
+ rfcNets, ok := rfcNetMap[rfcNum]
+ if !ok {
+ return false
+ }
+
+ var contained bool
+ for _, rfcNet := range rfcNets {
+ if rfcNet.Contains(sa) {
+ contained = true
+ break
+ }
+ }
+ return contained
+}
+
+// KnownRFCs returns an initial set of known RFCs.
+//
+// NOTE (sean@): As this list evolves over time, please submit patches to keep
+// this list current. If something isn't right, inquire, as it may just be a
+// bug on my part. Some of the inclusions were based on my judgement as to what
+// would be a useful value (e.g. RFC3330).
+//
+// Useful resources:
+//
+// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml
+// * https://www.iana.org/assignments/ipv6-unicast-address-assignments/ipv6-unicast-address-assignments.xhtml
+// * https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
+func KnownRFCs() map[uint]SockAddrs {
+ // NOTE(sean@): Multiple SockAddrs per RFC lend themselves well to a
+ // RADIX tree, but `ENOTIME`. Patches welcome.
+ return map[uint]SockAddrs{
+ 919: {
+ // [RFC919] Broadcasting Internet Datagrams
+ MustIPv4Addr("255.255.255.255/32"), // [RFC1122], §7 Broadcast IP Addressing - Proposed Standards
+ },
+ 1122: {
+ // [RFC1122] Requirements for Internet Hosts -- Communication Layers
+ MustIPv4Addr("0.0.0.0/8"), // [RFC1122], §3.2.1.3
+ MustIPv4Addr("127.0.0.0/8"), // [RFC1122], §3.2.1.3
+ },
+ 1112: {
+ // [RFC1112] Host Extensions for IP Multicasting
+ MustIPv4Addr("224.0.0.0/4"), // [RFC1112], §4 Host Group Addresses
+ },
+ 1918: {
+ // [RFC1918] Address Allocation for Private Internets
+ MustIPv4Addr("10.0.0.0/8"),
+ MustIPv4Addr("172.16.0.0/12"),
+ MustIPv4Addr("192.168.0.0/16"),
+ },
+ 2544: {
+ // [RFC2544] Benchmarking Methodology for Network
+ // Interconnect Devices
+ MustIPv4Addr("198.18.0.0/15"),
+ },
+ 2765: {
+ // [RFC2765] Stateless IP/ICMP Translation Algorithm
+ // (SIIT) (obsoleted by RFCs 6145, which itself was
+ // later obsoleted by 7915).
+
+ // [RFC2765], §2.1 Addresses
+ MustIPv6Addr("0:0:0:0:0:ffff:0:0/96"),
+ },
+ 2928: {
+ // [RFC2928] Initial IPv6 Sub-TLA ID Assignments
+ MustIPv6Addr("2001::/16"), // Superblock
+ //MustIPv6Addr("2001:0000::/23"), // IANA
+ //MustIPv6Addr("2001:0200::/23"), // APNIC
+ //MustIPv6Addr("2001:0400::/23"), // ARIN
+ //MustIPv6Addr("2001:0600::/23"), // RIPE NCC
+ //MustIPv6Addr("2001:0800::/23"), // (future assignment)
+ // ...
+ //MustIPv6Addr("2001:FE00::/23"), // (future assignment)
+ },
+ 3056: { // 6to4 address
+ // [RFC3056] Connection of IPv6 Domains via IPv4 Clouds
+
+ // [RFC3056], §2 IPv6 Prefix Allocation
+ MustIPv6Addr("2002::/16"),
+ },
+ 3068: {
+ // [RFC3068] An Anycast Prefix for 6to4 Relay Routers
+			// (obsoleted by RFC7526)
+
+ // [RFC3068], § 6to4 Relay anycast address
+ MustIPv4Addr("192.88.99.0/24"),
+
+ // [RFC3068], §2.5 6to4 IPv6 relay anycast address
+ //
+ // NOTE: /120 == 128-(32-24)
+ MustIPv6Addr("2002:c058:6301::/120"),
+ },
+ 3171: {
+ // [RFC3171] IANA Guidelines for IPv4 Multicast Address Assignments
+ MustIPv4Addr("224.0.0.0/4"),
+ },
+ 3330: {
+ // [RFC3330] Special-Use IPv4 Addresses
+
+ // Addresses in this block refer to source hosts on
+ // "this" network. Address 0.0.0.0/32 may be used as a
+ // source address for this host on this network; other
+ // addresses within 0.0.0.0/8 may be used to refer to
+ // specified hosts on this network [RFC1700, page 4].
+ MustIPv4Addr("0.0.0.0/8"),
+
+ // 10.0.0.0/8 - This block is set aside for use in
+ // private networks. Its intended use is documented in
+ // [RFC1918]. Addresses within this block should not
+ // appear on the public Internet.
+ MustIPv4Addr("10.0.0.0/8"),
+
+ // 14.0.0.0/8 - This block is set aside for assignments
+ // to the international system of Public Data Networks
+ // [RFC1700, page 181]. The registry of assignments
+ // within this block can be accessed from the "Public
+ // Data Network Numbers" link on the web page at
+ // http://www.iana.org/numbers.html. Addresses within
+ // this block are assigned to users and should be
+ // treated as such.
+
+ // 24.0.0.0/8 - This block was allocated in early 1996
+ // for use in provisioning IP service over cable
+ // television systems. Although the IANA initially was
+ // involved in making assignments to cable operators,
+ // this responsibility was transferred to American
+ // Registry for Internet Numbers (ARIN) in May 2001.
+ // Addresses within this block are assigned in the
+ // normal manner and should be treated as such.
+
+ // 39.0.0.0/8 - This block was used in the "Class A
+ // Subnet Experiment" that commenced in May 1995, as
+ // documented in [RFC1797]. The experiment has been
+ // completed and this block has been returned to the
+ // pool of addresses reserved for future allocation or
+ // assignment. This block therefore no longer has a
+ // special use and is subject to allocation to a
+ // Regional Internet Registry for assignment in the
+ // normal manner.
+
+ // 127.0.0.0/8 - This block is assigned for use as the Internet host
+ // loopback address. A datagram sent by a higher level protocol to an
+ // address anywhere within this block should loop back inside the host.
+ // This is ordinarily implemented using only 127.0.0.1/32 for loopback,
+ // but no addresses within this block should ever appear on any network
+ // anywhere [RFC1700, page 5].
+ MustIPv4Addr("127.0.0.0/8"),
+
+ // 128.0.0.0/16 - This block, corresponding to the
+ // numerically lowest of the former Class B addresses,
+ // was initially and is still reserved by the IANA.
+ // Given the present classless nature of the IP address
+ // space, the basis for the reservation no longer
+ // applies and addresses in this block are subject to
+ // future allocation to a Regional Internet Registry for
+ // assignment in the normal manner.
+
+ // 169.254.0.0/16 - This is the "link local" block. It
+ // is allocated for communication between hosts on a
+ // single link. Hosts obtain these addresses by
+ // auto-configuration, such as when a DHCP server may
+ // not be found.
+ MustIPv4Addr("169.254.0.0/16"),
+
+ // 172.16.0.0/12 - This block is set aside for use in
+ // private networks. Its intended use is documented in
+ // [RFC1918]. Addresses within this block should not
+ // appear on the public Internet.
+ MustIPv4Addr("172.16.0.0/12"),
+
+ // 191.255.0.0/16 - This block, corresponding to the numerically highest
+ // to the former Class B addresses, was initially and is still reserved
+ // by the IANA. Given the present classless nature of the IP address
+ // space, the basis for the reservation no longer applies and addresses
+ // in this block are subject to future allocation to a Regional Internet
+ // Registry for assignment in the normal manner.
+
+ // 192.0.0.0/24 - This block, corresponding to the
+ // numerically lowest of the former Class C addresses,
+ // was initially and is still reserved by the IANA.
+ // Given the present classless nature of the IP address
+ // space, the basis for the reservation no longer
+ // applies and addresses in this block are subject to
+ // future allocation to a Regional Internet Registry for
+ // assignment in the normal manner.
+
+ // 192.0.2.0/24 - This block is assigned as "TEST-NET" for use in
+ // documentation and example code. It is often used in conjunction with
+ // domain names example.com or example.net in vendor and protocol
+ // documentation. Addresses within this block should not appear on the
+ // public Internet.
+ MustIPv4Addr("192.0.2.0/24"),
+
+ // 192.88.99.0/24 - This block is allocated for use as 6to4 relay
+ // anycast addresses, according to [RFC3068].
+ MustIPv4Addr("192.88.99.0/24"),
+
+ // 192.168.0.0/16 - This block is set aside for use in private networks.
+ // Its intended use is documented in [RFC1918]. Addresses within this
+ // block should not appear on the public Internet.
+ MustIPv4Addr("192.168.0.0/16"),
+
+ // 198.18.0.0/15 - This block has been allocated for use
+ // in benchmark tests of network interconnect devices.
+ // Its use is documented in [RFC2544].
+ MustIPv4Addr("198.18.0.0/15"),
+
+ // 223.255.255.0/24 - This block, corresponding to the
+ // numerically highest of the former Class C addresses,
+ // was initially and is still reserved by the IANA.
+ // Given the present classless nature of the IP address
+ // space, the basis for the reservation no longer
+ // applies and addresses in this block are subject to
+ // future allocation to a Regional Internet Registry for
+ // assignment in the normal manner.
+
+ // 224.0.0.0/4 - This block, formerly known as the Class
+ // D address space, is allocated for use in IPv4
+ // multicast address assignments. The IANA guidelines
+ // for assignments from this space are described in
+ // [RFC3171].
+ MustIPv4Addr("224.0.0.0/4"),
+
+ // 240.0.0.0/4 - This block, formerly known as the Class E address
+ // space, is reserved. The "limited broadcast" destination address
+ // 255.255.255.255 should never be forwarded outside the (sub-)net of
+ // the source. The remainder of this space is reserved
+ // for future use. [RFC1700, page 4]
+ MustIPv4Addr("240.0.0.0/4"),
+ },
+ 3849: {
+ // [RFC3849] IPv6 Address Prefix Reserved for Documentation
+ MustIPv6Addr("2001:db8::/32"), // [RFC3849], §4 IANA Considerations
+ },
+ 3927: {
+ // [RFC3927] Dynamic Configuration of IPv4 Link-Local Addresses
+ MustIPv4Addr("169.254.0.0/16"), // [RFC3927], §2.1 Link-Local Address Selection
+ },
+ 4038: {
+ // [RFC4038] Application Aspects of IPv6 Transition
+
+ // [RFC4038], §4.2. IPv6 Applications in a Dual-Stack Node
+ MustIPv6Addr("0:0:0:0:0:ffff::/96"),
+ },
+ 4193: {
+ // [RFC4193] Unique Local IPv6 Unicast Addresses
+ MustIPv6Addr("fc00::/7"),
+ },
+ 4291: {
+ // [RFC4291] IP Version 6 Addressing Architecture
+
+ // [RFC4291], §2.5.2 The Unspecified Address
+ MustIPv6Addr("::/128"),
+
+ // [RFC4291], §2.5.3 The Loopback Address
+ MustIPv6Addr("::1/128"),
+
+ // [RFC4291], §2.5.5.1. IPv4-Compatible IPv6 Address
+ MustIPv6Addr("::/96"),
+
+ // [RFC4291], §2.5.5.2. IPv4-Mapped IPv6 Address
+ MustIPv6Addr("::ffff:0:0/96"),
+
+ // [RFC4291], §2.5.6 Link-Local IPv6 Unicast Addresses
+ MustIPv6Addr("fe80::/10"),
+
+ // [RFC4291], §2.5.7 Site-Local IPv6 Unicast Addresses
+ // (depreciated)
+ MustIPv6Addr("fec0::/10"),
+
+ // [RFC4291], §2.7 Multicast Addresses
+ MustIPv6Addr("ff00::/8"),
+
+ // IPv6 Multicast Information.
+ //
+ // In the following "table" below, `ff0x` is replaced
+ // with the following values depending on the scope of
+ // the query:
+ //
+ // IPv6 Multicast Scopes:
+ // * ff00/9 // reserved
+ // * ff01/9 // interface-local
+ // * ff02/9 // link-local
+ // * ff03/9 // realm-local
+ // * ff04/9 // admin-local
+ // * ff05/9 // site-local
+ // * ff08/9 // organization-local
+ // * ff0e/9 // global
+ // * ff0f/9 // reserved
+ //
+ // IPv6 Multicast Addresses:
+ // * ff0x::2 // All routers
+ // * ff02::5 // OSPFIGP
+ // * ff02::6 // OSPFIGP Designated Routers
+ // * ff02::9 // RIP Routers
+ // * ff02::a // EIGRP Routers
+ // * ff02::d // All PIM Routers
+ // * ff02::1a // All RPL Routers
+ // * ff0x::fb // mDNSv6
+ // * ff0x::101 // All Network Time Protocol (NTP) servers
+ // * ff02::1:1 // Link Name
+ // * ff02::1:2 // All-dhcp-agents
+ // * ff02::1:3 // Link-local Multicast Name Resolution
+ // * ff05::1:3 // All-dhcp-servers
+ // * ff02::1:ff00:0/104 // Solicited-node multicast address.
+ // * ff02::2:ff00:0/104 // Node Information Queries
+ },
+ 4380: {
+ // [RFC4380] Teredo: Tunneling IPv6 over UDP through
+ // Network Address Translations (NATs)
+
+ // [RFC4380], §2.6 Global Teredo IPv6 Service Prefix
+ MustIPv6Addr("2001:0000::/32"),
+ },
+ 4773: {
+ // [RFC4773] Administration of the IANA Special Purpose IPv6 Address Block
+ MustIPv6Addr("2001:0000::/23"), // IANA
+ },
+ 4843: {
+ // [RFC4843] An IPv6 Prefix for Overlay Routable Cryptographic Hash Identifiers (ORCHID)
+ MustIPv6Addr("2001:10::/28"), // [RFC4843], §7 IANA Considerations
+ },
+ 5180: {
+ // [RFC5180] IPv6 Benchmarking Methodology for Network Interconnect Devices
+ MustIPv6Addr("2001:0200::/48"), // [RFC5180], §8 IANA Considerations
+ },
+ 5735: {
+ // [RFC5735] Special Use IPv4 Addresses
+ MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1
+ MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2
+ MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3
+ MustIPv4Addr("198.18.0.0/15"), // Benchmarks
+ },
+ 5737: {
+ // [RFC5737] IPv4 Address Blocks Reserved for Documentation
+ MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1
+ MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2
+ MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3
+ },
+ 6052: {
+ // [RFC6052] IPv6 Addressing of IPv4/IPv6 Translators
+ MustIPv6Addr("64:ff9b::/96"), // [RFC6052], §2.1. Well-Known Prefix
+ },
+ 6333: {
+ // [RFC6333] Dual-Stack Lite Broadband Deployments Following IPv4 Exhaustion
+ MustIPv4Addr("192.0.0.0/29"), // [RFC6333], §5.7 Well-Known IPv4 Address
+ },
+ 6598: {
+ // [RFC6598] IANA-Reserved IPv4 Prefix for Shared Address Space
+ MustIPv4Addr("100.64.0.0/10"),
+ },
+ 6666: {
+ // [RFC6666] A Discard Prefix for IPv6
+ MustIPv6Addr("0100::/64"),
+ },
+ 6890: {
+ // [RFC6890] Special-Purpose IP Address Registries
+
+ // From "RFC6890 §2.2.1 Information Requirements":
+ /*
+ The IPv4 and IPv6 Special-Purpose Address Registries maintain the
+ following information regarding each entry:
+
+ o Address Block - A block of IPv4 or IPv6 addresses that has been
+ registered for a special purpose.
+
+ o Name - A descriptive name for the special-purpose address block.
+
+ o RFC - The RFC through which the special-purpose address block was
+ requested.
+
+ o Allocation Date - The date upon which the special-purpose address
+ block was allocated.
+
+ o Termination Date - The date upon which the allocation is to be
+ terminated. This field is applicable for limited-use allocations
+ only.
+
+ o Source - A boolean value indicating whether an address from the
+ allocated special-purpose address block is valid when used as the
+ source address of an IP datagram that transits two devices.
+
+ o Destination - A boolean value indicating whether an address from
+ the allocated special-purpose address block is valid when used as
+ the destination address of an IP datagram that transits two
+ devices.
+
+ o Forwardable - A boolean value indicating whether a router may
+ forward an IP datagram whose destination address is drawn from the
+ allocated special-purpose address block between external
+ interfaces.
+
+ o Global - A boolean value indicating whether an IP datagram whose
+ destination address is drawn from the allocated special-purpose
+ address block is forwardable beyond a specified administrative
+ domain.
+
+ o Reserved-by-Protocol - A boolean value indicating whether the
+ special-purpose address block is reserved by IP, itself. This
+ value is "TRUE" if the RFC that created the special-purpose
+ address block requires all compliant IP implementations to behave
+ in a special way when processing packets either to or from
+ addresses contained by the address block.
+
+ If the value of "Destination" is FALSE, the values of "Forwardable"
+ and "Global" must also be false.
+ */
+
+ /*+----------------------+----------------------------+
+ * | Attribute | Value |
+ * +----------------------+----------------------------+
+ * | Address Block | 0.0.0.0/8 |
+ * | Name | "This host on this network"|
+ * | RFC | [RFC1122], Section 3.2.1.3 |
+ * | Allocation Date | September 1981 |
+ * | Termination Date | N/A |
+ * | Source | True |
+ * | Destination | False |
+ * | Forwardable | False |
+ * | Global | False |
+ * | Reserved-by-Protocol | True |
+ * +----------------------+----------------------------+*/
+ MustIPv4Addr("0.0.0.0/8"),
+
+ /*+----------------------+---------------+
+ * | Attribute | Value |
+ * +----------------------+---------------+
+ * | Address Block | 10.0.0.0/8 |
+ * | Name | Private-Use |
+ * | RFC | [RFC1918] |
+ * | Allocation Date | February 1996 |
+ * | Termination Date | N/A |
+ * | Source | True |
+ * | Destination | True |
+ * | Forwardable | True |
+ * | Global | False |
+ * | Reserved-by-Protocol | False |
+ * +----------------------+---------------+ */
+ MustIPv4Addr("10.0.0.0/8"),
+
+ /*+----------------------+----------------------+
+ | Attribute | Value |
+ +----------------------+----------------------+
+ | Address Block | 100.64.0.0/10 |
+ | Name | Shared Address Space |
+ | RFC | [RFC6598] |
+ | Allocation Date | April 2012 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------+*/
+ MustIPv4Addr("100.64.0.0/10"),
+
+ /*+----------------------+----------------------------+
+ | Attribute | Value |
+ +----------------------+----------------------------+
+ | Address Block | 127.0.0.0/8 |
+ | Name | Loopback |
+ | RFC | [RFC1122], Section 3.2.1.3 |
+ | Allocation Date | September 1981 |
+ | Termination Date | N/A |
+ | Source | False [1] |
+ | Destination | False [1] |
+ | Forwardable | False [1] |
+ | Global | False [1] |
+ | Reserved-by-Protocol | True |
+ +----------------------+----------------------------+*/
+ // [1] Several protocols have been granted exceptions to
+ // this rule. For examples, see [RFC4379] and
+ // [RFC5884].
+ MustIPv4Addr("127.0.0.0/8"),
+
+ /*+----------------------+----------------+
+ | Attribute | Value |
+ +----------------------+----------------+
+ | Address Block | 169.254.0.0/16 |
+ | Name | Link Local |
+ | RFC | [RFC3927] |
+ | Allocation Date | May 2005 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+----------------+*/
+ MustIPv4Addr("169.254.0.0/16"),
+
+ /*+----------------------+---------------+
+ | Attribute | Value |
+ +----------------------+---------------+
+ | Address Block | 172.16.0.0/12 |
+ | Name | Private-Use |
+ | RFC | [RFC1918] |
+ | Allocation Date | February 1996 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------+*/
+ MustIPv4Addr("172.16.0.0/12"),
+
+ /*+----------------------+---------------------------------+
+ | Attribute | Value |
+ +----------------------+---------------------------------+
+ | Address Block | 192.0.0.0/24 [2] |
+ | Name | IETF Protocol Assignments |
+ | RFC | Section 2.1 of this document |
+ | Allocation Date | January 2010 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------------------------+*/
+ // [2] Not usable unless by virtue of a more specific
+ // reservation.
+ MustIPv4Addr("192.0.0.0/24"),
+
+ /*+----------------------+--------------------------------+
+ | Attribute | Value |
+ +----------------------+--------------------------------+
+ | Address Block | 192.0.0.0/29 |
+ | Name | IPv4 Service Continuity Prefix |
+ | RFC | [RFC6333], [RFC7335] |
+ | Allocation Date | June 2011 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+--------------------------------+*/
+ MustIPv4Addr("192.0.0.0/29"),
+
+ /*+----------------------+----------------------------+
+ | Attribute | Value |
+ +----------------------+----------------------------+
+ | Address Block | 192.0.2.0/24 |
+ | Name | Documentation (TEST-NET-1) |
+ | RFC | [RFC5737] |
+ | Allocation Date | January 2010 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------------+*/
+ MustIPv4Addr("192.0.2.0/24"),
+
+ /*+----------------------+--------------------+
+ | Attribute | Value |
+ +----------------------+--------------------+
+ | Address Block | 192.88.99.0/24 |
+ | Name | 6to4 Relay Anycast |
+ | RFC | [RFC3068] |
+ | Allocation Date | June 2001 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | True |
+ | Reserved-by-Protocol | False |
+ +----------------------+--------------------+*/
+ MustIPv4Addr("192.88.99.0/24"),
+
+ /*+----------------------+----------------+
+ | Attribute | Value |
+ +----------------------+----------------+
+ | Address Block | 192.168.0.0/16 |
+ | Name | Private-Use |
+ | RFC | [RFC1918] |
+ | Allocation Date | February 1996 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------+*/
+ MustIPv4Addr("192.168.0.0/16"),
+
+ /*+----------------------+---------------+
+ | Attribute | Value |
+ +----------------------+---------------+
+ | Address Block | 198.18.0.0/15 |
+ | Name | Benchmarking |
+ | RFC | [RFC2544] |
+ | Allocation Date | March 1999 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------+*/
+ MustIPv4Addr("198.18.0.0/15"),
+
+ /*+----------------------+----------------------------+
+ | Attribute | Value |
+ +----------------------+----------------------------+
+ | Address Block | 198.51.100.0/24 |
+ | Name | Documentation (TEST-NET-2) |
+ | RFC | [RFC5737] |
+ | Allocation Date | January 2010 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------------+*/
+ MustIPv4Addr("198.51.100.0/24"),
+
+ /*+----------------------+----------------------------+
+ | Attribute | Value |
+ +----------------------+----------------------------+
+ | Address Block | 203.0.113.0/24 |
+ | Name | Documentation (TEST-NET-3) |
+ | RFC | [RFC5737] |
+ | Allocation Date | January 2010 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------------+*/
+ MustIPv4Addr("203.0.113.0/24"),
+
+ /*+----------------------+----------------------+
+ | Attribute | Value |
+ +----------------------+----------------------+
+ | Address Block | 240.0.0.0/4 |
+ | Name | Reserved |
+ | RFC | [RFC1112], Section 4 |
+ | Allocation Date | August 1989 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+----------------------+*/
+ MustIPv4Addr("240.0.0.0/4"),
+
+ /*+----------------------+----------------------+
+ | Attribute | Value |
+ +----------------------+----------------------+
+ | Address Block | 255.255.255.255/32 |
+ | Name | Limited Broadcast |
+ | RFC | [RFC0919], Section 7 |
+ | Allocation Date | October 1984 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | True |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------+*/
+ MustIPv4Addr("255.255.255.255/32"),
+
+ /*+----------------------+------------------+
+ | Attribute | Value |
+ +----------------------+------------------+
+ | Address Block | ::1/128 |
+ | Name | Loopback Address |
+ | RFC | [RFC4291] |
+ | Allocation Date | February 2006 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+------------------+*/
+ MustIPv6Addr("::1/128"),
+
+ /*+----------------------+---------------------+
+ | Attribute | Value |
+ +----------------------+---------------------+
+ | Address Block | ::/128 |
+ | Name | Unspecified Address |
+ | RFC | [RFC4291] |
+ | Allocation Date | February 2006 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+---------------------+*/
+ MustIPv6Addr("::/128"),
+
+ /*+----------------------+---------------------+
+ | Attribute | Value |
+ +----------------------+---------------------+
+ | Address Block | 64:ff9b::/96 |
+ | Name | IPv4-IPv6 Translat. |
+ | RFC | [RFC6052] |
+ | Allocation Date | October 2010 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | True |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------------+*/
+ MustIPv6Addr("64:ff9b::/96"),
+
+ /*+----------------------+---------------------+
+ | Attribute | Value |
+ +----------------------+---------------------+
+ | Address Block | ::ffff:0:0/96 |
+ | Name | IPv4-mapped Address |
+ | RFC | [RFC4291] |
+ | Allocation Date | February 2006 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+---------------------+*/
+ MustIPv6Addr("::ffff:0:0/96"),
+
+ /*+----------------------+----------------------------+
+ | Attribute | Value |
+ +----------------------+----------------------------+
+ | Address Block | 100::/64 |
+ | Name | Discard-Only Address Block |
+ | RFC | [RFC6666] |
+ | Allocation Date | June 2012 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------------+*/
+ MustIPv6Addr("100::/64"),
+
+ /*+----------------------+---------------------------+
+ | Attribute | Value |
+ +----------------------+---------------------------+
+ | Address Block | 2001::/23 |
+ | Name | IETF Protocol Assignments |
+ | RFC | [RFC2928] |
+ | Allocation Date | September 2000 |
+ | Termination Date | N/A |
+ | Source | False[1] |
+ | Destination | False[1] |
+ | Forwardable | False[1] |
+ | Global | False[1] |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------------------+*/
+ // [1] Unless allowed by a more specific allocation.
+ MustIPv6Addr("2001::/16"),
+
+ /*+----------------------+----------------+
+ | Attribute | Value |
+ +----------------------+----------------+
+ | Address Block | 2001::/32 |
+ | Name | TEREDO |
+ | RFC | [RFC4380] |
+ | Allocation Date | January 2006 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------+*/
+ // Covered by previous entry, included for completeness.
+ //
+ // MustIPv6Addr("2001::/16"),
+
+ /*+----------------------+----------------+
+ | Attribute | Value |
+ +----------------------+----------------+
+ | Address Block | 2001:2::/48 |
+ | Name | Benchmarking |
+ | RFC | [RFC5180] |
+ | Allocation Date | April 2008 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------+*/
+ // Covered by previous entry, included for completeness.
+ //
+ // MustIPv6Addr("2001:2::/48"),
+
+ /*+----------------------+---------------+
+ | Attribute | Value |
+ +----------------------+---------------+
+ | Address Block | 2001:db8::/32 |
+ | Name | Documentation |
+ | RFC | [RFC3849] |
+ | Allocation Date | July 2004 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------+*/
+ // Covered by previous entry, included for completeness.
+ //
+ // MustIPv6Addr("2001:db8::/32"),
+
+ /*+----------------------+--------------+
+ | Attribute | Value |
+ +----------------------+--------------+
+ | Address Block | 2001:10::/28 |
+ | Name | ORCHID |
+ | RFC | [RFC4843] |
+ | Allocation Date | March 2007 |
+ | Termination Date | March 2014 |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+--------------+*/
+ // Covered by previous entry, included for completeness.
+ //
+ // MustIPv6Addr("2001:10::/28"),
+
+ /*+----------------------+---------------+
+ | Attribute | Value |
+ +----------------------+---------------+
+ | Address Block | 2002::/16 [2] |
+ | Name | 6to4 |
+ | RFC | [RFC3056] |
+ | Allocation Date | February 2001 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | N/A [2] |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------+*/
+ // [2] See [RFC3056] for details.
+ MustIPv6Addr("2002::/16"),
+
+ /*+----------------------+--------------+
+ | Attribute | Value |
+ +----------------------+--------------+
+ | Address Block | fc00::/7 |
+ | Name | Unique-Local |
+ | RFC | [RFC4193] |
+ | Allocation Date | October 2005 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+--------------+*/
+ MustIPv6Addr("fc00::/7"),
+
+ /*+----------------------+-----------------------+
+ | Attribute | Value |
+ +----------------------+-----------------------+
+ | Address Block | fe80::/10 |
+ | Name | Linked-Scoped Unicast |
+ | RFC | [RFC4291] |
+ | Allocation Date | February 2006 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+-----------------------+*/
+ MustIPv6Addr("fe80::/10"),
+ },
+ 7335: {
+ // [RFC7335] IPv4 Service Continuity Prefix
+ MustIPv4Addr("192.0.0.0/29"), // [RFC7335], §6 IANA Considerations
+ },
+ ForwardingBlacklist: { // Pseudo-RFC
+ // Blacklist of non-forwardable IP blocks taken from RFC6890
+ //
+ // TODO: the attributes for forwardable should be
+			// searchable and embedded in the main list of RFCs
+ // above.
+ MustIPv4Addr("0.0.0.0/8"),
+ MustIPv4Addr("127.0.0.0/8"),
+ MustIPv4Addr("169.254.0.0/16"),
+ MustIPv4Addr("192.0.0.0/24"),
+ MustIPv4Addr("192.0.2.0/24"),
+ MustIPv4Addr("198.51.100.0/24"),
+ MustIPv4Addr("203.0.113.0/24"),
+ MustIPv4Addr("240.0.0.0/4"),
+ MustIPv4Addr("255.255.255.255/32"),
+ MustIPv6Addr("::1/128"),
+ MustIPv6Addr("::/128"),
+ MustIPv6Addr("::ffff:0:0/96"),
+
+ // There is no way of expressing a whitelist per RFC2928
+ // atm without creating a negative mask, which I don't
+ // want to do atm.
+ //MustIPv6Addr("2001::/23"),
+
+ MustIPv6Addr("2001:db8::/32"),
+ MustIPv6Addr("2001:10::/28"),
+ MustIPv6Addr("fe80::/10"),
+ },
+ }
+}
+
+// VisitAllRFCs iterates over all known RFCs and calls the visitor
+func VisitAllRFCs(fn func(rfcNum uint, sockaddrs SockAddrs)) {
+ rfcNetMap := KnownRFCs()
+
+ // Blacklist of faux-RFCs. Don't show the world that we're abusing the
+ // RFC system in this library.
+ rfcBlacklist := map[uint]struct{}{
+ ForwardingBlacklist: {},
+ }
+
+ for rfcNum, sas := range rfcNetMap {
+ if _, found := rfcBlacklist[rfcNum]; !found {
+ fn(rfcNum, sas)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info.go b/vendor/github.com/hashicorp/go-sockaddr/route_info.go
new file mode 100644
index 0000000..2a3ee1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info.go
@@ -0,0 +1,19 @@
+package sockaddr
+
+// RouteInterface specifies an interface for obtaining memoized route table and
+// network information from a given OS.
+type RouteInterface interface {
+ // GetDefaultInterfaceName returns the name of the interface that has a
+ // default route or an error and an empty string if a problem was
+ // encountered.
+ GetDefaultInterfaceName() (string, error)
+}
+
+// VisitCommands visits each command used by the platform-specific RouteInfo
+// implementation.
+func (ri routeInfo) VisitCommands(fn func(name string, cmd []string)) {
+ for k, v := range ri.cmds {
+ cmds := append([]string(nil), v...)
+ fn(k, cmds)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go
new file mode 100644
index 0000000..705757a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go
@@ -0,0 +1,36 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package sockaddr
+
+import "os/exec"
+
+var cmds map[string][]string = map[string][]string{
+ "route": {"/sbin/route", "-n", "get", "default"},
+}
+
+type routeInfo struct {
+ cmds map[string][]string
+}
+
+// NewRouteInfo returns a BSD-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+ return routeInfo{
+ cmds: cmds,
+ }, nil
+}
+
+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+ out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output()
+ if err != nil {
+ return "", err
+ }
+
+ var ifName string
+ if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil {
+ return "", err
+ }
+ return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go
new file mode 100644
index 0000000..d1b009f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go
@@ -0,0 +1,10 @@
+// +build android nacl plan9
+
+package sockaddr
+
+import "errors"
+
+// getDefaultIfName is the default interface function for unsupported platforms.
+func getDefaultIfName() (string, error) {
+ return "", errors.New("No default interface found (unsupported platform)")
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go
new file mode 100644
index 0000000..c2ec91e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go
@@ -0,0 +1,40 @@
+package sockaddr
+
+import (
+ "errors"
+ "os/exec"
+)
+
+type routeInfo struct {
+ cmds map[string][]string
+}
+
+// NewRouteInfo returns a Linux-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+ // CoreOS Container Linux moved ip to /usr/bin/ip, so look it up on
+ // $PATH and fallback to /sbin/ip on error.
+ path, _ := exec.LookPath("ip")
+ if path == "" {
+ path = "/sbin/ip"
+ }
+
+ return routeInfo{
+ cmds: map[string][]string{"ip": {path, "route"}},
+ }, nil
+}
+
+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+ out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output()
+ if err != nil {
+ return "", err
+ }
+
+ var ifName string
+ if ifName, err = parseDefaultIfNameFromIPCmd(string(out)); err != nil {
+ return "", errors.New("No default interface found")
+ }
+ return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go
new file mode 100644
index 0000000..ee8e798
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go
@@ -0,0 +1,37 @@
+package sockaddr
+
+import (
+ "errors"
+ "os/exec"
+)
+
+var cmds map[string][]string = map[string][]string{
+ "route": {"/usr/sbin/route", "-n", "get", "default"},
+}
+
+type routeInfo struct {
+ cmds map[string][]string
+}
+
+// NewRouteInfo returns a Solaris-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+ return routeInfo{
+ cmds: cmds,
+ }, nil
+}
+
+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+ out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output()
+ if err != nil {
+ return "", err
+ }
+
+ var ifName string
+ if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil {
+ return "", errors.New("No default interface found")
+ }
+ return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go
new file mode 100644
index 0000000..3da9728
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go
@@ -0,0 +1,41 @@
+package sockaddr
+
+import "os/exec"
+
+var cmds map[string][]string = map[string][]string{
+ "netstat": {"netstat", "-rn"},
+ "ipconfig": {"ipconfig"},
+}
+
+type routeInfo struct {
+ cmds map[string][]string
+}
+
+// NewRouteInfo returns a Windows-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+ return routeInfo{
+ cmds: cmds,
+ }, nil
+}
+
+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+ ifNameOut, err := exec.Command(cmds["netstat"][0], cmds["netstat"][1:]...).Output()
+ if err != nil {
+ return "", err
+ }
+
+ ipconfigOut, err := exec.Command(cmds["ipconfig"][0], cmds["ipconfig"][1:]...).Output()
+ if err != nil {
+ return "", err
+ }
+
+ ifName, err := parseDefaultIfNameWindows(string(ifNameOut), string(ipconfigOut))
+ if err != nil {
+ return "", err
+ }
+
+ return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go
new file mode 100644
index 0000000..826c91c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go
@@ -0,0 +1,206 @@
+package sockaddr
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+type SockAddrType int
+type AttrName string
+
+const (
+ TypeUnknown SockAddrType = 0x0
+ TypeUnix = 0x1
+ TypeIPv4 = 0x2
+ TypeIPv6 = 0x4
+
+ // TypeIP is the union of TypeIPv4 and TypeIPv6
+ TypeIP = 0x6
+)
+
+type SockAddr interface {
+ // CmpRFC returns 0 if SockAddr exactly matches one of the matched RFC
+ // networks, -1 if the receiver is contained within the RFC network, or
+ // 1 if the address is not contained within the RFC.
+ CmpRFC(rfcNum uint, sa SockAddr) int
+
+ // Contains returns true if the SockAddr arg is contained within the
+ // receiver
+ Contains(SockAddr) bool
+
+ // Equal allows for the comparison of two SockAddrs
+ Equal(SockAddr) bool
+
+ DialPacketArgs() (string, string)
+ DialStreamArgs() (string, string)
+ ListenPacketArgs() (string, string)
+ ListenStreamArgs() (string, string)
+
+ // String returns the string representation of SockAddr
+ String() string
+
+ // Type returns the SockAddrType
+ Type() SockAddrType
+}
+
+// sockAddrAttrMap is a map of the SockAddr type-specific attributes.
+var sockAddrAttrMap map[AttrName]func(SockAddr) string
+var sockAddrAttrs []AttrName
+
+func init() {
+ sockAddrInit()
+}
+
+// NewSockAddr creates a new SockAddr from the string. The order in which it
+// attempts to construct a SockAddr is: IPv4Addr, IPv6Addr, SockAddrUnix.
+//
+// NOTE: NewSockAddr() relies on the heuristic that the path must begin with a
+// '.' or '/' character to create a new UnixSock. For UNIX sockets that
+// are absolute paths or are nested within a sub-directory, this works as
+// expected, however if the UNIX socket is contained in the current working
+// directory, this will fail unless the path begins with "./"
+// (e.g. "./my-local-socket"). Calls directly to NewUnixSock() do not suffer
+// this limitation. Invalid IP addresses such as "256.0.0.0/-1" will run afoul
+// of this heuristic and be assumed to be a valid UNIX socket path (which they
+// are, but it is probably not what you want and you won't realize it until you
+// stat(2) the file system to discover it doesn't exist).
+func NewSockAddr(s string) (SockAddr, error) {
+ ipv4Addr, err := NewIPv4Addr(s)
+ if err == nil {
+ return ipv4Addr, nil
+ }
+
+ ipv6Addr, err := NewIPv6Addr(s)
+ if err == nil {
+ return ipv6Addr, nil
+ }
+
+ // Check to make sure the string begins with either a '.' or '/', or
+ // contains a '/'.
+ if len(s) > 1 && (strings.IndexAny(s[0:1], "./") != -1 || strings.IndexByte(s, '/') != -1) {
+ unixSock, err := NewUnixSock(s)
+ if err == nil {
+ return unixSock, nil
+ }
+ }
+
+ return nil, fmt.Errorf("Unable to convert %q to an IPv4 or IPv6 address, or a UNIX Socket", s)
+}
+
+// ToIPAddr returns an IPAddr type or nil if the type conversion fails.
+func ToIPAddr(sa SockAddr) *IPAddr {
+ ipa, ok := sa.(IPAddr)
+ if !ok {
+ return nil
+ }
+ return &ipa
+}
+
+// ToIPv4Addr returns an IPv4Addr type or nil if the type conversion fails.
+func ToIPv4Addr(sa SockAddr) *IPv4Addr {
+ switch v := sa.(type) {
+ case IPv4Addr:
+ return &v
+ default:
+ return nil
+ }
+}
+
+// ToIPv6Addr returns an IPv6Addr type or nil if the type conversion fails.
+func ToIPv6Addr(sa SockAddr) *IPv6Addr {
+ switch v := sa.(type) {
+ case IPv6Addr:
+ return &v
+ default:
+ return nil
+ }
+}
+
+// ToUnixSock returns a UnixSock type or nil if the type conversion fails.
+func ToUnixSock(sa SockAddr) *UnixSock {
+ switch v := sa.(type) {
+ case UnixSock:
+ return &v
+ default:
+ return nil
+ }
+}
+
+// SockAddrAttr returns a string representation of an attribute for the given
+// SockAddr.
+func SockAddrAttr(sa SockAddr, selector AttrName) string {
+ fn, found := sockAddrAttrMap[selector]
+ if !found {
+ return ""
+ }
+
+ return fn(sa)
+}
+
+// String() for SockAddrType returns a string representation of the
+// SockAddrType (e.g. "IPv4", "IPv6", "UNIX", "IP", or "unknown").
+func (sat SockAddrType) String() string {
+ switch sat {
+ case TypeIPv4:
+ return "IPv4"
+ case TypeIPv6:
+ return "IPv6"
+ // There is no concrete "IP" type. Leaving here as a reminder.
+ // case TypeIP:
+ // return "IP"
+ case TypeUnix:
+ return "UNIX"
+ default:
+ panic("unsupported type")
+ }
+}
+
+// sockAddrInit is called once at init()
+func sockAddrInit() {
+ sockAddrAttrs = []AttrName{
+ "type", // type should be first
+ "string",
+ }
+
+ sockAddrAttrMap = map[AttrName]func(sa SockAddr) string{
+ "string": func(sa SockAddr) string {
+ return sa.String()
+ },
+ "type": func(sa SockAddr) string {
+ return sa.Type().String()
+ },
+ }
+}
+
+// SockAddrAttrs returns a list of attributes supported by the SockAddr type
+func SockAddrAttrs() []AttrName {
+ return sockAddrAttrs
+}
+
+// Although this is pretty trivial to do in a program, having the logic here is
+// useful all around. Note that this marshals into a *string* -- the underlying
+// string representation of the sockaddr. If you then unmarshal into this type
+// in Go, all will work as expected, but externally you can take what comes out
+// and use the string value directly.
+type SockAddrMarshaler struct {
+ SockAddr
+}
+
+func (s *SockAddrMarshaler) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.SockAddr.String())
+}
+
+func (s *SockAddrMarshaler) UnmarshalJSON(in []byte) error {
+ var str string
+ err := json.Unmarshal(in, &str)
+ if err != nil {
+ return err
+ }
+ sa, err := NewSockAddr(str)
+ if err != nil {
+ return err
+ }
+ s.SockAddr = sa
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go
new file mode 100644
index 0000000..75fbffb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go
@@ -0,0 +1,193 @@
+package sockaddr
+
+import (
+ "bytes"
+ "sort"
+)
+
+// SockAddrs is a slice of SockAddrs
+type SockAddrs []SockAddr
+
+func (s SockAddrs) Len() int { return len(s) }
+func (s SockAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// CmpAddrFunc is the function signature that must be met to be used in the
+// OrderedAddrBy multiAddrSorter
+type CmpAddrFunc func(p1, p2 *SockAddr) int
+
+// multiAddrSorter implements the Sort interface, sorting the SockAddrs within.
+type multiAddrSorter struct {
+ addrs SockAddrs
+ cmp []CmpAddrFunc
+}
+
+// Sort sorts the argument slice according to the Cmp functions passed to
+// OrderedAddrBy.
+func (ms *multiAddrSorter) Sort(sockAddrs SockAddrs) {
+ ms.addrs = sockAddrs
+ sort.Sort(ms)
+}
+
+// OrderedAddrBy returns a multiAddrSorter that sorts by the given Cmp functions.
+func OrderedAddrBy(cmpFuncs ...CmpAddrFunc) *multiAddrSorter {
+ return &multiAddrSorter{
+ cmp: cmpFuncs,
+ }
+}
+
+// Len is part of sort.Interface.
+func (ms *multiAddrSorter) Len() int {
+ return len(ms.addrs)
+}
+
+// Less is part of sort.Interface. It is implemented by looping along the
+// Cmp() functions until it finds a comparison that is either less than,
+// equal to, or greater than.
+func (ms *multiAddrSorter) Less(i, j int) bool {
+ p, q := &ms.addrs[i], &ms.addrs[j]
+ // Try all but the last comparison.
+ var k int
+ for k = 0; k < len(ms.cmp)-1; k++ {
+ cmp := ms.cmp[k]
+ x := cmp(p, q)
+ switch x {
+ case -1:
+ // p < q, so we have a decision.
+ return true
+ case 1:
+ // p > q, so we have a decision.
+ return false
+ }
+ // p == q; try the next comparison.
+ }
+ // All comparisons to here said "equal", so just return whatever the
+ // final comparison reports.
+ switch ms.cmp[k](p, q) {
+ case -1:
+ return true
+ case 1:
+ return false
+ default:
+ // Still a tie! Now what?
+ return false
+ }
+}
+
+// Swap is part of sort.Interface.
+func (ms *multiAddrSorter) Swap(i, j int) {
+ ms.addrs[i], ms.addrs[j] = ms.addrs[j], ms.addrs[i]
+}
+
+const (
+ // NOTE (sean@): These constants are here for code readability only and
+ // are sprucing up the code for readability purposes. Some of the
+ // Cmp*() variants have confusing logic (especially when dealing with
+ // mixed-type comparisons) and this, I think, has made it easier to grok
+ // the code faster.
+ sortReceiverBeforeArg = -1
+ sortDeferDecision = 0
+ sortArgBeforeReceiver = 1
+)
+
+// AscAddress is a sorting function to sort SockAddrs by their respective
+// address type. Non-equal types are deferred in the sort.
+func AscAddress(p1Ptr, p2Ptr *SockAddr) int {
+ p1 := *p1Ptr
+ p2 := *p2Ptr
+
+ switch v := p1.(type) {
+ case IPv4Addr:
+ return v.CmpAddress(p2)
+ case IPv6Addr:
+ return v.CmpAddress(p2)
+ case UnixSock:
+ return v.CmpAddress(p2)
+ default:
+ return sortDeferDecision
+ }
+}
+
+// AscPort is a sorting function to sort SockAddrs by their respective port
+// number. Non-equal types are deferred in the sort.
+func AscPort(p1Ptr, p2Ptr *SockAddr) int {
+ p1 := *p1Ptr
+ p2 := *p2Ptr
+
+ switch v := p1.(type) {
+ case IPv4Addr:
+ return v.CmpPort(p2)
+ case IPv6Addr:
+ return v.CmpPort(p2)
+ default:
+ return sortDeferDecision
+ }
+}
+
+// AscPrivate is a sorting function to sort "more secure" private values before
+// "more public" values. Both IPv4 and IPv6 are compared against RFC6890
+// (RFC6890 includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and
+// IPv6 includes RFC4193).
+func AscPrivate(p1Ptr, p2Ptr *SockAddr) int {
+ p1 := *p1Ptr
+ p2 := *p2Ptr
+
+ switch v := p1.(type) {
+ case IPv4Addr, IPv6Addr:
+ return v.CmpRFC(6890, p2)
+ default:
+ return sortDeferDecision
+ }
+}
+
+// AscNetworkSize is a sorting function to sort SockAddrs based on their network
+// size. Non-equal types are deferred in the sort.
+func AscNetworkSize(p1Ptr, p2Ptr *SockAddr) int {
+ p1 := *p1Ptr
+ p2 := *p2Ptr
+ p1Type := p1.Type()
+ p2Type := p2.Type()
+
+ // Network size operations on non-IP types make no sense
+ if p1Type != p2Type && p1Type != TypeIP {
+ return sortDeferDecision
+ }
+
+ ipA := p1.(IPAddr)
+ ipB := p2.(IPAddr)
+
+ return bytes.Compare([]byte(*ipA.NetIPMask()), []byte(*ipB.NetIPMask()))
+}
+
+// AscType is a sorting function to sort "more secure" types before
+// "less-secure" types.
+func AscType(p1Ptr, p2Ptr *SockAddr) int {
+ p1 := *p1Ptr
+ p2 := *p2Ptr
+ p1Type := p1.Type()
+ p2Type := p2.Type()
+ switch {
+ case p1Type < p2Type:
+ return sortReceiverBeforeArg
+ case p1Type == p2Type:
+ return sortDeferDecision
+ case p1Type > p2Type:
+ return sortArgBeforeReceiver
+ default:
+ return sortDeferDecision
+ }
+}
+
+// FilterByType returns two lists: a list of matched and unmatched SockAddrs
+func (sas SockAddrs) FilterByType(type_ SockAddrType) (matched, excluded SockAddrs) {
+ matched = make(SockAddrs, 0, len(sas))
+ excluded = make(SockAddrs, 0, len(sas))
+
+ for _, sa := range sas {
+ if sa.Type()&type_ != 0 {
+ matched = append(matched, sa)
+ } else {
+ excluded = append(excluded, sa)
+ }
+ }
+ return matched, excluded
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/unixsock.go b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go
new file mode 100644
index 0000000..f3be3f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go
@@ -0,0 +1,135 @@
+package sockaddr
+
+import (
+ "fmt"
+ "strings"
+)
+
+type UnixSock struct {
+ SockAddr
+ path string
+}
+type UnixSocks []*UnixSock
+
+// unixAttrMap is a map of the UnixSock type-specific attributes.
+var unixAttrMap map[AttrName]func(UnixSock) string
+var unixAttrs []AttrName
+
+func init() {
+ unixAttrInit()
+}
+
+// NewUnixSock creates an UnixSock from a string path. String can be in the
+// form of either URI-based string (e.g. `file:///etc/passwd`), an absolute
+// path (e.g. `/etc/passwd`), or a relative path (e.g. `./foo`).
+func NewUnixSock(s string) (ret UnixSock, err error) {
+ ret.path = s
+ return ret, nil
+}
+
+// CmpAddress follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its name lexically sorts before arg
+// - 0 if the SockAddr arg is not a UnixSock, or is a UnixSock with the same path.
+// - 1 If the argument should sort first.
+func (us UnixSock) CmpAddress(sa SockAddr) int {
+ usb, ok := sa.(UnixSock)
+ if !ok {
+ return sortDeferDecision
+ }
+
+ return strings.Compare(us.Path(), usb.Path())
+}
+
+// DialPacketArgs returns the arguments required to be passed to net.DialUnix()
+// with the `unixgram` network type.
+func (us UnixSock) DialPacketArgs() (network, dialArgs string) {
+ return "unixgram", us.path
+}
+
+// DialStreamArgs returns the arguments required to be passed to net.DialUnix()
+// with the `unix` network type.
+func (us UnixSock) DialStreamArgs() (network, dialArgs string) {
+ return "unix", us.path
+}
+
+// Equal returns true if a SockAddr is equal to the receiving UnixSock.
+func (us UnixSock) Equal(sa SockAddr) bool {
+ usb, ok := sa.(UnixSock)
+ if !ok {
+ return false
+ }
+
+ if us.Path() != usb.Path() {
+ return false
+ }
+
+ return true
+}
+
+// ListenPacketArgs returns the arguments required to be passed to
+// net.ListenUnixgram() with the `unixgram` network type.
+func (us UnixSock) ListenPacketArgs() (network, dialArgs string) {
+ return "unixgram", us.path
+}
+
+// ListenStreamArgs returns the arguments required to be passed to
+// net.ListenUnix() with the `unix` network type.
+func (us UnixSock) ListenStreamArgs() (network, dialArgs string) {
+ return "unix", us.path
+}
+
+// MustUnixSock is a helper method that must return an UnixSock or panic on
+// invalid input.
+func MustUnixSock(addr string) UnixSock {
+ us, err := NewUnixSock(addr)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create a UnixSock from %+q: %v", addr, err))
+ }
+ return us
+}
+
+// Path returns the given path of the UnixSock
+func (us UnixSock) Path() string {
+ return us.path
+}
+
+// String returns the path of the UnixSock
+func (us UnixSock) String() string {
+ return fmt.Sprintf("%+q", us.path)
+}
+
+// Type is used as a type switch and returns TypeUnix
+func (UnixSock) Type() SockAddrType {
+ return TypeUnix
+}
+
+// UnixSockAttrs returns a list of attributes supported by the UnixSock type
+func UnixSockAttrs() []AttrName {
+ return unixAttrs
+}
+
+// UnixSockAttr returns a string representation of an attribute for the given
+// UnixSock.
+func UnixSockAttr(us UnixSock, attrName AttrName) string {
+ fn, found := unixAttrMap[attrName]
+ if !found {
+ return ""
+ }
+
+ return fn(us)
+}
+
+// unixAttrInit is called once at init()
+func unixAttrInit() {
+ // Sorted for human readability
+ unixAttrs = []AttrName{
+ "path",
+ }
+
+ unixAttrMap = map[AttrName]func(us UnixSock) string{
+ "path": func(us UnixSock) string {
+ return us.Path()
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md b/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index 2d7fc4b..0000000
--- a/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,21 +0,0 @@
-### HCL Template
-```hcl
-# Place your HCL configuration file here
-```
-
-### Expected behavior
-What should have happened?
-
-### Actual behavior
-What actually happened?
-
-### Steps to reproduce
-1.
-2.
-3.
-
-### References
-Are there any other GitHub issues (open or closed) that should
-be linked here? For example:
-- GH-1234
-- ...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
index b88f322..bed9ebb 100644
--- a/vendor/github.com/hashicorp/hcl/decoder.go
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -573,7 +573,11 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
// Compile the list of all the fields that we're going to be decoding
// from all the structs.
- fields := make(map[*reflect.StructField]reflect.Value)
+ type field struct {
+ field reflect.StructField
+ val reflect.Value
+ }
+ fields := []field{}
for len(structs) > 0 {
structVal := structs[0]
structs = structs[1:]
@@ -616,7 +620,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
}
// Normal struct field, store it away
- fields[&fieldType] = structVal.Field(i)
+ fields = append(fields, field{fieldType, structVal.Field(i)})
}
}
@@ -624,26 +628,27 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
decodedFields := make([]string, 0, len(fields))
decodedFieldsVal := make([]reflect.Value, 0)
unusedKeysVal := make([]reflect.Value, 0)
- for fieldType, field := range fields {
- if !field.IsValid() {
+ for _, f := range fields {
+ field, fieldValue := f.field, f.val
+ if !fieldValue.IsValid() {
// This should never happen
panic("field is not valid")
}
// If we can't set the field, then it is unexported or something,
// and we just continue onwards.
- if !field.CanSet() {
+ if !fieldValue.CanSet() {
continue
}
- fieldName := fieldType.Name
+ fieldName := field.Name
- tagValue := fieldType.Tag.Get(tagName)
+ tagValue := field.Tag.Get(tagName)
tagParts := strings.SplitN(tagValue, ",", 2)
if len(tagParts) >= 2 {
switch tagParts[1] {
case "decodedFields":
- decodedFieldsVal = append(decodedFieldsVal, field)
+ decodedFieldsVal = append(decodedFieldsVal, fieldValue)
continue
case "key":
if item == nil {
@@ -654,10 +659,10 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
}
}
- field.SetString(item.Keys[0].Token.Value().(string))
+ fieldValue.SetString(item.Keys[0].Token.Value().(string))
continue
case "unusedKeys":
- unusedKeysVal = append(unusedKeysVal, field)
+ unusedKeysVal = append(unusedKeysVal, fieldValue)
continue
}
}
@@ -684,7 +689,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
// because we actually want the value.
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
if len(prefixMatches.Items) > 0 {
- if err := d.decode(fieldName, prefixMatches, field); err != nil {
+ if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
return err
}
}
@@ -694,12 +699,12 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
decodeNode = &ast.ObjectList{Items: ot.List.Items}
}
- if err := d.decode(fieldName, decodeNode, field); err != nil {
+ if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
return err
}
}
- decodedFields = append(decodedFields, fieldType.Name)
+ decodedFields = append(decodedFields, field.Name)
}
if len(decodedFieldsVal) > 0 {
diff --git a/vendor/github.com/hashicorp/hcl/decoder_test.go b/vendor/github.com/hashicorp/hcl/decoder_test.go
deleted file mode 100644
index 8682f47..0000000
--- a/vendor/github.com/hashicorp/hcl/decoder_test.go
+++ /dev/null
@@ -1,1203 +0,0 @@
-package hcl
-
-import (
- "io/ioutil"
- "path/filepath"
- "reflect"
- "testing"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "github.com/hashicorp/hcl/hcl/ast"
-)
-
-func TestDecode_interface(t *testing.T) {
- cases := []struct {
- File string
- Err bool
- Out interface{}
- }{
- {
- "basic.hcl",
- false,
- map[string]interface{}{
- "foo": "bar",
- "bar": "${file(\"bing/bong.txt\")}",
- },
- },
- {
- "basic_squish.hcl",
- false,
- map[string]interface{}{
- "foo": "bar",
- "bar": "${file(\"bing/bong.txt\")}",
- "foo-bar": "baz",
- },
- },
- {
- "empty.hcl",
- false,
- map[string]interface{}{
- "resource": []map[string]interface{}{
- map[string]interface{}{
- "foo": []map[string]interface{}{
- map[string]interface{}{},
- },
- },
- },
- },
- },
- {
- "tfvars.hcl",
- false,
- map[string]interface{}{
- "regularvar": "Should work",
- "map.key1": "Value",
- "map.key2": "Other value",
- },
- },
- {
- "escape.hcl",
- false,
- map[string]interface{}{
- "foo": "bar\"baz\\n",
- "qux": "back\\slash",
- "bar": "new\nline",
- "qax": `slash\:colon`,
- "nested": `${HH\\:mm\\:ss}`,
- "nestedquotes": `${"\"stringwrappedinquotes\""}`,
- },
- },
- {
- "float.hcl",
- false,
- map[string]interface{}{
- "a": 1.02,
- "b": 2,
- },
- },
- {
- "multiline_bad.hcl",
- true,
- nil,
- },
- {
- "multiline_literal.hcl",
- true,
- nil,
- },
- {
- "multiline_literal_with_hil.hcl",
- false,
- map[string]interface{}{"multiline_literal_with_hil": "${hello\n world}"},
- },
- {
- "multiline_no_marker.hcl",
- true,
- nil,
- },
- {
- "multiline.hcl",
- false,
- map[string]interface{}{"foo": "bar\nbaz\n"},
- },
- {
- "multiline_indented.hcl",
- false,
- map[string]interface{}{"foo": " bar\n baz\n"},
- },
- {
- "multiline_no_hanging_indent.hcl",
- false,
- map[string]interface{}{"foo": " baz\n bar\n foo\n"},
- },
- {
- "multiline_no_eof.hcl",
- false,
- map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"},
- },
- {
- "multiline.json",
- false,
- map[string]interface{}{"foo": "bar\nbaz"},
- },
- {
- "null_strings.json",
- false,
- map[string]interface{}{
- "module": []map[string]interface{}{
- map[string]interface{}{
- "app": []map[string]interface{}{
- map[string]interface{}{"foo": ""},
- },
- },
- },
- },
- },
- {
- "scientific.json",
- false,
- map[string]interface{}{
- "a": 1e-10,
- "b": 1e+10,
- "c": 1e10,
- "d": 1.2e-10,
- "e": 1.2e+10,
- "f": 1.2e10,
- },
- },
- {
- "scientific.hcl",
- false,
- map[string]interface{}{
- "a": 1e-10,
- "b": 1e+10,
- "c": 1e10,
- "d": 1.2e-10,
- "e": 1.2e+10,
- "f": 1.2e10,
- },
- },
- {
- "terraform_heroku.hcl",
- false,
- map[string]interface{}{
- "name": "terraform-test-app",
- "config_vars": []map[string]interface{}{
- map[string]interface{}{
- "FOO": "bar",
- },
- },
- },
- },
- {
- "structure_multi.hcl",
- false,
- map[string]interface{}{
- "foo": []map[string]interface{}{
- map[string]interface{}{
- "baz": []map[string]interface{}{
- map[string]interface{}{"key": 7},
- },
- },
- map[string]interface{}{
- "bar": []map[string]interface{}{
- map[string]interface{}{"key": 12},
- },
- },
- },
- },
- },
- {
- "structure_multi.json",
- false,
- map[string]interface{}{
- "foo": []map[string]interface{}{
- map[string]interface{}{
- "baz": []map[string]interface{}{
- map[string]interface{}{"key": 7},
- },
- },
- map[string]interface{}{
- "bar": []map[string]interface{}{
- map[string]interface{}{"key": 12},
- },
- },
- },
- },
- },
- {
- "list_of_lists.hcl",
- false,
- map[string]interface{}{
- "foo": []interface{}{
- []interface{}{"foo"},
- []interface{}{"bar"},
- },
- },
- },
- {
- "list_of_maps.hcl",
- false,
- map[string]interface{}{
- "foo": []interface{}{
- map[string]interface{}{"somekey1": "someval1"},
- map[string]interface{}{"somekey2": "someval2", "someextrakey": "someextraval"},
- },
- },
- },
- {
- "assign_deep.hcl",
- false,
- map[string]interface{}{
- "resource": []interface{}{
- map[string]interface{}{
- "foo": []interface{}{
- map[string]interface{}{
- "bar": []map[string]interface{}{
- map[string]interface{}{}}}}}}},
- },
- {
- "structure_list.hcl",
- false,
- map[string]interface{}{
- "foo": []map[string]interface{}{
- map[string]interface{}{
- "key": 7,
- },
- map[string]interface{}{
- "key": 12,
- },
- },
- },
- },
- {
- "structure_list.json",
- false,
- map[string]interface{}{
- "foo": []map[string]interface{}{
- map[string]interface{}{
- "key": 7,
- },
- map[string]interface{}{
- "key": 12,
- },
- },
- },
- },
- {
- "structure_list_deep.json",
- false,
- map[string]interface{}{
- "bar": []map[string]interface{}{
- map[string]interface{}{
- "foo": []map[string]interface{}{
- map[string]interface{}{
- "name": "terraform_example",
- "ingress": []map[string]interface{}{
- map[string]interface{}{
- "from_port": 22,
- },
- map[string]interface{}{
- "from_port": 80,
- },
- },
- },
- },
- },
- },
- },
- },
-
- {
- "structure_list_empty.json",
- false,
- map[string]interface{}{
- "foo": []interface{}{},
- },
- },
-
- {
- "nested_block_comment.hcl",
- false,
- map[string]interface{}{
- "bar": "value",
- },
- },
-
- {
- "unterminated_block_comment.hcl",
- true,
- nil,
- },
-
- {
- "unterminated_brace.hcl",
- true,
- nil,
- },
-
- {
- "nested_provider_bad.hcl",
- true,
- nil,
- },
-
- {
- "object_list.json",
- false,
- map[string]interface{}{
- "resource": []map[string]interface{}{
- map[string]interface{}{
- "aws_instance": []map[string]interface{}{
- map[string]interface{}{
- "db": []map[string]interface{}{
- map[string]interface{}{
- "vpc": "foo",
- "provisioner": []map[string]interface{}{
- map[string]interface{}{
- "file": []map[string]interface{}{
- map[string]interface{}{
- "source": "foo",
- "destination": "bar",
- },
- },
- },
- },
- },
- },
- },
- },
- },
- },
- },
- },
-
- // Terraform GH-8295 sanity test that basic decoding into
- // interface{} works.
- {
- "terraform_variable_invalid.json",
- false,
- map[string]interface{}{
- "variable": []map[string]interface{}{
- map[string]interface{}{
- "whatever": "abc123",
- },
- },
- },
- },
-
- {
- "interpolate.json",
- false,
- map[string]interface{}{
- "default": `${replace("europe-west", "-", " ")}`,
- },
- },
-
- {
- "block_assign.hcl",
- true,
- nil,
- },
-
- {
- "escape_backslash.hcl",
- false,
- map[string]interface{}{
- "output": []map[string]interface{}{
- map[string]interface{}{
- "one": `${replace(var.sub_domain, ".", "\\.")}`,
- "two": `${replace(var.sub_domain, ".", "\\\\.")}`,
- "many": `${replace(var.sub_domain, ".", "\\\\\\\\.")}`,
- },
- },
- },
- },
-
- {
- "git_crypt.hcl",
- true,
- nil,
- },
-
- {
- "object_with_bool.hcl",
- false,
- map[string]interface{}{
- "path": []map[string]interface{}{
- map[string]interface{}{
- "policy": "write",
- "permissions": []map[string]interface{}{
- map[string]interface{}{
- "bool": []interface{}{false},
- },
- },
- },
- },
- },
- },
- }
-
- for _, tc := range cases {
- t.Run(tc.File, func(t *testing.T) {
- d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var out interface{}
- err = Decode(&out, string(d))
- if (err != nil) != tc.Err {
- t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
- }
-
- if !reflect.DeepEqual(out, tc.Out) {
- t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
- }
-
- var v interface{}
- err = Unmarshal(d, &v)
- if (err != nil) != tc.Err {
- t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
- }
-
- if !reflect.DeepEqual(v, tc.Out) {
- t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
- }
- })
- }
-}
-
-func TestDecode_interfaceInline(t *testing.T) {
- cases := []struct {
- Value string
- Err bool
- Out interface{}
- }{
- {"t t e{{}}", true, nil},
- {"t=0t d {}", true, map[string]interface{}{"t": 0}},
- {"v=0E0v d{}", true, map[string]interface{}{"v": float64(0)}},
- }
-
- for _, tc := range cases {
- t.Logf("Testing: %q", tc.Value)
-
- var out interface{}
- err := Decode(&out, tc.Value)
- if (err != nil) != tc.Err {
- t.Fatalf("Input: %q\n\nError: %s", tc.Value, err)
- }
-
- if !reflect.DeepEqual(out, tc.Out) {
- t.Fatalf("Input: %q. Actual, Expected.\n\n%#v\n\n%#v", tc.Value, out, tc.Out)
- }
-
- var v interface{}
- err = Unmarshal([]byte(tc.Value), &v)
- if (err != nil) != tc.Err {
- t.Fatalf("Input: %q\n\nError: %s", tc.Value, err)
- }
-
- if !reflect.DeepEqual(v, tc.Out) {
- t.Fatalf("Input: %q. Actual, Expected.\n\n%#v\n\n%#v", tc.Value, out, tc.Out)
- }
- }
-}
-
-func TestDecode_equal(t *testing.T) {
- cases := []struct {
- One, Two string
- }{
- {
- "basic.hcl",
- "basic.json",
- },
- {
- "float.hcl",
- "float.json",
- },
- /*
- {
- "structure.hcl",
- "structure.json",
- },
- */
- {
- "structure.hcl",
- "structure_flat.json",
- },
- {
- "terraform_heroku.hcl",
- "terraform_heroku.json",
- },
- }
-
- for _, tc := range cases {
- p1 := filepath.Join(fixtureDir, tc.One)
- p2 := filepath.Join(fixtureDir, tc.Two)
-
- d1, err := ioutil.ReadFile(p1)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- d2, err := ioutil.ReadFile(p2)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var i1, i2 interface{}
- err = Decode(&i1, string(d1))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- err = Decode(&i2, string(d2))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if !reflect.DeepEqual(i1, i2) {
- t.Fatalf(
- "%s != %s\n\n%#v\n\n%#v",
- tc.One, tc.Two,
- i1, i2)
- }
- }
-}
-
-func TestDecode_flatMap(t *testing.T) {
- var val map[string]map[string]string
-
- err := Decode(&val, testReadFile(t, "structure_flatmap.hcl"))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := map[string]map[string]string{
- "foo": map[string]string{
- "foo": "bar",
- "key": "7",
- },
- }
-
- if !reflect.DeepEqual(val, expected) {
- t.Fatalf("Actual: %#v\n\nExpected: %#v", val, expected)
- }
-}
-
-func TestDecode_structure(t *testing.T) {
- type Embedded interface{}
-
- type V struct {
- Embedded `hcl:"-"`
- Key int
- Foo string
- }
-
- var actual V
-
- err := Decode(&actual, testReadFile(t, "flat.hcl"))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := V{
- Key: 7,
- Foo: "bar",
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
- }
-}
-
-func TestDecode_structurePtr(t *testing.T) {
- type V struct {
- Key int
- Foo string
- }
-
- var actual *V
-
- err := Decode(&actual, testReadFile(t, "flat.hcl"))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := &V{
- Key: 7,
- Foo: "bar",
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
- }
-}
-
-func TestDecode_structureArray(t *testing.T) {
- // This test is extracted from a failure in Consul (consul.io),
- // hence the interesting structure naming.
-
- type KeyPolicyType string
-
- type KeyPolicy struct {
- Prefix string `hcl:",key"`
- Policy KeyPolicyType
- }
-
- type Policy struct {
- Keys []KeyPolicy `hcl:"key,expand"`
- }
-
- expected := Policy{
- Keys: []KeyPolicy{
- KeyPolicy{
- Prefix: "",
- Policy: "read",
- },
- KeyPolicy{
- Prefix: "foo/",
- Policy: "write",
- },
- KeyPolicy{
- Prefix: "foo/bar/",
- Policy: "read",
- },
- KeyPolicy{
- Prefix: "foo/bar/baz",
- Policy: "deny",
- },
- },
- }
-
- files := []string{
- "decode_policy.hcl",
- "decode_policy.json",
- }
-
- for _, f := range files {
- var actual Policy
-
- err := Decode(&actual, testReadFile(t, f))
- if err != nil {
- t.Fatalf("Input: %s\n\nerr: %s", f, err)
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
- }
- }
-}
-
-func TestDecode_sliceExpand(t *testing.T) {
- type testInner struct {
- Name string `hcl:",key"`
- Key string
- }
-
- type testStruct struct {
- Services []testInner `hcl:"service,expand"`
- }
-
- expected := testStruct{
- Services: []testInner{
- testInner{
- Name: "my-service-0",
- Key: "value",
- },
- testInner{
- Name: "my-service-1",
- Key: "value",
- },
- },
- }
-
- files := []string{
- "slice_expand.hcl",
- }
-
- for _, f := range files {
- t.Logf("Testing: %s", f)
-
- var actual testStruct
- err := Decode(&actual, testReadFile(t, f))
- if err != nil {
- t.Fatalf("Input: %s\n\nerr: %s", f, err)
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
- }
- }
-}
-
-func TestDecode_structureMap(t *testing.T) {
- // This test is extracted from a failure in Terraform (terraform.io),
- // hence the interesting structure naming.
-
- type hclVariable struct {
- Default interface{}
- Description string
- Fields []string `hcl:",decodedFields"`
- }
-
- type rawConfig struct {
- Variable map[string]hclVariable
- }
-
- expected := rawConfig{
- Variable: map[string]hclVariable{
- "foo": hclVariable{
- Default: "bar",
- Description: "bar",
- Fields: []string{"Default", "Description"},
- },
-
- "amis": hclVariable{
- Default: []map[string]interface{}{
- map[string]interface{}{
- "east": "foo",
- },
- },
- Fields: []string{"Default"},
- },
- },
- }
-
- files := []string{
- "decode_tf_variable.hcl",
- "decode_tf_variable.json",
- }
-
- for _, f := range files {
- t.Logf("Testing: %s", f)
-
- var actual rawConfig
- err := Decode(&actual, testReadFile(t, f))
- if err != nil {
- t.Fatalf("Input: %s\n\nerr: %s", f, err)
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
- }
- }
-}
-
-func TestDecode_structureMapInvalid(t *testing.T) {
- // Terraform GH-8295
-
- type hclVariable struct {
- Default interface{}
- Description string
- Fields []string `hcl:",decodedFields"`
- }
-
- type rawConfig struct {
- Variable map[string]*hclVariable
- }
-
- var actual rawConfig
- err := Decode(&actual, testReadFile(t, "terraform_variable_invalid.json"))
- if err == nil {
- t.Fatal("expected error")
- }
-}
-
-func TestDecode_interfaceNonPointer(t *testing.T) {
- var value interface{}
- err := Decode(value, testReadFile(t, "basic_int_string.hcl"))
- if err == nil {
- t.Fatal("should error")
- }
-}
-
-func TestDecode_intString(t *testing.T) {
- var value struct {
- Count int
- }
-
- err := Decode(&value, testReadFile(t, "basic_int_string.hcl"))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if value.Count != 3 {
- t.Fatalf("bad: %#v", value.Count)
- }
-}
-
-func TestDecode_float32(t *testing.T) {
- var value struct {
- A float32 `hcl:"a"`
- B float32 `hcl:"b"`
- }
-
- err := Decode(&value, testReadFile(t, "float.hcl"))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if got, want := value.A, float32(1.02); got != want {
- t.Fatalf("wrong result %#v; want %#v", got, want)
- }
- if got, want := value.B, float32(2); got != want {
- t.Fatalf("wrong result %#v; want %#v", got, want)
- }
-}
-
-func TestDecode_float64(t *testing.T) {
- var value struct {
- A float64 `hcl:"a"`
- B float64 `hcl:"b"`
- }
-
- err := Decode(&value, testReadFile(t, "float.hcl"))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if got, want := value.A, float64(1.02); got != want {
- t.Fatalf("wrong result %#v; want %#v", got, want)
- }
- if got, want := value.B, float64(2); got != want {
- t.Fatalf("wrong result %#v; want %#v", got, want)
- }
-}
-
-func TestDecode_intStringAliased(t *testing.T) {
- var value struct {
- Count time.Duration
- }
-
- err := Decode(&value, testReadFile(t, "basic_int_string.hcl"))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if value.Count != time.Duration(3) {
- t.Fatalf("bad: %#v", value.Count)
- }
-}
-
-func TestDecode_Node(t *testing.T) {
- // given
- var value struct {
- Content ast.Node
- Nested struct {
- Content ast.Node
- }
- }
-
- content := `
-content {
- hello = "world"
-}
-`
-
- // when
- err := Decode(&value, content)
-
- // then
- if err != nil {
- t.Errorf("unable to decode content, %v", err)
- return
- }
-
- // verify ast.Node can be decoded later
- var v map[string]interface{}
- err = DecodeObject(&v, value.Content)
- if err != nil {
- t.Errorf("unable to decode content, %v", err)
- return
- }
-
- if v["hello"] != "world" {
- t.Errorf("expected mapping to be returned")
- }
-}
-
-func TestDecode_NestedNode(t *testing.T) {
- // given
- var value struct {
- Nested struct {
- Content ast.Node
- }
- }
-
- content := `
-nested "content" {
- hello = "world"
-}
-`
-
- // when
- err := Decode(&value, content)
-
- // then
- if err != nil {
- t.Errorf("unable to decode content, %v", err)
- return
- }
-
- // verify ast.Node can be decoded later
- var v map[string]interface{}
- err = DecodeObject(&v, value.Nested.Content)
- if err != nil {
- t.Errorf("unable to decode content, %v", err)
- return
- }
-
- if v["hello"] != "world" {
- t.Errorf("expected mapping to be returned")
- }
-}
-
-// https://github.com/hashicorp/hcl/issues/60
-func TestDecode_topLevelKeys(t *testing.T) {
- type Template struct {
- Source string
- }
-
- templates := struct {
- Templates []*Template `hcl:"template"`
- }{}
-
- err := Decode(&templates, `
- template {
- source = "blah"
- }
-
- template {
- source = "blahblah"
- }`)
-
- if err != nil {
- t.Fatal(err)
- }
-
- if templates.Templates[0].Source != "blah" {
- t.Errorf("bad source: %s", templates.Templates[0].Source)
- }
-
- if templates.Templates[1].Source != "blahblah" {
- t.Errorf("bad source: %s", templates.Templates[1].Source)
- }
-}
-
-func TestDecode_flattenedJSON(t *testing.T) {
- // make sure we can also correctly extract a Name key too
- type V struct {
- Name string `hcl:",key"`
- Description string
- Default map[string]string
- }
- type Vars struct {
- Variable []*V
- }
-
- cases := []struct {
- JSON string
- Out interface{}
- Expected interface{}
- }{
- { // Nested object, no sibling keys
- JSON: `
-{
- "var_name": {
- "default": {
- "key1": "a",
- "key2": "b"
- }
- }
-}
- `,
- Out: &[]*V{},
- Expected: &[]*V{
- &V{
- Name: "var_name",
- Default: map[string]string{"key1": "a", "key2": "b"},
- },
- },
- },
-
- { // Nested object with a sibling key (this worked previously)
- JSON: `
-{
- "var_name": {
- "description": "Described",
- "default": {
- "key1": "a",
- "key2": "b"
- }
- }
-}
- `,
- Out: &[]*V{},
- Expected: &[]*V{
- &V{
- Name: "var_name",
- Description: "Described",
- Default: map[string]string{"key1": "a", "key2": "b"},
- },
- },
- },
-
- { // Multiple nested objects, one with a sibling key
- JSON: `
-{
- "variable": {
- "var_1": {
- "default": {
- "key1": "a",
- "key2": "b"
- }
- },
- "var_2": {
- "description": "Described",
- "default": {
- "key1": "a",
- "key2": "b"
- }
- }
- }
-}
- `,
- Out: &Vars{},
- Expected: &Vars{
- Variable: []*V{
- &V{
- Name: "var_1",
- Default: map[string]string{"key1": "a", "key2": "b"},
- },
- &V{
- Name: "var_2",
- Description: "Described",
- Default: map[string]string{"key1": "a", "key2": "b"},
- },
- },
- },
- },
-
- { // Nested object to maps
- JSON: `
-{
- "variable": {
- "var_name": {
- "description": "Described",
- "default": {
- "key1": "a",
- "key2": "b"
- }
- }
- }
-}
- `,
- Out: &[]map[string]interface{}{},
- Expected: &[]map[string]interface{}{
- {
- "variable": []map[string]interface{}{
- {
- "var_name": []map[string]interface{}{
- {
- "description": "Described",
- "default": []map[string]interface{}{
- {
- "key1": "a",
- "key2": "b",
- },
- },
- },
- },
- },
- },
- },
- },
- },
-
- { // Nested object to maps without a sibling key should decode the same as above
- JSON: `
-{
- "variable": {
- "var_name": {
- "default": {
- "key1": "a",
- "key2": "b"
- }
- }
- }
-}
- `,
- Out: &[]map[string]interface{}{},
- Expected: &[]map[string]interface{}{
- {
- "variable": []map[string]interface{}{
- {
- "var_name": []map[string]interface{}{
- {
- "default": []map[string]interface{}{
- {
- "key1": "a",
- "key2": "b",
- },
- },
- },
- },
- },
- },
- },
- },
- },
-
- { // Nested objects, one with a sibling key, and one without
- JSON: `
-{
- "variable": {
- "var_1": {
- "default": {
- "key1": "a",
- "key2": "b"
- }
- },
- "var_2": {
- "description": "Described",
- "default": {
- "key1": "a",
- "key2": "b"
- }
- }
- }
-}
- `,
- Out: &[]map[string]interface{}{},
- Expected: &[]map[string]interface{}{
- {
- "variable": []map[string]interface{}{
- {
- "var_1": []map[string]interface{}{
- {
- "default": []map[string]interface{}{
- {
- "key1": "a",
- "key2": "b",
- },
- },
- },
- },
- },
- },
- },
- {
- "variable": []map[string]interface{}{
- {
- "var_2": []map[string]interface{}{
- {
- "description": "Described",
- "default": []map[string]interface{}{
- {
- "key1": "a",
- "key2": "b",
- },
- },
- },
- },
- },
- },
- },
- },
- },
- }
-
- for i, tc := range cases {
- err := Decode(tc.Out, tc.JSON)
- if err != nil {
- t.Fatalf("[%d] err: %s", i, err)
- }
-
- if !reflect.DeepEqual(tc.Out, tc.Expected) {
- t.Fatalf("[%d]\ngot: %s\nexpected: %s\n", i, spew.Sdump(tc.Out), spew.Sdump(tc.Expected))
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go
deleted file mode 100644
index 942256c..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package ast
-
-import (
- "reflect"
- "strings"
- "testing"
-
- "github.com/hashicorp/hcl/hcl/token"
-)
-
-func TestObjectListFilter(t *testing.T) {
- var cases = []struct {
- Filter []string
- Input []*ObjectItem
- Output []*ObjectItem
- }{
- {
- []string{"foo"},
- []*ObjectItem{
- &ObjectItem{
- Keys: []*ObjectKey{
- &ObjectKey{
- Token: token.Token{Type: token.STRING, Text: `"foo"`},
- },
- },
- },
- },
- []*ObjectItem{
- &ObjectItem{
- Keys: []*ObjectKey{},
- },
- },
- },
-
- {
- []string{"foo"},
- []*ObjectItem{
- &ObjectItem{
- Keys: []*ObjectKey{
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
- },
- },
- &ObjectItem{
- Keys: []*ObjectKey{
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
- },
- },
- },
- []*ObjectItem{
- &ObjectItem{
- Keys: []*ObjectKey{
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
- },
- },
- },
- },
- }
-
- for _, tc := range cases {
- input := &ObjectList{Items: tc.Input}
- expected := &ObjectList{Items: tc.Output}
- if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) {
- t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual)
- }
- }
-}
-
-func TestWalk(t *testing.T) {
- items := []*ObjectItem{
- &ObjectItem{
- Keys: []*ObjectKey{
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
- },
- Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}},
- },
- &ObjectItem{
- Keys: []*ObjectKey{
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
- },
- },
- }
-
- node := &ObjectList{Items: items}
-
- order := []string{
- "*ast.ObjectList",
- "*ast.ObjectItem",
- "*ast.ObjectKey",
- "*ast.ObjectKey",
- "*ast.LiteralType",
- "*ast.ObjectItem",
- "*ast.ObjectKey",
- }
- count := 0
-
- Walk(node, func(n Node) (Node, bool) {
- if n == nil {
- return n, false
- }
-
- typeName := reflect.TypeOf(n).String()
- if order[count] != typeName {
- t.Errorf("expected '%s' got: '%s'", order[count], typeName)
- }
- count++
- return n, true
- })
-}
-
-func TestWalkEquality(t *testing.T) {
- items := []*ObjectItem{
- &ObjectItem{
- Keys: []*ObjectKey{
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
- },
- },
- &ObjectItem{
- Keys: []*ObjectKey{
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
- },
- },
- }
-
- node := &ObjectList{Items: items}
-
- rewritten := Walk(node, func(n Node) (Node, bool) { return n, true })
-
- newNode, ok := rewritten.(*ObjectList)
- if !ok {
- t.Fatalf("expected Objectlist, got %T", rewritten)
- }
-
- if !reflect.DeepEqual(node, newNode) {
- t.Fatal("rewritten node is not equal to the given node")
- }
-
- if len(newNode.Items) != 2 {
- t.Error("expected newNode length 2, got: %d", len(newNode.Items))
- }
-
- expected := []string{
- `"foo"`,
- `"bar"`,
- }
-
- for i, item := range newNode.Items {
- if len(item.Keys) != 1 {
- t.Error("expected keys newNode length 1, got: %d", len(item.Keys))
- }
-
- if item.Keys[0].Token.Text != expected[i] {
- t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text)
- }
-
- if item.Val != nil {
- t.Errorf("expected item value should be nil")
- }
- }
-}
-
-func TestWalkRewrite(t *testing.T) {
- items := []*ObjectItem{
- &ObjectItem{
- Keys: []*ObjectKey{
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
- },
- },
- &ObjectItem{
- Keys: []*ObjectKey{
- &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
- },
- },
- }
-
- node := &ObjectList{Items: items}
-
- suffix := "_example"
- node = Walk(node, func(n Node) (Node, bool) {
- switch i := n.(type) {
- case *ObjectKey:
- i.Token.Text = i.Token.Text + suffix
- n = i
- }
- return n, true
- }).(*ObjectList)
-
- Walk(node, func(n Node) (Node, bool) {
- switch i := n.(type) {
- case *ObjectKey:
- if !strings.HasSuffix(i.Token.Text, suffix) {
- t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix)
- }
- }
- return n, true
- })
-
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
deleted file mode 100644
index 2380d71..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Derivative work from:
-// - https://golang.org/src/cmd/gofmt/gofmt.go
-// - https://github.com/fatih/hclfmt
-
-package fmtcmd
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
-
- "github.com/hashicorp/hcl/hcl/printer"
-)
-
-var (
- ErrWriteStdin = errors.New("cannot use write option with standard input")
-)
-
-type Options struct {
- List bool // list files whose formatting differs
- Write bool // write result to (source) file instead of stdout
- Diff bool // display diffs of formatting changes
-}
-
-func isValidFile(f os.FileInfo, extensions []string) bool {
- if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") {
- for _, ext := range extensions {
- if strings.HasSuffix(f.Name(), "."+ext) {
- return true
- }
- }
- }
-
- return false
-}
-
-// If in == nil, the source is the contents of the file with the given filename.
-func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error {
- if in == nil {
- f, err := os.Open(filename)
- if err != nil {
- return err
- }
- defer f.Close()
- in = f
- }
-
- src, err := ioutil.ReadAll(in)
- if err != nil {
- return err
- }
-
- res, err := printer.Format(src)
- if err != nil {
- return fmt.Errorf("In %s: %s", filename, err)
- }
-
- if !bytes.Equal(src, res) {
- // formatting has changed
- if opts.List {
- fmt.Fprintln(out, filename)
- }
- if opts.Write {
- err = ioutil.WriteFile(filename, res, 0644)
- if err != nil {
- return err
- }
- }
- if opts.Diff {
- data, err := diff(src, res)
- if err != nil {
- return fmt.Errorf("computing diff: %s", err)
- }
- fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename)
- out.Write(data)
- }
- }
-
- if !opts.List && !opts.Write && !opts.Diff {
- _, err = out.Write(res)
- }
-
- return err
-}
-
-func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error {
- visitFile := func(path string, f os.FileInfo, err error) error {
- if err == nil && isValidFile(f, extensions) {
- err = processFile(path, nil, stdout, false, opts)
- }
- return err
- }
-
- return filepath.Walk(path, visitFile)
-}
-
-func Run(
- paths, extensions []string,
- stdin io.Reader,
- stdout io.Writer,
- opts Options,
-) error {
- if len(paths) == 0 {
- if opts.Write {
- return ErrWriteStdin
- }
- if err := processFile("", stdin, stdout, true, opts); err != nil {
- return err
- }
- return nil
- }
-
- for _, path := range paths {
- switch dir, err := os.Stat(path); {
- case err != nil:
- return err
- case dir.IsDir():
- if err := walkDir(path, extensions, stdout, opts); err != nil {
- return err
- }
- default:
- if err := processFile(path, nil, stdout, false, opts); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func diff(b1, b2 []byte) (data []byte, err error) {
- f1, err := ioutil.TempFile("", "")
- if err != nil {
- return
- }
- defer os.Remove(f1.Name())
- defer f1.Close()
-
- f2, err := ioutil.TempFile("", "")
- if err != nil {
- return
- }
- defer os.Remove(f2.Name())
- defer f2.Close()
-
- f1.Write(b1)
- f2.Write(b2)
-
- data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
- if len(data) > 0 {
- // diff exits with a non-zero status when the files don't match.
- // Ignore that failure as long as we get output.
- err = nil
- }
- return
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go
deleted file mode 100644
index b952d76..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go
+++ /dev/null
@@ -1,440 +0,0 @@
-// +build !windows
-// TODO(jen20): These need fixing on Windows but fmt is not used right now
-// and red CI is making it harder to process other bugs, so ignore until
-// we get around to fixing them.
-
-package fmtcmd
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "reflect"
- "regexp"
- "sort"
- "syscall"
- "testing"
-
- "github.com/hashicorp/hcl/testhelper"
-)
-
-var fixtureExtensions = []string{"hcl"}
-
-func init() {
- sort.Sort(ByFilename(fixtures))
-}
-
-func TestIsValidFile(t *testing.T) {
- const fixtureDir = "./test-fixtures"
-
- cases := []struct {
- Path string
- Expected bool
- }{
- {"good.hcl", true},
- {".hidden.ignore", false},
- {"file.ignore", false},
- {"dir.ignore", false},
- }
-
- for _, tc := range cases {
- file, err := os.Stat(filepath.Join(fixtureDir, tc.Path))
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
-
- if res := isValidFile(file, fixtureExtensions); res != tc.Expected {
- t.Errorf("want: %b, got: %b", tc.Expected, res)
- }
- }
-}
-
-func TestRunMultiplePaths(t *testing.T) {
- path1, err := renderFixtures("")
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- defer os.RemoveAll(path1)
- path2, err := renderFixtures("")
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- defer os.RemoveAll(path2)
-
- var expectedOut bytes.Buffer
- for _, path := range []string{path1, path2} {
- for _, fixture := range fixtures {
- if !bytes.Equal(fixture.golden, fixture.input) {
- expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
- }
- }
- }
-
- _, stdout := mockIO()
- err = Run(
- []string{path1, path2},
- fixtureExtensions,
- nil, stdout,
- Options{
- List: true,
- },
- )
-
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- if stdout.String() != expectedOut.String() {
- t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
- }
-}
-
-func TestRunSubDirectories(t *testing.T) {
- pathParent, err := ioutil.TempDir("", "")
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- defer os.RemoveAll(pathParent)
-
- path1, err := renderFixtures(pathParent)
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- path2, err := renderFixtures(pathParent)
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
-
- paths := []string{path1, path2}
- sort.Strings(paths)
-
- var expectedOut bytes.Buffer
- for _, path := range paths {
- for _, fixture := range fixtures {
- if !bytes.Equal(fixture.golden, fixture.input) {
- expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
- }
- }
- }
-
- _, stdout := mockIO()
- err = Run(
- []string{pathParent},
- fixtureExtensions,
- nil, stdout,
- Options{
- List: true,
- },
- )
-
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- if stdout.String() != expectedOut.String() {
- t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
- }
-}
-
-func TestRunStdin(t *testing.T) {
- var expectedOut bytes.Buffer
- for i, fixture := range fixtures {
- if i != 0 {
- expectedOut.WriteString("\n")
- }
- expectedOut.Write(fixture.golden)
- }
-
- stdin, stdout := mockIO()
- for _, fixture := range fixtures {
- stdin.Write(fixture.input)
- }
-
- err := Run(
- []string{},
- fixtureExtensions,
- stdin, stdout,
- Options{},
- )
-
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- if !bytes.Equal(stdout.Bytes(), expectedOut.Bytes()) {
- t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
- }
-}
-
-func TestRunStdinAndWrite(t *testing.T) {
- var expectedOut = []byte{}
-
- stdin, stdout := mockIO()
- stdin.WriteString("")
- err := Run(
- []string{}, []string{},
- stdin, stdout,
- Options{
- Write: true,
- },
- )
-
- if err != ErrWriteStdin {
- t.Errorf("error want:\n%s\ngot:\n%s", ErrWriteStdin, err)
- }
- if !bytes.Equal(stdout.Bytes(), expectedOut) {
- t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
- }
-}
-
-func TestRunFileError(t *testing.T) {
- path, err := ioutil.TempDir("", "")
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- defer os.RemoveAll(path)
- filename := filepath.Join(path, "unreadable.hcl")
-
- var expectedError = &os.PathError{
- Op: "open",
- Path: filename,
- Err: syscall.EACCES,
- }
-
- err = ioutil.WriteFile(filename, []byte{}, 0000)
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
-
- _, stdout := mockIO()
- err = Run(
- []string{path},
- fixtureExtensions,
- nil, stdout,
- Options{},
- )
-
- if !reflect.DeepEqual(err, expectedError) {
- t.Errorf("error want: %#v, got: %#v", expectedError, err)
- }
-}
-
-func TestRunNoOptions(t *testing.T) {
- path, err := renderFixtures("")
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- defer os.RemoveAll(path)
-
- var expectedOut bytes.Buffer
- for _, fixture := range fixtures {
- expectedOut.Write(fixture.golden)
- }
-
- _, stdout := mockIO()
- err = Run(
- []string{path},
- fixtureExtensions,
- nil, stdout,
- Options{},
- )
-
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- if stdout.String() != expectedOut.String() {
- t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
- }
-}
-
-func TestRunList(t *testing.T) {
- path, err := renderFixtures("")
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- defer os.RemoveAll(path)
-
- var expectedOut bytes.Buffer
- for _, fixture := range fixtures {
- if !bytes.Equal(fixture.golden, fixture.input) {
- expectedOut.WriteString(fmt.Sprintln(filepath.Join(path, fixture.filename)))
- }
- }
-
- _, stdout := mockIO()
- err = Run(
- []string{path},
- fixtureExtensions,
- nil, stdout,
- Options{
- List: true,
- },
- )
-
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- if stdout.String() != expectedOut.String() {
- t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
- }
-}
-
-func TestRunWrite(t *testing.T) {
- path, err := renderFixtures("")
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- defer os.RemoveAll(path)
-
- _, stdout := mockIO()
- err = Run(
- []string{path},
- fixtureExtensions,
- nil, stdout,
- Options{
- Write: true,
- },
- )
-
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- for _, fixture := range fixtures {
- res, err := ioutil.ReadFile(filepath.Join(path, fixture.filename))
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- if !bytes.Equal(res, fixture.golden) {
- t.Errorf("file %q contents want:\n%s\ngot:\n%s", fixture.filename, fixture.golden, res)
- }
- }
-}
-
-func TestRunDiff(t *testing.T) {
- path, err := renderFixtures("")
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- defer os.RemoveAll(path)
-
- var expectedOut bytes.Buffer
- for _, fixture := range fixtures {
- if len(fixture.diff) > 0 {
- expectedOut.WriteString(
- regexp.QuoteMeta(
- fmt.Sprintf("diff a/%s/%s b/%s/%s\n", path, fixture.filename, path, fixture.filename),
- ),
- )
- // Need to use regex to ignore datetimes in diff.
- expectedOut.WriteString(`--- .+?\n`)
- expectedOut.WriteString(`\+\+\+ .+?\n`)
- expectedOut.WriteString(regexp.QuoteMeta(string(fixture.diff)))
- }
- }
-
- expectedOutString := testhelper.Unix2dos(expectedOut.String())
-
- _, stdout := mockIO()
- err = Run(
- []string{path},
- fixtureExtensions,
- nil, stdout,
- Options{
- Diff: true,
- },
- )
-
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- }
- if !regexp.MustCompile(expectedOutString).Match(stdout.Bytes()) {
- t.Errorf("stdout want match:\n%s\ngot:\n%q", expectedOutString, stdout)
- }
-}
-
-func mockIO() (stdin, stdout *bytes.Buffer) {
- return new(bytes.Buffer), new(bytes.Buffer)
-}
-
-type fixture struct {
- filename string
- input, golden, diff []byte
-}
-
-type ByFilename []fixture
-
-func (s ByFilename) Len() int { return len(s) }
-func (s ByFilename) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s ByFilename) Less(i, j int) bool { return len(s[i].filename) > len(s[j].filename) }
-
-var fixtures = []fixture{
- {
- "noop.hcl",
- []byte(`resource "aws_security_group" "firewall" {
- count = 5
-}
-`),
- []byte(`resource "aws_security_group" "firewall" {
- count = 5
-}
-`),
- []byte(``),
- }, {
- "align_equals.hcl",
- []byte(`variable "foo" {
- default = "bar"
- description = "bar"
-}
-`),
- []byte(`variable "foo" {
- default = "bar"
- description = "bar"
-}
-`),
- []byte(`@@ -1,4 +1,4 @@
- variable "foo" {
-- default = "bar"
-+ default = "bar"
- description = "bar"
- }
-`),
- }, {
- "indentation.hcl",
- []byte(`provider "aws" {
- access_key = "foo"
- secret_key = "bar"
-}
-`),
- []byte(`provider "aws" {
- access_key = "foo"
- secret_key = "bar"
-}
-`),
- []byte(`@@ -1,4 +1,4 @@
- provider "aws" {
-- access_key = "foo"
-- secret_key = "bar"
-+ access_key = "foo"
-+ secret_key = "bar"
- }
-`),
- },
-}
-
-// parent can be an empty string, in which case the system's default
-// temporary directory will be used.
-func renderFixtures(parent string) (path string, err error) {
- path, err = ioutil.TempDir(parent, "")
- if err != nil {
- return "", err
- }
-
- for _, fixture := range fixtures {
- err = ioutil.WriteFile(filepath.Join(path, fixture.filename), []byte(fixture.input), 0644)
- if err != nil {
- os.RemoveAll(path)
- return "", err
- }
- }
-
- return path, nil
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore
deleted file mode 100644
index 9977a28..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore
+++ /dev/null
@@ -1 +0,0 @@
-invalid
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore
deleted file mode 100644
index e69de29..0000000
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore
deleted file mode 100644
index 9977a28..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore
+++ /dev/null
@@ -1 +0,0 @@
-invalid
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/good.hcl b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/good.hcl
deleted file mode 100644
index e69de29..0000000
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go
deleted file mode 100644
index 32399fe..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package parser
-
-import (
- "testing"
-)
-
-func TestPosError_impl(t *testing.T) {
- var _ error = new(PosError)
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
index 098e1bc..64c83bc 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -205,6 +205,12 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
}
}
+ // key=#comment
+ // val
+ if p.lineComment != nil {
+ o.LineComment, p.lineComment = p.lineComment, nil
+ }
+
// do a look-ahead for line comment
p.scan()
if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go
deleted file mode 100644
index 2702122..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go
+++ /dev/null
@@ -1,575 +0,0 @@
-package parser
-
-import (
- "fmt"
- "io/ioutil"
- "path/filepath"
- "reflect"
- "runtime"
- "strings"
- "testing"
-
- "github.com/hashicorp/hcl/hcl/ast"
- "github.com/hashicorp/hcl/hcl/token"
-)
-
-func TestType(t *testing.T) {
- var literals = []struct {
- typ token.Type
- src string
- }{
- {token.STRING, `foo = "foo"`},
- {token.NUMBER, `foo = 123`},
- {token.NUMBER, `foo = -29`},
- {token.FLOAT, `foo = 123.12`},
- {token.FLOAT, `foo = -123.12`},
- {token.BOOL, `foo = true`},
- {token.HEREDOC, "foo = < 0 && !lastHadLeadComment {
- buf.WriteByte(newline)
- }
-
- for _, comment := range lit.LeadComment.List {
- buf.Write(p.indent([]byte(comment.Text)))
- buf.WriteByte(newline)
- }
- }
-
- // also indent each line
- val := p.output(item)
- curLen := len(val)
- buf.Write(p.indent(val))
-
- // if this item is a heredoc, then we output the comma on
- // the next line. This is the only case this happens.
- comma := []byte{','}
- if heredoc {
- buf.WriteByte(newline)
- comma = p.indent(comma)
- }
-
- buf.Write(comma)
-
- if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
- // if the next item doesn't have any comments, do not align
- buf.WriteByte(blank) // align one space
- for i := 0; i < longestLine-curLen; i++ {
- buf.WriteByte(blank)
- }
-
- for _, comment := range lit.LineComment.List {
- buf.WriteString(comment.Text)
- }
- }
-
- lastItem := i == len(l.List)-1
- if lastItem {
- buf.WriteByte(newline)
- }
-
- if leadComment && !lastItem {
- buf.WriteByte(newline)
- }
-
- lastHadLeadComment = leadComment
- } else {
- if insertSpaceBeforeItem {
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+ // if the next item doesn't have any comments, do not align
+ buf.WriteByte(blank) // align one space
+ for i := 0; i < longestLine-curLen; i++ {
buf.WriteByte(blank)
- insertSpaceBeforeItem = false
}
- // Output the item itself
- // also indent each line
- val := p.output(item)
- curLen := len(val)
- buf.Write(val)
-
- // If this is a heredoc item we always have to output a newline
- // so that it parses properly.
- if heredoc {
- buf.WriteByte(newline)
- }
-
- // If this isn't the last element, write a comma.
- if i != len(l.List)-1 {
- buf.WriteString(",")
- insertSpaceBeforeItem = true
- }
-
- if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
- // if the next item doesn't have any comments, do not align
- buf.WriteByte(blank) // align one space
- for i := 0; i < longestLine-curLen; i++ {
- buf.WriteByte(blank)
- }
-
- for _, comment := range lit.LineComment.List {
- buf.WriteString(comment.Text)
- }
+ for _, comment := range lit.LineComment.List {
+ buf.WriteString(comment.Text)
}
}
+ buf.WriteByte(newline)
+
+ // Ensure an empty line after every element with a
+ // lead comment (except the first item in a list).
+ haveEmptyLine = leadComment && i != len(l.List)-1
+ if haveEmptyLine {
+ buf.WriteByte(newline)
+ }
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// isSingleLineList returns true if:
+// * they were previously formatted entirely on one line
+// * they consist entirely of literals
+// * there are either no heredoc strings or the list has exactly one element
+// * there are no line comments
+func (printer) isSingleLineList(l *ast.ListType) bool {
+ for _, item := range l.List {
+ if item.Pos().Line != l.Lbrack.Line {
+ return false
+ }
+
+ lit, ok := item.(*ast.LiteralType)
+ if !ok {
+ return false
+ }
+
+ if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
+ return false
+ }
+
+ if lit.LineComment != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// singleLineList prints a simple single line list.
+// For a definition of "simple", see isSingleLineList above.
+func (p *printer) singleLineList(l *ast.ListType) []byte {
+ buf := &bytes.Buffer{}
+
+ buf.WriteString("[")
+ for i, item := range l.List {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+
+ // Output the item itself
+ buf.Write(p.output(item))
+
+ // The heredoc marker needs to be at the end of line.
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ buf.WriteByte(newline)
+ }
}
buf.WriteString("]")
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
deleted file mode 100644
index 5248259..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package printer
-
-import (
- "bytes"
- "errors"
- "flag"
- "fmt"
- "io/ioutil"
- "path/filepath"
- "testing"
-
- "github.com/hashicorp/hcl/hcl/parser"
-)
-
-var update = flag.Bool("update", false, "update golden files")
-
-const (
- dataDir = "testdata"
-)
-
-type entry struct {
- source, golden string
-}
-
-// Use go test -update to create/update the respective golden files.
-var data = []entry{
- {"complexhcl.input", "complexhcl.golden"},
- {"list.input", "list.golden"},
- {"list_comment.input", "list_comment.golden"},
- {"comment.input", "comment.golden"},
- {"comment_crlf.input", "comment.golden"},
- {"comment_aligned.input", "comment_aligned.golden"},
- {"comment_array.input", "comment_array.golden"},
- {"comment_end_file.input", "comment_end_file.golden"},
- {"comment_multiline_indent.input", "comment_multiline_indent.golden"},
- {"comment_multiline_no_stanza.input", "comment_multiline_no_stanza.golden"},
- {"comment_multiline_stanza.input", "comment_multiline_stanza.golden"},
- {"comment_newline.input", "comment_newline.golden"},
- {"comment_object_multi.input", "comment_object_multi.golden"},
- {"comment_standalone.input", "comment_standalone.golden"},
- {"empty_block.input", "empty_block.golden"},
- {"list_of_objects.input", "list_of_objects.golden"},
- {"multiline_string.input", "multiline_string.golden"},
- {"object_singleline.input", "object_singleline.golden"},
- {"object_with_heredoc.input", "object_with_heredoc.golden"},
-}
-
-func TestFiles(t *testing.T) {
- for _, e := range data {
- source := filepath.Join(dataDir, e.source)
- golden := filepath.Join(dataDir, e.golden)
- t.Run(e.source, func(t *testing.T) {
- check(t, source, golden)
- })
- }
-}
-
-func check(t *testing.T, source, golden string) {
- src, err := ioutil.ReadFile(source)
- if err != nil {
- t.Error(err)
- return
- }
-
- res, err := format(src)
- if err != nil {
- t.Error(err)
- return
- }
-
- // update golden files if necessary
- if *update {
- if err := ioutil.WriteFile(golden, res, 0644); err != nil {
- t.Error(err)
- }
- return
- }
-
- // get golden
- gld, err := ioutil.ReadFile(golden)
- if err != nil {
- t.Error(err)
- return
- }
-
- // formatted source and golden must be the same
- if err := diff(source, golden, res, gld); err != nil {
- t.Error(err)
- return
- }
-}
-
-// diff compares a and b.
-func diff(aname, bname string, a, b []byte) error {
- var buf bytes.Buffer // holding long error message
-
- // compare lengths
- if len(a) != len(b) {
- fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b))
- }
-
- // compare contents
- line := 1
- offs := 1
- for i := 0; i < len(a) && i < len(b); i++ {
- ch := a[i]
- if ch != b[i] {
- fmt.Fprintf(&buf, "\n%s:%d:%d: %q", aname, line, i-offs+1, lineAt(a, offs))
- fmt.Fprintf(&buf, "\n%s:%d:%d: %q", bname, line, i-offs+1, lineAt(b, offs))
- fmt.Fprintf(&buf, "\n\n")
- break
- }
- if ch == '\n' {
- line++
- offs = i + 1
- }
- }
-
- if buf.Len() > 0 {
- return errors.New(buf.String())
- }
- return nil
-}
-
-// format parses src, prints the corresponding AST, verifies the resulting
-// src is syntactically correct, and returns the resulting src or an error
-// if any.
-func format(src []byte) ([]byte, error) {
- formatted, err := Format(src)
- if err != nil {
- return nil, err
- }
-
- // make sure formatted output is syntactically correct
- if _, err := parser.Parse(formatted); err != nil {
- return nil, fmt.Errorf("parse: %s\n%s", err, formatted)
- }
-
- return formatted, nil
-}
-
-// lineAt returns the line in text starting at offset offs.
-func lineAt(text []byte, offs int) []byte {
- i := offs
- for i < len(text) && text[i] != '\n' {
- i++
- }
- return text[offs:i]
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden
deleted file mode 100644
index 9d4b072..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden
+++ /dev/null
@@ -1,36 +0,0 @@
-// A standalone comment is a comment which is not attached to any kind of node
-
-// This comes from Terraform, as a test
-variable "foo" {
- # Standalone comment should be still here
-
- default = "bar"
- description = "bar" # yooo
-}
-
-/* This is a multi line standalone
-comment*/
-
-// fatih arslan
-/* This is a developer test
-account and a multine comment */
-developer = ["fatih", "arslan"] // fatih arslan
-
-# One line here
-numbers = [1, 2] // another line here
-
-# Another comment
-variable = {
- description = "bar" # another yooo
-
- foo {
- # Nested standalone
-
- bar = "fatih"
- }
-}
-
-// lead comment
-foo {
- bar = "fatih" // line comment 2
-} // line comment 3
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input
deleted file mode 100644
index 57c37ac..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input
+++ /dev/null
@@ -1,37 +0,0 @@
-// A standalone comment is a comment which is not attached to any kind of node
-
- // This comes from Terraform, as a test
-variable "foo" {
- # Standalone comment should be still here
-
- default = "bar"
- description = "bar" # yooo
-}
-
-/* This is a multi line standalone
-comment*/
-
-
-// fatih arslan
-/* This is a developer test
-account and a multine comment */
-developer = [ "fatih", "arslan"] // fatih arslan
-
-# One line here
-numbers = [1,2] // another line here
-
- # Another comment
-variable = {
- description = "bar" # another yooo
- foo {
- # Nested standalone
-
- bar = "fatih"
- }
-}
-
- // lead comment
-foo {
- bar = "fatih" // line comment 2
-} // line comment 3
-
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden
deleted file mode 100644
index 6ff2150..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden
+++ /dev/null
@@ -1,32 +0,0 @@
-aligned {
- # We have some aligned items below
- foo = "fatih" # yoo1
- default = "bar" # yoo2
- bar = "bar and foo" # yoo3
-
- default = {
- bar = "example"
- }
-
- #deneme arslan
- fatih = ["fatih"] # yoo4
-
- #fatih arslan
- fatiharslan = ["arslan"] // yoo5
-
- default = {
- bar = "example"
- }
-
- security_groups = [
- "foo", # kenya 1
- "${aws_security_group.firewall.foo}", # kenya 2
- ]
-
- security_groups2 = [
- "foo", # kenya 1
- "bar", # kenya 1.5
- "${aws_security_group.firewall.foo}", # kenya 2
- "foobar", # kenya 3
- ]
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input
deleted file mode 100644
index bd43ab1..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input
+++ /dev/null
@@ -1,28 +0,0 @@
-aligned {
-# We have some aligned items below
- foo = "fatih" # yoo1
- default = "bar" # yoo2
- bar = "bar and foo" # yoo3
- default = {
- bar = "example"
- }
- #deneme arslan
- fatih = ["fatih"] # yoo4
- #fatih arslan
- fatiharslan = ["arslan"] // yoo5
- default = {
- bar = "example"
- }
-
-security_groups = [
- "foo", # kenya 1
- "${aws_security_group.firewall.foo}", # kenya 2
-]
-
-security_groups2 = [
- "foo", # kenya 1
- "bar", # kenya 1.5
- "${aws_security_group.firewall.foo}", # kenya 2
- "foobar", # kenya 3
-]
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden
deleted file mode 100644
index e778eaf..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden
+++ /dev/null
@@ -1,13 +0,0 @@
-banana = [
- # I really want to comment this item in the array.
- "a",
-
- # This as well
- "b",
-
- "c", # And C
- "d",
-
- # And another
- "e",
-]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input
deleted file mode 100644
index e778eaf..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input
+++ /dev/null
@@ -1,13 +0,0 @@
-banana = [
- # I really want to comment this item in the array.
- "a",
-
- # This as well
- "b",
-
- "c", # And C
- "d",
-
- # And another
- "e",
-]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input
deleted file mode 100644
index 5d27206..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input
+++ /dev/null
@@ -1,37 +0,0 @@
-// A standalone comment is a comment which is not attached to any kind of node
-
- // This comes from Terraform, as a test
-variable "foo" {
- # Standalone comment should be still here
-
- default = "bar"
- description = "bar" # yooo
-}
-
-/* This is a multi line standalone
-comment*/
-
-
-// fatih arslan
-/* This is a developer test
-account and a multine comment */
-developer = [ "fatih", "arslan"] // fatih arslan
-
-# One line here
-numbers = [1,2] // another line here
-
- # Another comment
-variable = {
- description = "bar" # another yooo
- foo {
- # Nested standalone
-
- bar = "fatih"
- }
-}
-
- // lead comment
-foo {
- bar = "fatih" // line comment 2
-} // line comment 3
-
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden
deleted file mode 100644
index dbeae36..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden
+++ /dev/null
@@ -1,6 +0,0 @@
-resource "blah" "blah" {}
-
-//
-//
-//
-
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input
deleted file mode 100644
index 68c4c28..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input
+++ /dev/null
@@ -1,5 +0,0 @@
-resource "blah" "blah" {}
-
-//
-//
-//
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden
deleted file mode 100644
index 74c4ccd..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden
+++ /dev/null
@@ -1,12 +0,0 @@
-resource "provider" "resource" {
- /*
- SPACE_SENSITIVE_CODE = < 0 {
+ if ch == '\x00' {
s.err("unexpected null character (0x00)")
return eof
}
+ if ch == '\uE123' {
+ s.err("unicode code point U+E123 reserved for internal use")
+ return utf8.RuneError
+ }
+
// debug
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
return ch
@@ -351,7 +352,7 @@ func (s *Scanner) scanNumber(ch rune) token.Type {
return token.NUMBER
}
-// scanMantissa scans the mantissa begining from the rune. It returns the next
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
// non decimal rune. It's used to determine wheter it's a fraction or exponent.
func (s *Scanner) scanMantissa(ch rune) rune {
scanned := false
@@ -432,16 +433,16 @@ func (s *Scanner) scanHeredoc() {
// Read the identifier
identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
- if len(identBytes) == 0 {
+ if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') {
s.err("zero-length heredoc anchor")
return
}
var identRegexp *regexp.Regexp
if identBytes[0] == '-' {
- identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
+ identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:]))
} else {
- identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
+ identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes))
}
// Read the actual string value
@@ -551,7 +552,7 @@ func (s *Scanner) scanDigits(ch rune, base, n int) rune {
s.err("illegal char escape")
}
- if n != start {
+ if n != start && ch != eof {
// we scanned all digits, put the last non digit char back,
// only if we read anything at all
s.unread()
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go
deleted file mode 100644
index 4f2c9cb..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go
+++ /dev/null
@@ -1,591 +0,0 @@
-package scanner
-
-import (
- "bytes"
- "fmt"
- "testing"
-
- "strings"
-
- "github.com/hashicorp/hcl/hcl/token"
-)
-
-var f100 = strings.Repeat("f", 100)
-
-type tokenPair struct {
- tok token.Type
- text string
-}
-
-var tokenLists = map[string][]tokenPair{
- "comment": []tokenPair{
- {token.COMMENT, "//"},
- {token.COMMENT, "////"},
- {token.COMMENT, "// comment"},
- {token.COMMENT, "// /* comment */"},
- {token.COMMENT, "// // comment //"},
- {token.COMMENT, "//" + f100},
- {token.COMMENT, "#"},
- {token.COMMENT, "##"},
- {token.COMMENT, "# comment"},
- {token.COMMENT, "# /* comment */"},
- {token.COMMENT, "# # comment #"},
- {token.COMMENT, "#" + f100},
- {token.COMMENT, "/**/"},
- {token.COMMENT, "/***/"},
- {token.COMMENT, "/* comment */"},
- {token.COMMENT, "/* // comment */"},
- {token.COMMENT, "/* /* comment */"},
- {token.COMMENT, "/*\n comment\n*/"},
- {token.COMMENT, "/*" + f100 + "*/"},
- },
- "operator": []tokenPair{
- {token.LBRACK, "["},
- {token.LBRACE, "{"},
- {token.COMMA, ","},
- {token.PERIOD, "."},
- {token.RBRACK, "]"},
- {token.RBRACE, "}"},
- {token.ASSIGN, "="},
- {token.ADD, "+"},
- {token.SUB, "-"},
- },
- "bool": []tokenPair{
- {token.BOOL, "true"},
- {token.BOOL, "false"},
- },
- "ident": []tokenPair{
- {token.IDENT, "a"},
- {token.IDENT, "a0"},
- {token.IDENT, "foobar"},
- {token.IDENT, "foo-bar"},
- {token.IDENT, "abc123"},
- {token.IDENT, "LGTM"},
- {token.IDENT, "_"},
- {token.IDENT, "_abc123"},
- {token.IDENT, "abc123_"},
- {token.IDENT, "_abc_123_"},
- {token.IDENT, "_äöü"},
- {token.IDENT, "_本"},
- {token.IDENT, "äöü"},
- {token.IDENT, "本"},
- {token.IDENT, "a۰۱۸"},
- {token.IDENT, "foo६४"},
- {token.IDENT, "bar9876"},
- },
- "heredoc": []tokenPair{
- {token.HEREDOC, "< 0 for %q", s.ErrorCount, src)
- }
-}
-
-func testTokenList(t *testing.T, tokenList []tokenPair) {
- // create artifical source code
- buf := new(bytes.Buffer)
- for _, ident := range tokenList {
- fmt.Fprintf(buf, "%s\n", ident.text)
- }
-
- s := New(buf.Bytes())
- for _, ident := range tokenList {
- tok := s.Scan()
- if tok.Type != ident.tok {
- t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
- }
-
- if tok.Text != ident.text {
- t.Errorf("text = %q want %q", tok.String(), ident.text)
- }
-
- }
-}
-
-func countNewlines(s string) int {
- n := 0
- for _, ch := range s {
- if ch == '\n' {
- n++
- }
- }
- return n
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go
deleted file mode 100644
index 65be375..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package strconv
-
-import "testing"
-
-type quoteTest struct {
- in string
- out string
- ascii string
-}
-
-var quotetests = []quoteTest{
- {"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`},
- {"\\", `"\\"`, `"\\"`},
- {"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`},
- {"\u263a", `"☺"`, `"\u263a"`},
- {"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`},
- {"\x04", `"\x04"`, `"\x04"`},
-}
-
-type unQuoteTest struct {
- in string
- out string
-}
-
-var unquotetests = []unQuoteTest{
- {`""`, ""},
- {`"a"`, "a"},
- {`"abc"`, "abc"},
- {`"☺"`, "☺"},
- {`"hello world"`, "hello world"},
- {`"\xFF"`, "\xFF"},
- {`"\377"`, "\377"},
- {`"\u1234"`, "\u1234"},
- {`"\U00010111"`, "\U00010111"},
- {`"\U0001011111"`, "\U0001011111"},
- {`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""},
- {`"'"`, "'"},
- {`"${file("foo")}"`, `${file("foo")}`},
- {`"${file("\"foo\"")}"`, `${file("\"foo\"")}`},
- {`"echo ${var.region}${element(split(",",var.zones),0)}"`,
- `echo ${var.region}${element(split(",",var.zones),0)}`},
- {`"${HH\\:mm\\:ss}"`, `${HH\\:mm\\:ss}`},
- {`"${\n}"`, `${\n}`},
-}
-
-var misquoted = []string{
- ``,
- `"`,
- `"a`,
- `"'`,
- `b"`,
- `"\"`,
- `"\9"`,
- `"\19"`,
- `"\129"`,
- `'\'`,
- `'\9'`,
- `'\19'`,
- `'\129'`,
- `'ab'`,
- `"\x1!"`,
- `"\U12345678"`,
- `"\z"`,
- "`",
- "`xxx",
- "`\"",
- `"\'"`,
- `'\"'`,
- "\"\n\"",
- "\"\\n\n\"",
- "'\n'",
- `"${"`,
- `"${foo{}"`,
- "\"${foo}\n\"",
-}
-
-func TestUnquote(t *testing.T) {
- for _, tt := range unquotetests {
- if out, err := Unquote(tt.in); err != nil || out != tt.out {
- t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out)
- }
- }
-
- // run the quote tests too, backward
- for _, tt := range quotetests {
- if in, err := Unquote(tt.out); in != tt.in {
- t.Errorf("Unquote(%#q) = %q, %v, want %q, nil", tt.out, in, err, tt.in)
- }
- }
-
- for _, s := range misquoted {
- if out, err := Unquote(s); out != "" || err != ErrSyntax {
- t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl
deleted file mode 100644
index 78c2675..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl
+++ /dev/null
@@ -1,4 +0,0 @@
-foo = [
- "1",
- "2", # comment
-]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl
deleted file mode 100644
index eb5a99a..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl
+++ /dev/null
@@ -1,6 +0,0 @@
-resource = [{
- "foo": {
- "bar": {},
- "baz": [1, 2, "foo"],
- }
-}]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl
deleted file mode 100644
index 1ff7f29..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl
+++ /dev/null
@@ -1,15 +0,0 @@
-// Foo
-
-/* Bar */
-
-/*
-/*
-Baz
-*/
-
-# Another
-
-# Multiple
-# Lines
-
-foo = "bar"
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl
deleted file mode 100644
index fec5601..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl
+++ /dev/null
@@ -1 +0,0 @@
-# Hello
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl
deleted file mode 100644
index cccb5b0..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl
+++ /dev/null
@@ -1,42 +0,0 @@
-// This comes from Terraform, as a test
-variable "foo" {
- default = "bar"
- description = "bar"
-}
-
-provider "aws" {
- access_key = "foo"
- secret_key = "bar"
-}
-
-provider "do" {
- api_key = "${var.foo}"
-}
-
-resource "aws_security_group" "firewall" {
- count = 5
-}
-
-resource aws_instance "web" {
- ami = "${var.foo}"
- security_groups = [
- "foo",
- "${aws_security_group.firewall.foo}"
- ]
-
- network_interface {
- device_index = 0
- description = "Main network interface"
- }
-}
-
-resource "aws_instance" "db" {
- security_groups = "${aws_security_group.firewall.*.id}"
- VPC = "foo"
-
- depends_on = ["aws_instance.web"]
-}
-
-output "web_ip" {
- value = "${aws_instance.web.private_ip}"
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl
deleted file mode 100644
index 0007aaf..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl
+++ /dev/null
@@ -1 +0,0 @@
-foo.bar = "baz"
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl
deleted file mode 100644
index e69de29..0000000
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl
deleted file mode 100644
index 059d4ce..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl
+++ /dev/null
@@ -1 +0,0 @@
-foo = [1, 2, "foo"]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl
deleted file mode 100644
index 50f4218..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl
+++ /dev/null
@@ -1 +0,0 @@
-foo = [1, 2, "foo",]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl
deleted file mode 100644
index 029c54b..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl
+++ /dev/null
@@ -1,2 +0,0 @@
-foo = "bar"
-key = 7
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl
deleted file mode 100644
index e9f77ca..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl
+++ /dev/null
@@ -1,3 +0,0 @@
-default = {
- "eu-west-1": "ami-b1cf19c6",
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl
deleted file mode 100644
index 92592fb..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl
+++ /dev/null
@@ -1,5 +0,0 @@
-// This is a test structure for the lexer
-foo bar "baz" {
- key = 7
- foo = "bar"
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl
deleted file mode 100644
index 7229a1f..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl
+++ /dev/null
@@ -1,5 +0,0 @@
-foo {
- value = 7
- "value" = 8
- "complex::value" = 9
-}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl
deleted file mode 100644
index 4d156dd..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl
+++ /dev/null
@@ -1 +0,0 @@
-resource "foo" "bar" {}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl
deleted file mode 100644
index cf2747e..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl
+++ /dev/null
@@ -1,7 +0,0 @@
-foo = "bar"
-bar = 7
-baz = [1,2,3]
-foo = -12
-bar = 3.14159
-foo = true
-bar = false
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go b/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go
deleted file mode 100644
index e4b4af2..0000000
--- a/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package token
-
-import (
- "reflect"
- "testing"
-)
-
-func TestTypeString(t *testing.T) {
- var tokens = []struct {
- tt Type
- str string
- }{
- {ILLEGAL, "ILLEGAL"},
- {EOF, "EOF"},
- {COMMENT, "COMMENT"},
- {IDENT, "IDENT"},
- {NUMBER, "NUMBER"},
- {FLOAT, "FLOAT"},
- {BOOL, "BOOL"},
- {STRING, "STRING"},
- {HEREDOC, "HEREDOC"},
- {LBRACK, "LBRACK"},
- {LBRACE, "LBRACE"},
- {COMMA, "COMMA"},
- {PERIOD, "PERIOD"},
- {RBRACK, "RBRACK"},
- {RBRACE, "RBRACE"},
- {ASSIGN, "ASSIGN"},
- {ADD, "ADD"},
- {SUB, "SUB"},
- }
-
- for _, token := range tokens {
- if token.tt.String() != token.str {
- t.Errorf("want: %q got:%q\n", token.str, token.tt)
- }
- }
-
-}
-
-func TestTokenValue(t *testing.T) {
- var tokens = []struct {
- tt Token
- v interface{}
- }{
- {Token{Type: BOOL, Text: `true`}, true},
- {Token{Type: BOOL, Text: `false`}, false},
- {Token{Type: FLOAT, Text: `3.14`}, float64(3.14)},
- {Token{Type: NUMBER, Text: `42`}, int64(42)},
- {Token{Type: IDENT, Text: `foo`}, "foo"},
- {Token{Type: STRING, Text: `"foo"`}, "foo"},
- {Token{Type: STRING, Text: `"foo\nbar"`}, "foo\nbar"},
- {Token{Type: STRING, Text: `"${file("foo")}"`}, `${file("foo")}`},
- {
- Token{
- Type: STRING,
- Text: `"${replace("foo", ".", "\\.")}"`,
- },
- `${replace("foo", ".", "\\.")}`},
- {Token{Type: HEREDOC, Text: "< 0 for %q", s.ErrorCount, src)
- }
-}
-
-func testTokenList(t *testing.T, tokenList []tokenPair) {
- // create artifical source code
- buf := new(bytes.Buffer)
- for _, ident := range tokenList {
- fmt.Fprintf(buf, "%s\n", ident.text)
- }
-
- s := New(buf.Bytes())
- for _, ident := range tokenList {
- tok := s.Scan()
- if tok.Type != ident.tok {
- t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
- }
-
- if tok.Text != ident.text {
- t.Errorf("text = %q want %q", tok.String(), ident.text)
- }
-
- }
-}
-
-func countNewlines(s string) int {
- n := 0
- for _, ch := range s {
- if ch == '\n' {
- n++
- }
- }
- return n
-}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json
deleted file mode 100644
index e320f17..0000000
--- a/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "foo": [1, 2, "bar"],
- "bar": "baz"
-}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json
deleted file mode 100644
index b54bde9..0000000
--- a/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "foo": "bar"
-}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json
deleted file mode 100644
index 72168a3..0000000
--- a/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "foo": {
- "bar": [1,2]
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json
deleted file mode 100644
index 9a142a6..0000000
--- a/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "foo": "bar",
- "bar": 7,
- "baz": [1,2,3],
- "foo": -12,
- "bar": 3.14159,
- "foo": true,
- "bar": false,
- "foo": null
-}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token_test.go b/vendor/github.com/hashicorp/hcl/json/token/token_test.go
deleted file mode 100644
index a83fdd5..0000000
--- a/vendor/github.com/hashicorp/hcl/json/token/token_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package token
-
-import (
- "testing"
-)
-
-func TestTypeString(t *testing.T) {
- var tokens = []struct {
- tt Type
- str string
- }{
- {ILLEGAL, "ILLEGAL"},
- {EOF, "EOF"},
- {NUMBER, "NUMBER"},
- {FLOAT, "FLOAT"},
- {BOOL, "BOOL"},
- {STRING, "STRING"},
- {NULL, "NULL"},
- {LBRACK, "LBRACK"},
- {LBRACE, "LBRACE"},
- {COMMA, "COMMA"},
- {PERIOD, "PERIOD"},
- {RBRACK, "RBRACK"},
- {RBRACE, "RBRACE"},
- }
-
- for _, token := range tokens {
- if token.tt.String() != token.str {
- t.Errorf("want: %q got:%q\n", token.str, token.tt)
-
- }
- }
-
-}
diff --git a/vendor/github.com/hashicorp/hcl/lex_test.go b/vendor/github.com/hashicorp/hcl/lex_test.go
deleted file mode 100644
index 8062764..0000000
--- a/vendor/github.com/hashicorp/hcl/lex_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package hcl
-
-import (
- "testing"
-)
-
-func TestLexMode(t *testing.T) {
- cases := []struct {
- Input string
- Mode lexModeValue
- }{
- {
- "",
- lexModeHcl,
- },
- {
- "foo",
- lexModeHcl,
- },
- {
- "{}",
- lexModeJson,
- },
- {
- " {}",
- lexModeJson,
- },
- }
-
- for i, tc := range cases {
- actual := lexMode([]byte(tc.Input))
-
- if actual != tc.Mode {
- t.Fatalf("%d: %#v", i, actual)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/assign_deep.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/assign_deep.hcl
deleted file mode 100644
index dd3151c..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/assign_deep.hcl
+++ /dev/null
@@ -1,5 +0,0 @@
-resource = [{
- foo = [{
- bar = {}
- }]
-}]
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl
deleted file mode 100644
index 9499944..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl
+++ /dev/null
@@ -1,2 +0,0 @@
-foo = "bar"
-bar = "${file("bing/bong.txt")}"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json
deleted file mode 100644
index 7bdddc8..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "foo": "bar",
- "bar": "${file(\"bing/bong.txt\")}"
-}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl
deleted file mode 100644
index 4e415da..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl
+++ /dev/null
@@ -1 +0,0 @@
-count = "3"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl
deleted file mode 100644
index 363697b..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl
+++ /dev/null
@@ -1,3 +0,0 @@
-foo="bar"
-bar="${file("bing/bong.txt")}"
-foo-bar="baz"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/block_assign.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/block_assign.hcl
deleted file mode 100644
index ee8b06f..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/block_assign.hcl
+++ /dev/null
@@ -1,2 +0,0 @@
-environment = "aws" {
-}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl
deleted file mode 100644
index 5b185cc..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl
+++ /dev/null
@@ -1,15 +0,0 @@
-key "" {
- policy = "read"
-}
-
-key "foo/" {
- policy = "write"
-}
-
-key "foo/bar/" {
- policy = "read"
-}
-
-key "foo/bar/baz" {
- policy = "deny"
-}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json
deleted file mode 100644
index 151864e..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "key": {
- "": {
- "policy": "read"
- },
-
- "foo/": {
- "policy": "write"
- },
-
- "foo/bar/": {
- "policy": "read"
- },
-
- "foo/bar/baz": {
- "policy": "deny"
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl
deleted file mode 100644
index 52dcaa1..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl
+++ /dev/null
@@ -1,10 +0,0 @@
-variable "foo" {
- default = "bar"
- description = "bar"
-}
-
-variable "amis" {
- default = {
- east = "foo"
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json
deleted file mode 100644
index 49f921e..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "variable": {
- "foo": {
- "default": "bar",
- "description": "bar"
- },
-
- "amis": {
- "default": {
- "east": "foo"
- }
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl
deleted file mode 100644
index 5be1b23..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl
+++ /dev/null
@@ -1 +0,0 @@
-resource "foo" {}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl
deleted file mode 100644
index f818b15..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl
+++ /dev/null
@@ -1,6 +0,0 @@
-foo = "bar\"baz\\n"
-bar = "new\nline"
-qux = "back\\slash"
-qax = "slash\\:colon"
-nested = "${HH\\:mm\\:ss}"
-nestedquotes = "${"\"stringwrappedinquotes\""}"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/escape_backslash.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/escape_backslash.hcl
deleted file mode 100644
index bc337fb..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/escape_backslash.hcl
+++ /dev/null
@@ -1,5 +0,0 @@
-output {
- one = "${replace(var.sub_domain, ".", "\\.")}"
- two = "${replace(var.sub_domain, ".", "\\\\.")}"
- many = "${replace(var.sub_domain, ".", "\\\\\\\\.")}"
-}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl
deleted file mode 100644
index 9bca551..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl
+++ /dev/null
@@ -1,2 +0,0 @@
-foo = "bar"
-Key = 7
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl
deleted file mode 100644
index edf355e..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl
+++ /dev/null
@@ -1,2 +0,0 @@
-a = 1.02
-b = 2
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.json b/vendor/github.com/hashicorp/hcl/test-fixtures/float.json
deleted file mode 100644
index 5808680..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/float.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "a": 1.02,
- "b": 2
-}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/git_crypt.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/git_crypt.hcl
deleted file mode 100644
index f691948..0000000
Binary files a/vendor/github.com/hashicorp/hcl/test-fixtures/git_crypt.hcl and /dev/null differ
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate.json b/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate.json
deleted file mode 100644
index cad0151..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "default": "${replace(\"europe-west\", \"-\", \" \")}"
-}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_lists.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_lists.hcl
deleted file mode 100644
index 8af3458..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_lists.hcl
+++ /dev/null
@@ -1,2 +0,0 @@
-foo = [["foo"], ["bar"]]
-
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_maps.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_maps.hcl
deleted file mode 100644
index 985a33b..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/list_of_maps.hcl
+++ /dev/null
@@ -1,4 +0,0 @@
-foo = [
- {somekey1 = "someval1"},
- {somekey2 = "someval2", someextrakey = "someextraval"},
-]
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl
deleted file mode 100644
index f883bd7..0000000
--- a/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl
+++ /dev/null
@@ -1,4 +0,0 @@
-foo = <
-
-
-
-
-**Environment:**
-
-
-* Vault Version:
-* Operating System/Architecture:
-
-**Vault Config File:**
-
-
-**Startup Log Output:**
-
-
-**Expected Behavior:**
-
-
-**Actual Behavior:**
-
-
-**Steps to Reproduce:**
-
-
-**Important Factoids:**
-
-
-**References:**
-
diff --git a/vendor/github.com/hashicorp/vault/.gitignore b/vendor/github.com/hashicorp/vault/.gitignore
deleted file mode 100644
index dbd3bc3..0000000
--- a/vendor/github.com/hashicorp/vault/.gitignore
+++ /dev/null
@@ -1,79 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-.cover
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-
-# Other dirs
-/bin/
-/pkg/
-
-# Vault-specific
-example.hcl
-example.vault.d
-
-# Ruby
-website/vendor
-website/.bundle
-website/build
-
-# Vagrant
-.vagrant/
-Vagrantfile
-
-# Configs
-*.hcl
-
-.DS_Store
-.idea
-.vscode
-
-dist/*
-
-tags
-
-# Editor backups
-*~
-*.sw[a-z]
-
-# IntelliJ IDEA project files
-.idea
-*.ipr
-*.iml
-
-# compiled output
-ui/dist
-ui/tmp
-
-# dependencies
-ui/node_modules
-ui/bower_components
-
-# misc
-ui/.DS_Store
-ui/.sass-cache
-ui/connect.lock
-ui/coverage/*
-ui/libpeerconnection.log
-ui/npm-debug.log
-ui/testem.log
diff --git a/vendor/github.com/hashicorp/vault/.hooks/pre-push b/vendor/github.com/hashicorp/vault/.hooks/pre-push
deleted file mode 100755
index ac56a48..0000000
--- a/vendor/github.com/hashicorp/vault/.hooks/pre-push
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-
-remote="$1"
-
-if [ "$remote" = "enterprise" ]; then
- exit 0
-fi
-
-if [ -f version/version_ent.go ]; then
- echo "Found enterprise version file while pushing to oss remote"
- exit 1
-fi
-
-exit 0
diff --git a/vendor/github.com/hashicorp/vault/.travis.yml b/vendor/github.com/hashicorp/vault/.travis.yml
deleted file mode 100644
index aa214be..0000000
--- a/vendor/github.com/hashicorp/vault/.travis.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-sudo: required
-dist: trusty
-
-language: go
-
-services:
- - docker
-
-go:
- - 1.9
-
-matrix:
- allow_failures:
- - go: tip
-
-branches:
- only:
- - master
- - travis-testing
-
-script:
- - make bootstrap
- - travis_wait 75 make test
- - travis_wait 75 make testrace
diff --git a/vendor/github.com/hashicorp/vault/CHANGELOG.md b/vendor/github.com/hashicorp/vault/CHANGELOG.md
deleted file mode 100644
index 9ce00fe..0000000
--- a/vendor/github.com/hashicorp/vault/CHANGELOG.md
+++ /dev/null
@@ -1,2076 +0,0 @@
-## 0.8.3 (September 19th, 2017)
-
-CHANGES:
-
- * Policy input/output standardization: For all built-in authentication
- backends, policies can now be specified as a comma-delimited string or an
- array if using JSON as API input; on read, policies will be returned as an
- array; and the `default` policy will not be forcefully added to policies
- saved in configurations. Please note that the `default` policy will continue
- to be added to generated tokens, however, rather than backends adding
- `default` to the given set of input policies (in some cases, and not in
- others), the stored set will reflect the user-specified set.
- * `sign-self-issued` modifies Issuer in generated certificates: In 0.8.2 the
- endpoint would not modify the Issuer in the generated certificate, leaving
- the output self-issued. Although theoretically valid, in practice crypto
- stacks were unhappy validating paths containing such certs. As a result,
- `sign-self-issued` now encodes the signing CA's Subject DN into the Issuer
- DN of the generated certificate.
- * `sys/raw` requires enabling: While the `sys/raw` endpoint can be extremely
- useful in break-glass or support scenarios, it is also extremely dangerous.
- As of now, a configuration file option `raw_storage_endpoint` must be set in
- order to enable this API endpoint. Once set, the available functionality has
- been enhanced slightly; it now supports listing and decrypting most of
- Vault's core data structures, except for the encryption keyring itself.
- * `generic` is now `kv`: To better reflect its actual use, the `generic`
- backend is now `kv`. Using `generic` will still work for backwards
- compatibility.
-
-FEATURES:
-
- * **GCE Support for GCP Auth**: GCE instances can now authenticate to Vault
- using machine credentials.
- * **Support for Kubernetes Service Account Auth**: Kubernetes Service Accounts
- can not authenticate to vault using JWT tokens.
-
-IMPROVEMENTS:
-
- * configuration: Provide a config option to store Vault server's process ID
- (PID) in a file [GH-3321]
- * mfa (Enterprise): Add the ability to use identity metadata in username format
- * mfa/okta (Enterprise): Add support for configuring base_url for API calls
- * secret/pki: `sign-intermediate` will now allow specifying a `ttl` value
- longer than the signing CA certificate's NotAfter value. [GH-3325]
- * sys/raw: Raw storage access is now disabled by default [GH-3329]
-
-BUG FIXES:
-
- * auth/okta: Fix regression that removed the ability to set base_url [GH-3313]
- * core: Fix panic while loading leases at startup on ARM processors
- [GH-3314]
- * secret/pki: Fix `sign-self-issued` encoding the wrong subject public key
- [GH-3325]
-
-## 0.8.2.1 (September 11th, 2017) (Enterprise Only)
-
-BUG FIXES:
-
- * Fix an issue upgrading to 0.8.2 for Enterprise customers.
-
-## 0.8.2 (September 5th, 2017)
-
-SECURITY:
-
-* In prior versions of Vault, if authenticating via AWS IAM and requesting a
- periodic token, the period was not properly respected. This could lead to
- tokens expiring unexpectedly, or a token lifetime being longer than expected.
- Upon token renewal with Vault 0.8.2 the period will be properly enforced.
-
-DEPRECATIONS/CHANGES:
-
-* `vault ssh` users should supply `-mode` and `-role` to reduce the number of
- API calls. A future version of Vault will mark these optional values are
- required. Failure to supply `-mode` or `-role` will result in a warning.
-* Vault plugins will first briefly run a restricted version of the plugin to
- fetch metadata, and then lazy-load the plugin on first request to prevent
- crash/deadlock of Vault during the unseal process. Plugins will need to be
- built with the latest changes in order for them to run properly.
-
-FEATURES:
-
-* **Lazy Lease Loading**: On startup, Vault will now load leases from storage
- in a lazy fashion (token checks and revocation/renewal requests still force
- an immediate load). For larger installations this can significantly reduce
- downtime when switching active nodes or bringing Vault up from cold start.
-* **SSH CA Login with `vault ssh`**: `vault ssh` now supports the SSH CA
- backend for authenticating to machines. It also supports remote host key
- verification through the SSH CA backend, if enabled.
-* **Signing of Self-Issued Certs in PKI**: The `pki` backend now supports
- signing self-issued CA certs. This is useful when switching root CAs.
-
-IMPROVEMENTS:
-
- * audit/file: Allow specifying `stdout` as the `file_path` to log to standard
- output [GH-3235]
- * auth/aws: Allow wildcards in `bound_iam_principal_id` [GH-3213]
- * auth/okta: Compare groups case-insensitively since Okta is only
- case-preserving [GH-3240]
- * auth/okta: Standarize Okta configuration APIs across backends [GH-3245]
- * cli: Add subcommand autocompletion that can be enabled with
- `vault -autocomplete-install` [GH-3223]
- * cli: Add ability to handle wrapped responses when using `vault auth`. What
- is output depends on the other given flags; see the help output for that
- command for more information. [GH-3263]
- * core: TLS cipher suites used for cluster behavior can now be set via
- `cluster_cipher_suites` in configuration [GH-3228]
- * core: The `plugin_name` can now either be specified directly as part of the
- parameter or within the `config` object when mounting a secret or auth backend
- via `sys/mounts/:path` or `sys/auth/:path` respectively [GH-3202]
- * core: It is now possible to update the `description` of a mount when
- mount-tuning, although this must be done through the HTTP layer [GH-3285]
- * secret/databases/mongo: If an EOF is encountered, attempt reconnecting and
- retrying the operation [GH-3269]
- * secret/pki: TTLs can now be specified as a string or an integer number of
- seconds [GH-3270]
- * secret/pki: Self-issued certs can now be signed via
- `pki/root/sign-self-issued` [GH-3274]
- * storage/gcp: Use application default credentials if they exist [GH-3248]
-
-BUG FIXES:
-
- * auth/aws: Properly use role-set period values for IAM-derived token renewals
- [GH-3220]
- * auth/okta: Fix updating organization/ttl/max_ttl after initial setting
- [GH-3236]
- * core: Fix PROXY when underlying connection is TLS [GH-3195]
- * core: Policy-related commands would sometimes fail to act case-insensitively
- [GH-3210]
- * storage/consul: Fix parsing TLS configuration when using a bare IPv6 address
- [GH-3268]
- * plugins: Lazy-load plugins to prevent crash/deadlock during unseal process.
- [GH-3255]
- * plugins: Skip mounting plugin-based secret and credential mounts when setting
- up mounts if the plugin is no longer present in the catalog. [GH-3255]
-
-## 0.8.1 (August 16th, 2017)
-
-DEPRECATIONS/CHANGES:
-
- * PKI Root Generation: Calling `pki/root/generate` when a CA cert/key already
- exists will now return a `204` instead of overwriting an existing root. If
- you want to recreate the root, first run a delete operation on `pki/root`
- (requires `sudo` capability), then generate it again.
-
-FEATURES:
-
- * **Oracle Secret Backend**: There is now an external plugin to support leased
- credentials for Oracle databases (distributed separately).
- * **GCP IAM Auth Backend**: There is now an authentication backend that allows
- using GCP IAM credentials to retrieve Vault tokens. This is available as
- both a plugin and built-in to Vault.
- * **PingID Push Support for Path-Baased MFA (Enterprise)**: PingID Push can
- now be used for MFA with the new path-based MFA introduced in Vault
- Enterprise 0.8.
- * **Permitted DNS Domains Support in PKI**: The `pki` backend now supports
- specifying permitted DNS domains for CA certificates, allowing you to
- narrowly scope the set of domains for which a CA can issue or sign child
- certificates.
- * **Plugin Backend Reload Endpoint**: Plugin backends can now be triggered to
- reload using the `sys/plugins/reload/backend` endpoint and providing either
- the plugin name or the mounts to reload.
- * **Self-Reloading Plugins**: The plugin system will now attempt to reload a
- crashed or stopped plugin, once per request.
-
-IMPROVEMENTS:
-
- * auth/approle: Allow array input for policies in addition to comma-delimited
- strings [GH-3163]
- * plugins: Send logs through Vault's logger rather than stdout [GH-3142]
- * secret/pki: Add `pki/root` delete operation [GH-3165]
- * secret/pki: Don't overwrite an existing root cert/key when calling generate
- [GH-3165]
-
-BUG FIXES:
-
- * aws: Don't prefer a nil HTTP client over an existing one [GH-3159]
- * core: If there is an error when checking for create/update existence, return
- 500 instead of 400 [GH-3162]
- * secret/database: Avoid creating usernames that are too long for legacy MySQL
- [GH-3138]
-
-## 0.8.0 (August 9th, 2017)
-
-SECURITY:
-
- * We've added a note to the docs about the way the GitHub auth backend works
- as it may not be readily apparent that GitHub personal access tokens, which
- are used by the backend, can be used for unauthorized access if they are
- stolen from third party services and access to Vault is public.
-
-DEPRECATIONS/CHANGES:
-
- * Database Plugin Backends: Passwords generated for these backends now
- enforce stricter password requirements, as opposed to the previous behavior
- of returning a randomized UUID. Passwords are of length 20, and have a `A1a-`
- characters prepended to ensure stricter requirements. No regressions are
- expected from this change. (For database backends that were previously
- substituting underscores for hyphens in passwords, this will remain the
- case.)
- * Lease Endpoints: The endpoints `sys/renew`, `sys/revoke`, `sys/revoke-prefix`,
- `sys/revoke-force` have been deprecated and relocated under `sys/leases`.
- Additionally, the deprecated path `sys/revoke-force` now requires the `sudo`
- capability.
- * Response Wrapping Lookup Unauthenticated: The `sys/wrapping/lookup` endpoint
- is now unauthenticated. This allows introspection of the wrapping info by
- clients that only have the wrapping token without then invalidating the
- token. Validation functions/checks are still performed on the token.
-
-FEATURES:
-
- * **Cassandra Storage**: Cassandra can now be used for Vault storage
- * **CockroachDB Storage**: CockroachDB can now be used for Vault storage
- * **CouchDB Storage**: CouchDB can now be used for Vault storage
- * **SAP HANA Database Plugin**: The `databases` backend can now manage users
- for SAP HANA databases
- * **Plugin Backends**: Vault now supports running secret and auth backends as
- plugins. Plugins can be mounted like normal backends and can be developed
- independently from Vault.
- * **PROXY Protocol Support** Vault listeners can now be configured to honor
- PROXY protocol v1 information to allow passing real client IPs into Vault. A
- list of authorized addresses (IPs or subnets) can be defined and
- accept/reject behavior controlled.
- * **Lease Lookup and Browsing in the Vault Enterprise UI**: Vault Enterprise UI
- now supports lookup and listing of leases and the associated actions from the
- `sys/leases` endpoints in the API. These are located in the new top level
- navigation item "Leases".
- * **Filtered Mounts for Performance Mode Replication**: Whitelists or
- blacklists of mounts can be defined per-secondary to control which mounts
- are actually replicated to that secondary. This can allow targeted
- replication of specific sets of data to specific geolocations/datacenters.
- * **Disaster Recovery Mode Replication (Enterprise Only)**: There is a new
- replication mode, Disaster Recovery (DR), that performs full real-time
- replication (including tokens and leases) to DR secondaries. DR secondaries
- cannot handle client requests, but can be promoted to primary as needed for
- failover.
- * **Manage New Replication Features in the Vault Enterprise UI**: Support for
- Replication features in Vault Enterprise UI has expanded to include new DR
- Replication mode and management of Filtered Mounts in Performance Replication
- mode.
- * **Vault Identity (Enterprise Only)**: Vault's new Identity system allows
- correlation of users across tokens. At present this is only used for MFA,
- but will be the foundation of many other features going forward.
- * **Duo Push, Okta Push, and TOTP MFA For All Authenticated Paths (Enterprise
- Only)**: A brand new MFA system built on top of Identity allows MFA
- (currently Duo Push, Okta Push, and TOTP) for any authenticated path within
- Vault. MFA methods can be configured centrally, and TOTP keys live within
- the user's Identity information to allow using the same key across tokens.
- Specific MFA method(s) required for any given path within Vault can be
- specified in normal ACL path statements.
-
-IMPROVEMENTS:
-
- * api: Add client method for a secret renewer background process [GH-2886]
- * api: Add `RenewTokenAsSelf` [GH-2886]
- * api: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env
- var or with a new API function [GH-2956]
- * api/cli: Client will now attempt to look up SRV records for the given Vault
- hostname [GH-3035]
- * audit/socket: Enhance reconnection logic and don't require the connection to
- be established at unseal time [GH-2934]
- * audit/file: Opportunistically try re-opening the file on error [GH-2999]
- * auth/approle: Add role name to token metadata [GH-2985]
- * auth/okta: Allow specifying `ttl`/`max_ttl` inside the mount [GH-2915]
- * cli: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env
- var [GH-2956]
- * command/auth: Add `-token-only` flag to `vault auth` that returns only the
- token on stdout and does not store it via the token helper [GH-2855]
- * core: CORS allowed origins can now be configured [GH-2021]
- * core: Add metrics counters for audit log failures [GH-2863]
- * cors: Allow setting allowed headers via the API instead of always using
- wildcard [GH-3023]
- * secret/ssh: Allow specifying the key ID format using template values for CA
- type [GH-2888]
- * server: Add `tls_client_ca_file` option for specifying a CA file to use for
- client certificate verification when `tls_require_and_verify_client_cert` is
- enabled [GH-3034]
- * storage/cockroachdb: Add CockroachDB storage backend [GH-2713]
- * storage/couchdb: Add CouchhDB storage backend [GH-2880]
- * storage/mssql: Add `max_parallel` [GH-3026]
- * storage/postgresql: Add `max_parallel` [GH-3026]
- * storage/postgresql: Improve listing speed [GH-2945]
- * storage/s3: More efficient paging when an object has a lot of subobjects
- [GH-2780]
- * sys/wrapping: Make `sys/wrapping/lookup` unauthenticated [GH-3084]
- * sys/wrapping: Wrapped tokens now store the original request path of the data
- [GH-3100]
- * telemetry: Add support for DogStatsD [GH-2490]
-
-BUG FIXES:
-
- * api/health: Don't treat standby `429` codes as an error [GH-2850]
- * api/leases: Fix lease lookup returning lease properties at the top level
- * audit: Fix panic when audit logging a read operation on an asymmetric
- `transit` key [GH-2958]
- * auth/approle: Fix panic when secret and cidr list not provided in role
- [GH-3075]
- * auth/aws: Look up proper account ID on token renew [GH-3012]
- * auth/aws: Store IAM header in all cases when it changes [GH-3004]
- * auth/ldap: Verify given certificate is PEM encoded instead of failing
- silently [GH-3016]
- * auth/token: Don't allow using the same token ID twice when manually
- specifying [GH-2916]
- * cli: Fix issue with parsing keys that start with special characters [GH-2998]
- * core: Relocated `sys/leases/renew` returns same payload as original
- `sys/leases` endpoint [GH-2891]
- * secret/ssh: Fix panic when signing with incorrect key type [GH-3072]
- * secret/totp: Ensure codes can only be used once. This makes some automated
- workflows harder but complies with the RFC. [GH-2908]
- * secret/transit: Fix locking when creating a key with unsupported options
- [GH-2974]
-
-## 0.7.3 (June 7th, 2017)
-
-SECURITY:
-
- * Cert auth backend now checks validity of individual certificates: In
- previous versions of Vault, validity (e.g. expiration) of individual leaf
- certificates added for authentication was not checked. This was done to make
- it easier for administrators to control lifecycles of individual
- certificates added to the backend, e.g. the authentication material being
- checked was access to that specific certificate's private key rather than
- all private keys signed by a CA. However, this behavior is often unexpected
- and as a result can lead to insecure deployments, so we are now validating
- these certificates as well.
- * App-ID path salting was skipped in 0.7.1/0.7.2: A regression in 0.7.1/0.7.2
- caused the HMACing of any App-ID information stored in paths (including
- actual app-IDs and user-IDs) to be unsalted and written as-is from the API.
- In 0.7.3 any such paths will be automatically changed to salted versions on
- access (e.g. login or read); however, if you created new app-IDs or user-IDs
- in 0.7.1/0.7.2, you may want to consider whether any users with access to
- Vault's underlying data store may have intercepted these values, and
- revoke/roll them.
-
-DEPRECATIONS/CHANGES:
-
- * Step-Down is Forwarded: When a step-down is issued against a non-active node
- in an HA cluster, it will now forward the request to the active node.
-
-FEATURES:
-
- * **ed25519 Signing/Verification in Transit with Key Derivation**: The
- `transit` backend now supports generating
- [ed25519](https://ed25519.cr.yp.to/) keys for signing and verification
- functionality. These keys support derivation, allowing you to modify the
- actual encryption key used by supplying a `context` value.
- * **Key Version Specification for Encryption in Transit**: You can now specify
- the version of a key you use to wish to generate a signature, ciphertext, or
- HMAC. This can be controlled by the `min_encryption_version` key
- configuration property.
- * **Replication Primary Discovery (Enterprise)**: Replication primaries will
- now advertise the addresses of their local HA cluster members to replication
- secondaries. This helps recovery if the primary active node goes down and
- neither service discovery nor load balancers are in use to steer clients.
-
-IMPROVEMENTS:
-
- * api/health: Add Sys().Health() [GH-2805]
- * audit: Add auth information to requests that error out [GH-2754]
- * command/auth: Add `-no-store` option that prevents the auth command from
- storing the returned token into the configured token helper [GH-2809]
- * core/forwarding: Request forwarding now heartbeats to prevent unused
- connections from being terminated by firewalls or proxies
- * plugins/databases: Add MongoDB as an internal database plugin [GH-2698]
- * storage/dynamodb: Add a method for checking the existence of children,
- speeding up deletion operations in the DynamoDB storage backend [GH-2722]
- * storage/mysql: Add max_parallel parameter to MySQL backend [GH-2760]
- * secret/databases: Support listing connections [GH-2823]
- * secret/databases: Support custom renewal statements in Postgres database
- plugin [GH-2788]
- * secret/databases: Use the role name as part of generated credentials
- [GH-2812]
- * ui (Enterprise): Transit key and secret browsing UI handle large lists better
- * ui (Enterprise): root tokens are no longer persisted
- * ui (Enterprise): support for mounting Database and TOTP secret backends
-
-BUG FIXES:
-
- * auth/app-id: Fix regression causing loading of salts to be skipped
- * auth/aws: Improve EC2 describe instances performance [GH-2766]
- * auth/aws: Fix lookup of some instance profile ARNs [GH-2802]
- * auth/aws: Resolve ARNs to internal AWS IDs which makes lookup at various
- points (e.g. renewal time) more robust [GH-2814]
- * auth/aws: Properly honor configured period when using IAM authentication
- [GH-2825]
- * auth/aws: Check that a bound IAM principal is not empty (in the current
- state of the role) before requiring it match the previously authenticated
- client [GH-2781]
- * auth/cert: Fix panic on renewal [GH-2749]
- * auth/cert: Certificate verification for non-CA certs [GH-2761]
- * core/acl: Prevent race condition when compiling ACLs in some scenarios
- [GH-2826]
- * secret/database: Increase wrapping token TTL; in a loaded scenario it could
- be too short
- * secret/generic: Allow integers to be set as the value of `ttl` field as the
- documentation claims is supported [GH-2699]
- * secret/ssh: Added host key callback to ssh client config [GH-2752]
- * storage/s3: Avoid a panic when some bad data is returned [GH-2785]
- * storage/dynamodb: Fix list functions working improperly on Windows [GH-2789]
- * storage/file: Don't leak file descriptors in some error cases
- * storage/swift: Fix pre-v3 project/tenant name reading [GH-2803]
-
-## 0.7.2 (May 8th, 2017)
-
-BUG FIXES:
-
- * audit: Fix auditing entries containing certain kinds of time values
- [GH-2689]
-
-## 0.7.1 (May 5th, 2017)
-
-DEPRECATIONS/CHANGES:
-
- * LDAP Auth Backend: Group membership queries will now run as the `binddn`
- user when `binddn`/`bindpass` are configured, rather than as the
- authenticating user as was the case previously.
-
-FEATURES:
-
- * **AWS IAM Authentication**: IAM principals can get Vault tokens
- automatically, opening AWS-based authentication to users, ECS containers,
- Lambda instances, and more. Signed client identity information retrieved
- using the AWS API `sts:GetCallerIdentity` is validated against the AWS STS
- service before issuing a Vault token. This backend is unified with the
- `aws-ec2` authentication backend under the name `aws`, and allows additional
- EC2-related restrictions to be applied during the IAM authentication; the
- previous EC2 behavior is also still available. [GH-2441]
- * **MSSQL Physical Backend**: You can now use Microsoft SQL Server as your
- Vault physical data store [GH-2546]
- * **Lease Listing and Lookup**: You can now introspect a lease to get its
- creation and expiration properties via `sys/leases/lookup`; with `sudo`
- capability you can also list leases for lookup, renewal, or revocation via
- that endpoint. Various lease functions (renew, revoke, revoke-prefix,
- revoke-force) have also been relocated to `sys/leases/`, but they also work
- at the old paths for compatibility. Reading (but not listing) leases via
- `sys/leases/lookup` is now a part of the current `default` policy. [GH-2650]
- * **TOTP Secret Backend**: You can now store multi-factor authentication keys
- in Vault and use the API to retrieve time-based one-time use passwords on
- demand. The backend can also be used to generate a new key and validate
- passwords generated by that key. [GH-2492]
- * **Database Secret Backend & Secure Plugins (Beta)**: This new secret backend
- combines the functionality of the MySQL, PostgreSQL, MSSQL, and Cassandra
- backends. It also provides a plugin interface for extendability through
- custom databases. [GH-2200]
-
-IMPROVEMENTS:
-
- * auth/cert: Support for constraints on subject Common Name and DNS/email
- Subject Alternate Names in certificates [GH-2595]
- * auth/ldap: Use the binding credentials to search group membership rather
- than the user credentials [GH-2534]
- * cli/revoke: Add `-self` option to allow revoking the currently active token
- [GH-2596]
- * core: Randomize x coordinate in Shamir shares [GH-2621]
- * replication: Fix a bug when enabling `approle` on a primary before
- secondaries were connected
- * replication: Add heartbeating to ensure firewalls don't kill connections to
- primaries
- * secret/pki: Add `no_store` option that allows certificates to be issued
- without being stored. This removes the ability to look up and/or add to a
- CRL but helps with scaling to very large numbers of certificates. [GH-2565]
- * secret/pki: If used with a role parameter, the `sign-verbatim/`
- endpoint honors the values of `generate_lease`, `no_store`, `ttl` and
- `max_ttl` from the given role [GH-2593]
- * secret/pki: Add role parameter `allow_glob_domains` that enables defining
- names in `allowed_domains` containing `*` glob patterns [GH-2517]
- * secret/pki: Update certificate storage to not use characters that are not
- supported on some filesystems [GH-2575]
- * storage/etcd3: Add `discovery_srv` option to query for SRV records to find
- servers [GH-2521]
- * storage/s3: Support `max_parallel` option to limit concurrent outstanding
- requests [GH-2466]
- * storage/s3: Use pooled transport for http client [GH-2481]
- * storage/swift: Allow domain values for V3 authentication [GH-2554]
- * tidy: Improvements to `auth/token/tidy` and `sys/leases/tidy` to handle more
- cleanup cases [GH-2452]
-
-BUG FIXES:
-
- * api: Respect a configured path in Vault's address [GH-2588]
- * auth/aws-ec2: New bounds added as criteria to allow role creation [GH-2600]
- * auth/ldap: Don't lowercase groups attached to users [GH-2613]
- * cli: Don't panic if `vault write` is used with the `force` flag but no path
- [GH-2674]
- * core: Help operations should request forward since standbys may not have
- appropriate info [GH-2677]
- * replication: Fix enabling secondaries when certain mounts already existed on
- the primary
- * secret/mssql: Update mssql driver to support queries with colons [GH-2610]
- * secret/pki: Don't lowercase O/OU values in certs [GH-2555]
- * secret/pki: Don't attempt to validate IP SANs if none are provided [GH-2574]
- * secret/ssh: Don't automatically lowercase principals in issued SSH certs
- [GH-2591]
- * storage/consul: Properly handle state events rather than timing out
- [GH-2548]
- * storage/etcd3: Ensure locks are released if client is improperly shut down
- [GH-2526]
-
-## 0.7.0 (March 21st, 2017)
-
-SECURITY:
-
- * Common name not being validated when `exclude_cn_from_sans` option used in
- `pki` backend: When using a role in the `pki` backend that specified the
- `exclude_cn_from_sans` option, the common name would not then be properly
- validated against the role's constraints. This has been fixed. We recommend
- any users of this feature to upgrade to 0.7 as soon as feasible.
-
-DEPRECATIONS/CHANGES:
-
- * List Operations Always Use Trailing Slash: Any list operation, whether via
- the `GET` or `LIST` HTTP verb, will now internally canonicalize the path to
- have a trailing slash. This makes policy writing more predictable, as it
- means clients will no longer work or fail based on which client they're
- using or which HTTP verb they're using. However, it also means that policies
- allowing `list` capability must be carefully checked to ensure that they
- contain a trailing slash; some policies may need to be split into multiple
- stanzas to accommodate.
- * PKI Defaults to Unleased Certificates: When issuing certificates from the
- PKI backend, by default, no leases will be issued. If you want to manually
- revoke a certificate, its serial number can be used with the `pki/revoke`
- endpoint. Issuing leases is still possible by enabling the `generate_lease`
- toggle in PKI role entries (this will default to `true` for upgrades, to
- keep existing behavior), which will allow using lease IDs to revoke
- certificates. For installations issuing large numbers of certificates (tens
- to hundreds of thousands, or millions), this will significantly improve
- Vault startup time since leases associated with these certificates will not
- have to be loaded; however note that it also means that revocation of a
- token used to issue certificates will no longer add these certificates to a
- CRL. If this behavior is desired or needed, consider keeping leases enabled
- and ensuring lifetimes are reasonable, and issue long-lived certificates via
- a different role with leases disabled.
-
-FEATURES:
-
- * **Replication (Enterprise)**: Vault Enterprise now has support for creating
- a multi-datacenter replication set between clusters. The current replication
- offering is based on an asynchronous primary/secondary (1:N) model that
- replicates static data while keeping dynamic data (leases, tokens)
- cluster-local, focusing on horizontal scaling for high-throughput and
- high-fanout deployments.
- * **Response Wrapping & Replication in the Vault Enterprise UI**: Vault
- Enterprise UI now supports looking up and rotating response wrapping tokens,
- as well as creating tokens with arbitrary values inside. It also now
- supports replication functionality, enabling the configuration of a
- replication set in the UI.
- * **Expanded Access Control Policies**: Access control policies can now
- specify allowed and denied parameters -- and, optionally, their values -- to
- control what a client can and cannot submit during an API call. Policies can
- also specify minimum/maximum response wrapping TTLs to both enforce the use
- of response wrapping and control the duration of resultant wrapping tokens.
- See the [policies concepts
- page](https://www.vaultproject.io/docs/concepts/policies.html) for more
- information.
- * **SSH Backend As Certificate Authority**: The SSH backend can now be
- configured to sign host and user certificates. Each mount of the backend
- acts as an independent signing authority. The CA key pair can be configured
- for each mount and the public key is accessible via an unauthenticated API
- call; additionally, the backend can generate a public/private key pair for
- you. We recommend using separate mounts for signing host and user
- certificates.
-
-IMPROVEMENTS:
-
- * api/request: Passing username and password information in API request
- [GH-2469]
- * audit: Logging the token's use count with authentication response and
- logging the remaining uses of the client token with request [GH-2437]
- * auth/approle: Support for restricting the number of uses on the tokens
- issued [GH-2435]
- * auth/aws-ec2: AWS EC2 auth backend now supports constraints for VPC ID,
- Subnet ID and Region [GH-2407]
- * auth/ldap: Use the value of the `LOGNAME` or `USER` env vars for the
- username if not explicitly set on the command line when authenticating
- [GH-2154]
- * audit: Support adding a configurable prefix (such as `@cee`) before each
- line [GH-2359]
- * core: Canonicalize list operations to use a trailing slash [GH-2390]
- * core: Add option to disable caching on a per-mount level [GH-2455]
- * core: Add ability to require valid client certs in listener config [GH-2457]
- * physical/dynamodb: Implement a session timeout to avoid having to use
- recovery mode in the case of an unclean shutdown, which makes HA much safer
- [GH-2141]
- * secret/pki: O (Organization) values can now be set to role-defined values
- for issued/signed certificates [GH-2369]
- * secret/pki: Certificates issued/signed from PKI backend do not generate
- leases by default [GH-2403]
- * secret/pki: When using DER format, still return the private key type
- [GH-2405]
- * secret/pki: Add an intermediate to the CA chain even if it lacks an
- authority key ID [GH-2465]
- * secret/pki: Add role option to use CSR SANs [GH-2489]
- * secret/ssh: SSH backend as CA to sign user and host certificates [GH-2208]
- * secret/ssh: Support reading of SSH CA public key from `config/ca` endpoint
- and also return it when CA key pair is generated [GH-2483]
-
-BUG FIXES:
-
- * audit: When auditing headers use case-insensitive comparisons [GH-2362]
- * auth/aws-ec2: Return role period in seconds and not nanoseconds [GH-2374]
- * auth/okta: Fix panic if user had no local groups and/or policies set
- [GH-2367]
- * command/server: Fix parsing of redirect address when port is not mentioned
- [GH-2354]
- * physical/postgresql: Fix listing returning incorrect results if there were
- multiple levels of children [GH-2393]
-
-## 0.6.5 (February 7th, 2017)
-
-FEATURES:
-
- * **Okta Authentication**: A new Okta authentication backend allows you to use
- Okta usernames and passwords to authenticate to Vault. If provided with an
- appropriate Okta API token, group membership can be queried to assign
- policies; users and groups can be defined locally as well.
- * **RADIUS Authentication**: A new RADIUS authentication backend allows using
- a RADIUS server to authenticate to Vault. Policies can be configured for
- specific users or for any authenticated user.
- * **Exportable Transit Keys**: Keys in `transit` can now be marked as
- `exportable` at creation time. This allows a properly ACL'd user to retrieve
- the associated signing key, encryption key, or HMAC key. The `exportable`
- value is returned on a key policy read and cannot be changed, so if a key is
- marked `exportable` it will always be exportable, and if it is not it will
- never be exportable.
- * **Batch Transit Operations**: `encrypt`, `decrypt` and `rewrap` operations
- in the transit backend now support processing multiple input items in one
- call, returning the output of each item in the response.
- * **Configurable Audited HTTP Headers**: You can now specify headers that you
- want to have included in each audit entry, along with whether each header
- should be HMAC'd or kept plaintext. This can be useful for adding additional
- client or network metadata to the audit logs.
- * **Transit Backend UI (Enterprise)**: Vault Enterprise UI now supports the transit
- backend, allowing creation, viewing and editing of named keys as well as using
- those keys to perform supported transit operations directly in the UI.
- * **Socket Audit Backend** A new socket audit backend allows audit logs to be sent
- through TCP, UDP, or UNIX Sockets.
-
-IMPROVEMENTS:
-
- * auth/aws-ec2: Add support for cross-account auth using STS [GH-2148]
- * auth/aws-ec2: Support issuing periodic tokens [GH-2324]
- * auth/github: Support listing teams and users [GH-2261]
- * auth/ldap: Support adding policies to local users directly, in addition to
- local groups [GH-2152]
- * command/server: Add ability to select and prefer server cipher suites
- [GH-2293]
- * core: Add a nonce to unseal operations as a check (useful mostly for
- support, not as a security principle) [GH-2276]
- * duo: Added ability to supply extra context to Duo pushes [GH-2118]
- * physical/consul: Add option for setting consistency mode on Consul gets
- [GH-2282]
- * physical/etcd: Full v3 API support; code will autodetect which API version
- to use. The v3 code path is significantly less complicated and may be much
- more stable. [GH-2168]
- * secret/pki: Allow specifying OU entries in generated certificate subjects
- [GH-2251]
- * secret mount ui (Enterprise): the secret mount list now shows all mounted
- backends even if the UI cannot browse them. Additional backends can now be
- mounted from the UI as well.
-
-BUG FIXES:
-
- * auth/token: Fix regression in 0.6.4 where using token store roles as a
- blacklist (with only `disallowed_policies` set) would not work in most
- circumstances [GH-2286]
- * physical/s3: Page responses in client so list doesn't truncate [GH-2224]
- * secret/cassandra: Stop a connection leak that could occur on active node
- failover [GH-2313]
- * secret/pki: When using `sign-verbatim`, don't require a role and use the
- CSR's common name [GH-2243]
-
-## 0.6.4 (December 16, 2016)
-
-SECURITY:
-
-Further details about these security issues can be found in the 0.6.4 upgrade
-guide.
-
- * `default` Policy Privilege Escalation: If a parent token did not have the
- `default` policy attached to its token, it could still create children with
- the `default` policy. This is no longer allowed (unless the parent has
- `sudo` capability for the creation path). In most cases this is low severity
- since the access grants in the `default` policy are meant to be access
- grants that are acceptable for all tokens to have.
- * Leases Not Expired When Limited Use Token Runs Out of Uses: When using
- limited-use tokens to create leased secrets, if the limited-use token was
- revoked due to running out of uses (rather than due to TTL expiration or
- explicit revocation) it would fail to revoke the leased secrets. These
- secrets would still be revoked when their TTL expired, limiting the severity
- of this issue. An endpoint has been added (`auth/token/tidy`) that can
- perform housekeeping tasks on the token store; one of its tasks can detect
- this situation and revoke the associated leases.
-
-FEATURES:
-
- * **Policy UI (Enterprise)**: Vault Enterprise UI now supports viewing,
- creating, and editing policies.
-
-IMPROVEMENTS:
-
- * http: Vault now sets a `no-store` cache control header to make it more
- secure in setups that are not end-to-end encrypted [GH-2183]
-
-BUG FIXES:
-
- * auth/ldap: Don't panic if dialing returns an error and starttls is enabled;
- instead, return the error [GH-2188]
- * ui (Enterprise): Submitting an unseal key now properly resets the
- form so a browser refresh isn't required to continue.
-
-## 0.6.3 (December 6, 2016)
-
-DEPRECATIONS/CHANGES:
-
- * Request size limitation: A maximum request size of 32MB is imposed to
- prevent a denial of service attack with arbitrarily large requests [GH-2108]
- * LDAP denies passwordless binds by default: In new LDAP mounts, or when
- existing LDAP mounts are rewritten, passwordless binds will be denied by
- default. The new `deny_null_bind` parameter can be set to `false` to allow
- these. [GH-2103]
- * Any audit backend activated satisfies conditions: Previously, when a new
- Vault node was taking over service in an HA cluster, all audit backends were
- required to be loaded successfully to take over active duty. This behavior
- now matches the behavior of the audit logging system itself: at least one
- audit backend must successfully be loaded. The server log contains an error
- when this occurs. This helps keep a Vault HA cluster working when there is a
- misconfiguration on a standby node. [GH-2083]
-
-FEATURES:
-
- * **Web UI (Enterprise)**: Vault Enterprise now contains a built-in web UI
- that offers access to a number of features, including init/unsealing/sealing,
- authentication via userpass or LDAP, and K/V reading/writing. The capability
- set of the UI will be expanding rapidly in further releases. To enable it,
- set `ui = true` in the top level of Vault's configuration file and point a
- web browser at your Vault address.
- * **Google Cloud Storage Physical Backend**: You can now use GCS for storing
- Vault data [GH-2099]
-
-IMPROVEMENTS:
-
- * auth/github: Policies can now be assigned to users as well as to teams
- [GH-2079]
- * cli: Set the number of retries on 500 down to 0 by default (no retrying). It
- can be very confusing to users when there is a pause while the retries
- happen if they haven't explicitly set it. With request forwarding the need
- for this is lessened anyways. [GH-2093]
- * core: Response wrapping is now allowed to be specified by backend responses
- (requires backends gaining support) [GH-2088]
- * physical/consul: When announcing service, use the scheme of the Vault server
- rather than the Consul client [GH-2146]
- * secret/consul: Added listing functionality to roles [GH-2065]
- * secret/postgresql: Added `revocation_sql` parameter on the role endpoint to
- enable customization of user revocation SQL statements [GH-2033]
- * secret/transit: Add listing of keys [GH-1987]
-
-BUG FIXES:
-
- * api/unwrap, command/unwrap: Increase compatibility of `unwrap` command with
- Vault 0.6.1 and older [GH-2014]
- * api/unwrap, command/unwrap: Fix error when no client token exists [GH-2077]
- * auth/approle: Creating the index for the role_id properly [GH-2004]
- * auth/aws-ec2: Handle the case of multiple upgrade attempts when setting the
- instance-profile ARN [GH-2035]
- * auth/ldap: Avoid leaking connections on login [GH-2130]
- * command/path-help: Use the actual error generated by Vault rather than
- always using 500 when there is a path help error [GH-2153]
- * command/ssh: Use temporary file for identity and ensure its deletion before
- the command returns [GH-2016]
- * cli: Fix error printing values with `-field` if the values contained
- formatting directives [GH-2109]
- * command/server: Don't say mlock is supported on OSX when it isn't. [GH-2120]
- * core: Fix bug where a failure to come up as active node (e.g. if an audit
- backend failed) could lead to deadlock [GH-2083]
- * physical/mysql: Fix potential crash during setup due to a query failure
- [GH-2105]
- * secret/consul: Fix panic on user error [GH-2145]
-
-## 0.6.2 (October 5, 2016)
-
-DEPRECATIONS/CHANGES:
-
- * Convergent Encryption v2: New keys in `transit` using convergent mode will
- use a new nonce derivation mechanism rather than require the user to supply
- a nonce. While not explicitly increasing security, it minimizes the
- likelihood that a user will use the mode improperly and impact the security
- of their keys. Keys in convergent mode that were created in v0.6.1 will
- continue to work with the same mechanism (user-supplied nonce).
- * `etcd` HA off by default: Following in the footsteps of `dynamodb`, the
- `etcd` storage backend now requires that `ha_enabled` be explicitly
- specified in the configuration file. The backend currently has known broken
- HA behavior, so this flag discourages use by default without explicitly
- enabling it. If you are using this functionality, when upgrading, you should
- set `ha_enabled` to `"true"` *before* starting the new versions of Vault.
- * Default/Max lease/token TTLs are now 32 days: In previous versions of Vault
- the default was 30 days, but moving it to 32 days allows some operations
- (e.g. reauthenticating, renewing, etc.) to be performed via a monthly cron
- job.
- * AppRole Secret ID endpoints changed: Secret ID and Secret ID accessors are
- no longer part of request URLs. The GET and DELETE operations are now moved
- to new endpoints (`/lookup` and `/destroy`) which consume the input from
- the body and not the URL.
- * AppRole requires at least one constraint: previously it was sufficient to
- turn off all AppRole authentication constraints (secret ID, CIDR block) and
- use the role ID only. It is now required that at least one additional
- constraint is enabled. Existing roles are unaffected, but any new roles or
- updated roles will require this.
- * Reading wrapped responses from `cubbyhole/response` is deprecated. The
- `sys/wrapping/unwrap` endpoint should be used instead as it provides
- additional security, auditing, and other benefits. The ability to read
- directly will be removed in a future release.
- * Request Forwarding is now on by default: in 0.6.1 this required toggling on,
- but is now enabled by default. This can be disabled via the
- `"disable_clustering"` parameter in Vault's
- [config](https://www.vaultproject.io/docs/config/index.html), or per-request
- with the `X-Vault-No-Request-Forwarding` header.
- * In prior versions a bug caused the `bound_iam_role_arn` value in the
- `aws-ec2` authentication backend to actually use the instance profile ARN.
- This has been corrected, but as a result there is a behavior change. To
- match using the instance profile ARN, a new parameter
- `bound_iam_instance_profile_arn` has been added. Existing roles will
- automatically transfer the value over to the correct parameter, but the next
- time the role is updated, the new meanings will take effect.
-
-FEATURES:
-
- * **Secret ID CIDR Restrictions in `AppRole`**: Secret IDs generated under an
- approle can now specify a list of CIDR blocks from where the requests to
- generate secret IDs should originate. If an approle already has CIDR
- restrictions specified, the CIDR restrictions on the secret ID should be a
- subset of those specified on the role [GH-1910]
- * **Initial Root Token PGP Encryption**: Similar to `generate-root`, the root
- token created at initialization time can now be PGP encrypted [GH-1883]
- * **Support Chained Intermediate CAs in `pki`**: The `pki` backend now allows,
- when a CA cert is being supplied as a signed root or intermediate, a trust
- chain of arbitrary length. The chain is returned as a parameter at
- certificate issue/sign time and is retrievable independently as well.
- [GH-1694]
- * **Response Wrapping Enhancements**: There are new endpoints to look up
- response wrapped token parameters; wrap arbitrary values; rotate wrapping
- tokens; and unwrap with enhanced validation. In addition, list operations
- can now be response-wrapped. [GH-1927]
- * **Transit Features**: The `transit` backend now supports generating random
- bytes and SHA sums; HMACs; and signing and verification functionality using
- EC keys (P-256 curve)
-
-IMPROVEMENTS:
-
- * api: Return error when an invalid (as opposed to incorrect) unseal key is
- submitted, rather than ignoring it [GH-1782]
- * api: Add method to call `auth/token/create-orphan` endpoint [GH-1834]
- * api: Rekey operation now redirects from standbys to master [GH-1862]
- * audit/file: Sending a `SIGHUP` to Vault now causes Vault to close and
- re-open the log file, making it easier to rotate audit logs [GH-1953]
- * auth/aws-ec2: EC2 instances can get authenticated by presenting the identity
- document and its SHA256 RSA digest [GH-1961]
- * auth/aws-ec2: IAM bound parameters on the aws-ec2 backend will perform a
- prefix match instead of exact match [GH-1943]
- * auth/aws-ec2: Added a new constraint `bound_iam_instance_profile_arn` to
- refer to IAM instance profile ARN and fixed the earlier `bound_iam_role_arn`
- to refer to IAM role ARN instead of the instance profile ARN [GH-1913]
- * auth/aws-ec2: Backend generates the nonce by default and clients can
- explicitly disable reauthentication by setting empty nonce [GH-1889]
- * auth/token: Added warnings if tokens and accessors are used in URLs [GH-1806]
- * command/format: The `format` flag on select CLI commands takes `yml` as an
- alias for `yaml` [GH-1899]
- * core: Allow the size of the read cache to be set via the config file, and
- change the default value to 1MB (from 32KB) [GH-1784]
- * core: Allow single and two-character path parameters for most places
- [GH-1811]
- * core: Allow list operations to be response-wrapped [GH-1814]
- * core: Provide better protection against timing attacks in Shamir code
- [GH-1877]
- * core: Unmounting/disabling backends no longer returns an error if the mount
- didn't exist. This is in line with elsewhere in Vault's API where `DELETE` is
- an idempotent operation. [GH-1903]
- * credential/approle: At least one constraint is required to be enabled while
- creating and updating a role [GH-1882]
- * secret/cassandra: Added consistency level for use with roles [GH-1931]
- * secret/mysql: SQL for revoking user can be configured on the role [GH-1914]
- * secret/transit: Use HKDF (RFC 5869) as the key derivation function for new
- keys [GH-1812]
- * secret/transit: Empty plaintext values are now allowed [GH-1874]
-
-BUG FIXES:
-
- * audit: Fix panic being caused by some values logging as underlying Go types
- instead of formatted strings [GH-1912]
- * auth/approle: Fixed panic on deleting approle that doesn't exist [GH-1920]
- * auth/approle: Not letting secret IDs and secret ID accessors get logged
- in plaintext in audit logs [GH-1947]
- * auth/aws-ec2: Allow authentication if the underlying host is in a bad state
- but the instance is running [GH-1884]
- * auth/token: Fixed metadata getting missed out from token lookup response by
- gracefully handling token entry upgrade [GH-1924]
- * cli: Don't error on newline in token file [GH-1774]
- * core: Pass back content-type header for forwarded requests [GH-1791]
- * core: Fix panic if the same key was given twice to `generate-root` [GH-1827]
- * core: Fix potential deadlock on unmount/remount [GH-1793]
- * physical/file: Remove empty directories from the `file` storage backend [GH-1821]
- * physical/zookeeper: Remove empty directories from the `zookeeper` storage
- backend and add a fix to the `file` storage backend's logic [GH-1964]
- * secret/aws: Added update operation to `aws/sts` path to consider `ttl`
- parameter [39b75c6]
- * secret/aws: Mark STS secrets as non-renewable [GH-1804]
- * secret/cassandra: Properly store session for re-use [GH-1802]
- * secret/ssh: Fix panic when revoking SSH dynamic keys [GH-1781]
-
-## 0.6.1 (August 22, 2016)
-
-DEPRECATIONS/CHANGES:
-
- * Once the active node is 0.6.1, standby nodes must also be 0.6.1 in order to
- connect to the HA cluster. We recommend following our [general upgrade
- instructions](https://www.vaultproject.io/docs/install/upgrade.html) in
- addition to 0.6.1-specific upgrade instructions to ensure that this is not
- an issue.
- * Status codes for sealed/uninitialized Vaults have changed to `503`/`501`
- respectively. See the [version-specific upgrade
- guide](https://www.vaultproject.io/docs/install/upgrade-to-0.6.1.html) for
- more details.
- * Root tokens (tokens with the `root` policy) can no longer be created except
- by another root token or the `generate-root` endpoint.
- * Issued certificates from the `pki` backend against new roles created or
- modified after upgrading will contain a set of default key usages.
- * The `dynamodb` physical data store no longer supports HA by default. It has
- some non-ideal behavior around failover that was causing confusion. See the
- [documentation](https://www.vaultproject.io/docs/config/index.html#ha_enabled)
- for information on enabling HA mode. It is very important that this
- configuration is added _before upgrading_.
- * The `ldap` backend no longer searches for `memberOf` groups as part of its
- normal flow. Instead, the desired group filter must be specified. This fixes
- some errors and increases speed for directories with different structures,
- but if this behavior has been relied upon, ensure that you see the upgrade
- notes _before upgrading_.
- * `app-id` is now deprecated with the addition of the new AppRole backend.
- There are no plans to remove it, but we encourage using AppRole whenever
- possible, as it offers enhanced functionality and can accommodate many more
- types of authentication paradigms.
-
-FEATURES:
-
- * **AppRole Authentication Backend**: The `approle` backend is a
- machine-oriented authentication backend that provides a similar concept to
- App-ID while adding many missing features, including a pull model that
- allows for the backend to generate authentication credentials rather than
- requiring operators or other systems to push credentials in. It should be
- useful in many more situations than App-ID. The inclusion of this backend
- deprecates App-ID. [GH-1426]
- * **Request Forwarding**: Vault servers can now forward requests to each other
- rather than redirecting clients. This feature is off by default in 0.6.1 but
- will be on by default in the next release. See the [HA concepts
- page](https://www.vaultproject.io/docs/concepts/ha.html) for information on
- enabling and configuring it. [GH-443]
- * **Convergent Encryption in `Transit`**: The `transit` backend now supports a
- convergent encryption mode where the same plaintext will produce the same
- ciphertext. Although very useful in some situations, this has potential
- security implications, which are mostly mitigated by requiring the use of
- key derivation when convergent encryption is enabled. See [the `transit`
- backend
- documentation](https://www.vaultproject.io/docs/secrets/transit/index.html)
- for more details. [GH-1537]
- * **Improved LDAP Group Filters**: The `ldap` auth backend now uses templates
- to define group filters, providing the capability to support some
- directories that could not easily be supported before (especially specific
- Active Directory setups with nested groups). [GH-1388]
- * **Key Usage Control in `PKI`**: Issued certificates from roles created or
- modified after upgrading contain a set of default key usages for increased
- compatibility with OpenVPN and some other software. This set can be changed
- when writing a role definition. Existing roles are unaffected. [GH-1552]
- * **Request Retrying in the CLI and Go API**: Requests that fail with a `5xx`
- error code will now retry after a backoff. The maximum total number of
- retries (including disabling this functionality) can be set with an
- environment variable. See the [environment variable
- documentation](https://www.vaultproject.io/docs/commands/environment.html)
- for more details. [GH-1594]
- * **Service Discovery in `vault init`**: The new `-auto` option on `vault init`
- will perform service discovery using Consul. When only one node is discovered,
- it will be initialized and when more than one node is discovered, they will
- be output for easy selection. See `vault init --help` for more details. [GH-1642]
- * **MongoDB Secret Backend**: Generate dynamic unique MongoDB database
- credentials based on configured roles. Sponsored by
- [CommerceHub](http://www.commercehub.com/). [GH-1414]
- * **Circonus Metrics Integration**: Vault can now send metrics to
- [Circonus](http://www.circonus.com/). See the [configuration
- documentation](https://www.vaultproject.io/docs/config/index.html) for
- details. [GH-1646]
-
-IMPROVEMENTS:
-
- * audit: Added a unique identifier to each request which will also be found in
- the request portion of the response. [GH-1650]
- * auth/aws-ec2: Added a new constraint `bound_account_id` to the role
- [GH-1523]
- * auth/aws-ec2: Added a new constraint `bound_iam_role_arn` to the role
- [GH-1522]
- * auth/aws-ec2: Added `ttl` field for the role [GH-1703]
- * auth/ldap, secret/cassandra, physical/consul: Clients with `tls.Config`
- have the minimum TLS version set to 1.2 by default. This is configurable.
- * auth/token: Added endpoint to list accessors [GH-1676]
- * auth/token: Added `disallowed_policies` option to token store roles [GH-1681]
- * auth/token: `root` or `sudo` tokens can now create periodic tokens via
- `auth/token/create`; additionally, the same token can now be periodic and
- have an explicit max TTL [GH-1725]
- * build: Add support for building on Solaris/Illumos [GH-1726]
- * cli: Output formatting in the presence of warnings in the response object
- [GH-1533]
- * cli: `vault auth` command supports a `-path` option to take in the path at
- which the auth backend is enabled, thereby allowing authenticating against
- different paths using the command options [GH-1532]
- * cli: `vault auth -methods` will now display the config settings of the mount
- [GH-1531]
- * cli: `vault read/write/unwrap -field` now allows selecting token response
- fields [GH-1567]
- * cli: `vault write -field` now allows selecting wrapped response fields
- [GH-1567]
- * command/status: Version information and cluster details added to the output
- of `vault status` command [GH-1671]
- * core: Response wrapping is now enabled for login endpoints [GH-1588]
- * core: The duration of leadership is now exported via events through
- telemetry [GH-1625]
- * core: `sys/capabilities-self` is now accessible as part of the `default`
- policy [GH-1695]
- * core: `sys/renew` is now accessible as part of the `default` policy [GH-1701]
- * core: Unseal keys will now be returned in both hex and base64 forms, and
- either can be used [GH-1734]
- * core: Responses from most `/sys` endpoints now return normal `api.Secret`
- structs in addition to the values they carried before. This means that
- response wrapping can now be used with most authenticated `/sys` operations
- [GH-1699]
- * physical/etcd: Support `ETCD_ADDR` env var for specifying addresses [GH-1576]
- * physical/consul: Allowing additional tags to be added to Consul service
- registration via `service_tags` option [GH-1643]
- * secret/aws: Listing of roles is supported now [GH-1546]
- * secret/cassandra: Add `connect_timeout` value for Cassandra connection
- configuration [GH-1581]
- * secret/mssql,mysql,postgresql: Reading of connection settings is supported
- in all the sql backends [GH-1515]
- * secret/mysql: Added optional maximum idle connections value to MySQL
- connection configuration [GH-1635]
- * secret/mysql: Use a combination of the role name and token display name in
- generated user names and allow the length to be controlled [GH-1604]
- * secret/{cassandra,mssql,mysql,postgresql}: SQL statements can now be passed
- in via one of four ways: a semicolon-delimited string, a base64-delimited
- string, a serialized JSON string array, or a base64-encoded serialized JSON
- string array [GH-1686]
- * secret/ssh: Added `allowed_roles` to vault-ssh-helper's config and returning
- role name as part of response of `verify` API
- * secret/ssh: Added passthrough of command line arguments to `ssh` [GH-1680]
- * sys/health: Added version information to the response of health status
- endpoint [GH-1647]
- * sys/health: Cluster information is returned as part of health status when
- Vault is unsealed [GH-1671]
- * sys/mounts: MountTable data is compressed before serializing to accommodate
- thousands of mounts [GH-1693]
- * website: The [token
- concepts](https://www.vaultproject.io/docs/concepts/tokens.html) page has
- been completely rewritten [GH-1725]
-
-BUG FIXES:
-
- * auth/aws-ec2: Added a nil check for stored whitelist identity object
- during renewal [GH-1542]
- * auth/cert: Fix panic if no client certificate is supplied [GH-1637]
- * auth/token: Don't report that a non-expiring root token is renewable, as
- attempting to renew it results in an error [GH-1692]
- * cli: Don't retry a command when a redirection is received [GH-1724]
- * core: Fix regression causing status codes to be `400` in most non-5xx error
- cases [GH-1553]
- * core: Fix panic that could occur during a leadership transition [GH-1627]
- * physical/postgres: Remove use of prepared statements as this causes
- connection multiplexing software to break [GH-1548]
- * physical/consul: Multiple Vault nodes on the same machine leading to check ID
- collisions were resulting in incorrect health check responses [GH-1628]
- * physical/consul: Fix deregistration of health checks on exit [GH-1678]
- * secret/postgresql: Check for existence of role before attempting deletion
- [GH-1575]
- * secret/postgresql: Handle revoking roles that have privileges on sequences
- [GH-1573]
- * secret/postgresql(,mysql,mssql): Fix incorrect use of database over
- transaction object which could lead to connection exhaustion [GH-1572]
- * secret/pki: Fix parsing CA bundle containing trailing whitespace [GH-1634]
- * secret/pki: Fix adding email addresses as SANs [GH-1688]
- * secret/pki: Ensure that CRL values are always UTC, per RFC [GH-1727]
- * sys/seal-status: Fixed nil Cluster object while checking seal status [GH-1715]
-
-## 0.6.0 (June 14th, 2016)
-
-SECURITY:
-
- * Although `sys/revoke-prefix` was intended to revoke prefixes of secrets (via
- lease IDs, which incorporate path information) and
- `auth/token/revoke-prefix` was intended to revoke prefixes of tokens (using
- the tokens' paths and, since 0.5.2, role information), in implementation
- they both behaved exactly the same way since a single component in Vault is
- responsible for managing lifetimes of both, and the type of the tracked
- lifetime was not being checked. The end result was that either endpoint
- could revoke both secret leases and tokens. We consider this a very minor
- security issue as there are a number of mitigating factors: both endpoints
- require `sudo` capability in addition to write capability, preventing
- blanket ACL path globs from providing access; both work by using the prefix
- to revoke as a part of the endpoint path, allowing them to be properly
- ACL'd; and both are intended for emergency scenarios and users should
- already not generally have access to either one. In order to prevent
- confusion, we have simply removed `auth/token/revoke-prefix` in 0.6, and
- `sys/revoke-prefix` will be meant for both leases and tokens instead.
-
-DEPRECATIONS/CHANGES:
-
- * `auth/token/revoke-prefix` has been removed. See the security notice for
- details. [GH-1280]
- * Vault will now automatically register itself as the `vault` service when
- using the `consul` backend and will perform its own health checks. See
- the Consul backend documentation for information on how to disable
- auto-registration and service checks.
- * List operations that do not find any keys now return a `404` status code
- rather than an empty response object [GH-1365]
- * CA certificates issued from the `pki` backend no longer have associated
- leases, and any CA certs already issued will ignore revocation requests from
- the lease manager. This is to prevent CA certificates from being revoked
- when the token used to issue the certificate expires; it was not obvious
- to users that they need to ensure that the token lifetime needed to be at
- least as long as a potentially very long-lived CA cert.
-
-FEATURES:
-
- * **AWS EC2 Auth Backend**: Provides a secure introduction mechanism for AWS
- EC2 instances allowing automated retrieval of Vault tokens. Unlike most
- Vault authentication backends, this backend does not require first deploying
- or provisioning security-sensitive credentials (tokens, username/password,
- client certificates, etc). Instead, it treats AWS as a Trusted Third Party
- and uses the cryptographically signed dynamic metadata information that
- uniquely represents each EC2 instance. [Vault
- Enterprise](https://www.hashicorp.com/vault.html) customers have access to a
- turnkey client that speaks the backend API and makes access to a Vault token
- easy.
- * **Response Wrapping**: Nearly any response within Vault can now be wrapped
- inside a single-use, time-limited token's cubbyhole, taking the [Cubbyhole
- Authentication
- Principles](https://www.hashicorp.com/blog/vault-cubbyhole-principles.html)
- mechanism to its logical conclusion. Retrieving the original response is as
- simple as a single API command or the new `vault unwrap` command. This makes
- secret distribution easier and more secure, including secure introduction.
- * **Azure Physical Backend**: You can now use Azure blob object storage as
- your Vault physical data store [GH-1266]
- * **Swift Physical Backend**: You can now use Swift blob object storage as
- your Vault physical data store [GH-1425]
- * **Consul Backend Health Checks**: The Consul backend will automatically
- register a `vault` service and perform its own health checking. By default
- the active node can be found at `active.vault.service.consul` and all
- standby nodes are `standby.vault.service.consul`. Sealed vaults are marked
- critical and are not listed by default in Consul's service discovery. See
- the documentation for details. [GH-1349]
- * **Explicit Maximum Token TTLs**: You can now set explicit maximum TTLs on
- tokens that do not honor changes in the system- or mount-set values. This is
- useful, for instance, when the max TTL of the system or the `auth/token`
- mount must be set high to accommodate certain needs but you want more
- granular restrictions on tokens being issued directly from the Token
- authentication backend at `auth/token`. [GH-1399]
- * **Non-Renewable Tokens**: When creating tokens directly through the token
- authentication backend, you can now specify in both token store roles and
- the API whether or not a token should be renewable, defaulting to `true`.
- * **RabbitMQ Secret Backend**: Vault can now generate credentials for
- RabbitMQ. Vhosts and tags can be defined within roles. [GH-788]
-
-IMPROVEMENTS:
-
- * audit: Add the DisplayName value to the copy of the Request object embedded
- in the associated Response, to match the original Request object [GH-1387]
- * audit: Enable auditing of the `seal` and `step-down` commands [GH-1435]
- * backends: Remove most `root`/`sudo` paths in favor of normal ACL mechanisms.
- A particular exception are any current MFA paths. A few paths in `token` and
- `sys` also require `root` or `sudo`. [GH-1478]
- * command/auth: Restore the previous authenticated token if the `auth` command
- fails to authenticate the provided token [GH-1233]
- * command/write: `-format` and `-field` can now be used with the `write`
- command [GH-1228]
- * core: Add `mlock` support for FreeBSD, OpenBSD, and Darwin [GH-1297]
- * core: Don't keep lease timers around when tokens are revoked [GH-1277]
- * core: If using the `disable_cache` option, caches for the policy store and
- the `transit` backend are now disabled as well [GH-1346]
- * credential/cert: Renewal requests are rejected if the set of policies has
- changed since the token was issued [GH-477]
- * credential/cert: Check CRLs for specific non-CA certs configured in the
- backend [GH-1404]
- * credential/ldap: If `groupdn` is not configured, skip searching LDAP and
- only return policies for local groups, plus a warning [GH-1283]
- * credential/ldap: `vault list` support for users and groups [GH-1270]
- * credential/ldap: Support for the `memberOf` attribute for group membership
- searching [GH-1245]
- * credential/userpass: Add list support for users [GH-911]
- * credential/userpass: Remove user configuration paths from requiring sudo, in
- favor of normal ACL mechanisms [GH-1312]
- * credential/token: Sanitize policies and add `default` policies in appropriate
- places [GH-1235]
- * credential/token: Setting the renewable status of a token is now possible
- via `vault token-create` and the API. The default is true, but tokens can be
- specified as non-renewable. [GH-1499]
- * secret/aws: Use chain credentials to allow environment/EC2 instance/shared
- providers [GH-307]
- * secret/aws: Support for STS AssumeRole functionality [GH-1318]
- * secret/consul: Reading consul access configuration supported. The response
- will contain non-sensitive information only [GH-1445]
- * secret/pki: Added `exclude_cn_from_sans` field to prevent adding the CN to
- DNS or Email Subject Alternate Names [GH-1220]
- * secret/pki: Added list support for certificates [GH-1466]
- * sys/capabilities: Enforce ACL checks for requests that query the capabilities
- of a token on a given path [GH-1221]
- * sys/health: Status information can now be retrieved with `HEAD` [GH-1509]
-
-BUG FIXES:
-
- * command/read: Fix panic when using `-field` with a non-string value [GH-1308]
- * command/token-lookup: Fix TTL showing as 0 depending on how a token was
- created. This only affected the value shown at lookup, not the token
- behavior itself. [GH-1306]
- * command/various: Tell the JSON decoder to not convert all numbers to floats;
- fixes some various places where numbers were showing up in scientific
- notation
- * command/server: Prioritized `devRootTokenID` and `devListenAddress` flags
- over their respective env vars [GH-1480]
- * command/ssh: Provided option to disable host key checking. The automated
- variant of `vault ssh` command uses `sshpass` which was failing to handle
- host key checking presented by the `ssh` binary. [GH-1473]
- * core: Properly persist mount-tuned TTLs for auth backends [GH-1371]
- * core: Don't accidentally crosswire SIGINT to the reload handler [GH-1372]
- * credential/github: Make organization comparison case-insensitive during
- login [GH-1359]
- * credential/github: Fix panic when renewing a token created with some earlier
- versions of Vault [GH-1510]
- * credential/github: The token used to log in via `vault auth` can now be
- specified in the `VAULT_AUTH_GITHUB_TOKEN` environment variable [GH-1511]
- * credential/ldap: Fix problem where certain error conditions when configuring
- or opening LDAP connections would cause a panic instead of returning a useful
- error message [GH-1262]
- * credential/token: Fall back to normal parent-token semantics if
- `allowed_policies` is empty for a role. Using `allowed_policies` of
- `default` resulted in the same behavior anyways. [GH-1276]
- * credential/token: Fix issues renewing tokens when using the "suffix"
- capability of token roles [GH-1331]
- * credential/token: Fix lookup via POST showing the request token instead of
- the desired token [GH-1354]
- * credential/various: Fix renewal conditions when `default` policy is not
- contained in the backend config [GH-1256]
- * physical/s3: Don't panic in certain error cases from bad S3 responses [GH-1353]
- * secret/consul: Use non-pooled Consul API client to avoid leaving files open
- [GH-1428]
- * secret/pki: Don't check whether a certificate is destined to be a CA
- certificate if sign-verbatim endpoint is used [GH-1250]
-
-## 0.5.3 (May 27th, 2016)
-
-SECURITY:
-
- * Consul ACL Token Revocation: An issue was reported to us indicating that
- generated Consul ACL tokens were not being properly revoked. Upon
- investigation, we found that this behavior was reproducible in a specific
- scenario: when a generated lease for a Consul ACL token had been renewed
- prior to revocation. In this case, the generated token was not being
- properly persisted internally through the renewal function, leading to an
- error during revocation due to the missing token. Unfortunately, this was
- coded as a user error rather than an internal error, and the revocation
- logic was expecting internal errors if revocation failed. As a result, the
- revocation logic believed the revocation to have succeeded when it in fact
- failed, causing the lease to be dropped while the token was still valid
- within Consul. In this release, the Consul backend properly persists the
- token through renewals, and the revocation logic has been changed to
- consider any error type to have been a failure to revoke, causing the lease
- to persist and attempt to be revoked later.
-
-We have written an example shell script that searches through Consul's ACL
-tokens and looks for those generated by Vault, which can be used as a template
-for a revocation script as deemed necessary for any particular security
-response. The script is available at
-https://gist.github.com/jefferai/6233c2963f9407a858d84f9c27d725c0
-
-Please note that any outstanding leases for Consul tokens produced prior to
-0.5.3 that have been renewed will continue to exhibit this behavior. As a
-result, we recommend either revoking all tokens produced by the backend and
-issuing new ones, or if needed, a more advanced variant of the provided example
-could use the timestamp embedded in each generated token's name to decide which
-tokens are too old and should be deleted. This could then be run periodically
-up until the maximum lease time for any outstanding pre-0.5.3 tokens has
-expired.
-
-This is a security-only release. There are no other code changes since 0.5.2.
-The binaries have one additional change: they are built against Go 1.6.1 rather
-than Go 1.6, as Go 1.6.1 contains two security fixes to the Go programming
-language itself.
-
-## 0.5.2 (March 16th, 2016)
-
-FEATURES:
-
- * **MSSQL Backend**: Generate dynamic unique MSSQL database credentials based
- on configured roles [GH-998]
- * **Token Accessors**: Vault now provides an accessor with each issued token.
- This accessor is an identifier that can be used for a limited set of
- actions, notably for token revocation. This value can be logged in
- plaintext to audit logs, and in combination with the plaintext metadata
- logged to audit logs, provides a searchable and straightforward way to
- revoke particular users' or services' tokens in many cases. To enable
- plaintext audit logging of these accessors, set `hmac_accessor=false` when
- enabling an audit backend.
- * **Token Credential Backend Roles**: Roles can now be created in the `token`
- credential backend that allow modifying token behavior in ways that are not
- otherwise exposed or easily delegated. This allows creating tokens with a
- fixed set (or subset) of policies (rather than a subset of the calling
- token's), periodic tokens with a fixed TTL but no expiration, specified
- prefixes, and orphans.
- * **Listener Certificate Reloading**: Vault's configured listeners now reload
- their TLS certificate and private key when the Vault process receives a
- SIGHUP.
-
-IMPROVEMENTS:
-
- * auth/token: Endpoints optionally accept tokens from the HTTP body rather
- than just from the URLs [GH-1211]
- * auth/token,sys/capabilities: Added new endpoints
- `auth/token/lookup-accessor`, `auth/token/revoke-accessor` and
- `sys/capabilities-accessor`, which enables performing the respective actions
- with just the accessor of the tokens, without having access to the actual
- token [GH-1188]
- * core: Ignore leading `/` in policy paths [GH-1170]
- * core: Ignore leading `/` in mount paths [GH-1172]
- * command/policy-write: Provided HCL is now validated for format violations
- and provides helpful information around where the violation occurred
- [GH-1200]
- * command/server: The initial root token ID when running in `-dev` mode can
- now be specified via `-dev-root-token-id` or the environment variable
- `VAULT_DEV_ROOT_TOKEN_ID` [GH-1162]
- * command/server: The listen address when running in `-dev` mode can now be
- specified via `-dev-listen-address` or the environment variable
- `VAULT_DEV_LISTEN_ADDRESS` [GH-1169]
- * command/server: The configured listeners now reload their TLS
- certificates/keys when Vault is SIGHUP'd [GH-1196]
- * command/step-down: New `vault step-down` command and API endpoint to force
- the targeted node to give up active status, but without sealing. The node
- will wait ten seconds before attempting to grab the lock again. [GH-1146]
- * command/token-renew: Allow no token to be passed in; use `renew-self` in
- this case. Change the behavior for any token being passed in to use `renew`.
- [GH-1150]
- * credential/app-id: Allow `app-id` parameter to be given in the login path;
- this causes the `app-id` to be part of the token path, making it easier to
- use with `revoke-prefix` [GH-424]
- * credential/cert: Non-CA certificates can be used for authentication. They
- must be matched exactly (issuer and serial number) for authentication, and
- the certificate must carry the client authentication or 'any' extended usage
- attributes. [GH-1153]
- * credential/cert: Subject and Authority key IDs are output in metadata; this
- allows more flexible searching/revocation in the audit logs [GH-1183]
- * credential/cert: Support listing configured certs [GH-1212]
- * credential/userpass: Add support for `create`/`update` capability
- distinction in user path, and add user-specific endpoints to allow changing
- the password and policies [GH-1216]
- * credential/token: Add roles [GH-1155]
- * secret/mssql: Add MSSQL backend [GH-998]
- * secret/pki: Add revocation time (zero or Unix epoch) to `pki/cert/SERIAL`
- endpoint [GH-1180]
- * secret/pki: Sanitize serial number in `pki/revoke` endpoint to allow some
- other formats [GH-1187]
- * secret/ssh: Added documentation for `ssh/config/zeroaddress` endpoint.
- [GH-1154]
- * sys: Added new endpoints `sys/capabilities` and `sys/capabilities-self` to
- fetch the capabilities of a token on a given path [GH-1171]
- * sys: Added `sys/revoke-force`, which enables a user to ignore backend errors
- when revoking a lease, necessary in some emergency/failure scenarios
- [GH-1168]
- * sys: The return codes from `sys/health` can now be user-specified via query
- parameters [GH-1199]
-
-BUG FIXES:
-
- * logical/cassandra: Apply hyphen/underscore replacement to the entire
- generated username, not just the UUID, in order to handle token display name
- hyphens [GH-1140]
- * physical/etcd: Output actual error when cluster sync fails [GH-1141]
- * vault/expiration: Error responses from the backends are no longer skipped
- during renewals [GH-1176]
-
-## 0.5.1 (February 25th, 2016)
-
-DEPRECATIONS/CHANGES:
-
- * RSA keys less than 2048 bits are no longer supported in the PKI backend.
- 1024-bit keys are considered unsafe and are disallowed in the Internet PKI.
- The `pki` backend has enforced SHA256 hashes in signatures from the
- beginning, and software that can handle these hashes should be able to
- handle larger key sizes. [GH-1095]
- * The PKI backend now does not automatically delete expired certificates,
- including from the CRL. Doing so could lead to a situation where a time
- mismatch between the Vault server and clients could result in a certificate
- that would not be considered expired by a client being removed from the CRL.
- The new `pki/tidy` endpoint can be used to trigger expirations. [GH-1129]
- * The `cert` backend now performs a variant of channel binding at renewal time
- for increased security. In order to not overly burden clients, a notion of
- identity is used. This functionality can be disabled. See the 0.5.1 upgrade
- guide for more specific information [GH-1127]
-
-FEATURES:
-
- * **Codebase Audit**: Vault's 0.5 codebase was audited by iSEC. (The terms of
- the audit contract do not allow us to make the results public.) [GH-220]
-
-IMPROVEMENTS:
-
- * api: The `VAULT_TLS_SERVER_NAME` environment variable can be used to control
- the SNI header during TLS connections [GH-1131]
- * api/health: Add the server's time in UTC to health responses [GH-1117]
- * command/rekey and command/generate-root: These now return the status at
- attempt initialization time, rather than requiring a separate fetch for the
- nonce [GH-1054]
- * credential/cert: Don't require root/sudo tokens for the `certs/` and `crls/`
- paths; use normal ACL behavior instead [GH-468]
- * credential/github: The validity of the token used for login will be checked
- at renewal time [GH-1047]
- * credential/github: The `config` endpoint no longer requires a root token;
- normal ACL path matching applies
- * deps: Use the standardized Go 1.6 vendoring system
- * secret/aws: Inform users of AWS-imposed policy restrictions around STS
- tokens if they attempt to use an invalid policy [GH-1113]
- * secret/mysql: The MySQL backend now allows disabling verification of the
- `connection_url` [GH-1096]
- * secret/pki: Submitted CSRs are now verified to have the correct key type and
- minimum number of bits according to the role. The exception is intermediate
- CA signing and the `sign-verbatim` path [GH-1104]
- * secret/pki: New `tidy` endpoint to allow expunging expired certificates.
- [GH-1129]
- * secret/postgresql: The PostgreSQL backend now allows disabling verification
- of the `connection_url` [GH-1096]
- * secret/ssh: When verifying an OTP, return 400 if it is not valid instead of
- 204 [GH-1086]
- * credential/app-id: App ID backend will check the validity of app-id and user-id
- during renewal time [GH-1039]
- * credential/cert: TLS Certificates backend, during renewal, will now match the
- client identity with the client identity used during login [GH-1127]
-
-BUG FIXES:
-
- * credential/ldap: Properly escape values being provided to search filters
- [GH-1100]
- * secret/aws: Capping on length of usernames for both IAM and STS types
- [GH-1102]
- * secret/pki: If a cert is not found during lookup of a serial number,
- respond with a 400 rather than a 500 [GH-1085]
- * secret/postgresql: Add extra revocation statements to better handle more
- permission scenarios [GH-1053]
- * secret/postgresql: Make connection_url work properly [GH-1112]
-
-## 0.5.0 (February 10, 2016)
-
-SECURITY:
-
- * Previous versions of Vault could allow a malicious user to hijack the rekey
- operation by canceling an operation in progress and starting a new one. The
- practical application of this is very small. If the user was an unseal key
- owner, they could attempt to do this in order to either receive unencrypted
- reseal keys or to replace the PGP keys used for encryption with ones under
- their control. However, since this would invalidate any rekey progress, they
- would need other unseal key holders to resubmit, which would be rather
- suspicious during this manual operation if they were not also the original
- initiator of the rekey attempt. If the user was not an unseal key holder,
- there is no benefit to be gained; the only outcome that could be attempted
- would be a denial of service against a legitimate rekey operation by sending
- cancel requests over and over. Thanks to Josh Snyder for the report!
-
-DEPRECATIONS/CHANGES:
-
- * `s3` physical backend: Environment variables are now preferred over
- configuration values. This makes it behave similar to the rest of Vault,
- which, in increasing order of preference, uses values from the configuration
- file, environment variables, and CLI flags. [GH-871]
- * `etcd` physical backend: `sync` functionality is now supported and turned on
- by default. This can be disabled. [GH-921]
- * `transit`: If a client attempts to encrypt a value with a key that does not
- yet exist, what happens now depends on the capabilities set in the client's
- ACL policies. If the client has `create` (or `create` and `update`)
- capability, the key will upsert as in the past. If the client has `update`
- capability, they will receive an error. [GH-1012]
- * `token-renew` CLI command: If the token given for renewal is the same as the
- client token, the `renew-self` endpoint will be used in the API. Given that
- the `default` policy (by default) allows all clients access to the
- `renew-self` endpoint, this makes it much more likely that the intended
- operation will be successful. [GH-894]
- * Token `lookup`: the `ttl` value in the response now reflects the actual
- remaining TTL rather than the original TTL specified when the token was
- created; this value is now located in `creation_ttl` [GH-986]
- * Vault no longer uses grace periods on leases or token TTLs. Uncertainty
- about the length of the grace period for any given backend could cause confusion
- and uncertainty. [GH-1002]
- * `rekey`: Rekey now requires a nonce to be supplied with key shares. This
- nonce is generated at the start of a rekey attempt and is unique for that
- attempt.
- * `status`: The exit code for the `status` CLI command is now `2` for an
- uninitialized Vault instead of `1`. `1` is returned for errors. This better
- matches the rest of the CLI.
-
-FEATURES:
-
- * **Split Data/High Availability Physical Backends**: You can now configure
- two separate physical backends: one to be used for High Availability
- coordination and another to be used for encrypted data storage. See the
- [configuration
- documentation](https://vaultproject.io/docs/config/index.html) for details.
- [GH-395]
- * **Fine-Grained Access Control**: Policies can now use the `capabilities` set
- to specify fine-grained control over operations allowed on a path, including
- separation of `sudo` privileges from other privileges. These can be mixed
- and matched in any way desired. The `policy` value is kept for backwards
- compatibility. See the [updated policy
- documentation](https://vaultproject.io/docs/concepts/policies.html) for
- details. [GH-914]
- * **List Support**: Listing is now supported via the API and the new `vault
- list` command. This currently supports listing keys in the `generic` and
- `cubbyhole` backends and a few other places (noted in the IMPROVEMENTS
- section below). Different parts of the API and backends will need to
- implement list capabilities in ways that make sense to particular endpoints,
- so further support will appear over time. [GH-617]
- * **Root Token Generation via Unseal Keys**: You can now use the
- `generate-root` CLI command to generate new orphaned, non-expiring root
- tokens in case the original is lost or revoked (accidentally or
- purposefully). This requires a quorum of unseal key holders. The output
- value is protected via any PGP key of the initiator's choosing or a one-time
- pad known only to the initiator (a suitable pad can be generated via the
- `-genotp` flag to the command). [GH-915]
- * **Unseal Key Archiving**: You can now optionally have Vault store your
- unseal keys in your chosen physical store for disaster recovery purposes.
- This option is only available when the keys are encrypted with PGP. [GH-907]
- * **Keybase Support for PGP Encryption Keys**: You can now specify Keybase
- users when passing in PGP keys to the `init`, `rekey`, and `generate-root`
- CLI commands. Public keys for these users will be fetched automatically.
- [GH-901]
- * **DynamoDB HA Physical Backend**: There is now a new, community-supported
- HA-enabled physical backend using Amazon DynamoDB. See the [configuration
- documentation](https://vaultproject.io/docs/config/index.html) for details.
- [GH-878]
- * **PostgreSQL Physical Backend**: There is now a new, community-supported
- physical backend using PostgreSQL. See the [configuration
- documentation](https://vaultproject.io/docs/config/index.html) for details.
- [GH-945]
- * **STS Support in AWS Secret Backend**: You can now use the AWS secret
- backend to fetch STS tokens rather than IAM users. [GH-927]
- * **Speedups in the transit backend**: The `transit` backend has gained a
- cache, and now loads only the working set of keys (e.g. from the
- `min_decryption_version` to the current key version) into its working set.
- This provides large speedups and potential memory savings when the `rotate`
- feature of the backend is used heavily.
-
-IMPROVEMENTS:
-
- * cli: Output secrets sorted by key name [GH-830]
- * cli: Support YAML as an output format [GH-832]
- * cli: Show an error if the output format is incorrect, rather than falling
- back to an empty table [GH-849]
- * cli: Allow setting the `advertise_addr` for HA via the
- `VAULT_ADVERTISE_ADDR` environment variable [GH-581]
- * cli/generate-root: Add generate-root and associated functionality [GH-915]
- * cli/init: Add `-check` flag that returns whether Vault is initialized
- [GH-949]
- * cli/server: Use internal functions for the token-helper rather than shelling
- out, which fixes some problems with using a static binary in Docker or paths
- with multiple spaces when launching in `-dev` mode [GH-850]
- * cli/token-lookup: Add token-lookup command [GH-892]
- * command/{init,rekey}: Allow ASCII-armored keychain files to be arguments for
- `-pgp-keys` [GH-940]
- * conf: Use normal bool values rather than empty/non-empty for the
- `tls_disable` option [GH-802]
- * credential/ldap: Add support for binding, both anonymously (to discover a
- user DN) and via a username and password [GH-975]
- * credential/token: Add `last_renewal_time` to token lookup calls [GH-896]
- * credential/token: Change `ttl` to reflect the current remaining TTL; the
- original value is in `creation_ttl` [GH-1007]
- * helper/certutil: Add ability to parse PKCS#8 bundles [GH-829]
- * logical/aws: You can now get STS tokens instead of IAM users [GH-927]
- * logical/cassandra: Add `protocol_version` parameter to set the CQL proto
- version [GH-1005]
- * logical/cubbyhole: Add cubbyhole access to default policy [GH-936]
- * logical/mysql: Add list support for roles path [GH-984]
- * logical/pki: Fix up key usages being specified for CAs [GH-989]
- * logical/pki: Add list support for roles path [GH-985]
- * logical/pki: Allow `pem_bundle` to be specified as the format, which
- provides a concatenated PEM bundle of returned values [GH-1008]
- * logical/pki: Add 30 seconds of slack to the validity start period to
- accommodate some clock skew in machines [GH-1036]
- * logical/postgres: Add `max_idle_connections` parameter [GH-950]
- * logical/postgres: Add list support for roles path
- * logical/ssh: Add list support for roles path [GH-983]
- * logical/transit: Keys are archived and only keys between the latest version
- and `min_decryption_version` are loaded into the working set. This can
- provide a very large speed increase when rotating keys very often. [GH-977]
- * logical/transit: Keys are now cached, which should provide a large speedup
- in most cases [GH-979]
- * physical/cache: Use 2Q cache instead of straight LRU [GH-908]
- * physical/etcd: Support basic auth [GH-859]
- * physical/etcd: Support sync functionality and enable by default [GH-921]
-
-BUG FIXES:
-
- * api: Correct the HTTP verb used in the LookupSelf method [GH-887]
- * api: Fix the output of `Sys().MountConfig(...)` to return proper values
- [GH-1017]
- * command/read: Fix panic when an empty argument was given [GH-923]
- * command/ssh: Fix panic when username lookup fails [GH-886]
- * core: When running in standalone mode, don't advertise that we are active
- until post-unseal setup completes [GH-872]
- * core: Update go-cleanhttp dependency to ensure idle connections aren't
- leaked [GH-867]
- * core: Don't allow tokens to have duplicate policies [GH-897]
- * core: Fix regression in `sys/renew` that caused information stored in the
- Secret part of the response to be lost [GH-912]
- * physical: Use square brackets when setting an IPv6-based advertise address
- as the auto-detected advertise address [GH-883]
- * physical/s3: Use an initialized client when using IAM roles to fix a
- regression introduced against newer versions of the AWS Go SDK [GH-836]
- * secret/pki: Fix a condition where unmounting could fail if the CA
- certificate was not properly loaded [GH-946]
- * secret/ssh: Fix a problem where SSH connections were not always closed
- properly [GH-942]
-
-MISC:
-
- * Clarified our stance on support for community-derived physical backends.
- See the [configuration
- documentation](https://vaultproject.io/docs/config/index.html) for details.
- * Add `vault-java` to libraries [GH-851]
- * Various minor documentation fixes and improvements [GH-839] [GH-854]
- [GH-861] [GH-876] [GH-899] [GH-900] [GH-904] [GH-923] [GH-924] [GH-958]
- [GH-959] [GH-981] [GH-990] [GH-1024] [GH-1025]
-
-BUILD NOTE:
-
- * The HashiCorp-provided binary release of Vault 0.5.0 is built against a
- patched version of Go 1.5.3 containing two specific bug fixes affecting TLS
- certificate handling. These fixes are in the Go 1.6 tree and were
- cherry-picked on top of stock Go 1.5.3. If you want to examine the way in
- which the releases were built, please look at our [cross-compilation
- Dockerfile](https://github.com/hashicorp/vault/blob/v0.5.0/scripts/cross/Dockerfile-patched-1.5.3).
-
-## 0.4.1 (January 13, 2016)
-
-SECURITY:
-
- * Build against Go 1.5.3 to mitigate a security vulnerability introduced in
- Go 1.5. For more information, please see
- https://groups.google.com/forum/#!topic/golang-dev/MEATuOi_ei4
-
-This is a security-only release; other than the version number and building
-against Go 1.5.3, there are no changes from 0.4.0.
-
-## 0.4.0 (December 10, 2015)
-
-DEPRECATIONS/CHANGES:
-
- * Policy Name Casing: Policy names are now normalized to lower-case on write,
- helping prevent accidental case mismatches. For backwards compatibility,
- policy names are not currently normalized when reading or deleting. [GH-676]
- * Default etcd port number: the default connection string for the `etcd`
- physical store uses port 2379 instead of port 4001, which is the port used
- by the supported version 2.x of etcd. [GH-753]
- * As noted below in the FEATURES section, if your Vault installation contains
- a policy called `default`, new tokens created will inherit this policy
- automatically.
- * In the PKI backend there have been a few minor breaking changes:
- * The token display name is no longer a valid option for providing a base
- domain for issuance. Since this name is prepended with the name of the
- authentication backend that issued it, it provided a faulty use-case at best
- and a confusing experience at worst. We hope to figure out a better
- per-token value in a future release.
- * The `allowed_base_domain` parameter has been changed to `allowed_domains`,
- which accepts a comma-separated list of domains. This allows issuing
- certificates with DNS subjects across multiple domains. If you had a
- configured `allowed_base_domain` parameter, it will be migrated
- automatically when the role is read (either via a normal read, or via
- issuing a certificate).
-
-FEATURES:
-
- * **Significantly Enhanced PKI Backend**: The `pki` backend can now generate
- and sign root CA certificates and intermediate CA CSRs. It can also now sign
- submitted client CSRs, as well as a significant number of other
- enhancements. See the updated documentation for the full API. [GH-666]
- * **CRL Checking for Certificate Authentication**: The `cert` backend now
- supports pushing CRLs into the mount and using the contained serial numbers
- for revocation checking. See the documentation for the `cert` backend for
- more info. [GH-330]
- * **Default Policy**: Vault now ensures that a policy named `default` is added
- to every token. This policy cannot be deleted, but it can be modified
- (including to an empty policy). There are three endpoints allowed in the
- default `default` policy, related to token self-management: `lookup-self`,
- which allows a token to retrieve its own information, and `revoke-self` and
- `renew-self`, which are self-explanatory. If your existing Vault
- installation contains a policy called `default`, it will not be overridden,
- but it will be added to each new token created. You can override this
- behavior when using manual token creation (i.e. not via an authentication
- backend) by setting the "no_default_policy" flag to true. [GH-732]
-
-IMPROVEMENTS:
-
- * api: API client now uses a 60 second timeout instead of indefinite [GH-681]
- * api: Implement LookupSelf, RenewSelf, and RevokeSelf functions for auth
- tokens [GH-739]
- * api: Standardize environment variable reading logic inside the API; the CLI
- now uses this but can still override via command-line parameters [GH-618]
- * audit: HMAC-SHA256'd client tokens are now stored with each request entry.
- Previously they were only displayed at creation time; this allows much
- better traceability of client actions. [GH-713]
- * audit: There is now a `sys/audit-hash` endpoint that can be used to generate
- an HMAC-SHA256'd value from provided data using the given audit backend's
- salt [GH-784]
- * core: The physical storage read cache can now be disabled via
- "disable_cache" [GH-674]
- * core: The unsealing process can now be reset midway through (this feature
- was documented before, but not enabled) [GH-695]
- * core: Tokens can now renew themselves [GH-455]
- * core: Base64-encoded PGP keys can be used with the CLI for `init` and
- `rekey` operations [GH-653]
- * core: Print version on startup [GH-765]
- * core: Access to `sys/policy` and `sys/mounts` now uses the normal ACL system
- instead of requiring a root token [GH-769]
- * credential/token: Display whether or not a token is an orphan in the output
- of a lookup call [GH-766]
- * logical: Allow `.` in path-based variables in many more locations [GH-244]
- * logical: Responses now contain a "warnings" key containing a list of
- warnings returned from the server. These are conditions that did not require
- failing an operation, but of which the client should be aware. [GH-676]
- * physical/(consul,etcd): Consul and etcd now use a connection pool to limit
- the number of outstanding operations, improving behavior when a lot of
- operations must happen at once [GH-677] [GH-780]
- * physical/consul: The `datacenter` parameter was removed; It could not be
- effective unless the Vault node (or the Consul node it was connecting to)
- was in the datacenter specified, in which case it wasn't needed [GH-816]
- * physical/etcd: Support TLS-encrypted connections and use a connection pool
- to limit the number of outstanding operations [GH-780]
- * physical/s3: The S3 endpoint can now be configured, allowing using
- S3-API-compatible storage solutions [GH-750]
- * physical/s3: The S3 bucket can now be configured with the `AWS_S3_BUCKET`
- environment variable [GH-758]
- * secret/consul: Management tokens can now be created [GH-714]
-
-BUG FIXES:
-
- * api: API client now checks for a 301 response for redirects. Vault doesn't
- generate these, but in certain conditions Go's internal HTTP handler can
- generate them, leading to client errors.
- * cli: `token-create` now supports the `ttl` parameter in addition to the
- deprecated `lease` parameter. [GH-688]
- * core: Return data from `generic` backends on the last use of a limited-use
- token [GH-615]
- * core: Fix upgrade path for leases created in `generic` prior to 0.3 [GH-673]
- * core: Stale leader entries will now be reaped [GH-679]
- * core: Using `mount-tune` on the auth/token path did not take effect.
- [GH-688]
- * core: Fix a potential race condition when (un)sealing the vault with metrics
- enabled [GH-694]
- * core: Fix an error that could happen in some failure scenarios where Vault
- could fail to revert to a clean state [GH-733]
- * core: Ensure secondary indexes are removed when a lease is expired [GH-749]
- * core: Ensure rollback manager uses an up-to-date mounts table [GH-771]
- * everywhere: Don't use http.DefaultClient, as it shares state implicitly and
- is a source of hard-to-track-down bugs [GH-700]
- * credential/token: Allow creating orphan tokens via an API path [GH-748]
- * secret/generic: Validate given duration at write time, not just read time;
- if stored durations are not parseable, return a warning and the default
- duration rather than an error [GH-718]
- * secret/generic: Return 400 instead of 500 when `generic` backend is written
- to with no data fields [GH-825]
- * secret/postgresql: Revoke permissions before dropping a user or revocation
- may fail [GH-699]
-
-MISC:
-
- * Various documentation fixes and improvements [GH-685] [GH-688] [GH-697]
- [GH-710] [GH-715] [GH-831]
-
-## 0.3.1 (October 6, 2015)
-
-SECURITY:
-
- * core: In certain failure scenarios, the full values of requests and
- responses would be logged [GH-665]
-
-FEATURES:
-
- * **Settable Maximum Open Connections**: The `mysql` and `postgresql` backends
- now allow setting the number of maximum open connections to the database,
- which was previously capped to 2. [GH-661]
- * **Renewable Tokens for GitHub**: The `github` backend now supports
- specifying a TTL, enabling renewable tokens. [GH-664]
-
-BUG FIXES:
-
- * dist: linux-amd64 distribution was dynamically linked [GH-656]
- * credential/github: Fix acceptance tests [GH-651]
-
-MISC:
-
- * Various minor documentation fixes and improvements [GH-649] [GH-650]
- [GH-654] [GH-663]
-
-## 0.3.0 (September 28, 2015)
-
-DEPRECATIONS/CHANGES:
-
-Note: deprecations and breaking changes in upcoming releases are announced
-ahead of time on the "vault-tool" mailing list.
-
- * **Cookie Authentication Removed**: As of 0.3 the only way to authenticate is
- via the X-Vault-Token header. Cookie authentication was hard to properly
- test, could result in browsers/tools/applications saving tokens in plaintext
- on disk, and other issues. [GH-564]
- * **Terminology/Field Names**: Vault is transitioning from overloading the
- term "lease" to mean both "a set of metadata" and "the amount of time the
- metadata is valid". The latter is now being referred to as TTL (or
- "lease_duration" for backwards-compatibility); some parts of Vault have
- already switched to using "ttl" and others will follow in upcoming releases.
- In particular, the "token", "generic", and "pki" backends accept both "ttl"
- and "lease" but in 0.4 only "ttl" will be accepted. [GH-528]
- * **Downgrade Not Supported**: Due to enhancements in the storage subsystem,
- values written by Vault 0.3+ will not be able to be read by prior versions
- of Vault. There are no expected upgrade issues, however, as with all
- critical infrastructure it is recommended to back up Vault's physical
- storage before upgrading.
-
-FEATURES:
-
- * **SSH Backend**: Vault can now be used to delegate SSH access to machines,
- via a (recommended) One-Time Password approach or by issuing dynamic keys.
- [GH-385]
- * **Cubbyhole Backend**: This backend works similarly to the "generic" backend
- but provides a per-token workspace. This enables some additional
- authentication workflows (especially for containers) and can be useful to
- applications to e.g. store local credentials while being restarted or
- upgraded, rather than persisting to disk. [GH-612]
- * **Transit Backend Improvements**: The transit backend now allows key
- rotation and datakey generation. For rotation, data encrypted with previous
- versions of the keys can still be decrypted, down to a (configurable)
- minimum previous version; there is a rewrap function for manual upgrades of
- ciphertext to newer versions. Additionally, the backend now allows
- generating and returning high-entropy keys of a configurable bitsize
- suitable for AES and other functions; this is returned wrapped by a named
- key, or optionally both wrapped and plaintext for immediate use. [GH-626]
- * **Global and Per-Mount Default/Max TTL Support**: You can now set the
- default and maximum Time To Live for leases both globally and per-mount.
- Per-mount settings override global settings. Not all backends honor these
- settings yet, but the maximum is a hard limit enforced outside the backend.
- See the documentation for "/sys/mounts/" for details on configuring
- per-mount TTLs. [GH-469]
- * **PGP Encryption for Unseal Keys**: When initializing or rotating Vault's
- master key, PGP/GPG public keys can now be provided. The output keys will be
- encrypted with the given keys, in order. [GH-570]
- * **Duo Multifactor Authentication Support**: Backends that support MFA can
- now use Duo as the mechanism. [GH-464]
- * **Performance Improvements**: Users of the "generic" backend will see a
- significant performance improvement as the backend no longer creates leases,
- although it does return TTLs (global/mount default, or set per-item) as
- before. [GH-631]
- * **Codebase Audit**: Vault's codebase was audited by iSEC. (The terms of the
- audit contract do not allow us to make the results public.) [GH-220]
-
-IMPROVEMENTS:
-
- * audit: Log entries now contain a time field [GH-495]
- * audit: Obfuscated audit entries now use hmac-sha256 instead of sha1 [GH-627]
- * backends: Add ability for a cleanup function to be called on backend unmount
- [GH-608]
- * config: Allow specifying minimum acceptable TLS version [GH-447]
- * core: If trying to mount in a location that is already mounted, be more
- helpful about the error [GH-510]
- * core: Be more explicit on failure if the issue is invalid JSON [GH-553]
- * core: Tokens can now revoke themselves [GH-620]
- * credential/app-id: Give a more specific error when sending a duplicate POST
- to sys/auth/app-id [GH-392]
- * credential/github: Support custom API endpoints (e.g. for Github Enterprise)
- [GH-572]
- * credential/ldap: Add per-user policies and option to login with
- userPrincipalName [GH-420]
- * credential/token: Allow root tokens to specify the ID of a token being
- created from CLI [GH-502]
- * credential/userpass: Enable renewals for login tokens [GH-623]
- * scripts: Use /usr/bin/env to find Bash instead of hardcoding [GH-446]
- * scripts: Use godep for build scripts to use same environment as tests
- [GH-404]
- * secret/mysql: Allow reading configuration data [GH-529]
- * secret/pki: Split "allow_any_name" logic to that and "enforce_hostnames", to
- allow for non-hostname values (e.g. for client certificates) [GH-555]
- * storage/consul: Allow specifying certificates used to talk to Consul
- [GH-384]
- * storage/mysql: Allow SSL encrypted connections [GH-439]
- * storage/s3: Allow using temporary security credentials [GH-433]
- * telemetry: Put telemetry object in configuration to allow more flexibility
- [GH-419]
- * testing: Disable mlock for testing of logical backends so as not to require
- root [GH-479]
-
-BUG FIXES:
-
- * audit/file: Do not enable auditing if file permissions are invalid [GH-550]
- * backends: Allow hyphens in endpoint patterns (fixes AWS and others) [GH-559]
- * cli: Fixed missing setup of client TLS certificates if no custom CA was
- provided
- * cli/read: Do not include a carriage return when using raw field output
- [GH-624]
- * core: Bad input data could lead to a panic for that session, rather than
- returning an error [GH-503]
- * core: Allow SHA2-384/SHA2-512 hashed certificates [GH-448]
- * core: Do not return a Secret if there are no uses left on a token (since it
- will be unable to be used) [GH-615]
- * core: Code paths that called lookup-self would decrement num_uses and
- potentially immediately revoke a token [GH-552]
- * core: Some /sys/ paths would not properly redirect from a standby to the
- leader [GH-499] [GH-551]
- * credential/aws: Translate spaces in a token's display name to avoid making
- IAM unhappy [GH-567]
- * credential/github: Integration failed if more than ten organizations or
- teams [GH-489]
- * credential/token: Tokens with sudo access to "auth/token/create" can now use
- root-only options [GH-629]
- * secret/cassandra: Work around backwards-incompatible change made in
- Cassandra 2.2 preventing Vault from properly setting/revoking leases
- [GH-549]
- * secret/mysql: Use varbinary instead of varchar to avoid InnoDB/UTF-8 issues
- [GH-522]
- * secret/postgres: Explicitly set timezone in connections [GH-597]
- * storage/etcd: Renew semaphore periodically to prevent leadership flapping
- [GH-606]
- * storage/zk: Fix collisions in storage that could lead to data unavailability
- [GH-411]
-
-MISC:
-
- * Various documentation fixes and improvements [GH-412] [GH-474] [GH-476]
- [GH-482] [GH-483] [GH-486] [GH-508] [GH-568] [GH-574] [GH-586] [GH-590]
- [GH-591] [GH-592] [GH-595] [GH-613] [GH-637]
- * Less "armon" in stack traces [GH-453]
- * Sourcegraph integration [GH-456]
-
-## 0.2.0 (July 13, 2015)
-
-FEATURES:
-
- * **Key Rotation Support**: The `rotate` command can be used to rotate the
- master encryption key used to write data to the storage (physical) backend.
- [GH-277]
- * **Rekey Support**: Rekey can be used to rotate the master key and change the
- configuration of the unseal keys (number of shares, threshold required).
- [GH-277]
- * **New secret backend: `pki`**: Enable Vault to be a certificate authority
- and generate signed TLS certificates. [GH-310]
- * **New secret backend: `cassandra`**: Generate dynamic credentials for
- Cassandra [GH-363]
- * **New storage backend: `etcd`**: store physical data in etcd [GH-259]
- [GH-297]
- * **New storage backend: `s3`**: store physical data in S3. Does not support
- HA. [GH-242]
- * **New storage backend: `MySQL`**: store physical data in MySQL. Does not
- support HA. [GH-324]
- * `transit` secret backend supports derived keys for per-transaction unique
- keys [GH-399]
-
-IMPROVEMENTS:
-
- * cli/auth: Enable `cert` method [GH-380]
- * cli/auth: read input from stdin [GH-250]
- * cli/read: Ability to read a single field from a secret [GH-257]
- * cli/write: Adding a force flag when no input required
- * core: allow time duration format in place of seconds for some inputs
- * core: audit log provides more useful information [GH-360]
- * core: graceful shutdown for faster HA failover
- * core: **change policy format** to use explicit globbing [GH-400] Any
- existing policy in Vault is automatically upgraded to avoid issues. All
- policy files must be updated for future writes. Adding the explicit glob
- character `*` to the path specification is all that is required.
- * core: policy merging to give deny highest precedence [GH-400]
- * credential/app-id: Protect against timing attack on app-id
- * credential/cert: Record the common name in the metadata [GH-342]
- * credential/ldap: Allow TLS verification to be disabled [GH-372]
- * credential/ldap: More flexible names allowed [GH-245] [GH-379] [GH-367]
- * credential/userpass: Protect against timing attack on password
- * credential/userpass: Use bcrypt for password matching
- * http: response codes improved to reflect error [GH-366]
- * http: the `sys/health` endpoint supports `?standbyok` to return 200 on
- standby [GH-389]
- * secret/app-id: Support deleting AppID and UserIDs [GH-200]
- * secret/consul: Fine grained lease control [GH-261]
- * secret/transit: Decouple raw key from key management endpoint [GH-355]
- * secret/transit: Upsert named key when encrypt is used [GH-355]
- * storage/zk: Support for HA configuration [GH-252]
- * storage/zk: Changing node representation. **Backwards incompatible**.
- [GH-416]
-
-BUG FIXES:
-
- * audit/file: file removing TLS connection state
- * audit/syslog: fix removing TLS connection state
- * command/*: commands accepting `k=v` allow blank values
- * core: Allow building on FreeBSD [GH-365]
- * core: Fixed various panics when audit logging enabled
- * core: Lease renewal does not create redundant lease
- * core: fixed leases with negative duration [GH-354]
- * core: token renewal does not create child token
- * core: fixing panic when lease increment is null [GH-408]
- * credential/app-id: Salt the paths in storage backend to avoid information
- leak
- * credential/cert: Fixing client certificate not being requested
- * credential/cert: Fixing panic when no certificate match found [GH-361]
- * http: Accept PUT as POST for sys/auth
- * http: Accept PUT as POST for sys/mounts [GH-349]
- * http: Return 503 when sealed [GH-225]
- * secret/postgres: Username length is capped to exceeding limit
- * server: Do not panic if backend not configured [GH-222]
- * server: Explicitly check value of tls_diable [GH-201]
- * storage/zk: Fixed issues with version conflicts [GH-190]
-
-MISC:
-
- * cli/path-help: renamed from `help` to avoid confusion
-
-## 0.1.2 (May 11, 2015)
-
-FEATURES:
-
- * **New physical backend: `zookeeper`**: store physical data in Zookeeper.
- HA not supported yet.
- * **New credential backend: `ldap`**: authenticate using LDAP credentials.
-
-IMPROVEMENTS:
-
- * core: Auth backends can store internal data about auth creds
- * audit: display name for auth is shown in logs [GH-176]
- * command/*: `-insecure` has been renamed to `-tls-skip-verify` [GH-130]
- * command/*: `VAULT_TOKEN` overrides local stored auth [GH-162]
- * command/server: environment variables are copy-pastable
- * credential/app-id: hash of app and user ID are in metadata [GH-176]
- * http: HTTP API accepts `X-Vault-Token` as auth header [GH-124]
- * logical/*: Generate help output even if no synopsis specified
-
-BUG FIXES:
-
- * core: login endpoints should never return secrets
- * core: Internal data should never be returned from core endpoints
- * core: defer barrier initialization to as late as possible to avoid error
- cases during init that corrupt data (no data loss)
- * core: guard against invalid init config earlier
- * audit/file: create file if it doesn't exist [GH-148]
- * command/*: ignore directories when traversing CA paths [GH-181]
- * credential/*: all policy mapping keys are case insensitive [GH-163]
- * physical/consul: Fixing path for locking so HA works in every case
-
-## 0.1.1 (May 2, 2015)
-
-SECURITY CHANGES:
-
- * physical/file: create the storge with 0600 permissions [GH-102]
- * token/disk: write the token to disk with 0600 perms
-
-IMPROVEMENTS:
-
- * core: Very verbose error if mlock fails [GH-59]
- * command/*: On error with TLS oversized record, show more human-friendly
- error message. [GH-123]
- * command/read: `lease_renewable` is now outputted along with the secret to
- show whether it is renewable or not
- * command/server: Add configuration option to disable mlock
- * command/server: Disable mlock for dev mode so it works on more systems
-
-BUG FIXES:
-
- * core: if token helper isn't absolute, prepend with path to Vault
- executable, not "vault" (which requires PATH) [GH-60]
- * core: Any "mapping" routes allow hyphens in keys [GH-119]
- * core: Validate `advertise_addr` is a valid URL with scheme [GH-106]
- * command/auth: Using an invalid token won't crash [GH-75]
- * credential/app-id: app and user IDs can have hyphens in keys [GH-119]
- * helper/password: import proper DLL for Windows to ask password [GH-83]
-
-## 0.1.0 (April 28, 2015)
-
- * Initial release
diff --git a/vendor/github.com/hashicorp/vault/CONTRIBUTING.md b/vendor/github.com/hashicorp/vault/CONTRIBUTING.md
deleted file mode 100644
index 6fc1888..0000000
--- a/vendor/github.com/hashicorp/vault/CONTRIBUTING.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# Contributing to Vault
-
-**Please note:** We take Vault's security and our users' trust very seriously.
-If you believe you have found a security issue in Vault, please responsibly
-disclose by contacting us at security@hashicorp.com.
-
-**First:** if you're unsure or afraid of _anything_, just ask or submit the
-issue or pull request anyways. You won't be yelled at for giving it your best
-effort. The worst that can happen is that you'll be politely asked to change
-something. We appreciate any sort of contributions, and don't want a wall of
-rules to get in the way of that.
-
-That said, if you want to ensure that a pull request is likely to be merged,
-talk to us! You can find out our thoughts and ensure that your contribution
-won't clash or be obviated by Vault's normal direction. A great way to do this
-is via the [Vault Google Group][2]. Sometimes Vault devs are in `#vault-tool`
-on Freenode, too.
-
-This document will cover what we're looking for in terms of reporting issues.
-By addressing all the points we're looking for, it raises the chances we can
-quickly merge or address your contributions.
-
-## Issues
-
-### Reporting an Issue
-
-* Make sure you test against the latest released version. It is possible
- we already fixed the bug you're experiencing. Even better is if you can test
- against `master`, as bugs are fixed regularly but new versions are only
- released every few months.
-
-* Provide steps to reproduce the issue, and if possible include the expected
- results as well as the actual results. Please provide text, not screen shots!
-
-* If you are seeing an internal Vault error (a status code of 5xx), please be
- sure to post relevant parts of (or the entire) Vault log, as often these
- errors are logged on the server but not reported to the user
-
-* If you experienced a panic, please create a [gist](https://gist.github.com)
- of the *entire* generated crash log for us to look at. Double check
- no sensitive items were in the log.
-
-* Respond as promptly as possible to any questions made by the Vault
- team to your issue. Stale issues will be closed periodically.
-
-### Issue Lifecycle
-
-1. The issue is reported.
-
-2. The issue is verified and categorized by a Vault collaborator.
- Categorization is done via tags. For example, bugs are marked as "bugs".
-
-3. Unless it is critical, the issue may be left for a period of time (sometimes
- many weeks), giving outside contributors -- maybe you!? -- a chance to
- address the issue.
-
-4. The issue is addressed in a pull request or commit. The issue will be
- referenced in the commit message so that the code that fixes it is clearly
- linked.
-
-5. The issue is closed. Sometimes, valid issues will be closed to keep
- the issue tracker clean. The issue is still indexed and available for
- future viewers, or can be re-opened if necessary.
-
-## Setting up Go to work on Vault
-
-If you have never worked with Go before, you will have to complete the
-following steps listed in the README, under the section [Developing Vault][1].
-
-
-[1]: https://github.com/hashicorp/vault#developing-vault
-[2]: https://groups.google.com/group/vault-tool
diff --git a/vendor/github.com/hashicorp/vault/Makefile b/vendor/github.com/hashicorp/vault/Makefile
deleted file mode 100644
index 0bf1d14..0000000
--- a/vendor/github.com/hashicorp/vault/Makefile
+++ /dev/null
@@ -1,104 +0,0 @@
-TEST?=$$(go list ./... | grep -v /vendor/)
-VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
-EXTERNAL_TOOLS=\
- github.com/mitchellh/gox \
- github.com/kardianos/govendor
-BUILD_TAGS?=vault
-GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor)
-
-default: dev
-
-# bin generates the releaseable binaries for Vault
-bin: fmtcheck prep
- @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/build.sh'"
-
-# dev creates binaries for testing Vault locally. These are put
-# into ./bin/ as well as $GOPATH/bin, except for quickdev which
-# is only put into /bin/
-quickdev: prep
- @CGO_ENABLED=0 go build -i -tags='$(BUILD_TAGS)' -o bin/vault
-dev: fmtcheck prep
- @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
-dev-dynamic: prep
- @CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
-
-# test runs the unit tests and vets the code
-test: fmtcheck prep
- CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=20m -parallel=4
-
-testcompile: fmtcheck prep
- @for pkg in $(TEST) ; do \
- go test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \
- done
-
-# testacc runs acceptance tests
-testacc: fmtcheck prep
- @if [ "$(TEST)" = "./..." ]; then \
- echo "ERROR: Set TEST to a specific package"; \
- exit 1; \
- fi
- VAULT_ACC=1 go test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout 45m
-
-# testrace runs the race checker
-testrace: fmtcheck prep
- CGO_ENABLED=1 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=45m -parallel=4
-
-cover:
- ./scripts/coverage.sh --html
-
-# vet runs the Go source code static analysis tool `vet` to find
-# any common errors.
-vet:
- @go list -f '{{.Dir}}' ./... | grep -v /vendor/ \
- | grep -v '.*github.com/hashicorp/vault$$' \
- | xargs go tool vet ; if [ $$? -eq 1 ]; then \
- echo ""; \
- echo "Vet found suspicious constructs. Please check the reported constructs"; \
- echo "and fix them if necessary before submitting the code for reviewal."; \
- fi
-
-# prep runs `go generate` to build the dynamically generated
-# source files.
-prep:
- go generate $(go list ./... | grep -v /vendor/)
- cp .hooks/* .git/hooks/
-
-# bootstrap the build by downloading additional tools
-bootstrap:
- @for tool in $(EXTERNAL_TOOLS) ; do \
- echo "Installing/Updating $$tool" ; \
- go get -u $$tool; \
- done
-
-proto:
- protoc -I helper/forwarding -I vault -I ../../.. vault/*.proto --go_out=plugins=grpc:vault
- protoc -I helper/forwarding -I vault -I ../../.. helper/forwarding/types.proto --go_out=plugins=grpc:helper/forwarding
-
-fmtcheck:
- @sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
-
-fmt:
- gofmt -w $(GOFMT_FILES)
-
-mysql-database-plugin:
- @CGO_ENABLED=0 go build -o bin/mysql-database-plugin ./plugins/database/mysql/mysql-database-plugin
-
-mysql-legacy-database-plugin:
- @CGO_ENABLED=0 go build -o bin/mysql-legacy-database-plugin ./plugins/database/mysql/mysql-legacy-database-plugin
-
-cassandra-database-plugin:
- @CGO_ENABLED=0 go build -o bin/cassandra-database-plugin ./plugins/database/cassandra/cassandra-database-plugin
-
-postgresql-database-plugin:
- @CGO_ENABLED=0 go build -o bin/postgresql-database-plugin ./plugins/database/postgresql/postgresql-database-plugin
-
-mssql-database-plugin:
- @CGO_ENABLED=0 go build -o bin/mssql-database-plugin ./plugins/database/mssql/mssql-database-plugin
-
-hana-database-plugin:
- @CGO_ENABLED=0 go build -o bin/hana-database-plugin ./plugins/database/hana/hana-database-plugin
-
-mongodb-database-plugin:
- @CGO_ENABLED=0 go build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin
-
-.PHONY: bin default prep test vet bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin
diff --git a/vendor/github.com/hashicorp/vault/README.md b/vendor/github.com/hashicorp/vault/README.md
deleted file mode 100644
index 058c065..0000000
--- a/vendor/github.com/hashicorp/vault/README.md
+++ /dev/null
@@ -1,132 +0,0 @@
-Vault [![Build Status](https://travis-ci.org/hashicorp/vault.svg)](https://travis-ci.org/hashicorp/vault) [![Join the chat at https://gitter.im/hashicorp-vault/Lobby](https://badges.gitter.im/hashicorp-vault/Lobby.svg)](https://gitter.im/hashicorp-vault/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![vault enterprise](https://img.shields.io/badge/vault-enterprise-yellow.svg?colorB=7c8797&colorA=000000)](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=banner&utm_campaign=github-vault-enterprise)
-=========
-**Please note**: We take Vault's security and our users' trust very seriously. If you believe you have found a security issue in Vault, _please responsibly disclose_ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com).
-
-=========
-
-- Website: https://www.vaultproject.io
-- IRC: `#vault-tool` on Freenode
-- Announcement list: [Google Groups](https://groups.google.com/group/hashicorp-announce)
-- Discussion list: [Google Groups](https://groups.google.com/group/vault-tool)
-
-
-
-Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log.
-
-A modern system requires access to a multitude of secrets: database credentials, API keys for external services, credentials for service-oriented architecture communication, etc. Understanding who is accessing what secrets is already very difficult and platform-specific. Adding on key rolling, secure storage, and detailed audit logs is almost impossible without a custom solution. This is where Vault steps in.
-
-The key features of Vault are:
-
-* **Secure Secret Storage**: Arbitrary key/value secrets can be stored
- in Vault. Vault encrypts these secrets prior to writing them to persistent
- storage, so gaining access to the raw storage isn't enough to access
- your secrets. Vault can write to disk, [Consul](https://www.consul.io),
- and more.
-
-* **Dynamic Secrets**: Vault can generate secrets on-demand for some
- systems, such as AWS or SQL databases. For example, when an application
- needs to access an S3 bucket, it asks Vault for credentials, and Vault
- will generate an AWS keypair with valid permissions on demand. After
- creating these dynamic secrets, Vault will also automatically revoke them
- after the lease is up.
-
-* **Data Encryption**: Vault can encrypt and decrypt data without storing
- it. This allows security teams to define encryption parameters and
- developers to store encrypted data in a location such as SQL without
- having to design their own encryption methods.
-
-* **Leasing and Renewal**: All secrets in Vault have a _lease_ associated
- with it. At the end of the lease, Vault will automatically revoke that
- secret. Clients are able to renew leases via built-in renew APIs.
-
-* **Revocation**: Vault has built-in support for secret revocation. Vault
- can revoke not only single secrets, but a tree of secrets, for example
- all secrets read by a specific user, or all secrets of a particular type.
- Revocation assists in key rolling as well as locking down systems in the
- case of an intrusion.
-
-For more information, see the [introduction section](https://www.vaultproject.io/intro)
-of the Vault website.
-
-Getting Started & Documentation
--------------------------------
-
-All documentation is available on the [Vault website](https://www.vaultproject.io).
-
-Developing Vault
---------------------
-
-If you wish to work on Vault itself or any of its built-in systems, you'll
-first need [Go](https://www.golang.org) installed on your machine (version 1.9+
-is *required*).
-
-For local dev first make sure Go is properly installed, including setting up a
-[GOPATH](https://golang.org/doc/code.html#GOPATH). Next, clone this repository
-into `$GOPATH/src/github.com/hashicorp/vault`. You can then download any
-required build tools by bootstrapping your environment:
-
-```sh
-$ make bootstrap
-...
-```
-
-To compile a development version of Vault, run `make` or `make dev`. This will
-put the Vault binary in the `bin` and `$GOPATH/bin` folders:
-
-```sh
-$ make dev
-...
-$ bin/vault
-...
-```
-
-To run tests, type `make test`. Note: this requires Docker to be installed. If
-this exits with exit status 0, then everything is working!
-
-```sh
-$ make test
-...
-```
-
-If you're developing a specific package, you can run tests for just that
-package by specifying the `TEST` variable. For example below, only
-`vault` package tests will be run.
-
-```sh
-$ make test TEST=./vault
-...
-```
-
-### Acceptance Tests
-
-Vault has comprehensive [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
-covering most of the features of the secret and auth backends.
-
-If you're working on a feature of a secret or auth backend and want to
-verify it is functioning (and also hasn't broken anything else), we recommend
-running the acceptance tests.
-
-**Warning:** The acceptance tests create/destroy/modify *real resources*, which
-may incur real costs in some cases. In the presence of a bug, it is technically
-possible that broken backends could leave dangling data behind. Therefore,
-please run the acceptance tests at your own risk. At the very least,
-we recommend running them in their own private account for whatever backend
-you're testing.
-
-To run the acceptance tests, invoke `make testacc`:
-
-```sh
-$ make testacc TEST=./builtin/logical/consul
-...
-```
-
-The `TEST` variable is required, and you should specify the folder where the
-backend is. The `TESTARGS` variable is recommended to filter down to a specific
-resource to test, since testing all of them at once can sometimes take a very
-long time.
-
-Acceptance tests typically require other environment variables to be set for
-things such as access keys. The test itself should error early and tell
-you what to set, so it is not documented here.
-
-For more information on Vault Enterprise features, visit the [Vault Enterprise site](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=referral&utm_campaign=github-vault-enterprise).
diff --git a/vendor/github.com/hashicorp/vault/api/SPEC.md b/vendor/github.com/hashicorp/vault/api/SPEC.md
deleted file mode 100644
index 15345f3..0000000
--- a/vendor/github.com/hashicorp/vault/api/SPEC.md
+++ /dev/null
@@ -1,611 +0,0 @@
-FORMAT: 1A
-
-# vault
-
-The Vault API gives you full access to the Vault project.
-
-If you're browsing this API specifiction in GitHub or in raw
-format, please excuse some of the odd formatting. This document
-is in api-blueprint format that is read by viewers such as
-Apiary.
-
-## Sealed vs. Unsealed
-
-Whenever an individual Vault server is started, it is started
-in the _sealed_ state. In this state, it knows where its data
-is located, but the data is encrypted and Vault doesn't have the
-encryption keys to access it. Before Vault can operate, it must
-be _unsealed_.
-
-**Note:** Sealing/unsealing has no relationship to _authentication_
-which is separate and still required once the Vault is unsealed.
-
-Instead of being sealed with a single key, we utilize
-[Shamir's Secret Sharing](http://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing)
-to shard a key into _n_ parts such that _t_ parts are required
-to reconstruct the original key, where `t <= n`. This means that
-Vault itself doesn't know the original key, and no single person
-has the original key (unless `n = 1`, or `t` parts are given to
-a single person).
-
-Unsealing is done via an unauthenticated
-[unseal API](#reference/seal/unseal/unseal). This API takes a single
-master shard and progresses the unsealing process. Once all shards
-are given, the Vault is either unsealed or resets the unsealing
-process if the key was invalid.
-
-The entire seal/unseal state is server-wide. This allows multiple
-distinct operators to use the unseal API (or more likely the
-`vault unseal` command) from separate computers/networks and never
-have to transmit their key in order to unseal the vault in a
-distributed fashion.
-
-## Transport
-
-The API is expected to be accessed over a TLS connection at
-all times, with a valid certificate that is verified by a well
-behaved client.
-
-## Authentication
-
-Once the Vault is unsealed, every other operation requires
-authentication. There are multiple methods for authentication
-that can be enabled (see
-[authentication](#reference/authentication)).
-
-Authentication is done with the login endpoint. The login endpoint
-returns an access token that is set as the `X-Vault-Token` header.
-
-## Help
-
-To retrieve the help for any API within Vault, including mounted
-backends, credential providers, etc. then append `?help=1` to any
-URL. If you have valid permission to access the path, then the help text
-will be returned with the following structure:
-
- {
- "help": "help text"
- }
-
-## Error Response
-
-A common JSON structure is always returned to return errors:
-
- {
- "errors": [
- "message",
- "another message"
- ]
- }
-
-This structure will be sent down for any non-20x HTTP status.
-
-## HTTP Status Codes
-
-The following HTTP status codes are used throughout the API.
-
-- `200` - Success with data.
-- `204` - Success, no data returned.
-- `400` - Invalid request, missing or invalid data.
-- `403` - Forbidden, your authentication details are either
- incorrect or you don't have access to this feature.
-- `404` - Invalid path. This can both mean that the path truly
- doesn't exist or that you don't have permission to view a
- specific path. We use 404 in some cases to avoid state leakage.
-- `429` - Rate limit exceeded. Try again after waiting some period
- of time.
-- `500` - Internal server error. An internal error has occurred,
- try again later. If the error persists, report a bug.
-- `503` - Vault is down for maintenance or is currently sealed.
- Try again later.
-
-# Group Initialization
-
-## Initialization [/sys/init]
-### Initialization Status [GET]
-Returns the status of whether the vault is initialized or not. The
-vault doesn't have to be unsealed for this operation.
-
-+ Response 200 (application/json)
-
- {
- "initialized": true
- }
-
-### Initialize [POST]
-Initialize the vault. This is an unauthenticated request to initially
-setup a new vault. Although this is unauthenticated, it is still safe:
-data cannot be in vault prior to initialization, and any future
-authentication will fail if you didn't initialize it yourself.
-Additionally, once initialized, a vault cannot be reinitialized.
-
-This API is the only time Vault will ever be aware of your keys, and
-the only time the keys will ever be returned in one unit. Care should
-be taken to ensure that the output of this request is never logged,
-and that the keys are properly distributed.
-
-The response also contains the initial root token that can be used
-as authentication in order to initially configure Vault once it is
-unsealed. Just as with the unseal keys, this is the only time Vault is
-ever aware of this token.
-
-+ Request (application/json)
-
- {
- "secret_shares": 5,
- "secret_threshold": 3,
- }
-
-+ Response 200 (application/json)
-
- {
- "keys": ["one", "two", "three"],
- "root_token": "foo"
- }
-
-# Group Seal/Unseal
-
-## Seal Status [/sys/seal-status]
-### Seal Status [GET]
-Returns the status of whether the vault is currently
-sealed or not, as well as the progress of unsealing.
-
-The response has the following attributes:
-
-- sealed (boolean) - If true, the vault is sealed. Otherwise,
- it is unsealed.
-- t (int) - The "t" value for the master key, or the number
- of shards needed total to unseal the vault.
-- n (int) - The "n" value for the master key, or the total
- number of shards of the key distributed.
-- progress (int) - The number of master key shards that have
- been entered so far towards unsealing the vault.
-
-+ Response 200 (application/json)
-
- {
- "sealed": true,
- "t": 3,
- "n": 5,
- "progress": 1
- }
-
-## Seal [/sys/seal]
-### Seal [PUT]
-Seal the vault.
-
-Sealing the vault locks Vault from any future operations on any
-secrets or system configuration until the vault is once again
-unsealed. Internally, sealing throws away the keys to access the
-encrypted vault data, so Vault is unable to access the data without
-unsealing to get the encryption keys.
-
-+ Response 204
-
-## Unseal [/sys/unseal]
-### Unseal [PUT]
-Unseal the vault.
-
-Unseal the vault by entering a portion of the master key. The
-response object will tell you if the unseal is complete or
-only partial.
-
-If the vault is already unsealed, this does nothing. It is
-not an error, the return value just says the vault is unsealed.
-Due to the architecture of Vault, we cannot validate whether
-any portion of the unseal key given is valid until all keys
-are inputted, therefore unsealing an already unsealed vault
-is still a success even if the input key is invalid.
-
-+ Request (application/json)
-
- {
- "key": "value"
- }
-
-+ Response 200 (application/json)
-
- {
- "sealed": true,
- "t": 3,
- "n": 5,
- "progress": 1
- }
-
-# Group Authentication
-
-## List Auth Methods [/sys/auth]
-### List all auth methods [GET]
-Lists all available authentication methods.
-
-This returns the name of the authentication method as well as
-a human-friendly long-form help text for the method that can be
-shown to the user as documentation.
-
-+ Response 200 (application/json)
-
- {
- "token": {
- "type": "token",
- "description": "Token authentication"
- },
- "oauth": {
- "type": "oauth",
- "description": "OAuth authentication"
- }
- }
-
-## Single Auth Method [/sys/auth/{id}]
-
-+ Parameters
- + id (required, string) ... The ID of the auth method.
-
-### Enable an auth method [PUT]
-Enables an authentication method.
-
-The body of the request depends on the authentication method
-being used. Please reference the documentation for the specific
-authentication method you're enabling in order to determine what
-parameters you must give it.
-
-If an authentication method is already enabled, then this can be
-used to change the configuration, including even the type of
-the configuration.
-
-+ Request (application/json)
-
- {
- "type": "type",
- "key": "value",
- "key2": "value2"
- }
-
-+ Response 204
-
-### Disable an auth method [DELETE]
-Disables an authentication method. Previously authenticated sessions
-are immediately invalidated.
-
-+ Response 204
-
-# Group Policies
-
-Policies are named permission sets that identities returned by
-credential stores are bound to. This separates _authentication_
-from _authorization_.
-
-## Policies [/sys/policy]
-### List all Policies [GET]
-
-List all the policies.
-
-+ Response 200 (application/json)
-
- {
- "policies": ["root"]
- }
-
-## Single Policy [/sys/policy/{id}]
-
-+ Parameters
- + id (required, string) ... The name of the policy
-
-### Upsert [PUT]
-
-Create or update a policy with the given ID.
-
-+ Request (application/json)
-
- {
- "rules": "HCL"
- }
-
-+ Response 204
-
-### Delete [DELETE]
-
-Delete a policy with the given ID. Any identities bound to this
-policy will immediately become "deny all" despite already being
-authenticated.
-
-+ Response 204
-
-# Group Mounts
-
-Logical backends are mounted at _mount points_, similar to
-filesystems. This allows you to mount the "aws" logical backend
-at the "aws-us-east" path, so all access is at `/aws-us-east/keys/foo`
-for example. This enables multiple logical backends to be enabled.
-
-## Mounts [/sys/mounts]
-### List all mounts [GET]
-
-Lists all the active mount points.
-
-+ Response 200 (application/json)
-
- {
- "aws": {
- "type": "aws",
- "description": "AWS"
- },
- "pg": {
- "type": "postgresql",
- "description": "PostgreSQL dynamic users"
- }
- }
-
-## Single Mount [/sys/mounts/{path}]
-### New Mount [POST]
-
-Mount a logical backend to a new path.
-
-Configuration for this new backend is done via the normal
-read/write mechanism once it is mounted.
-
-+ Request (application/json)
-
- {
- "type": "aws",
- "description": "EU AWS tokens"
- }
-
-+ Response 204
-
-### Unmount [DELETE]
-
-Unmount a mount point.
-
-+ Response 204
-
-## Remount [/sys/remount]
-### Remount [POST]
-
-Move an already-mounted backend to a new path.
-
-+ Request (application/json)
-
- {
- "from": "aws",
- "to": "aws-east"
- }
-
-+ Response 204
-
-# Group Audit Backends
-
-Audit backends are responsible for shuttling the audit logs that
-Vault generates to a durable system for future querying. By default,
-audit logs are not stored anywhere.
-
-## Audit Backends [/sys/audit]
-### List Enabled Audit Backends [GET]
-
-List all the enabled audit backends
-
-+ Response 200 (application/json)
-
- {
- "file": {
- "type": "file",
- "description": "Send audit logs to a file",
- "options": {}
- }
- }
-
-## Single Audit Backend [/sys/audit/{path}]
-
-+ Parameters
- + path (required, string) ... The path where the audit backend is mounted
-
-### Enable [PUT]
-
-Enable an audit backend.
-
-+ Request (application/json)
-
- {
- "type": "file",
- "description": "send to a file",
- "options": {
- "path": "/var/log/vault.audit.log"
- }
- }
-
-+ Response 204
-
-### Disable [DELETE]
-
-Disable an audit backend.
-
-+ Request (application/json)
-
-+ Response 204
-
-# Group Secrets
-
-## Generic [/{mount}/{path}]
-
-This group documents the general format of reading and writing
-to Vault. The exact structure of the keyspace is defined by the
-logical backends in use, so documentation related to
-a specific backend should be referenced for details on what keys
-and routes are expected.
-
-The path for examples are `/prefix/path`, but in practice
-these will be defined by the backends that are mounted. For
-example, reading an AWS key might be at the `/aws/root` path.
-These paths are defined by the logical backends.
-
-+ Parameters
- + mount (required, string) ... The mount point for the
- logical backend. Example: `aws`.
- + path (optional, string) ... The path within the backend
- to read or write data.
-
-### Read [GET]
-
-Read data from vault.
-
-The data read from the vault can either be a secret or
-arbitrary configuration data. The type of data returned
-depends on the path, and is defined by the logical backend.
-
-If the return value is a secret, then the return structure
-is a mixture of arbitrary key/value along with the following
-fields which are guaranteed to exist:
-
-- `lease_id` (string) - A unique ID used for renewal and
- revocation.
-
-- `renewable` (bool) - If true, then this key can be renewed.
- If a key can't be renewed, then a new key must be requested
- after the lease duration period.
-
-- `lease_duration` (int) - The time in seconds that a secret is
- valid for before it must be renewed.
-
-- `lease_duration_max` (int) - The maximum amount of time in
- seconds that a secret is valid for. This will always be
- greater than or equal to `lease_duration`. The difference
- between this and `lease_duration` is an overlap window
- where multiple keys may be valid.
-
-If the return value is not a secret, then the return structure
-is an arbitrary JSON object.
-
-+ Response 200 (application/json)
-
- {
- "lease_id": "UUID",
- "lease_duration": 3600,
- "key": "value"
- }
-
-### Write [PUT]
-
-Write data to vault.
-
-The behavior and arguments to the write are defined by
-the logical backend.
-
-+ Request (application/json)
-
- {
- "key": "value"
- }
-
-+ Response 204
-
-# Group Lease Management
-
-## Renew Key [/sys/renew/{id}]
-
-+ Parameters
- + id (required, string) ... The `lease_id` of the secret
- to renew.
-
-### Renew [PUT]
-
-+ Response 200 (application/json)
-
- {
- "lease_id": "...",
- "lease_duration": 3600,
- "access_key": "foo",
- "secret_key": "bar"
- }
-
-## Revoke Key [/sys/revoke/{id}]
-
-+ Parameters
- + id (required, string) ... The `lease_id` of the secret
- to revoke.
-
-### Revoke [PUT]
-
-+ Response 204
-
-# Group Backend: AWS
-
-## Root Key [/aws/root]
-### Set the Key [PUT]
-
-Set the root key that the logical backend will use to create
-new secrets, IAM policies, etc.
-
-+ Request (application/json)
-
- {
- "access_key": "key",
- "secret_key": "key",
- "region": "us-east-1"
- }
-
-+ Response 204
-
-## Policies [/aws/policies]
-### List Policies [GET]
-
-List all the policies that can be used to create keys.
-
-+ Response 200 (application/json)
-
- [{
- "name": "root",
- "description": "Root access"
- }, {
- "name": "web-deploy",
- "description": "Enough permissions to deploy the web app."
- }]
-
-## Single Policy [/aws/policies/{name}]
-
-+ Parameters
- + name (required, string) ... Name of the policy.
-
-### Read [GET]
-
-Read a policy.
-
-+ Response 200 (application/json)
-
- {
- "policy": "base64-encoded policy"
- }
-
-### Upsert [PUT]
-
-Create or update a policy.
-
-+ Request (application/json)
-
- {
- "policy": "base64-encoded policy"
- }
-
-+ Response 204
-
-### Delete [DELETE]
-
-Delete the policy with the given name.
-
-+ Response 204
-
-## Generate Access Keys [/aws/keys/{policy}]
-### Create [GET]
-
-This generates a new keypair for the given policy.
-
-+ Parameters
- + policy (required, string) ... The policy under which to create
- the key pair.
-
-+ Response 200 (application/json)
-
- {
- "lease_id": "...",
- "lease_duration": 3600,
- "access_key": "foo",
- "secret_key": "bar"
- }
diff --git a/vendor/github.com/hashicorp/vault/api/api_integration_test.go b/vendor/github.com/hashicorp/vault/api/api_integration_test.go
deleted file mode 100644
index c4e1a1d..0000000
--- a/vendor/github.com/hashicorp/vault/api/api_integration_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package api_test
-
-import (
- "database/sql"
- "fmt"
- "testing"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/pki"
- "github.com/hashicorp/vault/builtin/logical/transit"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/vault"
-
- vaulthttp "github.com/hashicorp/vault/http"
- logxi "github.com/mgutz/logxi/v1"
- dockertest "gopkg.in/ory-am/dockertest.v3"
-)
-
-var testVaultServerDefaultBackends = map[string]logical.Factory{
- "transit": transit.Factory,
- "pki": pki.Factory,
-}
-
-func testVaultServer(t testing.TB) (*api.Client, func()) {
- return testVaultServerBackends(t, testVaultServerDefaultBackends)
-}
-
-func testVaultServerBackends(t testing.TB, backends map[string]logical.Factory) (*api.Client, func()) {
- coreConfig := &vault.CoreConfig{
- DisableMlock: true,
- DisableCache: true,
- Logger: logxi.NullLog,
- LogicalBackends: backends,
- }
-
- cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
- HandlerFunc: vaulthttp.Handler,
- })
- cluster.Start()
-
- // make it easy to get access to the active
- core := cluster.Cores[0].Core
- vault.TestWaitActive(t, core)
-
- client := cluster.Cores[0].Client
- client.SetToken(cluster.RootToken)
-
- // Sanity check
- secret, err := client.Auth().Token().LookupSelf()
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil || secret.Data["id"].(string) != cluster.RootToken {
- t.Fatalf("token mismatch: %#v vs %q", secret, cluster.RootToken)
- }
- return client, func() { defer cluster.Cleanup() }
-}
-
-// testPostgresDB creates a testing postgres database in a Docker container,
-// returning the connection URL and the associated closer function.
-func testPostgresDB(t testing.TB) (string, func()) {
- pool, err := dockertest.NewPool("")
- if err != nil {
- t.Fatalf("postgresdb: failed to connect to docker: %s", err)
- }
-
- resource, err := pool.Run("postgres", "latest", []string{
- "POSTGRES_PASSWORD=secret",
- "POSTGRES_DB=database",
- })
- if err != nil {
- t.Fatalf("postgresdb: could not start container: %s", err)
- }
-
- addr := fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp"))
-
- if err := pool.Retry(func() error {
- db, err := sql.Open("postgres", addr)
- if err != nil {
- return err
- }
- return db.Ping()
- }); err != nil {
- t.Fatalf("postgresdb: could not connect: %s", err)
- }
-
- return addr, func() {
- if err := pool.Purge(resource); err != nil {
- t.Fatalf("postgresdb: failed to cleanup container: %s", err)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/api/api_test.go b/vendor/github.com/hashicorp/vault/api/api_test.go
deleted file mode 100644
index d9059ea..0000000
--- a/vendor/github.com/hashicorp/vault/api/api_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package api
-
-import (
- "fmt"
- "net"
- "net/http"
- "testing"
-
- "golang.org/x/net/http2"
-)
-
-// testHTTPServer creates a test HTTP server that handles requests until
-// the listener returned is closed.
-func testHTTPServer(
- t *testing.T, handler http.Handler) (*Config, net.Listener) {
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- server := &http.Server{Handler: handler}
- if err := http2.ConfigureServer(server, nil); err != nil {
- t.Fatal(err)
- }
- go server.Serve(ln)
-
- config := DefaultConfig()
- config.Address = fmt.Sprintf("http://%s", ln.Addr())
-
- return config, ln
-}
diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go
index b19d5f0..53655fa 100644
--- a/vendor/github.com/hashicorp/vault/api/client.go
+++ b/vendor/github.com/hashicorp/vault/api/client.go
@@ -12,13 +12,14 @@ import (
"strings"
"sync"
"time"
+ "unicode"
- "golang.org/x/net/http2"
-
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-rootcerts"
"github.com/hashicorp/vault/helper/parseutil"
"github.com/sethgrid/pester"
+ "golang.org/x/net/http2"
)
const EnvVaultAddress = "VAULT_ADDR"
@@ -32,6 +33,7 @@ const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME"
const EnvVaultWrapTTL = "VAULT_WRAP_TTL"
const EnvVaultMaxRetries = "VAULT_MAX_RETRIES"
const EnvVaultToken = "VAULT_TOKEN"
+const EnvVaultMFA = "VAULT_MFA"
// WrappingLookupFunc is a function that, given an HTTP verb and a path,
// returns an optional string duration to be used for response wrapping (e.g.
@@ -42,24 +44,31 @@ type WrappingLookupFunc func(operation, path string) string
// Config is used to configure the creation of the client.
type Config struct {
+ modifyLock sync.RWMutex
+
// Address is the address of the Vault server. This should be a complete
// URL such as "http://vault.example.com". If you need a custom SSL
// cert or want to enable insecure mode, you need to specify a custom
// HttpClient.
Address string
- // HttpClient is the HTTP client to use, which will currently always have the
- // same values as http.DefaultClient. This is used to control redirect behavior.
+ // HttpClient is the HTTP client to use. Vault sets sane defaults for the
+ // http.Client and its associated http.Transport created in DefaultConfig.
+ // If you must modify Vault's defaults, it is suggested that you start with
+ // that client and modify as needed rather than start with an empty client
+ // (or http.DefaultClient).
HttpClient *http.Client
- redirectSetup sync.Once
-
// MaxRetries controls the maximum number of times to retry when a 5xx error
// occurs. Set to 0 or less to disable retrying. Defaults to 0.
MaxRetries int
// Timeout is for setting custom timeout parameter in the HttpClient
Timeout time.Duration
+
+ // If there is an error when creating the configuration, this will be the
+ // error
+ Error error
}
// TLSConfig contains the parameters needed to configure TLS on the HTTP client
@@ -92,60 +101,91 @@ type TLSConfig struct {
//
// The default Address is https://127.0.0.1:8200, but this can be overridden by
// setting the `VAULT_ADDR` environment variable.
+//
+// If an error is encountered, the Error field on the returned *Config will be set.
func DefaultConfig() *Config {
config := &Config{
Address: "https://127.0.0.1:8200",
HttpClient: cleanhttp.DefaultClient(),
}
config.HttpClient.Timeout = time.Second * 60
+
transport := config.HttpClient.Transport.(*http.Transport)
transport.TLSHandshakeTimeout = 10 * time.Second
transport.TLSClientConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
}
+ if err := http2.ConfigureTransport(transport); err != nil {
+ config.Error = err
+ return config
+ }
- if v := os.Getenv(EnvVaultAddress); v != "" {
- config.Address = v
+ if err := config.ReadEnvironment(); err != nil {
+ config.Error = err
+ return config
+ }
+
+ // Ensure redirects are not automatically followed
+ // Note that this is sane for the API client as it has its own
+ // redirect handling logic (and thus also for command/meta),
+ // but in e.g. http_test actual redirect handling is necessary
+ config.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ // Returning this value causes the Go net library to not close the
+ // response body and to nil out the error. Otherwise pester tries
+ // three times on every redirect because it sees an error from this
+ // function (to prevent redirects) passing through to it.
+ return http.ErrUseLastResponse
}
return config
}
-// ConfigureTLS takes a set of TLS configurations and applies those to the the HTTP client.
+// ConfigureTLS takes a set of TLS configurations and applies those to the
+// HTTP client.
func (c *Config) ConfigureTLS(t *TLSConfig) error {
if c.HttpClient == nil {
c.HttpClient = DefaultConfig().HttpClient
}
+ clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig
var clientCert tls.Certificate
foundClientCert := false
- if t.CACert != "" || t.CAPath != "" || t.ClientCert != "" || t.ClientKey != "" || t.Insecure {
- if t.ClientCert != "" && t.ClientKey != "" {
- var err error
- clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey)
- if err != nil {
- return err
- }
- foundClientCert = true
- } else if t.ClientCert != "" || t.ClientKey != "" {
- return fmt.Errorf("Both client cert and client key must be provided")
+
+ switch {
+ case t.ClientCert != "" && t.ClientKey != "":
+ var err error
+ clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey)
+ if err != nil {
+ return err
+ }
+ foundClientCert = true
+ case t.ClientCert != "" || t.ClientKey != "":
+ return fmt.Errorf("both client cert and client key must be provided")
+ }
+
+ if t.CACert != "" || t.CAPath != "" {
+ rootConfig := &rootcerts.Config{
+ CAFile: t.CACert,
+ CAPath: t.CAPath,
+ }
+ if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil {
+ return err
}
}
- clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig
- rootConfig := &rootcerts.Config{
- CAFile: t.CACert,
- CAPath: t.CAPath,
+ if t.Insecure {
+ clientTLSConfig.InsecureSkipVerify = true
}
- if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil {
- return err
- }
-
- clientTLSConfig.InsecureSkipVerify = t.Insecure
if foundClientCert {
- clientTLSConfig.Certificates = []tls.Certificate{clientCert}
+ // We use this function to ignore the server's preferential list of
+ // CAs, otherwise any CA used for the cert auth backend must be in the
+ // server's CA pool
+ clientTLSConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ return &clientCert, nil
+ }
}
+
if t.TLSServerName != "" {
clientTLSConfig.ServerName = t.TLSServerName
}
@@ -153,9 +193,8 @@ func (c *Config) ConfigureTLS(t *TLSConfig) error {
return nil
}
-// ReadEnvironment reads configuration information from the
-// environment. If there is an error, no configuration value
-// is updated.
+// ReadEnvironment reads configuration information from the environment. If
+// there is an error, no configuration value is updated.
func (c *Config) ReadEnvironment() error {
var envAddress string
var envCACert string
@@ -193,7 +232,7 @@ func (c *Config) ReadEnvironment() error {
if t := os.Getenv(EnvVaultClientTimeout); t != "" {
clientTimeout, err := parseutil.ParseDurationSecond(t)
if err != nil {
- return fmt.Errorf("Could not parse %s", EnvVaultClientTimeout)
+ return fmt.Errorf("could not parse %q", EnvVaultClientTimeout)
}
envClientTimeout = clientTimeout
}
@@ -201,7 +240,7 @@ func (c *Config) ReadEnvironment() error {
var err error
envInsecure, err = strconv.ParseBool(v)
if err != nil {
- return fmt.Errorf("Could not parse VAULT_SKIP_VERIFY")
+ return fmt.Errorf("could not parse VAULT_SKIP_VERIFY")
}
}
if v := os.Getenv(EnvVaultTLSServerName); v != "" {
@@ -217,6 +256,10 @@ func (c *Config) ReadEnvironment() error {
TLSServerName: envTLSServerName,
Insecure: envInsecure,
}
+
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
if err := c.ConfigureTLS(t); err != nil {
return err
}
@@ -236,27 +279,41 @@ func (c *Config) ReadEnvironment() error {
return nil
}
-// Client is the client to the Vault API. Create a client with
-// NewClient.
+// Client is the client to the Vault API. Create a client with NewClient.
type Client struct {
+ modifyLock sync.RWMutex
addr *url.URL
config *Config
token string
+ headers http.Header
wrappingLookupFunc WrappingLookupFunc
+ mfaCreds []string
+ policyOverride bool
}
// NewClient returns a new client for the given configuration.
//
+// If the configuration is nil, Vault will use configuration from
+// DefaultConfig(), which is the recommended starting configuration.
+//
// If the environment variable `VAULT_TOKEN` is present, the token will be
// automatically added to the client. Otherwise, you must manually call
// `SetToken()`.
func NewClient(c *Config) (*Client, error) {
- if c == nil {
- c = DefaultConfig()
- if err := c.ReadEnvironment(); err != nil {
- return nil, fmt.Errorf("error reading environment: %v", err)
- }
+ def := DefaultConfig()
+ if def == nil {
+ return nil, fmt.Errorf("could not create/read default configuration")
}
+ if def.Error != nil {
+ return nil, errwrap.Wrapf("error encountered setting up default configuration: {{err}}", def.Error)
+ }
+
+ if c == nil {
+ c = def
+ }
+
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
u, err := url.Parse(c.Address)
if err != nil {
@@ -264,37 +321,19 @@ func NewClient(c *Config) (*Client, error) {
}
if c.HttpClient == nil {
- c.HttpClient = DefaultConfig().HttpClient
+ c.HttpClient = def.HttpClient
}
-
- tp := c.HttpClient.Transport.(*http.Transport)
- if err := http2.ConfigureTransport(tp); err != nil {
- return nil, err
+ if c.HttpClient.Transport == nil {
+ c.HttpClient.Transport = def.HttpClient.Transport
}
- redirFunc := func() {
- // Ensure redirects are not automatically followed
- // Note that this is sane for the API client as it has its own
- // redirect handling logic (and thus also for command/meta),
- // but in e.g. http_test actual redirect handling is necessary
- c.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
- // Returning this value causes the Go net library to not close the
- // response body and to nil out the error. Otherwise pester tries
- // three times on every redirect because it sees an error from this
- // function (to prevent redirects) passing through to it.
- return http.ErrUseLastResponse
- }
- }
-
- c.redirectSetup.Do(redirFunc)
-
client := &Client{
addr: u,
config: c,
}
if token := os.Getenv(EnvVaultToken); token != "" {
- client.SetToken(token)
+ client.token = token
}
return client, nil
@@ -304,9 +343,12 @@ func NewClient(c *Config) (*Client, error) {
// "<Scheme>://<Host>:<Port>". Setting this on a client will override the
// value of VAULT_ADDR environment variable.
func (c *Client) SetAddress(addr string) error {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
var err error
if c.addr, err = url.Parse(addr); err != nil {
- return fmt.Errorf("failed to set address: %v", err)
+ return errwrap.Wrapf("failed to set address: {{err}}", err)
}
return nil
@@ -314,51 +356,122 @@ func (c *Client) SetAddress(addr string) error {
// Address returns the Vault URL the client is configured to connect to
func (c *Client) Address() string {
+ c.modifyLock.RLock()
+ defer c.modifyLock.RUnlock()
+
return c.addr.String()
}
// SetMaxRetries sets the number of retries that will be used in the case of certain errors
func (c *Client) SetMaxRetries(retries int) {
+ c.modifyLock.RLock()
+ c.config.modifyLock.Lock()
+ defer c.config.modifyLock.Unlock()
+ c.modifyLock.RUnlock()
+
c.config.MaxRetries = retries
}
// SetClientTimeout sets the client request timeout
func (c *Client) SetClientTimeout(timeout time.Duration) {
+ c.modifyLock.RLock()
+ c.config.modifyLock.Lock()
+ defer c.config.modifyLock.Unlock()
+ c.modifyLock.RUnlock()
+
c.config.Timeout = timeout
}
// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs
// for a given operation and path
func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
c.wrappingLookupFunc = lookupFunc
}
+// SetMFACreds sets the MFA credentials supplied either via the environment
+// variable or via the command line.
+func (c *Client) SetMFACreds(creds []string) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
+ c.mfaCreds = creds
+}
+
// Token returns the access token being used by this client. It will
// return the empty string if there is no token set.
func (c *Client) Token() string {
+ c.modifyLock.RLock()
+ defer c.modifyLock.RUnlock()
+
return c.token
}
// SetToken sets the token directly. This won't perform any auth
// verification, it simply sets the token properly for future requests.
func (c *Client) SetToken(v string) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
c.token = v
}
// ClearToken deletes the token if it is set or does nothing otherwise.
func (c *Client) ClearToken() {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
c.token = ""
}
-// Clone creates a copy of this client.
+// SetHeaders sets the headers to be used for future requests.
+func (c *Client) SetHeaders(headers http.Header) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
+ c.headers = headers
+}
+
+// Clone creates a new client with the same configuration. Note that the same
+// underlying http.Client is used; modifying the client from more than one
+// goroutine at once may not be safe, so modify the client as needed and then
+// clone.
func (c *Client) Clone() (*Client, error) {
- return NewClient(c.config)
+ c.modifyLock.RLock()
+ c.config.modifyLock.RLock()
+ config := c.config
+ c.modifyLock.RUnlock()
+
+ newConfig := &Config{
+ Address: config.Address,
+ HttpClient: config.HttpClient,
+ MaxRetries: config.MaxRetries,
+ Timeout: config.Timeout,
+ }
+ config.modifyLock.RUnlock()
+
+ return NewClient(newConfig)
+}
+
+// SetPolicyOverride sets whether requests should be sent with the policy
+// override flag to request overriding soft-mandatory Sentinel policies (both
+// RGPs and EGPs)
+func (c *Client) SetPolicyOverride(override bool) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
+ c.policyOverride = override
}
// NewRequest creates a new raw request object to query the Vault server
// configured for this client. This is an advanced method and generally
// doesn't need to be called externally.
func (c *Client) NewRequest(method, requestPath string) *Request {
+ c.modifyLock.RLock()
+ defer c.modifyLock.RUnlock()
+
// if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV
// record and take the highest match; this is not designed for high-availability, just discovery
var host string = c.addr.Host
@@ -391,6 +504,9 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
default:
lookupPath = requestPath
}
+
+ req.MFAHeaderVals = c.mfaCreds
+
if c.wrappingLookupFunc != nil {
req.WrapTTL = c.wrappingLookupFunc(method, lookupPath)
} else {
@@ -399,6 +515,11 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
if c.config.Timeout != 0 {
c.config.HttpClient.Timeout = c.config.Timeout
}
+ if c.headers != nil {
+ req.Headers = c.headers
+ }
+
+ req.PolicyOverride = c.policyOverride
return req
}
@@ -407,6 +528,20 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
// a Vault server not configured with this client. This is an advanced operation
// that generally won't need to be called externally.
func (c *Client) RawRequest(r *Request) (*Response, error) {
+ c.modifyLock.RLock()
+ c.config.modifyLock.RLock()
+ defer c.config.modifyLock.RUnlock()
+ token := c.token
+ c.modifyLock.RUnlock()
+
+ // Sanity check the token before potentially erroring from the API
+ idx := strings.IndexFunc(token, func(c rune) bool {
+ return !unicode.IsPrint(c)
+ })
+ if idx != -1 {
+ return nil, fmt.Errorf("configured Vault token contains non-printable characters and cannot be used")
+ }
+
redirectCount := 0
START:
req, err := r.ToHTTP()
@@ -425,8 +560,8 @@ START:
}
if err != nil {
if strings.Contains(err.Error(), "tls: oversized") {
- err = fmt.Errorf(
- "%s\n\n"+
+ err = errwrap.Wrapf(
+ "{{err}}\n\n"+
"This error usually means that the server is running with TLS disabled\n"+
"but the client is configured to use TLS. Please either enable TLS\n"+
"on the server or run the client with -address set to an address\n"+
diff --git a/vendor/github.com/hashicorp/vault/api/client_test.go b/vendor/github.com/hashicorp/vault/api/client_test.go
deleted file mode 100644
index 84663ee..0000000
--- a/vendor/github.com/hashicorp/vault/api/client_test.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package api
-
-import (
- "bytes"
- "io"
- "net/http"
- "os"
- "testing"
- "time"
-)
-
-func init() {
- // Ensure our special envvars are not present
- os.Setenv("VAULT_ADDR", "")
- os.Setenv("VAULT_TOKEN", "")
-}
-
-func TestDefaultConfig_envvar(t *testing.T) {
- os.Setenv("VAULT_ADDR", "https://vault.mycompany.com")
- defer os.Setenv("VAULT_ADDR", "")
-
- config := DefaultConfig()
- if config.Address != "https://vault.mycompany.com" {
- t.Fatalf("bad: %s", config.Address)
- }
-
- os.Setenv("VAULT_TOKEN", "testing")
- defer os.Setenv("VAULT_TOKEN", "")
-
- client, err := NewClient(config)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if token := client.Token(); token != "testing" {
- t.Fatalf("bad: %s", token)
- }
-}
-
-func TestClientNilConfig(t *testing.T) {
- client, err := NewClient(nil)
- if err != nil {
- t.Fatal(err)
- }
- if client == nil {
- t.Fatal("expected a non-nil client")
- }
-}
-
-func TestClientSetAddress(t *testing.T) {
- client, err := NewClient(nil)
- if err != nil {
- t.Fatal(err)
- }
- if err := client.SetAddress("http://172.168.2.1:8300"); err != nil {
- t.Fatal(err)
- }
- if client.addr.Host != "172.168.2.1:8300" {
- t.Fatalf("bad: expected: '172.168.2.1:8300' actual: %q", client.addr.Host)
- }
-}
-
-func TestClientToken(t *testing.T) {
- tokenValue := "foo"
- handler := func(w http.ResponseWriter, req *http.Request) {}
-
- config, ln := testHTTPServer(t, http.HandlerFunc(handler))
- defer ln.Close()
-
- client, err := NewClient(config)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- client.SetToken(tokenValue)
-
- // Verify the token is set
- if v := client.Token(); v != tokenValue {
- t.Fatalf("bad: %s", v)
- }
-
- client.ClearToken()
-
- if v := client.Token(); v != "" {
- t.Fatalf("bad: %s", v)
- }
-}
-
-func TestClientRedirect(t *testing.T) {
- primary := func(w http.ResponseWriter, req *http.Request) {
- w.Write([]byte("test"))
- }
- config, ln := testHTTPServer(t, http.HandlerFunc(primary))
- defer ln.Close()
-
- standby := func(w http.ResponseWriter, req *http.Request) {
- w.Header().Set("Location", config.Address)
- w.WriteHeader(307)
- }
- config2, ln2 := testHTTPServer(t, http.HandlerFunc(standby))
- defer ln2.Close()
-
- client, err := NewClient(config2)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Set the token manually
- client.SetToken("foo")
-
- // Do a raw "/" request
- resp, err := client.RawRequest(client.NewRequest("PUT", "/"))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Copy the response
- var buf bytes.Buffer
- io.Copy(&buf, resp.Body)
-
- // Verify we got the response from the primary
- if buf.String() != "test" {
- t.Fatalf("Bad: %s", buf.String())
- }
-}
-
-func TestClientEnvSettings(t *testing.T) {
- cwd, _ := os.Getwd()
- oldCACert := os.Getenv(EnvVaultCACert)
- oldCAPath := os.Getenv(EnvVaultCAPath)
- oldClientCert := os.Getenv(EnvVaultClientCert)
- oldClientKey := os.Getenv(EnvVaultClientKey)
- oldSkipVerify := os.Getenv(EnvVaultInsecure)
- oldMaxRetries := os.Getenv(EnvVaultMaxRetries)
- os.Setenv(EnvVaultCACert, cwd+"/test-fixtures/keys/cert.pem")
- os.Setenv(EnvVaultCAPath, cwd+"/test-fixtures/keys")
- os.Setenv(EnvVaultClientCert, cwd+"/test-fixtures/keys/cert.pem")
- os.Setenv(EnvVaultClientKey, cwd+"/test-fixtures/keys/key.pem")
- os.Setenv(EnvVaultInsecure, "true")
- os.Setenv(EnvVaultMaxRetries, "5")
- defer os.Setenv(EnvVaultCACert, oldCACert)
- defer os.Setenv(EnvVaultCAPath, oldCAPath)
- defer os.Setenv(EnvVaultClientCert, oldClientCert)
- defer os.Setenv(EnvVaultClientKey, oldClientKey)
- defer os.Setenv(EnvVaultInsecure, oldSkipVerify)
- defer os.Setenv(EnvVaultMaxRetries, oldMaxRetries)
-
- config := DefaultConfig()
- if err := config.ReadEnvironment(); err != nil {
- t.Fatalf("error reading environment: %v", err)
- }
-
- tlsConfig := config.HttpClient.Transport.(*http.Transport).TLSClientConfig
- if len(tlsConfig.RootCAs.Subjects()) == 0 {
- t.Fatalf("bad: expected a cert pool with at least one subject")
- }
- if len(tlsConfig.Certificates) != 1 {
- t.Fatalf("bad: expected client tls config to have a client certificate")
- }
- if tlsConfig.InsecureSkipVerify != true {
- t.Fatalf("bad: %v", tlsConfig.InsecureSkipVerify)
- }
-}
-
-func TestClientTimeoutSetting(t *testing.T) {
- oldClientTimeout := os.Getenv(EnvVaultClientTimeout)
- os.Setenv(EnvVaultClientTimeout, "10")
- defer os.Setenv(EnvVaultClientTimeout, oldClientTimeout)
- config := DefaultConfig()
- config.ReadEnvironment()
- client, err := NewClient(config)
- if err != nil {
- t.Fatal(err)
- }
- _ = client.NewRequest("PUT", "/")
- if client.config.HttpClient.Timeout != time.Second*10 {
- t.Fatalf("error setting client timeout using env variable")
- }
-
- // Setting custom client timeout for a new request
- client.SetClientTimeout(time.Second * 20)
- _ = client.NewRequest("PUT", "/")
- if client.config.HttpClient.Timeout != time.Second*20 {
- t.Fatalf("error setting client timeout using SetClientTimeout")
- }
-
-}
diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go
index 0d5e7d4..d5e5afa 100644
--- a/vendor/github.com/hashicorp/vault/api/logical.go
+++ b/vendor/github.com/hashicorp/vault/api/logical.go
@@ -3,9 +3,11 @@ package api
import (
"bytes"
"fmt"
+ "io"
"net/http"
"os"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/jsonutil"
)
@@ -50,6 +52,17 @@ func (c *Logical) Read(path string) (*Secret, error) {
defer resp.Body.Close()
}
if resp != nil && resp.StatusCode == 404 {
+ secret, parseErr := ParseSecret(resp.Body)
+ switch parseErr {
+ case nil:
+ case io.EOF:
+ return nil, nil
+ default:
+ return nil, err
+ }
+ if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
+ return secret, nil
+ }
return nil, nil
}
if err != nil {
@@ -70,6 +83,17 @@ func (c *Logical) List(path string) (*Secret, error) {
defer resp.Body.Close()
}
if resp != nil && resp.StatusCode == 404 {
+ secret, parseErr := ParseSecret(resp.Body)
+ switch parseErr {
+ case nil:
+ case io.EOF:
+ return nil, nil
+ default:
+ return nil, err
+ }
+ if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
+ return secret, nil
+ }
return nil, nil
}
if err != nil {
@@ -89,6 +113,19 @@ func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, erro
if resp != nil {
defer resp.Body.Close()
}
+ if resp != nil && resp.StatusCode == 404 {
+ secret, parseErr := ParseSecret(resp.Body)
+ switch parseErr {
+ case nil:
+ case io.EOF:
+ return nil, nil
+ default:
+ return nil, err
+ }
+ if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
+ return secret, err
+ }
+ }
if err != nil {
return nil, err
}
@@ -106,6 +143,19 @@ func (c *Logical) Delete(path string) (*Secret, error) {
if resp != nil {
defer resp.Body.Close()
}
+ if resp != nil && resp.StatusCode == 404 {
+ secret, parseErr := ParseSecret(resp.Body)
+ switch parseErr {
+ case nil:
+ case io.EOF:
+ return nil, nil
+ default:
+ return nil, err
+ }
+ if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
+ return secret, err
+ }
+ }
if err != nil {
return nil, err
}
@@ -138,10 +188,11 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
if resp != nil {
defer resp.Body.Close()
}
- if err != nil {
- if resp != nil && resp.StatusCode != 404 {
- return nil, err
- }
+
+ // Return all errors except those that are from a 404 as we handle the not
+ // found error as a special case.
+ if err != nil && (resp == nil || resp.StatusCode != 404) {
+ return nil, err
}
if resp == nil {
return nil, nil
@@ -163,10 +214,10 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
secret, err := c.Read(wrappedResponseLocation)
if err != nil {
- return nil, fmt.Errorf("error reading %s: %s", wrappedResponseLocation, err)
+ return nil, errwrap.Wrapf(fmt.Sprintf("error reading %q: {{err}}", wrappedResponseLocation), err)
}
if secret == nil {
- return nil, fmt.Errorf("no value found at %s", wrappedResponseLocation)
+ return nil, fmt.Errorf("no value found at %q", wrappedResponseLocation)
}
if secret.Data == nil {
return nil, fmt.Errorf("\"data\" not found in wrapping response")
@@ -178,7 +229,7 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
wrappedSecret := new(Secret)
buf := bytes.NewBufferString(secret.Data["response"].(string))
if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil {
- return nil, fmt.Errorf("error unmarshaling wrapped secret: %s", err)
+ return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err)
}
return wrappedSecret, nil
diff --git a/vendor/github.com/hashicorp/vault/api/renewer.go b/vendor/github.com/hashicorp/vault/api/renewer.go
index a2a4b66..7fd1de7 100644
--- a/vendor/github.com/hashicorp/vault/api/renewer.go
+++ b/vendor/github.com/hashicorp/vault/api/renewer.go
@@ -13,9 +13,6 @@ var (
ErrRenewerNotRenewable = errors.New("secret is not renewable")
ErrRenewerNoSecretData = errors.New("returned empty secret data")
- // DefaultRenewerGrace is the default grace period
- DefaultRenewerGrace = 15 * time.Second
-
// DefaultRenewerRenewBuffer is the default size of the buffer for renew
// messages on the channel.
DefaultRenewerRenewBuffer = 5
@@ -50,12 +47,13 @@ var (
type Renewer struct {
l sync.Mutex
- client *Client
- secret *Secret
- grace time.Duration
- random *rand.Rand
- doneCh chan error
- renewCh chan *RenewOutput
+ client *Client
+ secret *Secret
+ grace time.Duration
+ random *rand.Rand
+ increment int
+ doneCh chan error
+ renewCh chan *RenewOutput
stopped bool
stopCh chan struct{}
@@ -79,6 +77,11 @@ type RenewerInput struct {
// RenewBuffer is the size of the buffered channel where renew messages are
// dispatched.
RenewBuffer int
+
+ // The new TTL, in seconds, that should be set on the lease. The TTL set
+ // here may or may not be honored by the vault server, based on Vault
+ // configuration or any associated max TTL values.
+ Increment int
}
// RenewOutput is the metadata returned to the client (if it's listening) to
@@ -105,9 +108,6 @@ func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) {
}
grace := i.Grace
- if grace == 0 {
- grace = DefaultRenewerGrace
- }
random := i.Rand
if random == nil {
@@ -120,12 +120,13 @@ func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) {
}
return &Renewer{
- client: c,
- secret: secret,
- grace: grace,
- random: random,
- doneCh: make(chan error, 1),
- renewCh: make(chan *RenewOutput, renewBuffer),
+ client: c,
+ secret: secret,
+ grace: grace,
+ increment: i.Increment,
+ random: random,
+ doneCh: make(chan error, 1),
+ renewCh: make(chan *RenewOutput, renewBuffer),
stopped: false,
stopCh: make(chan struct{}),
@@ -155,8 +156,8 @@ func (r *Renewer) Stop() {
}
// Renew starts a background process for renewing this secret. When the secret
-// is has auth data, this attempts to renew the auth (token). When the secret
-// has a lease, this attempts to renew the lease.
+// has auth data, this attempts to renew the auth (token). When the secret has
+// a lease, this attempts to renew the lease.
func (r *Renewer) Renew() {
var result error
if r.secret.Auth != nil {
@@ -177,6 +178,9 @@ func (r *Renewer) renewAuth() error {
return ErrRenewerNotRenewable
}
+ priorDuration := time.Duration(r.secret.Auth.LeaseDuration) * time.Second
+ r.calculateGrace(priorDuration)
+
client, token := r.client, r.secret.Auth.ClientToken
for {
@@ -188,7 +192,7 @@ func (r *Renewer) renewAuth() error {
}
// Renew the auth.
- renewal, err := client.Auth().Token().RenewTokenAsSelf(token, 0)
+ renewal, err := client.Auth().Token().RenewTokenAsSelf(token, r.increment)
if err != nil {
return err
}
@@ -209,13 +213,28 @@ func (r *Renewer) renewAuth() error {
return ErrRenewerNotRenewable
}
- // Grab the lease duration and sleep duration - note that we grab the auth
- // lease duration, not the secret lease duration.
+ // Grab the lease duration
leaseDuration := time.Duration(renewal.Auth.LeaseDuration) * time.Second
- sleepDuration := r.sleepDuration(leaseDuration)
- // If we are within grace, return now.
- if leaseDuration <= r.grace || sleepDuration <= r.grace {
+ // We keep evaluating a new grace period so long as the lease is
+ // extending. Once it stops extending, we've hit the max and need to
+ // rely on the grace duration.
+ if leaseDuration > priorDuration {
+ r.calculateGrace(leaseDuration)
+ }
+ priorDuration = leaseDuration
+
+ // The sleep duration is set to 2/3 of the current lease duration plus
+ // 1/3 of the current grace period, which adds jitter.
+ sleepDuration := time.Duration(float64(leaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3)
+
+ // If we are within grace, return now; or, if the amount of time we
+ // would sleep would land us in the grace period. This helps with short
+ // tokens; for example, you don't want a current lease duration of 4
+ // seconds, a grace period of 3 seconds, and end up sleeping for more
+ // than three of those seconds and having a very small budget of time
+ // to renew.
+ if leaseDuration <= r.grace || leaseDuration-sleepDuration <= r.grace {
return nil
}
@@ -234,6 +253,9 @@ func (r *Renewer) renewLease() error {
return ErrRenewerNotRenewable
}
+ priorDuration := time.Duration(r.secret.LeaseDuration) * time.Second
+ r.calculateGrace(priorDuration)
+
client, leaseID := r.client, r.secret.LeaseID
for {
@@ -245,7 +267,7 @@ func (r *Renewer) renewLease() error {
}
// Renew the lease.
- renewal, err := client.Sys().Renew(leaseID, 0)
+ renewal, err := client.Sys().Renew(leaseID, r.increment)
if err != nil {
return err
}
@@ -266,12 +288,28 @@ func (r *Renewer) renewLease() error {
return ErrRenewerNotRenewable
}
- // Grab the lease duration and sleep duration
+ // Grab the lease duration
leaseDuration := time.Duration(renewal.LeaseDuration) * time.Second
- sleepDuration := r.sleepDuration(leaseDuration)
- // If we are within grace, return now.
- if leaseDuration <= r.grace || sleepDuration <= r.grace {
+ // We keep evaluating a new grace period so long as the lease is
+ // extending. Once it stops extending, we've hit the max and need to
+ // rely on the grace duration.
+ if leaseDuration > priorDuration {
+ r.calculateGrace(leaseDuration)
+ }
+ priorDuration = leaseDuration
+
+ // The sleep duration is set to 2/3 of the current lease duration plus
+ // 1/3 of the current grace period, which adds jitter.
+ sleepDuration := time.Duration(float64(leaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3)
+
+ // If we are within grace, return now; or, if the amount of time we
+ // would sleep would land us in the grace period. This helps with short
+ // tokens; for example, you don't want a current lease duration of 4
+ // seconds, a grace period of 3 seconds, and end up sleeping for more
+ // than three of those seconds and having a very small budget of time
+ // to renew.
+ if leaseDuration <= r.grace || leaseDuration-sleepDuration <= r.grace {
return nil
}
@@ -300,3 +338,20 @@ func (r *Renewer) sleepDuration(base time.Duration) time.Duration {
return time.Duration(sleep)
}
+
+// calculateGrace calculates the grace period based on a reasonable set of
+// assumptions given the total lease time; it also adds some jitter to not have
+// clients be in sync.
+func (r *Renewer) calculateGrace(leaseDuration time.Duration) {
+ if leaseDuration == 0 {
+ r.grace = 0
+ return
+ }
+
+ leaseNanos := float64(leaseDuration.Nanoseconds())
+ jitterMax := 0.1 * leaseNanos
+
+ // For a given lease duration, we want to allow 80-90% of that to elapse,
+ // so the remaining amount is the grace period
+ r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax))
+}
diff --git a/vendor/github.com/hashicorp/vault/api/renewer_integration_test.go b/vendor/github.com/hashicorp/vault/api/renewer_integration_test.go
deleted file mode 100644
index 7011c7d..0000000
--- a/vendor/github.com/hashicorp/vault/api/renewer_integration_test.go
+++ /dev/null
@@ -1,228 +0,0 @@
-package api_test
-
-import (
- "testing"
- "time"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/database"
- "github.com/hashicorp/vault/builtin/logical/pki"
- "github.com/hashicorp/vault/builtin/logical/transit"
- "github.com/hashicorp/vault/logical"
-)
-
-func TestRenewer_Renew(t *testing.T) {
- t.Parallel()
-
- client, vaultDone := testVaultServerBackends(t, map[string]logical.Factory{
- "database": database.Factory,
- "pki": pki.Factory,
- "transit": transit.Factory,
- })
- defer vaultDone()
-
- pgURL, pgDone := testPostgresDB(t)
- defer pgDone()
-
- t.Run("group", func(t *testing.T) {
- t.Run("kv", func(t *testing.T) {
- t.Parallel()
-
- if _, err := client.Logical().Write("secret/value", map[string]interface{}{
- "foo": "bar",
- }); err != nil {
- t.Fatal(err)
- }
-
- secret, err := client.Logical().Read("secret/value")
- if err != nil {
- t.Fatal(err)
- }
-
- v, err := client.NewRenewer(&api.RenewerInput{
- Secret: secret,
- })
- if err != nil {
- t.Fatal(err)
- }
- go v.Renew()
- defer v.Stop()
-
- select {
- case err := <-v.DoneCh():
- if err != api.ErrRenewerNotRenewable {
- t.Fatal(err)
- }
- case renew := <-v.RenewCh():
- t.Errorf("received renew, but should have been nil: %#v", renew)
- case <-time.After(500 * time.Millisecond):
- t.Error("should have been non-renewable")
- }
- })
-
- t.Run("transit", func(t *testing.T) {
- t.Parallel()
-
- if err := client.Sys().Mount("transit", &api.MountInput{
- Type: "transit",
- }); err != nil {
- t.Fatal(err)
- }
-
- secret, err := client.Logical().Write("transit/encrypt/my-app", map[string]interface{}{
- "plaintext": "Zm9vCg==",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- v, err := client.NewRenewer(&api.RenewerInput{
- Secret: secret,
- })
- if err != nil {
- t.Fatal(err)
- }
- go v.Renew()
- defer v.Stop()
-
- select {
- case err := <-v.DoneCh():
- if err != api.ErrRenewerNotRenewable {
- t.Fatal(err)
- }
- case renew := <-v.RenewCh():
- t.Errorf("received renew, but should have been nil: %#v", renew)
- case <-time.After(500 * time.Millisecond):
- t.Error("should have been non-renewable")
- }
- })
-
- t.Run("database", func(t *testing.T) {
- t.Parallel()
-
- if err := client.Sys().Mount("database", &api.MountInput{
- Type: "database",
- }); err != nil {
- t.Fatal(err)
- }
- if _, err := client.Logical().Write("database/config/postgresql", map[string]interface{}{
- "plugin_name": "postgresql-database-plugin",
- "connection_url": pgURL,
- "allowed_roles": "readonly",
- }); err != nil {
- t.Fatal(err)
- }
- if _, err := client.Logical().Write("database/roles/readonly", map[string]interface{}{
- "db_name": "postgresql",
- "creation_statements": `` +
- `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';` +
- `GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";`,
- "default_ttl": "1s",
- "max_ttl": "3s",
- }); err != nil {
- t.Fatal(err)
- }
-
- secret, err := client.Logical().Read("database/creds/readonly")
- if err != nil {
- t.Fatal(err)
- }
-
- v, err := client.NewRenewer(&api.RenewerInput{
- Secret: secret,
- })
- if err != nil {
- t.Fatal(err)
- }
- go v.Renew()
- defer v.Stop()
-
- select {
- case err := <-v.DoneCh():
- t.Errorf("should have renewed once before returning: %s", err)
- case renew := <-v.RenewCh():
- if renew == nil {
- t.Fatal("renew is nil")
- }
- if !renew.Secret.Renewable {
- t.Errorf("expected lease to be renewable: %#v", renew)
- }
- if renew.Secret.LeaseDuration > 2 {
- t.Errorf("expected lease to < 2s: %#v", renew)
- }
- case <-time.After(3 * time.Second):
- t.Errorf("no renewal")
- }
-
- select {
- case err := <-v.DoneCh():
- if err != nil {
- t.Fatal(err)
- }
- case renew := <-v.RenewCh():
- t.Fatalf("should not have renewed (lease should be up): %#v", renew)
- case <-time.After(3 * time.Second):
- t.Errorf("no data")
- }
- })
-
- t.Run("auth", func(t *testing.T) {
- t.Parallel()
-
- secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Policies: []string{"default"},
- TTL: "1s",
- ExplicitMaxTTL: "3s",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- v, err := client.NewRenewer(&api.RenewerInput{
- Secret: secret,
- })
- if err != nil {
- t.Fatal(err)
- }
- go v.Renew()
- defer v.Stop()
-
- select {
- case err := <-v.DoneCh():
- t.Errorf("should have renewed once before returning: %s", err)
- case renew := <-v.RenewCh():
- if renew == nil {
- t.Fatal("renew is nil")
- }
- if renew.Secret.Auth == nil {
- t.Fatal("renew auth is nil")
- }
- if !renew.Secret.Auth.Renewable {
- t.Errorf("expected lease to be renewable: %#v", renew)
- }
- if renew.Secret.Auth.LeaseDuration > 2 {
- t.Errorf("expected lease to < 2s: %#v", renew)
- }
- if renew.Secret.Auth.ClientToken == "" {
- t.Error("expected a client token")
- }
- if renew.Secret.Auth.Accessor == "" {
- t.Error("expected an accessor")
- }
- case <-time.After(3 * time.Second):
- t.Errorf("no renewal")
- }
-
- select {
- case err := <-v.DoneCh():
- if err != nil {
- t.Fatal(err)
- }
- case renew := <-v.RenewCh():
- t.Fatalf("should not have renewed (lease should be up): %#v", renew)
- case <-time.After(3 * time.Second):
- t.Errorf("no data")
- }
- })
- })
-}
diff --git a/vendor/github.com/hashicorp/vault/api/renewer_test.go b/vendor/github.com/hashicorp/vault/api/renewer_test.go
deleted file mode 100644
index 262484e..0000000
--- a/vendor/github.com/hashicorp/vault/api/renewer_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package api
-
-import (
- "reflect"
- "testing"
- "time"
-)
-
-func TestRenewer_NewRenewer(t *testing.T) {
- t.Parallel()
-
- client, err := NewClient(DefaultConfig())
- if err != nil {
- t.Fatal(err)
- }
-
- cases := []struct {
- name string
- i *RenewerInput
- e *Renewer
- err bool
- }{
- {
- "nil",
- nil,
- nil,
- true,
- },
- {
- "missing_secret",
- &RenewerInput{
- Secret: nil,
- },
- nil,
- true,
- },
- {
- "default_grace",
- &RenewerInput{
- Secret: &Secret{},
- },
- &Renewer{
- secret: &Secret{},
- grace: DefaultRenewerGrace,
- },
- false,
- },
- {
- "custom_grace",
- &RenewerInput{
- Secret: &Secret{},
- Grace: 30 * time.Second,
- },
- &Renewer{
- secret: &Secret{},
- grace: 30 * time.Second,
- },
- false,
- },
- }
-
- for _, tc := range cases {
- t.Run(tc.name, func(t *testing.T) {
- v, err := client.NewRenewer(tc.i)
- if (err != nil) != tc.err {
- t.Fatal(err)
- }
-
- if v == nil {
- return
- }
-
- // Zero-out channels because reflect
- v.client = nil
- v.random = nil
- v.doneCh = nil
- v.renewCh = nil
- v.stopCh = nil
-
- if !reflect.DeepEqual(tc.e, v) {
- t.Errorf("not equal\nexp: %#v\nact: %#v", tc.e, v)
- }
- })
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go
index 83a28bd..a5d8e75 100644
--- a/vendor/github.com/hashicorp/vault/api/request.go
+++ b/vendor/github.com/hashicorp/vault/api/request.go
@@ -11,15 +11,21 @@ import (
// Request is a raw request configuration structure used to initiate
// API requests to the Vault server.
type Request struct {
- Method string
- URL *url.URL
- Params url.Values
- Headers http.Header
- ClientToken string
- WrapTTL string
- Obj interface{}
- Body io.Reader
- BodySize int64
+ Method string
+ URL *url.URL
+ Params url.Values
+ Headers http.Header
+ ClientToken string
+ MFAHeaderVals []string
+ WrapTTL string
+ Obj interface{}
+ Body io.Reader
+ BodySize int64
+
+ // Whether to request overriding soft-mandatory Sentinel policies (RGPs and
+ // EGPs). If set, the override flag will take effect for all policies
+ // evaluated during the request.
+ PolicyOverride bool
}
// SetJSONBody is used to set a request body that is a JSON-encoded value.
@@ -77,5 +83,15 @@ func (r *Request) ToHTTP() (*http.Request, error) {
req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL)
}
+ if len(r.MFAHeaderVals) != 0 {
+ for _, mfaHeaderVal := range r.MFAHeaderVals {
+ req.Header.Add("X-Vault-MFA", mfaHeaderVal)
+ }
+ }
+
+ if r.PolicyOverride {
+ req.Header.Set("X-Vault-Policy-Override", "true")
+ }
+
return req, nil
}
diff --git a/vendor/github.com/hashicorp/vault/api/request_test.go b/vendor/github.com/hashicorp/vault/api/request_test.go
deleted file mode 100644
index 904f59a..0000000
--- a/vendor/github.com/hashicorp/vault/api/request_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package api
-
-import (
- "bytes"
- "io"
- "strings"
- "testing"
-)
-
-func TestRequestSetJSONBody(t *testing.T) {
- var r Request
- raw := map[string]interface{}{"foo": "bar"}
- if err := r.SetJSONBody(raw); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var buf bytes.Buffer
- if _, err := io.Copy(&buf, r.Body); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := `{"foo":"bar"}`
- actual := strings.TrimSpace(buf.String())
- if actual != expected {
- t.Fatalf("bad: %s", actual)
- }
-
- if int64(len(buf.String())) != r.BodySize {
- t.Fatalf("bad: %d", len(actual))
- }
-}
-
-func TestRequestResetJSONBody(t *testing.T) {
- var r Request
- raw := map[string]interface{}{"foo": "bar"}
- if err := r.SetJSONBody(raw); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var buf bytes.Buffer
- if _, err := io.Copy(&buf, r.Body); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := r.ResetJSONBody(); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var buf2 bytes.Buffer
- if _, err := io.Copy(&buf2, r.Body); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := `{"foo":"bar"}`
- actual := strings.TrimSpace(buf2.String())
- if actual != expected {
- t.Fatalf("bad: %s", actual)
- }
-
- if int64(len(buf2.String())) != r.BodySize {
- t.Fatalf("bad: %d", len(actual))
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go
index 05502e1..053a277 100644
--- a/vendor/github.com/hashicorp/vault/api/response.go
+++ b/vendor/github.com/hashicorp/vault/api/response.go
@@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"io"
+ "io/ioutil"
"net/http"
"github.com/hashicorp/vault/helper/jsonutil"
@@ -33,11 +34,14 @@ func (r *Response) Error() error {
// We have an error. Let's copy the body into our own buffer first,
// so that if we can't decode JSON, we can at least copy it raw.
- var bodyBuf bytes.Buffer
- if _, err := io.Copy(&bodyBuf, r.Body); err != nil {
+ bodyBuf := &bytes.Buffer{}
+ if _, err := io.Copy(bodyBuf, r.Body); err != nil {
return err
}
+ r.Body.Close()
+ r.Body = ioutil.NopCloser(bodyBuf)
+
// Decode the error response if we can. Note that we wrap the bodyBuf
// in a bytes.Reader here so that the JSON decoder doesn't move the
// read pointer for the original buffer.
diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go
index 7478a0c..4675f4a 100644
--- a/vendor/github.com/hashicorp/vault/api/secret.go
+++ b/vendor/github.com/hashicorp/vault/api/secret.go
@@ -1,10 +1,13 @@
package api
import (
+ "fmt"
"io"
"time"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/parseutil"
)
// Secret is the structure returned for every secret within Vault.
@@ -35,11 +38,194 @@ type Secret struct {
WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"`
}
+// TokenID returns the standardized token ID (token) for the given secret.
+func (s *Secret) TokenID() (string, error) {
+ if s == nil {
+ return "", nil
+ }
+
+ if s.Auth != nil && len(s.Auth.ClientToken) > 0 {
+ return s.Auth.ClientToken, nil
+ }
+
+ if s.Data == nil || s.Data["id"] == nil {
+ return "", nil
+ }
+
+ id, ok := s.Data["id"].(string)
+ if !ok {
+ return "", fmt.Errorf("token found but in the wrong format")
+ }
+
+ return id, nil
+}
+
+// TokenAccessor returns the standardized token accessor for the given secret.
+// If the secret is nil or does not contain an accessor, this returns the empty
+// string.
+func (s *Secret) TokenAccessor() (string, error) {
+ if s == nil {
+ return "", nil
+ }
+
+ if s.Auth != nil && len(s.Auth.Accessor) > 0 {
+ return s.Auth.Accessor, nil
+ }
+
+ if s.Data == nil || s.Data["accessor"] == nil {
+ return "", nil
+ }
+
+ accessor, ok := s.Data["accessor"].(string)
+ if !ok {
+ return "", fmt.Errorf("token found but in the wrong format")
+ }
+
+ return accessor, nil
+}
+
+// TokenRemainingUses returns the standardized remaining uses for the given
+// secret. If the secret is nil or does not contain the "num_uses", this
+// returns -1. On error, this will return -1 and a non-nil error.
+func (s *Secret) TokenRemainingUses() (int, error) {
+ if s == nil || s.Data == nil || s.Data["num_uses"] == nil {
+ return -1, nil
+ }
+
+ uses, err := parseutil.ParseInt(s.Data["num_uses"])
+ if err != nil {
+ return 0, err
+ }
+
+ return int(uses), nil
+}
+
+// TokenPolicies returns the standardized list of policies for the given secret.
+// If the secret is nil or does not contain any policies, this returns nil.
+func (s *Secret) TokenPolicies() ([]string, error) {
+ if s == nil {
+ return nil, nil
+ }
+
+ if s.Auth != nil && len(s.Auth.Policies) > 0 {
+ return s.Auth.Policies, nil
+ }
+
+ if s.Data == nil || s.Data["policies"] == nil {
+ return nil, nil
+ }
+
+ sList, ok := s.Data["policies"].([]string)
+ if ok {
+ return sList, nil
+ }
+
+ list, ok := s.Data["policies"].([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("unable to convert token policies to expected format")
+ }
+
+ policies := make([]string, len(list))
+ for i := range list {
+ p, ok := list[i].(string)
+ if !ok {
+ return nil, fmt.Errorf("unable to convert policy %v to string", list[i])
+ }
+ policies[i] = p
+ }
+
+ return policies, nil
+}
+
+// TokenMetadata returns the map of metadata associated with this token, if any
+// exists. If the secret is nil or does not contain the "metadata" key, this
+// returns nil.
+func (s *Secret) TokenMetadata() (map[string]string, error) {
+ if s == nil {
+ return nil, nil
+ }
+
+ if s.Auth != nil && len(s.Auth.Metadata) > 0 {
+ return s.Auth.Metadata, nil
+ }
+
+ if s.Data == nil || (s.Data["metadata"] == nil && s.Data["meta"] == nil) {
+ return nil, nil
+ }
+
+ data, ok := s.Data["metadata"].(map[string]interface{})
+ if !ok {
+ data, ok = s.Data["meta"].(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("unable to convert metadata field to expected format")
+ }
+ }
+
+ metadata := make(map[string]string, len(data))
+ for k, v := range data {
+ typed, ok := v.(string)
+ if !ok {
+ return nil, fmt.Errorf("unable to convert metadata value %v to string", v)
+ }
+ metadata[k] = typed
+ }
+
+ return metadata, nil
+}
+
+// TokenIsRenewable returns the standardized token renewability for the given
+// secret. If the secret is nil or does not contain the "renewable" key, this
+// returns false.
+func (s *Secret) TokenIsRenewable() (bool, error) {
+ if s == nil {
+ return false, nil
+ }
+
+ if s.Auth != nil && s.Auth.Renewable {
+ return s.Auth.Renewable, nil
+ }
+
+ if s.Data == nil || s.Data["renewable"] == nil {
+ return false, nil
+ }
+
+ renewable, err := parseutil.ParseBool(s.Data["renewable"])
+ if err != nil {
+ return false, errwrap.Wrapf("could not convert renewable value to a boolean: {{err}}", err)
+ }
+
+ return renewable, nil
+}
+
+// TokenTTL returns the standardized remaining token TTL for the given secret.
+// If the secret is nil or does not contain a TTL, this returns 0.
+func (s *Secret) TokenTTL() (time.Duration, error) {
+ if s == nil {
+ return 0, nil
+ }
+
+ if s.Auth != nil && s.Auth.LeaseDuration > 0 {
+ return time.Duration(s.Auth.LeaseDuration) * time.Second, nil
+ }
+
+ if s.Data == nil || s.Data["ttl"] == nil {
+ return 0, nil
+ }
+
+ ttl, err := parseutil.ParseDurationSecond(s.Data["ttl"])
+ if err != nil {
+ return 0, err
+ }
+
+ return ttl, nil
+}
+
// SecretWrapInfo contains wrapping information if we have it. If what is
// contained is an authentication token, the accessor for the token will be
// available in WrappedAccessor.
type SecretWrapInfo struct {
Token string `json:"token"`
+ Accessor string `json:"accessor"`
TTL int `json:"ttl"`
CreationTime time.Time `json:"creation_time"`
CreationPath string `json:"creation_path"`
diff --git a/vendor/github.com/hashicorp/vault/api/secret_test.go b/vendor/github.com/hashicorp/vault/api/secret_test.go
deleted file mode 100644
index 3b64966..0000000
--- a/vendor/github.com/hashicorp/vault/api/secret_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package api
-
-import (
- "reflect"
- "strings"
- "testing"
- "time"
-)
-
-func TestParseSecret(t *testing.T) {
- raw := strings.TrimSpace(`
-{
- "lease_id": "foo",
- "renewable": true,
- "lease_duration": 10,
- "data": {
- "key": "value"
- },
- "warnings": [
- "a warning!"
- ],
- "wrap_info": {
- "token": "token",
- "ttl": 60,
- "creation_time": "2016-06-07T15:52:10-04:00",
- "wrapped_accessor": "abcd1234"
- }
-}`)
-
- rawTime, _ := time.Parse(time.RFC3339, "2016-06-07T15:52:10-04:00")
-
- secret, err := ParseSecret(strings.NewReader(raw))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := &Secret{
- LeaseID: "foo",
- Renewable: true,
- LeaseDuration: 10,
- Data: map[string]interface{}{
- "key": "value",
- },
- Warnings: []string{
- "a warning!",
- },
- WrapInfo: &SecretWrapInfo{
- Token: "token",
- TTL: 60,
- CreationTime: rawTime,
- WrappedAccessor: "abcd1234",
- },
- }
- if !reflect.DeepEqual(secret, expected) {
- t.Fatalf("bad:\ngot\n%#v\nexpected\n%#v\n", secret, expected)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go
index 729fd99..8027001 100644
--- a/vendor/github.com/hashicorp/vault/api/ssh_agent.go
+++ b/vendor/github.com/hashicorp/vault/api/ssh_agent.go
@@ -7,6 +7,7 @@ import (
"io/ioutil"
"os"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-rootcerts"
@@ -41,16 +42,16 @@ type SSHHelper struct {
type SSHVerifyResponse struct {
// Usually empty. If the request OTP is echo request message, this will
// be set to the corresponding echo response message.
- Message string `json:"message" structs:"message" mapstructure:"message"`
+ Message string `json:"message" mapstructure:"message"`
// Username associated with the OTP
- Username string `json:"username" structs:"username" mapstructure:"username"`
+ Username string `json:"username" mapstructure:"username"`
// IP associated with the OTP
- IP string `json:"ip" structs:"ip" mapstructure:"ip"`
+ IP string `json:"ip" mapstructure:"ip"`
// Name of the role against which the OTP was issued
- RoleName string `json:"role_name" structs:"role_name" mapstructure:"role_name"`
+ RoleName string `json:"role_name" mapstructure:"role_name"`
}
// SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file.
@@ -141,12 +142,12 @@ func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) {
func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) {
root, err := hcl.Parse(string(contents))
if err != nil {
- return nil, fmt.Errorf("ssh_helper: error parsing config: %s", err)
+ return nil, errwrap.Wrapf("error parsing config: {{err}}", err)
}
list, ok := root.Node.(*ast.ObjectList)
if !ok {
- return nil, fmt.Errorf("ssh_helper: error parsing config: file doesn't contain a root object")
+ return nil, fmt.Errorf("error parsing config: file doesn't contain a root object")
}
valid := []string{
@@ -170,7 +171,7 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) {
}
if c.VaultAddr == "" {
- return nil, fmt.Errorf("ssh_helper: missing config 'vault_addr'")
+ return nil, fmt.Errorf(`missing config "vault_addr"`)
}
return &c, nil
}
@@ -248,8 +249,7 @@ func checkHCLKeys(node ast.Node, valid []string) error {
for _, item := range list.Items {
key := item.Keys[0].Token.Value().(string)
if _, ok := validMap[key]; !ok {
- result = multierror.Append(result, fmt.Errorf(
- "invalid key '%s' on line %d", key, item.Assign.Line))
+ result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line))
}
}
diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent_test.go b/vendor/github.com/hashicorp/vault/api/ssh_agent_test.go
deleted file mode 100644
index dfef4b8..0000000
--- a/vendor/github.com/hashicorp/vault/api/ssh_agent_test.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package api
-
-import (
- "fmt"
- "net/http"
- "strings"
- "testing"
-)
-
-func TestSSH_CreateTLSClient(t *testing.T) {
- // load the default configuration
- config, err := LoadSSHHelperConfig("./test-fixtures/agent_config.hcl")
- if err != nil {
- panic(fmt.Sprintf("error loading agent's config file: %s", err))
- }
-
- client, err := config.NewClient()
- if err != nil {
- panic(fmt.Sprintf("error creating the client: %s", err))
- }
-
- // Provide a certificate and enforce setting of transport
- config.CACert = "./test-fixtures/vault.crt"
-
- client, err = config.NewClient()
- if err != nil {
- panic(fmt.Sprintf("error creating the client: %s", err))
- }
- if client.config.HttpClient.Transport == nil {
- panic(fmt.Sprintf("error creating client with TLS transport"))
- }
-}
-
-func TestSSH_CreateTLSClient_tlsServerName(t *testing.T) {
- // Ensure that the HTTP client is associated with the configured TLS server name.
- var tlsServerName = "tls.server.name"
-
- config, err := ParseSSHHelperConfig(fmt.Sprintf(`
-vault_addr = "1.2.3.4"
-tls_server_name = "%s"
-`, tlsServerName))
- if err != nil {
- panic(fmt.Sprintf("error loading config: %s", err))
- }
-
- client, err := config.NewClient()
- if err != nil {
- panic(fmt.Sprintf("error creating the client: %s", err))
- }
-
- actualTLSServerName := client.config.HttpClient.Transport.(*http.Transport).TLSClientConfig.ServerName
- if actualTLSServerName != tlsServerName {
- panic(fmt.Sprintf("incorrect TLS server name. expected: %s actual: %s", tlsServerName, actualTLSServerName))
- }
-}
-
-func TestParseSSHHelperConfig(t *testing.T) {
- config, err := ParseSSHHelperConfig(`
- vault_addr = "1.2.3.4"
-`)
- if err != nil {
- t.Fatal(err)
- }
-
- if config.SSHMountPoint != SSHHelperDefaultMountPoint {
- t.Errorf("expected %q to be %q", config.SSHMountPoint, SSHHelperDefaultMountPoint)
- }
-}
-
-func TestParseSSHHelperConfig_missingVaultAddr(t *testing.T) {
- _, err := ParseSSHHelperConfig("")
- if err == nil {
- t.Fatal("expected error")
- }
-
- if !strings.Contains(err.Error(), "ssh_helper: missing config 'vault_addr'") {
- t.Errorf("bad error: %s", err)
- }
-}
-
-func TestParseSSHHelperConfig_badKeys(t *testing.T) {
- _, err := ParseSSHHelperConfig(`
-vault_addr = "1.2.3.4"
-nope = "bad"
-`)
- if err == nil {
- t.Fatal("expected error")
- }
-
- if !strings.Contains(err.Error(), "ssh_helper: invalid key 'nope' on line 3") {
- t.Errorf("bad error: %s", err)
- }
-}
-
-func TestParseSSHHelperConfig_tlsServerName(t *testing.T) {
- var tlsServerName = "tls.server.name"
-
- config, err := ParseSSHHelperConfig(fmt.Sprintf(`
-vault_addr = "1.2.3.4"
-tls_server_name = "%s"
-`, tlsServerName))
-
- if err != nil {
- t.Fatal(err)
- }
-
- if config.TLSServerName != tlsServerName {
- t.Errorf("incorrect TLS server name. expected: %s actual: %s", tlsServerName, config.TLSServerName)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go
index 89f2141..05cd756 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_audit.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go
@@ -3,7 +3,6 @@ package api
import (
"fmt"
- "github.com/fatih/structs"
"github.com/mitchellh/mapstructure"
)
@@ -83,10 +82,8 @@ func (c *Sys) EnableAudit(
}
func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) error {
- body := structs.Map(options)
-
r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit/%s", path))
- if err := r.SetJSONBody(body); err != nil {
+ if err := r.SetJSONBody(options); err != nil {
return err
}
@@ -113,10 +110,10 @@ func (c *Sys) DisableAudit(path string) error {
// documentation. Please refer to that documentation for more details.
type EnableAuditOptions struct {
- Type string `json:"type" structs:"type"`
- Description string `json:"description" structs:"description"`
- Options map[string]string `json:"options" structs:"options"`
- Local bool `json:"local" structs:"local"`
+ Type string `json:"type"`
+ Description string `json:"description"`
+ Options map[string]string `json:"options"`
+ Local bool `json:"local"`
}
type Audit struct {
diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go
index 32f4bbd..0b1a319 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_auth.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go
@@ -3,7 +3,6 @@ package api
import (
"fmt"
- "github.com/fatih/structs"
"github.com/mitchellh/mapstructure"
)
@@ -52,10 +51,8 @@ func (c *Sys) EnableAuth(path, authType, desc string) error {
}
func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) error {
- body := structs.Map(options)
-
r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/auth/%s", path))
- if err := r.SetJSONBody(body); err != nil {
+ if err := r.SetJSONBody(options); err != nil {
return err
}
@@ -78,31 +75,45 @@ func (c *Sys) DisableAuth(path string) error {
}
// Structures for the requests/resposne are all down here. They aren't
-// individually documentd because the map almost directly to the raw HTTP API
+// individually documented because the map almost directly to the raw HTTP API
// documentation. Please refer to that documentation for more details.
type EnableAuthOptions struct {
- Type string `json:"type" structs:"type"`
- Description string `json:"description" structs:"description"`
- Config AuthConfigInput `json:"config" structs:"config"`
- Local bool `json:"local" structs:"local"`
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty"`
+ Type string `json:"type"`
+ Description string `json:"description"`
+ Config AuthConfigInput `json:"config"`
+ Local bool `json:"local"`
+ PluginName string `json:"plugin_name,omitempty"`
+ SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"`
+ Options map[string]string `json:"options" mapstructure:"options"`
}
type AuthConfigInput struct {
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"`
+ AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"`
+ ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"`
+ PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"`
}
type AuthMount struct {
- Type string `json:"type" structs:"type" mapstructure:"type"`
- Description string `json:"description" structs:"description" mapstructure:"description"`
- Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"`
- Config AuthConfigOutput `json:"config" structs:"config" mapstructure:"config"`
- Local bool `json:"local" structs:"local" mapstructure:"local"`
+ Type string `json:"type" mapstructure:"type"`
+ Description string `json:"description" mapstructure:"description"`
+ Accessor string `json:"accessor" mapstructure:"accessor"`
+ Config AuthConfigOutput `json:"config" mapstructure:"config"`
+ Local bool `json:"local" mapstructure:"local"`
+ SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"`
+ Options map[string]string `json:"options" mapstructure:"options"`
}
type AuthConfigOutput struct {
- DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"`
+ AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"`
+ ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"`
+ PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go
index 8dc2095..adb5496 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go
@@ -1,7 +1,15 @@
package api
func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) {
- r := c.c.NewRequest("GET", "/v1/sys/generate-root/attempt")
+ return c.generateRootStatusCommon("/v1/sys/generate-root/attempt")
+}
+
+func (c *Sys) GenerateDROperationTokenStatus() (*GenerateRootStatusResponse, error) {
+ return c.generateRootStatusCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt")
+}
+
+func (c *Sys) generateRootStatusCommon(path string) (*GenerateRootStatusResponse, error) {
+ r := c.c.NewRequest("GET", path)
resp, err := c.c.RawRequest(r)
if err != nil {
return nil, err
@@ -14,12 +22,20 @@ func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) {
}
func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) {
+ return c.generateRootInitCommon("/v1/sys/generate-root/attempt", otp, pgpKey)
+}
+
+func (c *Sys) GenerateDROperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) {
+ return c.generateRootInitCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt", otp, pgpKey)
+}
+
+func (c *Sys) generateRootInitCommon(path, otp, pgpKey string) (*GenerateRootStatusResponse, error) {
body := map[string]interface{}{
"otp": otp,
"pgp_key": pgpKey,
}
- r := c.c.NewRequest("PUT", "/v1/sys/generate-root/attempt")
+ r := c.c.NewRequest("PUT", path)
if err := r.SetJSONBody(body); err != nil {
return nil, err
}
@@ -36,7 +52,15 @@ func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse,
}
func (c *Sys) GenerateRootCancel() error {
- r := c.c.NewRequest("DELETE", "/v1/sys/generate-root/attempt")
+ return c.generateRootCancelCommon("/v1/sys/generate-root/attempt")
+}
+
+func (c *Sys) GenerateDROperationTokenCancel() error {
+ return c.generateRootCancelCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt")
+}
+
+func (c *Sys) generateRootCancelCommon(path string) error {
+ r := c.c.NewRequest("DELETE", path)
resp, err := c.c.RawRequest(r)
if err == nil {
defer resp.Body.Close()
@@ -45,12 +69,20 @@ func (c *Sys) GenerateRootCancel() error {
}
func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) {
+ return c.generateRootUpdateCommon("/v1/sys/generate-root/update", shard, nonce)
+}
+
+func (c *Sys) GenerateDROperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) {
+ return c.generateRootUpdateCommon("/v1/sys/replication/dr/secondary/generate-operation-token/update", shard, nonce)
+}
+
+func (c *Sys) generateRootUpdateCommon(path, shard, nonce string) (*GenerateRootStatusResponse, error) {
body := map[string]interface{}{
"key": shard,
"nonce": nonce,
}
- r := c.c.NewRequest("PUT", "/v1/sys/generate-root/update")
+ r := c.c.NewRequest("PUT", path)
if err := r.SetJSONBody(body); err != nil {
return nil, err
}
@@ -67,11 +99,12 @@ func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusRespon
}
type GenerateRootStatusResponse struct {
- Nonce string
- Started bool
- Progress int
- Required int
- Complete bool
+ Nonce string `json:"nonce"`
+ Started bool `json:"started"`
+ Progress int `json:"progress"`
+ Required int `json:"required"`
+ Complete bool `json:"complete"`
+ EncodedToken string `json:"encoded_token"`
EncodedRootToken string `json:"encoded_root_token"`
PGPFingerprint string `json:"pgp_fingerprint"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go
index 822354c..82fd1f6 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_health.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_health.go
@@ -5,8 +5,10 @@ func (c *Sys) Health() (*HealthResponse, error) {
// If the code is 400 or above it will automatically turn into an error,
// but the sys/health API defaults to returning 5xx when not sealed or
// inited, so we force this code to be something else so we parse correctly
- r.Params.Add("sealedcode", "299")
r.Params.Add("uninitcode", "299")
+ r.Params.Add("sealedcode", "299")
+ r.Params.Add("standbycode", "299")
+ r.Params.Add("drsecondarycode", "299")
resp, err := c.c.RawRequest(r)
if err != nil {
return nil, err
@@ -19,11 +21,13 @@ func (c *Sys) Health() (*HealthResponse, error) {
}
type HealthResponse struct {
- Initialized bool `json:"initialized"`
- Sealed bool `json:"sealed"`
- Standby bool `json:"standby"`
- ServerTimeUTC int64 `json:"server_time_utc"`
- Version string `json:"version"`
- ClusterName string `json:"cluster_name,omitempty"`
- ClusterID string `json:"cluster_id,omitempty"`
+ Initialized bool `json:"initialized"`
+ Sealed bool `json:"sealed"`
+ Standby bool `json:"standby"`
+ ReplicationPerformanceMode string `json:"replication_performance_mode"`
+ ReplicationDRMode string `json:"replication_dr_mode"`
+ ServerTimeUTC int64 `json:"server_time_utc"`
+ Version string `json:"version"`
+ ClusterName string `json:"cluster_name,omitempty"`
+ ClusterID string `json:"cluster_id,omitempty"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
index 091a8f6..8ac5b45 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
@@ -3,7 +3,6 @@ package api
import (
"fmt"
- "github.com/fatih/structs"
"github.com/mitchellh/mapstructure"
)
@@ -44,10 +43,8 @@ func (c *Sys) ListMounts() (map[string]*MountOutput, error) {
}
func (c *Sys) Mount(path string, mountInfo *MountInput) error {
- body := structs.Map(mountInfo)
-
r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s", path))
- if err := r.SetJSONBody(body); err != nil {
+ if err := r.SetJSONBody(mountInfo); err != nil {
return err
}
@@ -88,9 +85,8 @@ func (c *Sys) Remount(from, to string) error {
}
func (c *Sys) TuneMount(path string, config MountConfigInput) error {
- body := structs.Map(config)
r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s/tune", path))
- if err := r.SetJSONBody(body); err != nil {
+ if err := r.SetJSONBody(config); err != nil {
return err
}
@@ -120,31 +116,44 @@ func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) {
}
type MountInput struct {
- Type string `json:"type" structs:"type"`
- Description string `json:"description" structs:"description"`
- Config MountConfigInput `json:"config" structs:"config"`
- Local bool `json:"local" structs:"local"`
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name"`
+ Type string `json:"type"`
+ Description string `json:"description"`
+ Config MountConfigInput `json:"config"`
+ Options map[string]string `json:"options"`
+ Local bool `json:"local"`
+ PluginName string `json:"plugin_name,omitempty"`
+ SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"`
}
type MountConfigInput struct {
- DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
- ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ Options map[string]string `json:"options" mapstructure:"options"`
+ DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"`
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"`
+ AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"`
+ ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"`
+ PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"`
}
type MountOutput struct {
- Type string `json:"type" structs:"type"`
- Description string `json:"description" structs:"description"`
- Accessor string `json:"accessor" structs:"accessor"`
- Config MountConfigOutput `json:"config" structs:"config"`
- Local bool `json:"local" structs:"local"`
+ Type string `json:"type"`
+ Description string `json:"description"`
+ Accessor string `json:"accessor"`
+ Config MountConfigOutput `json:"config"`
+ Options map[string]string `json:"options"`
+ Local bool `json:"local"`
+ SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"`
}
type MountConfigOutput struct {
- DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
- ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"`
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"`
+ AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"`
+ ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"`
+ PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go
new file mode 100644
index 0000000..8183b10
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go
@@ -0,0 +1,117 @@
+package api
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// ListPluginsInput is used as input to the ListPlugins function.
+type ListPluginsInput struct{}
+
+// ListPluginsResponse is the response from the ListPlugins call.
+type ListPluginsResponse struct {
+ // Names is the list of names of the plugins.
+ Names []string
+}
+
+// ListPlugins lists all plugins in the catalog and returns their names as a
+// list of strings.
+func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) {
+ path := "/v1/sys/plugins/catalog"
+ req := c.c.NewRequest("LIST", path)
+ resp, err := c.c.RawRequest(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result struct {
+ Data struct {
+ Keys []string `json:"keys"`
+ } `json:"data"`
+ }
+ if err := resp.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &ListPluginsResponse{Names: result.Data.Keys}, nil
+}
+
+// GetPluginInput is used as input to the GetPlugin function.
+type GetPluginInput struct {
+ Name string `json:"-"`
+}
+
+// GetPluginResponse is the response from the GetPlugin call.
+type GetPluginResponse struct {
+ Args []string `json:"args"`
+ Builtin bool `json:"builtin"`
+ Command string `json:"command"`
+ Name string `json:"name"`
+ SHA256 string `json:"sha256"`
+}
+
+func (c *Sys) GetPlugin(i *GetPluginInput) (*GetPluginResponse, error) {
+ path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name)
+ req := c.c.NewRequest(http.MethodGet, path)
+ resp, err := c.c.RawRequest(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result GetPluginResponse
+ err = resp.DecodeJSON(&result)
+ if err != nil {
+ return nil, err
+ }
+ return &result, err
+}
+
+// RegisterPluginInput is used as input to the RegisterPlugin function.
+type RegisterPluginInput struct {
+ // Name is the name of the plugin. Required.
+ Name string `json:"-"`
+
+ // Args is the list of args to spawn the process with.
+ Args []string `json:"args,omitempty"`
+
+ // Command is the command to run.
+ Command string `json:"command,omitempty"`
+
+ // SHA256 is the shasum of the plugin.
+ SHA256 string `json:"sha256,omitempty"`
+}
+
+// RegisterPlugin registers the plugin with the given information.
+func (c *Sys) RegisterPlugin(i *RegisterPluginInput) error {
+ path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name)
+ req := c.c.NewRequest(http.MethodPut, path)
+ if err := req.SetJSONBody(i); err != nil {
+ return err
+ }
+
+ resp, err := c.c.RawRequest(req)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+// DeregisterPluginInput is used as input to the DeregisterPlugin function.
+type DeregisterPluginInput struct {
+ // Name is the name of the plugin. Required.
+ Name string `json:"-"`
+}
+
+// DeregisterPlugin removes the plugin with the given name from the plugin
+// catalog.
+func (c *Sys) DeregisterPlugin(i *DeregisterPluginInput) error {
+ path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name)
+ req := c.c.NewRequest(http.MethodDelete, path)
+ resp, err := c.c.RawRequest(req)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go
index ba0e17f..9c9d9c0 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_policy.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_policy.go
@@ -50,12 +50,14 @@ func (c *Sys) GetPolicy(name string) (string, error) {
return "", err
}
- var ok bool
- if _, ok = result["rules"]; !ok {
- return "", fmt.Errorf("rules not found in response")
+ if rulesRaw, ok := result["rules"]; ok {
+ return rulesRaw.(string), nil
+ }
+ if policyRaw, ok := result["policy"]; ok {
+ return policyRaw.(string), nil
}
- return result["rules"].(string), nil
+ return "", fmt.Errorf("no policy found in response")
}
func (c *Sys) PutPolicy(name, rules string) error {
diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go
index e6d039e..8b2d043 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_rekey.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_rekey.go
@@ -171,32 +171,33 @@ func (c *Sys) RekeyDeleteRecoveryBackup() error {
type RekeyInitRequest struct {
SecretShares int `json:"secret_shares"`
SecretThreshold int `json:"secret_threshold"`
+ StoredShares int `json:"stored_shares"`
PGPKeys []string `json:"pgp_keys"`
Backup bool
}
type RekeyStatusResponse struct {
- Nonce string
- Started bool
- T int
- N int
- Progress int
- Required int
+ Nonce string `json:"nonce"`
+ Started bool `json:"started"`
+ T int `json:"t"`
+ N int `json:"n"`
+ Progress int `json:"progress"`
+ Required int `json:"required"`
PGPFingerprints []string `json:"pgp_fingerprints"`
- Backup bool
+ Backup bool `json:"backup"`
}
type RekeyUpdateResponse struct {
- Nonce string
- Complete bool
- Keys []string
+ Nonce string `json:"nonce"`
+ Complete bool `json:"complete"`
+ Keys []string `json:"keys"`
KeysB64 []string `json:"keys_base64"`
PGPFingerprints []string `json:"pgp_fingerprints"`
- Backup bool
+ Backup bool `json:"backup"`
}
type RekeyRetrieveResponse struct {
- Nonce string
- Keys map[string][]string
+ Nonce string `json:"nonce"`
+ Keys map[string][]string `json:"keys"`
KeysB64 map[string][]string `json:"keys_base64"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go
index 97a49ae..3d594ba 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_seal.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go
@@ -49,12 +49,14 @@ func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) {
}
type SealStatusResponse struct {
- Sealed bool `json:"sealed"`
- T int `json:"t"`
- N int `json:"n"`
- Progress int `json:"progress"`
- Nonce string `json:"nonce"`
- Version string `json:"version"`
- ClusterName string `json:"cluster_name,omitempty"`
- ClusterID string `json:"cluster_id,omitempty"`
+ Type string `json:"type"`
+ Sealed bool `json:"sealed"`
+ T int `json:"t"`
+ N int `json:"n"`
+ Progress int `json:"progress"`
+ Nonce string `json:"nonce"`
+ Version string `json:"version"`
+ ClusterName string `json:"cluster_name,omitempty"`
+ ClusterID string `json:"cluster_id,omitempty"`
+ RecoverySeal bool `json:"recovery_seal"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/cert.pem b/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/cert.pem
deleted file mode 100644
index 942d266..0000000
--- a/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/cert.pem
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw
-MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS
-TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn
-SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi
-YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5
-donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG
-B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1
-MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e
-HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o
-k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x
-OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A
-AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br
-aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs
-X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4
-aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA
-KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN
-QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj
-xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk=
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/key.pem b/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/key.pem
deleted file mode 100644
index add9820..0000000
--- a/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu
-HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA
-6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N
-TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd
-y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2
-DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX
-9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF
-RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd
-rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI
-5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7
-oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ
-GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb
-VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR
-akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI
-FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy
-efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh
-r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ
-0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp
-FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR
-kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT
-UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3
-xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W
-injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU
-2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3
-gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/pkioutput b/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/pkioutput
deleted file mode 100644
index 526ff03..0000000
--- a/vendor/github.com/hashicorp/vault/api/test-fixtures/keys/pkioutput
+++ /dev/null
@@ -1,74 +0,0 @@
-Key Value
-lease_id pki/issue/example-dot-com/d8214077-9976-8c68-9c07-6610da30aea4
-lease_duration 279359999
-lease_renewable false
-certificate -----BEGIN CERTIFICATE-----
-MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw
-MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS
-TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn
-SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi
-YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5
-donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG
-B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1
-MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e
-HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o
-k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x
-OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A
-AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br
-aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs
-X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4
-aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA
-KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN
-QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj
-xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk=
------END CERTIFICATE-----
-issuing_ca -----BEGIN CERTIFICATE-----
-MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
-MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
-Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
-z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
-AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
-6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
-SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
-A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
-7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
-BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
-wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
-U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
-cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
-ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
-t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
-zehNe5dFTjFpylg1o6b8Ow==
------END CERTIFICATE-----
-private_key -----BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu
-HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA
-6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N
-TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd
-y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2
-DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX
-9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF
-RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd
-rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI
-5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7
-oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ
-GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb
-VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR
-akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI
-FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy
-efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh
-r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ
-0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp
-FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR
-kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT
-UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3
-xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W
-injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU
-2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3
-gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4=
------END RSA PRIVATE KEY-----
-private_key_type rsa
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/pkioutput b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/pkioutput
deleted file mode 100644
index 312ae18..0000000
--- a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/pkioutput
+++ /dev/null
@@ -1,74 +0,0 @@
-Key Value
-lease_id pki/root/generate/exported/7bf99d76-dd3e-2c5b-04ce-5253062ad586
-lease_duration 315359999
-lease_renewable false
-certificate -----BEGIN CERTIFICATE-----
-MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
-MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
-Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
-z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
-AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
-6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
-SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
-A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
-7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
-BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
-wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
-U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
-cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
-ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
-t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
-zehNe5dFTjFpylg1o6b8Ow==
------END CERTIFICATE-----
-expiration 1.772072879e+09
-issuing_ca -----BEGIN CERTIFICATE-----
-MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
-MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
-Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
-z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
-AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
-6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
-SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
-A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
-7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
-BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
-wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
-U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
-cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
-ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
-t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
-zehNe5dFTjFpylg1o6b8Ow==
------END CERTIFICATE-----
-private_key -----BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p
-t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3
-BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w
-/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv
-0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi
-18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb
-ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn
-8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f
-nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8
-2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t
-grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc
-bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9
-0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN
-ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf
-lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1
-lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj
-AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG
-ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib
-thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU
-4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb
-iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO
-tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y
-LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc
-4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX
-OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8=
------END RSA PRIVATE KEY-----
-private_key_type rsa
-serial_number 6f:98:9d:f8:67:1a:31:e3:27:60:1b:f7:32:f7:53:19:68:a0:c8:9d
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/root.crl b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/root.crl
deleted file mode 100644
index a80c9e4..0000000
--- a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/root.crl
+++ /dev/null
@@ -1,12 +0,0 @@
------BEGIN X509 CRL-----
-MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbRcN
-MTYwMjI5MDIyOTE3WhcNMjUwMTA1MTAyOTE3WjArMCkCFG+YnfhnGjHjJ2Ab9zL3
-UxlooMidFxExNjAyMjgyMTI5MTctMDUwMKAjMCEwHwYDVR0jBBgwFoAUncSzT/6H
-MexyuiU9/7EgHu+ok5swDQYJKoZIhvcNAQELBQADggEBAG9YDXpNe4LJroKZmVCn
-HqMhW8eyzyaPak2nPPGCVUnc6vt8rlBYQU+xlBizD6xatZQDMPgrT8sBl9W3ysXk
-RUlliHsT/SHddMz5dAZsBPRMJ7pYWLTx8jI4w2WRfbSyI4bY/6qTRNkEBUv+Fk8J
-xvwB89+EM0ENcVMhv9ghsUA8h7kOg673HKwRstLDAzxS/uLmEzFjj8SV2m5DbV2Y
-UUCKRSV20/kxJMIC9x2KikZhwOSyv1UE1otD+RQvbfAoZPUDmvp2FR/E0NGjBBOg
-1TtCPRrl63cjqU3s8KQ4uah9Vj+Cwcu9n/yIKKtNQq4NKHvagv8GlUsoJ4BdAxCw
-IA0=
------END X509 CRL-----
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcacert.pem b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcacert.pem
deleted file mode 100644
index dcb307a..0000000
--- a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcacert.pem
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
-MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
-Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
-z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
-AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
-6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
-SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
-A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
-7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
-BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
-wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
-U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
-cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
-ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
-t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
-zehNe5dFTjFpylg1o6b8Ow==
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcakey.pem b/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcakey.pem
deleted file mode 100644
index e950da5..0000000
--- a/vendor/github.com/hashicorp/vault/api/test-fixtures/root/rootcakey.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p
-t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3
-BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w
-/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv
-0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi
-18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb
-ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn
-8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f
-nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8
-2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t
-grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc
-bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9
-0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN
-ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf
-lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1
-lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj
-AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG
-ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib
-thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU
-4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb
-iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO
-tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y
-LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc
-4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX
-OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/api/test-fixtures/vault.crt b/vendor/github.com/hashicorp/vault/api/test-fixtures/vault.crt
deleted file mode 100644
index 3e34cf1..0000000
--- a/vendor/github.com/hashicorp/vault/api/test-fixtures/vault.crt
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEEjCCAvqgAwIBAgIJAM7PFmA6Y+KeMA0GCSqGSIb3DQEBCwUAMIGWMQswCQYD
-VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFDASBgNVBAcMC1N0b255IEJyb29r
-MRIwEAYDVQQKDAlIYXNoaUNvcnAxDjAMBgNVBAsMBVZhdWx0MRUwEwYDVQQDDAxW
-aXNoYWwgTmF5YWsxIzAhBgkqhkiG9w0BCQEWFHZpc2hhbEBoYXNoaWNvcnAuY29t
-MB4XDTE1MDgwNzE5MTk1OFoXDTE1MDkwNjE5MTk1OFowgZYxCzAJBgNVBAYTAlVT
-MREwDwYDVQQIDAhOZXcgWW9yazEUMBIGA1UEBwwLU3RvbnkgQnJvb2sxEjAQBgNV
-BAoMCUhhc2hpQ29ycDEOMAwGA1UECwwFVmF1bHQxFTATBgNVBAMMDFZpc2hhbCBO
-YXlhazEjMCEGCSqGSIb3DQEJARYUdmlzaGFsQGhhc2hpY29ycC5jb20wggEiMA0G
-CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCcGlPKIrsq5sDJAUB7mtLjnjbcfR0b
-dX1sDHUaTdT+2YBq0JvtoLZOmKw1iVwsMBhaLeXwnKP/O/n67sE8zvZPsuU3REw1
-NTjPof8IbepkENWNxR68KoSB2Vn5r4KiO3ux+KbkXssrZB62+k9khj0e7qIiwyZP
-y5+RQPOL2ESmX5DznX+90vH4mzAEF654PbXFI/qOBZcWvWZJ37i+lHkeyCqcB+sm
-5o5+zd1ua8jVlN0eLjyqa7FDvIuXPAFEX+r5DVQgIvS2++YaFRqTFCIxRXdDQXdw
-1xDMCuG1w4PGVWf3TtlpHeGSIU07DdrCgXsvIRYfW++aZ2pvXwJYCr8hAgMBAAGj
-YTBfMA8GA1UdEQQIMAaHBKwYFugwHQYDVR0OBBYEFPl+AkButpRfbblZE9Jb3xNj
-AyhkMB8GA1UdIwQYMBaAFPl+AkButpRfbblZE9Jb3xNjAyhkMAwGA1UdEwQFMAMB
-Af8wDQYJKoZIhvcNAQELBQADggEBADdIyyBJ3BVghW1shhxYsqQgg/gj2TagpO1P
-ulGNzS0aCfB4tzMD4MGWm7cTlL6QW9W6r9OuWKCd1ADherIX9j0gtVWgIMtWGx+i
-NbHrYin1xHr4rkB7/f6veCiJ3CtzBC9P/rEI6keyfOn1BfQBsOxfo3oGe/HDlSzD
-lpu0GlQECjTXD7dd4jrD0T/wdRQI0BmxcYjn9cZLgoJHtLHZwaS16TGVmKs4iRAW
-V9Aw5hLK4jJ59IID830/ly+Ndfc//QGgdE5PM44OrvVFO3Q8+zs7pwr1ql7uQWew
-MSuDfbL7EcEGajD/o085sj2u4xVUfkVBW+3TQvs4/pHYOxlhPjI=
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/audit/audit.go b/vendor/github.com/hashicorp/vault/audit/audit.go
deleted file mode 100644
index b96391c..0000000
--- a/vendor/github.com/hashicorp/vault/audit/audit.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package audit
-
-import (
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-// Backend interface must be implemented for an audit
-// mechanism to be made available. Audit backends can be enabled to
-// sink information to different backends such as logs, file, databases,
-// or other external services.
-type Backend interface {
- // LogRequest is used to synchronously log a request. This is done after the
- // request is authorized but before the request is executed. The arguments
- // MUST not be modified in anyway. They should be deep copied if this is
- // a possibility.
- LogRequest(*logical.Auth, *logical.Request, error) error
-
- // LogResponse is used to synchronously log a response. This is done after
- // the request is processed but before the response is sent. The arguments
- // MUST not be modified in anyway. They should be deep copied if this is
- // a possibility.
- LogResponse(*logical.Auth, *logical.Request, *logical.Response, error) error
-
- // GetHash is used to return the given data with the backend's hash,
- // so that a caller can determine if a value in the audit log matches
- // an expected plaintext value
- GetHash(string) (string, error)
-
- // Reload is called on SIGHUP for supporting backends.
- Reload() error
-
- // Invalidate is called for path invalidation
- Invalidate()
-}
-
-type BackendConfig struct {
- // The view to store the salt
- SaltView logical.Storage
-
- // The salt config that should be used for any secret obfuscation
- SaltConfig *salt.Config
-
- // Config is the opaque user configuration provided when mounting
- Config map[string]string
-}
-
-// Factory is the factory function to create an audit backend.
-type Factory func(*BackendConfig) (Backend, error)
diff --git a/vendor/github.com/hashicorp/vault/audit/format.go b/vendor/github.com/hashicorp/vault/audit/format.go
deleted file mode 100644
index 18eb254..0000000
--- a/vendor/github.com/hashicorp/vault/audit/format.go
+++ /dev/null
@@ -1,437 +0,0 @@
-package audit
-
-import (
- "fmt"
- "io"
- "strings"
- "time"
-
- "github.com/SermoDigital/jose/jws"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
- "github.com/mitchellh/copystructure"
-)
-
-type AuditFormatWriter interface {
- WriteRequest(io.Writer, *AuditRequestEntry) error
- WriteResponse(io.Writer, *AuditResponseEntry) error
- Salt() (*salt.Salt, error)
-}
-
-// AuditFormatter implements the Formatter interface, and allows the underlying
-// marshaller to be swapped out
-type AuditFormatter struct {
- AuditFormatWriter
-}
-
-func (f *AuditFormatter) FormatRequest(
- w io.Writer,
- config FormatterConfig,
- auth *logical.Auth,
- req *logical.Request,
- inErr error) error {
-
- if req == nil {
- return fmt.Errorf("request to request-audit a nil request")
- }
-
- if w == nil {
- return fmt.Errorf("writer for audit request is nil")
- }
-
- if f.AuditFormatWriter == nil {
- return fmt.Errorf("no format writer specified")
- }
-
- salt, err := f.Salt()
- if err != nil {
- return errwrap.Wrapf("error fetching salt: {{err}}", err)
- }
-
- if !config.Raw {
- // Before we copy the structure we must nil out some data
- // otherwise we will cause reflection to panic and die
- if req.Connection != nil && req.Connection.ConnState != nil {
- origReq := req
- origState := req.Connection.ConnState
- req.Connection.ConnState = nil
- defer func() {
- origReq.Connection.ConnState = origState
- }()
- }
-
- // Copy the auth structure
- if auth != nil {
- cp, err := copystructure.Copy(auth)
- if err != nil {
- return err
- }
- auth = cp.(*logical.Auth)
- }
-
- cp, err := copystructure.Copy(req)
- if err != nil {
- return err
- }
- req = cp.(*logical.Request)
-
- // Hash any sensitive information
- if auth != nil {
- // Cache and restore accessor in the auth
- var authAccessor string
- if !config.HMACAccessor && auth.Accessor != "" {
- authAccessor = auth.Accessor
- }
- if err := Hash(salt, auth); err != nil {
- return err
- }
- if authAccessor != "" {
- auth.Accessor = authAccessor
- }
- }
-
- // Cache and restore accessor in the request
- var clientTokenAccessor string
- if !config.HMACAccessor && req != nil && req.ClientTokenAccessor != "" {
- clientTokenAccessor = req.ClientTokenAccessor
- }
- if err := Hash(salt, req); err != nil {
- return err
- }
- if clientTokenAccessor != "" {
- req.ClientTokenAccessor = clientTokenAccessor
- }
- }
-
- // If auth is nil, make an empty one
- if auth == nil {
- auth = new(logical.Auth)
- }
- var errString string
- if inErr != nil {
- errString = inErr.Error()
- }
-
- reqEntry := &AuditRequestEntry{
- Type: "request",
- Error: errString,
-
- Auth: AuditAuth{
- ClientToken: auth.ClientToken,
- Accessor: auth.Accessor,
- DisplayName: auth.DisplayName,
- Policies: auth.Policies,
- Metadata: auth.Metadata,
- RemainingUses: req.ClientTokenRemainingUses,
- },
-
- Request: AuditRequest{
- ID: req.ID,
- ClientToken: req.ClientToken,
- ClientTokenAccessor: req.ClientTokenAccessor,
- Operation: req.Operation,
- Path: req.Path,
- Data: req.Data,
- RemoteAddr: getRemoteAddr(req),
- ReplicationCluster: req.ReplicationCluster,
- Headers: req.Headers,
- },
- }
-
- if req.WrapInfo != nil {
- reqEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second)
- }
-
- if !config.OmitTime {
- reqEntry.Time = time.Now().UTC().Format(time.RFC3339)
- }
-
- return f.AuditFormatWriter.WriteRequest(w, reqEntry)
-}
-
-func (f *AuditFormatter) FormatResponse(
- w io.Writer,
- config FormatterConfig,
- auth *logical.Auth,
- req *logical.Request,
- resp *logical.Response,
- inErr error) error {
-
- if req == nil {
- return fmt.Errorf("request to response-audit a nil request")
- }
-
- if w == nil {
- return fmt.Errorf("writer for audit request is nil")
- }
-
- if f.AuditFormatWriter == nil {
- return fmt.Errorf("no format writer specified")
- }
-
- salt, err := f.Salt()
- if err != nil {
- return errwrap.Wrapf("error fetching salt: {{err}}", err)
- }
-
- if !config.Raw {
- // Before we copy the structure we must nil out some data
- // otherwise we will cause reflection to panic and die
- if req.Connection != nil && req.Connection.ConnState != nil {
- origReq := req
- origState := req.Connection.ConnState
- req.Connection.ConnState = nil
- defer func() {
- origReq.Connection.ConnState = origState
- }()
- }
-
- // Copy the auth structure
- if auth != nil {
- cp, err := copystructure.Copy(auth)
- if err != nil {
- return err
- }
- auth = cp.(*logical.Auth)
- }
-
- cp, err := copystructure.Copy(req)
- if err != nil {
- return err
- }
- req = cp.(*logical.Request)
-
- if resp != nil {
- cp, err := copystructure.Copy(resp)
- if err != nil {
- return err
- }
- resp = cp.(*logical.Response)
- }
-
- // Hash any sensitive information
-
- // Cache and restore accessor in the auth
- if auth != nil {
- var accessor string
- if !config.HMACAccessor && auth.Accessor != "" {
- accessor = auth.Accessor
- }
- if err := Hash(salt, auth); err != nil {
- return err
- }
- if accessor != "" {
- auth.Accessor = accessor
- }
- }
-
- // Cache and restore accessor in the request
- var clientTokenAccessor string
- if !config.HMACAccessor && req != nil && req.ClientTokenAccessor != "" {
- clientTokenAccessor = req.ClientTokenAccessor
- }
- if err := Hash(salt, req); err != nil {
- return err
- }
- if clientTokenAccessor != "" {
- req.ClientTokenAccessor = clientTokenAccessor
- }
-
- // Cache and restore accessor in the response
- if resp != nil {
- var accessor, wrappedAccessor string
- if !config.HMACAccessor && resp != nil && resp.Auth != nil && resp.Auth.Accessor != "" {
- accessor = resp.Auth.Accessor
- }
- if !config.HMACAccessor && resp != nil && resp.WrapInfo != nil && resp.WrapInfo.WrappedAccessor != "" {
- wrappedAccessor = resp.WrapInfo.WrappedAccessor
- }
- if err := Hash(salt, resp); err != nil {
- return err
- }
- if accessor != "" {
- resp.Auth.Accessor = accessor
- }
- if wrappedAccessor != "" {
- resp.WrapInfo.WrappedAccessor = wrappedAccessor
- }
- }
- }
-
- // If things are nil, make empty to avoid panics
- if auth == nil {
- auth = new(logical.Auth)
- }
- if resp == nil {
- resp = new(logical.Response)
- }
- var errString string
- if inErr != nil {
- errString = inErr.Error()
- }
-
- var respAuth *AuditAuth
- if resp.Auth != nil {
- respAuth = &AuditAuth{
- ClientToken: resp.Auth.ClientToken,
- Accessor: resp.Auth.Accessor,
- DisplayName: resp.Auth.DisplayName,
- Policies: resp.Auth.Policies,
- Metadata: resp.Auth.Metadata,
- NumUses: resp.Auth.NumUses,
- }
- }
-
- var respSecret *AuditSecret
- if resp.Secret != nil {
- respSecret = &AuditSecret{
- LeaseID: resp.Secret.LeaseID,
- }
- }
-
- var respWrapInfo *AuditResponseWrapInfo
- if resp.WrapInfo != nil {
- token := resp.WrapInfo.Token
- if jwtToken := parseVaultTokenFromJWT(token); jwtToken != nil {
- token = *jwtToken
- }
- respWrapInfo = &AuditResponseWrapInfo{
- TTL: int(resp.WrapInfo.TTL / time.Second),
- Token: token,
- CreationTime: resp.WrapInfo.CreationTime.Format(time.RFC3339Nano),
- CreationPath: resp.WrapInfo.CreationPath,
- WrappedAccessor: resp.WrapInfo.WrappedAccessor,
- }
- }
-
- respEntry := &AuditResponseEntry{
- Type: "response",
- Error: errString,
- Auth: AuditAuth{
- ClientToken: auth.ClientToken,
- Accessor: auth.Accessor,
- DisplayName: auth.DisplayName,
- Policies: auth.Policies,
- Metadata: auth.Metadata,
- RemainingUses: req.ClientTokenRemainingUses,
- },
-
- Request: AuditRequest{
- ID: req.ID,
- ClientToken: req.ClientToken,
- ClientTokenAccessor: req.ClientTokenAccessor,
- Operation: req.Operation,
- Path: req.Path,
- Data: req.Data,
- RemoteAddr: getRemoteAddr(req),
- ReplicationCluster: req.ReplicationCluster,
- Headers: req.Headers,
- },
-
- Response: AuditResponse{
- Auth: respAuth,
- Secret: respSecret,
- Data: resp.Data,
- Redirect: resp.Redirect,
- WrapInfo: respWrapInfo,
- },
- }
-
- if req.WrapInfo != nil {
- respEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second)
- }
-
- if !config.OmitTime {
- respEntry.Time = time.Now().UTC().Format(time.RFC3339)
- }
-
- return f.AuditFormatWriter.WriteResponse(w, respEntry)
-}
-
-// AuditRequest is the structure of a request audit log entry in Audit.
-type AuditRequestEntry struct {
- Time string `json:"time,omitempty"`
- Type string `json:"type"`
- Auth AuditAuth `json:"auth"`
- Request AuditRequest `json:"request"`
- Error string `json:"error"`
-}
-
-// AuditResponseEntry is the structure of a response audit log entry in Audit.
-type AuditResponseEntry struct {
- Time string `json:"time,omitempty"`
- Type string `json:"type"`
- Auth AuditAuth `json:"auth"`
- Request AuditRequest `json:"request"`
- Response AuditResponse `json:"response"`
- Error string `json:"error"`
-}
-
-type AuditRequest struct {
- ID string `json:"id"`
- ReplicationCluster string `json:"replication_cluster,omitempty"`
- Operation logical.Operation `json:"operation"`
- ClientToken string `json:"client_token"`
- ClientTokenAccessor string `json:"client_token_accessor"`
- Path string `json:"path"`
- Data map[string]interface{} `json:"data"`
- RemoteAddr string `json:"remote_address"`
- WrapTTL int `json:"wrap_ttl"`
- Headers map[string][]string `json:"headers"`
-}
-
-type AuditResponse struct {
- Auth *AuditAuth `json:"auth,omitempty"`
- Secret *AuditSecret `json:"secret,omitempty"`
- Data map[string]interface{} `json:"data,omitempty"`
- Redirect string `json:"redirect,omitempty"`
- WrapInfo *AuditResponseWrapInfo `json:"wrap_info,omitempty"`
-}
-
-type AuditAuth struct {
- ClientToken string `json:"client_token"`
- Accessor string `json:"accessor"`
- DisplayName string `json:"display_name"`
- Policies []string `json:"policies"`
- Metadata map[string]string `json:"metadata"`
- NumUses int `json:"num_uses,omitempty"`
- RemainingUses int `json:"remaining_uses,omitempty"`
-}
-
-type AuditSecret struct {
- LeaseID string `json:"lease_id"`
-}
-
-type AuditResponseWrapInfo struct {
- TTL int `json:"ttl"`
- Token string `json:"token"`
- CreationTime string `json:"creation_time"`
- CreationPath string `json:"creation_path"`
- WrappedAccessor string `json:"wrapped_accessor,omitempty"`
-}
-
-// getRemoteAddr safely gets the remote address avoiding a nil pointer
-func getRemoteAddr(req *logical.Request) string {
- if req != nil && req.Connection != nil {
- return req.Connection.RemoteAddr
- }
- return ""
-}
-
-// parseVaultTokenFromJWT returns a string iff the token was a JWT and we could
-// extract the original token ID from inside
-func parseVaultTokenFromJWT(token string) *string {
- if strings.Count(token, ".") != 2 {
- return nil
- }
-
- wt, err := jws.ParseJWT([]byte(token))
- if err != nil || wt == nil {
- return nil
- }
-
- result, _ := wt.Claims().JWTID()
-
- return &result
-}
diff --git a/vendor/github.com/hashicorp/vault/audit/format_json.go b/vendor/github.com/hashicorp/vault/audit/format_json.go
deleted file mode 100644
index 0a5c9d9..0000000
--- a/vendor/github.com/hashicorp/vault/audit/format_json.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package audit
-
-import (
- "encoding/json"
- "fmt"
- "io"
-
- "github.com/hashicorp/vault/helper/salt"
-)
-
-// JSONFormatWriter is an AuditFormatWriter implementation that structures data into
-// a JSON format.
-type JSONFormatWriter struct {
- Prefix string
- SaltFunc func() (*salt.Salt, error)
-}
-
-func (f *JSONFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error {
- if req == nil {
- return fmt.Errorf("request entry was nil, cannot encode")
- }
-
- if len(f.Prefix) > 0 {
- _, err := w.Write([]byte(f.Prefix))
- if err != nil {
- return err
- }
- }
-
- enc := json.NewEncoder(w)
- return enc.Encode(req)
-}
-
-func (f *JSONFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error {
- if resp == nil {
- return fmt.Errorf("response entry was nil, cannot encode")
- }
-
- if len(f.Prefix) > 0 {
- _, err := w.Write([]byte(f.Prefix))
- if err != nil {
- return err
- }
- }
-
- enc := json.NewEncoder(w)
- return enc.Encode(resp)
-}
-
-func (f *JSONFormatWriter) Salt() (*salt.Salt, error) {
- return f.SaltFunc()
-}
diff --git a/vendor/github.com/hashicorp/vault/audit/format_json_test.go b/vendor/github.com/hashicorp/vault/audit/format_json_test.go
deleted file mode 100644
index 688ae3d..0000000
--- a/vendor/github.com/hashicorp/vault/audit/format_json_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package audit
-
-import (
- "bytes"
- "encoding/json"
- "strings"
- "testing"
- "time"
-
- "errors"
-
- "fmt"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-func TestFormatJSON_formatRequest(t *testing.T) {
- salter, err := salt.NewSalt(nil, nil)
- if err != nil {
- t.Fatal(err)
- }
- saltFunc := func() (*salt.Salt, error) {
- return salter, nil
- }
-
- expectedResultStr := fmt.Sprintf(testFormatJSONReqBasicStrFmt, salter.GetIdentifiedHMAC("foo"))
-
- cases := map[string]struct {
- Auth *logical.Auth
- Req *logical.Request
- Err error
- Prefix string
- ExpectedStr string
- }{
- "auth, request": {
- &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", Policies: []string{"root"}},
- &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "/foo",
- Connection: &logical.Connection{
- RemoteAddr: "127.0.0.1",
- },
- WrapInfo: &logical.RequestWrapInfo{
- TTL: 60 * time.Second,
- },
- Headers: map[string][]string{
- "foo": []string{"bar"},
- },
- },
- errors.New("this is an error"),
- "",
- expectedResultStr,
- },
- "auth, request with prefix": {
- &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", Policies: []string{"root"}},
- &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "/foo",
- Connection: &logical.Connection{
- RemoteAddr: "127.0.0.1",
- },
- WrapInfo: &logical.RequestWrapInfo{
- TTL: 60 * time.Second,
- },
- Headers: map[string][]string{
- "foo": []string{"bar"},
- },
- },
- errors.New("this is an error"),
- "@cee: ",
- expectedResultStr,
- },
- }
-
- for name, tc := range cases {
- var buf bytes.Buffer
- formatter := AuditFormatter{
- AuditFormatWriter: &JSONFormatWriter{
- Prefix: tc.Prefix,
- SaltFunc: saltFunc,
- },
- }
- config := FormatterConfig{
- HMACAccessor: false,
- }
- if err := formatter.FormatRequest(&buf, config, tc.Auth, tc.Req, tc.Err); err != nil {
- t.Fatalf("bad: %s\nerr: %s", name, err)
- }
-
- if !strings.HasPrefix(buf.String(), tc.Prefix) {
- t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, expectedResultStr, tc.Prefix)
- }
-
- var expectedjson = new(AuditRequestEntry)
-
- if err := jsonutil.DecodeJSON([]byte(expectedResultStr), &expectedjson); err != nil {
- t.Fatalf("bad json: %s", err)
- }
-
- var actualjson = new(AuditRequestEntry)
- if err := jsonutil.DecodeJSON([]byte(buf.String())[len(tc.Prefix):], &actualjson); err != nil {
- t.Fatalf("bad json: %s", err)
- }
-
- expectedjson.Time = actualjson.Time
-
- expectedBytes, err := json.Marshal(expectedjson)
- if err != nil {
- t.Fatalf("unable to marshal json: %s", err)
- }
-
- if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(expectedBytes)) {
- t.Fatalf(
- "bad: %s\nResult:\n\n'%s'\n\nExpected:\n\n'%s'",
- name, buf.String(), string(expectedBytes))
- }
- }
-}
-
-const testFormatJSONReqBasicStrFmt = `{"time":"2015-08-05T13:45:46Z","type":"request","auth":{"client_token":"%s","accessor":"bar","display_name":"testtoken","policies":["root"],"metadata":null},"request":{"operation":"update","path":"/foo","data":null,"wrap_ttl":60,"remote_address":"127.0.0.1","headers":{"foo":["bar"]}},"error":"this is an error"}
-`
diff --git a/vendor/github.com/hashicorp/vault/audit/format_jsonx.go b/vendor/github.com/hashicorp/vault/audit/format_jsonx.go
deleted file mode 100644
index 792e552..0000000
--- a/vendor/github.com/hashicorp/vault/audit/format_jsonx.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package audit
-
-import (
- "encoding/json"
- "fmt"
- "io"
-
- "github.com/hashicorp/vault/helper/salt"
- "github.com/jefferai/jsonx"
-)
-
-// JSONxFormatWriter is an AuditFormatWriter implementation that structures data into
-// a XML format.
-type JSONxFormatWriter struct {
- Prefix string
- SaltFunc func() (*salt.Salt, error)
-}
-
-func (f *JSONxFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error {
- if req == nil {
- return fmt.Errorf("request entry was nil, cannot encode")
- }
-
- if len(f.Prefix) > 0 {
- _, err := w.Write([]byte(f.Prefix))
- if err != nil {
- return err
- }
- }
-
- jsonBytes, err := json.Marshal(req)
- if err != nil {
- return err
- }
-
- xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes)
- if err != nil {
- return err
- }
-
- _, err = w.Write(xmlBytes)
- return err
-}
-
-func (f *JSONxFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error {
- if resp == nil {
- return fmt.Errorf("response entry was nil, cannot encode")
- }
-
- if len(f.Prefix) > 0 {
- _, err := w.Write([]byte(f.Prefix))
- if err != nil {
- return err
- }
- }
-
- jsonBytes, err := json.Marshal(resp)
- if err != nil {
- return err
- }
-
- xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes)
- if err != nil {
- return err
- }
-
- _, err = w.Write(xmlBytes)
- return err
-}
-
-func (f *JSONxFormatWriter) Salt() (*salt.Salt, error) {
- return f.SaltFunc()
-}
diff --git a/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go b/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go
deleted file mode 100644
index b04ccd0..0000000
--- a/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package audit
-
-import (
- "bytes"
- "strings"
- "testing"
- "time"
-
- "errors"
-
- "fmt"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-func TestFormatJSONx_formatRequest(t *testing.T) {
- salter, err := salt.NewSalt(nil, nil)
- if err != nil {
- t.Fatal(err)
- }
- saltFunc := func() (*salt.Salt, error) {
- return salter, nil
- }
-
- fooSalted := salter.GetIdentifiedHMAC("foo")
-
- cases := map[string]struct {
- Auth *logical.Auth
- Req *logical.Request
- Err error
- Prefix string
- Result string
- ExpectedStr string
- }{
- "auth, request": {
- &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", Policies: []string{"root"}},
- &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "/foo",
- Connection: &logical.Connection{
- RemoteAddr: "127.0.0.1",
- },
- WrapInfo: &logical.RequestWrapInfo{
- TTL: 60 * time.Second,
- },
- Headers: map[string][]string{
- "foo": []string{"bar"},
- },
- },
- errors.New("this is an error"),
- "",
- "",
- fmt.Sprintf(`bar%stesttokenrootthis is an errorbarupdate/foo127.0.0.160request`,
- fooSalted),
- },
- "auth, request with prefix": {
- &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", Policies: []string{"root"}},
- &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "/foo",
- Connection: &logical.Connection{
- RemoteAddr: "127.0.0.1",
- },
- WrapInfo: &logical.RequestWrapInfo{
- TTL: 60 * time.Second,
- },
- Headers: map[string][]string{
- "foo": []string{"bar"},
- },
- },
- errors.New("this is an error"),
- "",
- "@cee: ",
- fmt.Sprintf(`bar%stesttokenrootthis is an errorbarupdate/foo127.0.0.160request`,
- fooSalted),
- },
- }
-
- for name, tc := range cases {
- var buf bytes.Buffer
- formatter := AuditFormatter{
- AuditFormatWriter: &JSONxFormatWriter{
- Prefix: tc.Prefix,
- SaltFunc: saltFunc,
- },
- }
- config := FormatterConfig{
- OmitTime: true,
- HMACAccessor: false,
- }
- if err := formatter.FormatRequest(&buf, config, tc.Auth, tc.Req, tc.Err); err != nil {
- t.Fatalf("bad: %s\nerr: %s", name, err)
- }
-
- if !strings.HasPrefix(buf.String(), tc.Prefix) {
- t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix)
- }
-
- if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) {
- t.Fatalf(
- "bad: %s\nResult:\n\n'%s'\n\nExpected:\n\n'%s'",
- name, strings.TrimSpace(buf.String()), string(tc.ExpectedStr))
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/audit/format_test.go b/vendor/github.com/hashicorp/vault/audit/format_test.go
deleted file mode 100644
index 5390229..0000000
--- a/vendor/github.com/hashicorp/vault/audit/format_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package audit
-
-import (
- "io"
- "io/ioutil"
- "testing"
-
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-type noopFormatWriter struct {
- salt *salt.Salt
- SaltFunc func() (*salt.Salt, error)
-}
-
-func (n *noopFormatWriter) WriteRequest(_ io.Writer, _ *AuditRequestEntry) error {
- return nil
-}
-
-func (n *noopFormatWriter) WriteResponse(_ io.Writer, _ *AuditResponseEntry) error {
- return nil
-}
-
-func (n *noopFormatWriter) Salt() (*salt.Salt, error) {
- if n.salt != nil {
- return n.salt, nil
- }
- var err error
- n.salt, err = salt.NewSalt(nil, nil)
- if err != nil {
- return nil, err
- }
- return n.salt, nil
-}
-
-func TestFormatRequestErrors(t *testing.T) {
- config := FormatterConfig{}
- formatter := AuditFormatter{
- AuditFormatWriter: &noopFormatWriter{},
- }
-
- if err := formatter.FormatRequest(ioutil.Discard, config, nil, nil, nil); err == nil {
- t.Fatal("expected error due to nil request")
- }
- if err := formatter.FormatRequest(nil, config, nil, &logical.Request{}, nil); err == nil {
- t.Fatal("expected error due to nil writer")
- }
-}
-
-func TestFormatResponseErrors(t *testing.T) {
- config := FormatterConfig{}
- formatter := AuditFormatter{
- AuditFormatWriter: &noopFormatWriter{},
- }
-
- if err := formatter.FormatResponse(ioutil.Discard, config, nil, nil, nil, nil); err == nil {
- t.Fatal("expected error due to nil request")
- }
- if err := formatter.FormatResponse(nil, config, nil, &logical.Request{}, nil, nil); err == nil {
- t.Fatal("expected error due to nil writer")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/audit/formatter.go b/vendor/github.com/hashicorp/vault/audit/formatter.go
deleted file mode 100644
index 3c1748f..0000000
--- a/vendor/github.com/hashicorp/vault/audit/formatter.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package audit
-
-import (
- "io"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// Formatter is an interface that is responsible for formating a
-// request/response into some format. Formatters write their output
-// to an io.Writer.
-//
-// It is recommended that you pass data through Hash prior to formatting it.
-type Formatter interface {
- FormatRequest(io.Writer, FormatterConfig, *logical.Auth, *logical.Request, error) error
- FormatResponse(io.Writer, FormatterConfig, *logical.Auth, *logical.Request, *logical.Response, error) error
-}
-
-type FormatterConfig struct {
- Raw bool
- HMACAccessor bool
-
- // This should only ever be used in a testing context
- OmitTime bool
-}
diff --git a/vendor/github.com/hashicorp/vault/audit/hashstructure.go b/vendor/github.com/hashicorp/vault/audit/hashstructure.go
deleted file mode 100644
index 8caf3eb..0000000
--- a/vendor/github.com/hashicorp/vault/audit/hashstructure.go
+++ /dev/null
@@ -1,308 +0,0 @@
-package audit
-
-import (
- "errors"
- "reflect"
- "strings"
- "time"
-
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
- "github.com/mitchellh/copystructure"
- "github.com/mitchellh/reflectwalk"
-)
-
-// HashString hashes the given opaque string and returns it
-func HashString(salter *salt.Salt, data string) string {
- return salter.GetIdentifiedHMAC(data)
-}
-
-// Hash will hash the given type. This has built-in support for auth,
-// requests, and responses. If it is a type that isn't recognized, then
-// it will be passed through.
-//
-// The structure is modified in-place.
-func Hash(salter *salt.Salt, raw interface{}) error {
- fn := salter.GetIdentifiedHMAC
-
- switch s := raw.(type) {
- case *logical.Auth:
- if s == nil {
- return nil
- }
- if s.ClientToken != "" {
- s.ClientToken = fn(s.ClientToken)
- }
- if s.Accessor != "" {
- s.Accessor = fn(s.Accessor)
- }
-
- case *logical.Request:
- if s == nil {
- return nil
- }
- if s.Auth != nil {
- if err := Hash(salter, s.Auth); err != nil {
- return err
- }
- }
-
- if s.ClientToken != "" {
- s.ClientToken = fn(s.ClientToken)
- }
-
- if s.ClientTokenAccessor != "" {
- s.ClientTokenAccessor = fn(s.ClientTokenAccessor)
- }
-
- data, err := HashStructure(s.Data, fn)
- if err != nil {
- return err
- }
-
- s.Data = data.(map[string]interface{})
-
- case *logical.Response:
- if s == nil {
- return nil
- }
-
- if s.Auth != nil {
- if err := Hash(salter, s.Auth); err != nil {
- return err
- }
- }
-
- if s.WrapInfo != nil {
- if err := Hash(salter, s.WrapInfo); err != nil {
- return err
- }
- }
-
- data, err := HashStructure(s.Data, fn)
- if err != nil {
- return err
- }
-
- s.Data = data.(map[string]interface{})
-
- case *wrapping.ResponseWrapInfo:
- if s == nil {
- return nil
- }
-
- s.Token = fn(s.Token)
-
- if s.WrappedAccessor != "" {
- s.WrappedAccessor = fn(s.WrappedAccessor)
- }
- }
-
- return nil
-}
-
-// HashStructure takes an interface and hashes all the values within
-// the structure. Only _values_ are hashed: keys of objects are not.
-//
-// For the HashCallback, see the built-in HashCallbacks below.
-func HashStructure(s interface{}, cb HashCallback) (interface{}, error) {
- s, err := copystructure.Copy(s)
- if err != nil {
- return nil, err
- }
-
- walker := &hashWalker{Callback: cb}
- if err := reflectwalk.Walk(s, walker); err != nil {
- return nil, err
- }
-
- return s, nil
-}
-
-// HashCallback is the callback called for HashStructure to hash
-// a value.
-type HashCallback func(string) string
-
-// hashWalker implements interfaces for the reflectwalk package
-// (github.com/mitchellh/reflectwalk) that can be used to automatically
-// replace primitives with a hashed value.
-type hashWalker struct {
- // Callback is the function to call with the primitive that is
- // to be hashed. If there is an error, walking will be halted
- // immediately and the error returned.
- Callback HashCallback
-
- key []string
- lastValue reflect.Value
- loc reflectwalk.Location
- cs []reflect.Value
- csKey []reflect.Value
- csData interface{}
- sliceIndex int
- unknownKeys []string
-}
-
-// hashTimeType stores a pre-computed reflect.Type for a time.Time so
-// we can quickly compare in hashWalker.Struct. We create an empty/invalid
-// time.Time{} so we don't need to incur any additional startup cost vs.
-// Now() or Unix().
-var hashTimeType = reflect.TypeOf(time.Time{})
-
-func (w *hashWalker) Enter(loc reflectwalk.Location) error {
- w.loc = loc
- return nil
-}
-
-func (w *hashWalker) Exit(loc reflectwalk.Location) error {
- w.loc = reflectwalk.None
-
- switch loc {
- case reflectwalk.Map:
- w.cs = w.cs[:len(w.cs)-1]
- case reflectwalk.MapValue:
- w.key = w.key[:len(w.key)-1]
- w.csKey = w.csKey[:len(w.csKey)-1]
- case reflectwalk.Slice:
- w.cs = w.cs[:len(w.cs)-1]
- case reflectwalk.SliceElem:
- w.csKey = w.csKey[:len(w.csKey)-1]
- }
-
- return nil
-}
-
-func (w *hashWalker) Map(m reflect.Value) error {
- w.cs = append(w.cs, m)
- return nil
-}
-
-func (w *hashWalker) MapElem(m, k, v reflect.Value) error {
- w.csData = k
- w.csKey = append(w.csKey, k)
- w.key = append(w.key, k.String())
- w.lastValue = v
- return nil
-}
-
-func (w *hashWalker) Slice(s reflect.Value) error {
- w.cs = append(w.cs, s)
- return nil
-}
-
-func (w *hashWalker) SliceElem(i int, elem reflect.Value) error {
- w.csKey = append(w.csKey, reflect.ValueOf(i))
- w.sliceIndex = i
- return nil
-}
-
-func (w *hashWalker) Struct(v reflect.Value) error {
- // We are looking for time values. If it isn't one, ignore it.
- if v.Type() != hashTimeType {
- return nil
- }
-
- // If we aren't in a map value, return an error to prevent a panic
- if v.Interface() != w.lastValue.Interface() {
- return errors.New("time.Time value in a non map key cannot be hashed for audits")
- }
-
- // Create a string value of the time. IMPORTANT: this must never change
- // across Vault versions or the hash value of equivalent time.Time will
- // change.
- strVal := v.Interface().(time.Time).Format(time.RFC3339Nano)
-
- // Set the map value to the string instead of the time.Time object
- m := w.cs[len(w.cs)-1]
- mk := w.csData.(reflect.Value)
- m.SetMapIndex(mk, reflect.ValueOf(strVal))
-
- // Skip this entry so that we don't walk the struct.
- return reflectwalk.SkipEntry
-}
-
-func (w *hashWalker) StructField(reflect.StructField, reflect.Value) error {
- return nil
-}
-
-func (w *hashWalker) Primitive(v reflect.Value) error {
- if w.Callback == nil {
- return nil
- }
-
- // We don't touch map keys
- if w.loc == reflectwalk.MapKey {
- return nil
- }
-
- setV := v
-
- // We only care about strings
- if v.Kind() == reflect.Interface {
- setV = v
- v = v.Elem()
- }
- if v.Kind() != reflect.String {
- return nil
- }
-
- replaceVal := w.Callback(v.String())
-
- resultVal := reflect.ValueOf(replaceVal)
- switch w.loc {
- case reflectwalk.MapKey:
- m := w.cs[len(w.cs)-1]
-
- // Delete the old value
- var zero reflect.Value
- m.SetMapIndex(w.csData.(reflect.Value), zero)
-
- // Set the new key with the existing value
- m.SetMapIndex(resultVal, w.lastValue)
-
- // Set the key to be the new key
- w.csData = resultVal
- case reflectwalk.MapValue:
- // If we're in a map, then the only way to set a map value is
- // to set it directly.
- m := w.cs[len(w.cs)-1]
- mk := w.csData.(reflect.Value)
- m.SetMapIndex(mk, resultVal)
- default:
- // Otherwise, we should be addressable
- setV.Set(resultVal)
- }
-
- return nil
-}
-
-func (w *hashWalker) removeCurrent() {
- // Append the key to the unknown keys
- w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
-
- for i := 1; i <= len(w.cs); i++ {
- c := w.cs[len(w.cs)-i]
- switch c.Kind() {
- case reflect.Map:
- // Zero value so that we delete the map key
- var val reflect.Value
-
- // Get the key and delete it
- k := w.csData.(reflect.Value)
- c.SetMapIndex(k, val)
- return
- }
- }
-
- panic("No container found for removeCurrent")
-}
-
-func (w *hashWalker) replaceCurrent(v reflect.Value) {
- c := w.cs[len(w.cs)-2]
- switch c.Kind() {
- case reflect.Map:
- // Get the key and delete it
- k := w.csKey[len(w.csKey)-1]
- c.SetMapIndex(k, v)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go b/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go
deleted file mode 100644
index 49afa6e..0000000
--- a/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go
+++ /dev/null
@@ -1,252 +0,0 @@
-package audit
-
-import (
- "crypto/sha256"
- "fmt"
- "reflect"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
- "github.com/mitchellh/copystructure"
-)
-
-func TestCopy_auth(t *testing.T) {
- // Make a non-pointer one so that it can't be modified directly
- expected := logical.Auth{
- LeaseOptions: logical.LeaseOptions{
- TTL: 1 * time.Hour,
- IssueTime: time.Now(),
- },
-
- ClientToken: "foo",
- }
- auth := expected
-
- // Copy it
- dup, err := copystructure.Copy(&auth)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Check equality
- auth2 := dup.(*logical.Auth)
- if !reflect.DeepEqual(*auth2, expected) {
- t.Fatalf("bad:\n\n%#v\n\n%#v", *auth2, expected)
- }
-}
-
-func TestCopy_request(t *testing.T) {
- // Make a non-pointer one so that it can't be modified directly
- expected := logical.Request{
- Data: map[string]interface{}{
- "foo": "bar",
- },
- WrapInfo: &logical.RequestWrapInfo{
- TTL: 60 * time.Second,
- },
- }
- arg := expected
-
- // Copy it
- dup, err := copystructure.Copy(&arg)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Check equality
- arg2 := dup.(*logical.Request)
- if !reflect.DeepEqual(*arg2, expected) {
- t.Fatalf("bad:\n\n%#v\n\n%#v", *arg2, expected)
- }
-}
-
-func TestCopy_response(t *testing.T) {
- // Make a non-pointer one so that it can't be modified directly
- expected := logical.Response{
- Data: map[string]interface{}{
- "foo": "bar",
- },
- WrapInfo: &wrapping.ResponseWrapInfo{
- TTL: 60,
- Token: "foo",
- CreationTime: time.Now(),
- WrappedAccessor: "abcd1234",
- },
- }
- arg := expected
-
- // Copy it
- dup, err := copystructure.Copy(&arg)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Check equality
- arg2 := dup.(*logical.Response)
- if !reflect.DeepEqual(*arg2, expected) {
- t.Fatalf("bad:\n\n%#v\n\n%#v", *arg2, expected)
- }
-}
-
-func TestHashString(t *testing.T) {
- inmemStorage := &logical.InmemStorage{}
- inmemStorage.Put(&logical.StorageEntry{
- Key: "salt",
- Value: []byte("foo"),
- })
- localSalt, err := salt.NewSalt(inmemStorage, &salt.Config{
- HMAC: sha256.New,
- HMACType: "hmac-sha256",
- })
- if err != nil {
- t.Fatalf("Error instantiating salt: %s", err)
- }
- out := HashString(localSalt, "foo")
- if out != "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a" {
- t.Fatalf("err: HashString output did not match expected")
- }
-}
-
-func TestHash(t *testing.T) {
- now := time.Now()
-
- cases := []struct {
- Input interface{}
- Output interface{}
- }{
- {
- &logical.Auth{ClientToken: "foo"},
- &logical.Auth{ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a"},
- },
- {
- &logical.Request{
- Data: map[string]interface{}{
- "foo": "bar",
- "private_key_type": certutil.PrivateKeyType("rsa"),
- },
- },
- &logical.Request{
- Data: map[string]interface{}{
- "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
- "private_key_type": "hmac-sha256:995230dca56fffd310ff591aa404aab52b2abb41703c787cfa829eceb4595bf1",
- },
- },
- },
- {
- &logical.Response{
- Data: map[string]interface{}{
- "foo": "bar",
-
- // Responses can contain time values, so test that with
- // a known fixed value.
- "bar": now,
- },
- WrapInfo: &wrapping.ResponseWrapInfo{
- TTL: 60,
- Token: "bar",
- CreationTime: now,
- WrappedAccessor: "bar",
- },
- },
- &logical.Response{
- Data: map[string]interface{}{
- "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
- "bar": now.Format(time.RFC3339Nano),
- },
- WrapInfo: &wrapping.ResponseWrapInfo{
- TTL: 60,
- Token: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
- CreationTime: now,
- WrappedAccessor: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
- },
- },
- },
- {
- "foo",
- "foo",
- },
- {
- &logical.Auth{
- LeaseOptions: logical.LeaseOptions{
- TTL: 1 * time.Hour,
- IssueTime: now,
- },
-
- ClientToken: "foo",
- },
- &logical.Auth{
- LeaseOptions: logical.LeaseOptions{
- TTL: 1 * time.Hour,
- IssueTime: now,
- },
-
- ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a",
- },
- },
- }
-
- inmemStorage := &logical.InmemStorage{}
- inmemStorage.Put(&logical.StorageEntry{
- Key: "salt",
- Value: []byte("foo"),
- })
- localSalt, err := salt.NewSalt(inmemStorage, &salt.Config{
- HMAC: sha256.New,
- HMACType: "hmac-sha256",
- })
- if err != nil {
- t.Fatalf("Error instantiating salt: %s", err)
- }
- for _, tc := range cases {
- input := fmt.Sprintf("%#v", tc.Input)
- if err := Hash(localSalt, tc.Input); err != nil {
- t.Fatalf("err: %s\n\n%s", err, input)
- }
- if !reflect.DeepEqual(tc.Input, tc.Output) {
- t.Fatalf("bad:\nInput:\n%s\nTest case input:\n%#v\nTest case output\n%#v", input, tc.Input, tc.Output)
- }
- }
-}
-
-func TestHashWalker(t *testing.T) {
- replaceText := "foo"
-
- cases := []struct {
- Input interface{}
- Output interface{}
- }{
- {
- map[string]interface{}{
- "hello": "foo",
- },
- map[string]interface{}{
- "hello": replaceText,
- },
- },
-
- {
- map[string]interface{}{
- "hello": []interface{}{"world"},
- },
- map[string]interface{}{
- "hello": []interface{}{replaceText},
- },
- },
- }
-
- for _, tc := range cases {
- output, err := HashStructure(tc.Input, func(string) string {
- return replaceText
- })
- if err != nil {
- t.Fatalf("err: %s\n\n%#v", err, tc.Input)
- }
- if !reflect.DeepEqual(output, tc.Output) {
- t.Fatalf("bad:\n\n%#v\n\n%#v", tc.Input, output)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go
deleted file mode 100644
index 614e153..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go
+++ /dev/null
@@ -1,291 +0,0 @@
-package file
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
-
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
- if conf.SaltConfig == nil {
- return nil, fmt.Errorf("nil salt config")
- }
- if conf.SaltView == nil {
- return nil, fmt.Errorf("nil salt view")
- }
-
- path, ok := conf.Config["file_path"]
- if !ok {
- path, ok = conf.Config["path"]
- if !ok {
- return nil, fmt.Errorf("file_path is required")
- }
- }
-
- // normalize path if configured for stdout
- if strings.ToLower(path) == "stdout" {
- path = "stdout"
- }
- if strings.ToLower(path) == "discard" {
- path = "discard"
- }
-
- format, ok := conf.Config["format"]
- if !ok {
- format = "json"
- }
- switch format {
- case "json", "jsonx":
- default:
- return nil, fmt.Errorf("unknown format type %s", format)
- }
-
- // Check if hashing of accessor is disabled
- hmacAccessor := true
- if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
- value, err := strconv.ParseBool(hmacAccessorRaw)
- if err != nil {
- return nil, err
- }
- hmacAccessor = value
- }
-
- // Check if raw logging is enabled
- logRaw := false
- if raw, ok := conf.Config["log_raw"]; ok {
- b, err := strconv.ParseBool(raw)
- if err != nil {
- return nil, err
- }
- logRaw = b
- }
-
- // Check if mode is provided
- mode := os.FileMode(0600)
- if modeRaw, ok := conf.Config["mode"]; ok {
- m, err := strconv.ParseUint(modeRaw, 8, 32)
- if err != nil {
- return nil, err
- }
- mode = os.FileMode(m)
- }
-
- b := &Backend{
- path: path,
- mode: mode,
- saltConfig: conf.SaltConfig,
- saltView: conf.SaltView,
- formatConfig: audit.FormatterConfig{
- Raw: logRaw,
- HMACAccessor: hmacAccessor,
- },
- }
-
- switch format {
- case "json":
- b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
- Prefix: conf.Config["prefix"],
- SaltFunc: b.Salt,
- }
- case "jsonx":
- b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{
- Prefix: conf.Config["prefix"],
- SaltFunc: b.Salt,
- }
- }
-
- switch path {
- case "stdout", "discard":
- // no need to test opening file if outputting to stdout or discarding
- default:
- // Ensure that the file can be successfully opened for writing;
- // otherwise it will be too late to catch later without problems
- // (ref: https://github.com/hashicorp/vault/issues/550)
- if err := b.open(); err != nil {
- return nil, fmt.Errorf("sanity check failed; unable to open %s for writing: %v", path, err)
- }
- }
-
- return b, nil
-}
-
-// Backend is the audit backend for the file-based audit store.
-//
-// NOTE: This audit backend is currently very simple: it appends to a file.
-// It doesn't do anything more at the moment to assist with rotation
-// or reset the write cursor, this should be done in the future.
-type Backend struct {
- path string
-
- formatter audit.AuditFormatter
- formatConfig audit.FormatterConfig
-
- fileLock sync.RWMutex
- f *os.File
- mode os.FileMode
-
- saltMutex sync.RWMutex
- salt *salt.Salt
- saltConfig *salt.Config
- saltView logical.Storage
-}
-
-func (b *Backend) Salt() (*salt.Salt, error) {
- b.saltMutex.RLock()
- if b.salt != nil {
- defer b.saltMutex.RUnlock()
- return b.salt, nil
- }
- b.saltMutex.RUnlock()
- b.saltMutex.Lock()
- defer b.saltMutex.Unlock()
- if b.salt != nil {
- return b.salt, nil
- }
- salt, err := salt.NewSalt(b.saltView, b.saltConfig)
- if err != nil {
- return nil, err
- }
- b.salt = salt
- return salt, nil
-}
-
-func (b *Backend) GetHash(data string) (string, error) {
- salt, err := b.Salt()
- if err != nil {
- return "", err
- }
- return audit.HashString(salt, data), nil
-}
-
-func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error {
- b.fileLock.Lock()
- defer b.fileLock.Unlock()
-
- switch b.path {
- case "stdout":
- return b.formatter.FormatRequest(os.Stdout, b.formatConfig, auth, req, outerErr)
- case "discard":
- return b.formatter.FormatRequest(ioutil.Discard, b.formatConfig, auth, req, outerErr)
- }
-
- if err := b.open(); err != nil {
- return err
- }
-
- if err := b.formatter.FormatRequest(b.f, b.formatConfig, auth, req, outerErr); err == nil {
- return nil
- }
-
- // Opportunistically try to re-open the FD, once per call
- b.f.Close()
- b.f = nil
-
- if err := b.open(); err != nil {
- return err
- }
-
- return b.formatter.FormatRequest(b.f, b.formatConfig, auth, req, outerErr)
-}
-
-func (b *Backend) LogResponse(
- auth *logical.Auth,
- req *logical.Request,
- resp *logical.Response,
- err error) error {
-
- b.fileLock.Lock()
- defer b.fileLock.Unlock()
-
- switch b.path {
- case "stdout":
- return b.formatter.FormatResponse(os.Stdout, b.formatConfig, auth, req, resp, err)
- case "discard":
- return b.formatter.FormatResponse(ioutil.Discard, b.formatConfig, auth, req, resp, err)
- }
-
- if err := b.open(); err != nil {
- return err
- }
-
- if err := b.formatter.FormatResponse(b.f, b.formatConfig, auth, req, resp, err); err == nil {
- return nil
- }
-
- // Opportunistically try to re-open the FD, once per call
- b.f.Close()
- b.f = nil
-
- if err := b.open(); err != nil {
- return err
- }
-
- return b.formatter.FormatResponse(b.f, b.formatConfig, auth, req, resp, err)
-}
-
-// The file lock must be held before calling this
-func (b *Backend) open() error {
- if b.f != nil {
- return nil
- }
- if err := os.MkdirAll(filepath.Dir(b.path), b.mode); err != nil {
- return err
- }
-
- var err error
- b.f, err = os.OpenFile(b.path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, b.mode)
- if err != nil {
- return err
- }
-
- // Change the file mode in case the log file already existed. We special
- // case /dev/null since we can't chmod it
- switch b.path {
- case "/dev/null":
- default:
- err = os.Chmod(b.path, b.mode)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (b *Backend) Reload() error {
- switch b.path {
- case "stdout", "discard":
- return nil
- }
-
- b.fileLock.Lock()
- defer b.fileLock.Unlock()
-
- if b.f == nil {
- return b.open()
- }
-
- err := b.f.Close()
- // Set to nil here so that even if we error out, on the next access open()
- // will be tried
- b.f = nil
- if err != nil {
- return err
- }
-
- return b.open()
-}
-
-func (b *Backend) Invalidate() {
- b.saltMutex.Lock()
- defer b.saltMutex.Unlock()
- b.salt = nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go
deleted file mode 100644
index 3b4ec84..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package file
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "testing"
-
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-func TestAuditFile_fileModeNew(t *testing.T) {
- modeStr := "0777"
- mode, err := strconv.ParseUint(modeStr, 8, 32)
- if err != nil {
- t.Fatal(err)
- }
-
- path, err := ioutil.TempDir("", "vault-test_audit_file-file_mode_new")
- if err != nil {
- t.Fatal(err)
- }
-
- defer os.RemoveAll(path)
-
- file := filepath.Join(path, "auditTest.txt")
-
- config := map[string]string{
- "path": file,
- "mode": modeStr,
- }
-
- _, err = Factory(&audit.BackendConfig{
- SaltConfig: &salt.Config{},
- SaltView: &logical.InmemStorage{},
- Config: config,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- info, err := os.Stat(file)
- if err != nil {
- t.Fatalf("Cannot retrieve file mode from `Stat`")
- }
- if info.Mode() != os.FileMode(mode) {
- t.Fatalf("File mode does not match.")
- }
-}
-
-func TestAuditFile_fileModeExisting(t *testing.T) {
- f, err := ioutil.TempFile("", "test")
- if err != nil {
- t.Fatalf("Failure to create test file.")
- }
- defer os.Remove(f.Name())
-
- err = os.Chmod(f.Name(), 0777)
- if err != nil {
- t.Fatalf("Failure to chmod temp file for testing.")
- }
-
- err = f.Close()
- if err != nil {
- t.Fatalf("Failure to close temp file for test.")
- }
-
- config := map[string]string{
- "path": f.Name(),
- }
-
- _, err = Factory(&audit.BackendConfig{
- Config: config,
- SaltConfig: &salt.Config{},
- SaltView: &logical.InmemStorage{},
- })
- if err != nil {
- t.Fatal(err)
- }
-
- info, err := os.Stat(f.Name())
- if err != nil {
- t.Fatalf("cannot retrieve file mode from `Stat`")
- }
- if info.Mode() != os.FileMode(0600) {
- t.Fatalf("File mode does not match.")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go
deleted file mode 100644
index bf0ce7f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package socket
-
-import (
- "bytes"
- "fmt"
- "net"
- "strconv"
- "sync"
- "time"
-
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
- if conf.SaltConfig == nil {
- return nil, fmt.Errorf("nil salt config")
- }
- if conf.SaltView == nil {
- return nil, fmt.Errorf("nil salt view")
- }
-
- address, ok := conf.Config["address"]
- if !ok {
- return nil, fmt.Errorf("address is required")
- }
-
- socketType, ok := conf.Config["socket_type"]
- if !ok {
- socketType = "tcp"
- }
-
- writeDeadline, ok := conf.Config["write_timeout"]
- if !ok {
- writeDeadline = "2s"
- }
- writeDuration, err := parseutil.ParseDurationSecond(writeDeadline)
- if err != nil {
- return nil, err
- }
-
- format, ok := conf.Config["format"]
- if !ok {
- format = "json"
- }
- switch format {
- case "json", "jsonx":
- default:
- return nil, fmt.Errorf("unknown format type %s", format)
- }
-
- // Check if hashing of accessor is disabled
- hmacAccessor := true
- if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
- value, err := strconv.ParseBool(hmacAccessorRaw)
- if err != nil {
- return nil, err
- }
- hmacAccessor = value
- }
-
- // Check if raw logging is enabled
- logRaw := false
- if raw, ok := conf.Config["log_raw"]; ok {
- b, err := strconv.ParseBool(raw)
- if err != nil {
- return nil, err
- }
- logRaw = b
- }
-
- b := &Backend{
- saltConfig: conf.SaltConfig,
- saltView: conf.SaltView,
- formatConfig: audit.FormatterConfig{
- Raw: logRaw,
- HMACAccessor: hmacAccessor,
- },
-
- writeDuration: writeDuration,
- address: address,
- socketType: socketType,
- }
-
- switch format {
- case "json":
- b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
- Prefix: conf.Config["prefix"],
- SaltFunc: b.Salt,
- }
- case "jsonx":
- b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{
- Prefix: conf.Config["prefix"],
- SaltFunc: b.Salt,
- }
- }
-
- return b, nil
-}
-
-// Backend is the audit backend for the socket audit transport.
-type Backend struct {
- connection net.Conn
-
- formatter audit.AuditFormatter
- formatConfig audit.FormatterConfig
-
- writeDuration time.Duration
- address string
- socketType string
-
- sync.Mutex
-
- saltMutex sync.RWMutex
- salt *salt.Salt
- saltConfig *salt.Config
- saltView logical.Storage
-}
-
-func (b *Backend) GetHash(data string) (string, error) {
- salt, err := b.Salt()
- if err != nil {
- return "", err
- }
- return audit.HashString(salt, data), nil
-}
-
-func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error {
- var buf bytes.Buffer
- if err := b.formatter.FormatRequest(&buf, b.formatConfig, auth, req, outerErr); err != nil {
- return err
- }
-
- b.Lock()
- defer b.Unlock()
-
- err := b.write(buf.Bytes())
- if err != nil {
- rErr := b.reconnect()
- if rErr != nil {
- err = multierror.Append(err, rErr)
- } else {
- // Try once more after reconnecting
- err = b.write(buf.Bytes())
- }
- }
-
- return err
-}
-
-func (b *Backend) LogResponse(auth *logical.Auth, req *logical.Request,
- resp *logical.Response, outerErr error) error {
- var buf bytes.Buffer
- if err := b.formatter.FormatResponse(&buf, b.formatConfig, auth, req, resp, outerErr); err != nil {
- return err
- }
-
- b.Lock()
- defer b.Unlock()
-
- err := b.write(buf.Bytes())
- if err != nil {
- rErr := b.reconnect()
- if rErr != nil {
- err = multierror.Append(err, rErr)
- } else {
- // Try once more after reconnecting
- err = b.write(buf.Bytes())
- }
- }
-
- return err
-}
-
-func (b *Backend) write(buf []byte) error {
- if b.connection == nil {
- if err := b.reconnect(); err != nil {
- return err
- }
- }
-
- err := b.connection.SetWriteDeadline(time.Now().Add(b.writeDuration))
- if err != nil {
- return err
- }
-
- _, err = b.connection.Write(buf)
- if err != nil {
- return err
- }
-
- return err
-}
-
-func (b *Backend) reconnect() error {
- if b.connection != nil {
- b.connection.Close()
- b.connection = nil
- }
-
- conn, err := net.Dial(b.socketType, b.address)
- if err != nil {
- return err
- }
-
- b.connection = conn
-
- return nil
-}
-
-func (b *Backend) Reload() error {
- b.Lock()
- defer b.Unlock()
-
- err := b.reconnect()
-
- return err
-}
-
-func (b *Backend) Salt() (*salt.Salt, error) {
- b.saltMutex.RLock()
- if b.salt != nil {
- defer b.saltMutex.RUnlock()
- return b.salt, nil
- }
- b.saltMutex.RUnlock()
- b.saltMutex.Lock()
- defer b.saltMutex.Unlock()
- if b.salt != nil {
- return b.salt, nil
- }
- salt, err := salt.NewSalt(b.saltView, b.saltConfig)
- if err != nil {
- return nil, err
- }
- b.salt = salt
- return salt, nil
-}
-
-func (b *Backend) Invalidate() {
- b.saltMutex.Lock()
- defer b.saltMutex.Unlock()
- b.salt = nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go
deleted file mode 100644
index 22c39d4..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package syslog
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "sync"
-
- "github.com/hashicorp/go-syslog"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
- if conf.SaltConfig == nil {
- return nil, fmt.Errorf("nil salt config")
- }
- if conf.SaltView == nil {
- return nil, fmt.Errorf("nil salt view")
- }
-
- // Get facility or default to AUTH
- facility, ok := conf.Config["facility"]
- if !ok {
- facility = "AUTH"
- }
-
- // Get tag or default to 'vault'
- tag, ok := conf.Config["tag"]
- if !ok {
- tag = "vault"
- }
-
- format, ok := conf.Config["format"]
- if !ok {
- format = "json"
- }
- switch format {
- case "json", "jsonx":
- default:
- return nil, fmt.Errorf("unknown format type %s", format)
- }
-
- // Check if hashing of accessor is disabled
- hmacAccessor := true
- if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
- value, err := strconv.ParseBool(hmacAccessorRaw)
- if err != nil {
- return nil, err
- }
- hmacAccessor = value
- }
-
- // Check if raw logging is enabled
- logRaw := false
- if raw, ok := conf.Config["log_raw"]; ok {
- b, err := strconv.ParseBool(raw)
- if err != nil {
- return nil, err
- }
- logRaw = b
- }
-
- // Get the logger
- logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag)
- if err != nil {
- return nil, err
- }
-
- b := &Backend{
- logger: logger,
- saltConfig: conf.SaltConfig,
- saltView: conf.SaltView,
- formatConfig: audit.FormatterConfig{
- Raw: logRaw,
- HMACAccessor: hmacAccessor,
- },
- }
-
- switch format {
- case "json":
- b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
- Prefix: conf.Config["prefix"],
- SaltFunc: b.Salt,
- }
- case "jsonx":
- b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{
- Prefix: conf.Config["prefix"],
- SaltFunc: b.Salt,
- }
- }
-
- return b, nil
-}
-
-// Backend is the audit backend for the syslog-based audit store.
-type Backend struct {
- logger gsyslog.Syslogger
-
- formatter audit.AuditFormatter
- formatConfig audit.FormatterConfig
-
- saltMutex sync.RWMutex
- salt *salt.Salt
- saltConfig *salt.Config
- saltView logical.Storage
-}
-
-func (b *Backend) GetHash(data string) (string, error) {
- salt, err := b.Salt()
- if err != nil {
- return "", err
- }
- return audit.HashString(salt, data), nil
-}
-
-func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error {
- var buf bytes.Buffer
- if err := b.formatter.FormatRequest(&buf, b.formatConfig, auth, req, outerErr); err != nil {
- return err
- }
-
- // Write out to syslog
- _, err := b.logger.Write(buf.Bytes())
- return err
-}
-
-func (b *Backend) LogResponse(auth *logical.Auth, req *logical.Request, resp *logical.Response, err error) error {
- var buf bytes.Buffer
- if err := b.formatter.FormatResponse(&buf, b.formatConfig, auth, req, resp, err); err != nil {
- return err
- }
-
- // Write out to syslog
- _, err = b.logger.Write(buf.Bytes())
- return err
-}
-
-func (b *Backend) Reload() error {
- return nil
-}
-
-func (b *Backend) Salt() (*salt.Salt, error) {
- b.saltMutex.RLock()
- if b.salt != nil {
- defer b.saltMutex.RUnlock()
- return b.salt, nil
- }
- b.saltMutex.RUnlock()
- b.saltMutex.Lock()
- defer b.saltMutex.Unlock()
- if b.salt != nil {
- return b.salt, nil
- }
- salt, err := salt.NewSalt(b.saltView, b.saltConfig)
- if err != nil {
- return nil, err
- }
- b.salt = salt
- return salt, nil
-}
-
-func (b *Backend) Invalidate() {
- b.saltMutex.Lock()
- defer b.saltMutex.Unlock()
- b.salt = nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go
deleted file mode 100644
index a25c9ee..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package appId
-
-import (
- "sync"
-
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b, err := Backend(conf)
- if err != nil {
- return nil, err
- }
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend(conf *logical.BackendConfig) (*backend, error) {
- var b backend
- b.MapAppId = &framework.PolicyMap{
- PathMap: framework.PathMap{
- Name: "app-id",
- Schema: map[string]*framework.FieldSchema{
- "display_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "A name to map to this app ID for logs.",
- },
-
- "value": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Policies for the app ID.",
- },
- },
- },
- DefaultKey: "default",
- }
-
- b.MapUserId = &framework.PathMap{
- Name: "user-id",
- Schema: map[string]*framework.FieldSchema{
- "cidr_block": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "If not blank, restricts auth by this CIDR block",
- },
-
- "value": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "App IDs that this user associates with.",
- },
- },
- }
-
- b.Backend = &framework.Backend{
- Help: backendHelp,
-
- PathsSpecial: &logical.Paths{
- Unauthenticated: []string{
- "login",
- "login/*",
- },
- },
- Paths: framework.PathAppend([]*framework.Path{
- pathLogin(&b),
- pathLoginWithAppIDPath(&b),
- },
- b.MapAppId.Paths(),
- b.MapUserId.Paths(),
- ),
- AuthRenew: b.pathLoginRenew,
- Invalidate: b.invalidate,
- BackendType: logical.TypeCredential,
- }
-
- b.view = conf.StorageView
- b.MapAppId.SaltFunc = b.Salt
- b.MapUserId.SaltFunc = b.Salt
-
- return &b, nil
-}
-
-type backend struct {
- *framework.Backend
-
- salt *salt.Salt
- SaltMutex sync.RWMutex
- view logical.Storage
- MapAppId *framework.PolicyMap
- MapUserId *framework.PathMap
-}
-
-func (b *backend) Salt() (*salt.Salt, error) {
- b.SaltMutex.RLock()
- if b.salt != nil {
- defer b.SaltMutex.RUnlock()
- return b.salt, nil
- }
- b.SaltMutex.RUnlock()
- b.SaltMutex.Lock()
- defer b.SaltMutex.Unlock()
- if b.salt != nil {
- return b.salt, nil
- }
- salt, err := salt.NewSalt(b.view, &salt.Config{
- HashFunc: salt.SHA1Hash,
- Location: salt.DefaultLocation,
- })
- if err != nil {
- return nil, err
- }
- b.salt = salt
- return salt, nil
-}
-
-func (b *backend) invalidate(key string) {
- switch key {
- case salt.DefaultLocation:
- b.SaltMutex.Lock()
- defer b.SaltMutex.Unlock()
- b.salt = nil
- }
-}
-
-const backendHelp = `
-The App ID credential provider is used to perform authentication from
-within applications or machine by pairing together two hard-to-guess
-unique pieces of information: a unique app ID, and a unique user ID.
-
-The goal of this credential provider is to allow elastic users
-(dynamic machines, containers, etc.) to authenticate with Vault without
-having to store passwords outside of Vault. It is a single method of
-solving the chicken-and-egg problem of setting up Vault access on a machine.
-With this provider, nobody except the machine itself has access to both
-pieces of information necessary to authenticate. For example:
-configuration management will have the app IDs, but the machine itself
-will detect its user ID based on some unique machine property such as a
-MAC address (or a hash of it with some salt).
-
-An example, real world process for using this provider:
-
- 1. Create unique app IDs (UUIDs work well) and map them to policies.
- (Path: map/app-id/)
-
- 2. Store the app IDs within configuration management systems.
-
- 3. An out-of-band process run by security operators map unique user IDs
- to these app IDs. Example: when an instance is launched, a cloud-init
- system tells security operators a unique ID for this machine. This
- process can be scripted, but the key is that it is out-of-band and
- out of reach of configuration management.
- (Path: map/user-id/)
-
- 4. A new server is provisioned. Configuration management configures the
- app ID, the server itself detects its user ID. With both of these
- pieces of information, Vault can be accessed according to the policy
- set by the app ID.
-
-More details on this process follow:
-
-The app ID is a unique ID that maps to a set of policies. This ID is
-generated by an operator and configured into the backend. The ID itself
-is usually a UUID, but any hard-to-guess unique value can be used.
-
-After creating app IDs, an operator authorizes a fixed set of user IDs
-with each app ID. When a valid {app ID, user ID} tuple is given to the
-"login" path, then the user is authenticated with the configured app
-ID policies.
-
-The user ID can be any value (just like the app ID), however it is
-generally a value unique to a machine, such as a MAC address or instance ID,
-or a value hashed from these unique values.
-
-It is possible to authorize multiple app IDs with each
-user ID by writing them as comma-separated values to the map/user-id/
-path.
-
-It is also possible to renew the auth tokens with 'vault token-renew ' command.
-Before the token is renewed, the validity of app ID, user ID and the associated
-policies are checked again.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go
deleted file mode 100644
index 4ae5d3e..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package appId
-
-import (
- "fmt"
- "testing"
-
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
-)
-
-func TestBackend_basic(t *testing.T) {
- var b *backend
- var err error
- var storage logical.Storage
- factory := func(conf *logical.BackendConfig) (logical.Backend, error) {
- b, err = Backend(conf)
- if err != nil {
- t.Fatal(err)
- }
- storage = conf.StorageView
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
- }
- logicaltest.Test(t, logicaltest.TestCase{
- Factory: factory,
- Steps: []logicaltest.TestStep{
- testAccStepMapAppId(t),
- testAccStepMapUserId(t),
- testAccLogin(t, ""),
- testAccLoginAppIDInPath(t, ""),
- testAccLoginInvalid(t),
- testAccStepDeleteUserId(t),
- testAccLoginDeleted(t),
- },
- })
-
- req := &logical.Request{
- Path: "map/app-id",
- Operation: logical.ListOperation,
- Storage: storage,
- }
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("nil response")
- }
- keys := resp.Data["keys"].([]string)
- if len(keys) != 1 {
- t.Fatalf("expected 1 key, got %d", len(keys))
- }
- salt, err := b.Salt()
- if err != nil {
- t.Fatal(err)
- }
- if keys[0] != salt.SaltID("foo") {
- t.Fatal("value was improperly salted")
- }
-}
-
-func TestBackend_cidr(t *testing.T) {
- logicaltest.Test(t, logicaltest.TestCase{
- Factory: Factory,
- Steps: []logicaltest.TestStep{
- testAccStepMapAppIdDisplayName(t),
- testAccStepMapUserIdCidr(t, "192.168.1.0/16"),
- testAccLoginCidr(t, "192.168.1.5", false),
- testAccLoginCidr(t, "10.0.1.5", true),
- testAccLoginCidr(t, "", true),
- },
- })
-}
-
-func TestBackend_displayName(t *testing.T) {
- logicaltest.Test(t, logicaltest.TestCase{
- Factory: Factory,
- Steps: []logicaltest.TestStep{
- testAccStepMapAppIdDisplayName(t),
- testAccStepMapUserId(t),
- testAccLogin(t, "tubbin"),
- testAccLoginAppIDInPath(t, "tubbin"),
- testAccLoginInvalid(t),
- testAccStepDeleteUserId(t),
- testAccLoginDeleted(t),
- },
- })
-}
-
-func testAccStepMapAppId(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "map/app-id/foo",
- Data: map[string]interface{}{
- "value": "foo,bar",
- },
- }
-}
-
-func testAccStepMapAppIdDisplayName(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "map/app-id/foo",
- Data: map[string]interface{}{
- "display_name": "tubbin",
- "value": "foo,bar",
- },
- }
-}
-
-func testAccStepMapUserId(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "map/user-id/42",
- Data: map[string]interface{}{
- "value": "foo",
- },
- }
-}
-
-func testAccStepDeleteUserId(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "map/user-id/42",
- }
-}
-
-func testAccStepMapUserIdCidr(t *testing.T, cidr string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "map/user-id/42",
- Data: map[string]interface{}{
- "value": "foo",
- "cidr_block": cidr,
- },
- }
-}
-
-func testAccLogin(t *testing.T, display string) logicaltest.TestStep {
- checkTTL := func(resp *logical.Response) error {
- if resp.Auth.LeaseOptions.TTL.String() != "768h0m0s" {
- return fmt.Errorf("invalid TTL")
- }
- return nil
- }
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login",
- Data: map[string]interface{}{
- "app_id": "foo",
- "user_id": "42",
- },
- Unauthenticated: true,
-
- Check: logicaltest.TestCheckMulti(
- logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}),
- logicaltest.TestCheckAuthDisplayName(display),
- checkTTL,
- ),
- }
-}
-
-func testAccLoginAppIDInPath(t *testing.T, display string) logicaltest.TestStep {
- checkTTL := func(resp *logical.Response) error {
- if resp.Auth.LeaseOptions.TTL.String() != "768h0m0s" {
- return fmt.Errorf("invalid TTL")
- }
- return nil
- }
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login/foo",
- Data: map[string]interface{}{
- "user_id": "42",
- },
- Unauthenticated: true,
-
- Check: logicaltest.TestCheckMulti(
- logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}),
- logicaltest.TestCheckAuthDisplayName(display),
- checkTTL,
- ),
- }
-}
-
-func testAccLoginCidr(t *testing.T, ip string, err bool) logicaltest.TestStep {
- check := logicaltest.TestCheckError()
- if !err {
- check = logicaltest.TestCheckAuth([]string{"bar", "default", "foo"})
- }
-
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login",
- Data: map[string]interface{}{
- "app_id": "foo",
- "user_id": "42",
- },
- ErrorOk: err,
- Unauthenticated: true,
- RemoteAddr: ip,
-
- Check: check,
- }
-}
-
-func testAccLoginInvalid(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login",
- Data: map[string]interface{}{
- "app_id": "foo",
- "user_id": "48",
- },
- ErrorOk: true,
- Unauthenticated: true,
-
- Check: logicaltest.TestCheckError(),
- }
-}
-
-func testAccLoginDeleted(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login",
- Data: map[string]interface{}{
- "app_id": "foo",
- "user_id": "42",
- },
- ErrorOk: true,
- Unauthenticated: true,
-
- Check: logicaltest.TestCheckError(),
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/path_login.go
deleted file mode 100644
index 1fba37a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/path_login.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package appId
-
-import (
- "crypto/sha1"
- "crypto/subtle"
- "encoding/hex"
- "fmt"
- "net"
- "strings"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathLoginWithAppIDPath(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "login/(?P.+)",
- Fields: map[string]*framework.FieldSchema{
- "app_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The unique app ID",
- },
-
- "user_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The unique user ID",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLogin,
- },
-
- HelpSynopsis: pathLoginSyn,
- HelpDescription: pathLoginDesc,
- }
-}
-
-func pathLogin(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "login$",
- Fields: map[string]*framework.FieldSchema{
- "app_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The unique app ID",
- },
-
- "user_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The unique user ID",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLogin,
- },
-
- HelpSynopsis: pathLoginSyn,
- HelpDescription: pathLoginDesc,
- }
-}
-
-func (b *backend) pathLogin(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- appId := data.Get("app_id").(string)
- userId := data.Get("user_id").(string)
-
- var displayName string
- if dispName, resp, err := b.verifyCredentials(req, appId, userId); err != nil {
- return nil, err
- } else if resp != nil {
- return resp, nil
- } else {
- displayName = dispName
- }
-
- // Get the policies associated with the app
- policies, err := b.MapAppId.Policies(req.Storage, appId)
- if err != nil {
- return nil, err
- }
-
- // Store hashes of the app ID and user ID for the metadata
- appIdHash := sha1.Sum([]byte(appId))
- userIdHash := sha1.Sum([]byte(userId))
- metadata := map[string]string{
- "app-id": "sha1:" + hex.EncodeToString(appIdHash[:]),
- "user-id": "sha1:" + hex.EncodeToString(userIdHash[:]),
- }
-
- return &logical.Response{
- Auth: &logical.Auth{
- InternalData: map[string]interface{}{
- "app-id": appId,
- "user-id": userId,
- },
- DisplayName: displayName,
- Policies: policies,
- Metadata: metadata,
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- },
- },
- }, nil
-}
-
-func (b *backend) pathLoginRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- appId := req.Auth.InternalData["app-id"].(string)
- userId := req.Auth.InternalData["user-id"].(string)
-
- // Skipping CIDR verification to enable renewal from machines other than
- // the ones encompassed by CIDR block.
- if _, resp, err := b.verifyCredentials(req, appId, userId); err != nil {
- return nil, err
- } else if resp != nil {
- return resp, nil
- }
-
- // Get the policies associated with the app
- mapPolicies, err := b.MapAppId.Policies(req.Storage, appId)
- if err != nil {
- return nil, err
- }
- if !policyutil.EquivalentPolicies(mapPolicies, req.Auth.Policies) {
- return nil, fmt.Errorf("policies do not match")
- }
-
- return framework.LeaseExtend(0, 0, b.System())(req, d)
-}
-
-func (b *backend) verifyCredentials(req *logical.Request, appId, userId string) (string, *logical.Response, error) {
- // Ensure both appId and userId are provided
- if appId == "" || userId == "" {
- return "", logical.ErrorResponse("missing 'app_id' or 'user_id'"), nil
- }
-
- // Look up the apps that this user is allowed to access
- appsMap, err := b.MapUserId.Get(req.Storage, userId)
- if err != nil {
- return "", nil, err
- }
- if appsMap == nil {
- return "", logical.ErrorResponse("invalid user ID or app ID"), nil
- }
-
- // If there is a CIDR block restriction, check that
- if raw, ok := appsMap["cidr_block"]; ok {
- _, cidr, err := net.ParseCIDR(raw.(string))
- if err != nil {
- return "", nil, fmt.Errorf("invalid restriction cidr: %s", err)
- }
-
- var addr string
- if req.Connection != nil {
- addr = req.Connection.RemoteAddr
- }
- if addr == "" || !cidr.Contains(net.ParseIP(addr)) {
- return "", logical.ErrorResponse("unauthorized source address"), nil
- }
- }
-
- appsRaw, ok := appsMap["value"]
- if !ok {
- appsRaw = ""
- }
-
- apps, ok := appsRaw.(string)
- if !ok {
- return "", nil, fmt.Errorf("internal error: mapping is not a string")
- }
-
- // Verify that the app is in the list
- found := false
- appIdBytes := []byte(appId)
- for _, app := range strings.Split(apps, ",") {
- match := []byte(strings.TrimSpace(app))
- // Protect against a timing attack with the app_id comparison
- if subtle.ConstantTimeCompare(match, appIdBytes) == 1 {
- found = true
- }
- }
- if !found {
- return "", logical.ErrorResponse("invalid user ID or app ID"), nil
- }
-
- // Get the raw data associated with the app
- appRaw, err := b.MapAppId.Get(req.Storage, appId)
- if err != nil {
- return "", nil, err
- }
- if appRaw == nil {
- return "", logical.ErrorResponse("invalid user ID or app ID"), nil
- }
- var displayName string
- if raw, ok := appRaw["display_name"]; ok {
- displayName = raw.(string)
- }
-
- return displayName, nil, nil
-}
-
-const pathLoginSyn = `
-Log in with an App ID and User ID.
-`
-
-const pathLoginDesc = `
-This endpoint authenticates using an application ID, user ID and potential the IP address of the connecting client.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go
deleted file mode 100644
index d086d3c..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package approle
-
-import (
- "sync"
-
- "github.com/hashicorp/vault/helper/locksutil"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-type backend struct {
- *framework.Backend
-
- // The salt value to be used by the information to be accessed only
- // by this backend.
- salt *salt.Salt
- saltMutex sync.RWMutex
-
- // The view to use when creating the salt
- view logical.Storage
-
- // Guard to clean-up the expired SecretID entries
- tidySecretIDCASGuard uint32
-
- // Locks to make changes to role entries. These will be initialized to a
- // predefined number of locks when the backend is created, and will be
- // indexed based on salted role names.
- roleLocks []*locksutil.LockEntry
-
- // Locks to make changes to the storage entries of RoleIDs generated. These
- // will be initialized to a predefined number of locks when the backend is
- // created, and will be indexed based on the salted RoleIDs.
- roleIDLocks []*locksutil.LockEntry
-
- // Locks to make changes to the storage entries of SecretIDs generated.
- // These will be initialized to a predefined number of locks when the
- // backend is created, and will be indexed based on the HMAC-ed SecretIDs.
- secretIDLocks []*locksutil.LockEntry
-
- // Locks to make changes to the storage entries of SecretIDAccessors
- // generated. These will be initialized to a predefined number of locks
- // when the backend is created, and will be indexed based on the
- // SecretIDAccessors itself.
- secretIDAccessorLocks []*locksutil.LockEntry
-
- // secretIDListingLock is a dedicated lock for listing SecretIDAccessors
- // for all the SecretIDs issued against an approle
- secretIDListingLock sync.RWMutex
-}
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b, err := Backend(conf)
- if err != nil {
- return nil, err
- }
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend(conf *logical.BackendConfig) (*backend, error) {
- // Create a backend object
- b := &backend{
- view: conf.StorageView,
-
- // Create locks to modify the registered roles
- roleLocks: locksutil.CreateLocks(),
-
- // Create locks to modify the generated RoleIDs
- roleIDLocks: locksutil.CreateLocks(),
-
- // Create locks to modify the generated SecretIDs
- secretIDLocks: locksutil.CreateLocks(),
-
- // Create locks to modify the generated SecretIDAccessors
- secretIDAccessorLocks: locksutil.CreateLocks(),
- }
-
- // Attach the paths and secrets that are to be handled by the backend
- b.Backend = &framework.Backend{
- // Register a periodic function that deletes the expired SecretID entries
- PeriodicFunc: b.periodicFunc,
- Help: backendHelp,
- AuthRenew: b.pathLoginRenew,
- PathsSpecial: &logical.Paths{
- Unauthenticated: []string{
- "login",
- },
- },
- Paths: framework.PathAppend(
- rolePaths(b),
- []*framework.Path{
- pathLogin(b),
- pathTidySecretID(b),
- },
- ),
- Invalidate: b.invalidate,
- BackendType: logical.TypeCredential,
- }
- return b, nil
-}
-
-func (b *backend) Salt() (*salt.Salt, error) {
- b.saltMutex.RLock()
- if b.salt != nil {
- defer b.saltMutex.RUnlock()
- return b.salt, nil
- }
- b.saltMutex.RUnlock()
- b.saltMutex.Lock()
- defer b.saltMutex.Unlock()
- if b.salt != nil {
- return b.salt, nil
- }
- salt, err := salt.NewSalt(b.view, &salt.Config{
- HashFunc: salt.SHA256Hash,
- Location: salt.DefaultLocation,
- })
- if err != nil {
- return nil, err
- }
- b.salt = salt
- return salt, nil
-}
-
-func (b *backend) invalidate(key string) {
- switch key {
- case salt.DefaultLocation:
- b.saltMutex.Lock()
- defer b.saltMutex.Unlock()
- b.salt = nil
- }
-}
-
-// periodicFunc of the backend will be invoked once a minute by the RollbackManager.
-// RoleRole backend utilizes this function to delete expired SecretID entries.
-// This could mean that the SecretID may live in the backend upto 1 min after its
-// expiration. The deletion of SecretIDs are not security sensitive and it is okay
-// to delay the removal of SecretIDs by a minute.
-func (b *backend) periodicFunc(req *logical.Request) error {
- // Initiate clean-up of expired SecretID entries
- b.tidySecretID(req.Storage)
- return nil
-}
-
-const backendHelp = `
-Any registered Role can authenticate itself with Vault. The credentials
-depends on the constraints that are set on the Role. One common required
-credential is the 'role_id' which is a unique identifier of the Role.
-It can be retrieved from the 'role//role-id' endpoint.
-
-The default constraint configuration is 'bind_secret_id', which requires
-the credential 'secret_id' to be presented during login. Refer to the
-documentation for other types of constraints.`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go
deleted file mode 100644
index 5f16e5f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package approle
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- if b == nil {
- t.Fatalf("failed to create backend")
- }
- err = b.Backend.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
- return b, config.StorageView
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go
deleted file mode 100644
index 9b902a4..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package approle
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathLogin(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "login$",
- Fields: map[string]*framework.FieldSchema{
- "role_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Unique identifier of the Role. Required to be supplied when the 'bind_secret_id' constraint is set.",
- },
- "secret_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: "SecretID belong to the App role",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLoginUpdate,
- },
- HelpSynopsis: pathLoginHelpSys,
- HelpDescription: pathLoginHelpDesc,
- }
-}
-
-// Returns the Auth object indicating the authentication and authorization information
-// if the credentials provided are validated by the backend.
-func (b *backend) pathLoginUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- role, roleName, metadata, err := b.validateCredentials(req, data)
- if err != nil || role == nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to validate SecretID: %s", err)), nil
- }
-
- // Always include the role name, for later filtering
- metadata["role_name"] = roleName
-
- auth := &logical.Auth{
- NumUses: role.TokenNumUses,
- Period: role.Period,
- InternalData: map[string]interface{}{
- "role_name": roleName,
- },
- Metadata: metadata,
- Policies: role.Policies,
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- },
- }
-
- // If 'Period' is set, use the value of 'Period' as the TTL.
- // Otherwise, set the normal TokenTTL.
- if role.Period > time.Duration(0) {
- auth.TTL = role.Period
- } else {
- auth.TTL = role.TokenTTL
- }
-
- return &logical.Response{
- Auth: auth,
- }, nil
-}
-
-// Invoked when the token issued by this backend is attempting a renewal.
-func (b *backend) pathLoginRenew(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := req.Auth.InternalData["role_name"].(string)
- if roleName == "" {
- return nil, fmt.Errorf("failed to fetch role_name during renewal")
- }
-
- // Ensure that the Role still exists.
- role, err := b.roleEntry(req.Storage, roleName)
- if err != nil {
- return nil, fmt.Errorf("failed to validate role %s during renewal:%s", roleName, err)
- }
- if role == nil {
- return nil, fmt.Errorf("role %s does not exist during renewal", roleName)
- }
-
- // If 'Period' is set on the Role, the token should never expire.
- // Replenish the TTL with 'Period's value.
- if role.Period > time.Duration(0) {
- // If 'Period' was updated after the token was issued,
- // token will bear the updated 'Period' value as its TTL.
- req.Auth.TTL = role.Period
- return &logical.Response{Auth: req.Auth}, nil
- } else {
- return framework.LeaseExtend(role.TokenTTL, role.TokenMaxTTL, b.System())(req, data)
- }
-}
-
-const pathLoginHelpSys = "Issue a token based on the credentials supplied"
-
-const pathLoginHelpDesc = `
-While the credential 'role_id' is required at all times,
-other credentials required depends on the properties App role
-to which the 'role_id' belongs to. The 'bind_secret_id'
-constraint (enabled by default) on the App role requires the
-'secret_id' credential to be presented.
-
-'role_id' is fetched using the 'role//role_id'
-endpoint and 'secret_id' is fetched using the 'role//secret_id'
-endpoint.`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login_test.go
deleted file mode 100644
index 211b40f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package approle
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestAppRole_RoleLogin(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- createRole(t, b, storage, "role1", "a,b,c")
- roleRoleIDReq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "role/role1/role-id",
- Storage: storage,
- }
- resp, err = b.HandleRequest(roleRoleIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- roleID := resp.Data["role_id"]
-
- roleSecretIDReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "role/role1/secret-id",
- Storage: storage,
- }
- resp, err = b.HandleRequest(roleSecretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- secretID := resp.Data["secret_id"]
-
- loginData := map[string]interface{}{
- "role_id": roleID,
- "secret_id": secretID,
- }
- loginReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "login",
- Storage: storage,
- Data: loginData,
- Connection: &logical.Connection{
- RemoteAddr: "127.0.0.1",
- },
- }
- resp, err = b.HandleRequest(loginReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Auth == nil {
- t.Fatalf("expected a non-nil auth object in the response")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go
deleted file mode 100644
index b9f7e5b..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go
+++ /dev/null
@@ -1,2134 +0,0 @@
-package approle
-
-import (
- "fmt"
- "strings"
- "time"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/cidrutil"
- "github.com/hashicorp/vault/helper/locksutil"
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// roleStorageEntry stores all the options that are set on an role
-type roleStorageEntry struct {
- // UUID that uniquely represents this role. This serves as a credential
- // to perform login using this role.
- RoleID string `json:"role_id" structs:"role_id" mapstructure:"role_id"`
-
- // UUID that serves as the HMAC key for the hashing the 'secret_id's
- // of the role
- HMACKey string `json:"hmac_key" structs:"hmac_key" mapstructure:"hmac_key"`
-
- // Policies that are to be required by the token to access this role
- Policies []string `json:"policies" structs:"policies" mapstructure:"policies"`
-
- // Number of times the SecretID generated against this role can be
- // used to perform login operation
- SecretIDNumUses int `json:"secret_id_num_uses" structs:"secret_id_num_uses" mapstructure:"secret_id_num_uses"`
-
- // Duration (less than the backend mount's max TTL) after which a
- // SecretID generated against the role will expire
- SecretIDTTL time.Duration `json:"secret_id_ttl" structs:"secret_id_ttl" mapstructure:"secret_id_ttl"`
-
- // TokenNumUses defines the number of allowed uses of the token issued
- TokenNumUses int `json:"token_num_uses" mapstructure:"token_num_uses" structs:"token_num_uses"`
-
- // Duration before which an issued token must be renewed
- TokenTTL time.Duration `json:"token_ttl" structs:"token_ttl" mapstructure:"token_ttl"`
-
- // Duration after which an issued token should not be allowed to be renewed
- TokenMaxTTL time.Duration `json:"token_max_ttl" structs:"token_max_ttl" mapstructure:"token_max_ttl"`
-
- // A constraint, if set, requires 'secret_id' credential to be presented during login
- BindSecretID bool `json:"bind_secret_id" structs:"bind_secret_id" mapstructure:"bind_secret_id"`
-
- // A constraint, if set, specifies the CIDR blocks from which logins should be allowed
- BoundCIDRList string `json:"bound_cidr_list" structs:"bound_cidr_list" mapstructure:"bound_cidr_list"`
-
- // Period, if set, indicates that the token generated using this role
- // should never expire. The token should be renewed within the duration
- // specified by this value. The renewal duration will be fixed if the
- // value is not modified on the role. If the `Period` in the role is modified,
- // a token will pick up the new value during its next renewal.
- Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
-}
-
-// roleIDStorageEntry represents the reverse mapping from RoleID to Role
-type roleIDStorageEntry struct {
- Name string `json:"name" structs:"name" mapstructure:"name"`
-}
-
-// rolePaths creates all the paths that are used to register and manage an role.
-//
-// Paths returned:
-// role/ - For listing all the registered roles
-// role/ - For registering an role
-// role//policies - For updating the param
-// role//secret-id-num-uses - For updating the param
-// role//secret-id-ttl - For updating the param
-// role//token-ttl - For updating the param
-// role//token-max-ttl - For updating the param
-// role//token-num-uses - For updating the param
-// role//bind-secret-id - For updating the param
-// role//bound-cidr-list - For updating the param
-// role//period - For updating the param
-// role//role-id - For fetching the role_id of an role
-// role//secret-id - For issuing a secret_id against an role, also to list the secret_id_accessorss
-// role//custom-secret-id - For assigning a custom SecretID against an role
-// role//secret-id/lookup - For reading the properties of a secret_id
-// role//secret-id/destroy - For deleting a secret_id
-// role//secret-id-accessor/lookup - For reading secret_id using accessor
-// role//secret-id-accessor/destroy - For deleting secret_id using accessor
-func rolePaths(b *backend) []*framework.Path {
- return []*framework.Path{
- &framework.Path{
- Pattern: "role/?",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-list"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-list"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name"),
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "bind_secret_id": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: "Impose secret_id to be presented when logging in using this role. Defaults to 'true'.",
- },
- "bound_cidr_list": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Comma separated list of CIDR blocks, if set, specifies blocks of IP
-addresses which can perform the login operation`,
- },
- "policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Default: "default",
- Description: "Comma separated list of policies on the role.",
- },
- "secret_id_num_uses": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `Number of times a SecretID can access the role, after which the SecretID
-will expire. Defaults to 0 meaning that the the secret_id is of unlimited use.`,
- },
- "secret_id_ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `Duration in seconds after which the issued SecretID should expire. Defaults
-to 0, meaning no expiration.`,
- },
- "token_num_uses": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `Number of times issued tokens can be used`,
- },
- "token_ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `Duration in seconds after which the issued token should expire. Defaults
-to 0, in which case the value will fall back to the system/mount defaults.`,
- },
- "token_max_ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `Duration in seconds after which the issued token should not be allowed to
-be renewed. Defaults to 0, in which case the value will fall back to the system/mount defaults.`,
- },
- "period": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: `If set, indicates that the token generated using this role
-should never expire. The token should be renewed within the
-duration specified by this value. At each renewal, the token's
-TTL will be set to the value of this parameter.`,
- },
- "role_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Identifier of the role. Defaults to a UUID.",
- },
- },
- ExistenceCheck: b.pathRoleExistenceCheck,
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.CreateOperation: b.pathRoleCreateUpdate,
- logical.UpdateOperation: b.pathRoleCreateUpdate,
- logical.ReadOperation: b.pathRoleRead,
- logical.DeleteOperation: b.pathRoleDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/policies$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Default: "default",
- Description: "Comma separated list of policies on the role.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRolePoliciesUpdate,
- logical.ReadOperation: b.pathRolePoliciesRead,
- logical.DeleteOperation: b.pathRolePoliciesDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-policies"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-policies"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bound-cidr-list$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "bound_cidr_list": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Comma separated list of CIDR blocks, if set, specifies blocks of IP
-addresses which can perform the login operation`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleBoundCIDRListUpdate,
- logical.ReadOperation: b.pathRoleBoundCIDRListRead,
- logical.DeleteOperation: b.pathRoleBoundCIDRListDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-bound-cidr-list"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-bound-cidr-list"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bind-secret-id$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "bind_secret_id": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: "Impose secret_id to be presented when logging in using this role.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleBindSecretIDUpdate,
- logical.ReadOperation: b.pathRoleBindSecretIDRead,
- logical.DeleteOperation: b.pathRoleBindSecretIDDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-bind-secret-id"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-bind-secret-id"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-num-uses$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "secret_id_num_uses": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: "Number of times a SecretID can access the role, after which the SecretID will expire.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleSecretIDNumUsesUpdate,
- logical.ReadOperation: b.pathRoleSecretIDNumUsesRead,
- logical.DeleteOperation: b.pathRoleSecretIDNumUsesDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-num-uses"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-num-uses"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-ttl$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "secret_id_ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `Duration in seconds after which the issued SecretID should expire. Defaults
-to 0, meaning no expiration.`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleSecretIDTTLUpdate,
- logical.ReadOperation: b.pathRoleSecretIDTTLRead,
- logical.DeleteOperation: b.pathRoleSecretIDTTLDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-ttl"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-ttl"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/period$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "period": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: `If set, indicates that the token generated using this role
-should never expire. The token should be renewed within the
-duration specified by this value. At each renewal, the token's
-TTL will be set to the value of this parameter.`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRolePeriodUpdate,
- logical.ReadOperation: b.pathRolePeriodRead,
- logical.DeleteOperation: b.pathRolePeriodDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-period"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-period"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-num-uses$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "token_num_uses": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `Number of times issued tokens can be used`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleTokenNumUsesUpdate,
- logical.ReadOperation: b.pathRoleTokenNumUsesRead,
- logical.DeleteOperation: b.pathRoleTokenNumUsesDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-token-num-uses"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-token-num-uses"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-ttl$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "token_ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `Duration in seconds after which the issued token should expire. Defaults
-to 0, in which case the value will fall back to the system/mount defaults.`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleTokenTTLUpdate,
- logical.ReadOperation: b.pathRoleTokenTTLRead,
- logical.DeleteOperation: b.pathRoleTokenTTLDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-token-ttl"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-token-ttl"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-max-ttl$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "token_max_ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `Duration in seconds after which the issued token should not be allowed to
-be renewed. Defaults to 0, in which case the value will fall back to the system/mount defaults.`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleTokenMaxTTLUpdate,
- logical.ReadOperation: b.pathRoleTokenMaxTTLRead,
- logical.DeleteOperation: b.pathRoleTokenMaxTTLDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-token-max-ttl"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-token-max-ttl"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/role-id$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "role_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Identifier of the role. Defaults to a UUID.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleRoleIDRead,
- logical.UpdateOperation: b.pathRoleRoleIDUpdate,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-id"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-id"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/?$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "metadata": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Metadata to be tied to the SecretID. This should be a JSON
-formatted string containing the metadata in key value pairs.`,
- },
- "cidr_list": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Comma separated list of CIDR blocks enforcing secret IDs to be used from
-specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the
-list of CIDR blocks listed here should be a subset of the CIDR blocks listed on
-the role.`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleSecretIDUpdate,
- logical.ListOperation: b.pathRoleSecretIDList,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-secret-id"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/lookup/?$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "secret_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "SecretID attached to the role.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleSecretIDLookupUpdate,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-lookup"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-lookup"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/destroy/?$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "secret_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "SecretID attached to the role.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleSecretIDDestroyUpdateDelete,
- logical.DeleteOperation: b.pathRoleSecretIDDestroyUpdateDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-destroy"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-destroy"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/lookup/?$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "secret_id_accessor": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Accessor of the SecretID",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleSecretIDAccessorLookupUpdate,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-accessor"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/destroy/?$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "secret_id_accessor": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Accessor of the SecretID",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleSecretIDAccessorDestroyUpdateDelete,
- logical.DeleteOperation: b.pathRoleSecretIDAccessorDestroyUpdateDelete,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-accessor"][1]),
- },
- &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role_name") + "/custom-secret-id$",
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "secret_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "SecretID to be attached to the role.",
- },
- "metadata": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Metadata to be tied to the SecretID. This should be a JSON
-formatted string containing metadata in key value pairs.`,
- },
- "cidr_list": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Comma separated list of CIDR blocks enforcing secret IDs to be used from
-specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the
-list of CIDR blocks listed here should be a subset of the CIDR blocks listed on
-the role.`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleCustomSecretIDUpdate,
- },
- HelpSynopsis: strings.TrimSpace(roleHelp["role-custom-secret-id"][0]),
- HelpDescription: strings.TrimSpace(roleHelp["role-custom-secret-id"][1]),
- },
- }
-}
-
-// pathRoleExistenceCheck returns whether the role with the given name exists or not.
-func (b *backend) pathRoleExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
- role, err := b.roleEntry(req.Storage, data.Get("role_name").(string))
- if err != nil {
- return false, err
- }
- return role != nil, nil
-}
-
-// pathRoleList is used to list all the Roles registered with the backend.
-func (b *backend) pathRoleList(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- lock := b.roleLock("")
-
- lock.RLock()
- defer lock.RUnlock()
-
- roles, err := req.Storage.List("role/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(roles), nil
-}
-
-// pathRoleSecretIDList is used to list all the 'secret_id_accessor's issued against the role.
-func (b *backend) pathRoleSecretIDList(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- // Get the role entry
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("role %s does not exist", roleName)), nil
- }
-
- // Guard the list operation with an outer lock
- b.secretIDListingLock.RLock()
- defer b.secretIDListingLock.RUnlock()
-
- roleNameHMAC, err := createHMAC(role.HMACKey, roleName)
- if err != nil {
- return nil, fmt.Errorf("failed to create HMAC of role_name: %s", err)
- }
-
- // Listing works one level at a time. Get the first level of data
- // which could then be used to get the actual SecretID storage entries.
- secretIDHMACs, err := req.Storage.List(fmt.Sprintf("secret_id/%s/", roleNameHMAC))
- if err != nil {
- return nil, err
- }
-
- var listItems []string
- for _, secretIDHMAC := range secretIDHMACs {
- // For sanity
- if secretIDHMAC == "" {
- continue
- }
-
- // Prepare the full index of the SecretIDs.
- entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
-
- // SecretID locks are not indexed by SecretIDs itself.
- // This is because SecretIDs are not stored in plaintext
- // form anywhere in the backend, and hence accessing its
- // corresponding lock many times using SecretIDs is not
- // possible. Also, indexing it everywhere using secretIDHMACs
- // makes listing operation easier.
- secretIDLock := b.secretIDLock(secretIDHMAC)
-
- secretIDLock.RLock()
-
- result := secretIDStorageEntry{}
- if entry, err := req.Storage.Get(entryIndex); err != nil {
- secretIDLock.RUnlock()
- return nil, err
- } else if entry == nil {
- secretIDLock.RUnlock()
- return nil, fmt.Errorf("storage entry for SecretID is present but no content found at the index")
- } else if err := entry.DecodeJSON(&result); err != nil {
- secretIDLock.RUnlock()
- return nil, err
- }
- listItems = append(listItems, result.SecretIDAccessor)
- secretIDLock.RUnlock()
- }
-
- return logical.ListResponse(listItems), nil
-}
-
-// validateRoleConstraints checks if the role has at least one constraint
-// enabled.
-func validateRoleConstraints(role *roleStorageEntry) error {
- if role == nil {
- return fmt.Errorf("nil role")
- }
-
- // At least one constraint should be enabled on the role
- switch {
- case role.BindSecretID:
- case role.BoundCIDRList != "":
- default:
- return fmt.Errorf("at least one constraint should be enabled on the role")
- }
-
- return nil
-}
-
-// setRoleEntry grabs a write lock and stores the options on an role into the
-// storage. Also creates a reverse index from the role's RoleID to the role
-// itself.
-func (b *backend) setRoleEntry(s logical.Storage, roleName string, role *roleStorageEntry, previousRoleID string) error {
- if roleName == "" {
- return fmt.Errorf("missing role name")
- }
-
- if role == nil {
- return fmt.Errorf("nil role")
- }
-
- // Check if role constraints are properly set
- if err := validateRoleConstraints(role); err != nil {
- return err
- }
-
- // Create a storage entry for the role
- entry, err := logical.StorageEntryJSON("role/"+strings.ToLower(roleName), role)
- if err != nil {
- return err
- }
- if entry == nil {
- return fmt.Errorf("failed to create storage entry for role %s", roleName)
- }
-
- // Check if the index from the role_id to role already exists
- roleIDIndex, err := b.roleIDEntry(s, role.RoleID)
- if err != nil {
- return fmt.Errorf("failed to read role_id index: %v", err)
- }
-
- // If the entry exists, make sure that it belongs to the current role
- if roleIDIndex != nil && roleIDIndex.Name != roleName {
- return fmt.Errorf("role_id already in use")
- }
-
- // When role_id is getting updated, delete the old index before
- // a new one is created
- if previousRoleID != "" && previousRoleID != role.RoleID {
- if err = b.roleIDEntryDelete(s, previousRoleID); err != nil {
- return fmt.Errorf("failed to delete previous role ID index")
- }
- }
-
- // Save the role entry only after all the validations
- if err = s.Put(entry); err != nil {
- return err
- }
-
- // If previousRoleID is still intact, don't create another one
- if previousRoleID != "" && previousRoleID == role.RoleID {
- return nil
- }
-
- // Create a storage entry for reverse mapping of RoleID to role.
- // Note that secondary index is created when the roleLock is held.
- return b.setRoleIDEntry(s, role.RoleID, &roleIDStorageEntry{
- Name: roleName,
- })
-}
-
-// roleEntry grabs the read lock and fetches the options of an role from the storage
-func (b *backend) roleEntry(s logical.Storage, roleName string) (*roleStorageEntry, error) {
- if roleName == "" {
- return nil, fmt.Errorf("missing role_name")
- }
-
- var role roleStorageEntry
-
- lock := b.roleLock(roleName)
-
- lock.RLock()
- defer lock.RUnlock()
-
- if entry, err := s.Get("role/" + strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if entry == nil {
- return nil, nil
- } else if err := entry.DecodeJSON(&role); err != nil {
- return nil, err
- }
-
- return &role, nil
-}
-
-// pathRoleCreateUpdate registers a new role with the backend or updates the options
-// of an existing role
-func (b *backend) pathRoleCreateUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- // Check if the role already exists
- role, err := b.roleEntry(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
-
- // Create a new entry object if this is a CreateOperation
- if role == nil && req.Operation == logical.CreateOperation {
- hmacKey, err := uuid.GenerateUUID()
- if err != nil {
- return nil, fmt.Errorf("failed to create role_id: %s\n", err)
- }
- role = &roleStorageEntry{
- HMACKey: hmacKey,
- }
- } else if role == nil {
- return nil, fmt.Errorf("role entry not found during update operation")
- }
-
- previousRoleID := role.RoleID
- if roleIDRaw, ok := data.GetOk("role_id"); ok {
- role.RoleID = roleIDRaw.(string)
- } else if req.Operation == logical.CreateOperation {
- roleID, err := uuid.GenerateUUID()
- if err != nil {
- return nil, fmt.Errorf("failed to generate role_id: %s\n", err)
- }
- role.RoleID = roleID
- }
- if role.RoleID == "" {
- return logical.ErrorResponse("invalid role_id"), nil
- }
-
- if bindSecretIDRaw, ok := data.GetOk("bind_secret_id"); ok {
- role.BindSecretID = bindSecretIDRaw.(bool)
- } else if req.Operation == logical.CreateOperation {
- role.BindSecretID = data.Get("bind_secret_id").(bool)
- }
-
- if boundCIDRListRaw, ok := data.GetOk("bound_cidr_list"); ok {
- role.BoundCIDRList = strings.TrimSpace(boundCIDRListRaw.(string))
- } else if req.Operation == logical.CreateOperation {
- role.BoundCIDRList = data.Get("bound_cidr_list").(string)
- }
-
- if role.BoundCIDRList != "" {
- valid, err := cidrutil.ValidateCIDRListString(role.BoundCIDRList, ",")
- if err != nil {
- return nil, fmt.Errorf("failed to validate CIDR blocks: %v", err)
- }
- if !valid {
- return logical.ErrorResponse("invalid CIDR blocks"), nil
- }
- }
-
- if policiesRaw, ok := data.GetOk("policies"); ok {
- role.Policies = policyutil.ParsePolicies(policiesRaw)
- } else if req.Operation == logical.CreateOperation {
- role.Policies = policyutil.ParsePolicies(data.Get("policies"))
- }
-
- periodRaw, ok := data.GetOk("period")
- if ok {
- role.Period = time.Second * time.Duration(periodRaw.(int))
- } else if req.Operation == logical.CreateOperation {
- role.Period = time.Second * time.Duration(data.Get("period").(int))
- }
- if role.Period > b.System().MaxLeaseTTL() {
- return logical.ErrorResponse(fmt.Sprintf("'period' of '%s' is greater than the backend's maximum lease TTL of '%s'", role.Period.String(), b.System().MaxLeaseTTL().String())), nil
- }
-
- if secretIDNumUsesRaw, ok := data.GetOk("secret_id_num_uses"); ok {
- role.SecretIDNumUses = secretIDNumUsesRaw.(int)
- } else if req.Operation == logical.CreateOperation {
- role.SecretIDNumUses = data.Get("secret_id_num_uses").(int)
- }
- if role.SecretIDNumUses < 0 {
- return logical.ErrorResponse("secret_id_num_uses cannot be negative"), nil
- }
-
- if secretIDTTLRaw, ok := data.GetOk("secret_id_ttl"); ok {
- role.SecretIDTTL = time.Second * time.Duration(secretIDTTLRaw.(int))
- } else if req.Operation == logical.CreateOperation {
- role.SecretIDTTL = time.Second * time.Duration(data.Get("secret_id_ttl").(int))
- }
-
- if tokenNumUsesRaw, ok := data.GetOk("token_num_uses"); ok {
- role.TokenNumUses = tokenNumUsesRaw.(int)
- } else if req.Operation == logical.CreateOperation {
- role.TokenNumUses = data.Get("token_num_uses").(int)
- }
- if role.TokenNumUses < 0 {
- return logical.ErrorResponse("token_num_uses cannot be negative"), nil
- }
-
- if tokenTTLRaw, ok := data.GetOk("token_ttl"); ok {
- role.TokenTTL = time.Second * time.Duration(tokenTTLRaw.(int))
- } else if req.Operation == logical.CreateOperation {
- role.TokenTTL = time.Second * time.Duration(data.Get("token_ttl").(int))
- }
-
- if tokenMaxTTLRaw, ok := data.GetOk("token_max_ttl"); ok {
- role.TokenMaxTTL = time.Second * time.Duration(tokenMaxTTLRaw.(int))
- } else if req.Operation == logical.CreateOperation {
- role.TokenMaxTTL = time.Second * time.Duration(data.Get("token_max_ttl").(int))
- }
-
- // Check that the TokenTTL value provided is less than the TokenMaxTTL.
- // Sanitizing the TTL and MaxTTL is not required now and can be performed
- // at credential issue time.
- if role.TokenMaxTTL > time.Duration(0) && role.TokenTTL > role.TokenMaxTTL {
- return logical.ErrorResponse("token_ttl should not be greater than token_max_ttl"), nil
- }
-
- var resp *logical.Response
- if role.TokenMaxTTL > b.System().MaxLeaseTTL() {
- resp = &logical.Response{}
- resp.AddWarning("token_max_ttl is greater than the backend mount's maximum TTL value; issued tokens' max TTL value will be truncated")
- }
-
- // Store the entry.
- return resp, b.setRoleEntry(req.Storage, roleName, role, previousRoleID)
-}
-
-// pathRoleRead grabs a read lock and reads the options set on the role from the storage
-func (b *backend) pathRoleRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if role == nil {
- return nil, nil
- } else {
- // Convert the 'time.Duration' values to second.
- role.SecretIDTTL /= time.Second
- role.TokenTTL /= time.Second
- role.TokenMaxTTL /= time.Second
- role.Period /= time.Second
-
- // Create a map of data to be returned and remove sensitive information from it
- data := structs.New(role).Map()
- delete(data, "role_id")
- delete(data, "hmac_key")
-
- resp := &logical.Response{
- Data: data,
- }
-
- if err := validateRoleConstraints(role); err != nil {
- resp.AddWarning("Role does not have any constraints set on it. Updates to this role will require a constraint to be set")
- }
-
- return resp, nil
- }
-}
-
-// pathRoleDelete removes the role from the storage
-func (b *backend) pathRoleDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- // Acquire the lock before deleting the secrets.
- lock := b.roleLock(roleName)
- lock.Lock()
- defer lock.Unlock()
-
- // Just before the role is deleted, remove all the SecretIDs issued as part of the role.
- if err = b.flushRoleSecrets(req.Storage, roleName, role.HMACKey); err != nil {
- return nil, fmt.Errorf("failed to invalidate the secrets belonging to role '%s': %s", roleName, err)
- }
-
- // Delete the reverse mapping from RoleID to the role
- if err = b.roleIDEntryDelete(req.Storage, role.RoleID); err != nil {
- return nil, fmt.Errorf("failed to delete the mapping from RoleID to role '%s': %s", roleName, err)
- }
-
- // After deleting the SecretIDs and the RoleID, delete the role itself
- if err = req.Storage.Delete("role/" + strings.ToLower(roleName)); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// Returns the properties of the SecretID
-func (b *backend) pathRoleSecretIDLookupUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- secretID := data.Get("secret_id").(string)
- if secretID == "" {
- return logical.ErrorResponse("missing secret_id"), nil
- }
-
- // Fetch the role
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, fmt.Errorf("role %s does not exist", roleName)
- }
-
- // Create the HMAC of the secret ID using the per-role HMAC key
- secretIDHMAC, err := createHMAC(role.HMACKey, secretID)
- if err != nil {
- return nil, fmt.Errorf("failed to create HMAC of secret_id: %s", err)
- }
-
- // Create the HMAC of the roleName using the per-role HMAC key
- roleNameHMAC, err := createHMAC(role.HMACKey, roleName)
- if err != nil {
- return nil, fmt.Errorf("failed to create HMAC of role_name: %s", err)
- }
-
- // Create the index at which the secret_id would've been stored
- entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
-
- return b.secretIDCommon(req.Storage, entryIndex, secretIDHMAC)
-}
-
-func (b *backend) secretIDCommon(s logical.Storage, entryIndex, secretIDHMAC string) (*logical.Response, error) {
- lock := b.secretIDLock(secretIDHMAC)
- lock.RLock()
- defer lock.RUnlock()
-
- result := secretIDStorageEntry{}
- if entry, err := s.Get(entryIndex); err != nil {
- return nil, err
- } else if entry == nil {
- return nil, nil
- } else if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- result.SecretIDTTL /= time.Second
- d := structs.New(result).Map()
-
- // Converting the time values to RFC3339Nano format.
- //
- // Map() from 'structs' package formats time in RFC3339Nano.
- // In order to not break the API due to a modification in the
- // third party package, converting the time values again.
- d["creation_time"] = result.CreationTime.Format(time.RFC3339Nano)
- d["expiration_time"] = result.ExpirationTime.Format(time.RFC3339Nano)
- d["last_updated_time"] = result.LastUpdatedTime.Format(time.RFC3339Nano)
-
- resp := &logical.Response{
- Data: d,
- }
-
- if _, ok := d["SecretIDNumUses"]; ok {
- resp.AddWarning("The field SecretIDNumUses is deprecated and will be removed in a future release; refer to secret_id_num_uses instead")
- }
-
- return resp, nil
-}
-
-func (b *backend) pathRoleSecretIDDestroyUpdateDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- secretID := data.Get("secret_id").(string)
- if secretID == "" {
- return logical.ErrorResponse("missing secret_id"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, fmt.Errorf("role %s does not exist", roleName)
- }
-
- secretIDHMAC, err := createHMAC(role.HMACKey, secretID)
- if err != nil {
- return nil, fmt.Errorf("failed to create HMAC of secret_id: %s", err)
- }
-
- roleNameHMAC, err := createHMAC(role.HMACKey, roleName)
- if err != nil {
- return nil, fmt.Errorf("failed to create HMAC of role_name: %s", err)
- }
-
- entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
-
- lock := b.secretIDLock(secretIDHMAC)
- lock.Lock()
- defer lock.Unlock()
-
- result := secretIDStorageEntry{}
- if entry, err := req.Storage.Get(entryIndex); err != nil {
- return nil, err
- } else if entry == nil {
- return nil, nil
- } else if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- // Delete the accessor of the SecretID first
- if err := b.deleteSecretIDAccessorEntry(req.Storage, result.SecretIDAccessor); err != nil {
- return nil, err
- }
-
- // Delete the storage entry that corresponds to the SecretID
- if err := req.Storage.Delete(entryIndex); err != nil {
- return nil, fmt.Errorf("failed to delete SecretID: %s", err)
- }
-
- return nil, nil
-}
-
-// pathRoleSecretIDAccessorLookupUpdate returns the properties of the SecretID
-// given its accessor
-func (b *backend) pathRoleSecretIDAccessorLookupUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- secretIDAccessor := data.Get("secret_id_accessor").(string)
- if secretIDAccessor == "" {
- return logical.ErrorResponse("missing secret_id_accessor"), nil
- }
-
- // SecretID is indexed based on HMACed roleName and HMACed SecretID.
- // Get the role details to fetch the RoleID and accessor to get
- // the HMACed SecretID.
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, fmt.Errorf("role %s does not exist", roleName)
- }
-
- accessorEntry, err := b.secretIDAccessorEntry(req.Storage, secretIDAccessor)
- if err != nil {
- return nil, err
- }
- if accessorEntry == nil {
- return nil, fmt.Errorf("failed to find accessor entry for secret_id_accessor:%s\n", secretIDAccessor)
- }
-
- roleNameHMAC, err := createHMAC(role.HMACKey, roleName)
- if err != nil {
- return nil, fmt.Errorf("failed to create HMAC of role_name: %s", err)
- }
-
- entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, accessorEntry.SecretIDHMAC)
-
- return b.secretIDCommon(req.Storage, entryIndex, accessorEntry.SecretIDHMAC)
-}
-
-func (b *backend) pathRoleSecretIDAccessorDestroyUpdateDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- secretIDAccessor := data.Get("secret_id_accessor").(string)
- if secretIDAccessor == "" {
- return logical.ErrorResponse("missing secret_id_accessor"), nil
- }
-
- // SecretID is indexed based on HMACed roleName and HMACed SecretID.
- // Get the role details to fetch the RoleID and accessor to get
- // the HMACed SecretID.
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, fmt.Errorf("role %s does not exist", roleName)
- }
-
- accessorEntry, err := b.secretIDAccessorEntry(req.Storage, secretIDAccessor)
- if err != nil {
- return nil, err
- }
- if accessorEntry == nil {
- return nil, fmt.Errorf("failed to find accessor entry for secret_id_accessor:%s\n", secretIDAccessor)
- }
-
- roleNameHMAC, err := createHMAC(role.HMACKey, roleName)
- if err != nil {
- return nil, fmt.Errorf("failed to create HMAC of role_name: %s", err)
- }
-
- entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, accessorEntry.SecretIDHMAC)
-
- lock := b.secretIDLock(accessorEntry.SecretIDHMAC)
- lock.Lock()
- defer lock.Unlock()
-
- // Delete the accessor of the SecretID first
- if err := b.deleteSecretIDAccessorEntry(req.Storage, secretIDAccessor); err != nil {
- return nil, err
- }
-
- // Delete the storage entry that corresponds to the SecretID
- if err := req.Storage.Delete(entryIndex); err != nil {
- return nil, fmt.Errorf("failed to delete SecretID: %s", err)
- }
-
- return nil, nil
-}
-
-func (b *backend) pathRoleBoundCIDRListUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- role.BoundCIDRList = strings.TrimSpace(data.Get("bound_cidr_list").(string))
- if role.BoundCIDRList == "" {
- return logical.ErrorResponse("missing bound_cidr_list"), nil
- }
-
- if role.BoundCIDRList != "" {
- valid, err := cidrutil.ValidateCIDRListString(role.BoundCIDRList, ",")
- if err != nil {
- return nil, fmt.Errorf("failed to validate CIDR blocks: %q", err)
- }
- if !valid {
- return logical.ErrorResponse("failed to validate CIDR blocks"), nil
- }
- }
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
-}
-
-func (b *backend) pathRoleBoundCIDRListRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if role == nil {
- return nil, nil
- } else {
- return &logical.Response{
- Data: map[string]interface{}{
- "bound_cidr_list": role.BoundCIDRList,
- },
- }, nil
- }
-}
-
-func (b *backend) pathRoleBoundCIDRListDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- // Deleting a field implies setting the value to it's default value.
- role.BoundCIDRList = data.GetDefaultOrZero("bound_cidr_list").(string)
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
-}
-
-func (b *backend) pathRoleBindSecretIDUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- if bindSecretIDRaw, ok := data.GetOk("bind_secret_id"); ok {
- role.BindSecretID = bindSecretIDRaw.(bool)
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
- } else {
- return logical.ErrorResponse("missing bind_secret_id"), nil
- }
-}
-
-func (b *backend) pathRoleBindSecretIDRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if role == nil {
- return nil, nil
- } else {
- return &logical.Response{
- Data: map[string]interface{}{
- "bind_secret_id": role.BindSecretID,
- },
- }, nil
- }
-}
-
-func (b *backend) pathRoleBindSecretIDDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- // Deleting a field implies setting the value to it's default value.
- role.BindSecretID = data.GetDefaultOrZero("bind_secret_id").(bool)
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
-}
-
-func (b *backend) pathRolePoliciesUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- policiesRaw, ok := data.GetOk("policies")
- if !ok {
- return logical.ErrorResponse("missing policies"), nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- role.Policies = policyutil.ParsePolicies(policiesRaw)
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
-}
-
-func (b *backend) pathRolePoliciesRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if role == nil {
- return nil, nil
- } else {
- return &logical.Response{
- Data: map[string]interface{}{
- "policies": role.Policies,
- },
- }, nil
- }
-}
-
-func (b *backend) pathRolePoliciesDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- role.Policies = []string{}
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
-}
-
-func (b *backend) pathRoleSecretIDNumUsesUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- if numUsesRaw, ok := data.GetOk("secret_id_num_uses"); ok {
- role.SecretIDNumUses = numUsesRaw.(int)
- if role.SecretIDNumUses < 0 {
- return logical.ErrorResponse("secret_id_num_uses cannot be negative"), nil
- }
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
- } else {
- return logical.ErrorResponse("missing secret_id_num_uses"), nil
- }
-}
-
-func (b *backend) pathRoleRoleIDUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- previousRoleID := role.RoleID
- role.RoleID = data.Get("role_id").(string)
- if role.RoleID == "" {
- return logical.ErrorResponse("missing role_id"), nil
- }
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, previousRoleID)
-}
-
-func (b *backend) pathRoleRoleIDRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if role == nil {
- return nil, nil
- } else {
- return &logical.Response{
- Data: map[string]interface{}{
- "role_id": role.RoleID,
- },
- }, nil
- }
-}
-
-func (b *backend) pathRoleSecretIDNumUsesRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if role == nil {
- return nil, nil
- } else {
- return &logical.Response{
- Data: map[string]interface{}{
- "secret_id_num_uses": role.SecretIDNumUses,
- },
- }, nil
- }
-}
-
-func (b *backend) pathRoleSecretIDNumUsesDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- role.SecretIDNumUses = data.GetDefaultOrZero("secret_id_num_uses").(int)
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
-}
-
-func (b *backend) pathRoleSecretIDTTLUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- if secretIDTTLRaw, ok := data.GetOk("secret_id_ttl"); ok {
- role.SecretIDTTL = time.Second * time.Duration(secretIDTTLRaw.(int))
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
- } else {
- return logical.ErrorResponse("missing secret_id_ttl"), nil
- }
-}
-
-func (b *backend) pathRoleSecretIDTTLRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if role == nil {
- return nil, nil
- } else {
- role.SecretIDTTL /= time.Second
- return &logical.Response{
- Data: map[string]interface{}{
- "secret_id_ttl": role.SecretIDTTL,
- },
- }, nil
- }
-}
-
-func (b *backend) pathRoleSecretIDTTLDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- role.SecretIDTTL = time.Second * time.Duration(data.GetDefaultOrZero("secret_id_ttl").(int))
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
-}
-
-func (b *backend) pathRolePeriodUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- if periodRaw, ok := data.GetOk("period"); ok {
- role.Period = time.Second * time.Duration(periodRaw.(int))
- if role.Period > b.System().MaxLeaseTTL() {
- return logical.ErrorResponse(fmt.Sprintf("'period' of '%s' is greater than the backend's maximum lease TTL of '%s'", role.Period.String(), b.System().MaxLeaseTTL().String())), nil
- }
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
- } else {
- return logical.ErrorResponse("missing period"), nil
- }
-}
-
-func (b *backend) pathRolePeriodRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if role == nil {
- return nil, nil
- } else {
- role.Period /= time.Second
- return &logical.Response{
- Data: map[string]interface{}{
- "period": role.Period,
- },
- }, nil
- }
-}
-
-func (b *backend) pathRolePeriodDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- role.Period = time.Second * time.Duration(data.GetDefaultOrZero("period").(int))
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
-}
-
-func (b *backend) pathRoleTokenNumUsesUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- if tokenNumUsesRaw, ok := data.GetOk("token_num_uses"); ok {
- role.TokenNumUses = tokenNumUsesRaw.(int)
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
- } else {
- return logical.ErrorResponse("missing token_num_uses"), nil
- }
-}
-
-func (b *backend) pathRoleTokenNumUsesRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if role == nil {
- return nil, nil
- } else {
- return &logical.Response{
- Data: map[string]interface{}{
- "token_num_uses": role.TokenNumUses,
- },
- }, nil
- }
-}
-
-func (b *backend) pathRoleTokenNumUsesDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- role.TokenNumUses = data.GetDefaultOrZero("token_num_uses").(int)
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
-}
-
-func (b *backend) pathRoleTokenTTLUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- if tokenTTLRaw, ok := data.GetOk("token_ttl"); ok {
- role.TokenTTL = time.Second * time.Duration(tokenTTLRaw.(int))
- if role.TokenMaxTTL > time.Duration(0) && role.TokenTTL > role.TokenMaxTTL {
- return logical.ErrorResponse("token_ttl should not be greater than token_max_ttl"), nil
- }
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
- } else {
- return logical.ErrorResponse("missing token_ttl"), nil
- }
-}
-
-func (b *backend) pathRoleTokenTTLRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if role == nil {
- return nil, nil
- } else {
- role.TokenTTL /= time.Second
- return &logical.Response{
- Data: map[string]interface{}{
- "token_ttl": role.TokenTTL,
- },
- }, nil
- }
-}
-
-func (b *backend) pathRoleTokenTTLDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- role.TokenTTL = time.Second * time.Duration(data.GetDefaultOrZero("token_ttl").(int))
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
-}
-
-func (b *backend) pathRoleTokenMaxTTLUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- if tokenMaxTTLRaw, ok := data.GetOk("token_max_ttl"); ok {
- role.TokenMaxTTL = time.Second * time.Duration(tokenMaxTTLRaw.(int))
- if role.TokenMaxTTL > time.Duration(0) && role.TokenTTL > role.TokenMaxTTL {
- return logical.ErrorResponse("token_max_ttl should be greater than or equal to token_ttl"), nil
- }
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
- } else {
- return logical.ErrorResponse("missing token_max_ttl"), nil
- }
-}
-
-func (b *backend) pathRoleTokenMaxTTLRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if role, err := b.roleEntry(req.Storage, strings.ToLower(roleName)); err != nil {
- return nil, err
- } else if role == nil {
- return nil, nil
- } else {
- role.TokenMaxTTL /= time.Second
- return &logical.Response{
- Data: map[string]interface{}{
- "token_max_ttl": role.TokenMaxTTL,
- },
- }, nil
- }
-}
-
-func (b *backend) pathRoleTokenMaxTTLDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- lock := b.roleLock(roleName)
-
- lock.Lock()
- defer lock.Unlock()
-
- role.TokenMaxTTL = time.Second * time.Duration(data.GetDefaultOrZero("token_max_ttl").(int))
-
- return nil, b.setRoleEntry(req.Storage, roleName, role, "")
-}
-
-func (b *backend) pathRoleSecretIDUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- secretID, err := uuid.GenerateUUID()
- if err != nil {
- return nil, fmt.Errorf("failed to generate SecretID:%s", err)
- }
- return b.handleRoleSecretIDCommon(req, data, secretID)
-}
-
-func (b *backend) pathRoleCustomSecretIDUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRoleSecretIDCommon(req, data, data.Get("secret_id").(string))
-}
-
-func (b *backend) handleRoleSecretIDCommon(req *logical.Request, data *framework.FieldData, secretID string) (*logical.Response, error) {
- roleName := data.Get("role_name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role_name"), nil
- }
-
- if secretID == "" {
- return logical.ErrorResponse("missing secret_id"), nil
- }
-
- role, err := b.roleEntry(req.Storage, strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("role %s does not exist", roleName)), nil
- }
-
- if !role.BindSecretID {
- return logical.ErrorResponse("bind_secret_id is not set on the role"), nil
- }
-
- cidrList := data.Get("cidr_list").(string)
-
- // Validate the list of CIDR blocks
- if cidrList != "" {
- valid, err := cidrutil.ValidateCIDRListString(cidrList, ",")
- if err != nil {
- return nil, fmt.Errorf("failed to validate CIDR blocks: %q", err)
- }
- if !valid {
- return logical.ErrorResponse("failed to validate CIDR blocks"), nil
- }
- }
-
- // Parse the CIDR blocks into a slice
- secretIDCIDRs := strutil.ParseDedupLowercaseAndSortStrings(cidrList, ",")
-
- // Ensure that the CIDRs on the secret ID are a subset of that of role's
- if err := verifyCIDRRoleSecretIDSubset(secretIDCIDRs, role.BoundCIDRList); err != nil {
- return nil, err
- }
-
- secretIDStorage := &secretIDStorageEntry{
- SecretIDNumUses: role.SecretIDNumUses,
- SecretIDTTL: role.SecretIDTTL,
- Metadata: make(map[string]string),
- CIDRList: secretIDCIDRs,
- }
-
- if err = strutil.ParseArbitraryKeyValues(data.Get("metadata").(string), secretIDStorage.Metadata, ","); err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to parse metadata: %v", err)), nil
- }
-
- if secretIDStorage, err = b.registerSecretIDEntry(req.Storage, roleName, secretID, role.HMACKey, secretIDStorage); err != nil {
- return nil, fmt.Errorf("failed to store SecretID: %s", err)
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "secret_id": secretID,
- "secret_id_accessor": secretIDStorage.SecretIDAccessor,
- },
- }, nil
-}
-
-func (b *backend) roleIDLock(roleID string) *locksutil.LockEntry {
- return locksutil.LockForKey(b.roleIDLocks, roleID)
-}
-
-func (b *backend) roleLock(roleName string) *locksutil.LockEntry {
- return locksutil.LockForKey(b.roleLocks, roleName)
-}
-
-// setRoleIDEntry creates a storage entry that maps RoleID to Role
-func (b *backend) setRoleIDEntry(s logical.Storage, roleID string, roleIDEntry *roleIDStorageEntry) error {
- lock := b.roleIDLock(roleID)
- lock.Lock()
- defer lock.Unlock()
-
- salt, err := b.Salt()
- if err != nil {
- return err
- }
- entryIndex := "role_id/" + salt.SaltID(roleID)
-
- entry, err := logical.StorageEntryJSON(entryIndex, roleIDEntry)
- if err != nil {
- return err
- }
- if err = s.Put(entry); err != nil {
- return err
- }
- return nil
-}
-
-// roleIDEntry is used to read the storage entry that maps RoleID to Role
-func (b *backend) roleIDEntry(s logical.Storage, roleID string) (*roleIDStorageEntry, error) {
- if roleID == "" {
- return nil, fmt.Errorf("missing roleID")
- }
-
- lock := b.roleIDLock(roleID)
- lock.RLock()
- defer lock.RUnlock()
-
- var result roleIDStorageEntry
-
- salt, err := b.Salt()
- if err != nil {
- return nil, err
- }
- entryIndex := "role_id/" + salt.SaltID(roleID)
-
- if entry, err := s.Get(entryIndex); err != nil {
- return nil, err
- } else if entry == nil {
- return nil, nil
- } else if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-// roleIDEntryDelete is used to remove the secondary index that maps the
-// RoleID to the Role itself.
-func (b *backend) roleIDEntryDelete(s logical.Storage, roleID string) error {
- if roleID == "" {
- return fmt.Errorf("missing roleID")
- }
-
- lock := b.roleIDLock(roleID)
- lock.Lock()
- defer lock.Unlock()
-
- salt, err := b.Salt()
- if err != nil {
- return err
- }
- entryIndex := "role_id/" + salt.SaltID(roleID)
-
- return s.Delete(entryIndex)
-}
-
-var roleHelp = map[string][2]string{
- "role-list": {
- "Lists all the roles registered with the backend.",
- "The list will contain the names of the roles.",
- },
- "role": {
- "Register an role with the backend.",
- `A role can represent a service, a machine or anything that can be IDed.
-The set of policies on the role defines access to the role, meaning, any
-Vault token with a policy set that is a superset of the policies on the
-role registered here will have access to the role. If a SecretID is desired
-to be generated against only this specific role, it can be done via
-'role//secret-id' and 'role//custom-secret-id' endpoints.
-The properties of the SecretID created against the role and the properties
-of the token issued with the SecretID generated againt the role, can be
-configured using the parameters of this endpoint.`,
- },
- "role-bind-secret-id": {
- "Impose secret_id to be presented during login using this role.",
- `By setting this to 'true', during login the parameter 'secret_id' becomes a mandatory argument.
-The value of 'secret_id' can be retrieved using 'role//secret-id' endpoint.`,
- },
- "role-bound-cidr-list": {
- `Comma separated list of CIDR blocks, if set, specifies blocks of IP
-addresses which can perform the login operation`,
- `During login, the IP address of the client will be checked to see if it
-belongs to the CIDR blocks specified. If CIDR blocks were set and if the
-IP is not encompassed by it, login fails`,
- },
- "role-policies": {
- "Policies of the role.",
- `A comma-delimited set of Vault policies that defines access to the role.
-All the Vault tokens with policies that encompass the policy set
-defined on the role, can access the role.`,
- },
- "role-secret-id-num-uses": {
- "Use limit of the SecretID generated against the role.",
- `If the SecretIDs are generated/assigned against the role using the
-'role//secret-id' or 'role//custom-secret-id' endpoints,
-then the number of times that SecretID can access the role is defined by
-this option.`,
- },
- "role-secret-id-ttl": {
- `Duration in seconds, representing the lifetime of the SecretIDs
-that are generated against the role using 'role//secret-id' or
-'role//custom-secret-id' endpoints.`,
- ``,
- },
- "role-secret-id-lookup": {
- "Read the properties of an issued secret_id",
- `This endpoint is used to read the properties of a secret_id associated to a
-role.`},
- "role-secret-id-destroy": {
- "Invalidate an issued secret_id",
- `This endpoint is used to delete the properties of a secret_id associated to a
-role.`},
- "role-secret-id-accessor-lookup": {
- "Read an issued secret_id, using its accessor",
- `This is particularly useful to lookup the non-expiring 'secret_id's.
-The list operation on the 'role//secret-id' endpoint will return
-the 'secret_id_accessor's. This endpoint can be used to read the properties
-of the secret. If the 'secret_id_num_uses' field in the response is 0, it
-represents a non-expiring 'secret_id'.`,
- },
- "role-secret-id-accessor-destroy": {
- "Delete an issued secret_id, using its accessor",
- `This is particularly useful to clean-up the non-expiring 'secret_id's.
-The list operation on the 'role//secret-id' endpoint will return
-the 'secret_id_accessor's. This endpoint can be used to read the properties
-of the secret. If the 'secret_id_num_uses' field in the response is 0, it
-represents a non-expiring 'secret_id'.`,
- },
- "role-token-num-uses": {
- "Number of times issued tokens can be used",
- `By default, this will be set to zero, indicating that the issued
-tokens can be used any number of times.`,
- },
- "role-token-ttl": {
- `Duration in seconds, the lifetime of the token issued by using the SecretID that
-is generated against this role, before which the token needs to be renewed.`,
- `If SecretIDs are generated against the role, using 'role//secret-id' or the
-'role//custom-secret-id' endpoints, and if those SecretIDs are used
-to perform the login operation, then the value of 'token-ttl' defines the
-lifetime of the token issued, before which the token needs to be renewed.`,
- },
- "role-token-max-ttl": {
- `Duration in seconds, the maximum lifetime of the tokens issued by using
-the SecretIDs that were generated against this role, after which the
-tokens are not allowed to be renewed.`,
- `If SecretIDs are generated against the role using 'role//secret-id'
-or the 'role//custom-secret-id' endpoints, and if those SecretIDs
-are used to perform the login operation, then the value of 'token-max-ttl'
-defines the maximum lifetime of the tokens issued, after which the tokens
-cannot be renewed. A reauthentication is required after this duration.
-This value will be capped by the backend mount's maximum TTL value.`,
- },
- "role-id": {
- "Returns the 'role_id' of the role.",
- `If login is performed from an role, then its 'role_id' should be presented
-as a credential during the login. This 'role_id' can be retrieved using
-this endpoint.`,
- },
- "role-secret-id": {
- "Generate a SecretID against this role.",
- `The SecretID generated using this endpoint will be scoped to access
-just this role and none else. The properties of this SecretID will be
-based on the options set on the role. It will expire after a period
-defined by the 'secret_id_ttl' option on the role and/or the backend
-mount's maximum TTL value.`,
- },
- "role-custom-secret-id": {
- "Assign a SecretID of choice against the role.",
- `This option is not recommended unless there is a specific need
-to do so. This will assign a client supplied SecretID to be used to access
-the role. This SecretID will behave similarly to the SecretIDs generated by
-the backend. The properties of this SecretID will be based on the options
-set on the role. It will expire after a period defined by the 'secret_id_ttl'
-option on the role and/or the backend mount's maximum TTL value.`,
- },
- "role-period": {
- "Updates the value of 'period' on the role",
- `If set, indicates that the token generated using this role
-should never expire. The token should be renewed within the
-duration specified by this value. The renewal duration will
-be fixed. If the Period in the role is modified, the token
-will pick up the new value during its next renewal.`,
- },
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go
deleted file mode 100644
index fa3e681..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go
+++ /dev/null
@@ -1,1070 +0,0 @@
-package approle
-
-import (
- "reflect"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/mitchellh/mapstructure"
-)
-
-func TestAppRole_CIDRSubset(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, storage := createBackendWithStorage(t)
-
- roleData := map[string]interface{}{
- "role_id": "role-id-123",
- "policies": "a,b",
- "bound_cidr_list": "127.0.0.1/24",
- }
-
- roleReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/testrole1",
- Storage: storage,
- Data: roleData,
- }
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err: %v resp: %#v", err, resp)
- }
-
- secretIDData := map[string]interface{}{
- "cidr_list": "127.0.0.1/16",
- }
- secretIDReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "role/testrole1/secret-id",
- Data: secretIDData,
- }
-
- resp, err = b.HandleRequest(secretIDReq)
- if resp != nil || resp.IsError() {
- t.Fatalf("resp:%#v", resp)
- }
- if err == nil {
- t.Fatal("expected an error")
- }
-
- roleData["bound_cidr_list"] = "192.168.27.29/16,172.245.30.40/24,10.20.30.40/30"
- roleReq.Operation = logical.UpdateOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err: %v resp: %#v", err, resp)
- }
-
- secretIDData["cidr_list"] = "192.168.27.29/20,172.245.30.40/25,10.20.30.40/32"
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil && resp.IsError() {
- t.Fatalf("resp: %#v", resp)
- }
-}
-
-func TestAppRole_RoleConstraints(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- roleData := map[string]interface{}{
- "role_id": "role-id-123",
- "policies": "a,b",
- }
-
- roleReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/testrole1",
- Storage: storage,
- Data: roleData,
- }
-
- // Set bind_secret_id, which is enabled by default
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Set bound_cidr_list alone by explicitly disabling bind_secret_id
- roleReq.Operation = logical.UpdateOperation
- roleData["bind_secret_id"] = false
- roleData["bound_cidr_list"] = "0.0.0.0/0"
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Remove both constraints
- roleReq.Operation = logical.UpdateOperation
- roleData["bound_cidr_list"] = ""
- roleData["bind_secret_id"] = false
- resp, err = b.HandleRequest(roleReq)
- if resp != nil && resp.IsError() {
- t.Fatalf("err:%v, resp:%#v", err, resp)
- }
- if err == nil {
- t.Fatalf("expected an error")
- }
-}
-
-func TestAppRole_RoleIDUpdate(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- roleData := map[string]interface{}{
- "role_id": "role-id-123",
- "policies": "a,b",
- "secret_id_num_uses": 10,
- "secret_id_ttl": 300,
- "token_ttl": 400,
- "token_max_ttl": 500,
- }
- roleReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/testrole1",
- Storage: storage,
- Data: roleData,
- }
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleIDUpdateReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "role/testrole1/role-id",
- Storage: storage,
- Data: map[string]interface{}{
- "role_id": "customroleid",
- },
- }
- resp, err = b.HandleRequest(roleIDUpdateReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- secretIDReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "role/testrole1/secret-id",
- }
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- secretID := resp.Data["secret_id"].(string)
-
- loginData := map[string]interface{}{
- "role_id": "customroleid",
- "secret_id": secretID,
- }
- loginReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "login",
- Storage: storage,
- Data: loginData,
- Connection: &logical.Connection{
- RemoteAddr: "127.0.0.1",
- },
- }
- resp, err = b.HandleRequest(loginReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Auth == nil {
- t.Fatalf("expected a non-nil auth object in the response")
- }
-}
-
-func TestAppRole_RoleIDUniqueness(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- roleData := map[string]interface{}{
- "role_id": "role-id-123",
- "policies": "a,b",
- "secret_id_num_uses": 10,
- "secret_id_ttl": 300,
- "token_ttl": 400,
- "token_max_ttl": 500,
- }
- roleReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/testrole1",
- Storage: storage,
- Data: roleData,
- }
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Path = "role/testrole2"
- resp, err = b.HandleRequest(roleReq)
- if err == nil && !(resp != nil && resp.IsError()) {
- t.Fatalf("expected an error: got resp:%#v", resp)
- }
-
- roleData["role_id"] = "role-id-456"
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.UpdateOperation
- roleData["role_id"] = "role-id-123"
- resp, err = b.HandleRequest(roleReq)
- if err == nil && !(resp != nil && resp.IsError()) {
- t.Fatalf("expected an error: got resp:%#v", resp)
- }
-
- roleReq.Path = "role/testrole1"
- roleData["role_id"] = "role-id-456"
- resp, err = b.HandleRequest(roleReq)
- if err == nil && !(resp != nil && resp.IsError()) {
- t.Fatalf("expected an error: got resp:%#v", resp)
- }
-
- roleIDData := map[string]interface{}{
- "role_id": "role-id-456",
- }
- roleIDReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "role/testrole1/role-id",
- Storage: storage,
- Data: roleIDData,
- }
- resp, err = b.HandleRequest(roleIDReq)
- if err == nil && !(resp != nil && resp.IsError()) {
- t.Fatalf("expected an error: got resp:%#v", resp)
- }
-
- roleIDData["role_id"] = "role-id-123"
- roleIDReq.Path = "role/testrole2/role-id"
- resp, err = b.HandleRequest(roleIDReq)
- if err == nil && !(resp != nil && resp.IsError()) {
- t.Fatalf("expected an error: got resp:%#v", resp)
- }
-
- roleIDData["role_id"] = "role-id-2000"
- resp, err = b.HandleRequest(roleIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleIDData["role_id"] = "role-id-1000"
- roleIDReq.Path = "role/testrole1/role-id"
- resp, err = b.HandleRequest(roleIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-}
-
-func TestAppRole_RoleDeleteSecretID(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- createRole(t, b, storage, "role1", "a,b")
- secretIDReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "role/role1/secret-id",
- }
- // Create 3 secrets on the role
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- listReq := &logical.Request{
- Operation: logical.ListOperation,
- Storage: storage,
- Path: "role/role1/secret-id",
- }
- resp, err = b.HandleRequest(listReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- secretIDAccessors := resp.Data["keys"].([]string)
- if len(secretIDAccessors) != 3 {
- t.Fatalf("bad: len of secretIDAccessors: expected:3 actual:%d", len(secretIDAccessors))
- }
-
- roleReq := &logical.Request{
- Operation: logical.DeleteOperation,
- Storage: storage,
- Path: "role/role1",
- }
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- resp, err = b.HandleRequest(listReq)
- if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
- t.Fatalf("expected an error. err:%v resp:%#v", err, resp)
- }
-}
-
-func TestAppRole_RoleSecretIDReadDelete(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- createRole(t, b, storage, "role1", "a,b")
- secretIDCreateReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "role/role1/secret-id",
- }
- resp, err = b.HandleRequest(secretIDCreateReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- secretID := resp.Data["secret_id"].(string)
- if secretID == "" {
- t.Fatal("expected non empty secret ID")
- }
-
- secretIDReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "role/role1/secret-id/lookup",
- Data: map[string]interface{}{
- "secret_id": secretID,
- },
- }
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- if resp.Data == nil {
- t.Fatal(err)
- }
-
- deleteSecretIDReq := &logical.Request{
- Operation: logical.DeleteOperation,
- Storage: storage,
- Path: "role/role1/secret-id/destroy",
- Data: map[string]interface{}{
- "secret_id": secretID,
- },
- }
- resp, err = b.HandleRequest(deleteSecretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- resp, err = b.HandleRequest(secretIDReq)
- if resp != nil && resp.IsError() {
- t.Fatalf("error response:%#v", resp)
- }
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestAppRole_RoleSecretIDAccessorReadDelete(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- createRole(t, b, storage, "role1", "a,b")
- secretIDReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "role/role1/secret-id",
- }
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- listReq := &logical.Request{
- Operation: logical.ListOperation,
- Storage: storage,
- Path: "role/role1/secret-id",
- }
- resp, err = b.HandleRequest(listReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- hmacSecretID := resp.Data["keys"].([]string)[0]
-
- hmacReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "role/role1/secret-id-accessor/lookup",
- Data: map[string]interface{}{
- "secret_id_accessor": hmacSecretID,
- },
- }
- resp, err = b.HandleRequest(hmacReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- if resp.Data == nil {
- t.Fatal(err)
- }
-
- hmacReq.Path = "role/role1/secret-id-accessor/destroy"
- resp, err = b.HandleRequest(hmacReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- hmacReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(hmacReq)
- if resp != nil && resp.IsError() {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- if err == nil {
- t.Fatalf("expected an error")
- }
-}
-
-func TestAppRoleRoleListSecretID(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- createRole(t, b, storage, "role1", "a,b")
-
- secretIDReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "role/role1/secret-id",
- }
- // Create 5 'secret_id's
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- listReq := &logical.Request{
- Operation: logical.ListOperation,
- Storage: storage,
- Path: "role/role1/secret-id/",
- }
- resp, err = b.HandleRequest(listReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- secrets := resp.Data["keys"].([]string)
- if len(secrets) != 5 {
- t.Fatalf("bad: len of secrets: expected:5 actual:%d", len(secrets))
- }
-}
-
-func TestAppRole_RoleList(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- createRole(t, b, storage, "role1", "a,b")
- createRole(t, b, storage, "role2", "c,d")
- createRole(t, b, storage, "role3", "e,f")
- createRole(t, b, storage, "role4", "g,h")
- createRole(t, b, storage, "role5", "i,j")
-
- listReq := &logical.Request{
- Operation: logical.ListOperation,
- Path: "role",
- Storage: storage,
- }
- resp, err = b.HandleRequest(listReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- actual := resp.Data["keys"].([]string)
- expected := []string{"role1", "role2", "role3", "role4", "role5"}
- if !policyutil.EquivalentPolicies(actual, expected) {
- t.Fatalf("bad: listed roles: expected:%s\nactual:%s", expected, actual)
- }
-}
-
-func TestAppRole_RoleSecretID(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- roleData := map[string]interface{}{
- "policies": "p,q,r,s",
- "secret_id_num_uses": 10,
- "secret_id_ttl": 300,
- "token_ttl": 400,
- "token_max_ttl": 500,
- }
- roleReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/role1",
- Storage: storage,
- Data: roleData,
- }
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleSecretIDReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "role/role1/secret-id",
- Storage: storage,
- }
- resp, err = b.HandleRequest(roleSecretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["secret_id"].(string) == "" {
- t.Fatalf("failed to generate secret_id")
- }
-
- roleSecretIDReq.Path = "role/role1/custom-secret-id"
- roleCustomSecretIDData := map[string]interface{}{
- "secret_id": "abcd123",
- }
- roleSecretIDReq.Data = roleCustomSecretIDData
- roleSecretIDReq.Operation = logical.UpdateOperation
- resp, err = b.HandleRequest(roleSecretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["secret_id"] != "abcd123" {
- t.Fatalf("failed to set specific secret_id to role")
- }
-}
-
-func TestAppRole_RoleCRUD(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- roleData := map[string]interface{}{
- "policies": "p,q,r,s",
- "secret_id_num_uses": 10,
- "secret_id_ttl": 300,
- "token_ttl": 400,
- "token_max_ttl": 500,
- "token_num_uses": 600,
- "bound_cidr_list": "127.0.0.1/32,127.0.0.1/16",
- }
- roleReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/role1",
- Storage: storage,
- Data: roleData,
- }
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- expected := map[string]interface{}{
- "bind_secret_id": true,
- "policies": []string{"p", "q", "r", "s"},
- "secret_id_num_uses": 10,
- "secret_id_ttl": 300,
- "token_ttl": 400,
- "token_max_ttl": 500,
- "token_num_uses": 600,
- "bound_cidr_list": "127.0.0.1/32,127.0.0.1/16",
- }
- var expectedStruct roleStorageEntry
- err = mapstructure.Decode(expected, &expectedStruct)
- if err != nil {
- t.Fatal(err)
- }
-
- var actualStruct roleStorageEntry
- err = mapstructure.Decode(resp.Data, &actualStruct)
- if err != nil {
- t.Fatal(err)
- }
-
- expectedStruct.RoleID = actualStruct.RoleID
- if !reflect.DeepEqual(expectedStruct, actualStruct) {
- t.Fatalf("bad:\nexpected:%#v\nactual:%#v\n", expectedStruct, actualStruct)
- }
-
- roleData = map[string]interface{}{
- "role_id": "test_role_id",
- "policies": "a,b,c,d",
- "secret_id_num_uses": 100,
- "secret_id_ttl": 3000,
- "token_ttl": 4000,
- "token_max_ttl": 5000,
- }
- roleReq.Data = roleData
- roleReq.Operation = logical.UpdateOperation
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- expected = map[string]interface{}{
- "policies": []string{"a", "b", "c", "d"},
- "secret_id_num_uses": 100,
- "secret_id_ttl": 3000,
- "token_ttl": 4000,
- "token_max_ttl": 5000,
- }
- err = mapstructure.Decode(expected, &expectedStruct)
- if err != nil {
- t.Fatal(err)
- }
-
- err = mapstructure.Decode(resp.Data, &actualStruct)
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(expectedStruct, actualStruct) {
- t.Fatalf("bad:\nexpected:%#v\nactual:%#v\n", expectedStruct, actualStruct)
- }
-
- // RU for role_id field
- roleReq.Path = "role/role1/role-id"
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- if resp.Data["role_id"].(string) != "test_role_id" {
- t.Fatalf("bad: role_id: expected:test_role_id actual:%s\n", resp.Data["role_id"].(string))
- }
-
- roleReq.Data = map[string]interface{}{"role_id": "custom_role_id"}
- roleReq.Operation = logical.UpdateOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- if resp.Data["role_id"].(string) != "custom_role_id" {
- t.Fatalf("bad: role_id: expected:custom_role_id actual:%s\n", resp.Data["role_id"].(string))
- }
-
- // RUD for bind_secret_id field
- roleReq.Path = "role/role1/bind-secret-id"
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Data = map[string]interface{}{"bind_secret_id": false}
- roleReq.Operation = logical.UpdateOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["bind_secret_id"].(bool) {
- t.Fatalf("bad: bind_secret_id: expected:false actual:%t\n", resp.Data["bind_secret_id"].(bool))
- }
- roleReq.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if !resp.Data["bind_secret_id"].(bool) {
- t.Fatalf("expected the default value of 'true' to be set")
- }
-
- // RUD for policies field
- roleReq.Path = "role/role1/policies"
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Data = map[string]interface{}{"policies": "a1,b1,c1,d1"}
- roleReq.Operation = logical.UpdateOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if !reflect.DeepEqual(resp.Data["policies"].([]string), []string{"a1", "b1", "c1", "d1"}) {
- t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string))
- }
- roleReq.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- expectedPolicies := []string{"default"}
- actualPolicies := resp.Data["policies"].([]string)
- if !policyutil.EquivalentPolicies(expectedPolicies, actualPolicies) {
- t.Fatalf("bad: policies: expected:%s actual:%s", expectedPolicies, actualPolicies)
- }
-
- // RUD for secret-id-num-uses field
- roleReq.Path = "role/role1/secret-id-num-uses"
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Data = map[string]interface{}{"secret_id_num_uses": 200}
- roleReq.Operation = logical.UpdateOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["secret_id_num_uses"].(int) != 200 {
- t.Fatalf("bad: secret_id_num_uses: expected:200 actual:%d\n", resp.Data["secret_id_num_uses"].(int))
- }
- roleReq.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["secret_id_num_uses"].(int) != 0 {
- t.Fatalf("expected value to be reset")
- }
-
- // RUD for secret_id_ttl field
- roleReq.Path = "role/role1/secret-id-ttl"
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Data = map[string]interface{}{"secret_id_ttl": 3001}
- roleReq.Operation = logical.UpdateOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["secret_id_ttl"].(time.Duration) != 3001 {
- t.Fatalf("bad: secret_id_ttl: expected:3001 actual:%d\n", resp.Data["secret_id_ttl"].(time.Duration))
- }
- roleReq.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["secret_id_ttl"].(time.Duration) != 0 {
- t.Fatalf("expected value to be reset")
- }
-
- // RUD for secret-id-num-uses field
- roleReq.Path = "role/role1/token-num-uses"
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- if resp.Data["token_num_uses"].(int) != 600 {
- t.Fatalf("bad: token_num_uses: expected:600 actual:%d\n", resp.Data["token_num_uses"].(int))
- }
-
- roleReq.Data = map[string]interface{}{"token_num_uses": 60}
- roleReq.Operation = logical.UpdateOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["token_num_uses"].(int) != 60 {
- t.Fatalf("bad: token_num_uses: expected:60 actual:%d\n", resp.Data["token_num_uses"].(int))
- }
-
- roleReq.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["token_num_uses"].(int) != 0 {
- t.Fatalf("expected value to be reset")
- }
-
- // RUD for 'period' field
- roleReq.Path = "role/role1/period"
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Data = map[string]interface{}{"period": 9001}
- roleReq.Operation = logical.UpdateOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["period"].(time.Duration) != 9001 {
- t.Fatalf("bad: period: expected:9001 actual:%d\n", resp.Data["9001"].(time.Duration))
- }
- roleReq.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["period"].(time.Duration) != 0 {
- t.Fatalf("expected value to be reset")
- }
-
- // RUD for token_ttl field
- roleReq.Path = "role/role1/token-ttl"
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Data = map[string]interface{}{"token_ttl": 4001}
- roleReq.Operation = logical.UpdateOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["token_ttl"].(time.Duration) != 4001 {
- t.Fatalf("bad: token_ttl: expected:4001 actual:%d\n", resp.Data["token_ttl"].(time.Duration))
- }
- roleReq.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["token_ttl"].(time.Duration) != 0 {
- t.Fatalf("expected value to be reset")
- }
-
- // RUD for token_max_ttl field
- roleReq.Path = "role/role1/token-max-ttl"
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Data = map[string]interface{}{"token_max_ttl": 5001}
- roleReq.Operation = logical.UpdateOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["token_max_ttl"].(time.Duration) != 5001 {
- t.Fatalf("bad: token_max_ttl: expected:5001 actual:%d\n", resp.Data["token_max_ttl"].(time.Duration))
- }
- roleReq.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["token_max_ttl"].(time.Duration) != 0 {
- t.Fatalf("expected value to be reset")
- }
-
- // Delete test for role
- roleReq.Path = "role/role1"
- roleReq.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp != nil {
- t.Fatalf("expected a nil response")
- }
-}
-
-func createRole(t *testing.T, b *backend, s logical.Storage, roleName, policies string) {
- roleData := map[string]interface{}{
- "policies": policies,
- "secret_id_num_uses": 10,
- "secret_id_ttl": 300,
- "token_ttl": 400,
- "token_max_ttl": 500,
- }
- roleReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/" + roleName,
- Storage: s,
- Data: roleData,
- }
-
- resp, err := b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_tidy_user_id.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_tidy_user_id.go
deleted file mode 100644
index 4b06554..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_tidy_user_id.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package approle
-
-import (
- "fmt"
- "sync/atomic"
- "time"
-
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathTidySecretID(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "tidy/secret-id$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathTidySecretIDUpdate,
- },
-
- HelpSynopsis: pathTidySecretIDSyn,
- HelpDescription: pathTidySecretIDDesc,
- }
-}
-
-// tidySecretID is used to delete entries in the whitelist that are expired.
-func (b *backend) tidySecretID(s logical.Storage) error {
- grabbed := atomic.CompareAndSwapUint32(&b.tidySecretIDCASGuard, 0, 1)
- if grabbed {
- defer atomic.StoreUint32(&b.tidySecretIDCASGuard, 0)
- } else {
- return fmt.Errorf("SecretID tidy operation already running")
- }
-
- roleNameHMACs, err := s.List("secret_id/")
- if err != nil {
- return err
- }
-
- var result error
- for _, roleNameHMAC := range roleNameHMACs {
- // roleNameHMAC will already have a '/' suffix. Don't append another one.
- secretIDHMACs, err := s.List(fmt.Sprintf("secret_id/%s", roleNameHMAC))
- if err != nil {
- return err
- }
- for _, secretIDHMAC := range secretIDHMACs {
- // In order to avoid lock swroleing in case there is need to delete,
- // grab the write lock.
- lock := b.secretIDLock(secretIDHMAC)
- lock.Lock()
- // roleNameHMAC will already have a '/' suffix. Don't append another one.
- entryIndex := fmt.Sprintf("secret_id/%s%s", roleNameHMAC, secretIDHMAC)
- secretIDEntry, err := s.Get(entryIndex)
- if err != nil {
- lock.Unlock()
- return fmt.Errorf("error fetching SecretID %s: %s", secretIDHMAC, err)
- }
-
- if secretIDEntry == nil {
- result = multierror.Append(result, fmt.Errorf("entry for SecretID %s is nil", secretIDHMAC))
- lock.Unlock()
- continue
- }
-
- if secretIDEntry.Value == nil || len(secretIDEntry.Value) == 0 {
- lock.Unlock()
- return fmt.Errorf("found entry for SecretID %s but actual SecretID is empty", secretIDHMAC)
- }
-
- var result secretIDStorageEntry
- if err := secretIDEntry.DecodeJSON(&result); err != nil {
- lock.Unlock()
- return err
- }
-
- // ExpirationTime not being set indicates non-expiring SecretIDs
- if !result.ExpirationTime.IsZero() && time.Now().After(result.ExpirationTime) {
- if err := s.Delete(entryIndex); err != nil {
- lock.Unlock()
- return fmt.Errorf("error deleting SecretID %s from storage: %s", secretIDHMAC, err)
- }
- }
- lock.Unlock()
- }
- }
- return result
-}
-
-// pathTidySecretIDUpdate is used to delete the expired SecretID entries
-func (b *backend) pathTidySecretIDUpdate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return nil, b.tidySecretID(req.Storage)
-}
-
-const pathTidySecretIDSyn = "Trigger the clean-up of expired SecretID entries."
-const pathTidySecretIDDesc = `SecretIDs will have expiratin time attached to them. The periodic function
-of the backend will look for expired entries and delete them. This happens once in a minute. Invoking
-this endpoint will trigger the clean-up action, without waiting for the backend's periodic function.`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go
deleted file mode 100644
index c7e32e1..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go
+++ /dev/null
@@ -1,574 +0,0 @@
-package approle
-
-import (
- "crypto/hmac"
- "crypto/sha256"
- "encoding/hex"
- "fmt"
- "strings"
- "time"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/cidrutil"
- "github.com/hashicorp/vault/helper/locksutil"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// secretIDStorageEntry represents the information stored in storage
-// when a SecretID is created. The structure of the SecretID storage
-// entry is the same for all the types of SecretIDs generated.
-type secretIDStorageEntry struct {
- // Accessor for the SecretID. It is a random UUID serving as
- // a secondary index for the SecretID. This uniquely identifies
- // the SecretID it belongs to, and hence can be used for listing
- // and deleting SecretIDs. Accessors cannot be used as valid
- // SecretIDs during login.
- SecretIDAccessor string `json:"secret_id_accessor" structs:"secret_id_accessor" mapstructure:"secret_id_accessor"`
-
- // Number of times this SecretID can be used to perform the login
- // operation
- SecretIDNumUses int `json:"secret_id_num_uses" structs:"secret_id_num_uses" mapstructure:"secret_id_num_uses"`
-
- // Duration after which this SecretID should expire. This is capped by
- // the backend mount's max TTL value.
- SecretIDTTL time.Duration `json:"secret_id_ttl" structs:"secret_id_ttl" mapstructure:"secret_id_ttl"`
-
- // The time when the SecretID was created
- CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
-
- // The time when the SecretID becomes eligible for tidy operation.
- // Tidying is performed by the PeriodicFunc of the backend which is 1
- // minute apart.
- ExpirationTime time.Time `json:"expiration_time" structs:"expiration_time" mapstructure:"expiration_time"`
-
- // The time representing the last time this storage entry was modified
- LastUpdatedTime time.Time `json:"last_updated_time" structs:"last_updated_time" mapstructure:"last_updated_time"`
-
- // Metadata that belongs to the SecretID
- Metadata map[string]string `json:"metadata" structs:"metadata" mapstructure:"metadata"`
-
- // CIDRList is a set of CIDR blocks that impose source address
- // restrictions on the usage of SecretID
- CIDRList []string `json:"cidr_list" structs:"cidr_list" mapstructure:"cidr_list"`
-
- // This is a deprecated field
- SecretIDNumUsesDeprecated int `json:"SecretIDNumUses" structs:"SecretIDNumUses" mapstructure:"SecretIDNumUses"`
-}
-
-// Represents the payload of the storage entry of the accessor that maps to a
-// unique SecretID. Note that SecretIDs should never be stored in plaintext
-// anywhere in the backend. SecretIDHMAC will be used as an index to fetch the
-// properties of the SecretID and to delete the SecretID.
-type secretIDAccessorStorageEntry struct {
- // Hash of the SecretID which can be used to find the storage index at which
- // properties of SecretID is stored.
- SecretIDHMAC string `json:"secret_id_hmac" structs:"secret_id_hmac" mapstructure:"secret_id_hmac"`
-}
-
-// Checks if the Role represented by the RoleID still exists
-func (b *backend) validateRoleID(s logical.Storage, roleID string) (*roleStorageEntry, string, error) {
- // Look for the storage entry that maps the roleID to role
- roleIDIndex, err := b.roleIDEntry(s, roleID)
- if err != nil {
- return nil, "", err
- }
- if roleIDIndex == nil {
- return nil, "", fmt.Errorf("failed to find secondary index for role_id %q\n", roleID)
- }
-
- role, err := b.roleEntry(s, roleIDIndex.Name)
- if err != nil {
- return nil, "", err
- }
- if role == nil {
- return nil, "", fmt.Errorf("role %q referred by the SecretID does not exist", roleIDIndex.Name)
- }
-
- return role, roleIDIndex.Name, nil
-}
-
-// Validates the supplied RoleID and SecretID
-func (b *backend) validateCredentials(req *logical.Request, data *framework.FieldData) (*roleStorageEntry, string, map[string]string, error) {
- metadata := make(map[string]string)
- // RoleID must be supplied during every login
- roleID := strings.TrimSpace(data.Get("role_id").(string))
- if roleID == "" {
- return nil, "", metadata, fmt.Errorf("missing role_id")
- }
-
- // Validate the RoleID and get the Role entry
- role, roleName, err := b.validateRoleID(req.Storage, roleID)
- if err != nil {
- return nil, "", metadata, err
- }
- if role == nil || roleName == "" {
- return nil, "", metadata, fmt.Errorf("failed to validate role_id")
- }
-
- // Calculate the TTL boundaries since this reflects the properties of the token issued
- if role.TokenTTL, role.TokenMaxTTL, err = b.SanitizeTTL(role.TokenTTL, role.TokenMaxTTL); err != nil {
- return nil, "", metadata, err
- }
-
- if role.BindSecretID {
- // If 'bind_secret_id' was set on role, look for the field 'secret_id'
- // to be specified and validate it.
- secretID := strings.TrimSpace(data.Get("secret_id").(string))
- if secretID == "" {
- return nil, "", metadata, fmt.Errorf("missing secret_id")
- }
-
- // Check if the SecretID supplied is valid. If use limit was specified
- // on the SecretID, it will be decremented in this call.
- var valid bool
- valid, metadata, err = b.validateBindSecretID(req, roleName, secretID, role.HMACKey, role.BoundCIDRList)
- if err != nil {
- return nil, "", metadata, err
- }
- if !valid {
- return nil, "", metadata, fmt.Errorf("invalid secret_id %q", secretID)
- }
- }
-
- if role.BoundCIDRList != "" {
- // If 'bound_cidr_list' was set, verify the CIDR restrictions
- if req.Connection == nil || req.Connection.RemoteAddr == "" {
- return nil, "", metadata, fmt.Errorf("failed to get connection information")
- }
-
- belongs, err := cidrutil.IPBelongsToCIDRBlocksString(req.Connection.RemoteAddr, role.BoundCIDRList, ",")
- if err != nil {
- return nil, "", metadata, fmt.Errorf("failed to verify the CIDR restrictions set on the role: %v", err)
- }
- if !belongs {
- return nil, "", metadata, fmt.Errorf("source address %q unauthorized through CIDR restrictions on the role", req.Connection.RemoteAddr)
- }
- }
-
- return role, roleName, metadata, nil
-}
-
-// validateBindSecretID is used to determine if the given SecretID is a valid one.
-func (b *backend) validateBindSecretID(req *logical.Request, roleName, secretID,
- hmacKey, roleBoundCIDRList string) (bool, map[string]string, error) {
- secretIDHMAC, err := createHMAC(hmacKey, secretID)
- if err != nil {
- return false, nil, fmt.Errorf("failed to create HMAC of secret_id: %v", err)
- }
-
- roleNameHMAC, err := createHMAC(hmacKey, roleName)
- if err != nil {
- return false, nil, fmt.Errorf("failed to create HMAC of role_name: %v", err)
- }
-
- entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
-
- // SecretID locks are always index based on secretIDHMACs. This helps
- // acquiring the locks when the SecretIDs are listed. This allows grabbing
- // the correct locks even if the SecretIDs are not known in plaintext.
- lock := b.secretIDLock(secretIDHMAC)
- lock.RLock()
-
- result, err := b.nonLockedSecretIDStorageEntry(req.Storage, roleNameHMAC, secretIDHMAC)
- if err != nil {
- lock.RUnlock()
- return false, nil, err
- } else if result == nil {
- lock.RUnlock()
- return false, nil, nil
- }
-
- // SecretIDNumUses will be zero only if the usage limit was not set at all,
- // in which case, the SecretID will remain to be valid as long as it is not
- // expired.
- if result.SecretIDNumUses == 0 {
- // Ensure that the CIDRs on the secret ID are still a subset of that of
- // role's
- if err := verifyCIDRRoleSecretIDSubset(result.CIDRList,
- roleBoundCIDRList); err != nil {
- return false, nil, err
- }
-
- // If CIDR restrictions are present on the secret ID, check if the
- // source IP complies to it
- if len(result.CIDRList) != 0 {
- if req.Connection == nil || req.Connection.RemoteAddr == "" {
- return false, nil, fmt.Errorf("failed to get connection information")
- }
-
- if belongs, err := cidrutil.IPBelongsToCIDRBlocksSlice(req.Connection.RemoteAddr, result.CIDRList); !belongs || err != nil {
- return false, nil, fmt.Errorf("source address %q unauthorized through CIDR restrictions on the secret ID: %v", req.Connection.RemoteAddr, err)
- }
- }
-
- lock.RUnlock()
- return true, result.Metadata, nil
- }
-
- // If the SecretIDNumUses is non-zero, it means that its use-count should be updated
- // in the storage. Switch the lock from a `read` to a `write` and update
- // the storage entry.
- lock.RUnlock()
-
- lock.Lock()
- defer lock.Unlock()
-
- // Lock switching may change the data. Refresh the contents.
- result, err = b.nonLockedSecretIDStorageEntry(req.Storage, roleNameHMAC, secretIDHMAC)
- if err != nil {
- return false, nil, err
- }
- if result == nil {
- return false, nil, nil
- }
-
- // If there exists a single use left, delete the SecretID entry from
- // the storage but do not fail the validation request. Subsequest
- // requests to use the same SecretID will fail.
- if result.SecretIDNumUses == 1 {
- // Delete the secret IDs accessor first
- if err := b.deleteSecretIDAccessorEntry(req.Storage, result.SecretIDAccessor); err != nil {
- return false, nil, err
- }
- if err := req.Storage.Delete(entryIndex); err != nil {
- return false, nil, fmt.Errorf("failed to delete secret ID: %v", err)
- }
- } else {
- // If the use count is greater than one, decrement it and update the last updated time.
- result.SecretIDNumUses -= 1
- result.LastUpdatedTime = time.Now()
- if entry, err := logical.StorageEntryJSON(entryIndex, &result); err != nil {
- return false, nil, fmt.Errorf("failed to decrement the use count for secret ID %q", secretID)
- } else if err = req.Storage.Put(entry); err != nil {
- return false, nil, fmt.Errorf("failed to decrement the use count for secret ID %q", secretID)
- }
- }
-
- // Ensure that the CIDRs on the secret ID are still a subset of that of
- // role's
- if err := verifyCIDRRoleSecretIDSubset(result.CIDRList,
- roleBoundCIDRList); err != nil {
- return false, nil, err
- }
-
- // If CIDR restrictions are present on the secret ID, check if the
- // source IP complies to it
- if len(result.CIDRList) != 0 {
- if req.Connection == nil || req.Connection.RemoteAddr == "" {
- return false, nil, fmt.Errorf("failed to get connection information")
- }
-
- if belongs, err := cidrutil.IPBelongsToCIDRBlocksSlice(req.Connection.RemoteAddr, result.CIDRList); !belongs || err != nil {
- return false, nil, fmt.Errorf("source address %q unauthorized through CIDR restrictions on the secret ID: %v", req.Connection.RemoteAddr, err)
- }
- }
-
- return true, result.Metadata, nil
-}
-
-// verifyCIDRRoleSecretIDSubset checks if the CIDR blocks set on the secret ID
-// are a subset of CIDR blocks set on the role
-func verifyCIDRRoleSecretIDSubset(secretIDCIDRs []string, roleBoundCIDRList string) error {
- if len(secretIDCIDRs) != 0 {
- // Parse the CIDRs on role as a slice
- roleCIDRs := strutil.ParseDedupLowercaseAndSortStrings(roleBoundCIDRList, ",")
-
- // If there are no CIDR blocks on the role, then the subset
- // requirement would be satisfied
- if len(roleCIDRs) != 0 {
- subset, err := cidrutil.SubsetBlocks(roleCIDRs, secretIDCIDRs)
- if !subset || err != nil {
- return fmt.Errorf("failed to verify subset relationship between CIDR blocks on the role %q and CIDR blocks on the secret ID %q: %v", roleCIDRs, secretIDCIDRs, err)
- }
- }
- }
-
- return nil
-}
-
-// Creates a SHA256 HMAC of the given 'value' using the given 'key' and returns
-// a hex encoded string.
-func createHMAC(key, value string) (string, error) {
- if key == "" {
- return "", fmt.Errorf("invalid HMAC key")
- }
- hm := hmac.New(sha256.New, []byte(key))
- hm.Write([]byte(value))
- return hex.EncodeToString(hm.Sum(nil)), nil
-}
-
-func (b *backend) secretIDLock(secretIDHMAC string) *locksutil.LockEntry {
- return locksutil.LockForKey(b.secretIDLocks, secretIDHMAC)
-}
-
-func (b *backend) secretIDAccessorLock(secretIDAccessor string) *locksutil.LockEntry {
- return locksutil.LockForKey(b.secretIDAccessorLocks, secretIDAccessor)
-}
-
-// nonLockedSecretIDStorageEntry fetches the secret ID properties from physical
-// storage. The entry will be indexed based on the given HMACs of both role
-// name and the secret ID. This method will not acquire secret ID lock to fetch
-// the storage entry. Locks need to be acquired before calling this method.
-func (b *backend) nonLockedSecretIDStorageEntry(s logical.Storage, roleNameHMAC, secretIDHMAC string) (*secretIDStorageEntry, error) {
- if secretIDHMAC == "" {
- return nil, fmt.Errorf("missing secret ID HMAC")
- }
-
- if roleNameHMAC == "" {
- return nil, fmt.Errorf("missing role name HMAC")
- }
-
- // Prepare the storage index at which the secret ID will be stored
- entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
-
- entry, err := s.Get(entryIndex)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- result := secretIDStorageEntry{}
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- // TODO: Remove this upgrade bit in future releases
- persistNeeded := false
- if result.SecretIDNumUsesDeprecated != 0 {
- if result.SecretIDNumUses == 0 ||
- result.SecretIDNumUsesDeprecated < result.SecretIDNumUses {
- result.SecretIDNumUses = result.SecretIDNumUsesDeprecated
- persistNeeded = true
- }
- if result.SecretIDNumUses < result.SecretIDNumUsesDeprecated {
- result.SecretIDNumUsesDeprecated = result.SecretIDNumUses
- persistNeeded = true
- }
- }
-
- if persistNeeded {
- if err := b.nonLockedSetSecretIDStorageEntry(s, roleNameHMAC, secretIDHMAC, &result); err != nil {
- return nil, fmt.Errorf("failed to upgrade role storage entry %s", err)
- }
- }
-
- return &result, nil
-}
-
-// nonLockedSetSecretIDStorageEntry creates or updates a secret ID entry at the
-// physical storage. The entry will be indexed based on the given HMACs of both
-// role name and the secret ID. This method will not acquire secret ID lock to
-// create/update the storage entry. Locks need to be acquired before calling
-// this method.
-func (b *backend) nonLockedSetSecretIDStorageEntry(s logical.Storage, roleNameHMAC, secretIDHMAC string, secretEntry *secretIDStorageEntry) error {
- if secretIDHMAC == "" {
- return fmt.Errorf("missing secret ID HMAC")
- }
-
- if roleNameHMAC == "" {
- return fmt.Errorf("missing role name HMAC")
- }
-
- if secretEntry == nil {
- return fmt.Errorf("nil secret entry")
- }
-
- entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
-
- if entry, err := logical.StorageEntryJSON(entryIndex, secretEntry); err != nil {
- return err
- } else if err = s.Put(entry); err != nil {
- return err
- }
-
- return nil
-}
-
-// registerSecretIDEntry creates a new storage entry for the given SecretID.
-func (b *backend) registerSecretIDEntry(s logical.Storage, roleName, secretID, hmacKey string, secretEntry *secretIDStorageEntry) (*secretIDStorageEntry, error) {
- secretIDHMAC, err := createHMAC(hmacKey, secretID)
- if err != nil {
- return nil, fmt.Errorf("failed to create HMAC of secret ID: %v", err)
- }
- roleNameHMAC, err := createHMAC(hmacKey, roleName)
- if err != nil {
- return nil, fmt.Errorf("failed to create HMAC of role_name: %v", err)
- }
-
- lock := b.secretIDLock(secretIDHMAC)
- lock.RLock()
-
- entry, err := b.nonLockedSecretIDStorageEntry(s, roleNameHMAC, secretIDHMAC)
- if err != nil {
- lock.RUnlock()
- return nil, err
- }
- if entry != nil {
- lock.RUnlock()
- return nil, fmt.Errorf("SecretID is already registered")
- }
-
- // If there isn't an entry for the secretID already, switch the read lock
- // with a write lock and create an entry.
- lock.RUnlock()
- lock.Lock()
- defer lock.Unlock()
-
- // But before saving a new entry, check if the secretID entry was created during the lock switch.
- entry, err = b.nonLockedSecretIDStorageEntry(s, roleNameHMAC, secretIDHMAC)
- if err != nil {
- return nil, err
- }
- if entry != nil {
- return nil, fmt.Errorf("SecretID is already registered")
- }
-
- //
- // Create a new entry for the SecretID
- //
-
- // Set the creation time for the SecretID
- currentTime := time.Now()
- secretEntry.CreationTime = currentTime
- secretEntry.LastUpdatedTime = currentTime
-
- // If SecretIDTTL is not specified or if it crosses the backend mount's limit,
- // cap the expiration to backend's max. Otherwise, use it to determine the
- // expiration time.
- if secretEntry.SecretIDTTL < time.Duration(0) || secretEntry.SecretIDTTL > b.System().MaxLeaseTTL() {
- secretEntry.ExpirationTime = currentTime.Add(b.System().MaxLeaseTTL())
- } else if secretEntry.SecretIDTTL != time.Duration(0) {
- // Set the ExpirationTime only if SecretIDTTL was set. SecretIDs should not
- // expire by default.
- secretEntry.ExpirationTime = currentTime.Add(secretEntry.SecretIDTTL)
- }
-
- // Before storing the SecretID, store its accessor.
- if err := b.createSecretIDAccessorEntry(s, secretEntry, secretIDHMAC); err != nil {
- return nil, err
- }
-
- if err := b.nonLockedSetSecretIDStorageEntry(s, roleNameHMAC, secretIDHMAC, secretEntry); err != nil {
- return nil, err
- }
-
- return secretEntry, nil
-}
-
-// secretIDAccessorEntry is used to read the storage entry that maps an
-// accessor to a secret_id.
-func (b *backend) secretIDAccessorEntry(s logical.Storage, secretIDAccessor string) (*secretIDAccessorStorageEntry, error) {
- if secretIDAccessor == "" {
- return nil, fmt.Errorf("missing secretIDAccessor")
- }
-
- var result secretIDAccessorStorageEntry
-
- // Create index entry, mapping the accessor to the token ID
- salt, err := b.Salt()
- if err != nil {
- return nil, err
- }
- entryIndex := "accessor/" + salt.SaltID(secretIDAccessor)
-
- accessorLock := b.secretIDAccessorLock(secretIDAccessor)
- accessorLock.RLock()
- defer accessorLock.RUnlock()
-
- if entry, err := s.Get(entryIndex); err != nil {
- return nil, err
- } else if entry == nil {
- return nil, nil
- } else if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-// createSecretIDAccessorEntry creates an identifier for the SecretID. A storage index,
-// mapping the accessor to the SecretID is also created. This method should
-// be called when the lock for the corresponding SecretID is held.
-func (b *backend) createSecretIDAccessorEntry(s logical.Storage, entry *secretIDStorageEntry, secretIDHMAC string) error {
- // Create a random accessor
- accessorUUID, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- entry.SecretIDAccessor = accessorUUID
-
- // Create index entry, mapping the accessor to the token ID
- salt, err := b.Salt()
- if err != nil {
- return err
- }
- entryIndex := "accessor/" + salt.SaltID(entry.SecretIDAccessor)
-
- accessorLock := b.secretIDAccessorLock(accessorUUID)
- accessorLock.Lock()
- defer accessorLock.Unlock()
-
- if entry, err := logical.StorageEntryJSON(entryIndex, &secretIDAccessorStorageEntry{
- SecretIDHMAC: secretIDHMAC,
- }); err != nil {
- return err
- } else if err = s.Put(entry); err != nil {
- return fmt.Errorf("failed to persist accessor index entry: %v", err)
- }
-
- return nil
-}
-
-// deleteSecretIDAccessorEntry deletes the storage index mapping the accessor to a SecretID.
-func (b *backend) deleteSecretIDAccessorEntry(s logical.Storage, secretIDAccessor string) error {
- salt, err := b.Salt()
- if err != nil {
- return err
- }
- accessorEntryIndex := "accessor/" + salt.SaltID(secretIDAccessor)
-
- accessorLock := b.secretIDAccessorLock(secretIDAccessor)
- accessorLock.Lock()
- defer accessorLock.Unlock()
-
- // Delete the accessor of the SecretID first
- if err := s.Delete(accessorEntryIndex); err != nil {
- return fmt.Errorf("failed to delete accessor storage entry: %v", err)
- }
-
- return nil
-}
-
-// flushRoleSecrets deletes all the SecretIDs that belong to the given
-// RoleID.
-func (b *backend) flushRoleSecrets(s logical.Storage, roleName, hmacKey string) error {
- roleNameHMAC, err := createHMAC(hmacKey, roleName)
- if err != nil {
- return fmt.Errorf("failed to create HMAC of role_name: %v", err)
- }
-
- // Acquire the custom lock to perform listing of SecretIDs
- b.secretIDListingLock.RLock()
- defer b.secretIDListingLock.RUnlock()
-
- secretIDHMACs, err := s.List(fmt.Sprintf("secret_id/%s/", roleNameHMAC))
- if err != nil {
- return err
- }
- for _, secretIDHMAC := range secretIDHMACs {
- // Acquire the lock belonging to the SecretID
- lock := b.secretIDLock(secretIDHMAC)
- lock.Lock()
- entryIndex := fmt.Sprintf("secret_id/%s/%s", roleNameHMAC, secretIDHMAC)
- if err := s.Delete(entryIndex); err != nil {
- lock.Unlock()
- return fmt.Errorf("error deleting SecretID %q from storage: %v", secretIDHMAC, err)
- }
- lock.Unlock()
- }
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation_test.go
deleted file mode 100644
index aa66644..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package approle
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestAppRole_SecretIDNumUsesUpgrade(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, storage := createBackendWithStorage(t)
-
- roleData := map[string]interface{}{
- "secret_id_num_uses": 10,
- }
-
- roleReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/role1",
- Storage: storage,
- Data: roleData,
- }
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- secretIDReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "role/role1/secret-id",
- Storage: storage,
- }
-
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- secretIDReq.Operation = logical.UpdateOperation
- secretIDReq.Path = "role/role1/secret-id/lookup"
- secretIDReq.Data = map[string]interface{}{
- "secret_id": resp.Data["secret_id"].(string),
- }
- resp, err = b.HandleRequest(secretIDReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Check if the response contains the value set for secret_id_num_uses
- // and not SecretIDNumUses
- if resp.Data["secret_id_num_uses"] != 10 ||
- resp.Data["SecretIDNumUses"] != 0 {
- t.Fatal("invalid secret_id_num_uses")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go
deleted file mode 100644
index 30feba9..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go
+++ /dev/null
@@ -1,291 +0,0 @@
-package awsauth
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/endpoints"
- "github.com/aws/aws-sdk-go/service/ec2"
- "github.com/aws/aws-sdk-go/service/iam"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/patrickmn/go-cache"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b, err := Backend(conf)
- if err != nil {
- return nil, err
- }
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-type backend struct {
- *framework.Backend
-
- // Lock to make changes to any of the backend's configuration endpoints.
- configMutex sync.RWMutex
-
- // Lock to make changes to role entries
- roleMutex sync.RWMutex
-
- // Lock to make changes to the blacklist entries
- blacklistMutex sync.RWMutex
-
- // Guards the blacklist/whitelist tidy functions
- tidyBlacklistCASGuard uint32
- tidyWhitelistCASGuard uint32
-
- // Duration after which the periodic function of the backend needs to
- // tidy the blacklist and whitelist entries.
- tidyCooldownPeriod time.Duration
-
- // nextTidyTime holds the time at which the periodic func should initiatite
- // the tidy operations. This is set by the periodicFunc based on the value
- // of tidyCooldownPeriod.
- nextTidyTime time.Time
-
- // Map to hold the EC2 client objects indexed by region and STS role.
- // This avoids the overhead of creating a client object for every login request.
- // When the credentials are modified or deleted, all the cached client objects
- // will be flushed. The empty STS role signifies the master account
- EC2ClientsMap map[string]map[string]*ec2.EC2
-
- // Map to hold the IAM client objects indexed by region and STS role.
- // This avoids the overhead of creating a client object for every login request.
- // When the credentials are modified or deleted, all the cached client objects
- // will be flushed. The empty STS role signifies the master account
- IAMClientsMap map[string]map[string]*iam.IAM
-
- // Map of AWS unique IDs to the full ARN corresponding to that unique ID
- // This avoids the overhead of an AWS API hit for every login request
- // using the IAM auth method when bound_iam_principal_arn contains a wildcard
- iamUserIdToArnCache *cache.Cache
-
- // AWS Account ID of the "default" AWS credentials
- // This cache avoids the need to call GetCallerIdentity repeatedly to learn it
- // We can't store this because, in certain pathological cases, it could change
- // out from under us, such as a standby and active Vault server in different AWS
- // accounts using their IAM instance profile to get their credentials.
- defaultAWSAccountID string
-
- resolveArnToUniqueIDFunc func(logical.Storage, string) (string, error)
-}
-
-func Backend(conf *logical.BackendConfig) (*backend, error) {
- b := &backend{
- // Setting the periodic func to be run once in an hour.
- // If there is a real need, this can be made configurable.
- tidyCooldownPeriod: time.Hour,
- EC2ClientsMap: make(map[string]map[string]*ec2.EC2),
- IAMClientsMap: make(map[string]map[string]*iam.IAM),
- iamUserIdToArnCache: cache.New(7*24*time.Hour, 24*time.Hour),
- }
-
- b.resolveArnToUniqueIDFunc = b.resolveArnToRealUniqueId
-
- b.Backend = &framework.Backend{
- PeriodicFunc: b.periodicFunc,
- AuthRenew: b.pathLoginRenew,
- Help: backendHelp,
- PathsSpecial: &logical.Paths{
- Unauthenticated: []string{
- "login",
- },
- LocalStorage: []string{
- "whitelist/identity/",
- },
- },
- Paths: []*framework.Path{
- pathLogin(b),
- pathListRole(b),
- pathListRoles(b),
- pathRole(b),
- pathRoleTag(b),
- pathConfigClient(b),
- pathConfigCertificate(b),
- pathConfigSts(b),
- pathListSts(b),
- pathConfigTidyRoletagBlacklist(b),
- pathConfigTidyIdentityWhitelist(b),
- pathListCertificates(b),
- pathListRoletagBlacklist(b),
- pathRoletagBlacklist(b),
- pathTidyRoletagBlacklist(b),
- pathListIdentityWhitelist(b),
- pathIdentityWhitelist(b),
- pathTidyIdentityWhitelist(b),
- },
- Invalidate: b.invalidate,
- BackendType: logical.TypeCredential,
- }
-
- return b, nil
-}
-
-// periodicFunc performs the tasks that the backend wishes to do periodically.
-// Currently this will be triggered once in a minute by the RollbackManager.
-//
-// The tasks being done currently by this function are to cleanup the expired
-// entries of both blacklist role tags and whitelist identities. Tidying is done
-// not once in a minute, but once in an hour, controlled by 'tidyCooldownPeriod'.
-// Tidying of blacklist and whitelist are by default enabled. This can be
-// changed using `config/tidy/roletags` and `config/tidy/identities` endpoints.
-func (b *backend) periodicFunc(req *logical.Request) error {
- // Run the tidy operations for the first time. Then run it when current
- // time matches the nextTidyTime.
- if b.nextTidyTime.IsZero() || !time.Now().Before(b.nextTidyTime) {
- // safety_buffer defaults to 180 days for roletag blacklist
- safety_buffer := 15552000
- tidyBlacklistConfigEntry, err := b.lockedConfigTidyRoleTags(req.Storage)
- if err != nil {
- return err
- }
- skipBlacklistTidy := false
- // check if tidying of role tags was configured
- if tidyBlacklistConfigEntry != nil {
- // check if periodic tidying of role tags was disabled
- if tidyBlacklistConfigEntry.DisablePeriodicTidy {
- skipBlacklistTidy = true
- }
- // overwrite the default safety_buffer with the configured value
- safety_buffer = tidyBlacklistConfigEntry.SafetyBuffer
- }
- // tidy role tags if explicitly not disabled
- if !skipBlacklistTidy {
- b.tidyBlacklistRoleTag(req.Storage, safety_buffer)
- }
-
- // reset the safety_buffer to 72h
- safety_buffer = 259200
- tidyWhitelistConfigEntry, err := b.lockedConfigTidyIdentities(req.Storage)
- if err != nil {
- return err
- }
- skipWhitelistTidy := false
- // check if tidying of identities was configured
- if tidyWhitelistConfigEntry != nil {
- // check if periodic tidying of identities was disabled
- if tidyWhitelistConfigEntry.DisablePeriodicTidy {
- skipWhitelistTidy = true
- }
- // overwrite the default safety_buffer with the configured value
- safety_buffer = tidyWhitelistConfigEntry.SafetyBuffer
- }
- // tidy identities if explicitly not disabled
- if !skipWhitelistTidy {
- b.tidyWhitelistIdentity(req.Storage, safety_buffer)
- }
-
- // Update the time at which to run the tidy functions again.
- b.nextTidyTime = time.Now().Add(b.tidyCooldownPeriod)
- }
- return nil
-}
-
-func (b *backend) invalidate(key string) {
- switch key {
- case "config/client":
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
- b.flushCachedEC2Clients()
- b.flushCachedIAMClients()
- b.defaultAWSAccountID = ""
- }
-}
-
-// Putting this here so we can inject a fake resolver into the backend for unit testing
-// purposes
-func (b *backend) resolveArnToRealUniqueId(s logical.Storage, arn string) (string, error) {
- entity, err := parseIamArn(arn)
- if err != nil {
- return "", err
- }
- // This odd-looking code is here because IAM is an inherently global service. IAM and STS ARNs
- // don't have regions in them, and there is only a single global endpoint for IAM; see
- // http://docs.aws.amazon.com/general/latest/gr/rande.html#iam_region
- // However, the ARNs do have a partition in them, because the GovCloud and China partitions DO
- // have their own separate endpoints, and the partition is encoded in the ARN. If Amazon's Go SDK
- // would allow us to pass a partition back to the IAM client, it would be much simpler. But it
- // doesn't appear that's possible, so in order to properly support GovCloud and China, we do a
- // circular dance of extracting the partition from the ARN, finding any arbitrary region in the
- // partition, and passing that region back back to the SDK, so that the SDK can figure out the
- // proper partition from the arbitrary region we passed in to look up the endpoint.
- // Sigh
- region := getAnyRegionForAwsPartition(entity.Partition)
- if region == nil {
- return "", fmt.Errorf("Unable to resolve partition %q to a region", entity.Partition)
- }
- iamClient, err := b.clientIAM(s, region.ID(), entity.AccountNumber)
- if err != nil {
- return "", err
- }
-
- switch entity.Type {
- case "user":
- userInfo, err := iamClient.GetUser(&iam.GetUserInput{UserName: &entity.FriendlyName})
- if err != nil {
- return "", err
- }
- if userInfo == nil {
- return "", fmt.Errorf("got nil result from GetUser")
- }
- return *userInfo.User.UserId, nil
- case "role":
- roleInfo, err := iamClient.GetRole(&iam.GetRoleInput{RoleName: &entity.FriendlyName})
- if err != nil {
- return "", err
- }
- if roleInfo == nil {
- return "", fmt.Errorf("got nil result from GetRole")
- }
- return *roleInfo.Role.RoleId, nil
- case "instance-profile":
- profileInfo, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName})
- if err != nil {
- return "", err
- }
- if profileInfo == nil {
- return "", fmt.Errorf("got nil result from GetInstanceProfile")
- }
- return *profileInfo.InstanceProfile.InstanceProfileId, nil
- default:
- return "", fmt.Errorf("unrecognized error type %#v", entity.Type)
- }
-}
-
-// Adapted from https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/
-// the "Enumerating Regions and Endpoint Metadata" section
-func getAnyRegionForAwsPartition(partitionId string) *endpoints.Region {
- resolver := endpoints.DefaultResolver()
- partitions := resolver.(endpoints.EnumPartitions).Partitions()
-
- for _, p := range partitions {
- if p.ID() == partitionId {
- for _, r := range p.Regions() {
- return &r
- }
- }
- }
- return nil
-}
-
-const backendHelp = `
-aws-ec2 auth backend takes in PKCS#7 signature of an AWS EC2 instance and a client
-created nonce to authenticates the EC2 instance with Vault.
-
-Authentication is backed by a preconfigured role in the backend. The role
-represents the authorization of resources by containing Vault's policies.
-Role can be created using 'role/' endpoint.
-
-If there is need to further restrict the capabilities of the role on the instance
-that is using the role, 'role_tag' option can be enabled on the role, and a tag
-can be generated using 'role//tag' endpoint. This tag represents the
-subset of capabilities set on the role. When the 'role_tag' option is enabled on
-the role, the login operation requires that a respective role tag is attached to
-the EC2 instance which performs the login.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go
deleted file mode 100644
index 881ca85..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go
+++ /dev/null
@@ -1,1619 +0,0 @@
-package awsauth
-
-import (
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "strings"
- "testing"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/sts"
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- logicaltest "github.com/hashicorp/vault/logical/testing"
-)
-
-func TestBackend_CreateParseVerifyRoleTag(t *testing.T) {
- // create a backend
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // create a role entry
- data := map[string]interface{}{
- "auth_type": "ec2",
- "policies": "p,q,r,s",
- "bound_ami_id": "abcd-123",
- }
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/abcd-123",
- Storage: storage,
- Data: data,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create role")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- // read the created role entry
- roleEntry, err := b.lockedAWSRole(storage, "abcd-123")
- if err != nil {
- t.Fatal(err)
- }
-
- // create a nonce for the role tag
- nonce, err := createRoleTagNonce()
- if err != nil {
- t.Fatal(err)
- }
- rTag1 := &roleTag{
- Version: "v1",
- Role: "abcd-123",
- Nonce: nonce,
- Policies: []string{"p", "q", "r"},
- MaxTTL: 200000000000, // 200s
- }
-
- // create a role tag against the role entry
- val, err := createRoleTagValue(rTag1, roleEntry)
- if err != nil {
- t.Fatal(err)
- }
- if val == "" {
- t.Fatalf("failed to create role tag")
- }
-
- // parse the created role tag
- rTag2, err := b.parseAndVerifyRoleTagValue(storage, val)
- if err != nil {
- t.Fatal(err)
- }
-
- // check the values in parsed role tag
- if rTag2.Version != "v1" ||
- rTag2.Nonce != nonce ||
- rTag2.Role != "abcd-123" ||
- rTag2.MaxTTL != 200000000000 || // 200s
- !policyutil.EquivalentPolicies(rTag2.Policies, []string{"p", "q", "r"}) ||
- len(rTag2.HMAC) == 0 {
- t.Fatalf("parsed role tag is invalid")
- }
-
- // verify the tag contents using role specific HMAC key
- verified, err := verifyRoleTagValue(rTag2, roleEntry)
- if err != nil {
- t.Fatal(err)
- }
- if !verified {
- t.Fatalf("failed to verify the role tag")
- }
-
- // register a different role
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/ami-6789",
- Storage: storage,
- Data: data,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create role")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- // get the entry of the newly created role entry
- roleEntry2, err := b.lockedAWSRole(storage, "ami-6789")
- if err != nil {
- t.Fatal(err)
- }
-
- // try to verify the tag created with previous role's HMAC key
- // with the newly registered entry's HMAC key
- verified, err = verifyRoleTagValue(rTag2, roleEntry2)
- if err != nil {
- t.Fatal(err)
- }
- if verified {
- t.Fatalf("verification of role tag should have failed")
- }
-
- // modify any value in role tag and try to verify it
- rTag2.Version = "v2"
- verified, err = verifyRoleTagValue(rTag2, roleEntry)
- if err != nil {
- t.Fatal(err)
- }
- if verified {
- t.Fatalf("verification of role tag should have failed: invalid Version")
- }
-}
-
-func TestBackend_prepareRoleTagPlaintextValue(t *testing.T) {
- // create a nonce for the role tag
- nonce, err := createRoleTagNonce()
- if err != nil {
- t.Fatal(err)
- }
- rTag := &roleTag{
- Version: "v1",
- Nonce: nonce,
- Role: "abcd-123",
- }
-
- rTag.Version = ""
- // try to create plaintext part of role tag
- // without specifying version
- val, err := prepareRoleTagPlaintextValue(rTag)
- if err == nil {
- t.Fatalf("expected error for missing version")
- }
- rTag.Version = "v1"
-
- rTag.Nonce = ""
- // try to create plaintext part of role tag
- // without specifying nonce
- val, err = prepareRoleTagPlaintextValue(rTag)
- if err == nil {
- t.Fatalf("expected error for missing nonce")
- }
- rTag.Nonce = nonce
-
- rTag.Role = ""
- // try to create plaintext part of role tag
- // without specifying role
- val, err = prepareRoleTagPlaintextValue(rTag)
- if err == nil {
- t.Fatalf("expected error for missing role")
- }
- rTag.Role = "abcd-123"
-
- // create the plaintext part of the tag
- val, err = prepareRoleTagPlaintextValue(rTag)
- if err != nil {
- t.Fatal(err)
- }
-
- // verify if it contains known fields
- if !strings.Contains(val, "r=") ||
- !strings.Contains(val, "d=") ||
- !strings.Contains(val, "m=") ||
- !strings.HasPrefix(val, "v1") {
- t.Fatalf("incorrect information in role tag plaintext value")
- }
-
- rTag.InstanceID = "instance-123"
- // create the role tag with instance_id specified
- val, err = prepareRoleTagPlaintextValue(rTag)
- if err != nil {
- t.Fatal(err)
- }
- // verify it
- if !strings.Contains(val, "i=") {
- t.Fatalf("missing instance ID in role tag plaintext value")
- }
-
- rTag.MaxTTL = 200000000000
- // create the role tag with max_ttl specified
- val, err = prepareRoleTagPlaintextValue(rTag)
- if err != nil {
- t.Fatal(err)
- }
- // verify it
- if !strings.Contains(val, "t=") {
- t.Fatalf("missing max_ttl field in role tag plaintext value")
- }
-}
-
-func TestBackend_CreateRoleTagNonce(t *testing.T) {
- // create a nonce for the role tag
- nonce, err := createRoleTagNonce()
- if err != nil {
- t.Fatal(err)
- }
- if nonce == "" {
- t.Fatalf("failed to create role tag nonce")
- }
-
- // verify that the value returned is base64 encoded
- nonceBytes, err := base64.StdEncoding.DecodeString(nonce)
- if err != nil {
- t.Fatal(err)
- }
- if len(nonceBytes) == 0 {
- t.Fatalf("length of role tag nonce is zero")
- }
-}
-
-func TestBackend_ConfigTidyIdentities(t *testing.T) {
- // create a backend
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // test update operation
- tidyRequest := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/tidy/identity-whitelist",
- Storage: storage,
- }
- data := map[string]interface{}{
- "safety_buffer": "60",
- "disable_periodic_tidy": true,
- }
- tidyRequest.Data = data
- _, err = b.HandleRequest(tidyRequest)
- if err != nil {
- t.Fatal(err)
- }
-
- // test read operation
- tidyRequest.Operation = logical.ReadOperation
- resp, err := b.HandleRequest(tidyRequest)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatalf("failed to read config/tidy/identity-whitelist endpoint")
- }
- if resp.Data["safety_buffer"].(int) != 60 || !resp.Data["disable_periodic_tidy"].(bool) {
- t.Fatalf("bad: expected: safety_buffer:60 disable_periodic_tidy:true actual: safety_buffer:%s disable_periodic_tidy:%t\n", resp.Data["safety_buffer"].(int), resp.Data["disable_periodic_tidy"].(bool))
- }
-
- // test delete operation
- tidyRequest.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(tidyRequest)
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil {
- t.Fatalf("failed to delete config/tidy/identity-whitelist")
- }
-}
-
-func TestBackend_ConfigTidyRoleTags(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // test update operation
- tidyRequest := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/tidy/roletag-blacklist",
- Storage: storage,
- }
- data := map[string]interface{}{
- "safety_buffer": "60",
- "disable_periodic_tidy": true,
- }
- tidyRequest.Data = data
- _, err = b.HandleRequest(tidyRequest)
- if err != nil {
- t.Fatal(err)
- }
-
- // test read operation
- tidyRequest.Operation = logical.ReadOperation
- resp, err := b.HandleRequest(tidyRequest)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatalf("failed to read config/tidy/roletag-blacklist endpoint")
- }
- if resp.Data["safety_buffer"].(int) != 60 || !resp.Data["disable_periodic_tidy"].(bool) {
- t.Fatalf("bad: expected: safety_buffer:60 disable_periodic_tidy:true actual: safety_buffer:%s disable_periodic_tidy:%t\n", resp.Data["safety_buffer"].(int), resp.Data["disable_periodic_tidy"].(bool))
- }
-
- // test delete operation
- tidyRequest.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(tidyRequest)
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil {
- t.Fatalf("failed to delete config/tidy/roletag-blacklist")
- }
-}
-
-func TestBackend_TidyIdentities(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // test update operation
- _, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "tidy/identity-whitelist",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBackend_TidyRoleTags(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // test update operation
- _, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "tidy/roletag-blacklist",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBackend_ConfigClient(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- data := map[string]interface{}{"access_key": "AKIAJBRHKV6EVTTNXDHA",
- "secret_key": "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",
- }
-
- stepCreate := logicaltest.TestStep{
- Operation: logical.CreateOperation,
- Path: "config/client",
- Data: data,
- }
-
- stepUpdate := logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/client",
- Data: data,
- }
-
- data3 := map[string]interface{}{"access_key": "",
- "secret_key": "mCtSM8ZUEQ3mOFVZYPBQkf2sO6F/W7a5TVzrl3Oj",
- }
- stepInvalidAccessKey := logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/client",
- Data: data3,
- ErrorOk: true,
- }
-
- data4 := map[string]interface{}{"access_key": "accesskey",
- "secret_key": "",
- }
- stepInvalidSecretKey := logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/client",
- Data: data4,
- ErrorOk: true,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: false,
- Backend: b,
- Steps: []logicaltest.TestStep{
- stepCreate,
- stepInvalidAccessKey,
- stepInvalidSecretKey,
- stepUpdate,
- },
- })
-
- // test existence check returning false
- checkFound, exists, err := b.HandleExistenceCheck(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "config/client",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if !checkFound {
- t.Fatal("existence check not found for path 'config/client'")
- }
- if exists {
- t.Fatal("existence check should have returned 'false' for 'config/client'")
- }
-
- // create an entry
- configClientCreateRequest := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/client",
- Data: data,
- Storage: storage,
- }
- _, err = b.HandleRequest(configClientCreateRequest)
- if err != nil {
- t.Fatal(err)
- }
-
- //test existence check returning true
- checkFound, exists, err = b.HandleExistenceCheck(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "config/client",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if !checkFound {
- t.Fatal("existence check not found for path 'config/client'")
- }
- if !exists {
- t.Fatal("existence check should have returned 'true' for 'config/client'")
- }
-
- endpointData := map[string]interface{}{
- "secret_key": "secretkey",
- "access_key": "accesskey",
- "endpoint": "endpointvalue",
- }
-
- endpointReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/client",
- Storage: storage,
- Data: endpointData,
- }
- _, err = b.HandleRequest(endpointReq)
- if err != nil {
- t.Fatal(err)
- }
-
- endpointReq.Operation = logical.ReadOperation
- resp, err := b.HandleRequest(endpointReq)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil ||
- resp.IsError() {
- t.Fatalf("")
- }
- actual := resp.Data["endpoint"].(string)
- if actual != "endpointvalue" {
- t.Fatalf("bad: endpoint: expected:endpointvalue actual:%s\n", actual)
- }
-}
-
-func TestBackend_pathConfigCertificate(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- certReq := &logical.Request{
- Operation: logical.CreateOperation,
- Storage: storage,
- Path: "config/certificate/cert1",
- }
- checkFound, exists, err := b.HandleExistenceCheck(certReq)
- if err != nil {
- t.Fatal(err)
- }
- if !checkFound {
- t.Fatal("existence check not found for path 'config/certificate/cert1'")
- }
- if exists {
- t.Fatal("existence check should have returned 'false' for 'config/certificate/cert1'")
- }
-
- data := map[string]interface{}{
- "type": "pkcs7",
- "aws_public_cert": `LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM3VENDQXEwQ0NRQ1d1a2paNVY0YVp6QUpC
-Z2NxaGtqT09BUURNRnd4Q3pBSkJnTlZCQVlUQWxWVE1Sa3cKRndZRFZRUUlFeEJYWVhOb2FXNW5k
-Rzl1SUZOMFlYUmxNUkF3RGdZRFZRUUhFd2RUWldGMGRHeGxNU0F3SGdZRApWUVFLRXhkQmJXRjZi
-MjRnVjJWaUlGTmxjblpwWTJWeklFeE1RekFlRncweE1qQXhNRFV4TWpVMk1USmFGdzB6Ck9EQXhN
-RFV4TWpVMk1USmFNRnd4Q3pBSkJnTlZCQVlUQWxWVE1Sa3dGd1lEVlFRSUV4QlhZWE5vYVc1bmRH
-OXUKSUZOMFlYUmxNUkF3RGdZRFZRUUhFd2RUWldGMGRHeGxNU0F3SGdZRFZRUUtFeGRCYldGNmIy
-NGdWMlZpSUZObApjblpwWTJWeklFeE1RekNDQWJjd2dnRXNCZ2NxaGtqT09BUUJNSUlCSHdLQmdR
-Q2prdmNTMmJiMVZRNHl0LzVlCmloNU9PNmtLL24xTHpsbHI3RDhad3RRUDhmT0VwcDVFMm5nK0Q2
-VWQxWjFnWWlwcjU4S2ozbnNzU05wSTZiWDMKVnlJUXpLN3dMY2xuZC9Zb3pxTk5tZ0l5WmVjTjdF
-Z2xLOUlUSEpMUCt4OEZ0VXB0M1FieVlYSmRtVk1lZ042UApodmlZdDVKSC9uWWw0aGgzUGExSEpk
-c2tnUUlWQUxWSjNFUjExK0tvNHRQNm53dkh3aDYrRVJZUkFvR0JBSTFqCmsrdGtxTVZIdUFGY3ZB
-R0tvY1Rnc2pKZW02LzVxb216SnVLRG1iSk51OVF4dzNyQW90WGF1OFFlK01CY0psL1UKaGh5MUtI
-VnBDR2w5ZnVlUTJzNklMMENhTy9idXljVTFDaVlRazQwS05IQ2NIZk5pWmJkbHgxRTlycFVwN2Ju
-RgpsUmEydjFudE1YM2NhUlZEZGJ0UEVXbWR4U0NZc1lGRGs0bVpyT0xCQTRHRUFBS0JnRWJtZXZl
-NWY4TElFL0dmCk1ObVA5Q001ZW92UU9HeDVobzhXcUQrYVRlYnMrazJ0bjkyQkJQcWVacXBXUmE1
-UC8ranJkS21sMXF4NGxsSFcKTVhyczNJZ0liNitoVUlCK1M4ZHo4L21tTzBicHI3NlJvWlZDWFlh
-YjJDWmVkRnV0N3FjM1dVSDkrRVVBSDVtdwp2U2VEQ09VTVlRUjdSOUxJTll3b3VISXppcVFZTUFr
-R0J5cUdTTTQ0QkFNREx3QXdMQUlVV1hCbGs0MHhUd1N3CjdIWDMyTXhYWXJ1c2U5QUNGQk5HbWRY
-MlpCclZOR3JOOU4yZjZST2swazlLCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
-`,
- }
-
- certReq.Data = data
- // test create operation
- resp, err := b.HandleRequest(certReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("resp: %#v, err: %v", resp, err)
- }
-
- certReq.Data = nil
- // test existence check
- checkFound, exists, err = b.HandleExistenceCheck(certReq)
- if err != nil {
- t.Fatal(err)
- }
- if !checkFound {
- t.Fatal("existence check not found for path 'config/certificate/cert1'")
- }
- if !exists {
- t.Fatal("existence check should have returned 'true' for 'config/certificate/cert1'")
- }
-
- certReq.Operation = logical.ReadOperation
- // test read operation
- resp, err = b.HandleRequest(certReq)
- if err != nil {
- t.Fatal(err)
- }
- expectedCert := `-----BEGIN CERTIFICATE-----
-MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
-FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
-VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z
-ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
-IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
-cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e
-ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3
-VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P
-hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j
-k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U
-hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF
-lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf
-MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW
-MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw
-vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw
-7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K
------END CERTIFICATE-----
-`
- if resp.Data["aws_public_cert"].(string) != expectedCert {
- t.Fatalf("bad: expected:%s\n got:%s\n", expectedCert, resp.Data["aws_public_cert"].(string))
- }
-
- certReq.Operation = logical.CreateOperation
- certReq.Path = "config/certificate/cert2"
- certReq.Data = data
- // create another entry to test the list operation
- _, err = b.HandleRequest(certReq)
- if err != nil {
- t.Fatal(err)
- }
-
- certReq.Operation = logical.ListOperation
- certReq.Path = "config/certificates"
- // test list operation
- resp, err = b.HandleRequest(certReq)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatalf("failed to list config/certificates")
- }
- keys := resp.Data["keys"].([]string)
- if len(keys) != 2 {
- t.Fatalf("invalid keys listed: %#v\n", keys)
- }
-
- certReq.Operation = logical.DeleteOperation
- certReq.Path = "config/certificate/cert1"
- _, err = b.HandleRequest(certReq)
- if err != nil {
- t.Fatal(err)
- }
-
- certReq.Path = "config/certificate/cert2"
- _, err = b.HandleRequest(certReq)
- if err != nil {
- t.Fatal(err)
- }
-
- certReq.Operation = logical.ListOperation
- certReq.Path = "config/certificates"
- // test list operation
- resp, err = b.HandleRequest(certReq)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatalf("failed to list config/certificates")
- }
- if resp.Data["keys"] != nil {
- t.Fatalf("no entries should be present")
- }
-}
-
-func TestBackend_parseAndVerifyRoleTagValue(t *testing.T) {
- // create a backend
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // create a role
- data := map[string]interface{}{
- "auth_type": "ec2",
- "policies": "p,q,r,s",
- "max_ttl": "120s",
- "role_tag": "VaultRole",
- "bound_ami_id": "abcd-123",
- }
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/abcd-123",
- Storage: storage,
- Data: data,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create role")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- // verify that the entry is created
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "role/abcd-123",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatalf("expected an role entry for abcd-123")
- }
-
- // create a role tag
- data2 := map[string]interface{}{
- "policies": "p,q,r,s",
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "role/abcd-123/tag",
- Storage: storage,
- Data: data2,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp.Data["tag_key"].(string) == "" ||
- resp.Data["tag_value"].(string) == "" {
- t.Fatalf("invalid tag response: %#v\n", resp)
- }
- tagValue := resp.Data["tag_value"].(string)
-
- // parse the value and check if the verifiable values match
- rTag, err := b.parseAndVerifyRoleTagValue(storage, tagValue)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if rTag == nil {
- t.Fatalf("failed to parse role tag")
- }
- if rTag.Version != "v1" ||
- !policyutil.EquivalentPolicies(rTag.Policies, []string{"p", "q", "r", "s"}) ||
- rTag.Role != "abcd-123" {
- t.Fatalf("bad: parsed role tag contains incorrect values. Got: %#v\n", rTag)
- }
-}
-
-func TestBackend_PathRoleTag(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- data := map[string]interface{}{
- "auth_type": "ec2",
- "policies": "p,q,r,s",
- "max_ttl": "120s",
- "role_tag": "VaultRole",
- "bound_ami_id": "abcd-123",
- }
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/abcd-123",
- Storage: storage,
- Data: data,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create role")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "role/abcd-123",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatalf("failed to find a role entry for abcd-123")
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "role/abcd-123/tag",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.Data == nil {
- t.Fatalf("failed to create a tag on role: abcd-123")
- }
- if resp.IsError() {
- t.Fatalf("failed to create a tag on role: abcd-123: %s\n", resp.Data["error"])
- }
- if resp.Data["tag_value"].(string) == "" {
- t.Fatalf("role tag not present in the response data: %#v\n", resp.Data)
- }
-}
-
-func TestBackend_PathBlacklistRoleTag(t *testing.T) {
- // create the backend
- storage := &logical.InmemStorage{}
- config := logical.TestBackendConfig()
- config.StorageView = storage
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // create an role entry
- data := map[string]interface{}{
- "auth_type": "ec2",
- "policies": "p,q,r,s",
- "role_tag": "VaultRole",
- "bound_ami_id": "abcd-123",
- }
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/abcd-123",
- Storage: storage,
- Data: data,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create role")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- // create a role tag against an role registered before
- data2 := map[string]interface{}{
- "policies": "p,q,r,s",
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "role/abcd-123/tag",
- Storage: storage,
- Data: data2,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.Data == nil {
- t.Fatalf("failed to create a tag on role: abcd-123")
- }
- if resp.IsError() {
- t.Fatalf("failed to create a tag on role: abcd-123: %s\n", resp.Data["error"])
- }
- tag := resp.Data["tag_value"].(string)
- if tag == "" {
- t.Fatalf("role tag not present in the response data: %#v\n", resp.Data)
- }
-
- // blacklist that role tag
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roletag-blacklist/" + tag,
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil {
- t.Fatalf("failed to blacklist the roletag: %s\n", tag)
- }
-
- // read the blacklist entry
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "roletag-blacklist/" + tag,
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.Data == nil {
- t.Fatalf("failed to read the blacklisted role tag: %s\n", tag)
- }
- if resp.IsError() {
- t.Fatalf("failed to read the blacklisted role tag:%s. Err: %s\n", tag, resp.Data["error"])
- }
-
- // delete the blacklisted entry
- _, err = b.HandleRequest(&logical.Request{
- Operation: logical.DeleteOperation,
- Path: "roletag-blacklist/" + tag,
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // try to read the deleted entry
- tagEntry, err := b.lockedBlacklistRoleTagEntry(storage, tag)
- if err != nil {
- t.Fatal(err)
- }
- if tagEntry != nil {
- t.Fatalf("role tag should not have been present: %s\n", tag)
- }
-}
-
-// This is an acceptance test.
-// Requires the following env vars:
-// TEST_AWS_EC2_PKCS7
-// TEST_AWS_EC2_AMI_ID
-// TEST_AWS_EC2_ACCOUNT_ID
-// TEST_AWS_EC2_IAM_ROLE_ARN
-//
-// If the test is not being run on an EC2 instance that has access to
-// credentials using EC2RoleProvider, on top of the above vars, following
-// needs to be set:
-// TEST_AWS_SECRET_KEY
-// TEST_AWS_ACCESS_KEY
-func TestBackendAcc_LoginWithInstanceIdentityDocAndWhitelistIdentity(t *testing.T) {
- // This test case should be run only when certain env vars are set and
- // executed as an acceptance test.
- if os.Getenv(logicaltest.TestEnvVar) == "" {
- t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar))
- return
- }
-
- pkcs7 := os.Getenv("TEST_AWS_EC2_PKCS7")
- if pkcs7 == "" {
- t.Fatalf("env var TEST_AWS_EC2_PKCS7 not set")
- }
-
- amiID := os.Getenv("TEST_AWS_EC2_AMI_ID")
- if amiID == "" {
- t.Fatalf("env var TEST_AWS_EC2_AMI_ID not set")
- }
-
- iamARN := os.Getenv("TEST_AWS_EC2_IAM_ROLE_ARN")
- if iamARN == "" {
- t.Fatalf("env var TEST_AWS_EC2_IAM_ROLE_ARN not set")
- }
-
- accountID := os.Getenv("TEST_AWS_EC2_ACCOUNT_ID")
- if accountID == "" {
- t.Fatalf("env var TEST_AWS_EC2_ACCOUNT_ID not set")
- }
-
- roleName := amiID
-
- // create the backend
- storage := &logical.InmemStorage{}
- config := logical.TestBackendConfig()
- config.StorageView = storage
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- accessKey := os.Getenv("TEST_AWS_ACCESS_KEY")
- secretKey := os.Getenv("TEST_AWS_SECRET_KEY")
-
- // In case of problems with making API calls using the credentials (2FA enabled,
- // for instance), the keys need not be set if the test is running on an EC2
- // instance with permissions to get the credentials using EC2RoleProvider.
- if accessKey != "" && secretKey != "" {
- // get the API credentials from env vars
- clientConfig := map[string]interface{}{
- "access_key": accessKey,
- "secret_key": secretKey,
- }
- if clientConfig["access_key"] == "" ||
- clientConfig["secret_key"] == "" {
- t.Fatalf("credentials not configured")
- }
-
- // store the credentials
- _, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "config/client",
- Data: clientConfig,
- })
- if err != nil {
- t.Fatal(err)
- }
- }
-
- loginInput := map[string]interface{}{
- "pkcs7": pkcs7,
- "nonce": "vault-client-nonce",
- }
-
- // Perform the login operation with a AMI ID that is not matching
- // the bound on the role.
- loginRequest := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "login",
- Storage: storage,
- Data: loginInput,
- }
-
- // Place the wrong AMI ID in the role data.
- data := map[string]interface{}{
- "auth_type": "ec2",
- "policies": "root",
- "max_ttl": "120s",
- "bound_ami_id": "wrong_ami_id",
- "bound_account_id": accountID,
- "bound_iam_role_arn": iamARN,
- }
-
- roleReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/" + roleName,
- Storage: storage,
- Data: data,
- }
-
- // Save the role with wrong AMI ID
- resp, err := b.HandleRequest(roleReq)
- if err != nil && (resp != nil && resp.IsError()) {
- t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
- }
-
- // Expect failure when tried to login with wrong AMI ID
- resp, err = b.HandleRequest(loginRequest)
- if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
- t.Fatalf("bad: expected error response: resp:%#v\nerr:%v", resp, err)
- }
-
- // Place the correct AMI ID, but make the AccountID wrong
- roleReq.Operation = logical.UpdateOperation
- data["bound_ami_id"] = amiID
- data["bound_account_id"] = "wrong-account-id"
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err)
- }
-
- // Expect failure when tried to login with incorrect AccountID
- resp, err = b.HandleRequest(loginRequest)
- if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
- t.Fatalf("bad: expected error response: resp:%#v\nerr:%v", resp, err)
- }
-
- // Place the correct AccountID, but make the wrong IAMRoleARN
- data["bound_account_id"] = accountID
- data["bound_iam_role_arn"] = "wrong_iam_role_arn"
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err)
- }
-
- // Attempt to login and expect a fail because IAM Role ARN is wrong
- resp, err = b.HandleRequest(loginRequest)
- if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
- t.Fatalf("bad: expected error response: resp:%#v\nerr:%v", resp, err)
- }
-
- // place the correct IAM role ARN
- data["bound_iam_role_arn"] = iamARN
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err)
- }
-
- // Now, the login attempt should succeed
- resp, err = b.HandleRequest(loginRequest)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.Auth == nil || resp.IsError() {
- t.Fatalf("bad: failed to login: resp:%#v\nerr:%v", resp, err)
- }
-
- // verify the presence of instance_id in the response object.
- instanceID := resp.Auth.Metadata["instance_id"]
- if instanceID == "" {
- t.Fatalf("instance ID not present in the response object")
- }
-
- loginInput["nonce"] = "changed-vault-client-nonce"
- // try to login again with changed nonce
- resp, err = b.HandleRequest(loginRequest)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || !resp.IsError() {
- t.Fatalf("login attempt should have failed due to client nonce mismatch")
- }
-
- // Check if a whitelist identity entry is created after the login.
- wlRequest := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "identity-whitelist/" + instanceID,
- Storage: storage,
- }
- resp, err = b.HandleRequest(wlRequest)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.Data == nil || resp.Data["role"] != roleName {
- t.Fatalf("failed to read whitelist identity")
- }
-
- // Delete the whitelist identity entry.
- wlRequest.Operation = logical.DeleteOperation
- resp, err = b.HandleRequest(wlRequest)
- if err != nil {
- t.Fatal(err)
- }
- if resp.IsError() {
- t.Fatalf("failed to delete whitelist identity")
- }
-
- // Allow a fresh login.
- resp, err = b.HandleRequest(loginRequest)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.Auth == nil || resp.IsError() {
- t.Fatalf("login attempt failed")
- }
-}
-
-func TestBackend_pathStsConfig(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
- stsReq := &logical.Request{
- Operation: logical.CreateOperation,
- Storage: storage,
- Path: "config/sts/account1",
- }
- checkFound, exists, err := b.HandleExistenceCheck(stsReq)
- if err != nil {
- t.Fatal(err)
- }
- if !checkFound {
- t.Fatal("existence check not found for path 'config/sts/account1'")
- }
- if exists {
- t.Fatal("existence check should have returned 'false' for 'config/sts/account1'")
- }
-
- data := map[string]interface{}{
- "sts_role": "arn:aws:iam:account1:role/myRole",
- }
-
- stsReq.Data = data
- // test create operation
- resp, err := b.HandleRequest(stsReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("resp: %#v, err: %v", resp, err)
- }
-
- stsReq.Data = nil
- // test existence check
- checkFound, exists, err = b.HandleExistenceCheck(stsReq)
- if err != nil {
- t.Fatal(err)
- }
- if !checkFound {
- t.Fatal("existence check not found for path 'config/sts/account1'")
- }
- if !exists {
- t.Fatal("existence check should have returned 'true' for 'config/sts/account1'")
- }
-
- stsReq.Operation = logical.ReadOperation
- // test read operation
- resp, err = b.HandleRequest(stsReq)
- if err != nil {
- t.Fatal(err)
- }
- expectedStsRole := "arn:aws:iam:account1:role/myRole"
- if resp.Data["sts_role"].(string) != expectedStsRole {
- t.Fatalf("bad: expected:%s\n got:%s\n", expectedStsRole, resp.Data["sts_role"].(string))
- }
-
- stsReq.Operation = logical.CreateOperation
- stsReq.Path = "config/sts/account2"
- stsReq.Data = data
- // create another entry to test the list operation
- resp, err = b.HandleRequest(stsReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatal(err)
- }
-
- stsReq.Operation = logical.ListOperation
- stsReq.Path = "config/sts"
- // test list operation
- resp, err = b.HandleRequest(stsReq)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatalf("failed to list config/sts")
- }
- keys := resp.Data["keys"].([]string)
- if len(keys) != 2 {
- t.Fatalf("invalid keys listed: %#v\n", keys)
- }
-
- stsReq.Operation = logical.DeleteOperation
- stsReq.Path = "config/sts/account1"
- resp, err = b.HandleRequest(stsReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatal(err)
- }
-
- stsReq.Path = "config/sts/account2"
- resp, err = b.HandleRequest(stsReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatal(err)
- }
-
- stsReq.Operation = logical.ListOperation
- stsReq.Path = "config/sts"
- // test list operation
- resp, err = b.HandleRequest(stsReq)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatalf("failed to list config/sts")
- }
- if resp.Data["keys"] != nil {
- t.Fatalf("no entries should be present")
- }
-}
-
-func buildCallerIdentityLoginData(request *http.Request, roleName string) (map[string]interface{}, error) {
- headersJson, err := json.Marshal(request.Header)
- if err != nil {
- return nil, err
- }
- requestBody, err := ioutil.ReadAll(request.Body)
- if err != nil {
- return nil, err
- }
- return map[string]interface{}{
- "iam_http_request_method": request.Method,
- "iam_request_url": base64.StdEncoding.EncodeToString([]byte(request.URL.String())),
- "iam_request_headers": base64.StdEncoding.EncodeToString(headersJson),
- "iam_request_body": base64.StdEncoding.EncodeToString(requestBody),
- "request_role": roleName,
- }, nil
-}
-
-// This is an acceptance test.
-// If the test is NOT being run on an AWS EC2 instance in an instance profile,
-// it requires the following environment variables to be set:
-// TEST_AWS_ACCESS_KEY_ID
-// TEST_AWS_SECRET_ACCESS_KEY
-// TEST_AWS_SECURITY_TOKEN or TEST_AWS_SESSION_TOKEN (optional, if you are using short-lived creds)
-// These are intentionally NOT the "standard" variables to prevent accidentally
-// using prod creds in acceptance tests
-func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
- // This test case should be run only when certain env vars are set and
- // executed as an acceptance test.
- if os.Getenv(logicaltest.TestEnvVar) == "" {
- t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar))
- return
- }
-
- storage := &logical.InmemStorage{}
- config := logical.TestBackendConfig()
- config.StorageView = storage
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // Override the default AWS env vars (if set) with our test creds
- // so that the credential provider chain will pick them up
- // NOTE that I'm not bothing to override the shared config file location,
- // so if creds are specified there, they will be used before IAM
- // instance profile creds
- // This doesn't provide perfect leakage protection (e.g., it will still
- // potentially pick up credentials from the ~/.config files), but probably
- // good enough rather than having to muck around in the low-level details
- for _, envvar := range []string{
- "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SECURITY_TOKEN", "AWS_SESSION_TOKEN"} {
- // restore existing environment variables (in case future tests need them)
- defer os.Setenv(envvar, os.Getenv(envvar))
- os.Setenv(envvar, os.Getenv("TEST_"+envvar))
- }
- awsSession, err := session.NewSession()
- if err != nil {
- fmt.Println("failed to create session,", err)
- return
- }
-
- stsService := sts.New(awsSession)
- stsInputParams := &sts.GetCallerIdentityInput{}
-
- testIdentity, err := stsService.GetCallerIdentity(stsInputParams)
- if err != nil {
- t.Fatalf("Received error retrieving identity: %s", err)
- }
- entity, err := parseIamArn(*testIdentity.Arn)
- if err != nil {
- t.Fatal(err)
- }
-
- // Test setup largely done
- // At this point, we're going to:
- // 1. Configure the client to require our test header value
- // 2. Configure two different roles:
- // a. One bound to our test user
- // b. One bound to a garbage ARN
- // 3. Pass in a request that doesn't have the signed header, ensure
- // we're not allowed to login
- // 4. Passin a request that has a validly signed header, but the wrong
- // value, ensure it doesn't allow login
- // 5. Pass in a request that has a validly signed request, ensure
- // it allows us to login to our role
- // 6. Pass in a request that has a validly signed request, asking for
- // the other role, ensure it fails
- const testVaultHeaderValue = "VaultAcceptanceTesting"
- const testValidRoleName = "valid-role"
- const testInvalidRoleName = "invalid-role"
-
- clientConfigData := map[string]interface{}{
- "iam_server_id_header_value": testVaultHeaderValue,
- }
- clientRequest := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/client",
- Storage: storage,
- Data: clientConfigData,
- }
- _, err = b.HandleRequest(clientRequest)
- if err != nil {
- t.Fatal(err)
- }
-
- // configuring the valid role we'll be able to login to
- roleData := map[string]interface{}{
- "bound_iam_principal_arn": entity.canonicalArn(),
- "policies": "root",
- "auth_type": iamAuthType,
- }
- roleRequest := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/" + testValidRoleName,
- Storage: storage,
- Data: roleData,
- }
- resp, err := b.HandleRequest(roleRequest)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err)
- }
-
- // configuring a valid role we won't be able to login to
- roleDataEc2 := map[string]interface{}{
- "auth_type": "ec2",
- "policies": "root",
- "bound_ami_id": "ami-1234567",
- }
- roleRequestEc2 := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/ec2only",
- Storage: storage,
- Data: roleDataEc2,
- }
- resp, err = b.HandleRequest(roleRequestEc2)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: failed to create role; resp:%#v\nerr:%v", resp, err)
- }
-
- fakeArn := "arn:aws:iam::123456789012:role/somePath/FakeRole"
- fakeArnResolver := func(s logical.Storage, arn string) (string, error) {
- if arn == fakeArn {
- return fmt.Sprintf("FakeUniqueIdFor%s", fakeArn), nil
- }
- return b.resolveArnToRealUniqueId(s, arn)
- }
- b.resolveArnToUniqueIDFunc = fakeArnResolver
-
- // now we're creating the invalid role we won't be able to login to
- roleData["bound_iam_principal_arn"] = fakeArn
- roleRequest.Path = "role/" + testInvalidRoleName
- resp, err = b.HandleRequest(roleRequest)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: didn't fail to create role: resp:%#v\nerr:%v", resp, err)
- }
-
- // now, create the request without the signed header
- stsRequestNoHeader, _ := stsService.GetCallerIdentityRequest(stsInputParams)
- stsRequestNoHeader.Sign()
- loginData, err := buildCallerIdentityLoginData(stsRequestNoHeader.HTTPRequest, testValidRoleName)
- if err != nil {
- t.Fatal(err)
- }
- loginRequest := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "login",
- Storage: storage,
- Data: loginData,
- }
- resp, err = b.HandleRequest(loginRequest)
- if err != nil || resp == nil || !resp.IsError() {
- t.Errorf("bad: expected failed login due to missing header: resp:%#v\nerr:%v", resp, err)
- }
-
- // create the request with the invalid header value
-
- // Not reusing stsRequestNoHeader because the process of signing the request
- // and reading the body modifies the underlying request, so it's just cleaner
- // to get new requests.
- stsRequestInvalidHeader, _ := stsService.GetCallerIdentityRequest(stsInputParams)
- stsRequestInvalidHeader.HTTPRequest.Header.Add(iamServerIdHeader, "InvalidValue")
- stsRequestInvalidHeader.Sign()
- loginData, err = buildCallerIdentityLoginData(stsRequestInvalidHeader.HTTPRequest, testValidRoleName)
- if err != nil {
- t.Fatal(err)
- }
- loginRequest = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "login",
- Storage: storage,
- Data: loginData,
- }
- resp, err = b.HandleRequest(loginRequest)
- if err != nil || resp == nil || !resp.IsError() {
- t.Errorf("bad: expected failed login due to invalid header: resp:%#v\nerr:%v", resp, err)
- }
-
- // Now, valid request against invalid role
- stsRequestValid, _ := stsService.GetCallerIdentityRequest(stsInputParams)
- stsRequestValid.HTTPRequest.Header.Add(iamServerIdHeader, testVaultHeaderValue)
- stsRequestValid.Sign()
- loginData, err = buildCallerIdentityLoginData(stsRequestValid.HTTPRequest, testInvalidRoleName)
- if err != nil {
- t.Fatal(err)
- }
- loginRequest = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "login",
- Storage: storage,
- Data: loginData,
- }
- resp, err = b.HandleRequest(loginRequest)
- if err != nil || resp == nil || !resp.IsError() {
- t.Errorf("bad: expected failed login due to invalid role: resp:%#v\nerr:%v", resp, err)
- }
-
- loginData["role"] = "ec2only"
- resp, err = b.HandleRequest(loginRequest)
- if err != nil || resp == nil || !resp.IsError() {
- t.Errorf("bad: expected failed login due to bad auth type: resp:%#v\nerr:%v", resp, err)
- }
-
- // finally, the happy path test :)
-
- loginData["role"] = testValidRoleName
- resp, err = b.HandleRequest(loginRequest)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.Auth == nil || resp.IsError() {
- t.Fatalf("bad: expected valid login: resp:%#v", resp)
- }
-
- renewReq := generateRenewRequest(storage, resp.Auth)
- // dump a fake ARN into the metadata to ensure that we ONLY look
- // at the unique ID that has been generated
- renewReq.Auth.Metadata["canonical_arn"] = "fake_arn"
- empty_login_fd := &framework.FieldData{
- Raw: map[string]interface{}{},
- Schema: pathLogin(b).Fields,
- }
- // ensure we can renew
- resp, err = b.pathLoginRenew(renewReq, empty_login_fd)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("got nil response from renew")
- }
- if resp.IsError() {
- t.Fatalf("got error when renewing: %#v", *resp)
- }
-
- // Now, fake out the unique ID resolver to ensure we fail login if the unique ID
- // changes from under us
- b.resolveArnToUniqueIDFunc = resolveArnToFakeUniqueId
- // First, we need to update the role to force Vault to use our fake resolver to
- // pick up the fake user ID
- roleData["bound_iam_principal_arn"] = entity.canonicalArn()
- roleRequest.Path = "role/" + testValidRoleName
- resp, err = b.HandleRequest(roleRequest)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: failed to recreate role: resp:%#v\nerr:%v", resp, err)
- }
- resp, err = b.HandleRequest(loginRequest)
- if err != nil || resp == nil || !resp.IsError() {
- t.Errorf("bad: expected failed login due to changed AWS role ID: resp: %#v\nerr:%v", resp, err)
- }
-
- // and ensure a renew no longer works
- resp, err = b.pathLoginRenew(renewReq, empty_login_fd)
- if err == nil || (resp != nil && !resp.IsError()) {
- t.Errorf("bad: expected failed renew due to changed AWS role ID: resp: %#v", resp, err)
- }
- // Undo the fake resolver...
- b.resolveArnToUniqueIDFunc = b.resolveArnToRealUniqueId
-
- // Now test that wildcard matching works
- wildcardRoleName := "valid_wildcard"
- wildcardEntity := *entity
- wildcardEntity.FriendlyName = "*"
- roleData["bound_iam_principal_arn"] = wildcardEntity.canonicalArn()
- roleRequest.Path = "role/" + wildcardRoleName
- resp, err = b.HandleRequest(roleRequest)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: failed to create wildcard role: resp:%#v\nerr:%v", resp, err)
- }
-
- loginData["role"] = wildcardRoleName
- resp, err = b.HandleRequest(loginRequest)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.Auth == nil || resp.IsError() {
- t.Fatalf("bad: expected valid login: resp:%#v", resp)
- }
- // and ensure we can renew
- renewReq = generateRenewRequest(storage, resp.Auth)
- resp, err = b.pathLoginRenew(renewReq, empty_login_fd)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("got nil response from renew")
- }
- if resp.IsError() {
- t.Fatalf("got error when renewing: %#v", *resp)
- }
- // ensure the cache is populated
- cachedArn := b.getCachedUserId(resp.Auth.Metadata["client_user_id"])
- if cachedArn == "" {
- t.Errorf("got empty ARN back from user ID cache; expected full arn")
- }
-}
-
-func generateRenewRequest(s logical.Storage, auth *logical.Auth) *logical.Request {
- renewReq := &logical.Request{
- Storage: s,
- Auth: &logical.Auth{},
- }
- renewReq.Auth.InternalData = auth.InternalData
- renewReq.Auth.Metadata = auth.Metadata
- renewReq.Auth.LeaseOptions = auth.LeaseOptions
- renewReq.Auth.Policies = auth.Policies
- renewReq.Auth.IssueTime = time.Now()
-
- return renewReq
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go
deleted file mode 100644
index 2842c24..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package awsauth
-
-import (
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/sts"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/awsutil"
-)
-
-type CLIHandler struct{}
-
-// Generates the necessary data to send to the Vault server for generating a token
-// This is useful for other API clients to use
-func GenerateLoginData(accessKey, secretKey, sessionToken, headerValue string) (map[string]interface{}, error) {
- loginData := make(map[string]interface{})
-
- credConfig := &awsutil.CredentialsConfig{
- AccessKey: accessKey,
- SecretKey: secretKey,
- SessionToken: sessionToken,
- }
- creds, err := credConfig.GenerateCredentialChain()
- if err != nil {
- return nil, err
- }
- if creds == nil {
- return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
- }
-
- // Use the credentials we've found to construct an STS session
- stsSession, err := session.NewSessionWithOptions(session.Options{
- Config: aws.Config{Credentials: creds},
- })
- if err != nil {
- return nil, err
- }
-
- var params *sts.GetCallerIdentityInput
- svc := sts.New(stsSession)
- stsRequest, _ := svc.GetCallerIdentityRequest(params)
-
- // Inject the required auth header value, if supplied, and then sign the request including that header
- if headerValue != "" {
- stsRequest.HTTPRequest.Header.Add(iamServerIdHeader, headerValue)
- }
- stsRequest.Sign()
-
- // Now extract out the relevant parts of the request
- headersJson, err := json.Marshal(stsRequest.HTTPRequest.Header)
- if err != nil {
- return nil, err
- }
- requestBody, err := ioutil.ReadAll(stsRequest.HTTPRequest.Body)
- if err != nil {
- return nil, err
- }
- loginData["iam_http_request_method"] = stsRequest.HTTPRequest.Method
- loginData["iam_request_url"] = base64.StdEncoding.EncodeToString([]byte(stsRequest.HTTPRequest.URL.String()))
- loginData["iam_request_headers"] = base64.StdEncoding.EncodeToString(headersJson)
- loginData["iam_request_body"] = base64.StdEncoding.EncodeToString(requestBody)
-
- return loginData, nil
-}
-
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
- mount, ok := m["mount"]
- if !ok {
- mount = "aws"
- }
-
- role, ok := m["role"]
- if !ok {
- role = ""
- }
-
- headerValue, ok := m["header_value"]
- if !ok {
- headerValue = ""
- }
-
- loginData, err := GenerateLoginData(m["aws_access_key_id"], m["aws_secret_access_key"], m["aws_security_token"], headerValue)
- if err != nil {
- return nil, err
- }
- if loginData == nil {
- return nil, fmt.Errorf("got nil response from GenerateLoginData")
- }
- loginData["role"] = role
- path := fmt.Sprintf("auth/%s/login", mount)
- secret, err := c.Logical().Write(path, loginData)
-
- if err != nil {
- return nil, err
- }
- if secret == nil {
- return nil, fmt.Errorf("empty response from credential provider")
- }
-
- return secret, nil
-}
-
-func (h *CLIHandler) Help() string {
- help := `
-The AWS credential provider allows you to authenticate with
-AWS IAM credentials. To use it, you specify valid AWS IAM credentials
-in one of a number of ways. They can be specified explicitly on the
-command line (which in general you should not do), via the standard AWS
-environment variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and
-AWS_SECURITY_TOKEN), via the ~/.aws/credentials file, or via an EC2
-instance profile (in that order).
-
- Example: vault auth -method=aws
-
-If you need to explicitly pass in credentials, you would do it like this:
- Example: vault auth -method=aws aws_access_key_id= aws_secret_access_key= aws_security_token=
-
-Key/Value Pairs:
-
- mount=aws The mountpoint for the AWS credential provider.
- Defaults to "aws"
- aws_access_key_id= Explicitly specified AWS access key
- aws_secret_access_key= Explicitly specified AWS secret key
- aws_security_token= Security token for temporary credentials
- header_value The Value of the X-Vault-AWS-IAM-Server-ID header.
- role The name of the role you're requesting a token for
- `
-
- return strings.TrimSpace(help)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go
deleted file mode 100644
index aa3da0d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go
+++ /dev/null
@@ -1,271 +0,0 @@
-package awsauth
-
-import (
- "fmt"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/ec2"
- "github.com/aws/aws-sdk-go/service/iam"
- "github.com/aws/aws-sdk-go/service/sts"
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/helper/awsutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// getRawClientConfig creates a aws-sdk-go config, which is used to create client
-// that can interact with AWS API. This builds credentials in the following
-// order of preference:
-//
-// * Static credentials from 'config/client'
-// * Environment variables
-// * Instance metadata role
-func (b *backend) getRawClientConfig(s logical.Storage, region, clientType string) (*aws.Config, error) {
- credsConfig := &awsutil.CredentialsConfig{
- Region: region,
- }
-
- // Read the configured secret key and access key
- config, err := b.nonLockedClientConfigEntry(s)
- if err != nil {
- return nil, err
- }
-
- endpoint := aws.String("")
- if config != nil {
- // Override the default endpoint with the configured endpoint.
- switch {
- case clientType == "ec2" && config.Endpoint != "":
- endpoint = aws.String(config.Endpoint)
- case clientType == "iam" && config.IAMEndpoint != "":
- endpoint = aws.String(config.IAMEndpoint)
- case clientType == "sts" && config.STSEndpoint != "":
- endpoint = aws.String(config.STSEndpoint)
- }
-
- credsConfig.AccessKey = config.AccessKey
- credsConfig.SecretKey = config.SecretKey
- }
-
- credsConfig.HTTPClient = cleanhttp.DefaultClient()
-
- creds, err := credsConfig.GenerateCredentialChain()
- if err != nil {
- return nil, err
- }
- if creds == nil {
- return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
- }
-
- // Create a config that can be used to make the API calls.
- return &aws.Config{
- Credentials: creds,
- Region: aws.String(region),
- HTTPClient: cleanhttp.DefaultClient(),
- Endpoint: endpoint,
- }, nil
-}
-
-// getClientConfig returns an aws-sdk-go config, with optionally assumed credentials
-// It uses getRawClientConfig to obtain config for the runtime environemnt, and if
-// stsRole is a non-empty string, it will use AssumeRole to obtain a set of assumed
-// credentials. The credentials will expire after 15 minutes but will auto-refresh.
-func (b *backend) getClientConfig(s logical.Storage, region, stsRole, accountID, clientType string) (*aws.Config, error) {
-
- config, err := b.getRawClientConfig(s, region, clientType)
- if err != nil {
- return nil, err
- }
- if config == nil {
- return nil, fmt.Errorf("could not compile valid credentials through the default provider chain")
- }
-
- stsConfig, err := b.getRawClientConfig(s, region, "sts")
- if stsConfig == nil {
- return nil, fmt.Errorf("could not configure STS client")
- }
- if err != nil {
- return nil, err
- }
- if stsRole != "" {
- assumedCredentials := stscreds.NewCredentials(session.New(stsConfig), stsRole)
- // Test that we actually have permissions to assume the role
- if _, err = assumedCredentials.Get(); err != nil {
- return nil, err
- }
- config.Credentials = assumedCredentials
- } else {
- if b.defaultAWSAccountID == "" {
- client := sts.New(session.New(stsConfig))
- if client == nil {
- return nil, fmt.Errorf("could not obtain sts client: %v", err)
- }
- inputParams := &sts.GetCallerIdentityInput{}
- identity, err := client.GetCallerIdentity(inputParams)
- if err != nil {
- return nil, fmt.Errorf("unable to fetch current caller: %v", err)
- }
- if identity == nil {
- return nil, fmt.Errorf("got nil result from GetCallerIdentity")
- }
- b.defaultAWSAccountID = *identity.Account
- }
- if b.defaultAWSAccountID != accountID {
- return nil, fmt.Errorf("unable to fetch client for account ID %s -- default client is for account %s", accountID, b.defaultAWSAccountID)
- }
- }
-
- return config, nil
-}
-
-// flushCachedEC2Clients deletes all the cached ec2 client objects from the backend.
-// If the client credentials configuration is deleted or updated in the backend, all
-// the cached EC2 client objects will be flushed. Config mutex lock should be
-// acquired for write operation before calling this method.
-func (b *backend) flushCachedEC2Clients() {
- // deleting items in map during iteration is safe
- for region, _ := range b.EC2ClientsMap {
- delete(b.EC2ClientsMap, region)
- }
-}
-
-// flushCachedIAMClients deletes all the cached iam client objects from the
-// backend. If the client credentials configuration is deleted or updated in
-// the backend, all the cached IAM client objects will be flushed. Config mutex
-// lock should be acquired for write operation before calling this method.
-func (b *backend) flushCachedIAMClients() {
- // deleting items in map during iteration is safe
- for region, _ := range b.IAMClientsMap {
- delete(b.IAMClientsMap, region)
- }
-}
-
-// Gets an entry out of the user ID cache
-func (b *backend) getCachedUserId(userId string) string {
- if userId == "" {
- return ""
- }
- if entry, ok := b.iamUserIdToArnCache.Get(userId); ok {
- b.iamUserIdToArnCache.SetDefault(userId, entry)
- return entry.(string)
- }
- return ""
-}
-
-// Sets an entry in the user ID cache
-func (b *backend) setCachedUserId(userId, arn string) {
- if userId != "" {
- b.iamUserIdToArnCache.SetDefault(userId, arn)
- }
-}
-
-func (b *backend) stsRoleForAccount(s logical.Storage, accountID string) (string, error) {
- // Check if an STS configuration exists for the AWS account
- sts, err := b.lockedAwsStsEntry(s, accountID)
- if err != nil {
- return "", fmt.Errorf("error fetching STS config for account ID %q: %q\n", accountID, err)
- }
- // An empty STS role signifies the master account
- if sts != nil {
- return sts.StsRole, nil
- }
- return "", nil
-}
-
-// clientEC2 creates a client to interact with AWS EC2 API
-func (b *backend) clientEC2(s logical.Storage, region, accountID string) (*ec2.EC2, error) {
- stsRole, err := b.stsRoleForAccount(s, accountID)
- if err != nil {
- return nil, err
- }
- b.configMutex.RLock()
- if b.EC2ClientsMap[region] != nil && b.EC2ClientsMap[region][stsRole] != nil {
- defer b.configMutex.RUnlock()
- // If the client object was already created, return it
- return b.EC2ClientsMap[region][stsRole], nil
- }
-
- // Release the read lock and acquire the write lock
- b.configMutex.RUnlock()
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- // If the client gets created while switching the locks, return it
- if b.EC2ClientsMap[region] != nil && b.EC2ClientsMap[region][stsRole] != nil {
- return b.EC2ClientsMap[region][stsRole], nil
- }
-
- // Create an AWS config object using a chain of providers
- var awsConfig *aws.Config
- awsConfig, err = b.getClientConfig(s, region, stsRole, accountID, "ec2")
-
- if err != nil {
- return nil, err
- }
-
- if awsConfig == nil {
- return nil, fmt.Errorf("could not retrieve valid assumed credentials")
- }
-
- // Create a new EC2 client object, cache it and return the same
- client := ec2.New(session.New(awsConfig))
- if client == nil {
- return nil, fmt.Errorf("could not obtain ec2 client")
- }
- if _, ok := b.EC2ClientsMap[region]; !ok {
- b.EC2ClientsMap[region] = map[string]*ec2.EC2{stsRole: client}
- } else {
- b.EC2ClientsMap[region][stsRole] = client
- }
-
- return b.EC2ClientsMap[region][stsRole], nil
-}
-
-// clientIAM creates a client to interact with AWS IAM API
-func (b *backend) clientIAM(s logical.Storage, region, accountID string) (*iam.IAM, error) {
- stsRole, err := b.stsRoleForAccount(s, accountID)
- if err != nil {
- return nil, err
- }
- b.configMutex.RLock()
- if b.IAMClientsMap[region] != nil && b.IAMClientsMap[region][stsRole] != nil {
- defer b.configMutex.RUnlock()
- // If the client object was already created, return it
- return b.IAMClientsMap[region][stsRole], nil
- }
-
- // Release the read lock and acquire the write lock
- b.configMutex.RUnlock()
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- // If the client gets created while switching the locks, return it
- if b.IAMClientsMap[region] != nil && b.IAMClientsMap[region][stsRole] != nil {
- return b.IAMClientsMap[region][stsRole], nil
- }
-
- // Create an AWS config object using a chain of providers
- var awsConfig *aws.Config
- awsConfig, err = b.getClientConfig(s, region, stsRole, accountID, "iam")
-
- if err != nil {
- return nil, err
- }
-
- if awsConfig == nil {
- return nil, fmt.Errorf("could not retrieve valid assumed credentials")
- }
-
- // Create a new IAM client object, cache it and return the same
- client := iam.New(session.New(awsConfig))
- if client == nil {
- return nil, fmt.Errorf("could not obtain iam client")
- }
- if _, ok := b.IAMClientsMap[region]; !ok {
- b.IAMClientsMap[region] = map[string]*iam.IAM{stsRole: client}
- } else {
- b.IAMClientsMap[region][stsRole] = client
- }
- return b.IAMClientsMap[region][stsRole], nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_certificate.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_certificate.go
deleted file mode 100644
index 0c026ed..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_certificate.go
+++ /dev/null
@@ -1,447 +0,0 @@
-package awsauth
-
-import (
- "crypto/x509"
- "encoding/base64"
- "encoding/pem"
- "fmt"
- "math/big"
- "strings"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// dsaSignature represents the contents of the signature of a signed
-// content using digital signature algorithm.
-type dsaSignature struct {
- R, S *big.Int
-}
-
-// This certificate is used to verify the PKCS#7 signature of the instance
-// identity document. As per AWS documentation, this public key is valid for
-// US East (N. Virginia), US West (Oregon), US West (N. California), EU
-// (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia
-// Pacific (Singapore), Asia Pacific (Sydney), and South America (Sao Paulo).
-//
-// It's also the same certificate, but for some reason listed separately, for
-// GovCloud (US)
-const genericAWSPublicCertificatePkcs7 = `-----BEGIN CERTIFICATE-----
-MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
-FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
-VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z
-ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
-IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
-cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e
-ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3
-VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P
-hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j
-k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U
-hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF
-lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf
-MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW
-MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw
-vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw
-7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K
------END CERTIFICATE-----
-`
-
-// This certificate is used to verify the instance identity document using the
-// RSA digest of the same
-const genericAWSPublicCertificateIdentity = `-----BEGIN CERTIFICATE-----
-MIIDIjCCAougAwIBAgIJAKnL4UEDMN/FMA0GCSqGSIb3DQEBBQUAMGoxCzAJBgNV
-BAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdTZWF0dGxlMRgw
-FgYDVQQKEw9BbWF6b24uY29tIEluYy4xGjAYBgNVBAMTEWVjMi5hbWF6b25hd3Mu
-Y29tMB4XDTE0MDYwNTE0MjgwMloXDTI0MDYwNTE0MjgwMlowajELMAkGA1UEBhMC
-VVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1NlYXR0bGUxGDAWBgNV
-BAoTD0FtYXpvbi5jb20gSW5jLjEaMBgGA1UEAxMRZWMyLmFtYXpvbmF3cy5jb20w
-gZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAIe9GN//SRK2knbjySG0ho3yqQM3
-e2TDhWO8D2e8+XZqck754gFSo99AbT2RmXClambI7xsYHZFapbELC4H91ycihvrD
-jbST1ZjkLQgga0NE1q43eS68ZeTDccScXQSNivSlzJZS8HJZjgqzBlXjZftjtdJL
-XeE4hwvo0sD4f3j9AgMBAAGjgc8wgcwwHQYDVR0OBBYEFCXWzAgVyrbwnFncFFIs
-77VBdlE4MIGcBgNVHSMEgZQwgZGAFCXWzAgVyrbwnFncFFIs77VBdlE4oW6kbDBq
-MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHU2Vh
-dHRsZTEYMBYGA1UEChMPQW1hem9uLmNvbSBJbmMuMRowGAYDVQQDExFlYzIuYW1h
-em9uYXdzLmNvbYIJAKnL4UEDMN/FMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
-BQADgYEAFYcz1OgEhQBXIwIdsgCOS8vEtiJYF+j9uO6jz7VOmJqO+pRlAbRlvY8T
-C1haGgSI/A1uZUKs/Zfnph0oEI0/hu1IIJ/SKBDtN5lvmZ/IzbOPIJWirlsllQIQ
-7zvWbGd9c9+Rm3p04oTvhup99la7kZqevJK0QRdD/6NpCKsqP/0=
------END CERTIFICATE-----`
-
-// pathListCertificates creates a path that enables listing of all
-// the AWS public certificates registered with Vault.
-func pathListCertificates(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/certificates/?",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathCertificatesList,
- },
-
- HelpSynopsis: pathListCertificatesHelpSyn,
- HelpDescription: pathListCertificatesHelpDesc,
- }
-}
-
-func pathConfigCertificate(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/certificate/" + framework.GenericNameRegex("cert_name"),
- Fields: map[string]*framework.FieldSchema{
- "cert_name": {
- Type: framework.TypeString,
- Description: "Name of the certificate.",
- },
- "aws_public_cert": {
- Type: framework.TypeString,
- Description: "AWS Public cert required to verify PKCS7 signature of the EC2 instance metadata.",
- },
- "type": {
- Type: framework.TypeString,
- Default: "pkcs7",
- Description: `
-Takes the value of either "pkcs7" or "identity", indicating the type of
-document which can be verified using the given certificate. The reason is that
-the PKCS#7 document will have a DSA digest and the identity signature will have
-an RSA signature, and accordingly the public certificates to verify those also
-vary. Defaults to "pkcs7".`,
- },
- },
-
- ExistenceCheck: b.pathConfigCertificateExistenceCheck,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.CreateOperation: b.pathConfigCertificateCreateUpdate,
- logical.UpdateOperation: b.pathConfigCertificateCreateUpdate,
- logical.ReadOperation: b.pathConfigCertificateRead,
- logical.DeleteOperation: b.pathConfigCertificateDelete,
- },
-
- HelpSynopsis: pathConfigCertificateSyn,
- HelpDescription: pathConfigCertificateDesc,
- }
-}
-
-// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
-// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
-func (b *backend) pathConfigCertificateExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
- certName := data.Get("cert_name").(string)
- if certName == "" {
- return false, fmt.Errorf("missing cert_name")
- }
-
- entry, err := b.lockedAWSPublicCertificateEntry(req.Storage, certName)
- if err != nil {
- return false, err
- }
- return entry != nil, nil
-}
-
-// pathCertificatesList is used to list all the AWS public certificates registered with Vault
-func (b *backend) pathCertificatesList(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.configMutex.RLock()
- defer b.configMutex.RUnlock()
-
- certs, err := req.Storage.List("config/certificate/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(certs), nil
-}
-
-// Decodes the PEM encoded certiticate and parses it into a x509 cert
-func decodePEMAndParseCertificate(certificate string) (*x509.Certificate, error) {
- // Decode the PEM block and error out if a block is not detected in the first attempt
- decodedPublicCert, rest := pem.Decode([]byte(certificate))
- if len(rest) != 0 {
- return nil, fmt.Errorf("invalid certificate; should be one PEM block only")
- }
-
- // Check if the certificate can be parsed
- publicCert, err := x509.ParseCertificate(decodedPublicCert.Bytes)
- if err != nil {
- return nil, err
- }
- if publicCert == nil {
- return nil, fmt.Errorf("invalid certificate; failed to parse certificate")
- }
- return publicCert, nil
-}
-
-// awsPublicCertificates returns a slice of all the parsed AWS public
-// certificates, which are used to verify either the SHA256 RSA signature, or
-// the PKCS7 signatures of the instance identity documents. This method will
-// append the certificates registered using `config/certificate/`
-// endpoint, along with the default certificate in the backend.
-func (b *backend) awsPublicCertificates(s logical.Storage, isPkcs bool) ([]*x509.Certificate, error) {
- // Lock at beginning and use internal method so that we are consistent as
- // we iterate through
- b.configMutex.RLock()
- defer b.configMutex.RUnlock()
-
- var certs []*x509.Certificate
-
- defaultCert := genericAWSPublicCertificateIdentity
- if isPkcs {
- defaultCert = genericAWSPublicCertificatePkcs7
- }
-
- // Append the generic certificate provided in the AWS EC2 instance metadata documentation
- decodedCert, err := decodePEMAndParseCertificate(defaultCert)
- if err != nil {
- return nil, err
- }
- certs = append(certs, decodedCert)
-
- // Get the list of all the registered certificates
- registeredCerts, err := s.List("config/certificate/")
- if err != nil {
- return nil, err
- }
-
- // Iterate through each certificate, parse and append it to a slice
- for _, cert := range registeredCerts {
- certEntry, err := b.nonLockedAWSPublicCertificateEntry(s, cert)
- if err != nil {
- return nil, err
- }
- if certEntry == nil {
- return nil, fmt.Errorf("certificate storage has a nil entry under the name:%s\n", cert)
- }
- // Append relevant certificates only
- if (isPkcs && certEntry.Type == "pkcs7") ||
- (!isPkcs && certEntry.Type == "identity") {
- decodedCert, err := decodePEMAndParseCertificate(certEntry.AWSPublicCert)
- if err != nil {
- return nil, err
- }
- certs = append(certs, decodedCert)
- }
- }
-
- return certs, nil
-}
-
-// lockedSetAWSPublicCertificateEntry is used to store the AWS public key in
-// the storage. This method acquires lock before creating or updating a storage
-// entry.
-func (b *backend) lockedSetAWSPublicCertificateEntry(s logical.Storage, certName string, certEntry *awsPublicCert) error {
- if certName == "" {
- return fmt.Errorf("missing certificate name")
- }
-
- if certEntry == nil {
- return fmt.Errorf("nil AWS public key certificate")
- }
-
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- return b.nonLockedSetAWSPublicCertificateEntry(s, certName, certEntry)
-}
-
-// nonLockedSetAWSPublicCertificateEntry is used to store the AWS public key in
-// the storage. This method does not acquire lock before reading the storage.
-// If locking is desired, use lockedSetAWSPublicCertificateEntry instead.
-func (b *backend) nonLockedSetAWSPublicCertificateEntry(s logical.Storage, certName string, certEntry *awsPublicCert) error {
- if certName == "" {
- return fmt.Errorf("missing certificate name")
- }
-
- if certEntry == nil {
- return fmt.Errorf("nil AWS public key certificate")
- }
-
- entry, err := logical.StorageEntryJSON("config/certificate/"+certName, certEntry)
- if err != nil {
- return err
- }
- if entry == nil {
- return fmt.Errorf("failed to create storage entry for AWS public key certificate")
- }
-
- return s.Put(entry)
-}
-
-// lockedAWSPublicCertificateEntry is used to get the configured AWS Public Key
-// that is used to verify the PKCS#7 signature of the instance identity
-// document.
-func (b *backend) lockedAWSPublicCertificateEntry(s logical.Storage, certName string) (*awsPublicCert, error) {
- b.configMutex.RLock()
- defer b.configMutex.RUnlock()
-
- return b.nonLockedAWSPublicCertificateEntry(s, certName)
-}
-
-// nonLockedAWSPublicCertificateEntry reads the certificate information from
-// the storage. This method does not acquire lock before reading the storage.
-// If locking is desired, use lockedAWSPublicCertificateEntry instead.
-func (b *backend) nonLockedAWSPublicCertificateEntry(s logical.Storage, certName string) (*awsPublicCert, error) {
- entry, err := s.Get("config/certificate/" + certName)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
- var certEntry awsPublicCert
- if err := entry.DecodeJSON(&certEntry); err != nil {
- return nil, err
- }
-
- // Handle upgrade for certificate type
- persistNeeded := false
- if certEntry.Type == "" {
- certEntry.Type = "pkcs7"
- persistNeeded = true
- }
-
- if persistNeeded {
- if err := b.nonLockedSetAWSPublicCertificateEntry(s, certName, &certEntry); err != nil {
- return nil, err
- }
- }
-
- return &certEntry, nil
-}
-
-// pathConfigCertificateDelete is used to delete the previously configured AWS
-// Public Key that is used to verify the PKCS#7 signature of the instance
-// identity document.
-func (b *backend) pathConfigCertificateDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- certName := data.Get("cert_name").(string)
- if certName == "" {
- return logical.ErrorResponse("missing cert_name"), nil
- }
-
- return nil, req.Storage.Delete("config/certificate/" + certName)
-}
-
-// pathConfigCertificateRead is used to view the configured AWS Public Key that
-// is used to verify the PKCS#7 signature of the instance identity document.
-func (b *backend) pathConfigCertificateRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- certName := data.Get("cert_name").(string)
- if certName == "" {
- return logical.ErrorResponse("missing cert_name"), nil
- }
-
- certificateEntry, err := b.lockedAWSPublicCertificateEntry(req.Storage, certName)
- if err != nil {
- return nil, err
- }
- if certificateEntry == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: structs.New(certificateEntry).Map(),
- }, nil
-}
-
-// pathConfigCertificateCreateUpdate is used to register an AWS Public Key that
-// is used to verify the PKCS#7 signature of the instance identity document.
-func (b *backend) pathConfigCertificateCreateUpdate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- certName := data.Get("cert_name").(string)
- if certName == "" {
- return logical.ErrorResponse("missing certificate name"), nil
- }
-
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- // Check if there is already a certificate entry registered
- certEntry, err := b.nonLockedAWSPublicCertificateEntry(req.Storage, certName)
- if err != nil {
- return nil, err
- }
- if certEntry == nil {
- certEntry = &awsPublicCert{}
- }
-
- // Check if type information is provided
- certTypeRaw, ok := data.GetOk("type")
- if ok {
- certEntry.Type = strings.ToLower(certTypeRaw.(string))
- } else if req.Operation == logical.CreateOperation {
- certEntry.Type = data.Get("type").(string)
- }
-
- switch certEntry.Type {
- case "pkcs7":
- case "identity":
- default:
- return logical.ErrorResponse(fmt.Sprintf("invalid certificate type %q", certEntry.Type)), nil
- }
-
- // Check if the value is provided by the client
- certStrData, ok := data.GetOk("aws_public_cert")
- if ok {
- if certBytes, err := base64.StdEncoding.DecodeString(certStrData.(string)); err == nil {
- certEntry.AWSPublicCert = string(certBytes)
- } else {
- certEntry.AWSPublicCert = certStrData.(string)
- }
- } else {
- // aws_public_cert should be supplied for both create and update operations.
- // If it is not provided, throw an error.
- return logical.ErrorResponse("missing aws_public_cert"), nil
- }
-
- // If explicitly set to empty string, error out
- if certEntry.AWSPublicCert == "" {
- return logical.ErrorResponse("invalid aws_public_cert"), nil
- }
-
- // Verify the certificate by decoding it and parsing it
- publicCert, err := decodePEMAndParseCertificate(certEntry.AWSPublicCert)
- if err != nil {
- return nil, err
- }
- if publicCert == nil {
- return logical.ErrorResponse("invalid certificate; failed to decode and parse certificate"), nil
- }
-
- // If none of the checks fail, save the provided certificate
- if err := b.nonLockedSetAWSPublicCertificateEntry(req.Storage, certName, certEntry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// Struct awsPublicCert holds the AWS Public Key that is used to verify the PKCS#7 signature
-// of the instnace identity document.
-type awsPublicCert struct {
- AWSPublicCert string `json:"aws_public_cert" structs:"aws_public_cert" mapstructure:"aws_public_cert"`
- Type string `json:"type" structs:"type" mapstructure:"type"`
-}
-
-const pathConfigCertificateSyn = `
-Adds the AWS Public Key that is used to verify the PKCS#7 signature of the identidy document.
-`
-
-const pathConfigCertificateDesc = `
-AWS Public Key which is used to verify the PKCS#7 signature of the identity document,
-varies by region. The public key(s) can be found in AWS EC2 instance metadata documentation.
-The default key that is used to verify the signature is the one that is applicable for
-following regions: US East (N. Virginia), US West (Oregon), US West (N. California),
-EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore),
-Asia Pacific (Sydney), and South America (Sao Paulo).
-
-If the instances belongs to region other than the above, the public key(s) for the
-corresponding regions should be registered using this endpoint. PKCS#7 is verified
-using a collection of certificates containing the default certificate and all the
-certificates that are registered using this endpoint.
-`
-const pathListCertificatesHelpSyn = `
-Lists all the AWS public certificates that are registered with the backend.
-`
-const pathListCertificatesHelpDesc = `
-Certificates will be listed by their respective names that were used during registration.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go
deleted file mode 100644
index 9242ebd..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go
+++ /dev/null
@@ -1,274 +0,0 @@
-package awsauth
-
-import (
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigClient(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/client$",
- Fields: map[string]*framework.FieldSchema{
- "access_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: "AWS Access Key ID for the account used to make AWS API requests.",
- },
-
- "secret_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: "AWS Secret Access Key for the account used to make AWS API requests.",
- },
-
- "endpoint": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: "URL to override the default generated endpoint for making AWS EC2 API calls.",
- },
-
- "iam_endpoint": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: "URL to override the default generated endpoint for making AWS IAM API calls.",
- },
-
- "sts_endpoint": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: "URL to override the default generated endpoint for making AWS STS API calls.",
- },
-
- "iam_server_id_header_value": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: "Value to require in the X-Vault-AWS-IAM-Server-ID request header",
- },
- },
-
- ExistenceCheck: b.pathConfigClientExistenceCheck,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.CreateOperation: b.pathConfigClientCreateUpdate,
- logical.UpdateOperation: b.pathConfigClientCreateUpdate,
- logical.DeleteOperation: b.pathConfigClientDelete,
- logical.ReadOperation: b.pathConfigClientRead,
- },
-
- HelpSynopsis: pathConfigClientHelpSyn,
- HelpDescription: pathConfigClientHelpDesc,
- }
-}
-
-// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
-// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
-func (b *backend) pathConfigClientExistenceCheck(
- req *logical.Request, data *framework.FieldData) (bool, error) {
-
- entry, err := b.lockedClientConfigEntry(req.Storage)
- if err != nil {
- return false, err
- }
- return entry != nil, nil
-}
-
-// Fetch the client configuration required to access the AWS API, after acquiring an exclusive lock.
-func (b *backend) lockedClientConfigEntry(s logical.Storage) (*clientConfig, error) {
- b.configMutex.RLock()
- defer b.configMutex.RUnlock()
-
- return b.nonLockedClientConfigEntry(s)
-}
-
-// Fetch the client configuration required to access the AWS API.
-func (b *backend) nonLockedClientConfigEntry(s logical.Storage) (*clientConfig, error) {
- entry, err := s.Get("config/client")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result clientConfig
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
- return &result, nil
-}
-
-func (b *backend) pathConfigClientRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- clientConfig, err := b.lockedClientConfigEntry(req.Storage)
- if err != nil {
- return nil, err
- }
-
- if clientConfig == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: structs.New(clientConfig).Map(),
- }, nil
-}
-
-func (b *backend) pathConfigClientDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- if err := req.Storage.Delete("config/client"); err != nil {
- return nil, err
- }
-
- // Remove all the cached EC2 client objects in the backend.
- b.flushCachedEC2Clients()
-
- // Remove all the cached EC2 client objects in the backend.
- b.flushCachedIAMClients()
-
- // unset the cached default AWS account ID
- b.defaultAWSAccountID = ""
-
- return nil, nil
-}
-
-// pathConfigClientCreateUpdate is used to register the 'aws_secret_key' and 'aws_access_key'
-// that can be used to interact with AWS EC2 API.
-func (b *backend) pathConfigClientCreateUpdate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- configEntry, err := b.nonLockedClientConfigEntry(req.Storage)
- if err != nil {
- return nil, err
- }
- if configEntry == nil {
- configEntry = &clientConfig{}
- }
-
- // changedCreds is whether we need to flush the cached AWS clients and store in the backend
- changedCreds := false
- // changedOtherConfig is whether other config has changed that requires storing in the backend
- // but does not require flushing the cached clients
- changedOtherConfig := false
-
- accessKeyStr, ok := data.GetOk("access_key")
- if ok {
- if configEntry.AccessKey != accessKeyStr.(string) {
- changedCreds = true
- configEntry.AccessKey = accessKeyStr.(string)
- }
- } else if req.Operation == logical.CreateOperation {
- // Use the default
- configEntry.AccessKey = data.Get("access_key").(string)
- }
-
- secretKeyStr, ok := data.GetOk("secret_key")
- if ok {
- if configEntry.SecretKey != secretKeyStr.(string) {
- changedCreds = true
- configEntry.SecretKey = secretKeyStr.(string)
- }
- } else if req.Operation == logical.CreateOperation {
- configEntry.SecretKey = data.Get("secret_key").(string)
- }
-
- endpointStr, ok := data.GetOk("endpoint")
- if ok {
- if configEntry.Endpoint != endpointStr.(string) {
- changedCreds = true
- configEntry.Endpoint = endpointStr.(string)
- }
- } else if req.Operation == logical.CreateOperation {
- configEntry.Endpoint = data.Get("endpoint").(string)
- }
-
- iamEndpointStr, ok := data.GetOk("iam_endpoint")
- if ok {
- if configEntry.IAMEndpoint != iamEndpointStr.(string) {
- changedCreds = true
- configEntry.IAMEndpoint = iamEndpointStr.(string)
- }
- } else if req.Operation == logical.CreateOperation {
- configEntry.IAMEndpoint = data.Get("iam_endpoint").(string)
- }
-
- stsEndpointStr, ok := data.GetOk("sts_endpoint")
- if ok {
- if configEntry.STSEndpoint != stsEndpointStr.(string) {
- // We don't directly cache STS clients as they are ever directly used.
- // However, they are potentially indirectly used as credential providers
- // for the EC2 and IAM clients, and thus we would be indirectly caching
- // them there. So, if we change the STS endpoint, we should flush those
- // cached clients.
- changedCreds = true
- configEntry.STSEndpoint = stsEndpointStr.(string)
- }
- } else if req.Operation == logical.CreateOperation {
- configEntry.STSEndpoint = data.Get("sts_endpoint").(string)
- }
-
- headerValStr, ok := data.GetOk("iam_server_id_header_value")
- if ok {
- if configEntry.IAMServerIdHeaderValue != headerValStr.(string) {
- // NOT setting changedCreds here, since this isn't really cached
- configEntry.IAMServerIdHeaderValue = headerValStr.(string)
- changedOtherConfig = true
- }
- } else if req.Operation == logical.CreateOperation {
- configEntry.IAMServerIdHeaderValue = data.Get("iam_server_id_header_value").(string)
- }
-
- // Since this endpoint supports both create operation and update operation,
- // the error checks for access_key and secret_key not being set are not present.
- // This allows calling this endpoint multiple times to provide the values.
- // Hence, the readers of this endpoint should do the validation on
- // the validation of keys before using them.
- entry, err := logical.StorageEntryJSON("config/client", configEntry)
- if err != nil {
- return nil, err
- }
-
- if changedCreds || changedOtherConfig || req.Operation == logical.CreateOperation {
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
- }
-
- if changedCreds {
- b.flushCachedEC2Clients()
- b.flushCachedIAMClients()
- b.defaultAWSAccountID = ""
- }
-
- return nil, nil
-}
-
-// Struct to hold 'aws_access_key' and 'aws_secret_key' that are required to
-// interact with the AWS EC2 API.
-type clientConfig struct {
- AccessKey string `json:"access_key" structs:"access_key" mapstructure:"access_key"`
- SecretKey string `json:"secret_key" structs:"secret_key" mapstructure:"secret_key"`
- Endpoint string `json:"endpoint" structs:"endpoint" mapstructure:"endpoint"`
- IAMEndpoint string `json:"iam_endpoint" structs:"iam_endpoint" mapstructure:"iam_endpoint"`
- STSEndpoint string `json:"sts_endpoint" structs:"sts_endpoint" mapstructure:"sts_endpoint"`
- IAMServerIdHeaderValue string `json:"iam_server_id_header_value" structs:"iam_server_id_header_value" mapstructure:"iam_server_id_header_value"`
-}
-
-const pathConfigClientHelpSyn = `
-Configure AWS IAM credentials that are used to query instance and role details from the AWS API.
-`
-
-const pathConfigClientHelpDesc = `
-The aws-ec2 auth backend makes AWS API queries to retrieve information
-regarding EC2 instances that perform login operations. The 'aws_secret_key' and
-'aws_access_key' parameters configured here should map to an AWS IAM user that
-has permission to make the following API queries:
-
-* ec2:DescribeInstances
-* iam:GetInstanceProfile (if IAM Role binding is used)
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go
deleted file mode 100644
index ff60ebf..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package awsauth
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestBackend_pathConfigClient(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // make sure we start with empty roles, which gives us confidence that the read later
- // actually is the two roles we created
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "config/client",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- // at this point, resp == nil is valid as no client config exists
- // if resp != nil, then resp.Data must have EndPoint and IAMServerIdHeaderValue as nil
- if resp != nil {
- if resp.IsError() {
- t.Fatalf("failed to read client config entry")
- } else if resp.Data["endpoint"] != nil || resp.Data["iam_server_id_header_value"] != nil {
- t.Fatalf("returned endpoint or iam_server_id_header_value non-nil")
- }
- }
-
- data := map[string]interface{}{
- "sts_endpoint": "https://my-custom-sts-endpoint.example.com",
- "iam_server_id_header_value": "vault_server_identification_314159",
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "config/client",
- Data: data,
- Storage: storage,
- })
-
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil && resp.IsError() {
- t.Fatal("failed to create the client config entry")
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "config/client",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatal("failed to read the client config entry")
- }
- if resp.Data["iam_server_id_header_value"] != data["iam_server_id_header_value"] {
- t.Fatalf("expected iam_server_id_header_value: '%#v'; returned iam_server_id_header_value: '%#v'",
- data["iam_server_id_header_value"], resp.Data["iam_server_id_header_value"])
- }
-
- data = map[string]interface{}{
- "iam_server_id_header_value": "vault_server_identification_2718281",
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/client",
- Data: data,
- Storage: storage,
- })
-
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil && resp.IsError() {
- t.Fatal("failed to update the client config entry")
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "config/client",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatal("failed to read the client config entry")
- }
- if resp.Data["iam_server_id_header_value"] != data["iam_server_id_header_value"] {
- t.Fatalf("expected iam_server_id_header_value: '%#v'; returned iam_server_id_header_value: '%#v'",
- data["iam_server_id_header_value"], resp.Data["iam_server_id_header_value"])
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_sts.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_sts.go
deleted file mode 100644
index 4366feb..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_sts.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package awsauth
-
-import (
- "fmt"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// awsStsEntry is used to store details of an STS role for assumption
-type awsStsEntry struct {
- StsRole string `json:"sts_role" structs:"sts_role" mapstructure:"sts_role"`
-}
-
-func pathListSts(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/sts/?",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathStsList,
- },
-
- HelpSynopsis: pathListStsHelpSyn,
- HelpDescription: pathListStsHelpDesc,
- }
-}
-
-func pathConfigSts(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/sts/" + framework.GenericNameRegex("account_id"),
- Fields: map[string]*framework.FieldSchema{
- "account_id": {
- Type: framework.TypeString,
- Description: `AWS account ID to be associated with STS role. If set,
-Vault will use assumed credentials to verify any login attempts from EC2
-instances in this account.`,
- },
- "sts_role": {
- Type: framework.TypeString,
- Description: `AWS ARN for STS role to be assumed when interacting with the account specified.
-The Vault server must have permissions to assume this role.`,
- },
- },
-
- ExistenceCheck: b.pathConfigStsExistenceCheck,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.CreateOperation: b.pathConfigStsCreateUpdate,
- logical.UpdateOperation: b.pathConfigStsCreateUpdate,
- logical.ReadOperation: b.pathConfigStsRead,
- logical.DeleteOperation: b.pathConfigStsDelete,
- },
-
- HelpSynopsis: pathConfigStsSyn,
- HelpDescription: pathConfigStsDesc,
- }
-}
-
-// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
-// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
-func (b *backend) pathConfigStsExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
- accountID := data.Get("account_id").(string)
- if accountID == "" {
- return false, fmt.Errorf("missing account_id")
- }
-
- entry, err := b.lockedAwsStsEntry(req.Storage, accountID)
- if err != nil {
- return false, err
- }
-
- return entry != nil, nil
-}
-
-// pathStsList is used to list all the AWS STS role configurations
-func (b *backend) pathStsList(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.configMutex.RLock()
- defer b.configMutex.RUnlock()
- sts, err := req.Storage.List("config/sts/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(sts), nil
-}
-
-// nonLockedSetAwsStsEntry creates or updates an STS role association with the given accountID
-// This method does not acquire the write lock before creating or updating. If locking is
-// desired, use lockedSetAwsStsEntry instead
-func (b *backend) nonLockedSetAwsStsEntry(s logical.Storage, accountID string, stsEntry *awsStsEntry) error {
- if accountID == "" {
- return fmt.Errorf("missing AWS account ID")
- }
-
- if stsEntry == nil {
- return fmt.Errorf("missing AWS STS Role ARN")
- }
-
- entry, err := logical.StorageEntryJSON("config/sts/"+accountID, stsEntry)
- if err != nil {
- return err
- }
-
- if entry == nil {
- return fmt.Errorf("failed to create storage entry for AWS STS configuration")
- }
-
- return s.Put(entry)
-}
-
-// lockedSetAwsStsEntry creates or updates an STS role association with the given accountID
-// This method acquires the write lock before creating or updating the STS entry.
-func (b *backend) lockedSetAwsStsEntry(s logical.Storage, accountID string, stsEntry *awsStsEntry) error {
- if accountID == "" {
- return fmt.Errorf("missing AWS account ID")
- }
-
- if stsEntry == nil {
- return fmt.Errorf("missing sts entry")
- }
-
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- return b.nonLockedSetAwsStsEntry(s, accountID, stsEntry)
-}
-
-// nonLockedAwsStsEntry returns the STS role associated with the given accountID.
-// This method does not acquire the read lock before returning information. If locking is
-// desired, use lockedAwsStsEntry instead
-func (b *backend) nonLockedAwsStsEntry(s logical.Storage, accountID string) (*awsStsEntry, error) {
- entry, err := s.Get("config/sts/" + accountID)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
- var stsEntry awsStsEntry
- if err := entry.DecodeJSON(&stsEntry); err != nil {
- return nil, err
- }
-
- return &stsEntry, nil
-}
-
-// lockedAwsStsEntry returns the STS role associated with the given accountID.
-// This method acquires the read lock before returning the association.
-func (b *backend) lockedAwsStsEntry(s logical.Storage, accountID string) (*awsStsEntry, error) {
- b.configMutex.RLock()
- defer b.configMutex.RUnlock()
-
- return b.nonLockedAwsStsEntry(s, accountID)
-}
-
-// pathConfigStsRead is used to return information about an STS role/AWS accountID association
-func (b *backend) pathConfigStsRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- accountID := data.Get("account_id").(string)
- if accountID == "" {
- return logical.ErrorResponse("missing account id"), nil
- }
-
- stsEntry, err := b.lockedAwsStsEntry(req.Storage, accountID)
- if err != nil {
- return nil, err
- }
- if stsEntry == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: structs.New(stsEntry).Map(),
- }, nil
-}
-
-// pathConfigStsCreateUpdate is used to associate an STS role with a given AWS accountID
-func (b *backend) pathConfigStsCreateUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- accountID := data.Get("account_id").(string)
- if accountID == "" {
- return logical.ErrorResponse("missing AWS account ID"), nil
- }
-
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- // Check if an STS role is already registered
- stsEntry, err := b.nonLockedAwsStsEntry(req.Storage, accountID)
- if err != nil {
- return nil, err
- }
- if stsEntry == nil {
- stsEntry = &awsStsEntry{}
- }
-
- // Check that an STS role has actually been provided
- stsRole, ok := data.GetOk("sts_role")
- if ok {
- stsEntry.StsRole = stsRole.(string)
- } else if req.Operation == logical.CreateOperation {
- return logical.ErrorResponse("missing sts role"), nil
- }
-
- if stsEntry.StsRole == "" {
- return logical.ErrorResponse("sts role cannot be empty"), nil
- }
-
- // save the provided STS role
- if err := b.nonLockedSetAwsStsEntry(req.Storage, accountID, stsEntry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// pathConfigStsDelete is used to delete a previously configured STS configuration
-func (b *backend) pathConfigStsDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- accountID := data.Get("account_id").(string)
- if accountID == "" {
- return logical.ErrorResponse("missing account id"), nil
- }
-
- return nil, req.Storage.Delete("config/sts/" + accountID)
-}
-
-const pathConfigStsSyn = `
-Specify STS roles to be assumed for certain AWS accounts.
-`
-
-const pathConfigStsDesc = `
-Allows the explicit association of STS roles to satellite AWS accounts (i.e. those
-which are not the account in which the Vault server is running.) Login attempts from
-EC2 instances running in these accounts will be verified using credentials obtained
-by assumption of these STS roles.
-
-The environment in which the Vault server resides must have access to assume the
-given STS roles.
-`
-const pathListStsHelpSyn = `
-List all the AWS account/STS role relationships registered with Vault.
-`
-
-const pathListStsHelpDesc = `
-AWS accounts will be listed by account ID, along with their respective role names.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_identity_whitelist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_identity_whitelist.go
deleted file mode 100644
index 43aafaa..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_identity_whitelist.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package awsauth
-
-import (
- "fmt"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- identityWhitelistConfigPath = "config/tidy/identity-whitelist"
-)
-
-func pathConfigTidyIdentityWhitelist(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: fmt.Sprintf("%s$", identityWhitelistConfigPath),
- Fields: map[string]*framework.FieldSchema{
- "safety_buffer": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 259200, //72h
- Description: `The amount of extra time that must have passed beyond the identity's
-expiration, before it is removed from the backend storage.`,
- },
- "disable_periodic_tidy": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: "If set to 'true', disables the periodic tidying of the 'identity-whitelist/' entries.",
- },
- },
-
- ExistenceCheck: b.pathConfigTidyIdentityWhitelistExistenceCheck,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.CreateOperation: b.pathConfigTidyIdentityWhitelistCreateUpdate,
- logical.UpdateOperation: b.pathConfigTidyIdentityWhitelistCreateUpdate,
- logical.ReadOperation: b.pathConfigTidyIdentityWhitelistRead,
- logical.DeleteOperation: b.pathConfigTidyIdentityWhitelistDelete,
- },
-
- HelpSynopsis: pathConfigTidyIdentityWhitelistHelpSyn,
- HelpDescription: pathConfigTidyIdentityWhitelistHelpDesc,
- }
-}
-
-func (b *backend) pathConfigTidyIdentityWhitelistExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
- entry, err := b.lockedConfigTidyIdentities(req.Storage)
- if err != nil {
- return false, err
- }
- return entry != nil, nil
-}
-
-func (b *backend) lockedConfigTidyIdentities(s logical.Storage) (*tidyWhitelistIdentityConfig, error) {
- b.configMutex.RLock()
- defer b.configMutex.RUnlock()
-
- return b.nonLockedConfigTidyIdentities(s)
-}
-
-func (b *backend) nonLockedConfigTidyIdentities(s logical.Storage) (*tidyWhitelistIdentityConfig, error) {
- entry, err := s.Get(identityWhitelistConfigPath)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result tidyWhitelistIdentityConfig
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
- return &result, nil
-}
-
-func (b *backend) pathConfigTidyIdentityWhitelistCreateUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- configEntry, err := b.nonLockedConfigTidyIdentities(req.Storage)
- if err != nil {
- return nil, err
- }
- if configEntry == nil {
- configEntry = &tidyWhitelistIdentityConfig{}
- }
-
- safetyBufferInt, ok := data.GetOk("safety_buffer")
- if ok {
- configEntry.SafetyBuffer = safetyBufferInt.(int)
- } else if req.Operation == logical.CreateOperation {
- configEntry.SafetyBuffer = data.Get("safety_buffer").(int)
- }
-
- disablePeriodicTidyBool, ok := data.GetOk("disable_periodic_tidy")
- if ok {
- configEntry.DisablePeriodicTidy = disablePeriodicTidyBool.(bool)
- } else if req.Operation == logical.CreateOperation {
- configEntry.DisablePeriodicTidy = data.Get("disable_periodic_tidy").(bool)
- }
-
- entry, err := logical.StorageEntryJSON(identityWhitelistConfigPath, configEntry)
- if err != nil {
- return nil, err
- }
-
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathConfigTidyIdentityWhitelistRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- clientConfig, err := b.lockedConfigTidyIdentities(req.Storage)
- if err != nil {
- return nil, err
- }
- if clientConfig == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: structs.New(clientConfig).Map(),
- }, nil
-}
-
-func (b *backend) pathConfigTidyIdentityWhitelistDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- return nil, req.Storage.Delete(identityWhitelistConfigPath)
-}
-
-type tidyWhitelistIdentityConfig struct {
- SafetyBuffer int `json:"safety_buffer" structs:"safety_buffer" mapstructure:"safety_buffer"`
- DisablePeriodicTidy bool `json:"disable_periodic_tidy" structs:"disable_periodic_tidy" mapstructure:"disable_periodic_tidy"`
-}
-
-const pathConfigTidyIdentityWhitelistHelpSyn = `
-Configures the periodic tidying operation of the whitelisted identity entries.
-`
-const pathConfigTidyIdentityWhitelistHelpDesc = `
-By default, the expired entries in the whitelist will be attempted to be removed
-periodically. This operation will look for expired items in the list and purges them.
-However, there is a safety buffer duration (defaults to 72h), purges the entries
-only if they have been persisting this duration, past its expiration time.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_roletag_blacklist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_roletag_blacklist.go
deleted file mode 100644
index c3059c6..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_tidy_roletag_blacklist.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package awsauth
-
-import (
- "fmt"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- roletagBlacklistConfigPath = "config/tidy/roletag-blacklist"
-)
-
-func pathConfigTidyRoletagBlacklist(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: fmt.Sprintf("%s$", roletagBlacklistConfigPath),
- Fields: map[string]*framework.FieldSchema{
- "safety_buffer": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 15552000, //180d
- Description: `The amount of extra time that must have passed beyond the roletag
-expiration, before it is removed from the backend storage.
-Defaults to 4320h (180 days).`,
- },
-
- "disable_periodic_tidy": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: "If set to 'true', disables the periodic tidying of blacklisted entries.",
- },
- },
-
- ExistenceCheck: b.pathConfigTidyRoletagBlacklistExistenceCheck,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.CreateOperation: b.pathConfigTidyRoletagBlacklistCreateUpdate,
- logical.UpdateOperation: b.pathConfigTidyRoletagBlacklistCreateUpdate,
- logical.ReadOperation: b.pathConfigTidyRoletagBlacklistRead,
- logical.DeleteOperation: b.pathConfigTidyRoletagBlacklistDelete,
- },
-
- HelpSynopsis: pathConfigTidyRoletagBlacklistHelpSyn,
- HelpDescription: pathConfigTidyRoletagBlacklistHelpDesc,
- }
-}
-
-func (b *backend) pathConfigTidyRoletagBlacklistExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
- entry, err := b.lockedConfigTidyRoleTags(req.Storage)
- if err != nil {
- return false, err
- }
- return entry != nil, nil
-}
-
-func (b *backend) lockedConfigTidyRoleTags(s logical.Storage) (*tidyBlacklistRoleTagConfig, error) {
- b.configMutex.RLock()
- defer b.configMutex.RUnlock()
-
- return b.nonLockedConfigTidyRoleTags(s)
-}
-
-func (b *backend) nonLockedConfigTidyRoleTags(s logical.Storage) (*tidyBlacklistRoleTagConfig, error) {
- entry, err := s.Get(roletagBlacklistConfigPath)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result tidyBlacklistRoleTagConfig
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathConfigTidyRoletagBlacklistCreateUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- configEntry, err := b.nonLockedConfigTidyRoleTags(req.Storage)
- if err != nil {
- return nil, err
- }
- if configEntry == nil {
- configEntry = &tidyBlacklistRoleTagConfig{}
- }
- safetyBufferInt, ok := data.GetOk("safety_buffer")
- if ok {
- configEntry.SafetyBuffer = safetyBufferInt.(int)
- } else if req.Operation == logical.CreateOperation {
- configEntry.SafetyBuffer = data.Get("safety_buffer").(int)
- }
- disablePeriodicTidyBool, ok := data.GetOk("disable_periodic_tidy")
- if ok {
- configEntry.DisablePeriodicTidy = disablePeriodicTidyBool.(bool)
- } else if req.Operation == logical.CreateOperation {
- configEntry.DisablePeriodicTidy = data.Get("disable_periodic_tidy").(bool)
- }
-
- entry, err := logical.StorageEntryJSON(roletagBlacklistConfigPath, configEntry)
- if err != nil {
- return nil, err
- }
-
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathConfigTidyRoletagBlacklistRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- clientConfig, err := b.lockedConfigTidyRoleTags(req.Storage)
- if err != nil {
- return nil, err
- }
- if clientConfig == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: structs.New(clientConfig).Map(),
- }, nil
-}
-
-func (b *backend) pathConfigTidyRoletagBlacklistDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.configMutex.Lock()
- defer b.configMutex.Unlock()
-
- return nil, req.Storage.Delete(roletagBlacklistConfigPath)
-}
-
-type tidyBlacklistRoleTagConfig struct {
- SafetyBuffer int `json:"safety_buffer" structs:"safety_buffer" mapstructure:"safety_buffer"`
- DisablePeriodicTidy bool `json:"disable_periodic_tidy" structs:"disable_periodic_tidy" mapstructure:"disable_periodic_tidy"`
-}
-
-const pathConfigTidyRoletagBlacklistHelpSyn = `
-Configures the periodic tidying operation of the blacklisted role tag entries.
-`
-const pathConfigTidyRoletagBlacklistHelpDesc = `
-By default, the expired entries in the blacklist will be attempted to be removed
-periodically. This operation will look for expired items in the list and purges them.
-However, there is a safety buffer duration (defaults to 72h), purges the entries
-only if they have been persisting this duration, past its expiration time.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_identity_whitelist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_identity_whitelist.go
deleted file mode 100644
index 600fc7d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_identity_whitelist.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package awsauth
-
-import (
- "time"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathIdentityWhitelist(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "identity-whitelist/" + framework.GenericNameRegex("instance_id"),
- Fields: map[string]*framework.FieldSchema{
- "instance_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `EC2 instance ID. A successful login operation from an EC2 instance
-gets cached in this whitelist, keyed off of instance ID.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathIdentityWhitelistRead,
- logical.DeleteOperation: b.pathIdentityWhitelistDelete,
- },
-
- HelpSynopsis: pathIdentityWhitelistSyn,
- HelpDescription: pathIdentityWhitelistDesc,
- }
-}
-
-func pathListIdentityWhitelist(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "identity-whitelist/?",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathWhitelistIdentitiesList,
- },
-
- HelpSynopsis: pathListIdentityWhitelistHelpSyn,
- HelpDescription: pathListIdentityWhitelistHelpDesc,
- }
-}
-
-// pathWhitelistIdentitiesList is used to list all the instance IDs that are present
-// in the identity whitelist. This will list both valid and expired entries.
-func (b *backend) pathWhitelistIdentitiesList(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- identities, err := req.Storage.List("whitelist/identity/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(identities), nil
-}
-
-// Fetch an item from the whitelist given an instance ID.
-func whitelistIdentityEntry(s logical.Storage, instanceID string) (*whitelistIdentity, error) {
- entry, err := s.Get("whitelist/identity/" + instanceID)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result whitelistIdentity
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
- return &result, nil
-}
-
-// Stores an instance ID and the information required to validate further login/renewal attempts from
-// the same instance ID.
-func setWhitelistIdentityEntry(s logical.Storage, instanceID string, identity *whitelistIdentity) error {
- entry, err := logical.StorageEntryJSON("whitelist/identity/"+instanceID, identity)
- if err != nil {
- return err
- }
-
- if err := s.Put(entry); err != nil {
- return err
- }
- return nil
-}
-
-// pathIdentityWhitelistDelete is used to delete an entry from the identity whitelist given an instance ID.
-func (b *backend) pathIdentityWhitelistDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- instanceID := data.Get("instance_id").(string)
- if instanceID == "" {
- return logical.ErrorResponse("missing instance_id"), nil
- }
-
- return nil, req.Storage.Delete("whitelist/identity/" + instanceID)
-}
-
-// pathIdentityWhitelistRead is used to view an entry in the identity whitelist given an instance ID.
-func (b *backend) pathIdentityWhitelistRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- instanceID := data.Get("instance_id").(string)
- if instanceID == "" {
- return logical.ErrorResponse("missing instance_id"), nil
- }
-
- entry, err := whitelistIdentityEntry(req.Storage, instanceID)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- resp := &logical.Response{
- Data: structs.New(entry).Map(),
- }
- resp.Data["creation_time"] = entry.CreationTime.Format(time.RFC3339Nano)
- resp.Data["expiration_time"] = entry.ExpirationTime.Format(time.RFC3339Nano)
- resp.Data["last_updated_time"] = entry.LastUpdatedTime.Format(time.RFC3339Nano)
-
- return resp, nil
-}
-
-// Struct to represent each item in the identity whitelist.
-type whitelistIdentity struct {
- Role string `json:"role" structs:"role" mapstructure:"role"`
- ClientNonce string `json:"client_nonce" structs:"client_nonce" mapstructure:"client_nonce"`
- CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
- DisallowReauthentication bool `json:"disallow_reauthentication" structs:"disallow_reauthentication" mapstructure:"disallow_reauthentication"`
- PendingTime string `json:"pending_time" structs:"pending_time" mapstructure:"pending_time"`
- ExpirationTime time.Time `json:"expiration_time" structs:"expiration_time" mapstructure:"expiration_time"`
- LastUpdatedTime time.Time `json:"last_updated_time" structs:"last_updated_time" mapstructure:"last_updated_time"`
-}
-
-const pathIdentityWhitelistSyn = `
-Read or delete entries in the identity whitelist.
-`
-
-const pathIdentityWhitelistDesc = `
-Each login from an EC2 instance creates/updates an entry in the identity whitelist.
-
-Entries in this list can be viewed or deleted using this endpoint.
-
-By default, a cron task will periodically look for expired entries in the whitelist
-and deletes them. The duration to periodically run this, is one hour by default.
-However, this can be configured using the 'config/tidy/identities' endpoint. This tidy
-action can be triggered via the API as well, using the 'tidy/identities' endpoint.
-`
-
-const pathListIdentityWhitelistHelpSyn = `
-Lists the items present in the identity whitelist.
-`
-
-const pathListIdentityWhitelistHelpDesc = `
-The entries in the identity whitelist is keyed off of the EC2 instance IDs.
-This endpoint lists all the entries present in the identity whitelist, both
-expired and un-expired entries. Use 'tidy/identities' endpoint to clean-up
-the whitelist of identities.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go
deleted file mode 100644
index cca2d75..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go
+++ /dev/null
@@ -1,1646 +0,0 @@
-package awsauth
-
-import (
- "crypto/subtle"
- "crypto/x509"
- "encoding/base64"
- "encoding/pem"
- "encoding/xml"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "reflect"
- "regexp"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/ec2"
- "github.com/aws/aws-sdk-go/service/iam"
- "github.com/fullsailor/pkcs7"
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- reauthenticationDisabledNonce = "reauthentication-disabled-nonce"
- iamAuthType = "iam"
- ec2AuthType = "ec2"
- ec2EntityType = "ec2_instance"
-)
-
-func pathLogin(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "login$",
- Fields: map[string]*framework.FieldSchema{
- "role": {
- Type: framework.TypeString,
- Description: `Name of the role against which the login is being attempted.
-If 'role' is not specified, then the login endpoint looks for a role
-bearing the name of the AMI ID of the EC2 instance that is trying to login.
-If a matching role is not found, login fails.`,
- },
-
- "pkcs7": {
- Type: framework.TypeString,
- Description: `PKCS7 signature of the identity document when using an auth_type
-of ec2.`,
- },
-
- "nonce": {
- Type: framework.TypeString,
- Description: `The nonce to be used for subsequent login requests when
-auth_type is ec2. If this parameter is not specified at
-all and if reauthentication is allowed, then the backend will generate a random
-nonce, attaches it to the instance's identity-whitelist entry and returns the
-nonce back as part of auth metadata. This value should be used with further
-login requests, to establish client authenticity. Clients can choose to set a
-custom nonce if preferred, in which case, it is recommended that clients provide
-a strong nonce. If a nonce is provided but with an empty value, it indicates
-intent to disable reauthentication. Note that, when 'disallow_reauthentication'
-option is enabled on either the role or the role tag, the 'nonce' holds no
-significance.`,
- },
-
- "iam_http_request_method": {
- Type: framework.TypeString,
- Description: `HTTP method to use for the AWS request when auth_type is
-iam. This must match what has been signed in the
-presigned request. Currently, POST is the only supported value`,
- },
-
- "iam_request_url": {
- Type: framework.TypeString,
- Description: `Base64-encoded full URL against which to make the AWS request
-when using iam auth_type.`,
- },
-
- "iam_request_body": {
- Type: framework.TypeString,
- Description: `Base64-encoded request body when auth_type is iam.
-This must match the request body included in the signature.`,
- },
- "iam_request_headers": {
- Type: framework.TypeString,
- Description: `Base64-encoded JSON representation of the request headers when auth_type is
-iam. This must at a minimum include the headers over
-which AWS has included a signature.`,
- },
- "identity": {
- Type: framework.TypeString,
- Description: `Base64 encoded EC2 instance identity document. This needs to be supplied along
-with the 'signature' parameter. If using 'curl' for fetching the identity
-document, consider using the option '-w 0' while piping the output to 'base64'
-binary.`,
- },
- "signature": {
- Type: framework.TypeString,
- Description: `Base64 encoded SHA256 RSA signature of the instance identity document. This
-needs to be supplied along with 'identity' parameter.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLoginUpdate,
- },
-
- HelpSynopsis: pathLoginSyn,
- HelpDescription: pathLoginDesc,
- }
-}
-
-// instanceIamRoleARN fetches the IAM role ARN associated with the given
-// instance profile name
-func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName string) (string, error) {
- if iamClient == nil {
- return "", fmt.Errorf("nil iamClient")
- }
- if instanceProfileName == "" {
- return "", fmt.Errorf("missing instance profile name")
- }
-
- profile, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{
- InstanceProfileName: aws.String(instanceProfileName),
- })
- if err != nil {
- return "", err
- }
- if profile == nil {
- return "", fmt.Errorf("nil output while getting instance profile details")
- }
-
- if profile.InstanceProfile == nil {
- return "", fmt.Errorf("nil instance profile in the output of instance profile details")
- }
-
- if profile.InstanceProfile.Roles == nil || len(profile.InstanceProfile.Roles) != 1 {
- return "", fmt.Errorf("invalid roles in the output of instance profile details")
- }
-
- if profile.InstanceProfile.Roles[0].Arn == nil {
- return "", fmt.Errorf("nil role ARN in the output of instance profile details")
- }
-
- return *profile.InstanceProfile.Roles[0].Arn, nil
-}
-
-// validateInstance queries the status of the EC2 instance using AWS EC2 API
-// and checks if the instance is running and is healthy
-func (b *backend) validateInstance(s logical.Storage, instanceID, region, accountID string) (*ec2.Instance, error) {
- // Create an EC2 client to pull the instance information
- ec2Client, err := b.clientEC2(s, region, accountID)
- if err != nil {
- return nil, err
- }
-
- status, err := ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{
- InstanceIds: []*string{
- aws.String(instanceID),
- },
- })
- if err != nil {
- return nil, fmt.Errorf("error fetching description for instance ID %q: %q\n", instanceID, err)
- }
- if status == nil {
- return nil, fmt.Errorf("nil output from describe instances")
- }
- if len(status.Reservations) == 0 {
- return nil, fmt.Errorf("no reservations found in instance description")
-
- }
- if len(status.Reservations[0].Instances) == 0 {
- return nil, fmt.Errorf("no instance details found in reservations")
- }
- if *status.Reservations[0].Instances[0].InstanceId != instanceID {
- return nil, fmt.Errorf("expected instance ID not matching the instance ID in the instance description")
- }
- if status.Reservations[0].Instances[0].State == nil {
- return nil, fmt.Errorf("instance state in instance description is nil")
- }
- if *status.Reservations[0].Instances[0].State.Name != "running" {
- return nil, fmt.Errorf("instance is not in 'running' state")
- }
- return status.Reservations[0].Instances[0], nil
-}
-
-// validateMetadata matches the given client nonce and pending time with the
-// one cached in the identity whitelist during the previous login. But, if
-// reauthentication is disabled, login attempt is failed immediately.
-func validateMetadata(clientNonce, pendingTime string, storedIdentity *whitelistIdentity, roleEntry *awsRoleEntry) error {
- // For sanity
- if !storedIdentity.DisallowReauthentication && storedIdentity.ClientNonce == "" {
- return fmt.Errorf("client nonce missing in stored identity")
- }
-
- // If reauthentication is disabled or if the nonce supplied matches a
- // predefied nonce which indicates reauthentication to be disabled,
- // authentication will not succeed.
- if storedIdentity.DisallowReauthentication ||
- subtle.ConstantTimeCompare([]byte(reauthenticationDisabledNonce), []byte(clientNonce)) == 1 {
- return fmt.Errorf("reauthentication is disabled")
- }
-
- givenPendingTime, err := time.Parse(time.RFC3339, pendingTime)
- if err != nil {
- return err
- }
-
- storedPendingTime, err := time.Parse(time.RFC3339, storedIdentity.PendingTime)
- if err != nil {
- return err
- }
-
- // When the presented client nonce does not match the cached entry, it
- // is either that a rogue client is trying to login or that a valid
- // client suffered a migration. The migration is detected via
- // pendingTime in the instance metadata, which sadly is only updated
- // when an instance is stopped and started but *not* when the instance
- // is rebooted. If reboot survivability is needed, either
- // instrumentation to delete the instance ID from the whitelist is
- // necessary, or the client must durably store the nonce.
- //
- // If the `allow_instance_migration` property of the registered role is
- // enabled, then the client nonce mismatch is ignored, as long as the
- // pending time in the presented instance identity document is newer
- // than the cached pending time. The new pendingTime is stored and used
- // for future checks.
- //
- // This is a weak criterion and hence the `allow_instance_migration`
- // option should be used with caution.
- if subtle.ConstantTimeCompare([]byte(clientNonce), []byte(storedIdentity.ClientNonce)) != 1 {
- if !roleEntry.AllowInstanceMigration {
- return fmt.Errorf("client nonce mismatch")
- }
- if roleEntry.AllowInstanceMigration && !givenPendingTime.After(storedPendingTime) {
- return fmt.Errorf("client nonce mismatch and instance meta-data incorrect")
- }
- }
-
- // Ensure that the 'pendingTime' on the given identity document is not
- // before the 'pendingTime' that was used for previous login. This
- // disallows old metadata documents from being used to perform login.
- if givenPendingTime.Before(storedPendingTime) {
- return fmt.Errorf("instance meta-data is older than the one used for previous login")
- }
- return nil
-}
-
-// Verifies the integrity of the instance identity document using its SHA256
-// RSA signature. After verification, returns the unmarshaled instance identity
-// document.
-func (b *backend) verifyInstanceIdentitySignature(s logical.Storage, identityBytes, signatureBytes []byte) (*identityDocument, error) {
- if len(identityBytes) == 0 {
- return nil, fmt.Errorf("missing instance identity document")
- }
-
- if len(signatureBytes) == 0 {
- return nil, fmt.Errorf("missing SHA256 RSA signature of the instance identity document")
- }
-
- // Get the public certificates that are used to verify the signature.
- // This returns a slice of certificates containing the default
- // certificate and all the registered certificates via
- // 'config/certificate/' endpoint, for verifying the RSA
- // digest.
- publicCerts, err := b.awsPublicCertificates(s, false)
- if err != nil {
- return nil, err
- }
- if publicCerts == nil || len(publicCerts) == 0 {
- return nil, fmt.Errorf("certificates to verify the signature are not found")
- }
-
- // Check if any of the certs registered at the backend can verify the
- // signature
- for _, cert := range publicCerts {
- err := cert.CheckSignature(x509.SHA256WithRSA, identityBytes, signatureBytes)
- if err == nil {
- var identityDoc identityDocument
- if decErr := jsonutil.DecodeJSON(identityBytes, &identityDoc); decErr != nil {
- return nil, decErr
- }
- return &identityDoc, nil
- }
- }
-
- return nil, fmt.Errorf("instance identity verification using SHA256 RSA signature is unsuccessful")
-}
-
-// Verifies the correctness of the authenticated attributes present in the PKCS#7
-// signature. After verification, extracts the instance identity document from the
-// signature, parses it and returns it.
-func (b *backend) parseIdentityDocument(s logical.Storage, pkcs7B64 string) (*identityDocument, error) {
- // Insert the header and footer for the signature to be able to pem decode it
- pkcs7B64 = fmt.Sprintf("-----BEGIN PKCS7-----\n%s\n-----END PKCS7-----", pkcs7B64)
-
- // Decode the PEM encoded signature
- pkcs7BER, pkcs7Rest := pem.Decode([]byte(pkcs7B64))
- if len(pkcs7Rest) != 0 {
- return nil, fmt.Errorf("failed to decode the PEM encoded PKCS#7 signature")
- }
-
- // Parse the signature from asn1 format into a struct
- pkcs7Data, err := pkcs7.Parse(pkcs7BER.Bytes)
- if err != nil {
- return nil, fmt.Errorf("failed to parse the BER encoded PKCS#7 signature: %v\n", err)
- }
-
- // Get the public certificates that are used to verify the signature.
- // This returns a slice of certificates containing the default certificate
- // and all the registered certificates via 'config/certificate/' endpoint
- publicCerts, err := b.awsPublicCertificates(s, true)
- if err != nil {
- return nil, err
- }
- if publicCerts == nil || len(publicCerts) == 0 {
- return nil, fmt.Errorf("certificates to verify the signature are not found")
- }
-
- // Before calling Verify() on the PKCS#7 struct, set the certificates to be used
- // to verify the contents in the signer information.
- pkcs7Data.Certificates = publicCerts
-
- // Verify extracts the authenticated attributes in the PKCS#7 signature, and verifies
- // the authenticity of the content using 'dsa.PublicKey' embedded in the public certificate.
- if pkcs7Data.Verify() != nil {
- return nil, fmt.Errorf("failed to verify the signature")
- }
-
- // Check if the signature has content inside of it
- if len(pkcs7Data.Content) == 0 {
- return nil, fmt.Errorf("instance identity document could not be found in the signature")
- }
-
- var identityDoc identityDocument
- if err := jsonutil.DecodeJSON(pkcs7Data.Content, &identityDoc); err != nil {
- return nil, err
- }
-
- return &identityDoc, nil
-}
-
-func (b *backend) pathLoginUpdate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- anyEc2, allEc2 := hasValuesForEc2Auth(data)
- anyIam, allIam := hasValuesForIamAuth(data)
- switch {
- case anyEc2 && anyIam:
- return logical.ErrorResponse("supplied auth values for both ec2 and iam auth types"), nil
- case anyEc2 && !allEc2:
- return logical.ErrorResponse("supplied some of the auth values for the ec2 auth type but not all"), nil
- case anyEc2:
- return b.pathLoginUpdateEc2(req, data)
- case anyIam && !allIam:
- return logical.ErrorResponse("supplied some of the auth values for the iam auth type but not all"), nil
- case anyIam:
- return b.pathLoginUpdateIam(req, data)
- default:
- return logical.ErrorResponse("didn't supply required authentication values"), nil
- }
-}
-
-// Returns whether the EC2 instance meets the requirements of the particular
-// AWS role entry.
-// The first error return value is whether there's some sort of validation
-// error that means the instance doesn't meet the role requirements
-// The second error return value indicates whether there's an error in even
-// trying to validate those requirements
-func (b *backend) verifyInstanceMeetsRoleRequirements(
- s logical.Storage, instance *ec2.Instance, roleEntry *awsRoleEntry, roleName string, identityDoc *identityDocument) (error, error) {
-
- switch {
- case instance == nil:
- return nil, fmt.Errorf("nil instance")
- case roleEntry == nil:
- return nil, fmt.Errorf("nil roleEntry")
- case identityDoc == nil:
- return nil, fmt.Errorf("nil identityDoc")
- }
-
- // Verify that the AccountID of the instance trying to login matches the
- // AccountID specified as a constraint on role
- if roleEntry.BoundAccountID != "" && identityDoc.AccountID != roleEntry.BoundAccountID {
- return fmt.Errorf("account ID %q does not belong to role %q", identityDoc.AccountID, roleName), nil
- }
-
- // Verify that the AMI ID of the instance trying to login matches the
- // AMI ID specified as a constraint on the role.
- //
- // Here, we're making a tradeoff and pulling the AMI ID out of the EC2
- // API rather than the signed instance identity doc. They *should* match.
- // This means we require an EC2 API call to retrieve the AMI ID, but we're
- // already calling the API to validate the Instance ID anyway, so it shouldn't
- // matter. The benefit is that we have the exact same code whether auth_type
- // is ec2 or iam.
- if roleEntry.BoundAmiID != "" {
- if instance.ImageId == nil {
- return nil, fmt.Errorf("AMI ID in the instance description is nil")
- }
- if roleEntry.BoundAmiID != *instance.ImageId {
- return fmt.Errorf("AMI ID %q does not belong to role %q", instance.ImageId, roleName), nil
- }
- }
-
- // Validate the SubnetID if corresponding bound was set on the role
- if roleEntry.BoundSubnetID != "" {
- if instance.SubnetId == nil {
- return nil, fmt.Errorf("subnet ID in the instance description is nil")
- }
- if roleEntry.BoundSubnetID != *instance.SubnetId {
- return fmt.Errorf("subnet ID %q does not satisfy the constraint on role %q", *instance.SubnetId, roleName), nil
- }
- }
-
- // Validate the VpcID if corresponding bound was set on the role
- if roleEntry.BoundVpcID != "" {
- if instance.VpcId == nil {
- return nil, fmt.Errorf("VPC ID in the instance description is nil")
- }
- if roleEntry.BoundVpcID != *instance.VpcId {
- return fmt.Errorf("VPC ID %q does not satisfy the constraint on role %q", *instance.VpcId, roleName), nil
- }
- }
-
- // Check if the IAM instance profile ARN of the instance trying to
- // login, matches the IAM instance profile ARN specified as a constraint
- // on the role
- if roleEntry.BoundIamInstanceProfileARN != "" {
- if instance.IamInstanceProfile == nil {
- return nil, fmt.Errorf("IAM instance profile in the instance description is nil")
- }
- if instance.IamInstanceProfile.Arn == nil {
- return nil, fmt.Errorf("IAM instance profile ARN in the instance description is nil")
- }
- iamInstanceProfileARN := *instance.IamInstanceProfile.Arn
- if !strings.HasPrefix(iamInstanceProfileARN, roleEntry.BoundIamInstanceProfileARN) {
- return fmt.Errorf("IAM instance profile ARN %q does not satisfy the constraint role %q", iamInstanceProfileARN, roleName), nil
- }
- }
-
- // Check if the IAM role ARN of the instance trying to login, matches
- // the IAM role ARN specified as a constraint on the role.
- if roleEntry.BoundIamRoleARN != "" {
- if instance.IamInstanceProfile == nil {
- return nil, fmt.Errorf("IAM instance profile in the instance description is nil")
- }
- if instance.IamInstanceProfile.Arn == nil {
- return nil, fmt.Errorf("IAM instance profile ARN in the instance description is nil")
- }
-
- // Fetch the instance profile ARN from the instance description
- iamInstanceProfileARN := *instance.IamInstanceProfile.Arn
-
- if iamInstanceProfileARN == "" {
- return nil, fmt.Errorf("IAM instance profile ARN in the instance description is empty")
- }
-
- // Extract out the instance profile name from the instance
- // profile ARN
- iamInstanceProfileEntity, err := parseIamArn(iamInstanceProfileARN)
-
- if err != nil {
- return nil, fmt.Errorf("failed to parse IAM instance profile ARN %q; error: %v", iamInstanceProfileARN, err)
- }
-
- // Use instance profile ARN to fetch the associated role ARN
- iamClient, err := b.clientIAM(s, identityDoc.Region, identityDoc.AccountID)
- if err != nil {
- return nil, fmt.Errorf("could not fetch IAM client: %v", err)
- } else if iamClient == nil {
- return nil, fmt.Errorf("received a nil iamClient")
- }
- iamRoleARN, err := b.instanceIamRoleARN(iamClient, iamInstanceProfileEntity.FriendlyName)
- if err != nil {
- return nil, fmt.Errorf("IAM role ARN could not be fetched: %v", err)
- }
- if iamRoleARN == "" {
- return nil, fmt.Errorf("IAM role ARN could not be fetched")
- }
-
- if !strings.HasPrefix(iamRoleARN, roleEntry.BoundIamRoleARN) {
- return fmt.Errorf("IAM role ARN %q does not satisfy the constraint role %q", iamRoleARN, roleName), nil
- }
- }
-
- return nil, nil
-}
-
-// pathLoginUpdateEc2 is used to create a Vault token by the EC2 instances
-// by providing the pkcs7 signature of the instance identity document
-// and a client created nonce. Client nonce is optional if 'disallow_reauthentication'
-// option is enabled on the registered role.
-func (b *backend) pathLoginUpdateEc2(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- identityDocB64 := data.Get("identity").(string)
- var identityDocBytes []byte
- var err error
- if identityDocB64 != "" {
- identityDocBytes, err = base64.StdEncoding.DecodeString(identityDocB64)
- if err != nil || len(identityDocBytes) == 0 {
- return logical.ErrorResponse("failed to base64 decode the instance identity document"), nil
- }
- }
-
- signatureB64 := data.Get("signature").(string)
- var signatureBytes []byte
- if signatureB64 != "" {
- signatureBytes, err = base64.StdEncoding.DecodeString(signatureB64)
- if err != nil {
- return logical.ErrorResponse("failed to base64 decode the SHA256 RSA signature of the instance identity document"), nil
- }
- }
-
- pkcs7B64 := data.Get("pkcs7").(string)
-
- // Either the pkcs7 signature of the instance identity document, or
- // the identity document itself along with its SHA256 RSA signature
- // needs to be provided.
- if pkcs7B64 == "" && (len(identityDocBytes) == 0 && len(signatureBytes) == 0) {
- return logical.ErrorResponse("either pkcs7 or a tuple containing the instance identity document and its SHA256 RSA signature needs to be provided"), nil
- } else if pkcs7B64 != "" && (len(identityDocBytes) != 0 && len(signatureBytes) != 0) {
- return logical.ErrorResponse("both pkcs7 and a tuple containing the instance identity document and its SHA256 RSA signature is supplied; provide only one"), nil
- }
-
- // Verify the signature of the identity document and unmarshal it
- var identityDocParsed *identityDocument
- if pkcs7B64 != "" {
- identityDocParsed, err = b.parseIdentityDocument(req.Storage, pkcs7B64)
- if err != nil {
- return nil, err
- }
- if identityDocParsed == nil {
- return logical.ErrorResponse("failed to verify the instance identity document using pkcs7"), nil
- }
- } else {
- identityDocParsed, err = b.verifyInstanceIdentitySignature(req.Storage, identityDocBytes, signatureBytes)
- if err != nil {
- return nil, err
- }
- if identityDocParsed == nil {
- return logical.ErrorResponse("failed to verify the instance identity document using the SHA256 RSA digest"), nil
- }
- }
-
- roleName := data.Get("role").(string)
-
- // If roleName is not supplied, a role in the name of the instance's AMI ID will be looked for
- if roleName == "" {
- roleName = identityDocParsed.AmiID
- }
-
- // Get the entry for the role used by the instance
- roleEntry, err := b.lockedAWSRole(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
- if roleEntry == nil {
- return logical.ErrorResponse(fmt.Sprintf("entry for role %q not found", roleName)), nil
- }
-
- if roleEntry.AuthType != ec2AuthType {
- return logical.ErrorResponse(fmt.Sprintf("auth method ec2 not allowed for role %s", roleName)), nil
- }
-
- // Validate the instance ID by making a call to AWS EC2 DescribeInstances API
- // and fetching the instance description. Validation succeeds only if the
- // instance is in 'running' state.
- instance, err := b.validateInstance(req.Storage, identityDocParsed.InstanceID, identityDocParsed.Region, identityDocParsed.AccountID)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to verify instance ID: %v", err)), nil
- }
-
- // Verify that the `Region` of the instance trying to login matches the
- // `Region` specified as a constraint on role
- if roleEntry.BoundRegion != "" && identityDocParsed.Region != roleEntry.BoundRegion {
- return logical.ErrorResponse(fmt.Sprintf("Region %q does not satisfy the constraint on role %q", identityDocParsed.Region, roleName)), nil
- }
-
- validationError, err := b.verifyInstanceMeetsRoleRequirements(req.Storage, instance, roleEntry, roleName, identityDocParsed)
- if err != nil {
- return nil, err
- }
- if validationError != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error validating instance: %v", validationError)), nil
- }
-
- // Get the entry from the identity whitelist, if there is one
- storedIdentity, err := whitelistIdentityEntry(req.Storage, identityDocParsed.InstanceID)
- if err != nil {
- return nil, err
- }
-
- // disallowReauthentication value that gets cached at the stored
- // identity-whitelist entry is determined not just by the role entry.
- // If client explicitly sets nonce to be empty, it implies intent to
- // disable reauthentication. Also, role tag can override the 'false'
- // value with 'true' (the other way around is not allowed).
-
- // Read the value from the role entry
- disallowReauthentication := roleEntry.DisallowReauthentication
-
- clientNonce := ""
-
- // Check if the nonce is supplied by the client
- clientNonceRaw, clientNonceSupplied := data.GetOk("nonce")
- if clientNonceSupplied {
- clientNonce = clientNonceRaw.(string)
-
- // Nonce explicitly set to empty implies intent to disable
- // reauthentication by the client. Set a predefined nonce which
- // indicates reauthentication being disabled.
- if clientNonce == "" {
- clientNonce = reauthenticationDisabledNonce
-
- // Ensure that the intent lands in the whitelist
- disallowReauthentication = true
- }
- }
-
- // This is NOT a first login attempt from the client
- if storedIdentity != nil {
- // Check if the client nonce match the cached nonce and if the pending time
- // of the identity document is not before the pending time of the document
- // with which previous login was made. If 'allow_instance_migration' is
- // enabled on the registered role, client nonce requirement is relaxed.
- if err = validateMetadata(clientNonce, identityDocParsed.PendingTime, storedIdentity, roleEntry); err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- // Don't let subsequent login attempts to bypass in initial
- // intent of disabling reauthentication, despite the properties
- // of role getting updated. For example: Role has the value set
- // to 'false', a role-tag login sets the value to 'true', then
- // role gets updated to not use a role-tag, and a login attempt
- // is made with role's value set to 'false'. Removing the entry
- // from the identity-whitelist should be the only way to be
- // able to login from the instance again.
- disallowReauthentication = disallowReauthentication || storedIdentity.DisallowReauthentication
- }
-
- // If we reach this point without erroring and if the client nonce was
- // not supplied, a first time login is implied and that the client
- // intends that the nonce be generated by the backend. Create a random
- // nonce to be associated for the instance ID.
- if !clientNonceSupplied {
- if clientNonce, err = uuid.GenerateUUID(); err != nil {
- return nil, fmt.Errorf("failed to generate random nonce")
- }
- }
-
- // Load the current values for max TTL and policies from the role entry,
- // before checking for overriding max TTL in the role tag. The shortest
- // max TTL is used to cap the token TTL; the longest max TTL is used to
- // make the whitelist entry as long as possible as it controls for replay
- // attacks.
- shortestMaxTTL := b.System().MaxLeaseTTL()
- longestMaxTTL := b.System().MaxLeaseTTL()
- if roleEntry.MaxTTL > time.Duration(0) && roleEntry.MaxTTL < shortestMaxTTL {
- shortestMaxTTL = roleEntry.MaxTTL
- }
- if roleEntry.MaxTTL > longestMaxTTL {
- longestMaxTTL = roleEntry.MaxTTL
- }
-
- policies := roleEntry.Policies
- rTagMaxTTL := time.Duration(0)
- var roleTagResp *roleTagLoginResponse
- if roleEntry.RoleTag != "" {
- roleTagResp, err := b.handleRoleTagLogin(req.Storage, roleName, roleEntry, instance)
- if err != nil {
- return nil, err
- }
- if roleTagResp == nil {
- return logical.ErrorResponse("failed to fetch and verify the role tag"), nil
- }
- }
-
- if roleTagResp != nil {
- // Role tag is enabled on the role.
- //
-
- // Overwrite the policies with the ones returned from processing the role tag
- // If there are no policies on the role tag, policies on the role are inherited.
- // If policies on role tag are set, by this point, it is verified that it is a subset of the
- // policies on the role. So, apply only those.
- if len(roleTagResp.Policies) != 0 {
- policies = roleTagResp.Policies
- }
-
- // If roleEntry had disallowReauthentication set to 'true', do not reset it
- // to 'false' based on role tag having it not set. But, if role tag had it set,
- // be sure to override the value.
- if !disallowReauthentication {
- disallowReauthentication = roleTagResp.DisallowReauthentication
- }
-
- // Cache the value of role tag's max_ttl value
- rTagMaxTTL = roleTagResp.MaxTTL
-
- // Scope the shortestMaxTTL to the value set on the role tag
- if roleTagResp.MaxTTL > time.Duration(0) && roleTagResp.MaxTTL < shortestMaxTTL {
- shortestMaxTTL = roleTagResp.MaxTTL
- }
- if roleTagResp.MaxTTL > longestMaxTTL {
- longestMaxTTL = roleTagResp.MaxTTL
- }
- }
-
- // Save the login attempt in the identity whitelist
- currentTime := time.Now()
- if storedIdentity == nil {
- // Role, ClientNonce and CreationTime of the identity entry,
- // once set, should never change.
- storedIdentity = &whitelistIdentity{
- Role: roleName,
- ClientNonce: clientNonce,
- CreationTime: currentTime,
- }
- }
-
- // DisallowReauthentication, PendingTime, LastUpdatedTime and
- // ExpirationTime may change.
- storedIdentity.LastUpdatedTime = currentTime
- storedIdentity.ExpirationTime = currentTime.Add(longestMaxTTL)
- storedIdentity.PendingTime = identityDocParsed.PendingTime
- storedIdentity.DisallowReauthentication = disallowReauthentication
-
- // Don't cache the nonce if DisallowReauthentication is set
- if storedIdentity.DisallowReauthentication {
- storedIdentity.ClientNonce = ""
- }
-
- // Sanitize the nonce to a reasonable length
- if len(clientNonce) > 128 && !storedIdentity.DisallowReauthentication {
- return logical.ErrorResponse("client nonce exceeding the limit of 128 characters"), nil
- }
-
- if err = setWhitelistIdentityEntry(req.Storage, identityDocParsed.InstanceID, storedIdentity); err != nil {
- return nil, err
- }
-
- resp := &logical.Response{
- Auth: &logical.Auth{
- Period: roleEntry.Period,
- Policies: policies,
- Metadata: map[string]string{
- "instance_id": identityDocParsed.InstanceID,
- "region": identityDocParsed.Region,
- "account_id": identityDocParsed.AccountID,
- "role_tag_max_ttl": rTagMaxTTL.String(),
- "role": roleName,
- "ami_id": identityDocParsed.AmiID,
- },
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- TTL: roleEntry.TTL,
- },
- },
- }
-
- // Return the nonce only if reauthentication is allowed
- if !disallowReauthentication {
- // Echo the client nonce back. If nonce param was not supplied
- // to the endpoint at all (setting it to empty string does not
- // qualify here), callers should extract out the nonce from
- // this field for reauthentication requests.
- resp.Auth.Metadata["nonce"] = clientNonce
- }
-
- if roleEntry.Period > time.Duration(0) {
- resp.Auth.TTL = roleEntry.Period
- } else {
- // Cap the TTL value.
- shortestTTL := b.System().DefaultLeaseTTL()
- if roleEntry.TTL > time.Duration(0) && roleEntry.TTL < shortestTTL {
- shortestTTL = roleEntry.TTL
- }
- if shortestMaxTTL < shortestTTL {
- resp.AddWarning(fmt.Sprintf("Effective ttl of %q exceeded the effective max_ttl of %q; ttl value is capped appropriately", (shortestTTL / time.Second).String(), (shortestMaxTTL / time.Second).String()))
- shortestTTL = shortestMaxTTL
- }
- resp.Auth.TTL = shortestTTL
- }
-
- return resp, nil
-
-}
-
-// handleRoleTagLogin is used to fetch the role tag of the instance and
-// verifies it to be correct. Then the policies for the login request will be
-// set off of the role tag, if certain creteria satisfies.
-func (b *backend) handleRoleTagLogin(s logical.Storage, roleName string, roleEntry *awsRoleEntry, instance *ec2.Instance) (*roleTagLoginResponse, error) {
- if roleEntry == nil {
- return nil, fmt.Errorf("nil role entry")
- }
- if instance == nil {
- return nil, fmt.Errorf("nil instance")
- }
-
- // Input validation on instance is not performed here considering
- // that it would have been done in validateInstance method.
- tags := instance.Tags
- if tags == nil || len(tags) == 0 {
- return nil, fmt.Errorf("missing tag with key %q on the instance", roleEntry.RoleTag)
- }
-
- // Iterate through the tags attached on the instance and look for
- // a tag with its 'key' matching the expected role tag value.
- rTagValue := ""
- for _, tagItem := range tags {
- if tagItem.Key != nil && *tagItem.Key == roleEntry.RoleTag {
- rTagValue = *tagItem.Value
- break
- }
- }
-
- // If 'role_tag' is enabled on the role, and if a corresponding tag is not found
- // to be attached to the instance, fail.
- if rTagValue == "" {
- return nil, fmt.Errorf("missing tag with key %q on the instance", roleEntry.RoleTag)
- }
-
- // Parse the role tag into a struct, extract the plaintext part of it and verify its HMAC
- rTag, err := b.parseAndVerifyRoleTagValue(s, rTagValue)
- if err != nil {
- return nil, err
- }
-
- // Check if the role name with which this login is being made is same
- // as the role name embedded in the tag.
- if rTag.Role != roleName {
- return nil, fmt.Errorf("role on the tag is not matching the role supplied")
- }
-
- // If instance_id was set on the role tag, check if the same instance is attempting to login
- if rTag.InstanceID != "" && rTag.InstanceID != *instance.InstanceId {
- return nil, fmt.Errorf("role tag is being used by an unauthorized instance.")
- }
-
- // Check if the role tag is blacklisted
- blacklistEntry, err := b.lockedBlacklistRoleTagEntry(s, rTagValue)
- if err != nil {
- return nil, err
- }
- if blacklistEntry != nil {
- return nil, fmt.Errorf("role tag is blacklisted")
- }
-
- // Ensure that the policies on the RoleTag is a subset of policies on the role
- if !strutil.StrListSubset(roleEntry.Policies, rTag.Policies) {
- return nil, fmt.Errorf("policies on the role tag must be subset of policies on the role")
- }
-
- return &roleTagLoginResponse{
- Policies: rTag.Policies,
- MaxTTL: rTag.MaxTTL,
- DisallowReauthentication: rTag.DisallowReauthentication,
- }, nil
-}
-
-// pathLoginRenew is used to renew an authenticated token
-func (b *backend) pathLoginRenew(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- authType, ok := req.Auth.Metadata["auth_type"]
- if !ok {
- // backwards compatibility for clients that have leases from before we added auth_type
- authType = ec2AuthType
- }
-
- if authType == ec2AuthType {
- return b.pathLoginRenewEc2(req, data)
- } else if authType == iamAuthType {
- return b.pathLoginRenewIam(req, data)
- } else {
- return nil, fmt.Errorf("unrecognized auth_type: %q", authType)
- }
-}
-
-func (b *backend) pathLoginRenewIam(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- canonicalArn := req.Auth.Metadata["canonical_arn"]
- if canonicalArn == "" {
- return nil, fmt.Errorf("unable to retrieve canonical ARN from metadata during renewal")
- }
-
- roleName := req.Auth.InternalData["role_name"].(string)
- if roleName == "" {
- return nil, fmt.Errorf("error retrieving role_name during renewal")
- }
- roleEntry, err := b.lockedAWSRole(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
- if roleEntry == nil {
- return nil, fmt.Errorf("role entry not found")
- }
-
- // we don't really care what the inferred entity type was when the role was initially created. We
- // care about what the role currently requires. However, the metadata's inferred_entity_id is only
- // set when inferencing is turned on at initial login time. So, if inferencing is turned on, any
- // existing roles will NOT be able to renew tokens.
- // This might change later, but authenticating the actual inferred entity ID is NOT done if there
- // is no inferencing requested in the role. The reason is that authenticating the inferred entity
- // ID requires additional AWS IAM permissions that might not be present (e.g.,
- // ec2:DescribeInstances) as well as additional inferencing configuration (the inferred region).
- // So, for now, if you want to turn on inferencing, all clients must re-authenticate and cannot
- // renew existing tokens.
- if roleEntry.InferredEntityType != "" {
- if roleEntry.InferredEntityType == ec2EntityType {
- instanceID, ok := req.Auth.Metadata["inferred_entity_id"]
- if !ok {
- return nil, fmt.Errorf("no inferred entity ID in auth metadata")
- }
- instanceRegion, ok := req.Auth.Metadata["inferred_aws_region"]
- if !ok {
- return nil, fmt.Errorf("no inferred AWS region in auth metadata")
- }
- _, err := b.validateInstance(req.Storage, instanceID, instanceRegion, req.Auth.Metadata["account_id"])
- if err != nil {
- return nil, fmt.Errorf("failed to verify instance ID %q: %v", instanceID, err)
- }
- } else {
- return nil, fmt.Errorf("unrecognized entity_type in metadata: %q", roleEntry.InferredEntityType)
- }
- }
-
- // Note that the error messages below can leak a little bit of information about the role information
- // For example, if on renew, the client gets the "error parsing ARN..." error message, the client
- // will know that it's a wildcard bind (but not the actual bind), even if the client can't actually
- // read the role directly to know what the bind is. It's a relatively small amount of leakage, in
- // some fairly corner cases, and in the most likely error case (role has been changed to a new ARN),
- // the error message is identical.
- if roleEntry.BoundIamPrincipalARN != "" {
- // We might not get here if all bindings were on the inferred entity, which we've already validated
- // above
- clientUserId, ok := req.Auth.Metadata["client_user_id"]
- if ok && roleEntry.BoundIamPrincipalID != "" {
- // Resolving unique IDs is enabled and the auth metadata contains the unique ID, so checking the
- // unique ID is authoritative at this stage
- if roleEntry.BoundIamPrincipalID != clientUserId {
- return nil, fmt.Errorf("role no longer bound to ARN %q", canonicalArn)
- }
- } else if strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") {
- fullArn := b.getCachedUserId(clientUserId)
- if fullArn == "" {
- entity, err := parseIamArn(canonicalArn)
- if err != nil {
- return nil, fmt.Errorf("error parsing ARN %q: %v", canonicalArn, err)
- }
- fullArn, err = b.fullArn(entity, req.Storage)
- if err != nil {
- return nil, fmt.Errorf("error looking up full ARN of entity %v: %v", entity, err)
- }
- if fullArn == "" {
- return nil, fmt.Errorf("got empty string back when looking up full ARN of entity %v", entity)
- }
- if clientUserId != "" {
- b.setCachedUserId(clientUserId, fullArn)
- }
- }
- if !strutil.GlobbedStringsMatch(roleEntry.BoundIamPrincipalARN, fullArn) {
- return nil, fmt.Errorf("role no longer bound to ARN %q", canonicalArn)
- }
- } else if roleEntry.BoundIamPrincipalARN != canonicalArn {
- return nil, fmt.Errorf("role no longer bound to ARN %q", canonicalArn)
- }
- }
-
- // If 'Period' is set on the role, then the token should never expire.
- if roleEntry.Period > time.Duration(0) {
- req.Auth.TTL = roleEntry.Period
- return &logical.Response{Auth: req.Auth}, nil
- } else {
- return framework.LeaseExtend(roleEntry.TTL, roleEntry.MaxTTL, b.System())(req, data)
- }
-}
-
-func (b *backend) pathLoginRenewEc2(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- instanceID := req.Auth.Metadata["instance_id"]
- if instanceID == "" {
- return nil, fmt.Errorf("unable to fetch instance ID from metadata during renewal")
- }
-
- region := req.Auth.Metadata["region"]
- if region == "" {
- return nil, fmt.Errorf("unable to fetch region from metadata during renewal")
- }
-
- // Ensure backwards compatibility for older clients without account_id saved in metadata
- accountID, ok := req.Auth.Metadata["account_id"]
- if ok {
- if accountID == "" {
- return nil, fmt.Errorf("unable to fetch account_id from metadata during renewal")
- }
- }
-
- // Cross check that the instance is still in 'running' state
- _, err := b.validateInstance(req.Storage, instanceID, region, accountID)
- if err != nil {
- return nil, fmt.Errorf("failed to verify instance ID %q: %q", instanceID, err)
- }
-
- storedIdentity, err := whitelistIdentityEntry(req.Storage, instanceID)
- if err != nil {
- return nil, err
- }
- if storedIdentity == nil {
- return nil, fmt.Errorf("failed to verify the whitelist identity entry for instance ID: %q", instanceID)
- }
-
- // Ensure that role entry is not deleted
- roleEntry, err := b.lockedAWSRole(req.Storage, storedIdentity.Role)
- if err != nil {
- return nil, err
- }
- if roleEntry == nil {
- return nil, fmt.Errorf("role entry not found")
- }
-
- // If the login was made using the role tag, then max_ttl from tag
- // is cached in internal data during login and used here to cap the
- // max_ttl of renewal.
- rTagMaxTTL, err := time.ParseDuration(req.Auth.Metadata["role_tag_max_ttl"])
- if err != nil {
- return nil, err
- }
-
- // Re-evaluate the maxTTL bounds
- shortestMaxTTL := b.System().MaxLeaseTTL()
- longestMaxTTL := b.System().MaxLeaseTTL()
- if roleEntry.MaxTTL > time.Duration(0) && roleEntry.MaxTTL < shortestMaxTTL {
- shortestMaxTTL = roleEntry.MaxTTL
- }
- if roleEntry.MaxTTL > longestMaxTTL {
- longestMaxTTL = roleEntry.MaxTTL
- }
- if rTagMaxTTL > time.Duration(0) && rTagMaxTTL < shortestMaxTTL {
- shortestMaxTTL = rTagMaxTTL
- }
- if rTagMaxTTL > longestMaxTTL {
- longestMaxTTL = rTagMaxTTL
- }
-
- // Only LastUpdatedTime and ExpirationTime change and all other fields remain the same
- currentTime := time.Now()
- storedIdentity.LastUpdatedTime = currentTime
- storedIdentity.ExpirationTime = currentTime.Add(longestMaxTTL)
-
- // Updating the expiration time is required for the tidy operation on the
- // whitelist identity storage items
- if err = setWhitelistIdentityEntry(req.Storage, instanceID, storedIdentity); err != nil {
- return nil, err
- }
-
- // If 'Period' is set on the role, then the token should never expire. Role
- // tag does not have a 'Period' field. So, regarless of whether the token
- // was issued using a role login or a role tag login, the period set on the
- // role should take effect.
- if roleEntry.Period > time.Duration(0) {
- req.Auth.TTL = roleEntry.Period
- return &logical.Response{Auth: req.Auth}, nil
- } else {
- // Cap the TTL value
- shortestTTL := b.System().DefaultLeaseTTL()
- if roleEntry.TTL > time.Duration(0) && roleEntry.TTL < shortestTTL {
- shortestTTL = roleEntry.TTL
- }
- if shortestMaxTTL < shortestTTL {
- shortestTTL = shortestMaxTTL
- }
- return framework.LeaseExtend(shortestTTL, shortestMaxTTL, b.System())(req, data)
- }
-}
-
-func (b *backend) pathLoginUpdateIam(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
-
- method := data.Get("iam_http_request_method").(string)
- if method == "" {
- return logical.ErrorResponse("missing iam_http_request_method"), nil
- }
-
- // In the future, might consider supporting GET
- if method != "POST" {
- return logical.ErrorResponse("invalid iam_http_request_method; currently only 'POST' is supported"), nil
- }
-
- rawUrlB64 := data.Get("iam_request_url").(string)
- if rawUrlB64 == "" {
- return logical.ErrorResponse("missing iam_request_url"), nil
- }
- rawUrl, err := base64.StdEncoding.DecodeString(rawUrlB64)
- if err != nil {
- return logical.ErrorResponse("failed to base64 decode iam_request_url"), nil
- }
- parsedUrl, err := url.Parse(string(rawUrl))
- if err != nil {
- return logical.ErrorResponse("error parsing iam_request_url"), nil
- }
-
- // TODO: There are two potentially valid cases we're not yet supporting that would
- // necessitate this check being changed. First, if we support GET requests.
- // Second if we support presigned POST requests
- bodyB64 := data.Get("iam_request_body").(string)
- if bodyB64 == "" {
- return logical.ErrorResponse("missing iam_request_body"), nil
- }
- bodyRaw, err := base64.StdEncoding.DecodeString(bodyB64)
- if err != nil {
- return logical.ErrorResponse("failed to base64 decode iam_request_body"), nil
- }
- body := string(bodyRaw)
-
- headersB64 := data.Get("iam_request_headers").(string)
- if headersB64 == "" {
- return logical.ErrorResponse("missing iam_request_headers"), nil
- }
- headers, err := parseIamRequestHeaders(headersB64)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error parsing iam_request_headers: %v", err)), nil
- }
- if headers == nil {
- return logical.ErrorResponse("nil response when parsing iam_request_headers"), nil
- }
-
- config, err := b.lockedClientConfigEntry(req.Storage)
- if err != nil {
- return logical.ErrorResponse("error getting configuration"), nil
- }
-
- endpoint := "https://sts.amazonaws.com"
-
- if config != nil {
- if config.IAMServerIdHeaderValue != "" {
- err = validateVaultHeaderValue(headers, parsedUrl, config.IAMServerIdHeaderValue)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("error validating %s header: %v", iamServerIdHeader, err)), nil
- }
- }
- if config.STSEndpoint != "" {
- endpoint = config.STSEndpoint
- }
- }
-
- callerID, err := submitCallerIdentityRequest(method, endpoint, parsedUrl, body, headers)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("error making upstream request: %v", err)), nil
- }
- // This could either be a "userID:SessionID" (in the case of an assumed role) or just a "userID"
- // (in the case of an IAM user).
- callerUniqueId := strings.Split(callerID.UserId, ":")[0]
- entity, err := parseIamArn(callerID.Arn)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("error parsing arn %q: %v", callerID.Arn, err)), nil
- }
-
- roleName := data.Get("role").(string)
- if roleName == "" {
- roleName = entity.FriendlyName
- }
-
- roleEntry, err := b.lockedAWSRole(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
- if roleEntry == nil {
- return logical.ErrorResponse(fmt.Sprintf("entry for role %s not found", roleName)), nil
- }
-
- if roleEntry.AuthType != iamAuthType {
- return logical.ErrorResponse(fmt.Sprintf("auth method iam not allowed for role %s", roleName)), nil
- }
-
- // The role creation should ensure that either we're inferring this is an EC2 instance
- // or that we're binding an ARN
- // The only way BoundIamPrincipalID could get set is if BoundIamPrincipalARN was also set and
- // resolving to internal IDs was turned on, which can't be turned off. So, there should be no
- // way for this to be set and not match BoundIamPrincipalARN
- if roleEntry.BoundIamPrincipalID != "" {
- if callerUniqueId != roleEntry.BoundIamPrincipalID {
- return logical.ErrorResponse(fmt.Sprintf("expected IAM %s %s to resolve to unique AWS ID %q but got %q instead", entity.Type, entity.FriendlyName, roleEntry.BoundIamPrincipalID, callerUniqueId)), nil
- }
- } else if roleEntry.BoundIamPrincipalARN != "" {
- if strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") {
- fullArn := b.getCachedUserId(callerUniqueId)
- if fullArn == "" {
- fullArn, err = b.fullArn(entity, req.Storage)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("error looking up full ARN of entity %v: %v", entity, err)), nil
- }
- if fullArn == "" {
- return logical.ErrorResponse(fmt.Sprintf("got empty string back when looking up full ARN of entity %v", entity)), nil
- }
- b.setCachedUserId(callerUniqueId, fullArn)
- }
- if !strutil.GlobbedStringsMatch(roleEntry.BoundIamPrincipalARN, fullArn) {
- // Note: Intentionally giving the exact same error message as a few lines below. Otherwise, we might leak information
- // about whether the bound IAM principal ARN is a wildcard or not, and what that wildcard is.
- return logical.ErrorResponse(fmt.Sprintf("IAM Principal %q does not belong to the role %q", callerID.Arn, roleName)), nil
- }
- } else if roleEntry.BoundIamPrincipalARN != entity.canonicalArn() {
- return logical.ErrorResponse(fmt.Sprintf("IAM Principal %q does not belong to the role %q", callerID.Arn, roleName)), nil
- }
- }
-
- policies := roleEntry.Policies
-
- inferredEntityType := ""
- inferredEntityId := ""
- if roleEntry.InferredEntityType == ec2EntityType {
- instance, err := b.validateInstance(req.Storage, entity.SessionInfo, roleEntry.InferredAWSRegion, callerID.Account)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to verify %s as a valid EC2 instance in region %s", entity.SessionInfo, roleEntry.InferredAWSRegion)), nil
- }
-
- // build a fake identity doc to pass on metadata about the instance to verifyInstanceMeetsRoleRequirements
- identityDoc := &identityDocument{
- Tags: nil, // Don't really need the tags, so not doing the work of converting them from Instance.Tags to identityDocument.Tags
- InstanceID: *instance.InstanceId,
- AmiID: *instance.ImageId,
- AccountID: callerID.Account,
- Region: roleEntry.InferredAWSRegion,
- PendingTime: instance.LaunchTime.Format(time.RFC3339),
- }
-
- validationError, err := b.verifyInstanceMeetsRoleRequirements(req.Storage, instance, roleEntry, roleName, identityDoc)
- if err != nil {
- return nil, err
- }
- if validationError != nil {
- return logical.ErrorResponse(fmt.Sprintf("error validating instance: %s", validationError)), nil
- }
-
- inferredEntityType = ec2EntityType
- inferredEntityId = entity.SessionInfo
- }
-
- resp := &logical.Response{
- Auth: &logical.Auth{
- Period: roleEntry.Period,
- Policies: policies,
- Metadata: map[string]string{
- "client_arn": callerID.Arn,
- "canonical_arn": entity.canonicalArn(),
- "client_user_id": callerUniqueId,
- "auth_type": iamAuthType,
- "inferred_entity_type": inferredEntityType,
- "inferred_entity_id": inferredEntityId,
- "inferred_aws_region": roleEntry.InferredAWSRegion,
- "account_id": entity.AccountNumber,
- },
- InternalData: map[string]interface{}{
- "role_name": roleName,
- },
- DisplayName: entity.FriendlyName,
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- TTL: roleEntry.TTL,
- },
- },
- }
-
- if roleEntry.Period > time.Duration(0) {
- resp.Auth.TTL = roleEntry.Period
- } else {
- shortestTTL := b.System().DefaultLeaseTTL()
- if roleEntry.TTL > time.Duration(0) && roleEntry.TTL < shortestTTL {
- shortestTTL = roleEntry.TTL
- }
-
- maxTTL := b.System().MaxLeaseTTL()
- if roleEntry.MaxTTL > time.Duration(0) && roleEntry.MaxTTL < maxTTL {
- maxTTL = roleEntry.MaxTTL
- }
-
- if shortestTTL > maxTTL {
- resp.AddWarning(fmt.Sprintf("Effective TTL of %q exceeded the effective max_ttl of %q; TTL value is capped accordingly", (shortestTTL / time.Second).String(), (maxTTL / time.Second).String()))
- shortestTTL = maxTTL
- }
-
- resp.Auth.TTL = shortestTTL
- }
-
- return resp, nil
-}
-
-// These two methods (hasValuesFor*) return two bools
-// The first is a hasAll, that is, does the request have all the values
-// necessary for this auth method
-// The second is a hasAny, that is, does the request have any of the fields
-// exclusive to this auth method
-func hasValuesForEc2Auth(data *framework.FieldData) (bool, bool) {
- _, hasPkcs7 := data.GetOk("pkcs7")
- _, hasIdentity := data.GetOk("identity")
- _, hasSignature := data.GetOk("signature")
- return (hasPkcs7 || (hasIdentity && hasSignature)), (hasPkcs7 || hasIdentity || hasSignature)
-}
-
-func hasValuesForIamAuth(data *framework.FieldData) (bool, bool) {
- _, hasRequestMethod := data.GetOk("iam_http_request_method")
- _, hasRequestUrl := data.GetOk("iam_request_url")
- _, hasRequestBody := data.GetOk("iam_request_body")
- _, hasRequestHeaders := data.GetOk("iam_request_headers")
- return (hasRequestMethod && hasRequestUrl && hasRequestBody && hasRequestHeaders),
- (hasRequestMethod || hasRequestUrl || hasRequestBody || hasRequestHeaders)
-}
-
-func parseIamArn(iamArn string) (*iamEntity, error) {
- // iamArn should look like one of the following:
- // 1. arn:aws:iam:::/
- // 2. arn:aws:sts:::assumed-role//
- // if we get something like 2, then we want to transform that back to what
- // most people would expect, which is arn:aws:iam:::role/
- var entity iamEntity
- fullParts := strings.Split(iamArn, ":")
- if len(fullParts) != 6 {
- return nil, fmt.Errorf("unrecognized arn: contains %d colon-separated parts, expected 6", len(fullParts))
- }
- if fullParts[0] != "arn" {
- return nil, fmt.Errorf("unrecognized arn: does not begin with arn:")
- }
- // normally aws, but could be aws-cn or aws-us-gov
- entity.Partition = fullParts[1]
- if fullParts[2] != "iam" && fullParts[2] != "sts" {
- return nil, fmt.Errorf("unrecognized service: %v, not one of iam or sts", fullParts[2])
- }
- // fullParts[3] is the region, which doesn't matter for AWS IAM entities
- entity.AccountNumber = fullParts[4]
- // fullParts[5] would now be something like user/ or assumed-role//
- parts := strings.Split(fullParts[5], "/")
- if len(parts) < 2 {
- return nil, fmt.Errorf("unrecognized arn: %q contains fewer than 2 slash-separated parts", fullParts[5])
- }
- entity.Type = parts[0]
- entity.Path = strings.Join(parts[1:len(parts)-1], "/")
- entity.FriendlyName = parts[len(parts)-1]
- // now, entity.FriendlyName should either be or
- switch entity.Type {
- case "assumed-role":
- // Assumed roles don't have paths and have a slightly different format
- // parts[2] is
- entity.Path = ""
- entity.FriendlyName = parts[1]
- entity.SessionInfo = parts[2]
- case "user":
- case "role":
- case "instance-profile":
- default:
- return &iamEntity{}, fmt.Errorf("unrecognized principal type: %q", entity.Type)
- }
- return &entity, nil
-}
-
-func validateVaultHeaderValue(headers http.Header, requestUrl *url.URL, requiredHeaderValue string) error {
- providedValue := ""
- for k, v := range headers {
- if strings.ToLower(iamServerIdHeader) == strings.ToLower(k) {
- providedValue = strings.Join(v, ",")
- break
- }
- }
- if providedValue == "" {
- return fmt.Errorf("didn't find %s", iamServerIdHeader)
- }
-
- // NOT doing a constant time compare here since the value is NOT intended to be secret
- if providedValue != requiredHeaderValue {
- return fmt.Errorf("expected %s but got %s", requiredHeaderValue, providedValue)
- }
-
- if authzHeaders, ok := headers["Authorization"]; ok {
- // authzHeader looks like AWS4-HMAC-SHA256 Credential=AKI..., SignedHeaders=host;x-amz-date;x-vault-awsiam-id, Signature=...
- // We need to extract out the SignedHeaders
- re := regexp.MustCompile(".*SignedHeaders=([^,]+)")
- authzHeader := strings.Join(authzHeaders, ",")
- matches := re.FindSubmatch([]byte(authzHeader))
- if len(matches) < 1 {
- return fmt.Errorf("vault header wasn't signed")
- }
- if len(matches) > 2 {
- return fmt.Errorf("found multiple SignedHeaders components")
- }
- signedHeaders := string(matches[1])
- return ensureHeaderIsSigned(signedHeaders, iamServerIdHeader)
- }
- // TODO: If we support GET requests, then we need to parse the X-Amz-SignedHeaders
- // argument out of the query string and search in there for the header value
- return fmt.Errorf("missing Authorization header")
-}
-
-func buildHttpRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) *http.Request {
- // This is all a bit complicated because the AWS signature algorithm requires that
- // the Host header be included in the signed headers. See
- // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
- // The use cases we want to support, in order of increasing complexity, are:
- // 1. All defaults (client assumes sts.amazonaws.com and server has no override)
- // 2. Alternate STS regions: client wants to go to a specific region, in which case
- // Vault must be confiugred with that endpoint as well. The client's signed request
- // will include a signature over what the client expects the Host header to be,
- // so we cannot change that and must match.
- // 3. Alternate STS regions with a proxy that is transparent to Vault's clients.
- // In this case, Vault is aware of the proxy, as the proxy is configured as the
- // endpoint, but the clients should NOT be aware of the proxy (because STS will
- // not be aware of the proxy)
- // It's also annoying because:
- // 1. The AWS Sigv4 algorithm requires the Host header to be defined
- // 2. Some of the official SDKs (at least botocore and aws-sdk-go) don't actually
- // incude an explicit Host header in the HTTP requests they generate, relying on
- // the underlying HTTP library to do that for them.
- // 3. To get a validly signed request, the SDKs check if a Host header has been set
- // and, if not, add an inferred host header (based on the URI) to the internal
- // data structure used for calculating the signature, but never actually expose
- // that to clients. So then they just "hope" that the underlying library actually
- // adds the right Host header which was included in the signature calculation.
- // We could either explicity require all Vault clients to explicitly add the Host header
- // in the encoded request, or we could also implicitly infer it from the URI.
- // We choose to support both -- allow you to explicitly set a Host header, but if not,
- // infer one from the URI.
- // HOWEVER, we have to preserve the request URI portion of the client's
- // URL because the GetCallerIdentity Action can be encoded in either the body
- // or the URL. So, we need to rebuild the URL sent to the http library to have the
- // custom, Vault-specified endpoint with the client-side request parameters.
- targetUrl := fmt.Sprintf("%s/%s", endpoint, parsedUrl.RequestURI())
- request, err := http.NewRequest(method, targetUrl, strings.NewReader(body))
- if err != nil {
- return nil
- }
- request.Host = parsedUrl.Host
- for k, vals := range headers {
- for _, val := range vals {
- request.Header.Add(k, val)
- }
- }
- return request
-}
-
-func ensureHeaderIsSigned(signedHeaders, headerToSign string) error {
- // Not doing a constant time compare here, the values aren't secret
- for _, header := range strings.Split(signedHeaders, ";") {
- if header == strings.ToLower(headerToSign) {
- return nil
- }
- }
- return fmt.Errorf("vault header wasn't signed")
-}
-
-func parseGetCallerIdentityResponse(response string) (GetCallerIdentityResponse, error) {
- decoder := xml.NewDecoder(strings.NewReader(response))
- result := GetCallerIdentityResponse{}
- err := decoder.Decode(&result)
- return result, err
-}
-
-func parseIamRequestHeaders(headersB64 string) (http.Header, error) {
- headersJson, err := base64.StdEncoding.DecodeString(headersB64)
- if err != nil {
- return nil, fmt.Errorf("failed to base64 decode iam_request_headers")
- }
- var headersDecoded map[string]interface{}
- err = jsonutil.DecodeJSON(headersJson, &headersDecoded)
- if err != nil {
- return nil, fmt.Errorf("failed to JSON decode iam_request_headers %q: %v", headersJson, err)
- }
- headers := make(http.Header)
- for k, v := range headersDecoded {
- switch typedValue := v.(type) {
- case string:
- headers.Add(k, typedValue)
- case []interface{}:
- for _, individualVal := range typedValue {
- switch possibleStrVal := individualVal.(type) {
- case string:
- headers.Add(k, possibleStrVal)
- default:
- return nil, fmt.Errorf("header %q contains value %q that has type %s, not string", k, individualVal, reflect.TypeOf(individualVal))
- }
- }
- default:
- return nil, fmt.Errorf("header %q value %q has type %s, not string or []interface", k, typedValue, reflect.TypeOf(v))
- }
- }
- return headers, nil
-}
-
-func submitCallerIdentityRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) (*GetCallerIdentityResult, error) {
- // NOTE: We need to ensure we're calling STS, instead of acting as an unintended network proxy
- // The protection against this is that this method will only call the endpoint specified in the
- // client config (defaulting to sts.amazonaws.com), so it would require a Vault admin to override
- // the endpoint to talk to alternate web addresses
- request := buildHttpRequest(method, endpoint, parsedUrl, body, headers)
- client := cleanhttp.DefaultClient()
- response, err := client.Do(request)
- if err != nil {
- return nil, fmt.Errorf("error making request: %v", err)
- }
- if response != nil {
- defer response.Body.Close()
- }
- // we check for status code afterwards to also print out response body
- responseBody, err := ioutil.ReadAll(response.Body)
- if err != nil {
- return nil, err
- }
- if response.StatusCode != 200 {
- return nil, fmt.Errorf("received error code %s from STS: %s", response.StatusCode, string(responseBody))
- }
- callerIdentityResponse, err := parseGetCallerIdentityResponse(string(responseBody))
- if err != nil {
- return nil, fmt.Errorf("error parsing STS response")
- }
- return &callerIdentityResponse.GetCallerIdentityResult[0], nil
-}
-
-type GetCallerIdentityResponse struct {
- XMLName xml.Name `xml:"GetCallerIdentityResponse"`
- GetCallerIdentityResult []GetCallerIdentityResult `xml:"GetCallerIdentityResult"`
- ResponseMetadata []ResponseMetadata `xml:"ResponseMetadata"`
-}
-
-type GetCallerIdentityResult struct {
- Arn string `xml:"Arn"`
- UserId string `xml:"UserId"`
- Account string `xml:"Account"`
-}
-
-type ResponseMetadata struct {
- RequestId string `xml:"RequestId"`
-}
-
-// identityDocument represents the items of interest from the EC2 instance
-// identity document
-type identityDocument struct {
- Tags map[string]interface{} `json:"tags,omitempty" structs:"tags" mapstructure:"tags"`
- InstanceID string `json:"instanceId,omitempty" structs:"instanceId" mapstructure:"instanceId"`
- AmiID string `json:"imageId,omitempty" structs:"imageId" mapstructure:"imageId"`
- AccountID string `json:"accountId,omitempty" structs:"accountId" mapstructure:"accountId"`
- Region string `json:"region,omitempty" structs:"region" mapstructure:"region"`
- PendingTime string `json:"pendingTime,omitempty" structs:"pendingTime" mapstructure:"pendingTime"`
-}
-
-// roleTagLoginResponse represents the return values required after the process
-// of verifying a role tag login
-type roleTagLoginResponse struct {
- Policies []string `json:"policies" structs:"policies" mapstructure:"policies"`
- MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
- DisallowReauthentication bool `json:"disallow_reauthentication" structs:"disallow_reauthentication" mapstructure:"disallow_reauthentication"`
-}
-
-type iamEntity struct {
- Partition string
- AccountNumber string
- Type string
- Path string
- FriendlyName string
- SessionInfo string
-}
-
-// Returns a Vault-internal canonical ARN for referring to an IAM entity
-func (e *iamEntity) canonicalArn() string {
- entityType := e.Type
- // canonicalize "assumed-role" into "role"
- if entityType == "assumed-role" {
- entityType = "role"
- }
- // Annoyingly, the assumed-role entity type doesn't have the Path of the role which was assumed
- // So, we "canonicalize" it by just completely dropping the path. The other option would be to
- // make an AWS API call to look up the role by FriendlyName, which introduces more complexity to
- // code and test, and it also breaks backwards compatibility in an area where we would really want
- // it
- return fmt.Sprintf("arn:%s:iam::%s:%s/%s", e.Partition, e.AccountNumber, entityType, e.FriendlyName)
-}
-
-// This returns the "full" ARN of an iamEntity, how it would be referred to in AWS proper
-func (b *backend) fullArn(e *iamEntity, s logical.Storage) (string, error) {
- // Not assuming path is reliable for any entity types
- client, err := b.clientIAM(s, getAnyRegionForAwsPartition(e.Partition).ID(), e.AccountNumber)
- if err != nil {
- return "", fmt.Errorf("error creating IAM client: %v", err)
- }
-
- switch e.Type {
- case "user":
- input := iam.GetUserInput{
- UserName: aws.String(e.FriendlyName),
- }
- resp, err := client.GetUser(&input)
- if err != nil {
- return "", fmt.Errorf("error fetching user %q: %v", e.FriendlyName, err)
- }
- if resp == nil {
- return "", fmt.Errorf("nil response from GetUser")
- }
- return *(resp.User.Arn), nil
- case "assumed-role":
- fallthrough
- case "role":
- input := iam.GetRoleInput{
- RoleName: aws.String(e.FriendlyName),
- }
- resp, err := client.GetRole(&input)
- if err != nil {
- return "", fmt.Errorf("error fetching role %q: %v", e.FriendlyName, err)
- }
- if resp == nil {
- return "", fmt.Errorf("nil response form GetRole")
- }
- return *(resp.Role.Arn), nil
- default:
- return "", fmt.Errorf("unrecognized entity type: %s", e.Type)
- }
-}
-
-const iamServerIdHeader = "X-Vault-AWS-IAM-Server-ID"
-
-const pathLoginSyn = `
-Authenticates an EC2 instance with Vault.
-`
-
-const pathLoginDesc = `
-Authenticate AWS entities, either an arbitrary IAM principal or EC2 instances.
-
-IAM principals are authenticated by processing a signed sts:GetCallerIdentity
-request and then parsing the response to see who signed the request. Optionally,
-the caller can be inferred to be another AWS entity type, with EC2 instances
-the only currently supported entity type, and additional filtering can be
-implemented based on that inferred type.
-
-An EC2 instance is authenticated using the PKCS#7 signature of the instance identity
-document and a client created nonce. This nonce should be unique and should be used by
-the instance for all future logins, unless 'disallow_reauthenitcation' option on the
-registered role is enabled, in which case client nonce is optional.
-
-First login attempt, creates a whitelist entry in Vault associating the instance to the nonce
-provided. All future logins will succeed only if the client nonce matches the nonce in the
-whitelisted entry.
-
-By default, a cron task will periodically look for expired entries in the whitelist
-and deletes them. The duration to periodically run this, is one hour by default.
-However, this can be configured using the 'config/tidy/identities' endpoint. This tidy
-action can be triggered via the API as well, using the 'tidy/identities' endpoint.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go
deleted file mode 100644
index f813a58..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package awsauth
-
-import (
- "encoding/base64"
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
- "reflect"
- "testing"
-)
-
-func TestBackend_pathLogin_getCallerIdentityResponse(t *testing.T) {
- responseFromUser := `
-
- arn:aws:iam::123456789012:user/MyUserName
- ASOMETHINGSOMETHINGSOMETHING
- 123456789012
-
-
- 7f4fc40c-853a-11e6-8848-8d035d01eb87
-
-`
- expectedUserArn := "arn:aws:iam::123456789012:user/MyUserName"
-
- responseFromAssumedRole := `
-
- arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName
- ASOMETHINGSOMETHINGELSE:RoleSessionName
- 123456789012
-
-
- 7f4fc40c-853a-11e6-8848-8d035d01eb87
-
-`
- expectedRoleArn := "arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName"
-
- parsedUserResponse, err := parseGetCallerIdentityResponse(responseFromUser)
- if err != nil {
- t.Fatal(err)
- }
- if parsed_arn := parsedUserResponse.GetCallerIdentityResult[0].Arn; parsed_arn != expectedUserArn {
- t.Errorf("expected to parse arn %#v, got %#v", expectedUserArn, parsed_arn)
- }
-
- parsedRoleResponse, err := parseGetCallerIdentityResponse(responseFromAssumedRole)
- if err != nil {
- t.Fatal(err)
- }
- if parsed_arn := parsedRoleResponse.GetCallerIdentityResult[0].Arn; parsed_arn != expectedRoleArn {
- t.Errorf("expected to parn arn %#v; got %#v", expectedRoleArn, parsed_arn)
- }
-
- _, err = parseGetCallerIdentityResponse("SomeRandomGibberish")
- if err == nil {
- t.Errorf("expected to NOT parse random giberish, but didn't get an error")
- }
-}
-
-func TestBackend_pathLogin_parseIamArn(t *testing.T) {
- testParser := func(inputArn, expectedCanonicalArn string, expectedEntity iamEntity) {
- entity, err := parseIamArn(inputArn)
- if err != nil {
- t.Fatal(err)
- }
- if expectedCanonicalArn != "" && entity.canonicalArn() != expectedCanonicalArn {
- t.Fatalf("expected to canonicalize ARN %q into %q but got %q instead", inputArn, expectedCanonicalArn, entity.canonicalArn())
- }
- if *entity != expectedEntity {
- t.Fatalf("expected to get iamEntity %#v from input ARN %q but instead got %#v", expectedEntity, inputArn, *entity)
- }
- }
-
- testParser("arn:aws:iam::123456789012:user/UserPath/MyUserName",
- "arn:aws:iam::123456789012:user/MyUserName",
- iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "user", Path: "UserPath", FriendlyName: "MyUserName"},
- )
- canonicalRoleArn := "arn:aws:iam::123456789012:role/RoleName"
- testParser("arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName",
- canonicalRoleArn,
- iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "assumed-role", FriendlyName: "RoleName", SessionInfo: "RoleSessionName"},
- )
- testParser("arn:aws:iam::123456789012:role/RolePath/RoleName",
- canonicalRoleArn,
- iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "role", Path: "RolePath", FriendlyName: "RoleName"},
- )
- testParser("arn:aws:iam::123456789012:instance-profile/profilePath/InstanceProfileName",
- "",
- iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "instance-profile", Path: "profilePath", FriendlyName: "InstanceProfileName"},
- )
-
- // Test that it properly handles pathological inputs...
- _, err := parseIamArn("")
- if err == nil {
- t.Error("expected error from empty input string")
- }
-
- _, err = parseIamArn("arn:aws:iam::123456789012:role")
- if err == nil {
- t.Error("expected error from malformed ARN without a role name")
- }
-
- _, err = parseIamArn("arn:aws:iam")
- if err == nil {
- t.Error("expected error from incomplete ARN (arn:aws:iam)")
- }
-
- _, err = parseIamArn("arn:aws:iam::1234556789012:/")
- if err == nil {
- t.Error("expected error from empty principal type and no principal name (arn:aws:iam::1234556789012:/)")
- }
-}
-
-func TestBackend_validateVaultHeaderValue(t *testing.T) {
- const canaryHeaderValue = "Vault-Server"
- requestUrl, err := url.Parse("https://sts.amazonaws.com/")
- if err != nil {
- t.Fatalf("error parsing test URL: %v", err)
- }
- postHeadersMissing := http.Header{
- "Host": []string{"Foo"},
- "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
- }
- postHeadersInvalid := http.Header{
- "Host": []string{"Foo"},
- iamServerIdHeader: []string{"InvalidValue"},
- "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
- }
- postHeadersUnsigned := http.Header{
- "Host": []string{"Foo"},
- iamServerIdHeader: []string{canaryHeaderValue},
- "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
- }
- postHeadersValid := http.Header{
- "Host": []string{"Foo"},
- iamServerIdHeader: []string{canaryHeaderValue},
- "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
- }
-
- postHeadersSplit := http.Header{
- "Host": []string{"Foo"},
- iamServerIdHeader: []string{canaryHeaderValue},
- "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request", "SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
- }
-
- err = validateVaultHeaderValue(postHeadersMissing, requestUrl, canaryHeaderValue)
- if err == nil {
- t.Error("validated POST request with missing Vault header")
- }
-
- err = validateVaultHeaderValue(postHeadersInvalid, requestUrl, canaryHeaderValue)
- if err == nil {
- t.Error("validated POST request with invalid Vault header value")
- }
-
- err = validateVaultHeaderValue(postHeadersUnsigned, requestUrl, canaryHeaderValue)
- if err == nil {
- t.Error("validated POST request with unsigned Vault header")
- }
-
- err = validateVaultHeaderValue(postHeadersValid, requestUrl, canaryHeaderValue)
- if err != nil {
- t.Errorf("did NOT validate valid POST request: %v", err)
- }
-
- err = validateVaultHeaderValue(postHeadersSplit, requestUrl, canaryHeaderValue)
- if err != nil {
- t.Errorf("did NOT validate valid POST request with split Authorization header: %v", err)
- }
-}
-
-func TestBackend_pathLogin_parseIamRequestHeaders(t *testing.T) {
- testIamParser := func(headers interface{}, expectedHeaders http.Header) error {
- headersJson, err := json.Marshal(headers)
- if err != nil {
- return fmt.Errorf("unable to JSON encode headers: %v", err)
- }
- headersB64 := base64.StdEncoding.EncodeToString(headersJson)
-
- parsedHeaders, err := parseIamRequestHeaders(headersB64)
- if err != nil {
- return fmt.Errorf("error parsing encoded headers: %v", err)
- }
- if parsedHeaders == nil {
- return fmt.Errorf("nil result from parsing headers")
- }
- if !reflect.DeepEqual(parsedHeaders, expectedHeaders) {
- return fmt.Errorf("parsed headers not equal to input headers")
- }
- return nil
- }
-
- headersGoStyle := http.Header{
- "Header1": []string{"Value1"},
- "Header2": []string{"Value2"},
- }
- headersMixedType := map[string]interface{}{
- "Header1": "Value1",
- "Header2": []string{"Value2"},
- }
-
- err := testIamParser(headersGoStyle, headersGoStyle)
- if err != nil {
- t.Errorf("error parsing go-style headers: %v", err)
- }
- err = testIamParser(headersMixedType, headersGoStyle)
- if err != nil {
- t.Errorf("error parsing mixed-style headers: %v", err)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go
deleted file mode 100644
index 476beca..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go
+++ /dev/null
@@ -1,790 +0,0 @@
-package awsauth
-
-import (
- "fmt"
- "strings"
- "time"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathRole(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role"),
- Fields: map[string]*framework.FieldSchema{
- "role": {
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "auth_type": {
- Type: framework.TypeString,
- Description: `The auth_type permitted to authenticate to this role. Must be one of
-iam or ec2 and cannot be changed after role creation.`,
- },
- "bound_ami_id": {
- Type: framework.TypeString,
- Description: `If set, defines a constraint on the EC2 instances that they should be
-using the AMI ID specified by this parameter.`,
- },
- "bound_account_id": {
- Type: framework.TypeString,
- Description: `If set, defines a constraint on the EC2 instances that the account ID
-in its identity document to match the one specified by this parameter.`,
- },
- "bound_iam_principal_arn": {
- Type: framework.TypeString,
- Description: `ARN of the IAM principal to bind to this role. Only applicable when
-auth_type is iam.`,
- },
- "bound_region": {
- Type: framework.TypeString,
- Description: `If set, defines a constraint on the EC2 instances that the region in
-its identity document to match the one specified by this parameter. Only applicable when
-auth_type is ec2.`,
- },
- "bound_iam_role_arn": {
- Type: framework.TypeString,
- Description: `If set, defines a constraint on the authenticating EC2 instance
-that it must match the IAM role ARN specified by this parameter.
-The value is prefix-matched (as though it were a glob ending in
-'*'). The configured IAM user or EC2 instance role must be allowed
-to execute the 'iam:GetInstanceProfile' action if this is
-specified. This is only checked when auth_type is
-ec2.`,
- },
- "bound_iam_instance_profile_arn": {
- Type: framework.TypeString,
- Description: `If set, defines a constraint on the EC2 instances to be associated
-with an IAM instance profile ARN which has a prefix that matches
-the value specified by this parameter. The value is prefix-matched
-(as though it were a glob ending in '*'). This is only checked when
-auth_type is ec2.`,
- },
- "resolve_aws_unique_ids": {
- Type: framework.TypeBool,
- Default: true,
- Description: `If set, resolve all AWS IAM ARNs into AWS's internal unique IDs.
-When an IAM entity (e.g., user, role, or instance profile) is deleted, then all references
-to it within the role will be invalidated, which prevents a new IAM entity from being created
-with the same name and matching the role's IAM binds. Once set, this cannot be unset.`,
- },
- "inferred_entity_type": {
- Type: framework.TypeString,
- Description: `When auth_type is iam, the
-AWS entity type to infer from the authenticated principal. The only supported
-value is ec2_instance, which will extract the EC2 instance ID from the
-authenticated role and apply the following restrictions specific to EC2
-instances: bound_ami_id, bound_account_id, bound_iam_role_arn,
-bound_iam_instance_profile_arn, bound_vpc_id, bound_subnet_id. The configured
-EC2 client must be able to find the inferred instance ID in the results, and the
-instance must be running. If unable to determine the EC2 instance ID or unable
-to find the EC2 instance ID among running instances, then authentication will
-fail.`,
- },
- "inferred_aws_region": {
- Type: framework.TypeString,
- Description: `When auth_type is iam and
-inferred_entity_type is set, the region to assume the inferred entity exists in.`,
- },
- "bound_vpc_id": {
- Type: framework.TypeString,
- Description: `
-If set, defines a constraint on the EC2 instance to be associated with the VPC
-ID that matches the value specified by this parameter.`,
- },
- "bound_subnet_id": {
- Type: framework.TypeString,
- Description: `
-If set, defines a constraint on the EC2 instance to be associated with the
-subnet ID that matches the value specified by this parameter.`,
- },
- "role_tag": {
- Type: framework.TypeString,
- Default: "",
- Description: `If set, enables the role tags for this role. The value set for this
-field should be the 'key' of the tag on the EC2 instance. The 'value'
-of the tag should be generated using 'role//tag' endpoint.
-Defaults to an empty string, meaning that role tags are disabled. This
-is only allowed if auth_type is ec2.`,
- },
- "period": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: `
-If set, indicates that the token generated using this role should never expire. The token should be renewed within the duration specified by this value. At each renewal, the token's TTL will be set to the value of this parameter.`,
- },
- "ttl": {
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: `Duration in seconds after which the issued token should expire. Defaults
-to 0, in which case the value will fallback to the system/mount defaults.`,
- },
- "max_ttl": {
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: "The maximum allowed lifetime of tokens issued using this role.",
- },
- "policies": {
- Type: framework.TypeCommaStringSlice,
- Default: "default",
- Description: "Policies to be set on tokens issued using this role.",
- },
- "allow_instance_migration": {
- Type: framework.TypeBool,
- Default: false,
- Description: `If set, allows migration of the underlying instance where the client
-resides. This keys off of pendingTime in the metadata document, so
-essentially, this disables the client nonce check whenever the
-instance is migrated to a new host and pendingTime is newer than the
-previously-remembered time. Use with caution. This is only checked when
-auth_type is ec2.`,
- },
- "disallow_reauthentication": {
- Type: framework.TypeBool,
- Default: false,
- Description: "If set, only allows a single token to be granted per instance ID. In order to perform a fresh login, the entry in whitelist for the instance ID needs to be cleared using 'auth/aws-ec2/identity-whitelist/' endpoint.",
- },
- },
-
- ExistenceCheck: b.pathRoleExistenceCheck,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.CreateOperation: b.pathRoleCreateUpdate,
- logical.UpdateOperation: b.pathRoleCreateUpdate,
- logical.ReadOperation: b.pathRoleRead,
- logical.DeleteOperation: b.pathRoleDelete,
- },
-
- HelpSynopsis: pathRoleSyn,
- HelpDescription: pathRoleDesc,
- }
-}
-
-func pathListRole(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "role/?",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
-
- HelpSynopsis: pathListRolesHelpSyn,
- HelpDescription: pathListRolesHelpDesc,
- }
-}
-
-func pathListRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/?",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
-
- HelpSynopsis: pathListRolesHelpSyn,
- HelpDescription: pathListRolesHelpDesc,
- }
-}
-
-// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
-// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
-func (b *backend) pathRoleExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
- entry, err := b.lockedAWSRole(req.Storage, strings.ToLower(data.Get("role").(string)))
- if err != nil {
- return false, err
- }
- return entry != nil, nil
-}
-
-// lockedAWSRole returns the properties set on the given role. This method
-// acquires the read lock before reading the role from the storage.
-func (b *backend) lockedAWSRole(s logical.Storage, roleName string) (*awsRoleEntry, error) {
- if roleName == "" {
- return nil, fmt.Errorf("missing role name")
- }
-
- b.roleMutex.RLock()
- roleEntry, err := b.nonLockedAWSRole(s, roleName)
- // we manually unlock rather than defer the unlock because we might need to grab
- // a read/write lock in the upgrade path
- b.roleMutex.RUnlock()
- if err != nil {
- return nil, err
- }
- if roleEntry == nil {
- return nil, nil
- }
- needUpgrade, err := b.upgradeRoleEntry(s, roleEntry)
- if err != nil {
- return nil, fmt.Errorf("error upgrading roleEntry: %v", err)
- }
- if needUpgrade {
- b.roleMutex.Lock()
- defer b.roleMutex.Unlock()
- // Now that we have a R/W lock, we need to re-read the role entry in case it was
- // written to between releasing the read lock and acquiring the write lock
- roleEntry, err = b.nonLockedAWSRole(s, roleName)
- if err != nil {
- return nil, err
- }
- // somebody deleted the role, so no use in putting it back
- if roleEntry == nil {
- return nil, nil
- }
- // now re-check to see if we need to upgrade
- if needUpgrade, err = b.upgradeRoleEntry(s, roleEntry); err != nil {
- return nil, fmt.Errorf("error upgrading roleEntry: %v", err)
- }
- if needUpgrade {
- if err = b.nonLockedSetAWSRole(s, roleName, roleEntry); err != nil {
- return nil, fmt.Errorf("error saving upgraded roleEntry: %v", err)
- }
- }
- }
- return roleEntry, nil
-}
-
-// lockedSetAWSRole creates or updates a role in the storage. This method
-// acquires the write lock before creating or updating the role at the storage.
-func (b *backend) lockedSetAWSRole(s logical.Storage, roleName string, roleEntry *awsRoleEntry) error {
- if roleName == "" {
- return fmt.Errorf("missing role name")
- }
-
- if roleEntry == nil {
- return fmt.Errorf("nil role entry")
- }
-
- b.roleMutex.Lock()
- defer b.roleMutex.Unlock()
-
- return b.nonLockedSetAWSRole(s, roleName, roleEntry)
-}
-
-// nonLockedSetAWSRole creates or updates a role in the storage. This method
-// does not acquire the write lock before reading the role from the storage. If
-// locking is desired, use lockedSetAWSRole instead.
-func (b *backend) nonLockedSetAWSRole(s logical.Storage, roleName string,
- roleEntry *awsRoleEntry) error {
- if roleName == "" {
- return fmt.Errorf("missing role name")
- }
-
- if roleEntry == nil {
- return fmt.Errorf("nil role entry")
- }
-
- entry, err := logical.StorageEntryJSON("role/"+strings.ToLower(roleName), roleEntry)
- if err != nil {
- return err
- }
-
- if err := s.Put(entry); err != nil {
- return err
- }
-
- return nil
-}
-
-// If needed, updates the role entry and returns a bool indicating if it was updated
-// (and thus needs to be persisted)
-func (b *backend) upgradeRoleEntry(s logical.Storage, roleEntry *awsRoleEntry) (bool, error) {
- if roleEntry == nil {
- return false, fmt.Errorf("received nil roleEntry")
- }
- var upgraded bool
- // Check if the value held by role ARN field is actually an instance profile ARN
- if roleEntry.BoundIamRoleARN != "" && strings.Contains(roleEntry.BoundIamRoleARN, ":instance-profile/") {
- // If yes, move it to the correct field
- roleEntry.BoundIamInstanceProfileARN = roleEntry.BoundIamRoleARN
-
- // Reset the old field
- roleEntry.BoundIamRoleARN = ""
-
- upgraded = true
- }
-
- // Check if there was no pre-existing AuthType set (from older versions)
- if roleEntry.AuthType == "" {
- // then default to the original behavior of ec2
- roleEntry.AuthType = ec2AuthType
- upgraded = true
- }
-
- if roleEntry.AuthType == iamAuthType &&
- roleEntry.ResolveAWSUniqueIDs &&
- roleEntry.BoundIamPrincipalARN != "" &&
- roleEntry.BoundIamPrincipalID == "" &&
- !strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") {
- principalId, err := b.resolveArnToUniqueIDFunc(s, roleEntry.BoundIamPrincipalARN)
- if err != nil {
- return false, err
- }
- roleEntry.BoundIamPrincipalID = principalId
- upgraded = true
- }
-
- return upgraded, nil
-
-}
-
-// nonLockedAWSRole returns the properties set on the given role. This method
-// does not acquire the read lock before reading the role from the storage. If
-// locking is desired, use lockedAWSRole instead.
-// This method also does NOT check to see if a role upgrade is required. It is
-// the responsibility of the caller to check if a role upgrade is required and,
-// if so, to upgrade the role
-func (b *backend) nonLockedAWSRole(s logical.Storage, roleName string) (*awsRoleEntry, error) {
- if roleName == "" {
- return nil, fmt.Errorf("missing role name")
- }
-
- entry, err := s.Get("role/" + strings.ToLower(roleName))
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result awsRoleEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-// pathRoleDelete is used to delete the information registered for a given AMI ID.
-func (b *backend) pathRoleDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role"), nil
- }
-
- b.roleMutex.Lock()
- defer b.roleMutex.Unlock()
-
- return nil, req.Storage.Delete("role/" + strings.ToLower(roleName))
-}
-
-// pathRoleList is used to list all the AMI IDs registered with Vault.
-func (b *backend) pathRoleList(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.roleMutex.RLock()
- defer b.roleMutex.RUnlock()
-
- roles, err := req.Storage.List("role/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(roles), nil
-}
-
-// pathRoleRead is used to view the information registered for a given AMI ID.
-func (b *backend) pathRoleRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleEntry, err := b.lockedAWSRole(req.Storage, strings.ToLower(data.Get("role").(string)))
- if err != nil {
- return nil, err
- }
- if roleEntry == nil {
- return nil, nil
- }
-
- // Prepare the map of all the entries in the roleEntry.
- respData := structs.New(roleEntry).Map()
-
- // HMAC key belonging to the role should NOT be exported.
- delete(respData, "hmac_key")
-
- // Display all the durations in seconds
- respData["ttl"] = roleEntry.TTL / time.Second
- respData["max_ttl"] = roleEntry.MaxTTL / time.Second
- respData["period"] = roleEntry.Period / time.Second
-
- return &logical.Response{
- Data: respData,
- }, nil
-}
-
-// pathRoleCreateUpdate is used to associate Vault policies to a given AMI ID.
-func (b *backend) pathRoleCreateUpdate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
-
- roleName := strings.ToLower(data.Get("role").(string))
- if roleName == "" {
- return logical.ErrorResponse("missing role"), nil
- }
-
- b.roleMutex.Lock()
- defer b.roleMutex.Unlock()
-
- roleEntry, err := b.nonLockedAWSRole(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
- if roleEntry == nil {
- roleEntry = &awsRoleEntry{}
- } else {
- needUpdate, err := b.upgradeRoleEntry(req.Storage, roleEntry)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to update roleEntry: %v", err)), nil
- }
- if needUpdate {
- err = b.nonLockedSetAWSRole(req.Storage, roleName, roleEntry)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to save upgraded roleEntry: %v", err)), nil
- }
- }
- }
-
- // Fetch and set the bound parameters. There can't be default values
- // for these.
- if boundAmiIDRaw, ok := data.GetOk("bound_ami_id"); ok {
- roleEntry.BoundAmiID = boundAmiIDRaw.(string)
- }
-
- if boundAccountIDRaw, ok := data.GetOk("bound_account_id"); ok {
- roleEntry.BoundAccountID = boundAccountIDRaw.(string)
- }
-
- if boundRegionRaw, ok := data.GetOk("bound_region"); ok {
- roleEntry.BoundRegion = boundRegionRaw.(string)
- }
-
- if boundVpcIDRaw, ok := data.GetOk("bound_vpc_id"); ok {
- roleEntry.BoundVpcID = boundVpcIDRaw.(string)
- }
-
- if boundSubnetIDRaw, ok := data.GetOk("bound_subnet_id"); ok {
- roleEntry.BoundSubnetID = boundSubnetIDRaw.(string)
- }
-
- if resolveAWSUniqueIDsRaw, ok := data.GetOk("resolve_aws_unique_ids"); ok {
- switch {
- case req.Operation == logical.CreateOperation:
- roleEntry.ResolveAWSUniqueIDs = resolveAWSUniqueIDsRaw.(bool)
- case roleEntry.ResolveAWSUniqueIDs && !resolveAWSUniqueIDsRaw.(bool):
- return logical.ErrorResponse("changing resolve_aws_unique_ids from true to false is not allowed"), nil
- default:
- roleEntry.ResolveAWSUniqueIDs = resolveAWSUniqueIDsRaw.(bool)
- }
- } else if req.Operation == logical.CreateOperation {
- roleEntry.ResolveAWSUniqueIDs = data.Get("resolve_aws_unique_ids").(bool)
- }
-
- if boundIamRoleARNRaw, ok := data.GetOk("bound_iam_role_arn"); ok {
- roleEntry.BoundIamRoleARN = boundIamRoleARNRaw.(string)
- }
-
- if boundIamInstanceProfileARNRaw, ok := data.GetOk("bound_iam_instance_profile_arn"); ok {
- roleEntry.BoundIamInstanceProfileARN = boundIamInstanceProfileARNRaw.(string)
- }
-
- if boundIamPrincipalARNRaw, ok := data.GetOk("bound_iam_principal_arn"); ok {
- principalARN := boundIamPrincipalARNRaw.(string)
- roleEntry.BoundIamPrincipalARN = principalARN
- // Explicitly not checking to see if the user has changed the ARN under us
- // This allows the user to sumbit an update with the same ARN to force Vault
- // to re-resolve the ARN to the unique ID, in case an entity was deleted and
- // recreated
- if roleEntry.ResolveAWSUniqueIDs && !strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") {
- principalID, err := b.resolveArnToUniqueIDFunc(req.Storage, principalARN)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed updating the unique ID of ARN %#v: %#v", principalARN, err)), nil
- }
- roleEntry.BoundIamPrincipalID = principalID
- } else {
- // Need to handle the case where we're switching from a non-wildcard principal to a wildcard principal
- roleEntry.BoundIamPrincipalID = ""
- }
- } else if roleEntry.ResolveAWSUniqueIDs && roleEntry.BoundIamPrincipalARN != "" && !strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") {
- // we're turning on resolution on this role, so ensure we update it
- principalID, err := b.resolveArnToUniqueIDFunc(req.Storage, roleEntry.BoundIamPrincipalARN)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("unable to resolve ARN %#v to internal ID: %#v", roleEntry.BoundIamPrincipalARN, err)), nil
- }
- roleEntry.BoundIamPrincipalID = principalID
- }
-
- if inferRoleTypeRaw, ok := data.GetOk("inferred_entity_type"); ok {
- roleEntry.InferredEntityType = inferRoleTypeRaw.(string)
- }
-
- if inferredAWSRegionRaw, ok := data.GetOk("inferred_aws_region"); ok {
- roleEntry.InferredAWSRegion = inferredAWSRegionRaw.(string)
- }
-
- // auth_type is a special case as it's immutable and can't be changed once a role is created
- if authTypeRaw, ok := data.GetOk("auth_type"); ok {
- // roleEntry.AuthType should only be "" when it's a new role; existing roles without an
- // auth_type should have already been upgraded to have one before we get here
- if roleEntry.AuthType == "" {
- switch authTypeRaw.(string) {
- case ec2AuthType, iamAuthType:
- roleEntry.AuthType = authTypeRaw.(string)
- default:
- return logical.ErrorResponse(fmt.Sprintf("unrecognized auth_type: %v", authTypeRaw.(string))), nil
- }
- } else if authTypeRaw.(string) != roleEntry.AuthType {
- return logical.ErrorResponse("changing auth_type on a role is not allowed"), nil
- }
- } else if req.Operation == logical.CreateOperation {
- switch req.MountType {
- // maintain backwards compatibility for old aws-ec2 auth types
- case "aws-ec2":
- roleEntry.AuthType = ec2AuthType
- // but default to iamAuth for new mounts going forward
- case "aws":
- roleEntry.AuthType = iamAuthType
- default:
- roleEntry.AuthType = iamAuthType
- }
- }
-
- allowEc2Binds := roleEntry.AuthType == ec2AuthType
-
- if roleEntry.InferredEntityType != "" {
- switch {
- case roleEntry.AuthType != iamAuthType:
- return logical.ErrorResponse("specified inferred_entity_type but didn't allow iam auth_type"), nil
- case roleEntry.InferredEntityType != ec2EntityType:
- return logical.ErrorResponse(fmt.Sprintf("specified invalid inferred_entity_type: %s", roleEntry.InferredEntityType)), nil
- case roleEntry.InferredAWSRegion == "":
- return logical.ErrorResponse("specified inferred_entity_type but not inferred_aws_region"), nil
- }
- allowEc2Binds = true
- } else if roleEntry.InferredAWSRegion != "" {
- return logical.ErrorResponse("specified inferred_aws_region but not inferred_entity_type"), nil
- }
-
- numBinds := 0
-
- if roleEntry.BoundAccountID != "" {
- if !allowEc2Binds {
- return logical.ErrorResponse(fmt.Sprintf("specified bound_account_id but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
- }
- numBinds++
- }
-
- if roleEntry.BoundRegion != "" {
- if roleEntry.AuthType != ec2AuthType {
- return logical.ErrorResponse("specified bound_region but not allowing ec2 auth_type"), nil
- }
- numBinds++
- }
-
- if roleEntry.BoundAmiID != "" {
- if !allowEc2Binds {
- return logical.ErrorResponse(fmt.Sprintf("specified bound_ami_id but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
- }
- numBinds++
- }
-
- if roleEntry.BoundIamInstanceProfileARN != "" {
- if !allowEc2Binds {
- return logical.ErrorResponse(fmt.Sprintf("specified bound_iam_instance_profile_arn but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
- }
- numBinds++
- }
-
- if roleEntry.BoundIamRoleARN != "" {
- if !allowEc2Binds {
- return logical.ErrorResponse(fmt.Sprintf("specified bound_iam_role_arn but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
- }
- numBinds++
- }
-
- if roleEntry.BoundIamPrincipalARN != "" {
- if roleEntry.AuthType != iamAuthType {
- return logical.ErrorResponse("specified bound_iam_principal_arn but not allowing iam auth_type"), nil
- }
- numBinds++
- }
-
- if roleEntry.BoundVpcID != "" {
- if !allowEc2Binds {
- return logical.ErrorResponse(fmt.Sprintf("specified bound_vpc_id but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
- }
- numBinds++
- }
-
- if roleEntry.BoundSubnetID != "" {
- if !allowEc2Binds {
- return logical.ErrorResponse(fmt.Sprintf("specified bound_subnet_id but not allowing ec2 auth_type or inferring %s", ec2EntityType)), nil
- }
- numBinds++
- }
-
- if numBinds == 0 {
- return logical.ErrorResponse("at least be one bound parameter should be specified on the role"), nil
- }
-
- policiesRaw, ok := data.GetOk("policies")
- if ok {
- roleEntry.Policies = policyutil.ParsePolicies(policiesRaw)
- } else if req.Operation == logical.CreateOperation {
- roleEntry.Policies = []string{}
- }
-
- disallowReauthenticationBool, ok := data.GetOk("disallow_reauthentication")
- if ok {
- if roleEntry.AuthType != ec2AuthType {
- return logical.ErrorResponse("specified disallow_reauthentication when not using ec2 auth type"), nil
- }
- roleEntry.DisallowReauthentication = disallowReauthenticationBool.(bool)
- } else if req.Operation == logical.CreateOperation && roleEntry.AuthType == ec2AuthType {
- roleEntry.DisallowReauthentication = data.Get("disallow_reauthentication").(bool)
- }
-
- allowInstanceMigrationBool, ok := data.GetOk("allow_instance_migration")
- if ok {
- if roleEntry.AuthType != ec2AuthType {
- return logical.ErrorResponse("specified allow_instance_migration when not using ec2 auth type"), nil
- }
- roleEntry.AllowInstanceMigration = allowInstanceMigrationBool.(bool)
- } else if req.Operation == logical.CreateOperation && roleEntry.AuthType == ec2AuthType {
- roleEntry.AllowInstanceMigration = data.Get("allow_instance_migration").(bool)
- }
-
- var resp logical.Response
-
- ttlRaw, ok := data.GetOk("ttl")
- if ok {
- ttl := time.Duration(ttlRaw.(int)) * time.Second
- defaultLeaseTTL := b.System().DefaultLeaseTTL()
- if ttl > defaultLeaseTTL {
- resp.AddWarning(fmt.Sprintf("Given ttl of %d seconds greater than current mount/system default of %d seconds; ttl will be capped at login time", ttl/time.Second, defaultLeaseTTL/time.Second))
- }
- roleEntry.TTL = ttl
- } else if req.Operation == logical.CreateOperation {
- roleEntry.TTL = time.Duration(data.Get("ttl").(int)) * time.Second
- }
-
- maxTTLInt, ok := data.GetOk("max_ttl")
- if ok {
- maxTTL := time.Duration(maxTTLInt.(int)) * time.Second
- systemMaxTTL := b.System().MaxLeaseTTL()
- if maxTTL > systemMaxTTL {
- resp.AddWarning(fmt.Sprintf("Given max_ttl of %d seconds greater than current mount/system default of %d seconds; max_ttl will be capped at login time", maxTTL/time.Second, systemMaxTTL/time.Second))
- }
-
- if maxTTL < time.Duration(0) {
- return logical.ErrorResponse("max_ttl cannot be negative"), nil
- }
-
- roleEntry.MaxTTL = maxTTL
- } else if req.Operation == logical.CreateOperation {
- roleEntry.MaxTTL = time.Duration(data.Get("max_ttl").(int)) * time.Second
- }
-
- if roleEntry.MaxTTL != 0 && roleEntry.MaxTTL < roleEntry.TTL {
- return logical.ErrorResponse("ttl should be shorter than max_ttl"), nil
- }
-
- periodRaw, ok := data.GetOk("period")
- if ok {
- roleEntry.Period = time.Second * time.Duration(periodRaw.(int))
- } else if req.Operation == logical.CreateOperation {
- roleEntry.Period = time.Second * time.Duration(data.Get("period").(int))
- }
-
- if roleEntry.Period > b.System().MaxLeaseTTL() {
- return logical.ErrorResponse(fmt.Sprintf("'period' of '%s' is greater than the backend's maximum lease TTL of '%s'", roleEntry.Period.String(), b.System().MaxLeaseTTL().String())), nil
- }
-
- roleTagStr, ok := data.GetOk("role_tag")
- if ok {
- if roleEntry.AuthType != ec2AuthType {
- return logical.ErrorResponse("tried to enable role_tag when not using ec2 auth method"), nil
- }
- roleEntry.RoleTag = roleTagStr.(string)
- // There is a limit of 127 characters on the tag key for AWS EC2 instances.
- // Complying to that requirement, do not allow the value of 'key' to be more than that.
- if len(roleEntry.RoleTag) > 127 {
- return logical.ErrorResponse("length of role tag exceeds the EC2 key limit of 127 characters"), nil
- }
- } else if req.Operation == logical.CreateOperation && roleEntry.AuthType == ec2AuthType {
- roleEntry.RoleTag = data.Get("role_tag").(string)
- }
-
- if roleEntry.HMACKey == "" {
- roleEntry.HMACKey, err = uuid.GenerateUUID()
- if err != nil {
- return nil, fmt.Errorf("failed to generate role HMAC key: %v", err)
- }
- }
-
- if err := b.nonLockedSetAWSRole(req.Storage, roleName, roleEntry); err != nil {
- return nil, err
- }
-
- if len(resp.Warnings) == 0 {
- return nil, nil
- }
-
- return &resp, nil
-}
-
-// Struct to hold the information associated with an AMI ID in Vault.
-type awsRoleEntry struct {
- AuthType string `json:"auth_type" structs:"auth_type" mapstructure:"auth_type"`
- BoundAmiID string `json:"bound_ami_id" structs:"bound_ami_id" mapstructure:"bound_ami_id"`
- BoundAccountID string `json:"bound_account_id" structs:"bound_account_id" mapstructure:"bound_account_id"`
- BoundIamPrincipalARN string `json:"bound_iam_principal_arn" structs:"bound_iam_principal_arn" mapstructure:"bound_iam_principal_arn"`
- BoundIamPrincipalID string `json:"bound_iam_principal_id" structs:"bound_iam_principal_id" mapstructure:"bound_iam_principal_id"`
- BoundIamRoleARN string `json:"bound_iam_role_arn" structs:"bound_iam_role_arn" mapstructure:"bound_iam_role_arn"`
- BoundIamInstanceProfileARN string `json:"bound_iam_instance_profile_arn" structs:"bound_iam_instance_profile_arn" mapstructure:"bound_iam_instance_profile_arn"`
- BoundRegion string `json:"bound_region" structs:"bound_region" mapstructure:"bound_region"`
- BoundSubnetID string `json:"bound_subnet_id" structs:"bound_subnet_id" mapstructure:"bound_subnet_id"`
- BoundVpcID string `json:"bound_vpc_id" structs:"bound_vpc_id" mapstructure:"bound_vpc_id"`
- InferredEntityType string `json:"inferred_entity_type" structs:"inferred_entity_type" mapstructure:"inferred_entity_type"`
- InferredAWSRegion string `json:"inferred_aws_region" structs:"inferred_aws_region" mapstructure:"inferred_aws_region"`
- ResolveAWSUniqueIDs bool `json:"resolve_aws_unique_ids" structs:"resolve_aws_unique_ids" mapstructure:"resolve_aws_unique_ids"`
- RoleTag string `json:"role_tag" structs:"role_tag" mapstructure:"role_tag"`
- AllowInstanceMigration bool `json:"allow_instance_migration" structs:"allow_instance_migration" mapstructure:"allow_instance_migration"`
- TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
- MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
- Policies []string `json:"policies" structs:"policies" mapstructure:"policies"`
- DisallowReauthentication bool `json:"disallow_reauthentication" structs:"disallow_reauthentication" mapstructure:"disallow_reauthentication"`
- HMACKey string `json:"hmac_key" structs:"hmac_key" mapstructure:"hmac_key"`
- Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
-}
-
-const pathRoleSyn = `
-Create a role and associate policies to it.
-`
-
-const pathRoleDesc = `
-A precondition for login is that a role should be created in the backend.
-The login endpoint takes in the role name against which the instance
-should be validated. After authenticating the instance, the authorization
-for the instance to access Vault's resources is determined by the policies
-that are associated to the role though this endpoint.
-
-When the instances require only a subset of policies on the role, then
-'role_tag' option on the role can be enabled to create a role tag via the
-endpoint 'role//tag'. This tag then needs to be applied on the
-instance before it attempts a login. The policies on the tag should be a
-subset of policies that are associated to the role. In order to enable
-login using tags, 'role_tag' option should be set while creating a role.
-This only applies when authenticating EC2 instances.
-
-Also, a 'max_ttl' can be configured in this endpoint that determines the maximum
-duration for which a login can be renewed. Note that the 'max_ttl' has an upper
-limit of the 'max_ttl' value on the backend's mount.
-`
-
-const pathListRolesHelpSyn = `
-Lists all the roles that are registered with Vault.
-`
-
-const pathListRolesHelpDesc = `
-Roles will be listed by their respective role names.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go
deleted file mode 100644
index 0f5dc5e..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go
+++ /dev/null
@@ -1,432 +0,0 @@
-package awsauth
-
-import (
- "crypto/hmac"
- "crypto/sha256"
- "crypto/subtle"
- "encoding/base64"
- "fmt"
- "strconv"
- "strings"
- "time"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const roleTagVersion = "v1"
-
-func pathRoleTag(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "role/" + framework.GenericNameRegex("role") + "/tag$",
- Fields: map[string]*framework.FieldSchema{
- "role": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
-
- "instance_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Instance ID for which this tag is intended for.
-If set, the created tag can only be used by the instance with the given ID.`,
- },
-
- "policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Policies to be associated with the tag. If set, must be a subset of the role's policies. If set, but set to an empty value, only the 'default' policy will be given to issued tokens.",
- },
-
- "max_ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: "If set, specifies the maximum allowed token lifetime.",
- },
-
- "allow_instance_migration": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: "If set, allows migration of the underlying instance where the client resides. This keys off of pendingTime in the metadata document, so essentially, this disables the client nonce check whenever the instance is migrated to a new host and pendingTime is newer than the previously-remembered time. Use with caution.",
- },
-
- "disallow_reauthentication": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: "If set, only allows a single token to be granted per instance ID. In order to perform a fresh login, the entry in whitelist for the instance ID needs to be cleared using the 'auth/aws-ec2/identity-whitelist/' endpoint.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoleTagUpdate,
- },
-
- HelpSynopsis: pathRoleTagSyn,
- HelpDescription: pathRoleTagDesc,
- }
-}
-
-// pathRoleTagUpdate is used to create an EC2 instance tag which will
-// identify the Vault resources that the instance will be authorized for.
-func (b *backend) pathRoleTagUpdate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
-
- roleName := strings.ToLower(data.Get("role").(string))
- if roleName == "" {
- return logical.ErrorResponse("missing role"), nil
- }
-
- // Fetch the role entry
- roleEntry, err := b.lockedAWSRole(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
- if roleEntry == nil {
- return logical.ErrorResponse(fmt.Sprintf("entry not found for role %s", roleName)), nil
- }
-
- // If RoleTag is empty, disallow creation of tag.
- if roleEntry.RoleTag == "" {
- return logical.ErrorResponse("tag creation is not enabled for this role"), nil
- }
-
- // There should be a HMAC key present in the role entry
- if roleEntry.HMACKey == "" {
- // Not being able to find the HMACKey is an internal error
- return nil, fmt.Errorf("failed to find the HMAC key")
- }
-
- resp := &logical.Response{}
-
- // Instance ID is an optional field.
- instanceID := strings.ToLower(data.Get("instance_id").(string))
-
- // If no policies field was not supplied, then the tag should inherit all the policies
- // on the role. But, it was provided, but set to empty explicitly, only "default" policy
- // should be inherited. So, by leaving the policies var unset to anything when it is not
- // supplied, we ensure that it inherits all the policies on the role.
- var policies []string
- policiesRaw, ok := data.GetOk("policies")
- if ok {
- policies = policyutil.ParsePolicies(policiesRaw)
- }
- if !strutil.StrListSubset(roleEntry.Policies, policies) {
- resp.AddWarning("Policies on the tag are not a subset of the policies set on the role. Login will not be allowed with this tag unless the role policies are updated.")
- }
-
- // This is an optional field.
- disallowReauthentication := data.Get("disallow_reauthentication").(bool)
-
- // This is an optional field.
- allowInstanceMigration := data.Get("allow_instance_migration").(bool)
- if allowInstanceMigration && !roleEntry.AllowInstanceMigration {
- resp.AddWarning("Role does not allow instance migration. Login will not be allowed with this tag unless the role value is updated.")
- }
-
- // max_ttl for the role tag should be less than the max_ttl set on the role.
- maxTTL := time.Duration(data.Get("max_ttl").(int)) * time.Second
-
- // max_ttl on the tag should not be greater than the system view's max_ttl value.
- if maxTTL > b.System().MaxLeaseTTL() {
- resp.AddWarning(fmt.Sprintf("Given max TTL of %d is greater than the mount maximum of %d seconds, and will be capped at login time.", maxTTL/time.Second, b.System().MaxLeaseTTL()/time.Second))
- }
- // If max_ttl is set for the role, check the bounds for tag's max_ttl value using that.
- if roleEntry.MaxTTL != time.Duration(0) && maxTTL > roleEntry.MaxTTL {
- resp.AddWarning(fmt.Sprintf("Given max TTL of %d is greater than the role maximum of %d seconds, and will be capped at login time.", maxTTL/time.Second, roleEntry.MaxTTL/time.Second))
- }
-
- if maxTTL < time.Duration(0) {
- return logical.ErrorResponse("max_ttl cannot be negative"), nil
- }
-
- // Create a random nonce.
- nonce, err := createRoleTagNonce()
- if err != nil {
- return nil, err
- }
-
- // Create a role tag out of all the information provided.
- rTagValue, err := createRoleTagValue(&roleTag{
- Version: roleTagVersion,
- Role: roleName,
- Nonce: nonce,
- Policies: policies,
- MaxTTL: maxTTL,
- InstanceID: instanceID,
- DisallowReauthentication: disallowReauthentication,
- AllowInstanceMigration: allowInstanceMigration,
- }, roleEntry)
- if err != nil {
- return nil, err
- }
-
- // Return the key to be used for the tag and the value to be used for that tag key.
- // This key value pair should be set on the EC2 instance.
- resp.Data = map[string]interface{}{
- "tag_key": roleEntry.RoleTag,
- "tag_value": rTagValue,
- }
-
- return resp, nil
-}
-
-// createRoleTagValue prepares the plaintext version of the role tag,
-// and appends a HMAC of the plaintext value to it, before returning.
-func createRoleTagValue(rTag *roleTag, roleEntry *awsRoleEntry) (string, error) {
- if rTag == nil {
- return "", fmt.Errorf("nil role tag")
- }
-
- if roleEntry == nil {
- return "", fmt.Errorf("nil role entry")
- }
-
- // Attach version, nonce, policies and maxTTL to the role tag value.
- rTagPlaintext, err := prepareRoleTagPlaintextValue(rTag)
- if err != nil {
- return "", err
- }
-
- // Attach HMAC to tag's plaintext and return.
- return appendHMAC(rTagPlaintext, roleEntry)
-}
-
-// Takes in the plaintext part of the role tag, creates a HMAC of it and returns
-// a role tag value containing both the plaintext part and the HMAC part.
-func appendHMAC(rTagPlaintext string, roleEntry *awsRoleEntry) (string, error) {
- if rTagPlaintext == "" {
- return "", fmt.Errorf("empty role tag plaintext string")
- }
-
- if roleEntry == nil {
- return "", fmt.Errorf("nil role entry")
- }
-
- // Create the HMAC of the value
- hmacB64, err := createRoleTagHMACBase64(roleEntry.HMACKey, rTagPlaintext)
- if err != nil {
- return "", err
- }
-
- // attach the HMAC to the value
- rTagValue := fmt.Sprintf("%s:%s", rTagPlaintext, hmacB64)
-
- // This limit of 255 is enforced on the EC2 instance. Hence complying to that here.
- if len(rTagValue) > 255 {
- return "", fmt.Errorf("role tag 'value' exceeding the limit of 255 characters")
- }
-
- return rTagValue, nil
-}
-
-// verifyRoleTagValue rebuilds the role tag's plaintext part, computes the HMAC
-// from it using the role specific HMAC key and compares it with the received HMAC.
-func verifyRoleTagValue(rTag *roleTag, roleEntry *awsRoleEntry) (bool, error) {
- if rTag == nil {
- return false, fmt.Errorf("nil role tag")
- }
-
- if roleEntry == nil {
- return false, fmt.Errorf("nil role entry")
- }
-
- // Fetch the plaintext part of role tag
- rTagPlaintext, err := prepareRoleTagPlaintextValue(rTag)
- if err != nil {
- return false, err
- }
-
- // Compute the HMAC of the plaintext
- hmacB64, err := createRoleTagHMACBase64(roleEntry.HMACKey, rTagPlaintext)
- if err != nil {
- return false, err
- }
-
- return subtle.ConstantTimeCompare([]byte(rTag.HMAC), []byte(hmacB64)) == 1, nil
-}
-
-// prepareRoleTagPlaintextValue builds the role tag value without the HMAC in it.
-func prepareRoleTagPlaintextValue(rTag *roleTag) (string, error) {
- if rTag == nil {
- return "", fmt.Errorf("nil role tag")
- }
- if rTag.Version == "" {
- return "", fmt.Errorf("missing version")
- }
- if rTag.Nonce == "" {
- return "", fmt.Errorf("missing nonce")
- }
- if rTag.Role == "" {
- return "", fmt.Errorf("missing role")
- }
-
- // Attach Version, Nonce, Role, DisallowReauthentication and AllowInstanceMigration
- // fields to the role tag.
- value := fmt.Sprintf("%s:%s:r=%s:d=%s:m=%s", rTag.Version, rTag.Nonce, rTag.Role, strconv.FormatBool(rTag.DisallowReauthentication), strconv.FormatBool(rTag.AllowInstanceMigration))
-
- // Attach the policies only if they are specified.
- if len(rTag.Policies) != 0 {
- value = fmt.Sprintf("%s:p=%s", value, strings.Join(rTag.Policies, ","))
- }
-
- // Attach instance_id if set.
- if rTag.InstanceID != "" {
- value = fmt.Sprintf("%s:i=%s", value, rTag.InstanceID)
- }
-
- // Attach max_ttl if it is provided.
- if int(rTag.MaxTTL.Seconds()) > 0 {
- value = fmt.Sprintf("%s:t=%d", value, int(rTag.MaxTTL.Seconds()))
- }
-
- return value, nil
-}
-
-// Parses the tag from string form into a struct form. This method
-// also verifies the correctness of the parsed role tag.
-func (b *backend) parseAndVerifyRoleTagValue(s logical.Storage, tag string) (*roleTag, error) {
- tagItems := strings.Split(tag, ":")
-
- // Tag must contain version, nonce, policies and HMAC
- if len(tagItems) < 4 {
- return nil, fmt.Errorf("invalid tag")
- }
-
- rTag := &roleTag{}
-
- // Cache the HMAC value. The last item in the collection.
- rTag.HMAC = tagItems[len(tagItems)-1]
-
- // Remove the HMAC from the list.
- tagItems = tagItems[:len(tagItems)-1]
-
- // Version will be the first element.
- rTag.Version = tagItems[0]
- if rTag.Version != roleTagVersion {
- return nil, fmt.Errorf("invalid role tag version")
- }
-
- // Nonce will be the second element.
- rTag.Nonce = tagItems[1]
-
- // Delete the version and nonce from the list.
- tagItems = tagItems[2:]
-
- for _, tagItem := range tagItems {
- var err error
- switch {
- case strings.Contains(tagItem, "i="):
- rTag.InstanceID = strings.TrimPrefix(tagItem, "i=")
- case strings.Contains(tagItem, "r="):
- rTag.Role = strings.TrimPrefix(tagItem, "r=")
- case strings.Contains(tagItem, "p="):
- rTag.Policies = strings.Split(strings.TrimPrefix(tagItem, "p="), ",")
- case strings.Contains(tagItem, "d="):
- rTag.DisallowReauthentication, err = strconv.ParseBool(strings.TrimPrefix(tagItem, "d="))
- if err != nil {
- return nil, err
- }
- case strings.Contains(tagItem, "m="):
- rTag.AllowInstanceMigration, err = strconv.ParseBool(strings.TrimPrefix(tagItem, "m="))
- if err != nil {
- return nil, err
- }
- case strings.Contains(tagItem, "t="):
- rTag.MaxTTL, err = time.ParseDuration(fmt.Sprintf("%ss", strings.TrimPrefix(tagItem, "t=")))
- if err != nil {
- return nil, err
- }
- default:
- return nil, fmt.Errorf("unrecognized item %s in tag", tagItem)
- }
- }
-
- if rTag.Role == "" {
- return nil, fmt.Errorf("missing role name")
- }
-
- roleEntry, err := b.lockedAWSRole(s, rTag.Role)
- if err != nil {
- return nil, err
- }
- if roleEntry == nil {
- return nil, fmt.Errorf("entry not found for %s", rTag.Role)
- }
-
- // Create a HMAC of the plaintext value of role tag and compare it with the given value.
- verified, err := verifyRoleTagValue(rTag, roleEntry)
- if err != nil {
- return nil, err
- }
- if !verified {
- return nil, fmt.Errorf("role tag signature verification failed")
- }
-
- return rTag, nil
-}
-
-// Creates base64 encoded HMAC using a per-role key.
-func createRoleTagHMACBase64(key, value string) (string, error) {
- if key == "" {
- return "", fmt.Errorf("invalid HMAC key")
- }
- hm := hmac.New(sha256.New, []byte(key))
- hm.Write([]byte(value))
-
- // base64 encode the hmac bytes.
- return base64.StdEncoding.EncodeToString(hm.Sum(nil)), nil
-}
-
-// Creates a base64 encoded random nonce.
-func createRoleTagNonce() (string, error) {
- if uuidBytes, err := uuid.GenerateRandomBytes(8); err != nil {
- return "", err
- } else {
- return base64.StdEncoding.EncodeToString(uuidBytes), nil
- }
-}
-
-// Struct roleTag represents a role tag in a struc form.
-type roleTag struct {
- Version string `json:"version" structs:"version" mapstructure:"version"`
- InstanceID string `json:"instance_id" structs:"instance_id" mapstructure:"instance_id"`
- Nonce string `json:"nonce" structs:"nonce" mapstructure:"nonce"`
- Policies []string `json:"policies" structs:"policies" mapstructure:"policies"`
- MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
- Role string `json:"role" structs:"role" mapstructure:"role"`
- HMAC string `json:"hmac" structs:"hmac" mapstructure:"hmac"`
- DisallowReauthentication bool `json:"disallow_reauthentication" structs:"disallow_reauthentication" mapstructure:"disallow_reauthentication"`
- AllowInstanceMigration bool `json:"allow_instance_migration" structs:"allow_instance_migration" mapstructure:"allow_instance_migration"`
-}
-
-func (rTag1 *roleTag) Equal(rTag2 *roleTag) bool {
- return rTag1 != nil &&
- rTag2 != nil &&
- rTag1.Version == rTag2.Version &&
- rTag1.Nonce == rTag2.Nonce &&
- policyutil.EquivalentPolicies(rTag1.Policies, rTag2.Policies) &&
- rTag1.MaxTTL == rTag2.MaxTTL &&
- rTag1.Role == rTag2.Role &&
- rTag1.HMAC == rTag2.HMAC &&
- rTag1.InstanceID == rTag2.InstanceID &&
- rTag1.DisallowReauthentication == rTag2.DisallowReauthentication &&
- rTag1.AllowInstanceMigration == rTag2.AllowInstanceMigration
-}
-
-const pathRoleTagSyn = `
-Create a tag on a role in order to be able to further restrict the capabilities of a role.
-`
-
-const pathRoleTagDesc = `
-If there are needs to apply only a subset of role's capabilities to any specific
-instance, create a role tag using this endpoint and attach the tag on the instance
-before performing login.
-
-To be able to create a role tag, the 'role_tag' option on the role should be
-enabled via the endpoint 'role/'. Also, the policies to be associated
-with the tag should be a subset of the policies associated with the registered role.
-
-This endpoint will return both the 'key' and the 'value' of the tag to be set
-on the EC2 instance.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go
deleted file mode 100644
index 21c87ab..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go
+++ /dev/null
@@ -1,666 +0,0 @@
-package awsauth
-
-import (
- "reflect"
- "strings"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
-)
-
-func TestBackend_pathRoleEc2(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- data := map[string]interface{}{
- "auth_type": "ec2",
- "policies": "p,q,r,s",
- "max_ttl": "2h",
- "bound_ami_id": "ami-abcd123",
- }
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/ami-abcd123",
- Data: data,
- Storage: storage,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create role")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "role/ami-abcd123",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatal("failed to read the role entry")
- }
- if !policyutil.EquivalentPolicies(strings.Split(data["policies"].(string), ","), resp.Data["policies"].([]string)) {
- t.Fatalf("bad: policies: expected: %#v\ngot: %#v\n", data, resp.Data)
- }
-
- data["allow_instance_migration"] = true
- data["disallow_reauthentication"] = true
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "role/ami-abcd123",
- Data: data,
- Storage: storage,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create role: %s", resp.Data["error"])
- }
- if err != nil {
- t.Fatal(err)
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "role/ami-abcd123",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if !resp.Data["allow_instance_migration"].(bool) || !resp.Data["disallow_reauthentication"].(bool) {
- t.Fatal("bad: expected:true got:false\n")
- }
-
- // add another entry, to test listing of role entries
- data["bound_ami_id"] = "ami-abcd456"
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/ami-abcd456",
- Data: data,
- Storage: storage,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create role: %s", resp.Data["error"])
- }
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ListOperation,
- Path: "roles",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.Data == nil || resp.IsError() {
- t.Fatalf("failed to list the role entries")
- }
- keys := resp.Data["keys"].([]string)
- if len(keys) != 2 {
- t.Fatalf("bad: keys: %#v\n", keys)
- }
-
- _, err = b.HandleRequest(&logical.Request{
- Operation: logical.DeleteOperation,
- Path: "role/ami-abcd123",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "role/ami-abcd123",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil {
- t.Fatalf("bad: response: expected:nil actual:%#v\n", resp)
- }
-}
-
-func Test_enableIamIDResolution(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
- roleName := "upgradable_role"
-
- b.resolveArnToUniqueIDFunc = resolveArnToFakeUniqueId
-
- data := map[string]interface{}{
- "auth_type": iamAuthType,
- "policies": "p,q",
- "bound_iam_principal_arn": "arn:aws:iam::123456789012:role/MyRole",
- "resolve_aws_unique_ids": false,
- }
-
- submitRequest := func(roleName string, op logical.Operation) (*logical.Response, error) {
- return b.HandleRequest(&logical.Request{
- Operation: op,
- Path: "role/" + roleName,
- Data: data,
- Storage: storage,
- })
- }
-
- resp, err := submitRequest(roleName, logical.CreateOperation)
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create role: %#v", resp)
- }
-
- resp, err = submitRequest(roleName, logical.ReadOperation)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatalf("failed to read role: resp:%#v,\nerr:%#v", resp, err)
- }
- if resp.Data["bound_iam_principal_id"] != "" {
- t.Fatalf("expected to get no unique ID in role, but got %q", resp.Data["bound_iam_principal_id"])
- }
-
- data = map[string]interface{}{
- "resolve_aws_unique_ids": true,
- }
- resp, err = submitRequest(roleName, logical.UpdateOperation)
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil && resp.IsError() {
- t.Fatalf("unable to upgrade role to resolve internal IDs: resp:%#v", resp)
- }
-
- resp, err = submitRequest(roleName, logical.ReadOperation)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatalf("failed to read role: resp:%#v,\nerr:%#v", resp, err)
- }
- if resp.Data["bound_iam_principal_id"] != "FakeUniqueId1" {
- t.Fatalf("bad: expected upgrade of role resolve principal ID to %q, but got %q instead", "FakeUniqueId1", resp.Data["bound_iam_principal_id"])
- }
-}
-
-func TestBackend_pathIam(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // make sure we start with empty roles, which gives us confidence that the read later
- // actually is the two roles we created
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.ListOperation,
- Path: "roles",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.Data == nil || resp.IsError() {
- t.Fatalf("failed to list role entries")
- }
- if resp.Data["keys"] != nil {
- t.Fatalf("Received roles when expected none")
- }
-
- data := map[string]interface{}{
- "auth_type": iamAuthType,
- "policies": "p,q,r,s",
- "max_ttl": "2h",
- "bound_iam_principal_arn": "n:aws:iam::123456789012:user/MyUserName",
- "resolve_aws_unique_ids": false,
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/MyRoleName",
- Data: data,
- Storage: storage,
- })
-
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create the role entry; resp: %#v", resp)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "role/MyRoleName",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.IsError() {
- t.Fatal("failed to read the role entry")
- }
- if !policyutil.EquivalentPolicies(strings.Split(data["policies"].(string), ","), resp.Data["policies"].([]string)) {
- t.Fatalf("bad: policies: expected %#v\ngot: %#v\n", data, resp.Data)
- }
-
- data["inferred_entity_type"] = "invalid"
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/ShouldNeverExist",
- Data: data,
- Storage: storage,
- })
- if resp == nil || !resp.IsError() {
- t.Fatalf("Created role with invalid inferred_entity_type")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- data["inferred_entity_type"] = ec2EntityType
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/ShouldNeverExist",
- Data: data,
- Storage: storage,
- })
- if resp == nil || !resp.IsError() {
- t.Fatalf("Created role without necessary inferred_aws_region")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- delete(data, "bound_iam_principal_arn")
- data["inferred_aws_region"] = "us-east-1"
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/ShouldNeverExist",
- Data: data,
- Storage: storage,
- })
- if resp == nil || !resp.IsError() {
- t.Fatalf("Created role without anything bound")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- // generate a second role, ensure we're able to list both
- data["bound_ami_id"] = "ami-abcd123"
- secondRole := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "role/MyOtherRoleName",
- Data: data,
- Storage: storage,
- }
- resp, err = b.HandleRequest(secondRole)
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create additional role: %v", *secondRole)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ListOperation,
- Path: "roles",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || resp.Data == nil || resp.IsError() {
- t.Fatalf("failed to list role entries")
- }
- keys := resp.Data["keys"].([]string)
- if len(keys) != 2 {
- t.Fatalf("bad: keys %#v\n", keys)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.DeleteOperation,
- Path: "role/MyOtherRoleName",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "role/MyOtherRoleName",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil {
- t.Fatalf("bad: response: expected: nil actual:%3v\n", resp)
- }
-}
-
-func TestBackend_pathRoleMixedTypes(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- data := map[string]interface{}{
- "policies": "p,q,r,s",
- "bound_ami_id": "ami-abc1234",
- "auth_type": "ec2,invalid",
- }
-
- submitRequest := func(roleName string, op logical.Operation) (*logical.Response, error) {
- return b.HandleRequest(&logical.Request{
- Operation: op,
- Path: "role/" + roleName,
- Data: data,
- Storage: storage,
- })
- }
-
- resp, err := submitRequest("shouldNeverExist", logical.CreateOperation)
- if resp == nil || !resp.IsError() {
- t.Fatalf("created role with invalid auth_type; resp: %#v", resp)
- }
- if err != nil {
- t.Fatal(err)
- }
-
- data["auth_type"] = "ec2,,iam"
- resp, err = submitRequest("shouldNeverExist", logical.CreateOperation)
- if resp == nil || !resp.IsError() {
- t.Fatalf("created role mixed auth types")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- data["auth_type"] = ec2AuthType
- resp, err = submitRequest("ec2_to_iam", logical.CreateOperation)
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create valid role; resp: %#v", resp)
- }
- if err != nil {
- t.Fatal(err)
- }
-
- data["auth_type"] = iamAuthType
- delete(data, "bound_ami_id")
- data["bound_iam_principal_arn"] = "arn:aws:iam::123456789012:role/MyRole"
- resp, err = submitRequest("ec2_to_iam", logical.UpdateOperation)
- if resp == nil || !resp.IsError() {
- t.Fatalf("changed auth type on the role")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- data["inferred_entity_type"] = ec2EntityType
- data["inferred_aws_region"] = "us-east-1"
- data["resolve_aws_unique_ids"] = false
- resp, err = submitRequest("multipleTypesInferred", logical.CreateOperation)
- if err != nil {
- t.Fatal(err)
- }
- if resp.IsError() {
- t.Fatalf("didn't allow creation of roles with only inferred bindings")
- }
-
- b.resolveArnToUniqueIDFunc = resolveArnToFakeUniqueId
- data["resolve_aws_unique_ids"] = true
- resp, err = submitRequest("withInternalIdResolution", logical.CreateOperation)
- if err != nil {
- t.Fatal(err)
- }
- if resp.IsError() {
- t.Fatalf("didn't allow creation of role resolving unique IDs")
- }
- resp, err = submitRequest("withInternalIdResolution", logical.ReadOperation)
- if err != nil {
- t.Fatal(err)
- }
- if resp.Data["bound_iam_principal_id"] != "FakeUniqueId1" {
- t.Fatalf("expected fake unique ID of FakeUniqueId1, got %q", resp.Data["bound_iam_principal_id"])
- }
- data["resolve_aws_unique_ids"] = false
- resp, err = submitRequest("withInternalIdResolution", logical.UpdateOperation)
- if err != nil {
- t.Fatal(err)
- }
- if !resp.IsError() {
- t.Fatalf("allowed changing resolve_aws_unique_ids from true to false")
- }
-
-}
-
-func TestAwsEc2_RoleCrud(t *testing.T) {
- var err error
- var resp *logical.Response
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- role1Data := map[string]interface{}{
- "auth_type": "ec2",
- "bound_vpc_id": "testvpcid",
- "allow_instance_migration": true,
- "policies": "testpolicy1,testpolicy2",
- }
- roleReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "role/role1",
- Data: role1Data,
- }
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("resp: %#v, err: %v", resp, err)
- }
-
- roleData := map[string]interface{}{
- "auth_type": "ec2",
- "bound_ami_id": "testamiid",
- "bound_account_id": "testaccountid",
- "bound_region": "testregion",
- "bound_iam_role_arn": "arn:aws:iam::123456789012:role/MyRole",
- "bound_iam_instance_profile_arn": "arn:aws:iam::123456789012:instance-profile/MyInstanceProfile",
- "bound_subnet_id": "testsubnetid",
- "bound_vpc_id": "testvpcid",
- "role_tag": "testtag",
- "resolve_aws_unique_ids": false,
- "allow_instance_migration": true,
- "ttl": "10m",
- "max_ttl": "20m",
- "policies": "testpolicy1,testpolicy2",
- "disallow_reauthentication": true,
- "hmac_key": "testhmackey",
- "period": "1m",
- }
-
- roleReq.Path = "role/testrole"
- roleReq.Data = roleData
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("resp: %#v, err: %v", resp, err)
- }
-
- roleReq.Operation = logical.ReadOperation
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("resp: %#v, err: %v", resp, err)
- }
-
- expected := map[string]interface{}{
- "auth_type": ec2AuthType,
- "bound_ami_id": "testamiid",
- "bound_account_id": "testaccountid",
- "bound_region": "testregion",
- "bound_iam_principal_arn": "",
- "bound_iam_principal_id": "",
- "bound_iam_role_arn": "arn:aws:iam::123456789012:role/MyRole",
- "bound_iam_instance_profile_arn": "arn:aws:iam::123456789012:instance-profile/MyInstanceProfile",
- "bound_subnet_id": "testsubnetid",
- "bound_vpc_id": "testvpcid",
- "inferred_entity_type": "",
- "inferred_aws_region": "",
- "resolve_aws_unique_ids": false,
- "role_tag": "testtag",
- "allow_instance_migration": true,
- "ttl": time.Duration(600),
- "max_ttl": time.Duration(1200),
- "policies": []string{"testpolicy1", "testpolicy2"},
- "disallow_reauthentication": true,
- "period": time.Duration(60),
- }
-
- if !reflect.DeepEqual(expected, resp.Data) {
- t.Fatalf("bad: role data: expected: %#v\n actual: %#v", expected, resp.Data)
- }
-
- roleData["bound_vpc_id"] = "newvpcid"
- roleReq.Operation = logical.UpdateOperation
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("resp: %#v, err: %v", resp, err)
- }
-
- roleReq.Operation = logical.ReadOperation
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("resp: %#v, err: %v", resp, err)
- }
-
- expected["bound_vpc_id"] = "newvpcid"
-
- if !reflect.DeepEqual(expected, resp.Data) {
- t.Fatalf("bad: role data: expected: %#v\n actual: %#v", expected, resp.Data)
- }
-
- roleReq.Operation = logical.DeleteOperation
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("resp: %#v, err: %v", resp, err)
- }
-
- if resp != nil {
- t.Fatalf("failed to delete role entry")
- }
-}
-
-func TestAwsEc2_RoleDurationSeconds(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- roleData := map[string]interface{}{
- "auth_type": "ec2",
- "bound_iam_instance_profile_arn": "arn:aws:iam::123456789012:instance-profile/test-profile-name",
- "resolve_aws_unique_ids": false,
- "ttl": "10s",
- "max_ttl": "20s",
- "period": "30s",
- }
-
- roleReq := &logical.Request{
- Operation: logical.CreateOperation,
- Storage: storage,
- Path: "role/testrole",
- Data: roleData,
- }
-
- resp, err := b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("resp: %#v, err: %v", resp, err)
- }
-
- roleReq.Operation = logical.ReadOperation
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("resp: %#v, err: %v", resp, err)
- }
-
- if int64(resp.Data["ttl"].(time.Duration)) != 10 {
- t.Fatalf("bad: period; expected: 10, actual: %d", resp.Data["ttl"])
- }
- if int64(resp.Data["max_ttl"].(time.Duration)) != 20 {
- t.Fatalf("bad: period; expected: 20, actual: %d", resp.Data["max_ttl"])
- }
- if int64(resp.Data["period"].(time.Duration)) != 30 {
- t.Fatalf("bad: period; expected: 30, actual: %d", resp.Data["period"])
- }
-}
-
-func resolveArnToFakeUniqueId(s logical.Storage, arn string) (string, error) {
- return "FakeUniqueId1", nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_roletag_blacklist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_roletag_blacklist.go
deleted file mode 100644
index 32fded8..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_roletag_blacklist.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package awsauth
-
-import (
- "encoding/base64"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathRoletagBlacklist(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roletag-blacklist/(?P.*)",
- Fields: map[string]*framework.FieldSchema{
- "role_tag": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Role tag to be blacklisted. The tag can be supplied as-is. In order
-to avoid any encoding problems, it can be base64 encoded.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRoletagBlacklistUpdate,
- logical.ReadOperation: b.pathRoletagBlacklistRead,
- logical.DeleteOperation: b.pathRoletagBlacklistDelete,
- },
-
- HelpSynopsis: pathRoletagBlacklistSyn,
- HelpDescription: pathRoletagBlacklistDesc,
- }
-}
-
-// Path to list all the blacklisted tags.
-func pathListRoletagBlacklist(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roletag-blacklist/?",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoletagBlacklistsList,
- },
-
- HelpSynopsis: pathListRoletagBlacklistHelpSyn,
- HelpDescription: pathListRoletagBlacklistHelpDesc,
- }
-}
-
-// Lists all the blacklisted role tags.
-func (b *backend) pathRoletagBlacklistsList(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.blacklistMutex.RLock()
- defer b.blacklistMutex.RUnlock()
-
- tags, err := req.Storage.List("blacklist/roletag/")
- if err != nil {
- return nil, err
- }
-
- // Tags are base64 encoded before indexing to avoid problems
- // with the path separators being present in the tag.
- // Reverse it before returning the list response.
- for i, keyB64 := range tags {
- if key, err := base64.StdEncoding.DecodeString(keyB64); err != nil {
- return nil, err
- } else {
- // Overwrite the result with the decoded string.
- tags[i] = string(key)
- }
- }
- return logical.ListResponse(tags), nil
-}
-
-// Fetch an entry from the role tag blacklist for a given tag.
-// This method takes a role tag in its original form and not a base64 encoded form.
-func (b *backend) lockedBlacklistRoleTagEntry(s logical.Storage, tag string) (*roleTagBlacklistEntry, error) {
- b.blacklistMutex.RLock()
- defer b.blacklistMutex.RUnlock()
-
- return b.nonLockedBlacklistRoleTagEntry(s, tag)
-}
-
-func (b *backend) nonLockedBlacklistRoleTagEntry(s logical.Storage, tag string) (*roleTagBlacklistEntry, error) {
- entry, err := s.Get("blacklist/roletag/" + base64.StdEncoding.EncodeToString([]byte(tag)))
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result roleTagBlacklistEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
- return &result, nil
-}
-
-// Deletes an entry from the role tag blacklist for a given tag.
-func (b *backend) pathRoletagBlacklistDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.blacklistMutex.Lock()
- defer b.blacklistMutex.Unlock()
-
- tag := data.Get("role_tag").(string)
- if tag == "" {
- return logical.ErrorResponse("missing role_tag"), nil
- }
-
- return nil, req.Storage.Delete("blacklist/roletag/" + base64.StdEncoding.EncodeToString([]byte(tag)))
-}
-
-// If the given role tag is blacklisted, returns the details of the blacklist entry.
-// Returns 'nil' otherwise.
-func (b *backend) pathRoletagBlacklistRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
-
- tag := data.Get("role_tag").(string)
- if tag == "" {
- return logical.ErrorResponse("missing role_tag"), nil
- }
-
- entry, err := b.lockedBlacklistRoleTagEntry(req.Storage, tag)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "creation_time": entry.CreationTime.Format(time.RFC3339Nano),
- "expiration_time": entry.ExpirationTime.Format(time.RFC3339Nano),
- },
- }, nil
-}
-
-// pathRoletagBlacklistUpdate is used to blacklist a given role tag.
-// Before a role tag is blacklisted, the correctness of the plaintext part
-// in the role tag is verified using the associated HMAC.
-func (b *backend) pathRoletagBlacklistUpdate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
-
- // The role_tag value provided, optionally can be base64 encoded.
- tagInput := data.Get("role_tag").(string)
- if tagInput == "" {
- return logical.ErrorResponse("missing role_tag"), nil
- }
-
- tag := ""
-
- // Try to base64 decode the value.
- tagBytes, err := base64.StdEncoding.DecodeString(tagInput)
- if err != nil {
- // If the decoding failed, use the value as-is.
- tag = tagInput
- } else {
- // If the decoding succeeded, use the decoded value.
- tag = string(tagBytes)
- }
-
- // Parse and verify the role tag from string form to a struct form and verify it.
- rTag, err := b.parseAndVerifyRoleTagValue(req.Storage, tag)
- if err != nil {
- return nil, err
- }
- if rTag == nil {
- return logical.ErrorResponse("failed to verify the role tag and parse it"), nil
- }
-
- // Get the entry for the role mentioned in the role tag.
- roleEntry, err := b.lockedAWSRole(req.Storage, rTag.Role)
- if err != nil {
- return nil, err
- }
- if roleEntry == nil {
- return logical.ErrorResponse("role entry not found"), nil
- }
-
- b.blacklistMutex.Lock()
- defer b.blacklistMutex.Unlock()
-
- // Check if the role tag is already blacklisted. If yes, update it.
- blEntry, err := b.nonLockedBlacklistRoleTagEntry(req.Storage, tag)
- if err != nil {
- return nil, err
- }
- if blEntry == nil {
- blEntry = &roleTagBlacklistEntry{}
- }
-
- currentTime := time.Now()
-
- // Check if this is a creation of blacklist entry.
- if blEntry.CreationTime.IsZero() {
- // Set the creation time for the blacklist entry.
- // This should not be updated after setting it once.
- // If blacklist operation is invoked more than once, only update the expiration time.
- blEntry.CreationTime = currentTime
- }
-
- // Decide the expiration time based on the max_ttl values. Since this is
- // restricting access, use the greatest duration, not the least.
- maxDur := rTag.MaxTTL
- if roleEntry.MaxTTL > maxDur {
- maxDur = roleEntry.MaxTTL
- }
- if b.System().MaxLeaseTTL() > maxDur {
- maxDur = b.System().MaxLeaseTTL()
- }
-
- blEntry.ExpirationTime = currentTime.Add(maxDur)
-
- entry, err := logical.StorageEntryJSON("blacklist/roletag/"+base64.StdEncoding.EncodeToString([]byte(tag)), blEntry)
- if err != nil {
- return nil, err
- }
-
- // Store the blacklist entry.
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-type roleTagBlacklistEntry struct {
- CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
- ExpirationTime time.Time `json:"expiration_time" structs:"expiration_time" mapstructure:"expiration_time"`
-}
-
-const pathRoletagBlacklistSyn = `
-Blacklist a previously created role tag.
-`
-
-const pathRoletagBlacklistDesc = `
-Blacklist a role tag so that it cannot be used by any EC2 instance to perform further
-logins. This can be used if the role tag is suspected or believed to be possessed by
-an unintended party.
-
-By default, a cron task will periodically look for expired entries in the blacklist
-and deletes them. The duration to periodically run this, is one hour by default.
-However, this can be configured using the 'config/tidy/roletags' endpoint. This tidy
-action can be triggered via the API as well, using the 'tidy/roletags' endpoint.
-
-Also note that delete operation is supported on this endpoint to remove specific
-entries from the blacklist.
-`
-
-const pathListRoletagBlacklistHelpSyn = `
-Lists the blacklisted role tags.
-`
-
-const pathListRoletagBlacklistHelpDesc = `
-Lists all the entries present in the blacklist. This will show both the valid
-entries and the expired entries in the blacklist. Use 'tidy/roletags' endpoint
-to clean-up the blacklist of role tags based on expiration time.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_identity_whitelist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_identity_whitelist.go
deleted file mode 100644
index c77687f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_identity_whitelist.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package awsauth
-
-import (
- "fmt"
- "sync/atomic"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathTidyIdentityWhitelist(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "tidy/identity-whitelist$",
- Fields: map[string]*framework.FieldSchema{
- "safety_buffer": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 259200,
- Description: `The amount of extra time that must have passed beyond the identity's
-expiration, before it is removed from the backend storage.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathTidyIdentityWhitelistUpdate,
- },
-
- HelpSynopsis: pathTidyIdentityWhitelistSyn,
- HelpDescription: pathTidyIdentityWhitelistDesc,
- }
-}
-
-// tidyWhitelistIdentity is used to delete entries in the whitelist that are expired.
-func (b *backend) tidyWhitelistIdentity(s logical.Storage, safety_buffer int) error {
- grabbed := atomic.CompareAndSwapUint32(&b.tidyWhitelistCASGuard, 0, 1)
- if grabbed {
- defer atomic.StoreUint32(&b.tidyWhitelistCASGuard, 0)
- } else {
- return fmt.Errorf("identity whitelist tidy operation already running")
- }
-
- bufferDuration := time.Duration(safety_buffer) * time.Second
-
- identities, err := s.List("whitelist/identity/")
- if err != nil {
- return err
- }
-
- for _, instanceID := range identities {
- identityEntry, err := s.Get("whitelist/identity/" + instanceID)
- if err != nil {
- return fmt.Errorf("error fetching identity of instanceID %s: %s", instanceID, err)
- }
-
- if identityEntry == nil {
- return fmt.Errorf("identity entry for instanceID %s is nil", instanceID)
- }
-
- if identityEntry.Value == nil || len(identityEntry.Value) == 0 {
- return fmt.Errorf("found identity entry for instanceID %s but actual identity is empty", instanceID)
- }
-
- var result whitelistIdentity
- if err := identityEntry.DecodeJSON(&result); err != nil {
- return err
- }
-
- if time.Now().After(result.ExpirationTime.Add(bufferDuration)) {
- if err := s.Delete("whitelist/identity" + instanceID); err != nil {
- return fmt.Errorf("error deleting identity of instanceID %s from storage: %s", instanceID, err)
- }
- }
- }
-
- return nil
-}
-
-// pathTidyIdentityWhitelistUpdate is used to delete entries in the whitelist that are expired.
-func (b *backend) pathTidyIdentityWhitelistUpdate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return nil, b.tidyWhitelistIdentity(req.Storage, data.Get("safety_buffer").(int))
-}
-
-const pathTidyIdentityWhitelistSyn = `
-Clean-up the whitelist instance identity entries.
-`
-
-const pathTidyIdentityWhitelistDesc = `
-When an instance identity is whitelisted, the expiration time of the whitelist
-entry is set based on the maximum 'max_ttl' value set on: the role, the role tag
-and the backend's mount.
-
-When this endpoint is invoked, all the entries that are expired will be deleted.
-A 'safety_buffer' (duration in seconds) can be provided, to ensure deletion of
-only those entries that are expired before 'safety_buffer' seconds.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_roletag_blacklist.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_roletag_blacklist.go
deleted file mode 100644
index 3970a18..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_tidy_roletag_blacklist.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package awsauth
-
-import (
- "fmt"
- "sync/atomic"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathTidyRoletagBlacklist(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "tidy/roletag-blacklist$",
- Fields: map[string]*framework.FieldSchema{
- "safety_buffer": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 259200, // 72h
- Description: `The amount of extra time that must have passed beyond the roletag
-expiration, before it is removed from the backend storage.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathTidyRoletagBlacklistUpdate,
- },
-
- HelpSynopsis: pathTidyRoletagBlacklistSyn,
- HelpDescription: pathTidyRoletagBlacklistDesc,
- }
-}
-
-// tidyBlacklistRoleTag is used to clean-up the entries in the role tag blacklist.
-func (b *backend) tidyBlacklistRoleTag(s logical.Storage, safety_buffer int) error {
- grabbed := atomic.CompareAndSwapUint32(&b.tidyBlacklistCASGuard, 0, 1)
- if grabbed {
- defer atomic.StoreUint32(&b.tidyBlacklistCASGuard, 0)
- } else {
- return fmt.Errorf("roletag blacklist tidy operation already running")
- }
-
- bufferDuration := time.Duration(safety_buffer) * time.Second
- tags, err := s.List("blacklist/roletag/")
- if err != nil {
- return err
- }
-
- for _, tag := range tags {
- tagEntry, err := s.Get("blacklist/roletag/" + tag)
- if err != nil {
- return fmt.Errorf("error fetching tag %s: %s", tag, err)
- }
-
- if tagEntry == nil {
- return fmt.Errorf("tag entry for tag %s is nil", tag)
- }
-
- if tagEntry.Value == nil || len(tagEntry.Value) == 0 {
- return fmt.Errorf("found entry for tag %s but actual tag is empty", tag)
- }
-
- var result roleTagBlacklistEntry
- if err := tagEntry.DecodeJSON(&result); err != nil {
- return err
- }
-
- if time.Now().After(result.ExpirationTime.Add(bufferDuration)) {
- if err := s.Delete("blacklist/roletag" + tag); err != nil {
- return fmt.Errorf("error deleting tag %s from storage: %s", tag, err)
- }
- }
- }
-
- return nil
-}
-
-// pathTidyRoletagBlacklistUpdate is used to clean-up the entries in the role tag blacklist.
-func (b *backend) pathTidyRoletagBlacklistUpdate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return nil, b.tidyBlacklistRoleTag(req.Storage, data.Get("safety_buffer").(int))
-}
-
-const pathTidyRoletagBlacklistSyn = `
-Clean-up the blacklist role tag entries.
-`
-
-const pathTidyRoletagBlacklistDesc = `
-When a role tag is blacklisted, the expiration time of the blacklist entry is
-set based on the maximum 'max_ttl' value set on: the role, the role tag and the
-backend's mount.
-
-When this endpoint is invoked, all the entries that are expired will be deleted.
-A 'safety_buffer' (duration in seconds) can be provided, to ensure deletion of
-only those entries that are expired before 'safety_buffer' seconds.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go
deleted file mode 100644
index 9420164..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package cert
-
-import (
- "strings"
- "sync"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: backendHelp,
- PathsSpecial: &logical.Paths{
- Unauthenticated: []string{
- "login",
- },
- },
- Paths: append([]*framework.Path{
- pathConfig(&b),
- pathLogin(&b),
- pathListCerts(&b),
- pathCerts(&b),
- pathCRLs(&b),
- }),
- AuthRenew: b.pathLoginRenew,
- Invalidate: b.invalidate,
- BackendType: logical.TypeCredential,
- }
-
- b.crlUpdateMutex = &sync.RWMutex{}
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
- MapCertId *framework.PathMap
-
- crls map[string]CRLInfo
- crlUpdateMutex *sync.RWMutex
-}
-
-func (b *backend) invalidate(key string) {
- switch {
- case strings.HasPrefix(key, "crls/"):
- b.crlUpdateMutex.Lock()
- defer b.crlUpdateMutex.Unlock()
- b.crls = nil
- }
-}
-
-const backendHelp = `
-The "cert" credential provider allows authentication using
-TLS client certificates. A client connects to Vault and uses
-the "login" endpoint to generate a client token.
-
-Trusted certificates are configured using the "certs/" endpoint
-by a user with root access. A certificate authority can be trusted,
-which permits all keys signed by it. Alternatively, self-signed
-certificates can be trusted avoiding the need for a CA.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go
deleted file mode 100644
index 4680d61..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go
+++ /dev/null
@@ -1,1181 +0,0 @@
-package cert
-
-import (
- "crypto/rand"
- "crypto/rsa"
- "crypto/tls"
- "crypto/x509"
- "crypto/x509/pkix"
- "fmt"
- "io"
- "io/ioutil"
- "math/big"
- "net"
- "os"
- "reflect"
- "testing"
- "time"
-
- "github.com/hashicorp/go-rootcerts"
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/mitchellh/mapstructure"
-)
-
-const (
- serverCertPath = "test-fixtures/cacert.pem"
- serverKeyPath = "test-fixtures/cakey.pem"
- serverCAPath = serverCertPath
-
- testRootCACertPath1 = "test-fixtures/testcacert1.pem"
- testRootCAKeyPath1 = "test-fixtures/testcakey1.pem"
- testCertPath1 = "test-fixtures/testissuedcert4.pem"
- testKeyPath1 = "test-fixtures/testissuedkey4.pem"
- testIssuedCertCRL = "test-fixtures/issuedcertcrl"
-
- testRootCACertPath2 = "test-fixtures/testcacert2.pem"
- testRootCAKeyPath2 = "test-fixtures/testcakey2.pem"
- testRootCertCRL = "test-fixtures/cacert2crl"
-)
-
-// Unlike testConnState, this method does not use the same 'tls.Config' objects for
-// both dialing and listening. Instead, it runs the server without specifying its CA.
-// But the client, presents the CA cert of the server to trust the server.
-// The client can present a cert and key which is completely independent of server's CA.
-// The connection state returned will contain the certificate presented by the client.
-func connectionState(serverCAPath, serverCertPath, serverKeyPath, clientCertPath, clientKeyPath string) (tls.ConnectionState, error) {
- serverKeyPair, err := tls.LoadX509KeyPair(serverCertPath, serverKeyPath)
- if err != nil {
- return tls.ConnectionState{}, err
- }
- // Prepare the listener configuration with server's key pair
- listenConf := &tls.Config{
- Certificates: []tls.Certificate{serverKeyPair},
- ClientAuth: tls.RequestClientCert,
- }
-
- clientKeyPair, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath)
- if err != nil {
- return tls.ConnectionState{}, err
- }
- // Load the CA cert required by the client to authenticate the server.
- rootConfig := &rootcerts.Config{
- CAFile: serverCAPath,
- }
- serverCAs, err := rootcerts.LoadCACerts(rootConfig)
- if err != nil {
- return tls.ConnectionState{}, err
- }
- // Prepare the dial configuration that the client uses to establish the connection.
- dialConf := &tls.Config{
- Certificates: []tls.Certificate{clientKeyPair},
- RootCAs: serverCAs,
- }
-
- // Start the server.
- list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf)
- if err != nil {
- return tls.ConnectionState{}, err
- }
- defer list.Close()
-
- // Accept connections.
- serverErrors := make(chan error, 1)
- connState := make(chan tls.ConnectionState)
- go func() {
- defer close(connState)
- serverConn, err := list.Accept()
- if err != nil {
- serverErrors <- err
- close(serverErrors)
- return
- }
- defer serverConn.Close()
-
- // Read the ping
- buf := make([]byte, 4)
- _, err = serverConn.Read(buf)
- if (err != nil) && (err != io.EOF) {
- serverErrors <- err
- close(serverErrors)
- return
- }
- close(serverErrors)
- connState <- serverConn.(*tls.Conn).ConnectionState()
- }()
-
- // Establish a connection from the client side and write a few bytes.
- clientErrors := make(chan error, 1)
- go func() {
- addr := list.Addr().String()
- conn, err := tls.Dial("tcp", addr, dialConf)
- if err != nil {
- clientErrors <- err
- close(clientErrors)
- return
- }
- defer conn.Close()
-
- // Write ping
- _, err = conn.Write([]byte("ping"))
- if err != nil {
- clientErrors <- err
- }
- close(clientErrors)
- }()
-
- for err = range clientErrors {
- if err != nil {
- return tls.ConnectionState{}, fmt.Errorf("error in client goroutine:%v", err)
- }
- }
-
- for err = range serverErrors {
- if err != nil {
- return tls.ConnectionState{}, fmt.Errorf("error in server goroutine:%v", err)
- }
- }
- // Grab the current state
- return <-connState, nil
-}
-
-func TestBackend_NonCAExpiry(t *testing.T) {
- var resp *logical.Response
- var err error
-
- // Create a self-signed certificate and issue a leaf certificate using the
- // CA cert
- template := &x509.Certificate{
- SerialNumber: big.NewInt(1234),
- Subject: pkix.Name{
- CommonName: "localhost",
- Organization: []string{"hashicorp"},
- OrganizationalUnit: []string{"vault"},
- },
- BasicConstraintsValid: true,
- NotBefore: time.Now().Add(-30 * time.Second),
- NotAfter: time.Now().Add(50 * time.Second),
- ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
- KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign),
- }
-
- // Set IP SAN
- parsedIP := net.ParseIP("127.0.0.1")
- if parsedIP == nil {
- t.Fatalf("failed to create parsed IP")
- }
- template.IPAddresses = []net.IP{parsedIP}
-
- // Private key for CA cert
- caPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048)
- if err != nil {
- t.Fatal(err)
- }
-
- // Marshalling to be able to create PEM file
- caPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(caPrivateKey)
-
- caPublicKey := &caPrivateKey.PublicKey
-
- template.IsCA = true
-
- caCertBytes, err := x509.CreateCertificate(rand.Reader, template, template, caPublicKey, caPrivateKey)
- if err != nil {
- t.Fatal(err)
- }
-
- caCert, err := x509.ParseCertificate(caCertBytes)
- if err != nil {
- t.Fatal(err)
- }
-
- parsedCaBundle := &certutil.ParsedCertBundle{
- Certificate: caCert,
- CertificateBytes: caCertBytes,
- PrivateKeyBytes: caPrivateKeyBytes,
- PrivateKeyType: certutil.RSAPrivateKey,
- }
-
- caCertBundle, err := parsedCaBundle.ToCertBundle()
- if err != nil {
- t.Fatal(err)
- }
-
- caCertFile, err := ioutil.TempFile("", "caCert")
- if err != nil {
- t.Fatal(err)
- }
-
- defer os.Remove(caCertFile.Name())
-
- if _, err := caCertFile.Write([]byte(caCertBundle.Certificate)); err != nil {
- t.Fatal(err)
- }
- if err := caCertFile.Close(); err != nil {
- t.Fatal(err)
- }
-
- caKeyFile, err := ioutil.TempFile("", "caKey")
- if err != nil {
- t.Fatal(err)
- }
-
- defer os.Remove(caKeyFile.Name())
-
- if _, err := caKeyFile.Write([]byte(caCertBundle.PrivateKey)); err != nil {
- t.Fatal(err)
- }
- if err := caKeyFile.Close(); err != nil {
- t.Fatal(err)
- }
-
- // Prepare template for non-CA cert
-
- template.IsCA = false
- template.SerialNumber = big.NewInt(5678)
-
- template.KeyUsage = x509.KeyUsage(x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign)
- issuedPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048)
- if err != nil {
- t.Fatal(err)
- }
-
- issuedPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(issuedPrivateKey)
-
- issuedPublicKey := &issuedPrivateKey.PublicKey
-
- // Keep a short certificate lifetime so logins can be tested both when
- // cert is valid and when it gets expired
- template.NotBefore = time.Now().Add(-2 * time.Second)
- template.NotAfter = time.Now().Add(3 * time.Second)
-
- issuedCertBytes, err := x509.CreateCertificate(rand.Reader, template, caCert, issuedPublicKey, caPrivateKey)
- if err != nil {
- t.Fatal(err)
- }
-
- issuedCert, err := x509.ParseCertificate(issuedCertBytes)
- if err != nil {
- t.Fatal(err)
- }
-
- parsedIssuedBundle := &certutil.ParsedCertBundle{
- Certificate: issuedCert,
- CertificateBytes: issuedCertBytes,
- PrivateKeyBytes: issuedPrivateKeyBytes,
- PrivateKeyType: certutil.RSAPrivateKey,
- }
-
- issuedCertBundle, err := parsedIssuedBundle.ToCertBundle()
- if err != nil {
- t.Fatal(err)
- }
-
- issuedCertFile, err := ioutil.TempFile("", "issuedCert")
- if err != nil {
- t.Fatal(err)
- }
-
- defer os.Remove(issuedCertFile.Name())
-
- if _, err := issuedCertFile.Write([]byte(issuedCertBundle.Certificate)); err != nil {
- t.Fatal(err)
- }
- if err := issuedCertFile.Close(); err != nil {
- t.Fatal(err)
- }
-
- issuedKeyFile, err := ioutil.TempFile("", "issuedKey")
- if err != nil {
- t.Fatal(err)
- }
-
- defer os.Remove(issuedKeyFile.Name())
-
- if _, err := issuedKeyFile.Write([]byte(issuedCertBundle.PrivateKey)); err != nil {
- t.Fatal(err)
- }
- if err := issuedKeyFile.Close(); err != nil {
- t.Fatal(err)
- }
-
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // Register the Non-CA certificate of the client key pair
- certData := map[string]interface{}{
- "certificate": issuedCertBundle.Certificate,
- "policies": "abc",
- "display_name": "cert1",
- "ttl": 10000,
- }
- certReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "certs/cert1",
- Storage: storage,
- Data: certData,
- }
-
- resp, err = b.HandleRequest(certReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Create connection state using the certificates generated
- connState, err := connectionState(caCertFile.Name(), caCertFile.Name(), caKeyFile.Name(), issuedCertFile.Name(), issuedKeyFile.Name())
- if err != nil {
- t.Fatalf("error testing connection state:%v", err)
- }
-
- loginReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "login",
- Connection: &logical.Connection{
- ConnState: &connState,
- },
- }
-
- // Login when the certificate is still valid. Login should succeed.
- resp, err = b.HandleRequest(loginReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Wait until the certificate expires
- time.Sleep(5 * time.Second)
-
- // Login attempt after certificate expiry should fail
- resp, err = b.HandleRequest(loginReq)
- if err == nil {
- t.Fatalf("expected error due to expired certificate")
- }
-}
-
-func TestBackend_RegisteredNonCA_CRL(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- nonCACert, err := ioutil.ReadFile(testCertPath1)
- if err != nil {
- t.Fatal(err)
- }
-
- // Register the Non-CA certificate of the client key pair
- certData := map[string]interface{}{
- "certificate": nonCACert,
- "policies": "abc",
- "display_name": "cert1",
- "ttl": 10000,
- }
- certReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "certs/cert1",
- Storage: storage,
- Data: certData,
- }
-
- resp, err := b.HandleRequest(certReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Connection state is presenting the client Non-CA cert and its key.
- // This is exactly what is registered at the backend.
- connState, err := connectionState(serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1)
- if err != nil {
- t.Fatalf("error testing connection state:%v", err)
- }
- loginReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "login",
- Connection: &logical.Connection{
- ConnState: &connState,
- },
- }
- // Login should succeed.
- resp, err = b.HandleRequest(loginReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Register a CRL containing the issued client certificate used above.
- issuedCRL, err := ioutil.ReadFile(testIssuedCertCRL)
- if err != nil {
- t.Fatal(err)
- }
- crlData := map[string]interface{}{
- "crl": issuedCRL,
- }
- crlReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "crls/issuedcrl",
- Data: crlData,
- }
- resp, err = b.HandleRequest(crlReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Attempt login with the same connection state but with the CRL registered
- resp, err = b.HandleRequest(loginReq)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || !resp.IsError() {
- t.Fatalf("expected failure due to revoked certificate")
- }
-}
-
-func TestBackend_CRLs(t *testing.T) {
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- clientCA1, err := ioutil.ReadFile(testRootCACertPath1)
- if err != nil {
- t.Fatal(err)
- }
- // Register the CA certificate of the client key pair
- certData := map[string]interface{}{
- "certificate": clientCA1,
- "policies": "abc",
- "display_name": "cert1",
- "ttl": 10000,
- }
-
- certReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "certs/cert1",
- Storage: storage,
- Data: certData,
- }
-
- resp, err := b.HandleRequest(certReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Connection state is presenting the client CA cert and its key.
- // This is exactly what is registered at the backend.
- connState, err := connectionState(serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath1, testRootCAKeyPath1)
- if err != nil {
- t.Fatalf("error testing connection state:%v", err)
- }
- loginReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "login",
- Connection: &logical.Connection{
- ConnState: &connState,
- },
- }
- resp, err = b.HandleRequest(loginReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Now, without changing the registered client CA cert, present from
- // the client side, a cert issued using the registered CA.
- connState, err = connectionState(serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1)
- if err != nil {
- t.Fatalf("error testing connection state: %v", err)
- }
- loginReq.Connection.ConnState = &connState
-
- // Attempt login with the updated connection
- resp, err = b.HandleRequest(loginReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Register a CRL containing the issued client certificate used above.
- issuedCRL, err := ioutil.ReadFile(testIssuedCertCRL)
- if err != nil {
- t.Fatal(err)
- }
- crlData := map[string]interface{}{
- "crl": issuedCRL,
- }
-
- crlReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: storage,
- Path: "crls/issuedcrl",
- Data: crlData,
- }
- resp, err = b.HandleRequest(crlReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Attempt login with the revoked certificate.
- resp, err = b.HandleRequest(loginReq)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || !resp.IsError() {
- t.Fatalf("expected failure due to revoked certificate")
- }
-
- // Register a different client CA certificate.
- clientCA2, err := ioutil.ReadFile(testRootCACertPath2)
- if err != nil {
- t.Fatal(err)
- }
- certData["certificate"] = clientCA2
- resp, err = b.HandleRequest(certReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Test login using a different client CA cert pair.
- connState, err = connectionState(serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath2, testRootCAKeyPath2)
- if err != nil {
- t.Fatalf("error testing connection state: %v", err)
- }
- loginReq.Connection.ConnState = &connState
-
- // Attempt login with the updated connection
- resp, err = b.HandleRequest(loginReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Register a CRL containing the root CA certificate used above.
- rootCRL, err := ioutil.ReadFile(testRootCertCRL)
- if err != nil {
- t.Fatal(err)
- }
- crlData["crl"] = rootCRL
- resp, err = b.HandleRequest(crlReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Attempt login with the same connection state but with the CRL registered
- resp, err = b.HandleRequest(loginReq)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil || !resp.IsError() {
- t.Fatalf("expected failure due to revoked certificate")
- }
-}
-
-func testFactory(t *testing.T) logical.Backend {
- b, err := Factory(&logical.BackendConfig{
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: 300 * time.Second,
- MaxLeaseTTLVal: 1800 * time.Second,
- },
- StorageView: &logical.InmemStorage{},
- })
- if err != nil {
- t.Fatalf("error: %s", err)
- }
- return b
-}
-
-// Test the certificates being registered to the backend
-func TestBackend_CertWrites(t *testing.T) {
- // CA cert
- ca1, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- // Non CA Cert
- ca2, err := ioutil.ReadFile("test-fixtures/keys/cert.pem")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- // Non CA cert without TLS web client authentication
- ca3, err := ioutil.ReadFile("test-fixtures/noclientauthcert.pem")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- tc := logicaltest.TestCase{
- Backend: testFactory(t),
- Steps: []logicaltest.TestStep{
- testAccStepCert(t, "aaa", ca1, "foo", "", false),
- testAccStepCert(t, "bbb", ca2, "foo", "", false),
- testAccStepCert(t, "ccc", ca3, "foo", "", true),
- },
- }
- tc.Steps = append(tc.Steps, testAccStepListCerts(t, []string{"aaa", "bbb"})...)
- logicaltest.Test(t, tc)
-}
-
-// Test a client trusted by a CA
-func TestBackend_basic_CA(t *testing.T) {
- connState, err := testConnState("test-fixtures/keys/cert.pem",
- "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatalf("error testing connection state: %v", err)
- }
- ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: testFactory(t),
- Steps: []logicaltest.TestStep{
- testAccStepCert(t, "web", ca, "foo", "", false),
- testAccStepLogin(t, connState),
- testAccStepCertLease(t, "web", ca, "foo"),
- testAccStepCertTTL(t, "web", ca, "foo"),
- testAccStepLogin(t, connState),
- testAccStepCertNoLease(t, "web", ca, "foo"),
- testAccStepLoginDefaultLease(t, connState),
- testAccStepCert(t, "web", ca, "foo", "*.example.com", false),
- testAccStepLogin(t, connState),
- testAccStepCert(t, "web", ca, "foo", "*.invalid.com", false),
- testAccStepLoginInvalid(t, connState),
- },
- })
-}
-
-// Test CRL behavior
-func TestBackend_Basic_CRLs(t *testing.T) {
- connState, err := testConnState("test-fixtures/keys/cert.pem",
- "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatalf("error testing connection state: %v", err)
- }
- ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- crl, err := ioutil.ReadFile("test-fixtures/root/root.crl")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: testFactory(t),
- Steps: []logicaltest.TestStep{
- testAccStepCertNoLease(t, "web", ca, "foo"),
- testAccStepLoginDefaultLease(t, connState),
- testAccStepAddCRL(t, crl, connState),
- testAccStepReadCRL(t, connState),
- testAccStepLoginInvalid(t, connState),
- testAccStepDeleteCRL(t, connState),
- testAccStepLoginDefaultLease(t, connState),
- },
- })
-}
-
-// Test a self-signed client (root CA) that is trusted
-func TestBackend_basic_singleCert(t *testing.T) {
- connState, err := testConnState("test-fixtures/root/rootcacert.pem",
- "test-fixtures/root/rootcakey.pem", "test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatalf("error testing connection state: %v", err)
- }
- ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: testFactory(t),
- Steps: []logicaltest.TestStep{
- testAccStepCert(t, "web", ca, "foo", "", false),
- testAccStepLogin(t, connState),
- testAccStepCert(t, "web", ca, "foo", "example.com", false),
- testAccStepLogin(t, connState),
- testAccStepCert(t, "web", ca, "foo", "invalid", false),
- testAccStepLoginInvalid(t, connState),
- },
- })
-}
-
-// Test against a collection of matching and non-matching rules
-func TestBackend_mixed_constraints(t *testing.T) {
- connState, err := testConnState("test-fixtures/keys/cert.pem",
- "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatalf("error testing connection state: %v", err)
- }
- ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: testFactory(t),
- Steps: []logicaltest.TestStep{
- testAccStepCert(t, "1unconstrained", ca, "foo", "", false),
- testAccStepCert(t, "2matching", ca, "foo", "*.example.com,whatever", false),
- testAccStepCert(t, "3invalid", ca, "foo", "invalid", false),
- testAccStepLogin(t, connState),
- // Assumes CertEntries are processed in alphabetical order (due to store.List), so we only match 2matching if 1unconstrained doesn't match
- testAccStepLoginWithName(t, connState, "2matching"),
- testAccStepLoginWithNameInvalid(t, connState, "3invalid"),
- },
- })
-}
-
-// Test an untrusted client
-func TestBackend_untrusted(t *testing.T) {
- connState, err := testConnState("test-fixtures/keys/cert.pem",
- "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatalf("error testing connection state: %v", err)
- }
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: testFactory(t),
- Steps: []logicaltest.TestStep{
- testAccStepLoginInvalid(t, connState),
- },
- })
-}
-
-func testAccStepAddCRL(t *testing.T, crl []byte, connState tls.ConnectionState) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "crls/test",
- ConnState: &connState,
- Data: map[string]interface{}{
- "crl": crl,
- },
- }
-}
-
-func testAccStepReadCRL(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "crls/test",
- ConnState: &connState,
- Check: func(resp *logical.Response) error {
- crlInfo := CRLInfo{}
- err := mapstructure.Decode(resp.Data, &crlInfo)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(crlInfo.Serials) != 1 {
- t.Fatalf("bad: expected CRL with length 1, got %d", len(crlInfo.Serials))
- }
- if _, ok := crlInfo.Serials["637101449987587619778072672905061040630001617053"]; !ok {
- t.Fatalf("bad: expected serial number not found in CRL")
- }
- return nil
- },
- }
-}
-
-func testAccStepDeleteCRL(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "crls/test",
- ConnState: &connState,
- }
-}
-
-func testAccStepLogin(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
- return testAccStepLoginWithName(t, connState, "")
-}
-
-func testAccStepLoginWithName(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login",
- Unauthenticated: true,
- ConnState: &connState,
- Check: func(resp *logical.Response) error {
- if resp.Auth.TTL != 1000*time.Second {
- t.Fatalf("bad lease length: %#v", resp.Auth)
- }
-
- if certName != "" && resp.Auth.DisplayName != ("mnt-"+certName) {
- t.Fatalf("matched the wrong cert: %#v", resp.Auth.DisplayName)
- }
-
- fn := logicaltest.TestCheckAuth([]string{"default", "foo"})
- return fn(resp)
- },
- Data: map[string]interface{}{
- "name": certName,
- },
- }
-}
-
-func testAccStepLoginDefaultLease(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login",
- Unauthenticated: true,
- ConnState: &connState,
- Check: func(resp *logical.Response) error {
- if resp.Auth.TTL != 300*time.Second {
- t.Fatalf("bad lease length: %#v", resp.Auth)
- }
-
- fn := logicaltest.TestCheckAuth([]string{"default", "foo"})
- return fn(resp)
- },
- }
-}
-
-func testAccStepLoginInvalid(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep {
- return testAccStepLoginWithNameInvalid(t, connState, "")
-}
-
-func testAccStepLoginWithNameInvalid(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login",
- Unauthenticated: true,
- ConnState: &connState,
- Check: func(resp *logical.Response) error {
- if resp.Auth != nil {
- return fmt.Errorf("should not be authorized: %#v", resp)
- }
- return nil
- },
- Data: map[string]interface{}{
- "name": certName,
- },
- ErrorOk: true,
- }
-}
-
-func testAccStepListCerts(
- t *testing.T, certs []string) []logicaltest.TestStep {
- return []logicaltest.TestStep{
- logicaltest.TestStep{
- Operation: logical.ListOperation,
- Path: "certs",
- Check: func(resp *logical.Response) error {
- if resp == nil {
- return fmt.Errorf("nil response")
- }
- if resp.Data == nil {
- return fmt.Errorf("nil data")
- }
- if resp.Data["keys"] == interface{}(nil) {
- return fmt.Errorf("nil keys")
- }
- keys := resp.Data["keys"].([]string)
- if !reflect.DeepEqual(keys, certs) {
- return fmt.Errorf("mismatch: keys is %#v, certs is %#v", keys, certs)
- }
- return nil
- },
- }, logicaltest.TestStep{
- Operation: logical.ListOperation,
- Path: "certs/",
- Check: func(resp *logical.Response) error {
- if resp == nil {
- return fmt.Errorf("nil response")
- }
- if resp.Data == nil {
- return fmt.Errorf("nil data")
- }
- if resp.Data["keys"] == interface{}(nil) {
- return fmt.Errorf("nil keys")
- }
- keys := resp.Data["keys"].([]string)
- if !reflect.DeepEqual(keys, certs) {
- return fmt.Errorf("mismatch: keys is %#v, certs is %#v", keys, certs)
- }
-
- return nil
- },
- },
- }
-}
-
-func testAccStepCert(
- t *testing.T, name string, cert []byte, policies string, allowedNames string, expectError bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "certs/" + name,
- ErrorOk: expectError,
- Data: map[string]interface{}{
- "certificate": string(cert),
- "policies": policies,
- "display_name": name,
- "allowed_names": allowedNames,
- "lease": 1000,
- },
- Check: func(resp *logical.Response) error {
- if resp == nil && expectError {
- return fmt.Errorf("expected error but received nil")
- }
- return nil
- },
- }
-}
-
-func testAccStepCertLease(
- t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "certs/" + name,
- Data: map[string]interface{}{
- "certificate": string(cert),
- "policies": policies,
- "display_name": name,
- "lease": 1000,
- },
- }
-}
-
-func testAccStepCertTTL(
- t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "certs/" + name,
- Data: map[string]interface{}{
- "certificate": string(cert),
- "policies": policies,
- "display_name": name,
- "ttl": "1000s",
- },
- }
-}
-
-func testAccStepCertNoLease(
- t *testing.T, name string, cert []byte, policies string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "certs/" + name,
- Data: map[string]interface{}{
- "certificate": string(cert),
- "policies": policies,
- "display_name": name,
- },
- }
-}
-
-func testConnState(certPath, keyPath, rootCertPath string) (tls.ConnectionState, error) {
- cert, err := tls.LoadX509KeyPair(certPath, keyPath)
- if err != nil {
- return tls.ConnectionState{}, err
- }
- rootConfig := &rootcerts.Config{
- CAFile: rootCertPath,
- }
- rootCAs, err := rootcerts.LoadCACerts(rootConfig)
- if err != nil {
- return tls.ConnectionState{}, err
- }
- listenConf := &tls.Config{
- Certificates: []tls.Certificate{cert},
- ClientAuth: tls.RequestClientCert,
- InsecureSkipVerify: false,
- RootCAs: rootCAs,
- }
- dialConf := new(tls.Config)
- *dialConf = *listenConf
- // start a server
- list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf)
- if err != nil {
- return tls.ConnectionState{}, err
- }
- defer list.Close()
-
- // Accept connections.
- serverErrors := make(chan error, 1)
- connState := make(chan tls.ConnectionState)
- go func() {
- defer close(connState)
- serverConn, err := list.Accept()
- serverErrors <- err
- if err != nil {
- close(serverErrors)
- return
- }
- defer serverConn.Close()
-
- // Read the ping
- buf := make([]byte, 4)
- _, err = serverConn.Read(buf)
- if (err != nil) && (err != io.EOF) {
- serverErrors <- err
- close(serverErrors)
- return
- } else {
- // EOF is a reasonable error condition, so swallow it.
- serverErrors <- nil
- }
- close(serverErrors)
- connState <- serverConn.(*tls.Conn).ConnectionState()
- }()
-
- // Establish a connection from the client side and write a few bytes.
- clientErrors := make(chan error, 1)
- go func() {
- addr := list.Addr().String()
- conn, err := tls.Dial("tcp", addr, dialConf)
- clientErrors <- err
- if err != nil {
- close(clientErrors)
- return
- }
- defer conn.Close()
-
- // Write ping
- _, err = conn.Write([]byte("ping"))
- clientErrors <- err
- close(clientErrors)
- }()
-
- for err = range clientErrors {
- if err != nil {
- return tls.ConnectionState{}, fmt.Errorf("error in client goroutine:%v", err)
- }
- }
-
- for err = range serverErrors {
- if err != nil {
- return tls.ConnectionState{}, fmt.Errorf("error in server goroutine:%v", err)
- }
- }
- // Grab the current state
- return <-connState, nil
-}
-
-func Test_Renew(t *testing.T) {
- storage := &logical.InmemStorage{}
-
- lb, err := Factory(&logical.BackendConfig{
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: 300 * time.Second,
- MaxLeaseTTLVal: 1800 * time.Second,
- },
- StorageView: storage,
- })
- if err != nil {
- t.Fatalf("error: %s", err)
- }
-
- b := lb.(*backend)
- connState, err := testConnState("test-fixtures/keys/cert.pem",
- "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatalf("error testing connection state: %v", err)
- }
- ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
- if err != nil {
- t.Fatal(err)
- }
-
- req := &logical.Request{
- Connection: &logical.Connection{
- ConnState: &connState,
- },
- Storage: storage,
- Auth: &logical.Auth{},
- }
-
- fd := &framework.FieldData{
- Raw: map[string]interface{}{
- "name": "test",
- "certificate": ca,
- "policies": "foo,bar",
- },
- Schema: pathCerts(b).Fields,
- }
-
- resp, err := b.pathCertWrite(req, fd)
- if err != nil {
- t.Fatal(err)
- }
-
- empty_login_fd := &framework.FieldData{
- Raw: map[string]interface{}{},
- Schema: pathLogin(b).Fields,
- }
- resp, err = b.pathLogin(req, empty_login_fd)
- if err != nil {
- t.Fatal(err)
- }
- if resp.IsError() {
- t.Fatalf("got error: %#v", *resp)
- }
- req.Auth.InternalData = resp.Auth.InternalData
- req.Auth.Metadata = resp.Auth.Metadata
- req.Auth.LeaseOptions = resp.Auth.LeaseOptions
- req.Auth.Policies = resp.Auth.Policies
- req.Auth.IssueTime = time.Now()
-
- // Normal renewal
- resp, err = b.pathLoginRenew(req, empty_login_fd)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("got nil response from renew")
- }
- if resp.IsError() {
- t.Fatalf("got error: %#v", *resp)
- }
-
- // Change the policies -- this should fail
- fd.Raw["policies"] = "zip,zap"
- resp, err = b.pathCertWrite(req, fd)
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err = b.pathLoginRenew(req, empty_login_fd)
- if err == nil {
- t.Fatal("expected error")
- }
-
- // Put the policies back, this shold be okay
- fd.Raw["policies"] = "bar,foo"
- resp, err = b.pathCertWrite(req, fd)
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err = b.pathLoginRenew(req, empty_login_fd)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("got nil response from renew")
- }
- if resp.IsError() {
- t.Fatalf("got error: %#v", *resp)
- }
-
- // Delete CA, make sure we can't renew
- resp, err = b.pathCertDelete(req, fd)
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err = b.pathLoginRenew(req, empty_login_fd)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("got nil response from renew")
- }
- if !resp.IsError() {
- t.Fatal("expected error")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go
deleted file mode 100644
index a1071fc..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package cert
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/mitchellh/mapstructure"
-)
-
-type CLIHandler struct{}
-
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
- var data struct {
- Mount string `mapstructure:"mount"`
- Name string `mapstructure:"name"`
- }
- if err := mapstructure.WeakDecode(m, &data); err != nil {
- return nil, err
- }
-
- if data.Mount == "" {
- data.Mount = "cert"
- }
-
- options := map[string]interface{}{
- "name": data.Name,
- }
- path := fmt.Sprintf("auth/%s/login", data.Mount)
- secret, err := c.Logical().Write(path, options)
- if err != nil {
- return nil, err
- }
- if secret == nil {
- return nil, fmt.Errorf("empty response from credential provider")
- }
-
- return secret, nil
-}
-
-func (h *CLIHandler) Help() string {
- help := `
-The "cert" credential provider allows you to authenticate with a
-client certificate. No other authentication materials are needed.
-Optionally, you may specify the specific certificate role to
-authenticate against with the "name" parameter.
-
- Example: vault auth -method=cert \
- -client-cert=/path/to/cert.pem \
- -client-key=/path/to/key.pem
- name=cert1
-
- `
-
- return strings.TrimSpace(help)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go
deleted file mode 100644
index fc5254f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package cert
-
-import (
- "crypto/x509"
- "fmt"
- "strings"
- "time"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathListCerts(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "certs/?",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathCertList,
- },
-
- HelpSynopsis: pathCertHelpSyn,
- HelpDescription: pathCertHelpDesc,
- }
-}
-
-func pathCerts(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "certs/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The name of the certificate",
- },
-
- "certificate": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The public certificate that should be trusted.
-Must be x509 PEM encoded.`,
- },
-
- "allowed_names": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: `A comma-separated list of names.
-At least one must exist in either the Common Name or SANs. Supports globbing.`,
- },
-
- "display_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The display name to use for clients using this
-certificate.`,
- },
-
- "policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Comma-seperated list of policies.",
- },
-
- "lease": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `Deprecated: use "ttl" instead. TTL time in
-seconds. Defaults to system/backend default TTL.`,
- },
-
- "ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `TTL for tokens issued by this backend.
-Defaults to system/backend default TTL time.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.DeleteOperation: b.pathCertDelete,
- logical.ReadOperation: b.pathCertRead,
- logical.UpdateOperation: b.pathCertWrite,
- },
-
- HelpSynopsis: pathCertHelpSyn,
- HelpDescription: pathCertHelpDesc,
- }
-}
-
-func (b *backend) Cert(s logical.Storage, n string) (*CertEntry, error) {
- entry, err := s.Get("cert/" + strings.ToLower(n))
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result CertEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
- return &result, nil
-}
-
-func (b *backend) pathCertDelete(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("cert/" + strings.ToLower(d.Get("name").(string)))
- if err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-func (b *backend) pathCertList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- certs, err := req.Storage.List("cert/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(certs), nil
-}
-
-func (b *backend) pathCertRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- cert, err := b.Cert(req.Storage, strings.ToLower(d.Get("name").(string)))
- if err != nil {
- return nil, err
- }
- if cert == nil {
- return nil, nil
- }
-
- duration := cert.TTL
- if duration == 0 {
- duration = b.System().DefaultLeaseTTL()
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "certificate": cert.Certificate,
- "display_name": cert.DisplayName,
- "policies": cert.Policies,
- "ttl": duration / time.Second,
- },
- }, nil
-}
-
-func (b *backend) pathCertWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := strings.ToLower(d.Get("name").(string))
- certificate := d.Get("certificate").(string)
- displayName := d.Get("display_name").(string)
- policies := policyutil.ParsePolicies(d.Get("policies"))
- allowedNames := d.Get("allowed_names").([]string)
-
- // Default the display name to the certificate name if not given
- if displayName == "" {
- displayName = name
- }
-
- parsed := parsePEM([]byte(certificate))
- if len(parsed) == 0 {
- return logical.ErrorResponse("failed to parse certificate"), nil
- }
-
- // If the certificate is not a CA cert, then ensure that x509.ExtKeyUsageClientAuth is set
- if !parsed[0].IsCA && parsed[0].ExtKeyUsage != nil {
- var clientAuth bool
- for _, usage := range parsed[0].ExtKeyUsage {
- if usage == x509.ExtKeyUsageClientAuth || usage == x509.ExtKeyUsageAny {
- clientAuth = true
- break
- }
- }
- if !clientAuth {
- return logical.ErrorResponse("non-CA certificates should have TLS client authentication set as an extended key usage"), nil
- }
- }
-
- certEntry := &CertEntry{
- Name: name,
- Certificate: certificate,
- DisplayName: displayName,
- Policies: policies,
- AllowedNames: allowedNames,
- }
-
- // Parse the lease duration or default to backend/system default
- maxTTL := b.System().MaxLeaseTTL()
- ttl := time.Duration(d.Get("ttl").(int)) * time.Second
- if ttl == time.Duration(0) {
- ttl = time.Second * time.Duration(d.Get("lease").(int))
- }
- if ttl > maxTTL {
- return logical.ErrorResponse(fmt.Sprintf("Given TTL of %d seconds greater than current mount/system default of %d seconds", ttl/time.Second, maxTTL/time.Second)), nil
- }
- if ttl > time.Duration(0) {
- certEntry.TTL = ttl
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("cert/"+name, certEntry)
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-type CertEntry struct {
- Name string
- Certificate string
- DisplayName string
- Policies []string
- TTL time.Duration
- AllowedNames []string
-}
-
-const pathCertHelpSyn = `
-Manage trusted certificates used for authentication.
-`
-
-const pathCertHelpDesc = `
-This endpoint allows you to create, read, update, and delete trusted certificates
-that are allowed to authenticate.
-
-Deleting a certificate will not revoke auth for prior authenticated connections.
-To do this, do a revoke on "login". If you don't need to revoke login immediately,
-then the next renew will cause the lease to expire.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_config.go
deleted file mode 100644
index 9e946c6..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_config.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package cert
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfig(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config",
- Fields: map[string]*framework.FieldSchema{
- "disable_binding": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: `If set, during renewal, skips the matching of presented client identity with the client identity used during login. Defaults to false.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathConfigWrite,
- },
- }
-}
-
-func (b *backend) pathConfigWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- disableBinding := data.Get("disable_binding").(bool)
-
- entry, err := logical.StorageEntryJSON("config", config{
- DisableBinding: disableBinding,
- })
- if err != nil {
- return nil, err
- }
-
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-// Config returns the configuration for this backend.
-func (b *backend) Config(s logical.Storage) (*config, error) {
- entry, err := s.Get("config")
- if err != nil {
- return nil, err
- }
-
- // Returning a default configuration if an entry is not found
- var result config
- if entry != nil {
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, fmt.Errorf("error reading configuration: %s", err)
- }
- }
- return &result, nil
-}
-
-type config struct {
- DisableBinding bool `json:"disable_binding"`
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_crls.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_crls.go
deleted file mode 100644
index 234b93a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_crls.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package cert
-
-import (
- "crypto/x509"
- "fmt"
- "math/big"
- "strings"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathCRLs(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "crls/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The name of the certificate",
- },
-
- "crl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The public certificate that should be trusted.
-May be DER or PEM encoded. Note: the expiration time
-is ignored; if the CRL is no longer valid, delete it
-using the same name as specified here.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.DeleteOperation: b.pathCRLDelete,
- logical.ReadOperation: b.pathCRLRead,
- logical.UpdateOperation: b.pathCRLWrite,
- },
-
- HelpSynopsis: pathCRLsHelpSyn,
- HelpDescription: pathCRLsHelpDesc,
- }
-}
-
-func (b *backend) populateCRLs(storage logical.Storage) error {
- b.crlUpdateMutex.Lock()
- defer b.crlUpdateMutex.Unlock()
-
- if b.crls != nil {
- return nil
- }
-
- b.crls = map[string]CRLInfo{}
-
- keys, err := storage.List("crls/")
- if err != nil {
- return fmt.Errorf("error listing CRLs: %v", err)
- }
- if keys == nil || len(keys) == 0 {
- return nil
- }
-
- for _, key := range keys {
- entry, err := storage.Get("crls/" + key)
- if err != nil {
- b.crls = nil
- return fmt.Errorf("error loading CRL %s: %v", key, err)
- }
- if entry == nil {
- continue
- }
- var crlInfo CRLInfo
- err = entry.DecodeJSON(&crlInfo)
- if err != nil {
- b.crls = nil
- return fmt.Errorf("error decoding CRL %s: %v", key, err)
- }
- b.crls[key] = crlInfo
- }
-
- return nil
-}
-
-func (b *backend) findSerialInCRLs(serial *big.Int) map[string]RevokedSerialInfo {
- b.crlUpdateMutex.RLock()
- defer b.crlUpdateMutex.RUnlock()
- ret := map[string]RevokedSerialInfo{}
- for key, crl := range b.crls {
- if crl.Serials == nil {
- continue
- }
- if info, ok := crl.Serials[serial.String()]; ok {
- ret[key] = info
- }
- }
- return ret
-}
-
-func parseSerialString(input string) (*big.Int, error) {
- ret := &big.Int{}
-
- switch {
- case strings.Count(input, ":") > 0:
- serialBytes := certutil.ParseHexFormatted(input, ":")
- if serialBytes == nil {
- return nil, fmt.Errorf("error parsing serial %s", input)
- }
- ret.SetBytes(serialBytes)
- case strings.Count(input, "-") > 0:
- serialBytes := certutil.ParseHexFormatted(input, "-")
- if serialBytes == nil {
- return nil, fmt.Errorf("error parsing serial %s", input)
- }
- ret.SetBytes(serialBytes)
- default:
- var success bool
- ret, success = ret.SetString(input, 0)
- if !success {
- return nil, fmt.Errorf("error parsing serial %s", input)
- }
- }
-
- return ret, nil
-}
-
-func (b *backend) pathCRLDelete(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := strings.ToLower(d.Get("name").(string))
- if name == "" {
- return logical.ErrorResponse(`"name" parameter cannot be empty`), nil
- }
-
- if err := b.populateCRLs(req.Storage); err != nil {
- return nil, err
- }
-
- b.crlUpdateMutex.Lock()
- defer b.crlUpdateMutex.Unlock()
-
- _, ok := b.crls[name]
- if !ok {
- return logical.ErrorResponse(fmt.Sprintf(
- "no such CRL %s", name,
- )), nil
- }
-
- if err := req.Storage.Delete("crls/" + name); err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "error deleting crl %s: %v", name, err),
- ), nil
- }
-
- delete(b.crls, name)
-
- return nil, nil
-}
-
-func (b *backend) pathCRLRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := strings.ToLower(d.Get("name").(string))
- if name == "" {
- return logical.ErrorResponse(`"name" parameter must be set`), nil
- }
-
- if err := b.populateCRLs(req.Storage); err != nil {
- return nil, err
- }
-
- b.crlUpdateMutex.RLock()
- defer b.crlUpdateMutex.RUnlock()
-
- var retData map[string]interface{}
-
- crl, ok := b.crls[name]
- if !ok {
- return logical.ErrorResponse(fmt.Sprintf(
- "no such CRL %s", name,
- )), nil
- }
-
- retData = structs.New(&crl).Map()
-
- return &logical.Response{
- Data: retData,
- }, nil
-}
-
-func (b *backend) pathCRLWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := strings.ToLower(d.Get("name").(string))
- if name == "" {
- return logical.ErrorResponse(`"name" parameter cannot be empty`), nil
- }
- crl := d.Get("crl").(string)
-
- certList, err := x509.ParseCRL([]byte(crl))
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to parse CRL: %v", err)), nil
- }
- if certList == nil {
- return logical.ErrorResponse("parsed CRL is nil"), nil
- }
-
- if err := b.populateCRLs(req.Storage); err != nil {
- return nil, err
- }
-
- b.crlUpdateMutex.Lock()
- defer b.crlUpdateMutex.Unlock()
-
- crlInfo := CRLInfo{
- Serials: map[string]RevokedSerialInfo{},
- }
- for _, revokedCert := range certList.TBSCertList.RevokedCertificates {
- crlInfo.Serials[revokedCert.SerialNumber.String()] = RevokedSerialInfo{}
- }
-
- entry, err := logical.StorageEntryJSON("crls/"+name, crlInfo)
- if err != nil {
- return nil, err
- }
- if err = req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- b.crls[name] = crlInfo
-
- return nil, nil
-}
-
-type CRLInfo struct {
- Serials map[string]RevokedSerialInfo `json:"serials" structs:"serials" mapstructure:"serials"`
-}
-
-type RevokedSerialInfo struct {
-}
-
-const pathCRLsHelpSyn = `
-Manage Certificate Revocation Lists checked during authentication.
-`
-
-const pathCRLsHelpDesc = `
-This endpoint allows you to create, read, update, and delete the Certificate
-Revocation Lists checked during authentication.
-
-When any CRLs are in effect, any login will check the trust chains sent by a
-client against the submitted CRLs. Any chain containing a serial number revoked
-by one or more of the CRLs causes that chain to be marked as invalid for the
-authentication attempt. Conversely, *any* valid chain -- that is, a chain
-in which none of the serials are revoked by any CRL -- allows authentication.
-This allows authentication to succeed when interim parts of one chain have been
-revoked; for instance, if a certificate is signed by two intermediate CAs due to
-one of them expiring.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go
deleted file mode 100644
index 2faecd3..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go
+++ /dev/null
@@ -1,361 +0,0 @@
-package cert
-
-import (
- "bytes"
- "crypto/tls"
- "crypto/x509"
- "encoding/base64"
- "encoding/pem"
- "errors"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-
- "github.com/ryanuber/go-glob"
-)
-
-// ParsedCert is a certificate that has been configured as trusted
-type ParsedCert struct {
- Entry *CertEntry
- Certificates []*x509.Certificate
-}
-
-func pathLogin(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "login",
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The name of the certificate role to authenticate against.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLogin,
- },
- }
-}
-
-func (b *backend) pathLogin(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
-
- var matched *ParsedCert
- if verifyResp, resp, err := b.verifyCredentials(req, data); err != nil {
- return nil, err
- } else if resp != nil {
- return resp, nil
- } else {
- matched = verifyResp
- }
-
- if matched == nil {
- return nil, nil
- }
-
- ttl := matched.Entry.TTL
- if ttl == 0 {
- ttl = b.System().DefaultLeaseTTL()
- }
-
- clientCerts := req.Connection.ConnState.PeerCertificates
- if len(clientCerts) == 0 {
- return logical.ErrorResponse("no client certificate found"), nil
- }
- skid := base64.StdEncoding.EncodeToString(clientCerts[0].SubjectKeyId)
- akid := base64.StdEncoding.EncodeToString(clientCerts[0].AuthorityKeyId)
-
- // Generate a response
- resp := &logical.Response{
- Auth: &logical.Auth{
- InternalData: map[string]interface{}{
- "subject_key_id": skid,
- "authority_key_id": akid,
- },
- Policies: matched.Entry.Policies,
- DisplayName: matched.Entry.DisplayName,
- Metadata: map[string]string{
- "cert_name": matched.Entry.Name,
- "common_name": clientCerts[0].Subject.CommonName,
- "subject_key_id": certutil.GetHexFormatted(clientCerts[0].SubjectKeyId, ":"),
- "authority_key_id": certutil.GetHexFormatted(clientCerts[0].AuthorityKeyId, ":"),
- },
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- TTL: ttl,
- },
- },
- }
- return resp, nil
-}
-
-func (b *backend) pathLoginRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- config, err := b.Config(req.Storage)
- if err != nil {
- return nil, err
- }
-
- if !config.DisableBinding {
- var matched *ParsedCert
- if verifyResp, resp, err := b.verifyCredentials(req, d); err != nil {
- return nil, err
- } else if resp != nil {
- return resp, nil
- } else {
- matched = verifyResp
- }
-
- if matched == nil {
- return nil, nil
- }
-
- clientCerts := req.Connection.ConnState.PeerCertificates
- if len(clientCerts) == 0 {
- return nil, fmt.Errorf("no client certificate found")
- }
- skid := base64.StdEncoding.EncodeToString(clientCerts[0].SubjectKeyId)
- akid := base64.StdEncoding.EncodeToString(clientCerts[0].AuthorityKeyId)
-
- // Certificate should not only match a registered certificate policy.
- // Also, the identity of the certificate presented should match the identity of the certificate used during login
- if req.Auth.InternalData["subject_key_id"] != skid && req.Auth.InternalData["authority_key_id"] != akid {
- return nil, fmt.Errorf("client identity during renewal not matching client identity used during login")
- }
-
- }
- // Get the cert and use its TTL
- cert, err := b.Cert(req.Storage, req.Auth.Metadata["cert_name"])
- if err != nil {
- return nil, err
- }
- if cert == nil {
- // User no longer exists, do not renew
- return nil, nil
- }
-
- if !policyutil.EquivalentPolicies(cert.Policies, req.Auth.Policies) {
- return nil, fmt.Errorf("policies have changed, not renewing")
- }
-
- return framework.LeaseExtend(cert.TTL, 0, b.System())(req, d)
-}
-
-func (b *backend) verifyCredentials(req *logical.Request, d *framework.FieldData) (*ParsedCert, *logical.Response, error) {
- // Get the connection state
- if req.Connection == nil || req.Connection.ConnState == nil {
- return nil, logical.ErrorResponse("tls connection required"), nil
- }
- connState := req.Connection.ConnState
-
- if connState.PeerCertificates == nil || len(connState.PeerCertificates) == 0 {
- return nil, logical.ErrorResponse("client certificate must be supplied"), nil
- }
- clientCert := connState.PeerCertificates[0]
-
- // Allow constraining the login request to a single CertEntry
- var certName string
- if req.Auth != nil { // It's a renewal, use the saved certName
- certName = req.Auth.Metadata["cert_name"]
- } else {
- certName = d.Get("name").(string)
- }
-
- // Load the trusted certificates
- roots, trusted, trustedNonCAs := b.loadTrustedCerts(req.Storage, certName)
-
- // Get the list of full chains matching the connection
- trustedChains, err := validateConnState(roots, connState)
- if err != nil {
- return nil, nil, err
- }
-
- // If trustedNonCAs is not empty it means that client had registered a non-CA cert
- // with the backend.
- if len(trustedNonCAs) != 0 {
- for _, trustedNonCA := range trustedNonCAs {
- tCert := trustedNonCA.Certificates[0]
- // Check for client cert being explicitly listed in the config (and matching other constraints)
- if tCert.SerialNumber.Cmp(clientCert.SerialNumber) == 0 &&
- bytes.Equal(tCert.AuthorityKeyId, clientCert.AuthorityKeyId) &&
- b.matchesConstraints(clientCert, trustedNonCA.Certificates, trustedNonCA) {
- return trustedNonCA, nil, nil
- }
- }
- }
-
- // If no trusted chain was found, client is not authenticated
- if len(trustedChains) == 0 {
- return nil, logical.ErrorResponse("invalid certificate or no client certificate supplied"), nil
- }
-
- // Search for a ParsedCert that intersects with the validated chains and any additional constraints
- matches := make([]*ParsedCert, 0)
- for _, trust := range trusted { // For each ParsedCert in the config
- for _, tCert := range trust.Certificates { // For each certificate in the entry
- for _, chain := range trustedChains { // For each root chain that we matched
- for _, cCert := range chain { // For each cert in the matched chain
- if tCert.Equal(cCert) && // ParsedCert intersects with matched chain
- b.matchesConstraints(clientCert, chain, trust) { // validate client cert + matched chain against the config
- // Add the match to the list
- matches = append(matches, trust)
- }
- }
- }
- }
- }
-
- // Fail on no matches
- if len(matches) == 0 {
- return nil, logical.ErrorResponse("no chain matching all constraints could be found for this login certificate"), nil
- }
-
- // Return the first matching entry (for backwards compatibility, we continue to just pick one if multiple match)
- return matches[0], nil, nil
-}
-
-func (b *backend) matchesConstraints(clientCert *x509.Certificate, trustedChain []*x509.Certificate, config *ParsedCert) bool {
- // Default behavior (no names) is to allow all names
- nameMatched := len(config.Entry.AllowedNames) == 0
- // At least one pattern must match at least one name if any patterns are specified
- for _, allowedName := range config.Entry.AllowedNames {
- if glob.Glob(allowedName, clientCert.Subject.CommonName) {
- nameMatched = true
- }
-
- for _, name := range clientCert.DNSNames {
- if glob.Glob(allowedName, name) {
- nameMatched = true
- }
- }
-
- for _, name := range clientCert.EmailAddresses {
- if glob.Glob(allowedName, name) {
- nameMatched = true
- }
- }
- }
-
- return !b.checkForChainInCRLs(trustedChain) && nameMatched
-}
-
-// loadTrustedCerts is used to load all the trusted certificates from the backend
-func (b *backend) loadTrustedCerts(store logical.Storage, certName string) (pool *x509.CertPool, trusted []*ParsedCert, trustedNonCAs []*ParsedCert) {
- pool = x509.NewCertPool()
- trusted = make([]*ParsedCert, 0)
- trustedNonCAs = make([]*ParsedCert, 0)
- names, err := store.List("cert/")
- if err != nil {
- b.Logger().Error("cert: failed to list trusted certs", "error", err)
- return
- }
- for _, name := range names {
- // If we are trying to select a single CertEntry and this isn't it
- if certName != "" && name != certName {
- continue
- }
- entry, err := b.Cert(store, strings.TrimPrefix(name, "cert/"))
- if err != nil {
- b.Logger().Error("cert: failed to load trusted cert", "name", name, "error", err)
- continue
- }
- parsed := parsePEM([]byte(entry.Certificate))
- if len(parsed) == 0 {
- b.Logger().Error("cert: failed to parse certificate", "name", name)
- continue
- }
- if !parsed[0].IsCA {
- trustedNonCAs = append(trustedNonCAs, &ParsedCert{
- Entry: entry,
- Certificates: parsed,
- })
- } else {
- for _, p := range parsed {
- pool.AddCert(p)
- }
-
- // Create a ParsedCert entry
- trusted = append(trusted, &ParsedCert{
- Entry: entry,
- Certificates: parsed,
- })
- }
- }
- return
-}
-
-func (b *backend) checkForChainInCRLs(chain []*x509.Certificate) bool {
- badChain := false
- for _, cert := range chain {
- badCRLs := b.findSerialInCRLs(cert.SerialNumber)
- if len(badCRLs) != 0 {
- badChain = true
- break
- }
- }
- return badChain
-}
-
-func (b *backend) checkForValidChain(chains [][]*x509.Certificate) bool {
- for _, chain := range chains {
- if !b.checkForChainInCRLs(chain) {
- return true
- }
- }
- return false
-}
-
-// parsePEM parses a PEM encoded x509 certificate
-func parsePEM(raw []byte) (certs []*x509.Certificate) {
- for len(raw) > 0 {
- var block *pem.Block
- block, raw = pem.Decode(raw)
- if block == nil {
- break
- }
- if (block.Type != "CERTIFICATE" && block.Type != "TRUSTED CERTIFICATE") || len(block.Headers) != 0 {
- continue
- }
-
- cert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- continue
- }
- certs = append(certs, cert)
- }
- return
-}
-
-// validateConnState is used to validate that the TLS client is authorized
-// by at trusted certificate. Most of this logic is lifted from the client
-// verification logic here: http://golang.org/src/crypto/tls/handshake_server.go
-// The trusted chains are returned.
-func validateConnState(roots *x509.CertPool, cs *tls.ConnectionState) ([][]*x509.Certificate, error) {
- opts := x509.VerifyOptions{
- Roots: roots,
- Intermediates: x509.NewCertPool(),
- KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
- }
-
- certs := cs.PeerCertificates
- if len(certs) == 0 {
- return nil, nil
- }
-
- if len(certs) > 1 {
- for _, cert := range certs[1:] {
- opts.Intermediates.AddCert(cert)
- }
- }
-
- chains, err := certs[0].Verify(opts)
- if err != nil {
- if _, ok := err.(x509.UnknownAuthorityError); ok {
- return nil, nil
- }
- return nil, errors.New("failed to verify client's certificate: " + err.Error())
- }
- return chains, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert.pem
deleted file mode 100644
index 9d9a385..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert.pem
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDPjCCAiagAwIBAgIUXiEDuecwua9+j1XHLnconxQ/JBcwDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwMzU4WhgPMjA2
-NjA0MjAxNjA0MjhaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
-9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwWPjnTqnkc6acah+wWLmdTK0oCrf2687XVhx
-VP3IN897TYzkaBQ2Dn1UM2VEL71sE3OZSVm0UWs5n7UqRuDp6mvkvrT2q5zgh/bV
-zg9ZL1AI5H7dY2Rsor95I849ymFpXZooMgNtIQLxIeleBwzTnVSkFl8RqKM7NkjZ
-wvBafQEjSsYk9050Bu0GMLgFJYRo1LozJLbwIs5ykG5F5PWTMfRvLCgLBzixPb75
-unIJ29nL0yB7zzUdkM8CG1EX8NkjGLEnpRnPa7+RMf8bd10v84cr0JFCUQmoabks
-sqVyA825/1we2r5Y8blyXZVIr2lcPyGocLDxz1qT1MqxrNQIywIDAQABo4GBMH8w
-DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBTo2I+W
-3Wb2MBe3OWuj5qCbafavMB8GA1UdIwQYMBaAFBTo2I+W3Wb2MBe3OWuj5qCbafav
-MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
-AQAyjJzDMzf28yMgiu//2R6LD3+zuLHlfX8+p5JB7WDBT7CgSm89gzMRtD2DvqZQ
-6iLbZv/x7Td8bdLsOKf3LDCkZyOygJ0Sr9+6YZdc9heWO8tsO/SbcLhj9/vK8YyV
-5fJo+vECW8I5zQLeTKfPqJtTU0zFspv0WYCB96Hsbhd1hTfHmVgjBoxi0YuduAa8
-3EHuYPfTYkO3M4QJCoQ+3S6LXSTDqppd1KGAy7QhRU6shd29EpSVxhgqZ+CIOpZu
-3RgPOgPqfqcOD/v/SRPqhRf+P5O5Dc/N4ZXTZtfJbaY0qE+smpeQUskVQ2TrSqha
-UYpNk7+toZW3Gioo0lBD3gH2
------END CERTIFICATE-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert2crl b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert2crl
deleted file mode 100644
index 82db7a3..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cacert2crl
+++ /dev/null
@@ -1,12 +0,0 @@
------BEGIN X509 CRL-----
-MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtteXZhdWx0LmNvbRcN
-MTYwNTAyMTYxNDMzWhcNMTYwNTA1MTYxNDMzWjArMCkCFCXxxcbS0ATpI2PYrx8d
-ACLEQ3B9FxExNjA1MDIxMjE0MzMtMDQwMKAjMCEwHwYDVR0jBBgwFoAUwsRNYCw4
-U2won66rMKEJm8inFfgwDQYJKoZIhvcNAQELBQADggEBAD/VvoRK4eaEDzG7Z95b
-fHL5ubJGkyvkp8ruNu+rfQp8NLgFVvY6a93Hz7WLOhACkKIWJ63+/4vCfDi5uU0B
-HW2FICHdlSQ+6DdGJ6MrgujALlyT+69iF+fPiJ/M1j/N7Am8XPYYcfNdSK6CHtfg
-gHNB7E+ubBA7lIw7ucIkoiJjXrSWSXTs9/GzLUImiXJAKQ+JzPYryIsGKXKAwgHh
-HB56BnJ2vOs7+6UxQ6fjKTMxYdNgoZ34MhkkxNNhylrEndO6XUvUvC1f/1p1wlzy
-xTq2MrMfJHJyu08rkrD+kwMPH2uoVwKyDhXdRBP0QrvQwOsvNEhW8LTKwLWkK17b
-fEI=
------END X509 CRL-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cakey.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cakey.pem
deleted file mode 100644
index ecba475..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/cakey.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAwWPjnTqnkc6acah+wWLmdTK0oCrf2687XVhxVP3IN897TYzk
-aBQ2Dn1UM2VEL71sE3OZSVm0UWs5n7UqRuDp6mvkvrT2q5zgh/bVzg9ZL1AI5H7d
-Y2Rsor95I849ymFpXZooMgNtIQLxIeleBwzTnVSkFl8RqKM7NkjZwvBafQEjSsYk
-9050Bu0GMLgFJYRo1LozJLbwIs5ykG5F5PWTMfRvLCgLBzixPb75unIJ29nL0yB7
-zzUdkM8CG1EX8NkjGLEnpRnPa7+RMf8bd10v84cr0JFCUQmoabkssqVyA825/1we
-2r5Y8blyXZVIr2lcPyGocLDxz1qT1MqxrNQIywIDAQABAoIBAD1pBd9ov8t6Surq
-sY2hZUM0Hc16r+ln5LcInbx6djjaxvHiWql+OYgyXimP764lPYuTuspjFPKB1SOU
-+N7XDxCkwFeayXXHdDlYtZ4gm5Z9mMVOT+j++8xWdxZaqJ56fmX9zOPM2LuR3paB
-L52Xgh9EwHJmMApYAzaCvbu8bU+iHeNTW80xabxQrp9VCu/A1BXUX06jK4T+wmjZ
-kDA82uQp3dCOF1tv/10HgwqkJj6/1jjM0XUzUZR6iV85S6jrA7wD7gDDeqNO8YHN
-08YMRgTKk4pbA7AqoC5xbL3gbSjsjyw48KRq0FkdkjsgV0PJZRMUU9fv9puDa23K
-WRPa8LECgYEAyeth5bVH8FXnVXIAAFU6W0WdgCK3VakhjItLw0eoxshuTwbVq64w
-CNOB8y1pfP83WiJjX3qRG43NDW07X69J57YKtCCb6KICVUPmecgYZPkmegD1HBQZ
-5+Aak+5pIUQuycQ0t65yHGu4Jsju05gEFgdzydFjNANgiPxRzZxzAkkCgYEA9S+y
-ZR063oCQDg/GhMLCx19nCJyU44Figh1YCD6kTrsSTECuRpQ5B1F9a+LeZT2wnYxv
-+qMvvV+lfVY73f5WZ567u2jSDIsCH34p4g7sE25lKwo+Lhik6EtOehJFs2ZUemaT
-Ym7EjqWlC1whrG7P4MnTGzPOVNAGAxsGPtT58nMCgYAs/R8A2VU//UPfy9ioOlUY
-RPiEtjd3BIoPEHI+/lZihAHf5bvx1oupS8bmcbXRPeQNVyAhA+QU6ZFIbpAOD7Y9
-xFe6LpHOUVqHuOs/MxAMX17tTA1QxkHHYi1JzJLr8I8kMW01h86w+mc7bQWZa4Nt
-jReFXfvmeOInY2CumS8e0QKBgC23ow/vj1aFqla04lNG7YK3a0LTz39MVM3mItAG
-viRgBV1qghRu9uNCcpx3RPijtBbsZMTbQL+S4gyo06jlD79qfZ7IQMJN+SteHvkj
-xykoYHzSAB4gQj9+KzffyFdXMVFRZxHnjYb7o/amSzEXyHMlrtNXqZVu5HAXzeZR
-V/m5AoGAAStS43Q7qSJSMfMBITKMdKlqCObnifD77WeR2WHGrpkq26300ggsDpMS
-UTmnAAo77lSMmDsdoNn2XZmdeTu1CPoQnoZSE5CqPd5GeHA/hhegVCdeYxSXZJoH
-Lhiac+AhCEog/MS1GmVsjynD7eDGVFcsJ6SWuam7doKfrpPqPnE=
------END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/generate.txt b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/generate.txt
deleted file mode 100644
index 5b888ee..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/generate.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-vault mount pki
-vault mount-tune -max-lease-ttl=438000h pki
-vault write pki/root/generate/exported common_name=myvault.com ttl=438000h ip_sans=127.0.0.1
-vi cacert.pem
-vi cakey.pem
-
-vaultcert.hcl
-backend "inmem" {
-}
-disable_mlock = true
-default_lease_ttl = "700h"
-max_lease_ttl = "768h"
-listener "tcp" {
- address = "127.0.0.1:8200"
- tls_cert_file = "./cacert.pem"
- tls_key_file = "./cakey.pem"
-}
-========================================
-vault mount pki
-vault mount-tune -max-lease-ttl=438000h pki
-vault write pki/root/generate/exported common_name=myvault.com ttl=438000h max_ttl=438000h ip_sans=127.0.0.1
-vi testcacert1.pem
-vi testcakey1.pem
-vi testcaserial1
-
-vault write pki/config/urls issuing_certificates="http://127.0.0.1:8200/v1/pki/ca" crl_distribution_points="http://127.0.0.1:8200/v1/pki/crl"
-vault write pki/roles/myvault-dot-com allowed_domains=myvault.com allow_subdomains=true ttl=437999h max_ttl=438000h allow_ip_sans=true
-
-vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
-vi testissuedserial1
-
-vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
-vi testissuedcert2.pem
-vi testissuedkey2.pem
-vi testissuedserial2
-
-vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
-vi testissuedserial3
-
-vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
-vi testissuedcert4.pem
-vi testissuedkey4.pem
-vi testissuedserial4
-
-vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1
-vi testissuedserial5
-
-vault write pki/revoke serial_number=$(cat testissuedserial2)
-vault write pki/revoke serial_number=$(cat testissuedserial4)
-curl -XGET "http://127.0.0.1:8200/v1/pki/crl/pem" -H "x-vault-token:123" > issuedcertcrl
-openssl crl -in issuedcertcrl -noout -text
-
-========================================
-export VAULT_ADDR='http://127.0.0.1:8200'
-vault mount pki
-vault mount-tune -max-lease-ttl=438000h pki
-vault write pki/root/generate/exported common_name=myvault.com ttl=438000h ip_sans=127.0.0.1
-vi testcacert2.pem
-vi testcakey2.pem
-vi testcaserial2
-vi testcacert2leaseid
-
-vault write pki/config/urls issuing_certificates="http://127.0.0.1:8200/v1/pki/ca" crl_distribution_points="http://127.0.0.1:8200/v1/pki/crl"
-vault revoke $(cat testcacert2leaseid)
-
-curl -XGET "http://127.0.0.1:8200/v1/pki/crl/pem" -H "x-vault-token:123" > cacert2crl
-openssl crl -in cacert2crl -noout -text
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/issuedcertcrl b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/issuedcertcrl
deleted file mode 100644
index 45e9a98..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/issuedcertcrl
+++ /dev/null
@@ -1,12 +0,0 @@
------BEGIN X509 CRL-----
-MIIB2TCBwjANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtteXZhdWx0LmNvbRcN
-MTYwNTAyMTYxMTA4WhcNMTYwNTA1MTYxMTA4WjBWMCkCFAS6oenLRllQ1MRYcSV+
-5ukv2563FxExNjA1MDIxMjExMDgtMDQwMDApAhQaQdPJfbIwE3q4nyYp60lVnZaE
-5hcRMTYwNTAyMTIxMTA1LTA0MDCgIzAhMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRX
-AOeMiUdBfHFyMA0GCSqGSIb3DQEBCwUAA4IBAQBD2jkeOAmkDdYkAXbmjLGdHaQI
-WMS/M+wtFnHVIDVQEmUmj/KPsrkshTZv2UgCHIxBha6y+kXUMQFMg6FwriDTB170
-WyJVDVhGg2WjiQjnzrzEI+iOmcpx60sPPXE63J/Zxo4QS5M62RTXRq3909HQTFI5
-f3xf0pog8mOrv5uQxO1SACP6YFtdDE2dGOVwoIPuNMTY5vijnj8I9dAw8VrbdoBX
-m/Ky56kT+BpmVWHKwQd1nEcP/RHSKbZwwJzJG0BoGM8cvzjITtBmpEF+OZcea81x
-p9XJkpfFeiVIgzxks3zTeuQjLF8u+MDcdGt0ztHEbkswjxuk1cCovZe2GFr4
------END X509 CRL-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/cert.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/cert.pem
deleted file mode 100644
index 942d266..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/cert.pem
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw
-MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS
-TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn
-SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi
-YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5
-donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG
-B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1
-MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e
-HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o
-k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x
-OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A
-AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br
-aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs
-X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4
-aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA
-KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN
-QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj
-xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk=
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/key.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/key.pem
deleted file mode 100644
index add9820..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu
-HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA
-6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N
-TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd
-y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2
-DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX
-9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF
-RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd
-rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI
-5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7
-oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ
-GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb
-VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR
-akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI
-FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy
-efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh
-r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ
-0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp
-FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR
-kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT
-UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3
-xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W
-injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU
-2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3
-gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/pkioutput b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/pkioutput
deleted file mode 100644
index 526ff03..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/keys/pkioutput
+++ /dev/null
@@ -1,74 +0,0 @@
-Key Value
-lease_id pki/issue/example-dot-com/d8214077-9976-8c68-9c07-6610da30aea4
-lease_duration 279359999
-lease_renewable false
-certificate -----BEGIN CERTIFICATE-----
-MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw
-MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS
-TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn
-SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi
-YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5
-donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG
-B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1
-MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e
-HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o
-k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x
-OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A
-AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br
-aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs
-X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4
-aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA
-KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN
-QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj
-xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk=
------END CERTIFICATE-----
-issuing_ca -----BEGIN CERTIFICATE-----
-MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
-MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
-Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
-z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
-AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
-6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
-SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
-A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
-7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
-BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
-wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
-U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
-cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
-ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
-t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
-zehNe5dFTjFpylg1o6b8Ow==
------END CERTIFICATE-----
-private_key -----BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu
-HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA
-6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N
-TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd
-y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2
-DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX
-9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF
-RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd
-rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI
-5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7
-oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ
-GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb
-VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR
-akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI
-FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy
-efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh
-r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ
-0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp
-FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR
-kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT
-UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3
-xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W
-injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU
-2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3
-gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4=
------END RSA PRIVATE KEY-----
-private_key_type rsa
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/noclientauthcert.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/noclientauthcert.pem
deleted file mode 100644
index 3948f22..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/noclientauthcert.pem
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDGTCCAgGgAwIBAgIBBDANBgkqhkiG9w0BAQUFADBxMQowCAYDVQQDFAEqMQsw
-CQYDVQQIEwJHQTELMAkGA1UEBhMCVVMxJTAjBgkqhkiG9w0BCQEWFnZpc2hhbG5h
-eWFrdkBnbWFpbC5jb20xEjAQBgNVBAoTCUhhc2hpQ29ycDEOMAwGA1UECxMFVmF1
-bHQwHhcNMTYwMjI5MjE0NjE2WhcNMjEwMjI3MjE0NjE2WjBxMQowCAYDVQQDFAEq
-MQswCQYDVQQIEwJHQTELMAkGA1UEBhMCVVMxJTAjBgkqhkiG9w0BCQEWFnZpc2hh
-bG5heWFrdkBnbWFpbC5jb20xEjAQBgNVBAoTCUhhc2hpQ29ycDEOMAwGA1UECxMF
-VmF1bHQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMfRkLfIGHt1r2jjnV0N
-LqRCu3oB+J1dqpM03vQt3qzIiqtuQuIA2ba7TJm2HwU3W3+rtfFcS+hkBR/LZM+u
-cBPB+9b9+7i08vHjgy2P3QH/Ebxa8j1v7JtRMT2qyxWK8NlT/+wZSH82Cr812aS/
-zNT56FbBo2UAtzpqeC4eiv6NAgMBAAGjQDA+MAkGA1UdEwQCMAAwCwYDVR0PBAQD
-AgXgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZI
-hvcNAQEFBQADggEBAG2mUwsZ6+R8qqyNjzMk7mgpsRZv9TEl6c1IiQdyjaCOPaYH
-vtZpLX20um36cxrLuOUtZLllG/VJEhRZW5mXWxuOk4QunWMBXQioCDJG1ktcZAcQ
-QqYv9Dzy2G9lZHjLztEac37T75RXW7OEeQREgwP11c8sQYiS9jf+7ITYL7nXjoKq
-gEuH0h86BOH2O/BxgMelt9O0YCkvkLLHnE27xuNelRRZcBLSuE1GxdUi32MDJ+ff
-25GUNM0zzOEaJAFE/USUBEdQqN1gvJidNXkAiMtIK7T8omQZONRaD2ZnSW8y2krh
-eUg+rKis9RinqFlahLPfI5BlyQsNMEnsD07Q85E=
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/pkioutput b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/pkioutput
deleted file mode 100644
index 312ae18..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/pkioutput
+++ /dev/null
@@ -1,74 +0,0 @@
-Key Value
-lease_id pki/root/generate/exported/7bf99d76-dd3e-2c5b-04ce-5253062ad586
-lease_duration 315359999
-lease_renewable false
-certificate -----BEGIN CERTIFICATE-----
-MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
-MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
-Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
-z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
-AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
-6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
-SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
-A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
-7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
-BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
-wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
-U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
-cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
-ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
-t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
-zehNe5dFTjFpylg1o6b8Ow==
------END CERTIFICATE-----
-expiration 1.772072879e+09
-issuing_ca -----BEGIN CERTIFICATE-----
-MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
-MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
-Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
-z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
-AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
-6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
-SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
-A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
-7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
-BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
-wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
-U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
-cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
-ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
-t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
-zehNe5dFTjFpylg1o6b8Ow==
------END CERTIFICATE-----
-private_key -----BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p
-t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3
-BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w
-/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv
-0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi
-18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb
-ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn
-8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f
-nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8
-2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t
-grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc
-bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9
-0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN
-ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf
-lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1
-lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj
-AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG
-ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib
-thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU
-4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb
-iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO
-tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y
-LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc
-4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX
-OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8=
------END RSA PRIVATE KEY-----
-private_key_type rsa
-serial_number 6f:98:9d:f8:67:1a:31:e3:27:60:1b:f7:32:f7:53:19:68:a0:c8:9d
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/root.crl b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/root.crl
deleted file mode 100644
index a80c9e4..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/root.crl
+++ /dev/null
@@ -1,12 +0,0 @@
------BEGIN X509 CRL-----
-MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbRcN
-MTYwMjI5MDIyOTE3WhcNMjUwMTA1MTAyOTE3WjArMCkCFG+YnfhnGjHjJ2Ab9zL3
-UxlooMidFxExNjAyMjgyMTI5MTctMDUwMKAjMCEwHwYDVR0jBBgwFoAUncSzT/6H
-MexyuiU9/7EgHu+ok5swDQYJKoZIhvcNAQELBQADggEBAG9YDXpNe4LJroKZmVCn
-HqMhW8eyzyaPak2nPPGCVUnc6vt8rlBYQU+xlBizD6xatZQDMPgrT8sBl9W3ysXk
-RUlliHsT/SHddMz5dAZsBPRMJ7pYWLTx8jI4w2WRfbSyI4bY/6qTRNkEBUv+Fk8J
-xvwB89+EM0ENcVMhv9ghsUA8h7kOg673HKwRstLDAzxS/uLmEzFjj8SV2m5DbV2Y
-UUCKRSV20/kxJMIC9x2KikZhwOSyv1UE1otD+RQvbfAoZPUDmvp2FR/E0NGjBBOg
-1TtCPRrl63cjqU3s8KQ4uah9Vj+Cwcu9n/yIKKtNQq4NKHvagv8GlUsoJ4BdAxCw
-IA0=
------END X509 CRL-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcacert.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcacert.pem
deleted file mode 100644
index dcb307a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcacert.pem
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw
-MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7
-Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0
-z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x
-AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb
-6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH
-SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G
-A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx
-7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc
-BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
-wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2
-U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa
-cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N
-ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ
-t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk
-zehNe5dFTjFpylg1o6b8Ow==
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcakey.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcakey.pem
deleted file mode 100644
index e950da5..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/root/rootcakey.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p
-t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3
-BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w
-/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv
-0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi
-18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb
-ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn
-8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f
-nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8
-2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t
-grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc
-bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9
-0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN
-ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf
-lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1
-lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj
-AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG
-ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib
-thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU
-4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb
-iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO
-tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y
-LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc
-4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX
-OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert1.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert1.pem
deleted file mode 100644
index ab8bf9e..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert1.pem
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDPjCCAiagAwIBAgIUfIKsF2VPT7sdFcKOHJH2Ii6K4MwwDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwNTQyWhgPMjA2
-NjA0MjAxNjA2MTJaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
-9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdS
-xz9hfymuJb+cN8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP
-67HDzVZhGBHlHTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xE
-JsHQPYS9ASe2eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUb
-cCcIZyk4QVFZ1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SY
-WrCONRw61A5Zwx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABo4GBMH8w
-DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOuKvPiU
-G06iHkRXAOeMiUdBfHFyMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRXAOeMiUdBfHFy
-MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
-AQBcN/UdAMzc7UjRdnIpZvO+5keBGhL/vjltnGM1dMWYHa60Y5oh7UIXF+P1RdNW
-n7g80lOyvkSR15/r1rDkqOK8/4oruXU31EcwGhDOC4hU6yMUy4ltV/nBoodHBXNh
-MfKiXeOstH1vdI6G0P6W93Bcww6RyV1KH6sT2dbETCw+iq2VN9CrruGIWzd67UT/
-spe/kYttr3UYVV3O9kqgffVVgVXg/JoRZ3J7Hy2UEXfh9UtWNanDlRuXaZgE9s/d
-CpA30CHpNXvKeyNeW2ktv+2nAbSpvNW+e6MecBCTBIoDSkgU8ShbrzmDKVwNN66Q
-5gn6KxUPBKHEtNzs5DgGM7nq
------END CERTIFICATE-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert2.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert2.pem
deleted file mode 100644
index a8fe6c4..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcacert2.pem
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDPjCCAiagAwIBAgIUJfHFxtLQBOkjY9ivHx0AIsRDcH0wDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYxMjI5WhgPMjA2
-NjA0MjAxNjEyNTlaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
-9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqj8ANjAGrg5BgUb3owGwUHlMYDxljMdwroA/
-Bv76ESjomj1zCyVtoJxlDZ8m9VcKQldk5ashFNuY+Ms9FrJ1YsePvsfStNe37C26
-2uldDToh5rm7K8uwp/bQiErwM9QZMCVYCPEH8QgETPg9qWnikDFLMqcLBNbIiXVL
-alxEYgA1Qt6+ayMvoS35288hFdZj6a0pCF0+zMHORZxloPhkXWnZLp5lWBiunSJG
-0kVz56TjF+oY0L74iW4y3x2805biisGvFqgpZJW8/hLw/kDthNylNTzEqBktsctQ
-BXpSMcwG3woJ0uZ8cH/HA/m0VDeIA77UisXnlLiQDpdB7U7QPwIDAQABo4GBMH8w
-DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMLETWAs
-OFNsKJ+uqzChCZvIpxX4MB8GA1UdIwQYMBaAFMLETWAsOFNsKJ+uqzChCZvIpxX4
-MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
-AQCRlFb6bZDrq3NkoZF9evls7cT41V3XCdykMA4K9YRgDroZ5psanSvYEnSrk9cU
-Y7sVYW7b8qSRWkLZrHCAwc2V0/i5F5j4q9yVnWaTZ+kOVCFYCI8yUS7ixRQdTLNN
-os/r9dcRSzzTEqoQThAzn571yRcbJHzTjda3gCJ5F4utYUBU2F9WK+ukW9nqfepa
-ju5vEEGDuL2+RyApzL0nGzMUkCdBcK82QBksTlElPnbICbJZWUUMTZWPaZ7WGDDa
-Pj+pWMXiDQmzIuzgXUCNtQL6lEv4tQwGYRHjjPmhgJP4sr6Cyrj4G0iljrqM+z/3
-gLyJOlNU8c5x02/C1nFDDa14
------END CERTIFICATE-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey1.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey1.pem
deleted file mode 100644
index 05211ba..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey1.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdSxz9hfymuJb+c
-N8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP67HDzVZhGBHl
-HTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xEJsHQPYS9ASe2
-eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUbcCcIZyk4QVFZ
-1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SYWrCONRw61A5Z
-wx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABAoIBAG3bCo7ljMQb6tel
-CAUjL5Ilqz5a9ebOsONABRYLOclq4ePbatxawdJF7/sSLwZxKkIJnZtvr2Hkubxg
-eOO8KC0YbVS9u39Rjc2QfobxHfsojpbWSuCJl+pvwinbkiUAUxXR7S/PtCPJKat/
-fGdYCiMQ/tqnynh4vR4+/d5o12c0KuuQ22/MdEf3GOadUamRXS1ET9iJWqla1pJW
-TmzrlkGAEnR5PPO2RMxbnZCYmj3dArxWAnB57W+bWYla0DstkDKtwg2j2ikNZpXB
-nkZJJpxR76IYD1GxfwftqAKxujKcyfqB0dIKCJ0UmfOkauNWjexroNLwaAOC3Nud
-XIxppAECgYEA1wJ9EH6A6CrSjdzUocF9LtQy1LCDHbdiQFHxM5/zZqIxraJZ8Gzh
-Q0d8JeOjwPdG4zL9pHcWS7+x64Wmfn0+Qfh6/47Vy3v90PIL0AeZYshrVZyJ/s6X
-YkgFK80KEuWtacqIZ1K2UJyCw81u/ynIl2doRsIbgkbNeN0opjmqVTMCgYEA3CkW
-2fETWK1LvmgKFjG1TjOotVRIOUfy4iN0kznPm6DK2PgTF5DX5RfktlmA8i8WPmB7
-YFOEdAWHf+RtoM/URa7EAGZncCWe6uggAcWqznTS619BJ63OmncpSWov5Byg90gJ
-48qIMY4wDjE85ypz1bmBc2Iph974dtWeDtB7dsECgYAyKZh4EquMfwEkq9LH8lZ8
-aHF7gbr1YeWAUB3QB49H8KtacTg+iYh8o97pEBUSXh6hvzHB/y6qeYzPAB16AUpX
-Jdu8Z9ylXsY2y2HKJRu6GjxAewcO9bAH8/mQ4INrKT6uIdx1Dq0OXZV8jR9KVLtB
-55RCfeLhIBesDR0Auw9sVQKBgB0xTZhkgP43LF35Ca1btgDClNJGdLUztx8JOIH1
-HnQyY/NVIaL0T8xO2MLdJ131pGts+68QI/YGbaslrOuv4yPCQrcS3RBfzKy1Ttkt
-TrLFhtoy7T7HqyeMOWtEq0kCCs3/PWB5EIoRoomfOcYlOOrUCDg2ge9EP4nyVVz9
-hAGBAoGBAJXw/ufevxpBJJMSyULmVWYr34GwLC1OhSE6AVVt9JkIYnc5L4xBKTHP
-QNKKJLmFmMsEqfxHUNWmpiHkm2E0p37Zehui3kywo+A4ybHPTua70ZWQfZhKxLUr
-PvJa8JmwiCM7kO8zjOv+edY1mMWrbjAZH1YUbfcTHmST7S8vp0F3
------END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey2.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey2.pem
deleted file mode 100644
index c2e3763..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testcakey2.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAqj8ANjAGrg5BgUb3owGwUHlMYDxljMdwroA/Bv76ESjomj1z
-CyVtoJxlDZ8m9VcKQldk5ashFNuY+Ms9FrJ1YsePvsfStNe37C262uldDToh5rm7
-K8uwp/bQiErwM9QZMCVYCPEH8QgETPg9qWnikDFLMqcLBNbIiXVLalxEYgA1Qt6+
-ayMvoS35288hFdZj6a0pCF0+zMHORZxloPhkXWnZLp5lWBiunSJG0kVz56TjF+oY
-0L74iW4y3x2805biisGvFqgpZJW8/hLw/kDthNylNTzEqBktsctQBXpSMcwG3woJ
-0uZ8cH/HA/m0VDeIA77UisXnlLiQDpdB7U7QPwIDAQABAoIBADivQ2XHdeHsUzk1
-JOz8efVBfgGo+nL2UPl5MAMnUKH4CgKZJT3311mb2TXA4RrdQUg3ixvBcAFe4L8u
-BIgTIWyjX6Q5KloWXWHhFA8hll76FSGag8ygRJCYaHSI5xOKslxKgtZvUqKZdb0f
-BoDrBYnXL9+MqOmSjjDegh7G2+n49n774Z2VVR47TZTBB5LCWDWj4AtEcalgwlvw
-d5yL/GU/RfCkXCjCeie1pInp3eCMUI9jlvbe/vyaoFq2RiaJw1LSlJLXZBMYzaij
-XkgMtRsr5bf0Tg2z3SPiaa9QZogfVLqHWAt6RHZf9Keidtiho+Ad6/dzJu+jKDys
-Z6cthOECgYEAxMUCIYKO74BtPRN2r7KxbSjHzFsasxbfwkSg4Qefd4UoZJX2ShlL
-cClnef3WdkKxtShJhqEPaKTYTrfgM+iz/a9+3lAFnS4EZawSf3YgXXslVTory0Da
-yPQZKxX6XsupaLl4s13ehw/D0qfdxWVYaiFad3ePEE4ytmSkMMHLHo8CgYEA3X4a
-jMWVbVv1W1lj+LFcg7AhU7lHgla+p7NI4gHw9V783noafnW7/8pNF80kshYo4u0g
-aJRwaU/Inr5uw14eAyEjB4X7N8AE5wGmcxxS2uluGG6r3oyQSJBqktGnLwyTfcfC
-XrfsGJza2BRGF4Mn8SFb7WtCl3f1qu0hTF+mC1ECgYB4oA1eXZsiV6if+H6Z1wHN
-2WIidPc5MpyZi1jUmse3jXnlr8j8Q+VrLPayYlpGxTwLwlbQoYvAqs2v9CkNqWot
-6pfr0UKfyMYJTiNI4DGXHRcV2ENgprF436tOLnr+AfwopwrHapQwWAnD6gSaLja1
-WR0Mf87EQCv2hFvjR+otIQKBgQCLyvJQ1MeZzQdPT1zkcnSUfM6b+/1hCwSr7WDb
-nCQLiZcJh4E/PWmZaII9unEloQzPJKBmwQEtxng1kLVxwu4oRXrJXcuPhTbS4dy/
-HCpDFj8xVnBNNuQ9mEBbR80/ya0xHqnThDuT0TPiWvFeF55W9xoA/8h4tvKrnZx9
-ioTO8QKBgCMqRa5pHb+vCniTWUTz9JZRnRsdq7fRSsJHngMe5gOR4HylyAmmqKrd
-kEXfkdu9TH2jxSWcZbHUPVwKfOUqQUZMz0pml0DIs1kedUDFanTZ8Rgg5SGUHBW0
-5bNCq64tKMmw6GiicaAGqd04OPo85WD9h8mPhM1Jdv/UmTV+HFAr
------END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedcert4.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedcert4.pem
deleted file mode 100644
index 5bffd67..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedcert4.pem
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDtzCCAp+gAwIBAgIUBLqh6ctGWVDUxFhxJX7m6S/bnrcwDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwOTI2WhgPMjA2
-NjA0MjAxNTA5NTZaMBsxGTAXBgNVBAMTEGNlcnQubXl2YXVsdC5jb20wggEiMA0G
-CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDY3gPB29kkdbu0mPO6J0efagQhSiXB
-9OyDuLf5sMk6CVDWVWal5hISkyBmw/lXgF7qC2XFKivpJOrcGQd5Ep9otBqyJLzI
-b0IWdXuPIrVnXDwcdWr86ybX2iC42zKWfbXgjzGijeAVpl0UJLKBj+fk5q6NvkRL
-5FUL6TRV7Krn9mrmnrV9J5IqV15pTd9W2aVJ6IqWvIPCACtZKulqWn4707uy2X2W
-1Stq/5qnp1pDshiGk1VPyxCwQ6yw3iEcgecbYo3vQfhWcv7Q8LpSIM9ZYpXu6OmF
-+czqRZS9gERl+wipmmrN1MdYVrTuQem21C/PNZ4jo4XUk1SFx6JrcA+lAgMBAAGj
-gfUwgfIwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSe
-Cl9WV3BjGCwmS/KrDSLRjfwyqjAfBgNVHSMEGDAWgBTrirz4lBtOoh5EVwDnjIlH
-QXxxcjA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAKGH2h0dHA6Ly8xMjcuMC4w
-LjE6ODIwMC92MS9wa2kvY2EwIQYDVR0RBBowGIIQY2VydC5teXZhdWx0LmNvbYcE
-fwAAATAxBgNVHR8EKjAoMCagJKAihiBodHRwOi8vMTI3LjAuMC4xOjgyMDAvdjEv
-cGtpL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAWGholPN8buDYwKbUiDavbzjsxUIX
-lU4MxEqOHw7CD3qIYIauPboLvB9EldBQwhgOOy607Yvdg3rtyYwyBFwPhHo/hK3Z
-6mn4hc6TF2V+AUdHBvGzp2dbYLeo8noVoWbQ/lBulggwlIHNNF6+a3kALqsqk1Ch
-f/hzsjFnDhAlNcYFgG8TgfE2lE/FckvejPqBffo7Q3I+wVAw0buqiz5QL81NOT+D
-Y2S9LLKLRaCsWo9wRU1Az4Rhd7vK5SEMh16jJ82GyEODWPvuxOTI1MnzfnbWyLYe
-TTp6YBjGMVf1I6NEcWNur7U17uIOiQjMZ9krNvoMJ1A/cxCoZ98QHgcIPg==
------END CERTIFICATE-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedkey4.pem b/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedkey4.pem
deleted file mode 100644
index 58e7f8d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/test-fixtures/testissuedkey4.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEA2N4DwdvZJHW7tJjzuidHn2oEIUolwfTsg7i3+bDJOglQ1lVm
-peYSEpMgZsP5V4Be6gtlxSor6STq3BkHeRKfaLQasiS8yG9CFnV7jyK1Z1w8HHVq
-/Osm19oguNsyln214I8xoo3gFaZdFCSygY/n5Oaujb5ES+RVC+k0Veyq5/Zq5p61
-fSeSKldeaU3fVtmlSeiKlryDwgArWSrpalp+O9O7stl9ltUrav+ap6daQ7IYhpNV
-T8sQsEOssN4hHIHnG2KN70H4VnL+0PC6UiDPWWKV7ujphfnM6kWUvYBEZfsIqZpq
-zdTHWFa07kHpttQvzzWeI6OF1JNUhceia3APpQIDAQABAoIBAQCH3vEzr+3nreug
-RoPNCXcSJXXY9X+aeT0FeeGqClzIg7Wl03OwVOjVwl/2gqnhbIgK0oE8eiNwurR6
-mSPZcxV0oAJpwiKU4T/imlCDaReGXn86xUX2l82KRxthNdQH/VLKEmzij0jpx4Vh
-bWx5SBPdkbmjDKX1dmTiRYWIn/KjyNPvNvmtwdi8Qluhf4eJcNEUr2BtblnGOmfL
-FdSu+brPJozpoQ1QdDnbAQRgqnh7Shl0tT85whQi0uquqIj1gEOGVjmBvDDnL3GV
-WOENTKqsmIIoEzdZrql1pfmYTk7WNaD92bfpN128j8BF7RmAV4/DphH0pvK05y9m
-tmRhyHGxAoGBAOV2BBocsm6xup575VqmFN+EnIOiTn+haOvfdnVsyQHnth63fOQx
-PNtMpTPR1OMKGpJ13e2bV0IgcYRsRkScVkUtoa/17VIgqZXffnJJ0A/HT67uKBq3
-8o7RrtyK5N20otw0lZHyqOPhyCdpSsurDhNON1kPVJVYY4N1RiIxfut/AoGBAPHz
-HfsJ5ZkyELE9N/r4fce04lprxWH+mQGK0/PfjS9caXPhj/r5ZkVMvzWesF3mmnY8
-goE5S35TuTvV1+6rKGizwlCFAQlyXJiFpOryNWpLwCmDDSzLcm+sToAlML3tMgWU
-jM3dWHx3C93c3ft4rSWJaUYI9JbHsMzDW6Yh+GbbAoGBANIbKwxh5Hx5XwEJP2yu
-kIROYCYkMy6otHLujgBdmPyWl+suZjxoXWoMl2SIqR8vPD+Jj6mmyNJy9J6lqf3f
-DRuQ+fEuBZ1i7QWfvJ+XuN0JyovJ5Iz6jC58D1pAD+p2IX3y5FXcVQs8zVJRFjzB
-p0TEJOf2oqORaKWRd6ONoMKvAoGALKu6aVMWdQZtVov6/fdLIcgf0pn7Q3CCR2qe
-X3Ry2L+zKJYIw0mwvDLDSt8VqQCenB3n6nvtmFFU7ds5lvM67rnhsoQcAOaAehiS
-rl4xxoJd5Ewx7odRhZTGmZpEOYzFo4odxRSM9c30/u18fqV1Mm0AZtHYds4/sk6P
-aUj0V+kCgYBMpGrJk8RSez5g0XZ35HfpI4ENoWbiwB59FIpWsLl2LADEh29eC455
-t9Muq7MprBVBHQo11TMLLFxDIjkuMho/gcKgpYXCt0LfiNm8EZehvLJUXH+3WqUx
-we6ywrbFCs6LaxaOCtTiLsN+GbZCatITL0UJaeBmTAbiw0KQjUuZPQ==
------END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go
deleted file mode 100644
index b53e95f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package github
-
-import (
- "context"
-
- "github.com/google/go-github/github"
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "golang.org/x/oauth2"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *backend {
- var b backend
- b.TeamMap = &framework.PolicyMap{
- PathMap: framework.PathMap{
- Name: "teams",
- },
- DefaultKey: "default",
- }
-
- b.UserMap = &framework.PolicyMap{
- PathMap: framework.PathMap{
- Name: "users",
- },
- DefaultKey: "default",
- }
-
- allPaths := append(b.TeamMap.Paths(), b.UserMap.Paths()...)
-
- b.Backend = &framework.Backend{
- Help: backendHelp,
-
- PathsSpecial: &logical.Paths{
- Unauthenticated: []string{
- "login",
- },
- },
-
- Paths: append([]*framework.Path{
- pathConfig(&b),
- pathLogin(&b),
- }, allPaths...),
-
- AuthRenew: b.pathLoginRenew,
- BackendType: logical.TypeCredential,
- }
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-
- TeamMap *framework.PolicyMap
-
- UserMap *framework.PolicyMap
-}
-
-// Client returns the GitHub client to communicate to GitHub via the
-// configured settings.
-func (b *backend) Client(token string) (*github.Client, error) {
- tc := cleanhttp.DefaultClient()
- if token != "" {
- ctx := context.WithValue(context.Background(), oauth2.HTTPClient, tc)
- tc = oauth2.NewClient(ctx, &tokenSource{Value: token})
- }
-
- return github.NewClient(tc), nil
-}
-
-// tokenSource is an oauth2.TokenSource implementation.
-type tokenSource struct {
- Value string
-}
-
-func (t *tokenSource) Token() (*oauth2.Token, error) {
- return &oauth2.Token{AccessToken: t.Value}, nil
-}
-
-const backendHelp = `
-The GitHub credential provider allows authentication via GitHub.
-
-Users provide a personal access token to log in, and the credential
-provider verifies they're part of the correct organization and then
-maps the user to a set of Vault policies according to the teams they're
-part of.
-
-After enabling the credential provider, use the "config" route to
-configure it.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go
deleted file mode 100644
index 6dd7da8..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package github
-
-import (
- "fmt"
- "os"
- "strings"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
-)
-
-func TestBackend_Config(t *testing.T) {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 2
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- login_data := map[string]interface{}{
- // This token has to be replaced with a working token for the test to work.
- "token": os.Getenv("GITHUB_TOKEN"),
- }
- config_data1 := map[string]interface{}{
- "organization": os.Getenv("GITHUB_ORG"),
- "ttl": "",
- "max_ttl": "",
- }
- expectedTTL1, _ := time.ParseDuration("24h0m0s")
- config_data2 := map[string]interface{}{
- "organization": os.Getenv("GITHUB_ORG"),
- "ttl": "1h",
- "max_ttl": "2h",
- }
- expectedTTL2, _ := time.ParseDuration("1h0m0s")
- config_data3 := map[string]interface{}{
- "organization": os.Getenv("GITHUB_ORG"),
- "ttl": "50h",
- "max_ttl": "50h",
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Backend: b,
- Steps: []logicaltest.TestStep{
- testConfigWrite(t, config_data1),
- testLoginWrite(t, login_data, expectedTTL1.Nanoseconds(), false),
- testConfigWrite(t, config_data2),
- testLoginWrite(t, login_data, expectedTTL2.Nanoseconds(), false),
- testConfigWrite(t, config_data3),
- testLoginWrite(t, login_data, 0, true),
- },
- })
-}
-
-func testLoginWrite(t *testing.T, d map[string]interface{}, expectedTTL int64, expectFail bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login",
- ErrorOk: true,
- Data: d,
- Check: func(resp *logical.Response) error {
- if resp.IsError() && expectFail {
- return nil
- }
- var actualTTL int64
- actualTTL = resp.Auth.LeaseOptions.TTL.Nanoseconds()
- if actualTTL != expectedTTL {
- return fmt.Errorf("TTL mismatched. Expected: %d Actual: %d", expectedTTL, resp.Auth.LeaseOptions.TTL.Nanoseconds())
- }
- return nil
- },
- }
-}
-
-func testConfigWrite(t *testing.T, d map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config",
- Data: d,
- }
-}
-
-func TestBackend_basic(t *testing.T) {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 32
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, false),
- testAccMap(t, "default", "fakepol"),
- testAccMap(t, "oWnErs", "fakepol"),
- testAccLogin(t, []string{"default", "fakepol"}),
- testAccStepConfig(t, true),
- testAccMap(t, "default", "fakepol"),
- testAccMap(t, "oWnErs", "fakepol"),
- testAccLogin(t, []string{"default", "fakepol"}),
- testAccStepConfigWithBaseURL(t),
- testAccMap(t, "default", "fakepol"),
- testAccMap(t, "oWnErs", "fakepol"),
- testAccLogin(t, []string{"default", "fakepol"}),
- testAccMap(t, "default", "fakepol"),
- testAccStepConfig(t, true),
- mapUserToPolicy(t, os.Getenv("GITHUB_USER"), "userpolicy"),
- testAccLogin(t, []string{"default", "fakepol", "userpolicy"}),
- },
- })
-}
-
-func testAccPreCheck(t *testing.T) {
- if v := os.Getenv("GITHUB_TOKEN"); v == "" {
- t.Skip("GITHUB_TOKEN must be set for acceptance tests")
- }
-
- if v := os.Getenv("GITHUB_ORG"); v == "" {
- t.Skip("GITHUB_ORG must be set for acceptance tests")
- }
-
- if v := os.Getenv("GITHUB_BASEURL"); v == "" {
- t.Skip("GITHUB_BASEURL must be set for acceptance tests (use 'https://api.github.com' if you don't know what you're doing)")
- }
-}
-
-func testAccStepConfig(t *testing.T, upper bool) logicaltest.TestStep {
- ts := logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config",
- Data: map[string]interface{}{
- "organization": os.Getenv("GITHUB_ORG"),
- },
- }
- if upper {
- ts.Data["organization"] = strings.ToUpper(os.Getenv("GITHUB_ORG"))
- }
- return ts
-}
-
-func testAccStepConfigWithBaseURL(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config",
- Data: map[string]interface{}{
- "organization": os.Getenv("GITHUB_ORG"),
- "base_url": os.Getenv("GITHUB_BASEURL"),
- },
- }
-}
-
-func testAccMap(t *testing.T, k string, v string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "map/teams/" + k,
- Data: map[string]interface{}{
- "value": v,
- },
- }
-}
-
-func mapUserToPolicy(t *testing.T, k string, v string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "map/users/" + k,
- Data: map[string]interface{}{
- "value": v,
- },
- }
-}
-
-func testAccLogin(t *testing.T, policies []string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login",
- Data: map[string]interface{}{
- "token": os.Getenv("GITHUB_TOKEN"),
- },
- Unauthenticated: true,
-
- Check: logicaltest.TestCheckAuth(policies),
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go
deleted file mode 100644
index 557939b..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package github
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/hashicorp/vault/api"
-)
-
-type CLIHandler struct{}
-
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
- mount, ok := m["mount"]
- if !ok {
- mount = "github"
- }
-
- token, ok := m["token"]
- if !ok {
- if token = os.Getenv("VAULT_AUTH_GITHUB_TOKEN"); token == "" {
- return nil, fmt.Errorf("GitHub token should be provided either as 'value' for 'token' key,\nor via an env var VAULT_AUTH_GITHUB_TOKEN")
- }
- }
-
- path := fmt.Sprintf("auth/%s/login", mount)
- secret, err := c.Logical().Write(path, map[string]interface{}{
- "token": token,
- })
- if err != nil {
- return nil, err
- }
- if secret == nil {
- return nil, fmt.Errorf("empty response from credential provider")
- }
-
- return secret, nil
-}
-
-func (h *CLIHandler) Help() string {
- help := `
-The GitHub credential provider allows you to authenticate with GitHub.
-To use it, specify the "token" parameter. The value should be a personal access
-token for your GitHub account. You can generate a personal access token on your
-account settings page on GitHub.
-
- Example: vault auth -method=github token=
-
-Key/Value Pairs:
-
- mount=github The mountpoint for the GitHub credential provider.
- Defaults to "github"
-
- token= The GitHub personal access token for authentication.
- `
-
- return strings.TrimSpace(help)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go
deleted file mode 100644
index c211450..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package github
-
-import (
- "fmt"
- "net/url"
- "time"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfig(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config",
- Fields: map[string]*framework.FieldSchema{
- "organization": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The organization users must be part of",
- },
-
- "base_url": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The API endpoint to use. Useful if you
-are running GitHub Enterprise or an
-API-compatible authentication server.`,
- },
- "ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Duration after which authentication will be expired`,
- },
- "max_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Maximum duration after which authentication will be expired`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathConfigWrite,
- logical.ReadOperation: b.pathConfigRead,
- },
- }
-}
-
-func (b *backend) pathConfigWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- organization := data.Get("organization").(string)
- baseURL := data.Get("base_url").(string)
- if len(baseURL) != 0 {
- _, err := url.Parse(baseURL)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error parsing given base_url: %s", err)), nil
- }
- }
-
- var ttl time.Duration
- var err error
- ttlRaw, ok := data.GetOk("ttl")
- if !ok || len(ttlRaw.(string)) == 0 {
- ttl = 0
- } else {
- ttl, err = time.ParseDuration(ttlRaw.(string))
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Invalid 'ttl':%s", err)), nil
- }
- }
-
- var maxTTL time.Duration
- maxTTLRaw, ok := data.GetOk("max_ttl")
- if !ok || len(maxTTLRaw.(string)) == 0 {
- maxTTL = 0
- } else {
- maxTTL, err = time.ParseDuration(maxTTLRaw.(string))
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Invalid 'max_ttl':%s", err)), nil
- }
- }
-
- entry, err := logical.StorageEntryJSON("config", config{
- Organization: organization,
- BaseURL: baseURL,
- TTL: ttl,
- MaxTTL: maxTTL,
- })
-
- if err != nil {
- return nil, err
- }
-
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathConfigRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- config, err := b.Config(req.Storage)
- if err != nil {
- return nil, err
- }
-
- if config == nil {
- return nil, fmt.Errorf("configuration object not found")
- }
-
- config.TTL /= time.Second
- config.MaxTTL /= time.Second
-
- resp := &logical.Response{
- Data: structs.New(config).Map(),
- }
- return resp, nil
-}
-
-// Config returns the configuration for this backend.
-func (b *backend) Config(s logical.Storage) (*config, error) {
- entry, err := s.Get("config")
- if err != nil {
- return nil, err
- }
-
- var result config
- if entry != nil {
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, fmt.Errorf("error reading configuration: %s", err)
- }
- }
-
- return &result, nil
-}
-
-type config struct {
- Organization string `json:"organization" structs:"organization" mapstructure:"organization"`
- BaseURL string `json:"base_url" structs:"base_url" mapstructure:"base_url"`
- TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
- MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/path_login.go
deleted file mode 100644
index 0e212ab..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/github/path_login.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package github
-
-import (
- "context"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/google/go-github/github"
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathLogin(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "login",
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "GitHub personal API token",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLogin,
- },
- }
-}
-
-func (b *backend) pathLogin(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
-
- token := data.Get("token").(string)
-
- var verifyResp *verifyCredentialsResp
- if verifyResponse, resp, err := b.verifyCredentials(req, token); err != nil {
- return nil, err
- } else if resp != nil {
- return resp, nil
- } else {
- verifyResp = verifyResponse
- }
-
- config, err := b.Config(req.Storage)
- if err != nil {
- return nil, err
- }
-
- ttl, _, err := b.SanitizeTTLStr(config.TTL.String(), config.MaxTTL.String())
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("error sanitizing TTLs: %s", err)), nil
- }
-
- return &logical.Response{
- Auth: &logical.Auth{
- InternalData: map[string]interface{}{
- "token": token,
- },
- Policies: verifyResp.Policies,
- Metadata: map[string]string{
- "username": *verifyResp.User.Login,
- "org": *verifyResp.Org.Login,
- },
- DisplayName: *verifyResp.User.Login,
- LeaseOptions: logical.LeaseOptions{
- TTL: ttl,
- Renewable: true,
- },
- },
- }, nil
-}
-
-func (b *backend) pathLoginRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- if req.Auth == nil {
- return nil, fmt.Errorf("request auth was nil")
- }
-
- tokenRaw, ok := req.Auth.InternalData["token"]
- if !ok {
- return nil, fmt.Errorf("token created in previous version of Vault cannot be validated properly at renewal time")
- }
- token := tokenRaw.(string)
-
- var verifyResp *verifyCredentialsResp
- if verifyResponse, resp, err := b.verifyCredentials(req, token); err != nil {
- return nil, err
- } else if resp != nil {
- return resp, nil
- } else {
- verifyResp = verifyResponse
- }
- if !policyutil.EquivalentPolicies(verifyResp.Policies, req.Auth.Policies) {
- return nil, fmt.Errorf("policies do not match")
- }
-
- config, err := b.Config(req.Storage)
- if err != nil {
- return nil, err
- }
- return framework.LeaseExtend(config.TTL, config.MaxTTL, b.System())(req, d)
-}
-
-func (b *backend) verifyCredentials(req *logical.Request, token string) (*verifyCredentialsResp, *logical.Response, error) {
- config, err := b.Config(req.Storage)
- if err != nil {
- return nil, nil, err
- }
- if config.Organization == "" {
- return nil, logical.ErrorResponse(
- "configure the github credential backend first"), nil
- }
-
- client, err := b.Client(token)
- if err != nil {
- return nil, nil, err
- }
-
- if config.BaseURL != "" {
- parsedURL, err := url.Parse(config.BaseURL)
- if err != nil {
- return nil, nil, fmt.Errorf("Successfully parsed base_url when set but failing to parse now: %s", err)
- }
- client.BaseURL = parsedURL
- }
-
- // Get the user
- user, _, err := client.Users.Get(context.Background(), "")
- if err != nil {
- return nil, nil, err
- }
-
- // Verify that the user is part of the organization
- var org *github.Organization
-
- orgOpt := &github.ListOptions{
- PerPage: 100,
- }
-
- var allOrgs []*github.Organization
- for {
- orgs, resp, err := client.Organizations.List(context.Background(), "", orgOpt)
- if err != nil {
- return nil, nil, err
- }
- allOrgs = append(allOrgs, orgs...)
- if resp.NextPage == 0 {
- break
- }
- orgOpt.Page = resp.NextPage
- }
-
- for _, o := range allOrgs {
- if strings.ToLower(*o.Login) == strings.ToLower(config.Organization) {
- org = o
- break
- }
- }
- if org == nil {
- return nil, logical.ErrorResponse("user is not part of required org"), nil
- }
-
- // Get the teams that this user is part of to determine the policies
- var teamNames []string
-
- teamOpt := &github.ListOptions{
- PerPage: 100,
- }
-
- var allTeams []*github.Team
- for {
- teams, resp, err := client.Organizations.ListUserTeams(context.Background(), teamOpt)
- if err != nil {
- return nil, nil, err
- }
- allTeams = append(allTeams, teams...)
- if resp.NextPage == 0 {
- break
- }
- teamOpt.Page = resp.NextPage
- }
-
- for _, t := range allTeams {
- // We only care about teams that are part of the organization we use
- if *t.Organization.ID != *org.ID {
- continue
- }
-
- // Append the names so we can get the policies
- teamNames = append(teamNames, *t.Name)
- if *t.Name != *t.Slug {
- teamNames = append(teamNames, *t.Slug)
- }
- }
-
- groupPoliciesList, err := b.TeamMap.Policies(req.Storage, teamNames...)
-
- if err != nil {
- return nil, nil, err
- }
-
- userPoliciesList, err := b.UserMap.Policies(req.Storage, []string{*user.Login}...)
-
- if err != nil {
- return nil, nil, err
- }
-
- return &verifyCredentialsResp{
- User: user,
- Org: org,
- Policies: append(groupPoliciesList, userPoliciesList...),
- }, nil, nil
-}
-
-type verifyCredentialsResp struct {
- User *github.User
- Org *github.Organization
- Policies []string
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go
deleted file mode 100644
index 835b4a6..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go
+++ /dev/null
@@ -1,420 +0,0 @@
-package ldap
-
-import (
- "bytes"
- "fmt"
- "text/template"
-
- "github.com/go-ldap/ldap"
- "github.com/hashicorp/vault/helper/mfa"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: backendHelp,
-
- PathsSpecial: &logical.Paths{
- Root: mfa.MFARootPaths(),
-
- Unauthenticated: []string{
- "login/*",
- },
- },
-
- Paths: append([]*framework.Path{
- pathConfig(&b),
- pathGroups(&b),
- pathGroupsList(&b),
- pathUsers(&b),
- pathUsersList(&b),
- },
- mfa.MFAPaths(b.Backend, pathLogin(&b))...,
- ),
-
- AuthRenew: b.pathLoginRenew,
- BackendType: logical.TypeCredential,
- }
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-}
-
-func EscapeLDAPValue(input string) string {
- // RFC4514 forbids un-escaped:
- // - leading space or hash
- // - trailing space
- // - special characters '"', '+', ',', ';', '<', '>', '\\'
- // - null
- for i := 0; i < len(input); i++ {
- escaped := false
- if input[i] == '\\' {
- i++
- escaped = true
- }
- switch input[i] {
- case '"', '+', ',', ';', '<', '>', '\\':
- if !escaped {
- input = input[0:i] + "\\" + input[i:]
- i++
- }
- continue
- }
- if escaped {
- input = input[0:i] + "\\" + input[i:]
- i++
- }
- }
- if input[0] == ' ' || input[0] == '#' {
- input = "\\" + input
- }
- if input[len(input)-1] == ' ' {
- input = input[0:len(input)-1] + "\\ "
- }
- return input
-}
-
-func (b *backend) Login(req *logical.Request, username string, password string) ([]string, *logical.Response, error) {
-
- cfg, err := b.Config(req)
- if err != nil {
- return nil, nil, err
- }
- if cfg == nil {
- return nil, logical.ErrorResponse("ldap backend not configured"), nil
- }
-
- c, err := cfg.DialLDAP()
- if err != nil {
- return nil, logical.ErrorResponse(err.Error()), nil
- }
- if c == nil {
- return nil, logical.ErrorResponse("invalid connection returned from LDAP dial"), nil
- }
-
- // Clean connection
- defer c.Close()
-
- userBindDN, err := b.getUserBindDN(cfg, c, username)
- if err != nil {
- return nil, logical.ErrorResponse(err.Error()), nil
- }
-
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/ldap: User BindDN fetched", "username", username, "binddn", userBindDN)
- }
-
- if cfg.DenyNullBind && len(password) == 0 {
- return nil, logical.ErrorResponse("password cannot be of zero length when passwordless binds are being denied"), nil
- }
-
- // Try to bind as the login user. This is where the actual authentication takes place.
- if len(password) > 0 {
- err = c.Bind(userBindDN, password)
- } else {
- err = c.UnauthenticatedBind(userBindDN)
- }
- if err != nil {
- return nil, logical.ErrorResponse(fmt.Sprintf("LDAP bind failed: %v", err)), nil
- }
-
- // We re-bind to the BindDN if it's defined because we assume
- // the BindDN should be the one to search, not the user logging in.
- if cfg.BindDN != "" && cfg.BindPassword != "" {
- if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil {
- return nil, logical.ErrorResponse(fmt.Sprintf("Encountered an error while attempting to re-bind with the BindDN User: %s", err.Error())), nil
- }
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/ldap: Re-Bound to original BindDN")
- }
- }
-
- userDN, err := b.getUserDN(cfg, c, userBindDN)
- if err != nil {
- return nil, logical.ErrorResponse(err.Error()), nil
- }
-
- ldapGroups, err := b.getLdapGroups(cfg, c, userDN, username)
- if err != nil {
- return nil, logical.ErrorResponse(err.Error()), nil
- }
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/ldap: Groups fetched from server", "num_server_groups", len(ldapGroups), "server_groups", ldapGroups)
- }
-
- ldapResponse := &logical.Response{
- Data: map[string]interface{}{},
- }
- if len(ldapGroups) == 0 {
- errString := fmt.Sprintf(
- "no LDAP groups found in groupDN '%s'; only policies from locally-defined groups available",
- cfg.GroupDN)
- ldapResponse.AddWarning(errString)
- }
-
- var allGroups []string
- // Import the custom added groups from ldap backend
- user, err := b.User(req.Storage, username)
- if err == nil && user != nil && user.Groups != nil {
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/ldap: adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups)
- }
- allGroups = append(allGroups, user.Groups...)
- }
- // Merge local and LDAP groups
- allGroups = append(allGroups, ldapGroups...)
-
- // Retrieve policies
- var policies []string
- for _, groupName := range allGroups {
- group, err := b.Group(req.Storage, groupName)
- if err == nil && group != nil {
- policies = append(policies, group.Policies...)
- }
- }
- if user != nil && user.Policies != nil {
- policies = append(policies, user.Policies...)
- }
- // Policies from each group may overlap
- policies = strutil.RemoveDuplicates(policies, true)
-
- if len(policies) == 0 {
- errStr := "user is not a member of any authorized group"
- if len(ldapResponse.Warnings) > 0 {
- errStr = fmt.Sprintf("%s; additionally, %s", errStr, ldapResponse.Warnings[0])
- }
-
- ldapResponse.Data["error"] = errStr
- return nil, ldapResponse, nil
- }
-
- return policies, ldapResponse, nil
-}
-
-/*
- * Parses a distinguished name and returns the CN portion.
- * Given a non-conforming string (such as an already-extracted CN),
- * it will be returned as-is.
- */
-func (b *backend) getCN(dn string) string {
- parsedDN, err := ldap.ParseDN(dn)
- if err != nil || len(parsedDN.RDNs) == 0 {
- // It was already a CN, return as-is
- return dn
- }
-
- for _, rdn := range parsedDN.RDNs {
- for _, rdnAttr := range rdn.Attributes {
- if rdnAttr.Type == "CN" {
- return rdnAttr.Value
- }
- }
- }
-
- // Default, return self
- return dn
-}
-
-/*
- * Discover and return the bind string for the user attempting to authenticate.
- * This is handled in one of several ways:
- *
- * 1. If DiscoverDN is set, the user object will be searched for using userdn (base search path)
- * and userattr (the attribute that maps to the provided username).
- * The bind will either be anonymous or use binddn and bindpassword if they were provided.
- * 2. If upndomain is set, the user dn is constructed as 'username@upndomain'. See https://msdn.microsoft.com/en-us/library/cc223499.aspx
- *
- */
-func (b *backend) getUserBindDN(cfg *ConfigEntry, c *ldap.Conn, username string) (string, error) {
- bindDN := ""
- if cfg.DiscoverDN || (cfg.BindDN != "" && cfg.BindPassword != "") {
- var err error
- if cfg.BindPassword != "" {
- err = c.Bind(cfg.BindDN, cfg.BindPassword)
- } else {
- err = c.UnauthenticatedBind(cfg.BindDN)
- }
- if err != nil {
- return bindDN, fmt.Errorf("LDAP bind (service) failed: %v", err)
- }
-
- filter := fmt.Sprintf("(%s=%s)", cfg.UserAttr, ldap.EscapeFilter(username))
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/ldap: Discovering user", "userdn", cfg.UserDN, "filter", filter)
- }
- result, err := c.Search(&ldap.SearchRequest{
- BaseDN: cfg.UserDN,
- Scope: 2, // subtree
- Filter: filter,
- })
- if err != nil {
- return bindDN, fmt.Errorf("LDAP search for binddn failed: %v", err)
- }
- if len(result.Entries) != 1 {
- return bindDN, fmt.Errorf("LDAP search for binddn 0 or not unique")
- }
- bindDN = result.Entries[0].DN
- } else {
- if cfg.UPNDomain != "" {
- bindDN = fmt.Sprintf("%s@%s", EscapeLDAPValue(username), cfg.UPNDomain)
- } else {
- bindDN = fmt.Sprintf("%s=%s,%s", cfg.UserAttr, EscapeLDAPValue(username), cfg.UserDN)
- }
- }
-
- return bindDN, nil
-}
-
-/*
- * Returns the DN of the object representing the authenticated user.
- */
-func (b *backend) getUserDN(cfg *ConfigEntry, c *ldap.Conn, bindDN string) (string, error) {
- userDN := ""
- if cfg.UPNDomain != "" {
- // Find the distinguished name for the user if userPrincipalName used for login
- filter := fmt.Sprintf("(userPrincipalName=%s)", ldap.EscapeFilter(bindDN))
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/ldap: Searching UPN", "userdn", cfg.UserDN, "filter", filter)
- }
- result, err := c.Search(&ldap.SearchRequest{
- BaseDN: cfg.UserDN,
- Scope: 2, // subtree
- Filter: filter,
- })
- if err != nil {
- return userDN, fmt.Errorf("LDAP search failed for detecting user: %v", err)
- }
- for _, e := range result.Entries {
- userDN = e.DN
- }
- } else {
- userDN = bindDN
- }
-
- return userDN, nil
-}
-
-/*
- * getLdapGroups queries LDAP and returns a slice describing the set of groups the authenticated user is a member of.
- *
- * The search query is constructed according to cfg.GroupFilter, and run in context of cfg.GroupDN.
- * Groups will be resolved from the query results by following the attribute defined in cfg.GroupAttr.
- *
- * cfg.GroupFilter is a go template and is compiled with the following context: [UserDN, Username]
- * UserDN - The DN of the authenticated user
- * Username - The Username of the authenticated user
- *
- * Example:
- * cfg.GroupFilter = "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))"
- * cfg.GroupDN = "OU=Groups,DC=myorg,DC=com"
- * cfg.GroupAttr = "cn"
- *
- * NOTE - If cfg.GroupFilter is empty, no query is performed and an empty result slice is returned.
- *
- */
-func (b *backend) getLdapGroups(cfg *ConfigEntry, c *ldap.Conn, userDN string, username string) ([]string, error) {
- // retrieve the groups in a string/bool map as a structure to avoid duplicates inside
- ldapMap := make(map[string]bool)
-
- if cfg.GroupFilter == "" {
- b.Logger().Warn("auth/ldap: GroupFilter is empty, will not query server")
- return make([]string, 0), nil
- }
-
- if cfg.GroupDN == "" {
- b.Logger().Warn("auth/ldap: GroupDN is empty, will not query server")
- return make([]string, 0), nil
- }
-
- // If groupfilter was defined, resolve it as a Go template and use the query for
- // returning the user's groups
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/ldap: Compiling group filter", "group_filter", cfg.GroupFilter)
- }
-
- // Parse the configuration as a template.
- // Example template "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))"
- t, err := template.New("queryTemplate").Parse(cfg.GroupFilter)
- if err != nil {
- return nil, fmt.Errorf("LDAP search failed due to template compilation error: %v", err)
- }
-
- // Build context to pass to template - we will be exposing UserDn and Username.
- context := struct {
- UserDN string
- Username string
- }{
- ldap.EscapeFilter(userDN),
- ldap.EscapeFilter(username),
- }
-
- var renderedQuery bytes.Buffer
- t.Execute(&renderedQuery, context)
-
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/ldap: Searching", "groupdn", cfg.GroupDN, "rendered_query", renderedQuery.String())
- }
-
- result, err := c.Search(&ldap.SearchRequest{
- BaseDN: cfg.GroupDN,
- Scope: 2, // subtree
- Filter: renderedQuery.String(),
- Attributes: []string{
- cfg.GroupAttr,
- },
- })
- if err != nil {
- return nil, fmt.Errorf("LDAP search failed: %v", err)
- }
-
- for _, e := range result.Entries {
- dn, err := ldap.ParseDN(e.DN)
- if err != nil || len(dn.RDNs) == 0 {
- continue
- }
-
- // Enumerate attributes of each result, parse out CN and add as group
- values := e.GetAttributeValues(cfg.GroupAttr)
- if len(values) > 0 {
- for _, val := range values {
- groupCN := b.getCN(val)
- ldapMap[groupCN] = true
- }
- } else {
- // If groupattr didn't resolve, use self (enumerating group objects)
- groupCN := b.getCN(e.DN)
- ldapMap[groupCN] = true
- }
- }
-
- ldapGroups := make([]string, 0, len(ldapMap))
- for key, _ := range ldapMap {
- ldapGroups = append(ldapGroups, key)
- }
-
- return ldapGroups, nil
-}
-
-const backendHelp = `
-The "ldap" credential provider allows authentication querying
-a LDAP server, checking username and password, and associating groups
-to set of policies.
-
-Configuration of the server is done through the "config" and "groups"
-endpoints by a user with root access. Authentication is then done
-by suppying the two fields for "login".
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go
deleted file mode 100644
index 3b1d936..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go
+++ /dev/null
@@ -1,541 +0,0 @@
-package ldap
-
-import (
- "fmt"
- "reflect"
- "sort"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/mitchellh/mapstructure"
-)
-
-func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
-
- b := Backend()
- if b == nil {
- t.Fatalf("failed to create backend")
- }
-
- err := b.Backend.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- return b, config.StorageView
-}
-
-func TestLdapAuthBackend_UserPolicies(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- configReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config",
- Data: map[string]interface{}{
- // Online LDAP test server
- // http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
- "url": "ldap://ldap.forumsys.com",
- "userattr": "uid",
- "userdn": "dc=example,dc=com",
- "groupdn": "dc=example,dc=com",
- "binddn": "cn=read-only-admin,dc=example,dc=com",
- },
- Storage: storage,
- }
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- groupReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Data: map[string]interface{}{
- "policies": "grouppolicy",
- },
- Path: "groups/engineers",
- Storage: storage,
- }
- resp, err = b.HandleRequest(groupReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- userReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Data: map[string]interface{}{
- "groups": "engineers",
- "policies": "userpolicy",
- },
- Path: "users/tesla",
- Storage: storage,
- }
-
- resp, err = b.HandleRequest(userReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- loginReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "login/tesla",
- Data: map[string]interface{}{
- "password": "password",
- },
- Storage: storage,
- }
-
- resp, err = b.HandleRequest(loginReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
- expected := []string{"grouppolicy", "userpolicy"}
- if !reflect.DeepEqual(expected, resp.Auth.Policies) {
- t.Fatalf("bad: policies: expected: %q, actual: %q", expected, resp.Auth.Policies)
- }
-}
-
-/*
- * Acceptance test for LDAP Auth Backend
- *
- * The tests here rely on a public LDAP server:
- * [http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/]
- *
- * ...as well as existence of a person object, `uid=tesla,dc=example,dc=com`,
- * which is a member of a group, `ou=scientists,dc=example,dc=com`
- *
- * Querying the server from the command line:
- * $ ldapsearch -x -H ldap://ldap.forumsys.com -b dc=example,dc=com -s sub \
- * '(&(objectClass=groupOfUniqueNames)(uniqueMember=uid=tesla,dc=example,dc=com))'
- *
- * $ ldapsearch -x -H ldap://ldap.forumsys.com -b dc=example,dc=com -s sub uid=tesla
- */
-func factory(t *testing.T) logical.Backend {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 32
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
- return b
-}
-
-func TestBackend_basic(t *testing.T) {
- b := factory(t)
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfigUrl(t),
- // Map Scientists group (from LDAP server) with foo policy
- testAccStepGroup(t, "Scientists", "foo"),
-
- // Map engineers group (local) with bar policy
- testAccStepGroup(t, "engineers", "bar"),
-
- // Map tesla user with local engineers group
- testAccStepUser(t, "tesla", "engineers"),
-
- // Authenticate
- testAccStepLogin(t, "tesla", "password"),
-
- // Verify both groups mappings can be listed back
- testAccStepGroupList(t, []string{"engineers", "Scientists"}),
-
- // Verify user mapping can be listed back
- testAccStepUserList(t, []string{"tesla"}),
- },
- })
-}
-
-func TestBackend_basic_authbind(t *testing.T) {
- b := factory(t)
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfigUrlWithAuthBind(t),
- testAccStepGroup(t, "Scientists", "foo"),
- testAccStepGroup(t, "engineers", "bar"),
- testAccStepUser(t, "tesla", "engineers"),
- testAccStepLogin(t, "tesla", "password"),
- },
- })
-}
-
-func TestBackend_basic_discover(t *testing.T) {
- b := factory(t)
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfigUrlWithDiscover(t),
- testAccStepGroup(t, "Scientists", "foo"),
- testAccStepGroup(t, "engineers", "bar"),
- testAccStepUser(t, "tesla", "engineers"),
- testAccStepLogin(t, "tesla", "password"),
- },
- })
-}
-
-func TestBackend_basic_nogroupdn(t *testing.T) {
- b := factory(t)
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfigUrlNoGroupDN(t),
- testAccStepGroup(t, "Scientists", "foo"),
- testAccStepGroup(t, "engineers", "bar"),
- testAccStepUser(t, "tesla", "engineers"),
- testAccStepLoginNoGroupDN(t, "tesla", "password"),
- },
- })
-}
-
-func TestBackend_groupCrud(t *testing.T) {
- b := factory(t)
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepGroup(t, "g1", "foo"),
- testAccStepReadGroup(t, "g1", "foo"),
- testAccStepDeleteGroup(t, "g1"),
- testAccStepReadGroup(t, "g1", ""),
- },
- })
-}
-
-/*
- * Test backend configuration defaults are successfully read.
- */
-func TestBackend_configDefaultsAfterUpdate(t *testing.T) {
- b := factory(t)
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config",
- Data: map[string]interface{}{},
- },
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "config",
- Check: func(resp *logical.Response) error {
- if resp == nil {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- // Test well-known defaults
- cfg := resp.Data
- defaultGroupFilter := "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))"
- if cfg["groupfilter"] != defaultGroupFilter {
- t.Errorf("Default mismatch: groupfilter. Expected: '%s', received :'%s'", defaultGroupFilter, cfg["groupfilter"])
- }
-
- defaultGroupAttr := "cn"
- if cfg["groupattr"] != defaultGroupAttr {
- t.Errorf("Default mismatch: groupattr. Expected: '%s', received :'%s'", defaultGroupAttr, cfg["groupattr"])
- }
-
- defaultUserAttr := "cn"
- if cfg["userattr"] != defaultUserAttr {
- t.Errorf("Default mismatch: userattr. Expected: '%s', received :'%s'", defaultUserAttr, cfg["userattr"])
- }
-
- defaultDenyNullBind := true
- if cfg["deny_null_bind"] != defaultDenyNullBind {
- t.Errorf("Default mismatch: deny_null_bind. Expected: '%s', received :'%s'", defaultDenyNullBind, cfg["deny_null_bind"])
- }
-
- return nil
- },
- },
- },
- })
-}
-
-func testAccStepConfigUrl(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config",
- Data: map[string]interface{}{
- // Online LDAP test server
- // http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
- "url": "ldap://ldap.forumsys.com",
- "userattr": "uid",
- "userdn": "dc=example,dc=com",
- "groupdn": "dc=example,dc=com",
- },
- }
-}
-
-func testAccStepConfigUrlWithAuthBind(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config",
- Data: map[string]interface{}{
- // Online LDAP test server
- // http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
- // In this test we also exercise multiple URL support
- "url": "foobar://ldap.example.com,ldap://ldap.forumsys.com",
- "userattr": "uid",
- "userdn": "dc=example,dc=com",
- "groupdn": "dc=example,dc=com",
- "binddn": "cn=read-only-admin,dc=example,dc=com",
- "bindpass": "password",
- },
- }
-}
-
-func testAccStepConfigUrlWithDiscover(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config",
- Data: map[string]interface{}{
- // Online LDAP test server
- // http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
- "url": "ldap://ldap.forumsys.com",
- "userattr": "uid",
- "userdn": "dc=example,dc=com",
- "groupdn": "dc=example,dc=com",
- "discoverdn": true,
- },
- }
-}
-
-func testAccStepConfigUrlNoGroupDN(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config",
- Data: map[string]interface{}{
- // Online LDAP test server
- // http://www.forumsys.com/tutorials/integration-how-to/ldap/online-ldap-test-server/
- "url": "ldap://ldap.forumsys.com",
- "userattr": "uid",
- "userdn": "dc=example,dc=com",
- "discoverdn": true,
- },
- }
-}
-
-func testAccStepGroup(t *testing.T, group string, policies string) logicaltest.TestStep {
- t.Logf("[testAccStepGroup] - Registering group %s, policy %s", group, policies)
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "groups/" + group,
- Data: map[string]interface{}{
- "policies": policies,
- },
- }
-}
-
-func testAccStepReadGroup(t *testing.T, group string, policies string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "groups/" + group,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if policies == "" {
- return nil
- }
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- Policies []string `mapstructure:"policies"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if !reflect.DeepEqual(d.Policies, policyutil.ParsePolicies(policies)) {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
-
-func testAccStepDeleteGroup(t *testing.T, group string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "groups/" + group,
- }
-}
-
-func TestBackend_userCrud(t *testing.T) {
- b := Backend()
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepUser(t, "g1", "bar"),
- testAccStepReadUser(t, "g1", "bar"),
- testAccStepDeleteUser(t, "g1"),
- testAccStepReadUser(t, "g1", ""),
- },
- })
-}
-
-func testAccStepUser(t *testing.T, user string, groups string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "users/" + user,
- Data: map[string]interface{}{
- "groups": groups,
- },
- }
-}
-
-func testAccStepReadUser(t *testing.T, user string, groups string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "users/" + user,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if groups == "" {
- return nil
- }
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- Groups string `mapstructure:"groups"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.Groups != groups {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
-
-func testAccStepDeleteUser(t *testing.T, user string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "users/" + user,
- }
-}
-
-func testAccStepLogin(t *testing.T, user string, pass string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login/" + user,
- Data: map[string]interface{}{
- "password": pass,
- },
- Unauthenticated: true,
-
- // Verifies user tesla maps to groups via local group (engineers) as well as remote group (Scientiests)
- Check: logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}),
- }
-}
-
-func testAccStepLoginNoGroupDN(t *testing.T, user string, pass string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login/" + user,
- Data: map[string]interface{}{
- "password": pass,
- },
- Unauthenticated: true,
-
- // Verifies a search without defined GroupDN returns a warnting rather than failing
- Check: func(resp *logical.Response) error {
- if len(resp.Warnings) != 1 {
- return fmt.Errorf("expected a warning due to no group dn, got: %#v", resp.Warnings)
- }
-
- return logicaltest.TestCheckAuth([]string{"bar", "default"})(resp)
- },
- }
-}
-
-func TestLDAPEscape(t *testing.T) {
- testcases := map[string]string{
- "#test": "\\#test",
- "test,hello": "test\\,hello",
- "test,hel+lo": "test\\,hel\\+lo",
- "test\\hello": "test\\\\hello",
- " test ": "\\ test \\ ",
- }
-
- for test, answer := range testcases {
- res := EscapeLDAPValue(test)
- if res != answer {
- t.Errorf("Failed to escape %s: %s != %s\n", test, res, answer)
- }
- }
-}
-
-func testAccStepGroupList(t *testing.T, groups []string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ListOperation,
- Path: "groups",
- Check: func(resp *logical.Response) error {
- if resp.IsError() {
- return fmt.Errorf("Got error response: %#v", *resp)
- }
-
- expected := make([]string, len(groups))
- copy(expected, groups)
- sort.Strings(expected)
-
- sortedResponse := make([]string, len(resp.Data["keys"].([]string)))
- copy(sortedResponse, resp.Data["keys"].([]string))
- sort.Strings(sortedResponse)
-
- if !reflect.DeepEqual(expected, sortedResponse) {
- return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", expected, sortedResponse)
- }
- return nil
- },
- }
-}
-
-func testAccStepUserList(t *testing.T, users []string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ListOperation,
- Path: "users",
- Check: func(resp *logical.Response) error {
- if resp.IsError() {
- return fmt.Errorf("Got error response: %#v", *resp)
- }
-
- expected := make([]string, len(users))
- copy(expected, users)
- sort.Strings(expected)
-
- sortedResponse := make([]string, len(resp.Data["keys"].([]string)))
- copy(sortedResponse, resp.Data["keys"].([]string))
- sort.Strings(sortedResponse)
-
- if !reflect.DeepEqual(expected, sortedResponse) {
- return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", expected, sortedResponse)
- }
- return nil
- },
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go
deleted file mode 100644
index 262bc99..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package ldap
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/hashicorp/vault/api"
- pwd "github.com/hashicorp/vault/helper/password"
-)
-
-type CLIHandler struct{}
-
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
- mount, ok := m["mount"]
- if !ok {
- mount = "ldap"
- }
-
- username, ok := m["username"]
- if !ok {
- username = usernameFromEnv()
- if username == "" {
- return nil, fmt.Errorf("'username' not supplied and neither 'LOGNAME' nor 'USER' env vars set")
- }
- }
- password, ok := m["password"]
- if !ok {
- fmt.Printf("Password (will be hidden): ")
- var err error
- password, err = pwd.Read(os.Stdin)
- fmt.Println()
- if err != nil {
- return nil, err
- }
- }
-
- data := map[string]interface{}{
- "password": password,
- }
-
- mfa_method, ok := m["method"]
- if ok {
- data["method"] = mfa_method
- }
- mfa_passcode, ok := m["passcode"]
- if ok {
- data["passcode"] = mfa_passcode
- }
-
- path := fmt.Sprintf("auth/%s/login/%s", mount, username)
- secret, err := c.Logical().Write(path, data)
- if err != nil {
- return nil, err
- }
- if secret == nil {
- return nil, fmt.Errorf("empty response from credential provider")
- }
-
- return secret, nil
-}
-
-func (h *CLIHandler) Help() string {
- help := `
-The LDAP credential provider allows you to authenticate with LDAP.
-To use it, first configure it through the "config" endpoint, and then
-login by specifying username and password. If password is not provided
-on the command line, it will be read from stdin.
-
-If multi-factor authentication (MFA) is enabled, a "method" and/or "passcode"
-may be provided depending on the MFA backend enabled. To check
-which MFA backend is in use, read "auth/[mount]/mfa_config".
-
- Example: vault auth -method=ldap username=john
-
- `
-
- return strings.TrimSpace(help)
-}
-
-func usernameFromEnv() string {
- if logname := os.Getenv("LOGNAME"); logname != "" {
- return logname
- }
- if user := os.Getenv("USER"); user != "" {
- return user
- }
- return ""
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go
deleted file mode 100644
index bf76715..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go
+++ /dev/null
@@ -1,483 +0,0 @@
-package ldap
-
-import (
- "crypto/tls"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "net"
- "net/url"
- "strings"
- "text/template"
-
- "github.com/fatih/structs"
- "github.com/go-ldap/ldap"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/helper/tlsutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- log "github.com/mgutz/logxi/v1"
-)
-
-func pathConfig(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `config`,
- Fields: map[string]*framework.FieldSchema{
- "url": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "ldap://127.0.0.1",
- Description: "LDAP URL to connect to (default: ldap://127.0.0.1). Multiple URLs can be specified by concatenating them with commas; they will be tried in-order.",
- },
-
- "userdn": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "LDAP domain to use for users (eg: ou=People,dc=example,dc=org)",
- },
-
- "binddn": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "LDAP DN for searching for the user DN (optional)",
- },
-
- "bindpass": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "LDAP password for searching for the user DN (optional)",
- },
-
- "groupdn": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "LDAP search base to use for group membership search (eg: ou=Groups,dc=example,dc=org)",
- },
-
- "groupfilter": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))",
- Description: `Go template for querying group membership of user (optional)
-The template can access the following context variables: UserDN, Username
-Example: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))
-Default: (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`,
- },
-
- "groupattr": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "cn",
- Description: `LDAP attribute to follow on objects returned by
-in order to enumerate user group membership.
-Examples: "cn" or "memberOf", etc.
-Default: cn`,
- },
-
- "upndomain": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Enables userPrincipalDomain login with [username]@UPNDomain (optional)",
- },
-
- "userattr": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "cn",
- Description: "Attribute used for users (default: cn)",
- },
-
- "certificate": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "CA certificate to use when verifying LDAP server certificate, must be x509 PEM encoded (optional)",
- },
-
- "discoverdn": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Use anonymous bind to discover the bind DN of a user (optional)",
- },
-
- "insecure_tls": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Skip LDAP server SSL Certificate verification - VERY insecure (optional)",
- },
-
- "starttls": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Issue a StartTLS command after establishing unencrypted connection (optional)",
- },
-
- "tls_min_version": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "tls12",
- Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
- },
-
- "tls_max_version": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "tls12",
- Description: "Maximum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
- },
- "deny_null_bind": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: "Denies an unauthenticated LDAP bind request if the user's password is empty; defaults to true",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathConfigRead,
- logical.UpdateOperation: b.pathConfigWrite,
- },
-
- HelpSynopsis: pathConfigHelpSyn,
- HelpDescription: pathConfigHelpDesc,
- }
-}
-
-/*
- * Construct ConfigEntry struct using stored configuration.
- */
-func (b *backend) Config(req *logical.Request) (*ConfigEntry, error) {
- // Schema for ConfigEntry
- fd, err := b.getConfigFieldData()
- if err != nil {
- return nil, err
- }
-
- // Create a new ConfigEntry, filling in defaults where appropriate
- result, err := b.newConfigEntry(fd)
- if err != nil {
- return nil, err
- }
-
- storedConfig, err := req.Storage.Get("config")
- if err != nil {
- return nil, err
- }
-
- if storedConfig == nil {
- // No user overrides, return default configuration
- return result, nil
- }
-
- // Deserialize stored configuration.
- // Fields not specified in storedConfig will retain their defaults.
- if err := storedConfig.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- result.logger = b.Logger()
-
- return result, nil
-}
-
-func (b *backend) pathConfigRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- cfg, err := b.Config(req)
- if err != nil {
- return nil, err
- }
- if cfg == nil {
- return nil, nil
- }
-
- resp := &logical.Response{
- Data: structs.New(cfg).Map(),
- }
- resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the configuration information as-is, including any passwords.")
- return resp, nil
-}
-
-/*
- * Creates and initializes a ConfigEntry object with its default values,
- * as specified by the passed schema.
- */
-func (b *backend) newConfigEntry(d *framework.FieldData) (*ConfigEntry, error) {
- cfg := new(ConfigEntry)
-
- cfg.logger = b.Logger()
-
- url := d.Get("url").(string)
- if url != "" {
- cfg.Url = strings.ToLower(url)
- }
- userattr := d.Get("userattr").(string)
- if userattr != "" {
- cfg.UserAttr = strings.ToLower(userattr)
- }
- userdn := d.Get("userdn").(string)
- if userdn != "" {
- cfg.UserDN = userdn
- }
- groupdn := d.Get("groupdn").(string)
- if groupdn != "" {
- cfg.GroupDN = groupdn
- }
- groupfilter := d.Get("groupfilter").(string)
- if groupfilter != "" {
- // Validate the template before proceeding
- _, err := template.New("queryTemplate").Parse(groupfilter)
- if err != nil {
- return nil, fmt.Errorf("invalid groupfilter (%v)", err)
- }
-
- cfg.GroupFilter = groupfilter
- }
- groupattr := d.Get("groupattr").(string)
- if groupattr != "" {
- cfg.GroupAttr = groupattr
- }
- upndomain := d.Get("upndomain").(string)
- if upndomain != "" {
- cfg.UPNDomain = upndomain
- }
- certificate := d.Get("certificate").(string)
- if certificate != "" {
- block, _ := pem.Decode([]byte(certificate))
-
- if block == nil || block.Type != "CERTIFICATE" {
- return nil, fmt.Errorf("failed to decode PEM block in the certificate")
- }
- _, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return nil, fmt.Errorf("failed to parse certificate %s", err.Error())
- }
- cfg.Certificate = certificate
- }
- insecureTLS := d.Get("insecure_tls").(bool)
- if insecureTLS {
- cfg.InsecureTLS = insecureTLS
- }
- cfg.TLSMinVersion = d.Get("tls_min_version").(string)
- if cfg.TLSMinVersion == "" {
- return nil, fmt.Errorf("failed to get 'tls_min_version' value")
- }
-
- var ok bool
- _, ok = tlsutil.TLSLookup[cfg.TLSMinVersion]
- if !ok {
- return nil, fmt.Errorf("invalid 'tls_min_version'")
- }
-
- cfg.TLSMaxVersion = d.Get("tls_max_version").(string)
- if cfg.TLSMaxVersion == "" {
- return nil, fmt.Errorf("failed to get 'tls_max_version' value")
- }
-
- _, ok = tlsutil.TLSLookup[cfg.TLSMaxVersion]
- if !ok {
- return nil, fmt.Errorf("invalid 'tls_max_version'")
- }
- if cfg.TLSMaxVersion < cfg.TLSMinVersion {
- return nil, fmt.Errorf("'tls_max_version' must be greater than or equal to 'tls_min_version'")
- }
-
- startTLS := d.Get("starttls").(bool)
- if startTLS {
- cfg.StartTLS = startTLS
- }
- bindDN := d.Get("binddn").(string)
- if bindDN != "" {
- cfg.BindDN = bindDN
- }
- bindPass := d.Get("bindpass").(string)
- if bindPass != "" {
- cfg.BindPassword = bindPass
- }
- denyNullBind := d.Get("deny_null_bind").(bool)
- if denyNullBind {
- cfg.DenyNullBind = denyNullBind
- }
- discoverDN := d.Get("discoverdn").(bool)
- if discoverDN {
- cfg.DiscoverDN = discoverDN
- }
-
- return cfg, nil
-}
-
-func (b *backend) pathConfigWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- // Build a ConfigEntry struct out of the supplied FieldData
- cfg, err := b.newConfigEntry(d)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- entry, err := logical.StorageEntryJSON("config", cfg)
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-type ConfigEntry struct {
- logger log.Logger
- Url string `json:"url" structs:"url" mapstructure:"url"`
- UserDN string `json:"userdn" structs:"userdn" mapstructure:"userdn"`
- GroupDN string `json:"groupdn" structs:"groupdn" mapstructure:"groupdn"`
- GroupFilter string `json:"groupfilter" structs:"groupfilter" mapstructure:"groupfilter"`
- GroupAttr string `json:"groupattr" structs:"groupattr" mapstructure:"groupattr"`
- UPNDomain string `json:"upndomain" structs:"upndomain" mapstructure:"upndomain"`
- UserAttr string `json:"userattr" structs:"userattr" mapstructure:"userattr"`
- Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"`
- InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"`
- StartTLS bool `json:"starttls" structs:"starttls" mapstructure:"starttls"`
- BindDN string `json:"binddn" structs:"binddn" mapstructure:"binddn"`
- BindPassword string `json:"bindpass" structs:"bindpass" mapstructure:"bindpass"`
- DenyNullBind bool `json:"deny_null_bind" structs:"deny_null_bind" mapstructure:"deny_null_bind"`
- DiscoverDN bool `json:"discoverdn" structs:"discoverdn" mapstructure:"discoverdn"`
- TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"`
- TLSMaxVersion string `json:"tls_max_version" structs:"tls_max_version" mapstructure:"tls_max_version"`
-}
-
-func (c *ConfigEntry) GetTLSConfig(host string) (*tls.Config, error) {
- tlsConfig := &tls.Config{
- ServerName: host,
- }
-
- if c.TLSMinVersion != "" {
- tlsMinVersion, ok := tlsutil.TLSLookup[c.TLSMinVersion]
- if !ok {
- return nil, fmt.Errorf("invalid 'tls_min_version' in config")
- }
- tlsConfig.MinVersion = tlsMinVersion
- }
-
- if c.TLSMaxVersion != "" {
- tlsMaxVersion, ok := tlsutil.TLSLookup[c.TLSMaxVersion]
- if !ok {
- return nil, fmt.Errorf("invalid 'tls_max_version' in config")
- }
- tlsConfig.MaxVersion = tlsMaxVersion
- }
-
- if c.InsecureTLS {
- tlsConfig.InsecureSkipVerify = true
- }
- if c.Certificate != "" {
- caPool := x509.NewCertPool()
- ok := caPool.AppendCertsFromPEM([]byte(c.Certificate))
- if !ok {
- return nil, fmt.Errorf("could not append CA certificate")
- }
- tlsConfig.RootCAs = caPool
- }
- return tlsConfig, nil
-}
-
-func (c *ConfigEntry) DialLDAP() (*ldap.Conn, error) {
- var retErr *multierror.Error
- var conn *ldap.Conn
- urls := strings.Split(c.Url, ",")
- for _, uut := range urls {
- u, err := url.Parse(uut)
- if err != nil {
- retErr = multierror.Append(retErr, fmt.Errorf("error parsing url %q: %s", uut, err.Error()))
- continue
- }
- host, port, err := net.SplitHostPort(u.Host)
- if err != nil {
- host = u.Host
- }
-
- var tlsConfig *tls.Config
- switch u.Scheme {
- case "ldap":
- if port == "" {
- port = "389"
- }
- conn, err = ldap.Dial("tcp", net.JoinHostPort(host, port))
- if err != nil {
- break
- }
- if conn == nil {
- err = fmt.Errorf("empty connection after dialing")
- break
- }
- if c.StartTLS {
- tlsConfig, err = c.GetTLSConfig(host)
- if err != nil {
- break
- }
- err = conn.StartTLS(tlsConfig)
- }
- case "ldaps":
- if port == "" {
- port = "636"
- }
- tlsConfig, err = c.GetTLSConfig(host)
- if err != nil {
- break
- }
- conn, err = ldap.DialTLS("tcp", net.JoinHostPort(host, port), tlsConfig)
- default:
- retErr = multierror.Append(retErr, fmt.Errorf("invalid LDAP scheme in url %q", net.JoinHostPort(host, port)))
- continue
- }
- if err == nil {
- if retErr != nil {
- if c.logger.IsDebug() {
- c.logger.Debug("ldap: errors connecting to some hosts: %s", retErr.Error())
- }
- }
- retErr = nil
- break
- }
- retErr = multierror.Append(retErr, fmt.Errorf("error connecting to host %q: %s", uut, err.Error()))
- }
-
- return conn, retErr.ErrorOrNil()
-}
-
-/*
- * Returns FieldData describing our ConfigEntry struct schema
- */
-func (b *backend) getConfigFieldData() (*framework.FieldData, error) {
- configPath := b.Route("config")
-
- if configPath == nil {
- return nil, logical.ErrUnsupportedPath
- }
-
- raw := make(map[string]interface{}, len(configPath.Fields))
-
- fd := framework.FieldData{
- Raw: raw,
- Schema: configPath.Fields,
- }
-
- return &fd, nil
-}
-
-const pathConfigHelpSyn = `
-Configure the LDAP server to connect to, along with its options.
-`
-
-const pathConfigHelpDesc = `
-This endpoint allows you to configure the LDAP server to connect to and its
-configuration options.
-
-The LDAP URL can use either the "ldap://" or "ldaps://" schema. In the former
-case, an unencrypted connection will be made with a default port of 389, unless
-the "starttls" parameter is set to true, in which case TLS will be used. In the
-latter case, a SSL connection will be established with a default port of 636.
-
-## A NOTE ON ESCAPING
-
-It is up to the administrator to provide properly escaped DNs. This includes
-the user DN, bind DN for search, and so on.
-
-The only DN escaping performed by this backend is on usernames given at login
-time when they are inserted into the final bind DN, and uses escaping rules
-defined in RFC 4514.
-
-Additionally, Active Directory has escaping rules that differ slightly from the
-RFC; in particular it requires escaping of '#' regardless of position in the DN
-(the RFC only requires it to be escaped when it is the first character), and
-'=', which the RFC indicates can be escaped with a backslash, but does not
-contain in its set of required escapes. If you are using Active Directory and
-these appear in your usernames, please ensure that they are escaped, in
-addition to being properly escaped in your configured DNs.
-
-For reference, see https://www.ietf.org/rfc/rfc4514.txt and
-http://social.technet.microsoft.com/wiki/contents/articles/5312.active-directory-characters-to-escape.aspx
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go
deleted file mode 100644
index 48c0d25..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package ldap
-
-import (
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathGroupsList(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "groups/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathGroupList,
- },
-
- HelpSynopsis: pathGroupHelpSyn,
- HelpDescription: pathGroupHelpDesc,
- }
-}
-
-func pathGroups(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `groups/(?P.+)`,
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the LDAP group.",
- },
-
- "policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Comma-separated list of policies associated to the group.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.DeleteOperation: b.pathGroupDelete,
- logical.ReadOperation: b.pathGroupRead,
- logical.UpdateOperation: b.pathGroupWrite,
- },
-
- HelpSynopsis: pathGroupHelpSyn,
- HelpDescription: pathGroupHelpDesc,
- }
-}
-
-func (b *backend) Group(s logical.Storage, n string) (*GroupEntry, error) {
- entry, err := s.Get("group/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result GroupEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathGroupDelete(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("group/" + d.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathGroupRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- group, err := b.Group(req.Storage, d.Get("name").(string))
- if err != nil {
- return nil, err
- }
- if group == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "policies": group.Policies,
- },
- }, nil
-}
-
-func (b *backend) pathGroupWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Store it
- entry, err := logical.StorageEntryJSON("group/"+d.Get("name").(string), &GroupEntry{
- Policies: policyutil.ParsePolicies(d.Get("policies")),
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathGroupList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- groups, err := req.Storage.List("group/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(groups), nil
-}
-
-type GroupEntry struct {
- Policies []string
-}
-
-const pathGroupHelpSyn = `
-Manage users allowed to authenticate.
-`
-
-const pathGroupHelpDesc = `
-This endpoint allows you to create, read, update, and delete configuration
-for LDAP groups that are allowed to authenticate, and associate policies to
-them.
-
-Deleting a group will not revoke auth for prior authenticated users in that
-group. To do this, do a revoke on "login/" for
-the usernames you want revoked.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go
deleted file mode 100644
index 2266e8d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package ldap
-
-import (
- "fmt"
- "sort"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathLogin(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `login/(?P.+)`,
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "DN (distinguished name) to be used for login.",
- },
-
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password for this user.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLogin,
- },
-
- HelpSynopsis: pathLoginSyn,
- HelpDescription: pathLoginDesc,
- }
-}
-
-func (b *backend) pathLogin(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- username := d.Get("username").(string)
- password := d.Get("password").(string)
-
- policies, resp, err := b.Login(req, username, password)
- // Handle an internal error
- if err != nil {
- return nil, err
- }
- if resp != nil {
- // Handle a logical error
- if resp.IsError() {
- return resp, nil
- }
- } else {
- resp = &logical.Response{}
- }
-
- sort.Strings(policies)
-
- resp.Auth = &logical.Auth{
- Policies: policies,
- Metadata: map[string]string{
- "username": username,
- },
- InternalData: map[string]interface{}{
- "password": password,
- },
- DisplayName: username,
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- },
- }
- return resp, nil
-}
-
-func (b *backend) pathLoginRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- username := req.Auth.Metadata["username"]
- password := req.Auth.InternalData["password"].(string)
-
- loginPolicies, resp, err := b.Login(req, username, password)
- if len(loginPolicies) == 0 {
- return resp, err
- }
-
- if !policyutil.EquivalentPolicies(loginPolicies, req.Auth.Policies) {
- return nil, fmt.Errorf("policies have changed, not renewing")
- }
-
- return framework.LeaseExtend(0, 0, b.System())(req, d)
-}
-
-const pathLoginSyn = `
-Log in with a username and password.
-`
-
-const pathLoginDesc = `
-This endpoint authenticates using a username and password. Please be sure to
-read the note on escaping from the path-help for the 'config' endpoint.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go
deleted file mode 100644
index 6845a41..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package ldap
-
-import (
- "strings"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathUsersList(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "users/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathUserList,
- },
-
- HelpSynopsis: pathUserHelpSyn,
- HelpDescription: pathUserHelpDesc,
- }
-}
-
-func pathUsers(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `users/(?P.+)`,
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the LDAP user.",
- },
-
- "groups": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Comma-separated list of additional groups associated with the user.",
- },
-
- "policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Comma-separated list of policies associated with the user.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.DeleteOperation: b.pathUserDelete,
- logical.ReadOperation: b.pathUserRead,
- logical.UpdateOperation: b.pathUserWrite,
- },
-
- HelpSynopsis: pathUserHelpSyn,
- HelpDescription: pathUserHelpDesc,
- }
-}
-
-func (b *backend) User(s logical.Storage, n string) (*UserEntry, error) {
- entry, err := s.Get("user/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result UserEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathUserDelete(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("user/" + d.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathUserRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- user, err := b.User(req.Storage, d.Get("name").(string))
- if err != nil {
- return nil, err
- }
- if user == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "groups": strings.Join(user.Groups, ","),
- "policies": user.Policies,
- },
- }, nil
-}
-
-func (b *backend) pathUserWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- groups := strutil.RemoveDuplicates(strutil.ParseStringSlice(d.Get("groups").(string), ","), false)
- policies := policyutil.ParsePolicies(d.Get("policies"))
- for i, g := range groups {
- groups[i] = strings.TrimSpace(g)
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("user/"+name, &UserEntry{
- Groups: groups,
- Policies: policies,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathUserList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- users, err := req.Storage.List("user/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(users), nil
-}
-
-type UserEntry struct {
- Groups []string
- Policies []string
-}
-
-const pathUserHelpSyn = `
-Manage additional groups for users allowed to authenticate.
-`
-
-const pathUserHelpDesc = `
-This endpoint allows you to create, read, update, and delete configuration
-for LDAP users that are allowed to authenticate, in particular associating
-additional groups to them.
-
-Deleting a user will not revoke their auth. To do this, do a revoke on "login/" for
-the usernames you want revoked.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go
deleted file mode 100644
index 951d190..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package okta
-
-import (
- "fmt"
-
- "github.com/chrismalek/oktasdk-go/okta"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: backendHelp,
-
- PathsSpecial: &logical.Paths{
- Unauthenticated: []string{
- "login/*",
- },
- },
-
- Paths: append([]*framework.Path{
- pathConfig(&b),
- pathUsers(&b),
- pathGroups(&b),
- pathUsersList(&b),
- pathGroupsList(&b),
- pathLogin(&b),
- }),
-
- AuthRenew: b.pathLoginRenew,
- BackendType: logical.TypeCredential,
- }
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-}
-
-func (b *backend) Login(req *logical.Request, username string, password string) ([]string, *logical.Response, error) {
- cfg, err := b.Config(req.Storage)
- if err != nil {
- return nil, nil, err
- }
- if cfg == nil {
- return nil, logical.ErrorResponse("Okta backend not configured"), nil
- }
-
- client := cfg.OktaClient()
-
- type embeddedResult struct {
- User okta.User `json:"user"`
- }
-
- type authResult struct {
- Embedded embeddedResult `json:"_embedded"`
- }
-
- authReq, err := client.NewRequest("POST", "authn", map[string]interface{}{
- "username": username,
- "password": password,
- })
- if err != nil {
- return nil, nil, err
- }
-
- var result authResult
- rsp, err := client.Do(authReq, &result)
- if err != nil {
- return nil, logical.ErrorResponse(fmt.Sprintf("Okta auth failed: %v", err)), nil
- }
- if rsp == nil {
- return nil, logical.ErrorResponse("okta auth backend unexpected failure"), nil
- }
-
- oktaResponse := &logical.Response{
- Data: map[string]interface{}{},
- }
-
- var allGroups []string
- // Only query the Okta API for group membership if we have a token
- if cfg.Token != "" {
- oktaGroups, err := b.getOktaGroups(client, &result.Embedded.User)
- if err != nil {
- return nil, logical.ErrorResponse(fmt.Sprintf("okta failure retrieving groups: %v", err)), nil
- }
- if len(oktaGroups) == 0 {
- errString := fmt.Sprintf(
- "no Okta groups found; only policies from locally-defined groups available")
- oktaResponse.AddWarning(errString)
- }
- allGroups = append(allGroups, oktaGroups...)
- }
-
- // Import the custom added groups from okta backend
- user, err := b.User(req.Storage, username)
- if err != nil {
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/okta: error looking up user", "error", err)
- }
- }
- if err == nil && user != nil && user.Groups != nil {
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/okta: adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups)
- }
- allGroups = append(allGroups, user.Groups...)
- }
-
- // Retrieve policies
- var policies []string
- for _, groupName := range allGroups {
- entry, _, err := b.Group(req.Storage, groupName)
- if err != nil {
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/okta: error looking up group policies", "error", err)
- }
- }
- if err == nil && entry != nil && entry.Policies != nil {
- policies = append(policies, entry.Policies...)
- }
- }
-
- // Merge local Policies into Okta Policies
- if user != nil && user.Policies != nil {
- policies = append(policies, user.Policies...)
- }
-
- if len(policies) == 0 {
- errStr := "user is not a member of any authorized policy"
- if len(oktaResponse.Warnings) > 0 {
- errStr = fmt.Sprintf("%s; additionally, %s", errStr, oktaResponse.Warnings[0])
- }
-
- oktaResponse.Data["error"] = errStr
- return nil, oktaResponse, nil
- }
-
- return policies, oktaResponse, nil
-}
-
-func (b *backend) getOktaGroups(client *okta.Client, user *okta.User) ([]string, error) {
- rsp, err := client.Users.PopulateGroups(user)
- if err != nil {
- return nil, err
- }
- if rsp == nil {
- return nil, fmt.Errorf("okta auth backend unexpected failure")
- }
- oktaGroups := make([]string, 0, len(user.Groups))
- for _, group := range user.Groups {
- oktaGroups = append(oktaGroups, group.Profile.Name)
- }
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/okta: Groups fetched from Okta", "num_groups", len(oktaGroups), "groups", oktaGroups)
- }
- return oktaGroups, nil
-}
-
-const backendHelp = `
-The Okta credential provider allows authentication querying,
-checking username and password, and associating policies. If an api token is configure
-groups are pulled down from Okta.
-
-Configuration of the connection is done through the "config" and "policies"
-endpoints by a user with root access. Authentication is then done
-by suppying the two fields for "login".
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go
deleted file mode 100644
index 9c2503d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package okta
-
-import (
- "fmt"
- "os"
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/helper/policyutil"
- log "github.com/mgutz/logxi/v1"
-
- "time"
-
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
-)
-
-func TestBackend_Config(t *testing.T) {
- defaultLeaseTTLVal := time.Hour * 12
- maxLeaseTTLVal := time.Hour * 24
- b, err := Factory(&logical.BackendConfig{
- Logger: logformat.NewVaultLogger(log.LevelTrace),
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- username := os.Getenv("OKTA_USERNAME")
- password := os.Getenv("OKTA_PASSWORD")
- token := os.Getenv("OKTA_API_TOKEN")
-
- configData := map[string]interface{}{
- "organization": os.Getenv("OKTA_ORG"),
- "base_url": "oktapreview.com",
- }
-
- updatedDuration := time.Hour * 1
- configDataToken := map[string]interface{}{
- "token": token,
- "ttl": "1h",
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- PreCheck: func() { testAccPreCheck(t) },
- Backend: b,
- Steps: []logicaltest.TestStep{
- testConfigCreate(t, configData),
- testLoginWrite(t, username, "wrong", "E0000004", 0, nil),
- testLoginWrite(t, username, password, "user is not a member of any authorized policy", 0, nil),
- testAccUserGroups(t, username, "local_grouP,lOcal_group2"),
- testAccGroups(t, "local_groUp", "loCal_group_policy"),
- testLoginWrite(t, username, password, "", defaultLeaseTTLVal, []string{"local_group_policy"}),
- testAccGroups(t, "everyoNe", "everyone_grouP_policy,eveRy_group_policy2"),
- testLoginWrite(t, username, password, "", defaultLeaseTTLVal, []string{"local_group_policy"}),
- testConfigUpdate(t, configDataToken),
- testConfigRead(t, token, configData),
- testLoginWrite(t, username, password, "", updatedDuration, []string{"everyone_group_policy", "every_group_policy2", "local_group_policy"}),
- testAccGroups(t, "locAl_group2", "testgroup_group_policy"),
- testLoginWrite(t, username, password, "", updatedDuration, []string{"everyone_group_policy", "every_group_policy2", "local_group_policy", "testgroup_group_policy"}),
- },
- })
-}
-
-func testLoginWrite(t *testing.T, username, password, reason string, expectedTTL time.Duration, policies []string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login/" + username,
- ErrorOk: true,
- Data: map[string]interface{}{
- "password": password,
- },
- Check: func(resp *logical.Response) error {
- if resp.IsError() {
- if reason == "" || !strings.Contains(resp.Error().Error(), reason) {
- return resp.Error()
- }
- }
-
- if resp.Auth != nil {
- if !policyutil.EquivalentPolicies(resp.Auth.Policies, policies) {
- return fmt.Errorf("policy mismatch expected %v but got %v", policies, resp.Auth.Policies)
- }
-
- actualTTL := resp.Auth.LeaseOptions.TTL
- if actualTTL != expectedTTL {
- return fmt.Errorf("TTL mismatch expected %v but got %v", expectedTTL, actualTTL)
- }
- }
-
- return nil
- },
- }
-}
-
-func testConfigCreate(t *testing.T, d map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.CreateOperation,
- Path: "config",
- Data: d,
- }
-}
-
-func testConfigUpdate(t *testing.T, d map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config",
- Data: d,
- }
-}
-
-func testConfigRead(t *testing.T, token string, d map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "config",
- Check: func(resp *logical.Response) error {
- if resp.IsError() {
- return resp.Error()
- }
-
- if resp.Data["organization"] != d["organization"] {
- return fmt.Errorf("Org mismatch expected %s but got %s", d["organization"], resp.Data["Org"])
- }
-
- if resp.Data["base_url"] != d["base_url"] {
- return fmt.Errorf("BaseURL mismatch expected %s but got %s", d["base_url"], resp.Data["BaseURL"])
- }
-
- for _, value := range resp.Data {
- if value == token {
- return fmt.Errorf("token should not be returned on a read request")
- }
- }
-
- return nil
- },
- }
-}
-
-func testAccPreCheck(t *testing.T) {
- if v := os.Getenv("OKTA_USERNAME"); v == "" {
- t.Fatal("OKTA_USERNAME must be set for acceptance tests")
- }
-
- if v := os.Getenv("OKTA_PASSWORD"); v == "" {
- t.Fatal("OKTA_PASSWORD must be set for acceptance tests")
- }
-
- if v := os.Getenv("OKTA_ORG"); v == "" {
- t.Fatal("OKTA_ORG must be set for acceptance tests")
- }
-}
-
-func testAccUserGroups(t *testing.T, user string, groups string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "users/" + user,
- Data: map[string]interface{}{
- "groups": groups,
- },
- }
-}
-
-func testAccGroups(t *testing.T, group string, policies string) logicaltest.TestStep {
- t.Logf("[testAccGroups] - Registering group %s, policy %s", group, policies)
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "groups/" + group,
- Data: map[string]interface{}{
- "policies": policies,
- },
- }
-}
-
-func testAccLogin(t *testing.T, user, password string, keys []string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login/" + user,
- Data: map[string]interface{}{
- "password": password,
- },
- Unauthenticated: true,
-
- Check: logicaltest.TestCheckAuth(keys),
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go
deleted file mode 100644
index f5f8502..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package okta
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/hashicorp/vault/api"
- pwd "github.com/hashicorp/vault/helper/password"
-)
-
-// CLIHandler struct
-type CLIHandler struct{}
-
-// Auth cli method
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
- mount, ok := m["mount"]
- if !ok {
- mount = "okta"
- }
-
- username, ok := m["username"]
- if !ok {
- return nil, fmt.Errorf("'username' var must be set")
- }
- password, ok := m["password"]
- if !ok {
- fmt.Printf("Password (will be hidden): ")
- var err error
- password, err = pwd.Read(os.Stdin)
- fmt.Println()
- if err != nil {
- return nil, err
- }
- }
-
- data := map[string]interface{}{
- "password": password,
- }
-
- path := fmt.Sprintf("auth/%s/login/%s", mount, username)
- secret, err := c.Logical().Write(path, data)
- if err != nil {
- return nil, err
- }
- if secret == nil {
- return nil, fmt.Errorf("empty response from credential provider")
- }
-
- return secret, nil
-}
-
-// Help method for okta cli
-func (h *CLIHandler) Help() string {
- help := `
-The Okta credential provider allows you to authenticate with Okta.
-To use it, first configure it through the "config" endpoint, and then
-login by specifying username and password. If password is not provided
-on the command line, it will be read from stdin.
-
- Example: vault auth -method=okta username=john
-
- `
-
- return strings.TrimSpace(help)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go
deleted file mode 100644
index e879302..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package okta
-
-import (
- "fmt"
- "net/url"
-
- "time"
-
- "github.com/chrismalek/oktasdk-go/okta"
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- defaultBaseURL = "okta.com"
- previewBaseURL = "oktapreview.com"
-)
-
-func pathConfig(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `config`,
- Fields: map[string]*framework.FieldSchema{
- "organization": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "(DEPRECATED) Okta organization to authenticate against. Use org_name instead.",
- },
- "org_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the organization to be used in the Okta API.",
- },
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "(DEPRECATED) Okta admin API token. Use api_token instead.",
- },
- "api_token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Okta API key.",
- },
- "base_url": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The base domain to use for the Okta API. When not specified in the configuraiton, "okta.com" is used.`,
- },
- "production": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `(DEPRECATED) Use base_url.`,
- },
- "ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `Duration after which authentication will be expired`,
- },
- "max_ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `Maximum duration after which authentication will be expired`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathConfigRead,
- logical.CreateOperation: b.pathConfigWrite,
- logical.UpdateOperation: b.pathConfigWrite,
- },
-
- ExistenceCheck: b.pathConfigExistenceCheck,
-
- HelpSynopsis: pathConfigHelp,
- }
-}
-
-// Config returns the configuration for this backend.
-func (b *backend) Config(s logical.Storage) (*ConfigEntry, error) {
- entry, err := s.Get("config")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result ConfigEntry
- if entry != nil {
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
- }
-
- return &result, nil
-}
-
-func (b *backend) pathConfigRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- cfg, err := b.Config(req.Storage)
- if err != nil {
- return nil, err
- }
- if cfg == nil {
- return nil, nil
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "organization": cfg.Org,
- "org_name": cfg.Org,
- "ttl": cfg.TTL,
- "max_ttl": cfg.MaxTTL,
- },
- }
- if cfg.BaseURL != "" {
- resp.Data["base_url"] = cfg.BaseURL
- }
- if cfg.Production != nil {
- resp.Data["production"] = *cfg.Production
- }
-
- return resp, nil
-}
-
-func (b *backend) pathConfigWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- cfg, err := b.Config(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // Due to the existence check, entry will only be nil if it's a create
- // operation, so just create a new one
- if cfg == nil {
- cfg = &ConfigEntry{}
- }
-
- org, ok := d.GetOk("org_name")
- if ok {
- cfg.Org = org.(string)
- }
- if cfg.Org == "" {
- org, ok = d.GetOk("organization")
- if ok {
- cfg.Org = org.(string)
- }
- }
- if cfg.Org == "" && req.Operation == logical.CreateOperation {
- return logical.ErrorResponse("org_name is missing"), nil
- }
-
- token, ok := d.GetOk("api_token")
- if ok {
- cfg.Token = token.(string)
- }
- if cfg.Token == "" {
- token, ok = d.GetOk("token")
- if ok {
- cfg.Token = token.(string)
- }
- }
-
- baseURLRaw, ok := d.GetOk("base_url")
- if ok {
- baseURL := baseURLRaw.(string)
- _, err = url.Parse(fmt.Sprintf("https://%s,%s", cfg.Org, baseURL))
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error parsing given base_url: %s", err)), nil
- }
- cfg.BaseURL = baseURL
- }
-
- // We only care about the production flag when base_url is not set. It is
- // for compatibility reasons.
- if cfg.BaseURL == "" {
- productionRaw, ok := d.GetOk("production")
- if ok {
- production := productionRaw.(bool)
- cfg.Production = &production
- }
- } else {
- // clear out old production flag if base_url is set
- cfg.Production = nil
- }
-
- ttl, ok := d.GetOk("ttl")
- if ok {
- cfg.TTL = time.Duration(ttl.(int)) * time.Second
- } else if req.Operation == logical.CreateOperation {
- cfg.TTL = time.Duration(d.Get("ttl").(int)) * time.Second
- }
-
- maxTTL, ok := d.GetOk("max_ttl")
- if ok {
- cfg.MaxTTL = time.Duration(maxTTL.(int)) * time.Second
- } else if req.Operation == logical.CreateOperation {
- cfg.MaxTTL = time.Duration(d.Get("max_ttl").(int)) * time.Second
- }
-
- jsonCfg, err := logical.StorageEntryJSON("config", cfg)
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(jsonCfg); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathConfigExistenceCheck(
- req *logical.Request, d *framework.FieldData) (bool, error) {
- cfg, err := b.Config(req.Storage)
- if err != nil {
- return false, err
- }
-
- return cfg != nil, nil
-}
-
-// OktaClient creates a basic okta client connection
-func (c *ConfigEntry) OktaClient() *okta.Client {
- baseURL := defaultBaseURL
- if c.Production != nil {
- if !*c.Production {
- baseURL = previewBaseURL
- }
- }
- if c.BaseURL != "" {
- baseURL = c.BaseURL
- }
-
- // We validate config on input and errors are only returned when parsing URLs
- client, _ := okta.NewClientWithDomain(cleanhttp.DefaultClient(), c.Org, baseURL, c.Token)
- return client
-}
-
-// ConfigEntry for Okta
-type ConfigEntry struct {
- Org string `json:"organization"`
- Token string `json:"token"`
- BaseURL string `json:"base_url"`
- Production *bool `json:"is_production,omitempty"`
- TTL time.Duration `json:"ttl"`
- MaxTTL time.Duration `json:"max_ttl"`
-}
-
-const pathConfigHelp = `
-This endpoint allows you to configure the Okta and its
-configuration options.
-
-The Okta organization are the characters at the front of the URL for Okta.
-Example https://ORG.okta.com
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go
deleted file mode 100644
index 9f879a1..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package okta
-
-import (
- "strings"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathGroupsList(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "groups/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathGroupList,
- },
-
- HelpSynopsis: pathGroupHelpSyn,
- HelpDescription: pathGroupHelpDesc,
- }
-}
-
-func pathGroups(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `groups/(?P.+)`,
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the Okta group.",
- },
-
- "policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Comma-separated list of policies associated to the group.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.DeleteOperation: b.pathGroupDelete,
- logical.ReadOperation: b.pathGroupRead,
- logical.UpdateOperation: b.pathGroupWrite,
- },
-
- HelpSynopsis: pathGroupHelpSyn,
- HelpDescription: pathGroupHelpDesc,
- }
-}
-
-// We look up groups in a case-insensitive manner since Okta is case-preserving
-// but case-insensitive for comparisons
-func (b *backend) Group(s logical.Storage, n string) (*GroupEntry, string, error) {
- canonicalName := n
- entry, err := s.Get("group/" + n)
- if err != nil {
- return nil, "", err
- }
- if entry == nil {
- entries, err := s.List("group/")
- if err != nil {
- return nil, "", err
- }
- for _, groupName := range entries {
- if strings.ToLower(groupName) == strings.ToLower(n) {
- entry, err = s.Get("group/" + groupName)
- if err != nil {
- return nil, "", err
- }
- canonicalName = groupName
- break
- }
- }
- }
- if entry == nil {
- return nil, "", nil
- }
-
- var result GroupEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, "", err
- }
-
- return &result, canonicalName, nil
-}
-
-func (b *backend) pathGroupDelete(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- if len(name) == 0 {
- return logical.ErrorResponse("'name' must be supplied"), nil
- }
-
- entry, canonicalName, err := b.Group(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if entry != nil {
- err := req.Storage.Delete("group/" + canonicalName)
- if err != nil {
- return nil, err
- }
- }
-
- return nil, nil
-}
-
-func (b *backend) pathGroupRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- if len(name) == 0 {
- return logical.ErrorResponse("'name' must be supplied"), nil
- }
-
- group, _, err := b.Group(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if group == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "policies": group.Policies,
- },
- }, nil
-}
-
-func (b *backend) pathGroupWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- if len(name) == 0 {
- return logical.ErrorResponse("'name' must be supplied"), nil
- }
-
- // Check for an existing group, possibly lowercased so that we keep using
- // existing user set values
- _, canonicalName, err := b.Group(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if canonicalName != "" {
- name = canonicalName
- } else {
- name = strings.ToLower(name)
- }
-
- entry, err := logical.StorageEntryJSON("group/"+name, &GroupEntry{
- Policies: policyutil.ParsePolicies(d.Get("policies")),
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathGroupList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- groups, err := req.Storage.List("group/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(groups), nil
-}
-
-type GroupEntry struct {
- Policies []string
-}
-
-const pathGroupHelpSyn = `
-Manage users allowed to authenticate.
-`
-
-const pathGroupHelpDesc = `
-This endpoint allows you to create, read, update, and delete configuration
-for Okta groups that are allowed to authenticate, and associate policies to
-them.
-
-Deleting a group will not revoke auth for prior authenticated users in that
-group. To do this, do a revoke on "login/" for
-the usernames you want revoked.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go
deleted file mode 100644
index e439771..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package okta
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/go-errors/errors"
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathLogin(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `login/(?P.+)`,
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username to be used for login.",
- },
-
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password for this user.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLogin,
- },
-
- HelpSynopsis: pathLoginSyn,
- HelpDescription: pathLoginDesc,
- }
-}
-
-func (b *backend) pathLogin(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- username := d.Get("username").(string)
- password := d.Get("password").(string)
-
- policies, resp, err := b.Login(req, username, password)
- // Handle an internal error
- if err != nil {
- return nil, err
- }
- if resp != nil {
- // Handle a logical error
- if resp.IsError() {
- return resp, nil
- }
- } else {
- resp = &logical.Response{}
- }
-
- sort.Strings(policies)
-
- cfg, err := b.getConfig(req)
- if err != nil {
- return nil, err
- }
-
- resp.Auth = &logical.Auth{
- Policies: policies,
- Metadata: map[string]string{
- "username": username,
- "policies": strings.Join(policies, ","),
- },
- InternalData: map[string]interface{}{
- "password": password,
- },
- DisplayName: username,
- LeaseOptions: logical.LeaseOptions{
- TTL: cfg.TTL,
- Renewable: true,
- },
- }
- return resp, nil
-}
-
-func (b *backend) pathLoginRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- username := req.Auth.Metadata["username"]
- password := req.Auth.InternalData["password"].(string)
-
- loginPolicies, resp, err := b.Login(req, username, password)
- if len(loginPolicies) == 0 {
- return resp, err
- }
-
- if !policyutil.EquivalentPolicies(loginPolicies, req.Auth.Policies) {
- return nil, fmt.Errorf("policies have changed, not renewing")
- }
-
- cfg, err := b.getConfig(req)
- if err != nil {
- return nil, err
- }
-
- return framework.LeaseExtend(cfg.TTL, cfg.MaxTTL, b.System())(req, d)
-}
-
-func (b *backend) getConfig(req *logical.Request) (*ConfigEntry, error) {
-
- cfg, err := b.Config(req.Storage)
- if err != nil {
- return nil, err
- }
- if cfg == nil {
- return nil, errors.New("Okta backend not configured")
- }
-
- return cfg, nil
-}
-
-const pathLoginSyn = `
-Log in with a username and password.
-`
-
-const pathLoginDesc = `
-This endpoint authenticates using a username and password.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_users.go
deleted file mode 100644
index 1512da4..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_users.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package okta
-
-import (
- "strings"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathUsersList(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "users/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathUserList,
- },
-
- HelpSynopsis: pathUserHelpSyn,
- HelpDescription: pathUserHelpDesc,
- }
-}
-
-func pathUsers(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `users/(?P.+)`,
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the user.",
- },
-
- "groups": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Comma-separated list of groups associated with the user.",
- },
-
- "policies": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Comma-separated list of policies associated with the user.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.DeleteOperation: b.pathUserDelete,
- logical.ReadOperation: b.pathUserRead,
- logical.UpdateOperation: b.pathUserWrite,
- },
-
- HelpSynopsis: pathUserHelpSyn,
- HelpDescription: pathUserHelpDesc,
- }
-}
-
-func (b *backend) User(s logical.Storage, n string) (*UserEntry, error) {
- entry, err := s.Get("user/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result UserEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathUserDelete(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- if len(name) == 0 {
- return logical.ErrorResponse("Error empty name"), nil
- }
-
- err := req.Storage.Delete("user/" + name)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathUserRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- if len(name) == 0 {
- return logical.ErrorResponse("Error empty name"), nil
- }
-
- user, err := b.User(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if user == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "groups": user.Groups,
- "policies": user.Policies,
- },
- }, nil
-}
-
-func (b *backend) pathUserWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- if len(name) == 0 {
- return logical.ErrorResponse("Error empty name"), nil
- }
-
- groups := strings.Split(d.Get("groups").(string), ",")
- for i, g := range groups {
- groups[i] = strings.TrimSpace(g)
- }
-
- policies := strings.Split(d.Get("policies").(string), ",")
- for i, p := range policies {
- policies[i] = strings.TrimSpace(p)
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("user/"+name, &UserEntry{
- Groups: groups,
- Policies: policies,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathUserList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- users, err := req.Storage.List("user/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(users), nil
-}
-
-type UserEntry struct {
- Groups []string
- Policies []string
-}
-
-const pathUserHelpSyn = `
-Manage additional groups for users allowed to authenticate.
-`
-
-const pathUserHelpDesc = `
-This endpoint allows you to create, read, update, and delete configuration
-for Okta users that are allowed to authenticate, in particular associating
-additional groups to them.
-
-Deleting a user will not revoke their auth. To do this, do a revoke on "login/" for
-the usernames you want revoked.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go
deleted file mode 100644
index 49dcb7f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package radius
-
-import (
- "github.com/hashicorp/vault/helper/mfa"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: backendHelp,
-
- PathsSpecial: &logical.Paths{
- Root: mfa.MFARootPaths(),
-
- Unauthenticated: []string{
- "login",
- "login/*",
- },
- },
-
- Paths: append([]*framework.Path{
- pathConfig(&b),
- pathUsers(&b),
- pathUsersList(&b),
- },
- mfa.MFAPaths(b.Backend, pathLogin(&b))...,
- ),
-
- AuthRenew: b.pathLoginRenew,
- BackendType: logical.TypeCredential,
- }
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-}
-
-const backendHelp = `
-The "radius" credential provider allows authentication against
-a RADIUS server, checking username and associating users
-to set of policies.
-
-Configuration of the server is done through the "config" and "users"
-endpoints by a user with approriate access mandated by policy.
-Authentication is then done by suppying the two fields for "login".
-
-The backend optionally allows to grant a set of policies to any
-user that successfully authenticates against the RADIUS server,
-without them being explicitly mapped in vault.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend_test.go
deleted file mode 100644
index 86ce19a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend_test.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package radius
-
-import (
- "fmt"
- "os"
- "reflect"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
-)
-
-const (
- testSysTTL = time.Hour * 10
- testSysMaxTTL = time.Hour * 20
-)
-
-func TestBackend_Config(t *testing.T) {
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: testSysTTL,
- MaxLeaseTTLVal: testSysMaxTTL,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- config_data_basic := map[string]interface{}{
- "host": "test.radius.hostname.com",
- "secret": "test-secret",
- }
-
- config_data_missingrequired := map[string]interface{}{
- "host": "test.radius.hostname.com",
- }
-
- config_data_invalidport := map[string]interface{}{
- "host": "test.radius.hostname.com",
- "port": "notnumeric",
- "secret": "test-secret",
- }
-
- config_data_invalidbool := map[string]interface{}{
- "host": "test.radius.hostname.com",
- "secret": "test-secret",
- "unregistered_user_policies": "test",
- }
-
- config_data_emptyport := map[string]interface{}{
- "host": "test.radius.hostname.com",
- "port": "",
- "secret": "test-secret",
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: false,
- // PreCheck: func() { testAccPreCheck(t) },
- Backend: b,
- Steps: []logicaltest.TestStep{
- testConfigWrite(t, config_data_basic, false),
- testConfigWrite(t, config_data_emptyport, true),
- testConfigWrite(t, config_data_invalidport, true),
- testConfigWrite(t, config_data_invalidbool, true),
- testConfigWrite(t, config_data_missingrequired, true),
- },
- })
-}
-
-func TestBackend_users(t *testing.T) {
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: testSysTTL,
- MaxLeaseTTLVal: testSysMaxTTL,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testStepUpdateUser(t, "web", "foo"),
- testStepUpdateUser(t, "web2", "foo"),
- testStepUpdateUser(t, "web3", "foo"),
- testStepUserList(t, []string{"web", "web2", "web3"}),
- },
- })
-}
-
-func TestBackend_acceptance(t *testing.T) {
-
- if os.Getenv(logicaltest.TestEnvVar) == "" {
- t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar))
- return
- }
-
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: testSysTTL,
- MaxLeaseTTLVal: testSysMaxTTL,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- configDataAcceptanceAllowUnreg := map[string]interface{}{
- "host": os.Getenv("RADIUS_HOST"),
- "port": os.Getenv("RADIUS_PORT"),
- "secret": os.Getenv("RADIUS_SECRET"),
- "unregistered_user_policies": "policy1,policy2",
- }
- if configDataAcceptanceAllowUnreg["port"] == "" {
- configDataAcceptanceAllowUnreg["port"] = "1812"
- }
-
- configDataAcceptanceNoAllowUnreg := map[string]interface{}{
- "host": os.Getenv("RADIUS_HOST"),
- "port": os.Getenv("RADIUS_PORT"),
- "secret": os.Getenv("RADIUS_SECRET"),
- "unregistered_user_policies": "",
- }
- if configDataAcceptanceNoAllowUnreg["port"] == "" {
- configDataAcceptanceNoAllowUnreg["port"] = "1812"
- }
-
- dataRealpassword := map[string]interface{}{
- "password": os.Getenv("RADIUS_USERPASS"),
- }
-
- dataWrongpassword := map[string]interface{}{
- "password": "wrongpassword",
- }
-
- username := os.Getenv("RADIUS_USERNAME")
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- PreCheck: func() { testAccPreCheck(t) },
- AcceptanceTest: true,
- Steps: []logicaltest.TestStep{
- // Login with valid but unknown user will fail because unregistered_user_policies is emtpy
- testConfigWrite(t, configDataAcceptanceNoAllowUnreg, false),
- testAccUserLogin(t, username, dataRealpassword, true),
- // Once the user is registered auth will succeed
- testStepUpdateUser(t, username, ""),
- testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default"}, false),
-
- testStepUpdateUser(t, username, "foopolicy"),
- testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy"}, false),
- testAccStepDeleteUser(t, username),
-
- // When unregistered_user_policies is specified, an unknown user will be granted access and granted the listed policies
- testConfigWrite(t, configDataAcceptanceAllowUnreg, false),
- testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "policy1", "policy2"}, false),
-
- // More tests
- testAccUserLogin(t, "nonexistinguser", dataRealpassword, true),
- testAccUserLogin(t, username, dataWrongpassword, true),
- testStepUpdateUser(t, username, "foopolicy"),
- testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy"}, false),
- testStepUpdateUser(t, username, "foopolicy, secondpolicy"),
- testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy", "secondpolicy"}, false),
- testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy", "secondpolicy", "thirdpolicy"}, true),
- },
- })
-}
-
-func testAccPreCheck(t *testing.T) {
- if v := os.Getenv("RADIUS_HOST"); v == "" {
- t.Fatal("RADIUS_HOST must be set for acceptance tests")
- }
-
- if v := os.Getenv("RADIUS_USERNAME"); v == "" {
- t.Fatal("RADIUS_USERNAME must be set for acceptance tests")
- }
-
- if v := os.Getenv("RADIUS_USERPASS"); v == "" {
- t.Fatal("RADIUS_USERPASS must be set for acceptance tests")
- }
-
- if v := os.Getenv("RADIUS_SECRET"); v == "" {
- t.Fatal("RADIUS_SECRET must be set for acceptance tests")
- }
-}
-
-func testConfigWrite(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config",
- Data: d,
- ErrorOk: expectError,
- }
-}
-
-func testAccStepDeleteUser(t *testing.T, n string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "users/" + n,
- }
-}
-
-func testStepUserList(t *testing.T, users []string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ListOperation,
- Path: "users",
- Check: func(resp *logical.Response) error {
- if resp.IsError() {
- return fmt.Errorf("Got error response: %#v", *resp)
- }
-
- if !reflect.DeepEqual(users, resp.Data["keys"].([]string)) {
- return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", users, resp.Data["keys"])
- }
- return nil
- },
- }
-}
-
-func testStepUpdateUser(
- t *testing.T, name string, policies string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "users/" + name,
- Data: map[string]interface{}{
- "policies": policies,
- },
- }
-}
-
-func testAccUserLogin(t *testing.T, user string, data map[string]interface{}, expectError bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login/" + user,
- Data: data,
- ErrorOk: expectError,
- Unauthenticated: true,
- }
-}
-
-func testAccUserLoginPolicy(t *testing.T, user string, data map[string]interface{}, policies []string, expectError bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login/" + user,
- Data: data,
- ErrorOk: false,
- Unauthenticated: true,
- //Check: logicaltest.TestCheckAuth(policies),
- Check: func(resp *logical.Response) error {
- res := logicaltest.TestCheckAuth(policies)(resp)
- if res != nil && expectError {
- return nil
- }
- return res
- },
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go
deleted file mode 100644
index 7d4bc8b..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go
+++ /dev/null
@@ -1,220 +0,0 @@
-package radius
-
-import (
- "strings"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfig(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config",
- Fields: map[string]*framework.FieldSchema{
- "host": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "RADIUS server host",
- },
-
- "port": &framework.FieldSchema{
- Type: framework.TypeInt,
- Default: 1812,
- Description: "RADIUS server port (default: 1812)",
- },
- "secret": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Secret shared with the RADIUS server",
- },
- "unregistered_user_policies": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregisted user (default: emtpy)",
- },
- "dial_timeout": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 10,
- Description: "Number of seconds before connect times out (default: 10)",
- },
- "read_timeout": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 10,
- Description: "Number of seconds before response times out (default: 10). Note: kept for backwards compatibility, currently unused.",
- },
- "nas_port": &framework.FieldSchema{
- Type: framework.TypeInt,
- Default: 10,
- Description: "RADIUS NAS port field (default: 10)",
- },
- },
-
- ExistenceCheck: b.configExistenceCheck,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathConfigRead,
- logical.CreateOperation: b.pathConfigCreateUpdate,
- logical.UpdateOperation: b.pathConfigCreateUpdate,
- },
-
- HelpSynopsis: pathConfigHelpSyn,
- HelpDescription: pathConfigHelpDesc,
- }
-}
-
-// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
-// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
-func (b *backend) configExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
- entry, err := b.Config(req)
- if err != nil {
- return false, err
- }
- return entry != nil, nil
-}
-
-/*
- * Construct ConfigEntry struct using stored configuration.
- */
-func (b *backend) Config(req *logical.Request) (*ConfigEntry, error) {
-
- storedConfig, err := req.Storage.Get("config")
- if err != nil {
- return nil, err
- }
-
- if storedConfig == nil {
- return nil, nil
- }
-
- var result ConfigEntry
-
- if err := storedConfig.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathConfigRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- cfg, err := b.Config(req)
- if err != nil {
- return nil, err
- }
- if cfg == nil {
- return nil, nil
- }
-
- resp := &logical.Response{
- Data: structs.New(cfg).Map(),
- }
- resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the configuration information as-is, including any secrets.")
- return resp, nil
-}
-
-func (b *backend) pathConfigCreateUpdate(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- // Build a ConfigEntry struct out of the supplied FieldData
- cfg, err := b.Config(req)
- if err != nil {
- return nil, err
- }
- if cfg == nil {
- cfg = &ConfigEntry{}
- }
-
- host, ok := d.GetOk("host")
- if ok {
- cfg.Host = strings.ToLower(host.(string))
- } else if req.Operation == logical.CreateOperation {
- cfg.Host = strings.ToLower(d.Get("host").(string))
- }
- if cfg.Host == "" {
- return logical.ErrorResponse("config parameter `host` cannot be empty"), nil
- }
-
- port, ok := d.GetOk("port")
- if ok {
- cfg.Port = port.(int)
- } else if req.Operation == logical.CreateOperation {
- cfg.Port = d.Get("port").(int)
- }
-
- secret, ok := d.GetOk("secret")
- if ok {
- cfg.Secret = secret.(string)
- } else if req.Operation == logical.CreateOperation {
- cfg.Secret = d.Get("secret").(string)
- }
- if cfg.Secret == "" {
- return logical.ErrorResponse("config parameter `secret` cannot be empty"), nil
- }
-
- policies := make([]string, 0)
- unregisteredUserPoliciesRaw, ok := d.GetOk("unregistered_user_policies")
- if ok {
- unregisteredUserPoliciesStr := unregisteredUserPoliciesRaw.(string)
- if strings.TrimSpace(unregisteredUserPoliciesStr) != "" {
- policies = strings.Split(unregisteredUserPoliciesStr, ",")
- for _, policy := range policies {
- if policy == "root" {
- return logical.ErrorResponse("root policy cannot be granted by an authentication backend"), nil
- }
- }
- }
- cfg.UnregisteredUserPolicies = policies
- } else if req.Operation == logical.CreateOperation {
- cfg.UnregisteredUserPolicies = policies
- }
-
- dialTimeout, ok := d.GetOk("dial_timeout")
- if ok {
- cfg.DialTimeout = dialTimeout.(int)
- } else if req.Operation == logical.CreateOperation {
- cfg.DialTimeout = d.Get("dial_timeout").(int)
- }
-
- readTimeout, ok := d.GetOk("read_timeout")
- if ok {
- cfg.ReadTimeout = readTimeout.(int)
- } else if req.Operation == logical.CreateOperation {
- cfg.ReadTimeout = d.Get("read_timeout").(int)
- }
-
- nasPort, ok := d.GetOk("nas_port")
- if ok {
- cfg.NasPort = nasPort.(int)
- } else if req.Operation == logical.CreateOperation {
- cfg.NasPort = d.Get("nas_port").(int)
- }
-
- entry, err := logical.StorageEntryJSON("config", cfg)
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-type ConfigEntry struct {
- Host string `json:"host" structs:"host" mapstructure:"host"`
- Port int `json:"port" structs:"port" mapstructure:"port"`
- Secret string `json:"secret" structs:"secret" mapstructure:"secret"`
- UnregisteredUserPolicies []string `json:"unregistered_user_policies" structs:"unregistered_user_policies" mapstructure:"unregistered_user_policies"`
- DialTimeout int `json:"dial_timeout" structs:"dial_timeout" mapstructure:"dial_timeout"`
- ReadTimeout int `json:"read_timeout" structs:"read_timeout" mapstructure:"read_timeout"`
- NasPort int `json:"nas_port" structs:"nas_port" mapstructure:"nas_port"`
-}
-
-const pathConfigHelpSyn = `
-Configure the RADIUS server to connect to, along with its options.
-`
-
-const pathConfigHelpDesc = `
-This endpoint allows you to configure the RADIUS server to connect to and its
-configuration options.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go
deleted file mode 100644
index f3f8c9d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package radius
-
-import (
- "context"
- "fmt"
- "net"
- "strconv"
- "strings"
- "time"
-
- "layeh.com/radius"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathLogin(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "login" + framework.OptionalParamRegex("urlusername"),
- Fields: map[string]*framework.FieldSchema{
- "urlusername": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username to be used for login. (URL parameter)",
- },
-
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username to be used for login. (POST request body)",
- },
-
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password for this user.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLogin,
- },
-
- HelpSynopsis: pathLoginSyn,
- HelpDescription: pathLoginDesc,
- }
-}
-
-func (b *backend) pathLogin(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- username := d.Get("username").(string)
- password := d.Get("password").(string)
-
- if username == "" {
- username = d.Get("urlusername").(string)
- if username == "" {
- return logical.ErrorResponse("username cannot be empty"), nil
- }
- }
-
- if password == "" {
- return logical.ErrorResponse("password cannot be empty"), nil
- }
-
- policies, resp, err := b.RadiusLogin(req, username, password)
- // Handle an internal error
- if err != nil {
- return nil, err
- }
- if resp != nil {
- // Handle a logical error
- if resp.IsError() {
- return resp, nil
- }
- }
-
- resp.Auth = &logical.Auth{
- Policies: policies,
- Metadata: map[string]string{
- "username": username,
- "policies": strings.Join(policies, ","),
- },
- InternalData: map[string]interface{}{
- "password": password,
- },
- DisplayName: username,
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- },
- }
- return resp, nil
-}
-
-func (b *backend) pathLoginRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- var err error
-
- username := req.Auth.Metadata["username"]
- password := req.Auth.InternalData["password"].(string)
-
- var resp *logical.Response
- var loginPolicies []string
-
- loginPolicies, resp, err = b.RadiusLogin(req, username, password)
- if err != nil || (resp != nil && resp.IsError()) {
- return resp, err
- }
-
- if !policyutil.EquivalentPolicies(loginPolicies, req.Auth.Policies) {
- return nil, fmt.Errorf("policies have changed, not renewing")
- }
-
- return framework.LeaseExtend(0, 0, b.System())(req, d)
-}
-
-func (b *backend) RadiusLogin(req *logical.Request, username string, password string) ([]string, *logical.Response, error) {
-
- cfg, err := b.Config(req)
- if err != nil {
- return nil, nil, err
- }
- if cfg == nil || cfg.Host == "" || cfg.Secret == "" {
- return nil, logical.ErrorResponse("radius backend not configured"), nil
- }
-
- hostport := net.JoinHostPort(cfg.Host, strconv.Itoa(cfg.Port))
-
- packet := radius.New(radius.CodeAccessRequest, []byte(cfg.Secret))
- usernameAttr, err := radius.NewString(username)
- if err != nil {
- return nil, nil, err
- }
- passwordAttr, err := radius.NewString(password)
- if err != nil {
- return nil, nil, err
- }
- packet.Add(1, usernameAttr)
- packet.Add(2, passwordAttr)
- packet.Add(5, radius.NewInteger(uint32(cfg.NasPort)))
-
- client := radius.Client{
- Dialer: net.Dialer{
- Timeout: time.Duration(cfg.DialTimeout) * time.Second,
- },
- }
- received, err := client.Exchange(context.Background(), packet, hostport)
- if err != nil {
- return nil, logical.ErrorResponse(err.Error()), nil
- }
- if received.Code != radius.CodeAccessAccept {
- return nil, logical.ErrorResponse("access denied by the authentication server"), nil
- }
-
- var policies []string
- // Retrieve user entry from storage
- user, err := b.user(req.Storage, username)
- if err != nil {
- return policies, logical.ErrorResponse("could not retrieve user entry from storage"), err
- }
- if user == nil {
- // No user found, check if unregistered users are allowed (unregistered_user_policies not empty)
- if len(policyutil.SanitizePolicies(cfg.UnregisteredUserPolicies, false)) == 0 {
- return nil, logical.ErrorResponse("authentication succeeded but user has no associated policies"), nil
- }
- policies = policyutil.SanitizePolicies(cfg.UnregisteredUserPolicies, true)
- } else {
- policies = policyutil.SanitizePolicies(user.Policies, true)
- }
-
- return policies, &logical.Response{}, nil
-}
-
-const pathLoginSyn = `
-Log in with a username and password.
-`
-
-const pathLoginDesc = `
-This endpoint authenticates using a username and password. Please be sure to
-read the note on escaping from the path-help for the 'config' endpoint.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go
deleted file mode 100644
index 1e0fc61..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package radius
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathUsersList(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "users/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathUserList,
- },
-
- HelpSynopsis: pathUserHelpSyn,
- HelpDescription: pathUserHelpDesc,
- }
-}
-
-func pathUsers(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `users/(?P.+)`,
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the RADIUS user.",
- },
-
- "policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Comma-separated list of policies associated to the user.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.DeleteOperation: b.pathUserDelete,
- logical.ReadOperation: b.pathUserRead,
- logical.UpdateOperation: b.pathUserWrite,
- logical.CreateOperation: b.pathUserWrite,
- },
-
- ExistenceCheck: b.userExistenceCheck,
-
- HelpSynopsis: pathUserHelpSyn,
- HelpDescription: pathUserHelpDesc,
- }
-}
-
-func (b *backend) userExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
- userEntry, err := b.user(req.Storage, data.Get("name").(string))
- if err != nil {
- return false, err
- }
-
- return userEntry != nil, nil
-}
-
-func (b *backend) user(s logical.Storage, username string) (*UserEntry, error) {
- if username == "" {
- return nil, fmt.Errorf("missing username")
- }
-
- entry, err := s.Get("user/" + strings.ToLower(username))
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result UserEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathUserDelete(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("user/" + d.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathUserRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- user, err := b.user(req.Storage, d.Get("name").(string))
- if err != nil {
- return nil, err
- }
- if user == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "policies": user.Policies,
- },
- }, nil
-}
-
-func (b *backend) pathUserWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- var policies = policyutil.ParsePolicies(d.Get("policies"))
- for _, policy := range policies {
- if policy == "root" {
- return logical.ErrorResponse("root policy cannot be granted by an authentication backend"), nil
- }
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("user/"+d.Get("name").(string), &UserEntry{
- Policies: policies,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathUserList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- users, err := req.Storage.List("user/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(users), nil
-}
-
-type UserEntry struct {
- Policies []string
-}
-
-const pathUserHelpSyn = `
-Manage users allowed to authenticate.
-`
-
-const pathUserHelpDesc = `
-This endpoint allows you to create, read, update, and delete configuration
-for RADIUS users that are allowed to authenticate, and associate policies to
-them.
-
-Deleting a user will not revoke auth for prior authenticated users.
-To do this, do a revoke token by path on "auth/radius/login/"
-for the usernames you want revoked.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go
deleted file mode 100644
index 65f67e1..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package userpass
-
-import (
- "github.com/hashicorp/vault/helper/mfa"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: backendHelp,
-
- PathsSpecial: &logical.Paths{
- Root: mfa.MFARootPaths(),
-
- Unauthenticated: []string{
- "login/*",
- },
- },
-
- Paths: append([]*framework.Path{
- pathUsers(&b),
- pathUsersList(&b),
- pathUserPolicies(&b),
- pathUserPassword(&b),
- },
- mfa.MFAPaths(b.Backend, pathLogin(&b))...,
- ),
-
- AuthRenew: b.pathLoginRenew,
- BackendType: logical.TypeCredential,
- }
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-}
-
-const backendHelp = `
-The "userpass" credential provider allows authentication using
-a combination of a username and password. No additional factors
-are supported.
-
-The username/password combination is configured using the "users/"
-endpoints by a user with root access. Authentication is then done
-by suppying the two fields for "login".
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go
deleted file mode 100644
index 4f077ee..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go
+++ /dev/null
@@ -1,328 +0,0 @@
-package userpass
-
-import (
- "fmt"
- "reflect"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/mitchellh/mapstructure"
-)
-
-const (
- testSysTTL = time.Hour * 10
- testSysMaxTTL = time.Hour * 20
-)
-
-func TestBackend_TTLDurations(t *testing.T) {
- data1 := map[string]interface{}{
- "password": "password",
- "policies": "root",
- "ttl": "21h",
- "max_ttl": "11h",
- }
- data2 := map[string]interface{}{
- "password": "password",
- "policies": "root",
- "ttl": "10h",
- "max_ttl": "21h",
- }
- data3 := map[string]interface{}{
- "password": "password",
- "policies": "root",
- "ttl": "10h",
- "max_ttl": "10h",
- }
- data4 := map[string]interface{}{
- "password": "password",
- "policies": "root",
- "ttl": "11h",
- "max_ttl": "5h",
- }
- data5 := map[string]interface{}{
- "password": "password",
- }
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: testSysTTL,
- MaxLeaseTTLVal: testSysMaxTTL,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testUsersWrite(t, "test", data1, true),
- testUsersWrite(t, "test", data2, true),
- testUsersWrite(t, "test", data3, false),
- testUsersWrite(t, "test", data4, false),
- testLoginWrite(t, "test", data5, false),
- testLoginWrite(t, "wrong", data5, true),
- },
- })
-}
-
-func TestBackend_basic(t *testing.T) {
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: testSysTTL,
- MaxLeaseTTLVal: testSysMaxTTL,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepUser(t, "web", "password", "foo"),
- testAccStepUser(t, "web2", "password", "foo"),
- testAccStepUser(t, "web3", "password", "foo"),
- testAccStepList(t, []string{"web", "web2", "web3"}),
- testAccStepLogin(t, "web", "password", []string{"default", "foo"}),
- },
- })
-}
-
-func TestBackend_userCrud(t *testing.T) {
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: testSysTTL,
- MaxLeaseTTLVal: testSysMaxTTL,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepUser(t, "web", "password", "foo"),
- testAccStepReadUser(t, "web", "foo"),
- testAccStepDeleteUser(t, "web"),
- testAccStepReadUser(t, "web", ""),
- },
- })
-}
-
-func TestBackend_userCreateOperation(t *testing.T) {
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: testSysTTL,
- MaxLeaseTTLVal: testSysMaxTTL,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testUserCreateOperation(t, "web", "password", "foo"),
- testAccStepLogin(t, "web", "password", []string{"default", "foo"}),
- },
- })
-}
-
-func TestBackend_passwordUpdate(t *testing.T) {
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: testSysTTL,
- MaxLeaseTTLVal: testSysMaxTTL,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepUser(t, "web", "password", "foo"),
- testAccStepReadUser(t, "web", "foo"),
- testAccStepLogin(t, "web", "password", []string{"default", "foo"}),
- testUpdatePassword(t, "web", "newpassword"),
- testAccStepLogin(t, "web", "newpassword", []string{"default", "foo"}),
- },
- })
-
-}
-
-func TestBackend_policiesUpdate(t *testing.T) {
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: testSysTTL,
- MaxLeaseTTLVal: testSysMaxTTL,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepUser(t, "web", "password", "foo"),
- testAccStepReadUser(t, "web", "foo"),
- testAccStepLogin(t, "web", "password", []string{"default", "foo"}),
- testUpdatePolicies(t, "web", "foo,bar"),
- testAccStepReadUser(t, "web", "bar,foo"),
- testAccStepLogin(t, "web", "password", []string{"bar", "default", "foo"}),
- },
- })
-
-}
-
-func testUpdatePassword(t *testing.T, user, password string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "users/" + user + "/password",
- Data: map[string]interface{}{
- "password": password,
- },
- }
-}
-
-func testUpdatePolicies(t *testing.T, user, policies string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "users/" + user + "/policies",
- Data: map[string]interface{}{
- "policies": policies,
- },
- }
-}
-
-func testUsersWrite(t *testing.T, user string, data map[string]interface{}, expectError bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "users/" + user,
- Data: data,
- ErrorOk: true,
- Check: func(resp *logical.Response) error {
- if resp == nil && expectError {
- return fmt.Errorf("Expected error but received nil")
- }
- return nil
- },
- }
-}
-
-func testLoginWrite(t *testing.T, user string, data map[string]interface{}, expectError bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login/" + user,
- Data: data,
- ErrorOk: true,
- Check: func(resp *logical.Response) error {
- if resp == nil && expectError {
- return fmt.Errorf("Expected error but received nil")
- }
- return nil
- },
- }
-}
-
-func testAccStepList(t *testing.T, users []string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ListOperation,
- Path: "users",
- Check: func(resp *logical.Response) error {
- if resp.IsError() {
- return fmt.Errorf("Got error response: %#v", *resp)
- }
-
- exp := []string{"web", "web2", "web3"}
- if !reflect.DeepEqual(exp, resp.Data["keys"].([]string)) {
- return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", exp, resp.Data["keys"])
- }
- return nil
- },
- }
-}
-
-func testAccStepLogin(t *testing.T, user string, pass string, policies []string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login/" + user,
- Data: map[string]interface{}{
- "password": pass,
- },
- Unauthenticated: true,
-
- Check: logicaltest.TestCheckAuth(policies),
- }
-}
-
-func testUserCreateOperation(
- t *testing.T, name string, password string, policies string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.CreateOperation,
- Path: "users/" + name,
- Data: map[string]interface{}{
- "password": password,
- "policies": policies,
- },
- }
-}
-
-func testAccStepUser(
- t *testing.T, name string, password string, policies string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "users/" + name,
- Data: map[string]interface{}{
- "password": password,
- "policies": policies,
- },
- }
-}
-
-func testAccStepDeleteUser(t *testing.T, n string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "users/" + n,
- }
-}
-
-func testAccStepReadUser(t *testing.T, name string, policies string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "users/" + name,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if policies == "" {
- return nil
- }
-
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- Policies []string `mapstructure:"policies"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if !reflect.DeepEqual(d.Policies, policyutil.ParsePolicies(policies)) {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go
deleted file mode 100644
index 4433c0e..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package userpass
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/hashicorp/vault/api"
- pwd "github.com/hashicorp/vault/helper/password"
- "github.com/mitchellh/mapstructure"
-)
-
-type CLIHandler struct {
- DefaultMount string
-}
-
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
- var data struct {
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- Mount string `mapstructure:"mount"`
- Method string `mapstructure:"method"`
- Passcode string `mapstructure:"passcode"`
- }
- if err := mapstructure.WeakDecode(m, &data); err != nil {
- return nil, err
- }
-
- if data.Username == "" {
- return nil, fmt.Errorf("'username' must be specified")
- }
- if data.Password == "" {
- fmt.Printf("Password (will be hidden): ")
- password, err := pwd.Read(os.Stdin)
- fmt.Println()
- if err != nil {
- return nil, err
- }
- data.Password = password
- }
- if data.Mount == "" {
- data.Mount = h.DefaultMount
- }
-
- options := map[string]interface{}{
- "password": data.Password,
- }
- if data.Method != "" {
- options["method"] = data.Method
- }
- if data.Passcode != "" {
- options["passcode"] = data.Passcode
- }
-
- path := fmt.Sprintf("auth/%s/login/%s", data.Mount, data.Username)
- secret, err := c.Logical().Write(path, options)
- if err != nil {
- return nil, err
- }
- if secret == nil {
- return nil, fmt.Errorf("empty response from credential provider")
- }
-
- return secret, nil
-}
-
-func (h *CLIHandler) Help() string {
- help := `
-The "userpass"/"radius" credential provider allows you to authenticate with
-a username and password. To use it, specify the "username" and "password"
-parameters. If password is not provided on the command line, it will be
-read from stdin.
-
-If multi-factor authentication (MFA) is enabled, a "method" and/or "passcode"
-may be provided depending on the MFA backend enabled. To check
-which MFA backend is in use, read "auth/[mount]/mfa_config".
-
- Example: vault auth -method=userpass \
- username= \
- password=
-
- `
-
- return strings.TrimSpace(help)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_login.go
deleted file mode 100644
index 574aa29..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_login.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package userpass
-
-import (
- "crypto/subtle"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "golang.org/x/crypto/bcrypt"
-)
-
-func pathLogin(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "login/" + framework.GenericNameRegex("username"),
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username of the user.",
- },
-
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password for this user.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLogin,
- },
-
- HelpSynopsis: pathLoginSyn,
- HelpDescription: pathLoginDesc,
- }
-}
-
-func (b *backend) pathLogin(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- username := strings.ToLower(d.Get("username").(string))
-
- password := d.Get("password").(string)
- if password == "" {
- return nil, fmt.Errorf("missing password")
- }
-
- // Get the user and validate auth
- user, err := b.user(req.Storage, username)
- if err != nil {
- return nil, err
- }
- if user == nil {
- return logical.ErrorResponse("invalid username or password"), nil
- }
-
- // Check for a password match. Check for a hash collision for Vault 0.2+,
- // but handle the older legacy passwords with a constant time comparison.
- passwordBytes := []byte(password)
- if user.PasswordHash != nil {
- if err := bcrypt.CompareHashAndPassword(user.PasswordHash, passwordBytes); err != nil {
- return logical.ErrorResponse("invalid username or password"), nil
- }
- } else {
- if subtle.ConstantTimeCompare([]byte(user.Password), passwordBytes) != 1 {
- return logical.ErrorResponse("invalid username or password"), nil
- }
- }
-
- return &logical.Response{
- Auth: &logical.Auth{
- Policies: user.Policies,
- Metadata: map[string]string{
- "username": username,
- },
- DisplayName: username,
- LeaseOptions: logical.LeaseOptions{
- TTL: user.TTL,
- Renewable: true,
- },
- },
- }, nil
-}
-
-func (b *backend) pathLoginRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the user
- user, err := b.user(req.Storage, req.Auth.Metadata["username"])
- if err != nil {
- return nil, err
- }
- if user == nil {
- // User no longer exists, do not renew
- return nil, nil
- }
-
- if !policyutil.EquivalentPolicies(user.Policies, req.Auth.Policies) {
- return nil, fmt.Errorf("policies have changed, not renewing")
- }
-
- return framework.LeaseExtend(user.TTL, user.MaxTTL, b.System())(req, d)
-}
-
-const pathLoginSyn = `
-Log in with a username and password.
-`
-
-const pathLoginDesc = `
-This endpoint authenticates using a username and password.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_password.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_password.go
deleted file mode 100644
index 57d9b49..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_password.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package userpass
-
-import (
- "fmt"
-
- "golang.org/x/crypto/bcrypt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathUserPassword(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "users/" + framework.GenericNameRegex("username") + "/password$",
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username for this user.",
- },
-
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password for this user.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathUserPasswordUpdate,
- },
-
- HelpSynopsis: pathUserPasswordHelpSyn,
- HelpDescription: pathUserPasswordHelpDesc,
- }
-}
-
-func (b *backend) pathUserPasswordUpdate(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- username := d.Get("username").(string)
-
- userEntry, err := b.user(req.Storage, username)
- if err != nil {
- return nil, err
- }
- if userEntry == nil {
- return nil, fmt.Errorf("username does not exist")
- }
-
- userErr, intErr := b.updateUserPassword(req, d, userEntry)
- if intErr != nil {
- return nil, err
- }
- if userErr != nil {
- return logical.ErrorResponse(userErr.Error()), logical.ErrInvalidRequest
- }
-
- return nil, b.setUser(req.Storage, username, userEntry)
-}
-
-func (b *backend) updateUserPassword(req *logical.Request, d *framework.FieldData, userEntry *UserEntry) (error, error) {
- password := d.Get("password").(string)
- if password == "" {
- return fmt.Errorf("missing password"), nil
- }
- // Generate a hash of the password
- hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
- if err != nil {
- return nil, err
- }
- userEntry.PasswordHash = hash
- return nil, nil
-}
-
-const pathUserPasswordHelpSyn = `
-Reset user's password.
-`
-
-const pathUserPasswordHelpDesc = `
-This endpoint allows resetting the user's password.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go
deleted file mode 100644
index d03a6c2..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package userpass
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathUserPolicies(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "users/" + framework.GenericNameRegex("username") + "/policies$",
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username for this user.",
- },
- "policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Comma-separated list of policies",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathUserPoliciesUpdate,
- },
-
- HelpSynopsis: pathUserPoliciesHelpSyn,
- HelpDescription: pathUserPoliciesHelpDesc,
- }
-}
-
-func (b *backend) pathUserPoliciesUpdate(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- username := d.Get("username").(string)
-
- userEntry, err := b.user(req.Storage, username)
- if err != nil {
- return nil, err
- }
- if userEntry == nil {
- return nil, fmt.Errorf("username does not exist")
- }
-
- userEntry.Policies = policyutil.ParsePolicies(d.Get("policies"))
-
- return nil, b.setUser(req.Storage, username, userEntry)
-}
-
-const pathUserPoliciesHelpSyn = `
-Update the policies associated with the username.
-`
-
-const pathUserPoliciesHelpDesc = `
-This endpoint allows updating the policies associated with the username.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go
deleted file mode 100644
index b207598..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package userpass
-
-import (
- "fmt"
- "strings"
- "time"
-
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathUsersList(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "users/?",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathUserList,
- },
-
- HelpSynopsis: pathUserHelpSyn,
- HelpDescription: pathUserHelpDesc,
- }
-}
-
-func pathUsers(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "users/" + framework.GenericNameRegex("username"),
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username for this user.",
- },
-
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password for this user.",
- },
-
- "policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Comma-separated list of policies",
- },
- "ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: "The lease duration which decides login expiration",
- },
- "max_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: "Maximum duration after which login should expire",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.DeleteOperation: b.pathUserDelete,
- logical.ReadOperation: b.pathUserRead,
- logical.UpdateOperation: b.pathUserWrite,
- logical.CreateOperation: b.pathUserWrite,
- },
-
- ExistenceCheck: b.userExistenceCheck,
-
- HelpSynopsis: pathUserHelpSyn,
- HelpDescription: pathUserHelpDesc,
- }
-}
-
-func (b *backend) userExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
- userEntry, err := b.user(req.Storage, data.Get("username").(string))
- if err != nil {
- return false, err
- }
-
- return userEntry != nil, nil
-}
-
-func (b *backend) user(s logical.Storage, username string) (*UserEntry, error) {
- if username == "" {
- return nil, fmt.Errorf("missing username")
- }
-
- entry, err := s.Get("user/" + strings.ToLower(username))
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result UserEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) setUser(s logical.Storage, username string, userEntry *UserEntry) error {
- entry, err := logical.StorageEntryJSON("user/"+username, userEntry)
- if err != nil {
- return err
- }
-
- return s.Put(entry)
-}
-
-func (b *backend) pathUserList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- users, err := req.Storage.List("user/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(users), nil
-}
-
-func (b *backend) pathUserDelete(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("user/" + strings.ToLower(d.Get("username").(string)))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathUserRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- user, err := b.user(req.Storage, strings.ToLower(d.Get("username").(string)))
- if err != nil {
- return nil, err
- }
- if user == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "policies": user.Policies,
- "ttl": user.TTL.Seconds(),
- "max_ttl": user.MaxTTL.Seconds(),
- },
- }, nil
-}
-
-func (b *backend) userCreateUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- username := strings.ToLower(d.Get("username").(string))
- userEntry, err := b.user(req.Storage, username)
- if err != nil {
- return nil, err
- }
- // Due to existence check, user will only be nil if it's a create operation
- if userEntry == nil {
- userEntry = &UserEntry{}
- }
-
- if _, ok := d.GetOk("password"); ok {
- userErr, intErr := b.updateUserPassword(req, d, userEntry)
- if intErr != nil {
- return nil, err
- }
- if userErr != nil {
- return logical.ErrorResponse(userErr.Error()), logical.ErrInvalidRequest
- }
- }
-
- if policiesRaw, ok := d.GetOk("policies"); ok {
- userEntry.Policies = policyutil.ParsePolicies(policiesRaw)
- }
-
- ttlStr := userEntry.TTL.String()
- if ttlStrRaw, ok := d.GetOk("ttl"); ok {
- ttlStr = ttlStrRaw.(string)
- }
-
- maxTTLStr := userEntry.MaxTTL.String()
- if maxTTLStrRaw, ok := d.GetOk("max_ttl"); ok {
- maxTTLStr = maxTTLStrRaw.(string)
- }
-
- userEntry.TTL, userEntry.MaxTTL, err = b.SanitizeTTLStr(ttlStr, maxTTLStr)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("err: %s", err)), nil
- }
-
- return nil, b.setUser(req.Storage, username, userEntry)
-}
-
-func (b *backend) pathUserWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- password := d.Get("password").(string)
- if req.Operation == logical.CreateOperation && password == "" {
- return logical.ErrorResponse("missing password"), logical.ErrInvalidRequest
- }
- return b.userCreateUpdate(req, d)
-}
-
-type UserEntry struct {
- // Password is deprecated in Vault 0.2 in favor of
- // PasswordHash, but is retained for backwards compatibility.
- Password string
-
- // PasswordHash is a bcrypt hash of the password. This is
- // used instead of the actual password in Vault 0.2+.
- PasswordHash []byte
-
- Policies []string
-
- // Duration after which the user will be revoked unless renewed
- TTL time.Duration
-
- // Maximum duration for which user can be valid
- MaxTTL time.Duration
-}
-
-const pathUserHelpSyn = `
-Manage users allowed to authenticate.
-`
-
-const pathUserHelpDesc = `
-This endpoint allows you to create, read, update, and delete users
-that are allowed to authenticate.
-
-Deleting a user will not revoke auth for prior authenticated users
-with that name. To do this, do a revoke on "login/" for
-the username you want revoked. If you don't need to revoke login immediately,
-then the next renew will cause the lease to expire.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go
deleted file mode 100644
index b6341e0..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package aws
-
-import (
- "strings"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(backendHelp),
-
- PathsSpecial: &logical.Paths{
- LocalStorage: []string{
- framework.WALPrefix,
- },
- },
-
- Paths: []*framework.Path{
- pathConfigRoot(),
- pathConfigLease(&b),
- pathRoles(),
- pathListRoles(&b),
- pathUser(&b),
- pathSTS(&b),
- },
-
- Secrets: []*framework.Secret{
- secretAccessKeys(&b),
- },
-
- WALRollback: walRollback,
- WALRollbackMinAge: 5 * time.Minute,
- BackendType: logical.TypeLogical,
- }
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-}
-
-const backendHelp = `
-The AWS backend dynamically generates AWS access keys for a set of
-IAM policies. The AWS access keys have a configurable lease set and
-are automatically revoked at the end of the lease.
-
-After mounting this backend, credentials to generate IAM keys must
-be configured with the "root" path and policies must be written using
-the "roles/" endpoints before any access keys can be generated.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go
deleted file mode 100644
index 5fab073..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go
+++ /dev/null
@@ -1,459 +0,0 @@
-package aws
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "log"
- "os"
- "strings"
- "testing"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/ec2"
- "github.com/aws/aws-sdk-go/service/iam"
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/mitchellh/mapstructure"
-)
-
-func getBackend(t *testing.T) logical.Backend {
- be, _ := Factory(logical.TestBackendConfig())
- return be
-}
-
-func TestBackend_basic(t *testing.T) {
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- PreCheck: func() { testAccPreCheck(t) },
- Backend: getBackend(t),
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t),
- testAccStepWritePolicy(t, "test", testPolicy),
- testAccStepReadUser(t, "test"),
- },
- })
-}
-
-func TestBackend_basicSTS(t *testing.T) {
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- PreCheck: func() {
- testAccPreCheck(t)
- createRole(t)
- },
- Backend: getBackend(t),
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t),
- testAccStepWritePolicy(t, "test", testPolicy),
- testAccStepReadSTS(t, "test"),
- testAccStepWriteArnPolicyRef(t, "test", testPolicyArn),
- testAccStepReadSTSWithArnPolicy(t, "test"),
- testAccStepWriteArnRoleRef(t, testRoleName),
- testAccStepReadSTS(t, testRoleName),
- },
- Teardown: teardown,
- })
-}
-
-func TestBackend_policyCrud(t *testing.T) {
- var compacted bytes.Buffer
- if err := json.Compact(&compacted, []byte(testPolicy)); err != nil {
- t.Fatalf("bad: %s", err)
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Backend: getBackend(t),
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t),
- testAccStepWritePolicy(t, "test", testPolicy),
- testAccStepReadPolicy(t, "test", compacted.String()),
- testAccStepDeletePolicy(t, "test"),
- testAccStepReadPolicy(t, "test", ""),
- },
- })
-}
-
-func testAccPreCheck(t *testing.T) {
- if v := os.Getenv("AWS_ACCESS_KEY_ID"); v == "" {
- t.Fatal("AWS_ACCESS_KEY_ID must be set for acceptance tests")
- }
-
- if v := os.Getenv("AWS_SECRET_ACCESS_KEY"); v == "" {
- t.Fatal("AWS_SECRET_ACCESS_KEY must be set for acceptance tests")
- }
-
- if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" {
- log.Println("[INFO] Test: Using us-west-2 as test region")
- os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
- }
-
- if v := os.Getenv("AWS_ACCOUNT_ID"); v == "" {
- accountId, err := getAccountId()
- if err != nil {
- t.Fatal("AWS_ACCOUNT_ID could not be read from iam:GetUser for acceptance tests")
- }
- log.Printf("[INFO] Test: Used %s as AWS_ACCOUNT_ID", accountId)
- os.Setenv("AWS_ACCOUNT_ID", accountId)
- }
-}
-
-func getAccountId() (string, error) {
- creds := credentials.NewStaticCredentials(os.Getenv("AWS_ACCESS_KEY_ID"),
- os.Getenv("AWS_SECRET_ACCESS_KEY"),
- "")
-
- awsConfig := &aws.Config{
- Credentials: creds,
- Region: aws.String("us-east-1"),
- HTTPClient: cleanhttp.DefaultClient(),
- }
- svc := iam.New(session.New(awsConfig))
-
- params := &iam.GetUserInput{}
- res, err := svc.GetUser(params)
-
- if err != nil {
- return "", err
- }
-
- // split "arn:aws:iam::012345678912:user/username"
- accountId := strings.Split(*res.User.Arn, ":")[4]
- return accountId, nil
-}
-
-const testRoleName = "Vault-Acceptance-Test-AWS-Assume-Role"
-
-func createRole(t *testing.T) {
- const testRoleAssumePolicy = `{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect":"Allow",
- "Principal": {
- "AWS": "arn:aws:iam::%s:root"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
-`
- creds := credentials.NewStaticCredentials(os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY"), "")
-
- awsConfig := &aws.Config{
- Credentials: creds,
- Region: aws.String("us-east-1"),
- HTTPClient: cleanhttp.DefaultClient(),
- }
- svc := iam.New(session.New(awsConfig))
- trustPolicy := fmt.Sprintf(testRoleAssumePolicy, os.Getenv("AWS_ACCOUNT_ID"))
-
- params := &iam.CreateRoleInput{
- AssumeRolePolicyDocument: aws.String(trustPolicy),
- RoleName: aws.String(testRoleName),
- Path: aws.String("/"),
- }
-
- log.Printf("[INFO] AWS CreateRole: %s", testRoleName)
- _, err := svc.CreateRole(params)
-
- if err != nil {
- t.Fatalf("AWS CreateRole failed: %v", err)
- }
-
- attachment := &iam.AttachRolePolicyInput{
- PolicyArn: aws.String(testPolicyArn),
- RoleName: aws.String(testRoleName), // Required
- }
- _, err = svc.AttachRolePolicy(attachment)
-
- if err != nil {
- t.Fatalf("AWS CreateRole failed: %v", err)
- }
-
- // Sleep sometime because AWS is eventually consistent
- log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
- time.Sleep(10 * time.Second)
-}
-
-func teardown() error {
- creds := credentials.NewStaticCredentials(os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY"), "")
-
- awsConfig := &aws.Config{
- Credentials: creds,
- Region: aws.String("us-east-1"),
- HTTPClient: cleanhttp.DefaultClient(),
- }
- svc := iam.New(session.New(awsConfig))
-
- attachment := &iam.DetachRolePolicyInput{
- PolicyArn: aws.String(testPolicyArn),
- RoleName: aws.String(testRoleName), // Required
- }
- _, err := svc.DetachRolePolicy(attachment)
- if err != nil {
- log.Printf("[WARN] AWS DetachRolePolicy failed: %v", err)
- return err
- }
-
- params := &iam.DeleteRoleInput{
- RoleName: aws.String(testRoleName),
- }
-
- log.Printf("[INFO] AWS DeleteRole: %s", testRoleName)
- _, err = svc.DeleteRole(params)
-
- if err != nil {
- log.Printf("[WARN] AWS DeleteRole failed: %v", err)
- return err
- }
-
- return nil
-}
-
-func testAccStepConfig(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/root",
- Data: map[string]interface{}{
- "access_key": os.Getenv("AWS_ACCESS_KEY_ID"),
- "secret_key": os.Getenv("AWS_SECRET_ACCESS_KEY"),
- "region": os.Getenv("AWS_DEFAULT_REGION"),
- },
- }
-}
-
-func testAccStepReadUser(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "creds/" + name,
- Check: func(resp *logical.Response) error {
- var d struct {
- AccessKey string `mapstructure:"access_key"`
- SecretKey string `mapstructure:"secret_key"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- log.Printf("[WARN] Generated credentials: %v", d)
-
- // Sleep sometime because AWS is eventually consistent
- log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
- time.Sleep(10 * time.Second)
-
- // Build a client and verify that the credentials work
- creds := credentials.NewStaticCredentials(d.AccessKey, d.SecretKey, "")
- awsConfig := &aws.Config{
- Credentials: creds,
- Region: aws.String("us-east-1"),
- HTTPClient: cleanhttp.DefaultClient(),
- }
- client := ec2.New(session.New(awsConfig))
-
- log.Printf("[WARN] Verifying that the generated credentials work...")
- _, err := client.DescribeInstances(&ec2.DescribeInstancesInput{})
- if err != nil {
- return err
- }
-
- return nil
- },
- }
-}
-
-func testAccStepReadSTS(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "sts/" + name,
- Check: func(resp *logical.Response) error {
- var d struct {
- AccessKey string `mapstructure:"access_key"`
- SecretKey string `mapstructure:"secret_key"`
- STSToken string `mapstructure:"security_token"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- log.Printf("[WARN] Generated credentials: %v", d)
-
- // Build a client and verify that the credentials work
- creds := credentials.NewStaticCredentials(d.AccessKey, d.SecretKey, d.STSToken)
- awsConfig := &aws.Config{
- Credentials: creds,
- Region: aws.String("us-east-1"),
- HTTPClient: cleanhttp.DefaultClient(),
- }
- client := ec2.New(session.New(awsConfig))
-
- log.Printf("[WARN] Verifying that the generated credentials work...")
- _, err := client.DescribeInstances(&ec2.DescribeInstancesInput{})
- if err != nil {
- return err
- }
-
- return nil
- },
- }
-}
-
-func testAccStepReadSTSWithArnPolicy(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "sts/" + name,
- ErrorOk: true,
- Check: func(resp *logical.Response) error {
- if resp.Data["error"] !=
- "Can't generate STS credentials for a managed policy; use a role to assume or an inline policy instead" {
- t.Fatalf("bad: %v", resp)
- }
- return nil
- },
- }
-}
-
-func testAccStepWritePolicy(t *testing.T, name string, policy string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/" + name,
- Data: map[string]interface{}{
- "policy": testPolicy,
- },
- }
-}
-
-func testAccStepDeletePolicy(t *testing.T, n string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "roles/" + n,
- }
-}
-
-func testAccStepReadPolicy(t *testing.T, name string, value string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "roles/" + name,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if value == "" {
- return nil
- }
-
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- Policy string `mapstructure:"policy"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.Policy != value {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
-
-const testPolicy = `
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "Stmt1426528957000",
- "Effect": "Allow",
- "Action": [
- "ec2:*"
- ],
- "Resource": [
- "*"
- ]
- }
- ]
-}
-`
-
-const testPolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess"
-
-func testAccStepWriteArnPolicyRef(t *testing.T, name string, arn string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/" + name,
- Data: map[string]interface{}{
- "arn": testPolicyArn,
- },
- }
-}
-
-func TestBackend_basicPolicyArnRef(t *testing.T) {
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- PreCheck: func() { testAccPreCheck(t) },
- Backend: getBackend(t),
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t),
- testAccStepWriteArnPolicyRef(t, "test", testPolicyArn),
- testAccStepReadUser(t, "test"),
- },
- })
-}
-
-func TestBackend_policyArnCrud(t *testing.T) {
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Backend: getBackend(t),
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t),
- testAccStepWriteArnPolicyRef(t, "test", testPolicyArn),
- testAccStepReadArnPolicy(t, "test", testPolicyArn),
- testAccStepDeletePolicy(t, "test"),
- testAccStepReadArnPolicy(t, "test", ""),
- },
- })
-}
-
-func testAccStepReadArnPolicy(t *testing.T, name string, value string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "roles/" + name,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if value == "" {
- return nil
- }
-
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- Policy string `mapstructure:"arn"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.Policy != value {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
-
-func testAccStepWriteArnRoleRef(t *testing.T, roleName string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/" + roleName,
- Data: map[string]interface{}{
- "arn": fmt.Sprintf("arn:aws:iam::%s:role/%s", os.Getenv("AWS_ACCOUNT_ID"), roleName),
- },
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go
deleted file mode 100644
index f6bbbe2..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package aws
-
-import (
- "fmt"
- "os"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/iam"
- "github.com/aws/aws-sdk-go/service/sts"
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/helper/awsutil"
- "github.com/hashicorp/vault/logical"
-)
-
-func getRootConfig(s logical.Storage) (*aws.Config, error) {
- credsConfig := &awsutil.CredentialsConfig{}
-
- entry, err := s.Get("config/root")
- if err != nil {
- return nil, err
- }
- if entry != nil {
- var config rootConfig
- if err := entry.DecodeJSON(&config); err != nil {
- return nil, fmt.Errorf("error reading root configuration: %s", err)
- }
-
- credsConfig.AccessKey = config.AccessKey
- credsConfig.SecretKey = config.SecretKey
- credsConfig.Region = config.Region
- }
-
- if credsConfig.Region == "" {
- credsConfig.Region = os.Getenv("AWS_REGION")
- if credsConfig.Region == "" {
- credsConfig.Region = os.Getenv("AWS_DEFAULT_REGION")
- if credsConfig.Region == "" {
- credsConfig.Region = "us-east-1"
- }
- }
- }
-
- credsConfig.HTTPClient = cleanhttp.DefaultClient()
-
- creds, err := credsConfig.GenerateCredentialChain()
- if err != nil {
- return nil, err
- }
-
- return &aws.Config{
- Credentials: creds,
- Region: aws.String(credsConfig.Region),
- HTTPClient: cleanhttp.DefaultClient(),
- }, nil
-}
-
-func clientIAM(s logical.Storage) (*iam.IAM, error) {
- awsConfig, err := getRootConfig(s)
- if err != nil {
- return nil, err
- }
- client := iam.New(session.New(awsConfig))
- if client == nil {
- return nil, fmt.Errorf("could not obtain iam client")
- }
- return client, nil
-}
-
-func clientSTS(s logical.Storage) (*sts.STS, error) {
- awsConfig, err := getRootConfig(s)
- if err != nil {
- return nil, err
- }
- client := sts.New(session.New(awsConfig))
- if client == nil {
- return nil, fmt.Errorf("could not obtain sts client")
- }
- return client, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_lease.go
deleted file mode 100644
index 3decbfb..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_lease.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package aws
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigLease(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/lease",
- Fields: map[string]*framework.FieldSchema{
- "lease": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Default lease for roles.",
- },
-
- "lease_max": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Maximum time a credential is valid for.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathLeaseRead,
- logical.UpdateOperation: b.pathLeaseWrite,
- },
-
- HelpSynopsis: pathConfigLeaseHelpSyn,
- HelpDescription: pathConfigLeaseHelpDesc,
- }
-}
-
-// Lease returns the lease information
-func (b *backend) Lease(s logical.Storage) (*configLease, error) {
- entry, err := s.Get("config/lease")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result configLease
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathLeaseWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- leaseRaw := d.Get("lease").(string)
- leaseMaxRaw := d.Get("lease_max").(string)
-
- if len(leaseRaw) == 0 {
- return logical.ErrorResponse("'lease' is a required parameter"), nil
- }
- if len(leaseMaxRaw) == 0 {
- return logical.ErrorResponse("'lease_max' is a required parameter"), nil
- }
-
- lease, err := time.ParseDuration(leaseRaw)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Invalid lease: %s", err)), nil
- }
- leaseMax, err := time.ParseDuration(leaseMaxRaw)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Invalid lease_max: %s", err)), nil
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("config/lease", &configLease{
- Lease: lease,
- LeaseMax: leaseMax,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathLeaseRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- lease, err := b.Lease(req.Storage)
-
- if err != nil {
- return nil, err
- }
- if lease == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "lease": lease.Lease.String(),
- "lease_max": lease.LeaseMax.String(),
- },
- }, nil
-}
-
-type configLease struct {
- Lease time.Duration
- LeaseMax time.Duration
-}
-
-const pathConfigLeaseHelpSyn = `
-Configure the default lease information for generated credentials.
-`
-
-const pathConfigLeaseHelpDesc = `
-This configures the default lease information used for credentials
-generated by this backend. The lease specifies the duration that a
-credential will be valid for, as well as the maximum session for
-a set of credentials.
-
-The format for the lease is "1h" or integer and then unit. The longest
-unit is hour.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go
deleted file mode 100644
index 754e5b2..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package aws
-
-import (
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigRoot() *framework.Path {
- return &framework.Path{
- Pattern: "config/root",
- Fields: map[string]*framework.FieldSchema{
- "access_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Access key with permission to create new keys.",
- },
-
- "secret_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Secret key with permission to create new keys.",
- },
-
- "region": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Region for API calls.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: pathConfigRootWrite,
- },
-
- HelpSynopsis: pathConfigRootHelpSyn,
- HelpDescription: pathConfigRootHelpDesc,
- }
-}
-
-func pathConfigRootWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- region := data.Get("region").(string)
-
- entry, err := logical.StorageEntryJSON("config/root", rootConfig{
- AccessKey: data.Get("access_key").(string),
- SecretKey: data.Get("secret_key").(string),
- Region: region,
- })
- if err != nil {
- return nil, err
- }
-
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-type rootConfig struct {
- AccessKey string `json:"access_key"`
- SecretKey string `json:"secret_key"`
- Region string `json:"region"`
-}
-
-const pathConfigRootHelpSyn = `
-Configure the root credentials that are used to manage IAM.
-`
-
-const pathConfigRootHelpDesc = `
-Before doing anything, the AWS backend needs credentials that are able
-to manage IAM policies, users, access keys, etc. This endpoint is used
-to configure those credentials. They don't necessarilly need to be root
-keys as long as they have permission to manage IAM.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles.go
deleted file mode 100644
index acd4cae..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package aws
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
-
- "errors"
- "strings"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathListRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
-
- HelpSynopsis: pathListRolesHelpSyn,
- HelpDescription: pathListRolesHelpDesc,
- }
-}
-
-func pathRoles() *framework.Path {
- return &framework.Path{
- Pattern: "roles/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the policy",
- },
-
- "arn": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "ARN Reference to a managed policy",
- },
-
- "policy": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "IAM policy document",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.DeleteOperation: pathRolesDelete,
- logical.ReadOperation: pathRolesRead,
- logical.UpdateOperation: pathRolesWrite,
- },
-
- HelpSynopsis: pathRolesHelpSyn,
- HelpDescription: pathRolesHelpDesc,
- }
-}
-
-func (b *backend) pathRoleList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("policy/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(entries), nil
-}
-
-func pathRolesDelete(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("policy/" + d.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func pathRolesRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entry, err := req.Storage.Get("policy/" + d.Get("name").(string))
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- val := string(entry.Value)
- if strings.HasPrefix(val, "arn:") {
- return &logical.Response{
- Data: map[string]interface{}{
- "arn": val,
- },
- }, nil
- }
- return &logical.Response{
- Data: map[string]interface{}{
- "policy": val,
- },
- }, nil
-}
-
-func useInlinePolicy(d *framework.FieldData) (bool, error) {
- bp := d.Get("policy").(string) != ""
- ba := d.Get("arn").(string) != ""
-
- if !bp && !ba {
- return false, errors.New("Either policy or arn must be provided")
- }
- if bp && ba {
- return false, errors.New("Only one of policy or arn should be provided")
- }
- return bp, nil
-}
-
-func pathRolesWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- var buf bytes.Buffer
-
- uip, err := useInlinePolicy(d)
- if err != nil {
- return nil, err
- }
-
- if uip {
- if err := json.Compact(&buf, []byte(d.Get("policy").(string))); err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error compacting policy: %s", err)), nil
- }
- // Write the policy into storage
- err := req.Storage.Put(&logical.StorageEntry{
- Key: "policy/" + d.Get("name").(string),
- Value: buf.Bytes(),
- })
- if err != nil {
- return nil, err
- }
- } else {
- // Write the arn ref into storage
- err := req.Storage.Put(&logical.StorageEntry{
- Key: "policy/" + d.Get("name").(string),
- Value: []byte(d.Get("arn").(string)),
- })
- if err != nil {
- return nil, err
- }
- }
-
- return nil, nil
-}
-
-const pathListRolesHelpSyn = `List the existing roles in this backend`
-
-const pathListRolesHelpDesc = `Roles will be listed by the role name.`
-
-const pathRolesHelpSyn = `
-Read, write and reference IAM policies that access keys can be made for.
-`
-
-const pathRolesHelpDesc = `
-This path allows you to read and write roles that are used to
-create access keys. These roles are associated with IAM policies that
-map directly to the route to read the access keys. For example, if the
-backend is mounted at "aws" and you create a role at "aws/roles/deploy"
-then a user could request access credentials at "aws/creds/deploy".
-
-You can either supply a user inline policy (via the policy argument), or
-provide a reference to an existing AWS policy by supplying the full arn
-reference (via the arn argument). Inline user policies written are normal
-IAM policies. Vault will not attempt to parse these except to validate
-that they're basic JSON. No validation is performed on arn references.
-
-To validate the keys, attempt to read an access key after writing the policy.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go
deleted file mode 100644
index 3314c7a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package aws
-
-import (
- "strconv"
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestBackend_PathListRoles(t *testing.T) {
- var resp *logical.Response
- var err error
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
-
- b := Backend()
- if err := b.Setup(config); err != nil {
- t.Fatal(err)
- }
-
- roleData := map[string]interface{}{
- "arn": "testarn",
- }
-
- roleReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: config.StorageView,
- Data: roleData,
- }
-
- for i := 1; i <= 10; i++ {
- roleReq.Path = "roles/testrole" + strconv.Itoa(i)
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: role creation failed. resp:%#v\n err:%v", resp, err)
- }
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ListOperation,
- Path: "roles",
- Storage: config.StorageView,
- })
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: listing roles failed. resp:%#v\n err:%v", resp, err)
- }
-
- if len(resp.Data["keys"].([]string)) != 10 {
- t.Fatalf("failed to list all 10 roles")
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ListOperation,
- Path: "roles/",
- Storage: config.StorageView,
- })
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: listing roles failed. resp:%#v\n err:%v", resp, err)
- }
-
- if len(resp.Data["keys"].([]string)) != 10 {
- t.Fatalf("failed to list all 10 roles")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_sts.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_sts.go
deleted file mode 100644
index 25f5cfa..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_sts.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package aws
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathSTS(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "sts/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role",
- },
- "ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `Lifetime of the token in seconds.
-AWS documentation excerpt: The duration, in seconds, that the credentials
-should remain valid. Acceptable durations for IAM user sessions range from 900
-seconds (15 minutes) to 129600 seconds (36 hours), with 43200 seconds (12
-hours) as the default. Sessions for AWS account owners are restricted to a
-maximum of 3600 seconds (one hour). If the duration is longer than one hour,
-the session for AWS account owners defaults to one hour.`,
- Default: 3600,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathSTSRead,
- logical.UpdateOperation: b.pathSTSRead,
- },
-
- HelpSynopsis: pathSTSHelpSyn,
- HelpDescription: pathSTSHelpDesc,
- }
-}
-
-func (b *backend) pathSTSRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- policyName := d.Get("name").(string)
- ttl := int64(d.Get("ttl").(int))
-
- // Read the policy
- policy, err := req.Storage.Get("policy/" + policyName)
- if err != nil {
- return nil, fmt.Errorf("error retrieving role: %s", err)
- }
- if policy == nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Role '%s' not found", policyName)), nil
- }
- policyValue := string(policy.Value)
- if strings.HasPrefix(policyValue, "arn:") {
- if strings.Contains(policyValue, ":role/") {
- return b.assumeRole(
- req.Storage,
- req.DisplayName, policyName, policyValue,
- ttl,
- )
- } else {
- return logical.ErrorResponse(
- "Can't generate STS credentials for a managed policy; use a role to assume or an inline policy instead"),
- logical.ErrInvalidRequest
- }
- }
- // Use the helper to create the secret
- return b.secretTokenCreate(
- req.Storage,
- req.DisplayName, policyName, policyValue,
- ttl,
- )
-}
-
-const pathSTSHelpSyn = `
-Generate an access key pair + security token for a specific role.
-`
-
-const pathSTSHelpDesc = `
-This path will generate a new, never before used key pair + security token for
-accessing AWS. The IAM policy used to back this key pair will be
-the "name" parameter. For example, if this backend is mounted at "aws",
-then "aws/sts/deploy" would generate access keys for the "deploy" role.
-
-Note, these credentials are instantiated using the AWS STS backend.
-
-The access keys will have a lease associated with them. The access keys
-can be revoked by using the lease ID.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_user.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_user.go
deleted file mode 100644
index cab2728..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_user.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package aws
-
-import (
- "fmt"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/iam"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/mitchellh/mapstructure"
-)
-
-func pathUser(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "creds/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathUserRead,
- },
-
- HelpSynopsis: pathUserHelpSyn,
- HelpDescription: pathUserHelpDesc,
- }
-}
-
-func (b *backend) pathUserRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- policyName := d.Get("name").(string)
-
- // Read the policy
- policy, err := req.Storage.Get("policy/" + policyName)
- if err != nil {
- return nil, fmt.Errorf("error retrieving role: %s", err)
- }
- if policy == nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Role '%s' not found", policyName)), nil
- }
-
- // Use the helper to create the secret
- return b.secretAccessKeysCreate(
- req.Storage, req.DisplayName, policyName, string(policy.Value))
-}
-
-func pathUserRollback(req *logical.Request, _kind string, data interface{}) error {
- var entry walUser
- if err := mapstructure.Decode(data, &entry); err != nil {
- return err
- }
- username := entry.UserName
-
- // Get the client
- client, err := clientIAM(req.Storage)
- if err != nil {
- return err
- }
-
- // Get information about this user
- groupsResp, err := client.ListGroupsForUser(&iam.ListGroupsForUserInput{
- UserName: aws.String(username),
- MaxItems: aws.Int64(1000),
- })
- if err != nil {
- return err
- }
- groups := groupsResp.Groups
-
- // Inline (user) policies
- policiesResp, err := client.ListUserPolicies(&iam.ListUserPoliciesInput{
- UserName: aws.String(username),
- MaxItems: aws.Int64(1000),
- })
- if err != nil {
- return err
- }
- policies := policiesResp.PolicyNames
-
- // Attached managed policies
- manPoliciesResp, err := client.ListAttachedUserPolicies(&iam.ListAttachedUserPoliciesInput{
- UserName: aws.String(username),
- MaxItems: aws.Int64(1000),
- })
- if err != nil {
- return err
- }
- manPolicies := manPoliciesResp.AttachedPolicies
-
- keysResp, err := client.ListAccessKeys(&iam.ListAccessKeysInput{
- UserName: aws.String(username),
- MaxItems: aws.Int64(1000),
- })
- if err != nil {
- return err
- }
- keys := keysResp.AccessKeyMetadata
-
- // Revoke all keys
- for _, k := range keys {
- _, err = client.DeleteAccessKey(&iam.DeleteAccessKeyInput{
- AccessKeyId: k.AccessKeyId,
- UserName: aws.String(username),
- })
- if err != nil {
- return err
- }
- }
-
- // Detach managed policies
- for _, p := range manPolicies {
- _, err = client.DetachUserPolicy(&iam.DetachUserPolicyInput{
- UserName: aws.String(username),
- PolicyArn: p.PolicyArn,
- })
- if err != nil {
- return err
- }
- }
-
- // Delete any inline (user) policies
- for _, p := range policies {
- _, err = client.DeleteUserPolicy(&iam.DeleteUserPolicyInput{
- UserName: aws.String(username),
- PolicyName: p,
- })
- if err != nil {
- return err
- }
- }
-
- // Remove the user from all their groups
- for _, g := range groups {
- _, err = client.RemoveUserFromGroup(&iam.RemoveUserFromGroupInput{
- GroupName: g.GroupName,
- UserName: aws.String(username),
- })
- if err != nil {
- return err
- }
- }
-
- // Delete the user
- _, err = client.DeleteUser(&iam.DeleteUserInput{
- UserName: aws.String(username),
- })
- if err != nil {
- return err
- }
-
- return nil
-}
-
-type walUser struct {
- UserName string
-}
-
-const pathUserHelpSyn = `
-Generate an access key pair for a specific role.
-`
-
-const pathUserHelpDesc = `
-This path will generate a new, never before used key pair for
-accessing AWS. The IAM policy used to back this key pair will be
-the "name" parameter. For example, if this backend is mounted at "aws",
-then "aws/creds/deploy" would generate access keys for the "deploy" role.
-
-The access keys will have a lease associated with them. The access keys
-can be revoked by using the lease ID.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/rollback.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/rollback.go
deleted file mode 100644
index 5d1b335..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/rollback.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package aws
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-var walRollbackMap = map[string]framework.WALRollbackFunc{
- "user": pathUserRollback,
-}
-
-func walRollback(req *logical.Request, kind string, data interface{}) error {
- f, ok := walRollbackMap[kind]
- if !ok {
- return fmt.Errorf("unknown type to rollback")
- }
-
- return f(req, kind, data)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go
deleted file mode 100644
index 18dbb5d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go
+++ /dev/null
@@ -1,320 +0,0 @@
-package aws
-
-import (
- "fmt"
- "math/rand"
- "regexp"
- "time"
-
- "strings"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/iam"
- "github.com/aws/aws-sdk-go/service/sts"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const SecretAccessKeyType = "access_keys"
-
-func secretAccessKeys(b *backend) *framework.Secret {
- return &framework.Secret{
- Type: SecretAccessKeyType,
- Fields: map[string]*framework.FieldSchema{
- "access_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Access Key",
- },
-
- "secret_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Secret Key",
- },
- "security_token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Security Token",
- },
- },
-
- Renew: b.secretAccessKeysRenew,
- Revoke: secretAccessKeysRevoke,
- }
-}
-
-func genUsername(displayName, policyName, userType string) (ret string, warning string) {
- var midString string
-
- switch userType {
- case "iam_user":
- // IAM users are capped at 64 chars; this leaves, after the beginning and
- // end added below, 42 chars to play with.
- midString = fmt.Sprintf("%s-%s-",
- normalizeDisplayName(displayName),
- normalizeDisplayName(policyName))
- if len(midString) > 42 {
- midString = midString[0:42]
- warning = "the calling token display name/IAM policy name were truncated to fit into IAM username length limits"
- }
- case "sts":
- // Capped at 32 chars, which leaves only a couple of characters to play
- // with, so don't insert display name or policy name at all
- }
-
- ret = fmt.Sprintf("vault-%s%d-%d", midString, time.Now().Unix(), rand.Int31n(10000))
- return
-}
-
-func (b *backend) secretTokenCreate(s logical.Storage,
- displayName, policyName, policy string,
- lifeTimeInSeconds int64) (*logical.Response, error) {
- STSClient, err := clientSTS(s)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- username, usernameWarning := genUsername(displayName, policyName, "sts")
-
- tokenResp, err := STSClient.GetFederationToken(
- &sts.GetFederationTokenInput{
- Name: aws.String(username),
- Policy: aws.String(policy),
- DurationSeconds: &lifeTimeInSeconds,
- })
-
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error generating STS keys: %s", err)), nil
- }
-
- resp := b.Secret(SecretAccessKeyType).Response(map[string]interface{}{
- "access_key": *tokenResp.Credentials.AccessKeyId,
- "secret_key": *tokenResp.Credentials.SecretAccessKey,
- "security_token": *tokenResp.Credentials.SessionToken,
- }, map[string]interface{}{
- "username": username,
- "policy": policy,
- "is_sts": true,
- })
-
- // Set the secret TTL to appropriately match the expiration of the token
- resp.Secret.TTL = tokenResp.Credentials.Expiration.Sub(time.Now())
-
- // STS are purposefully short-lived and aren't renewable
- resp.Secret.Renewable = false
-
- if usernameWarning != "" {
- resp.AddWarning(usernameWarning)
- }
-
- return resp, nil
-}
-
-func (b *backend) assumeRole(s logical.Storage,
- displayName, policyName, policy string,
- lifeTimeInSeconds int64) (*logical.Response, error) {
- STSClient, err := clientSTS(s)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- username, usernameWarning := genUsername(displayName, policyName, "iam_user")
-
- tokenResp, err := STSClient.AssumeRole(
- &sts.AssumeRoleInput{
- RoleSessionName: aws.String(username),
- RoleArn: aws.String(policy),
- DurationSeconds: &lifeTimeInSeconds,
- })
-
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error assuming role: %s", err)), nil
- }
-
- resp := b.Secret(SecretAccessKeyType).Response(map[string]interface{}{
- "access_key": *tokenResp.Credentials.AccessKeyId,
- "secret_key": *tokenResp.Credentials.SecretAccessKey,
- "security_token": *tokenResp.Credentials.SessionToken,
- }, map[string]interface{}{
- "username": username,
- "policy": policy,
- "is_sts": true,
- })
-
- // Set the secret TTL to appropriately match the expiration of the token
- resp.Secret.TTL = tokenResp.Credentials.Expiration.Sub(time.Now())
-
- // STS are purposefully short-lived and aren't renewable
- resp.Secret.Renewable = false
-
- if usernameWarning != "" {
- resp.AddWarning(usernameWarning)
- }
-
- return resp, nil
-}
-
-func (b *backend) secretAccessKeysCreate(
- s logical.Storage,
- displayName, policyName string, policy string) (*logical.Response, error) {
- client, err := clientIAM(s)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- username, usernameWarning := genUsername(displayName, policyName, "iam_user")
-
- // Write to the WAL that this user will be created. We do this before
- // the user is created because if switch the order then the WAL put
- // can fail, which would put us in an awkward position: we have a user
- // we need to rollback but can't put the WAL entry to do the rollback.
- walId, err := framework.PutWAL(s, "user", &walUser{
- UserName: username,
- })
- if err != nil {
- return nil, fmt.Errorf("Error writing WAL entry: %s", err)
- }
-
- // Create the user
- _, err = client.CreateUser(&iam.CreateUserInput{
- UserName: aws.String(username),
- })
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error creating IAM user: %s", err)), nil
- }
-
- if strings.HasPrefix(policy, "arn:") {
- // Attach existing policy against user
- _, err = client.AttachUserPolicy(&iam.AttachUserPolicyInput{
- UserName: aws.String(username),
- PolicyArn: aws.String(policy),
- })
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error attaching user policy: %s", err)), nil
- }
-
- } else {
- // Add new inline user policy against user
- _, err = client.PutUserPolicy(&iam.PutUserPolicyInput{
- UserName: aws.String(username),
- PolicyName: aws.String(policyName),
- PolicyDocument: aws.String(policy),
- })
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error putting user policy: %s", err)), nil
- }
- }
-
- // Create the keys
- keyResp, err := client.CreateAccessKey(&iam.CreateAccessKeyInput{
- UserName: aws.String(username),
- })
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error creating access keys: %s", err)), nil
- }
-
- // Remove the WAL entry, we succeeded! If we fail, we don't return
- // the secret because it'll get rolled back anyways, so we have to return
- // an error here.
- if err := framework.DeleteWAL(s, walId); err != nil {
- return nil, fmt.Errorf("Failed to commit WAL entry: %s", err)
- }
-
- // Return the info!
- resp := b.Secret(SecretAccessKeyType).Response(map[string]interface{}{
- "access_key": *keyResp.AccessKey.AccessKeyId,
- "secret_key": *keyResp.AccessKey.SecretAccessKey,
- "security_token": nil,
- }, map[string]interface{}{
- "username": username,
- "policy": policy,
- "is_sts": false,
- })
-
- lease, err := b.Lease(s)
- if err != nil || lease == nil {
- lease = &configLease{}
- }
-
- resp.Secret.TTL = lease.Lease
-
- if usernameWarning != "" {
- resp.AddWarning(usernameWarning)
- }
-
- return resp, nil
-}
-
-func (b *backend) secretAccessKeysRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- // STS already has a lifetime, and we don't support renewing it
- isSTSRaw, ok := req.Secret.InternalData["is_sts"]
- if ok {
- isSTS, ok := isSTSRaw.(bool)
- if ok {
- if isSTS {
- return nil, nil
- }
- }
- }
-
- lease, err := b.Lease(req.Storage)
- if err != nil {
- return nil, err
- }
- if lease == nil {
- lease = &configLease{}
- }
-
- f := framework.LeaseExtend(lease.Lease, lease.LeaseMax, b.System())
- return f(req, d)
-}
-
-func secretAccessKeysRevoke(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- // STS cleans up after itself so we can skip this if is_sts internal data
- // element set to true. If is_sts is not set, assumes old version
- // and defaults to the IAM approach.
- isSTSRaw, ok := req.Secret.InternalData["is_sts"]
- if ok {
- isSTS, ok := isSTSRaw.(bool)
- if ok {
- if isSTS {
- return nil, nil
- }
- } else {
- return nil, fmt.Errorf("secret has is_sts but value could not be understood")
- }
- }
-
- // Get the username from the internal data
- usernameRaw, ok := req.Secret.InternalData["username"]
- if !ok {
- return nil, fmt.Errorf("secret is missing username internal data")
- }
- username, ok := usernameRaw.(string)
- if !ok {
- return nil, fmt.Errorf("secret is missing username internal data")
- }
-
- // Use the user rollback mechanism to delete this user
- err := pathUserRollback(req, "user", map[string]interface{}{
- "username": username,
- })
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func normalizeDisplayName(displayName string) string {
- re := regexp.MustCompile("[^a-zA-Z0-9+=,.@_-]")
- return re.ReplaceAllString(displayName, "_")
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys_test.go
deleted file mode 100644
index c7a19de..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package aws
-
-import (
- "testing"
-)
-
-func TestNormalizeDisplayName_NormRequired(t *testing.T) {
-
- invalidNames := map[string]string{
- "^#$test name\nshould be normalized)(*": "___test_name_should_be_normalized___",
- "^#$test name1 should be normalized)(*": "___test_name1_should_be_normalized___",
- "^#$test name should be normalized)(*": "___test_name__should_be_normalized___",
- "^#$test name__should be normalized)(*": "___test_name__should_be_normalized___",
- }
-
- for k, v := range invalidNames {
- normalizedName := normalizeDisplayName(k)
- if normalizedName != v {
- t.Fatalf(
- "normalizeDisplayName does not normalize AWS name correctly: %s should resolve to %s",
- k,
- normalizedName)
- }
- }
-}
-
-func TestNormalizeDisplayName_NormNotRequired(t *testing.T) {
-
- validNames := []string{
- "test_name_should_normalize_to_itself@example.com",
- "test1_name_should_normalize_to_itself@example.com",
- "UPPERlower0123456789-_,.@example.com",
- }
-
- for _, n := range validNames {
- normalizedName := normalizeDisplayName(n)
- if normalizedName != n {
- t.Fatalf(
- "normalizeDisplayName erroneously normalizes valid names: expected %s but normalized to %s",
- n,
- normalizedName)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go
deleted file mode 100644
index dd54ba5..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package cassandra
-
-import (
- "fmt"
- "strings"
- "sync"
-
- "github.com/gocql/gocql"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// Factory creates a new backend
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-// Backend contains the base information for the backend's functionality
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(backendHelp),
-
- Paths: []*framework.Path{
- pathConfigConnection(&b),
- pathRoles(&b),
- pathCredsCreate(&b),
- },
-
- Secrets: []*framework.Secret{
- secretCreds(&b),
- },
-
- Invalidate: b.invalidate,
-
- Clean: func() {
- b.ResetDB(nil)
- },
- BackendType: logical.TypeLogical,
- }
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-
- // Session is goroutine safe, however, since we reinitialize
- // it when connection info changes, we want to make sure we
- // can close it and use a new connection; hence the lock
- session *gocql.Session
- lock sync.Mutex
-}
-
-type sessionConfig struct {
- Hosts string `json:"hosts" structs:"hosts" mapstructure:"hosts"`
- Username string `json:"username" structs:"username" mapstructure:"username"`
- Password string `json:"password" structs:"password" mapstructure:"password"`
- TLS bool `json:"tls" structs:"tls" mapstructure:"tls"`
- InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"`
- Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"`
- PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"`
- IssuingCA string `json:"issuing_ca" structs:"issuing_ca" mapstructure:"issuing_ca"`
- ProtocolVersion int `json:"protocol_version" structs:"protocol_version" mapstructure:"protocol_version"`
- ConnectTimeout int `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"`
- TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"`
-}
-
-// DB returns the database connection.
-func (b *backend) DB(s logical.Storage) (*gocql.Session, error) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- // If we already have a DB, we got it!
- if b.session != nil {
- return b.session, nil
- }
-
- entry, err := s.Get("config/connection")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil,
- fmt.Errorf("Configure the DB connection with config/connection first")
- }
-
- config := &sessionConfig{}
- if err := entry.DecodeJSON(config); err != nil {
- return nil, err
- }
-
- session, err := createSession(config, s)
- // Store the session in backend for reuse
- b.session = session
-
- return session, err
-
-}
-
-// ResetDB forces a connection next time DB() is called.
-func (b *backend) ResetDB(newSession *gocql.Session) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.session != nil {
- b.session.Close()
- }
-
- b.session = newSession
-}
-
-func (b *backend) invalidate(key string) {
- switch key {
- case "config/connection":
- b.ResetDB(nil)
- }
-}
-
-const backendHelp = `
-The Cassandra backend dynamically generates database users.
-
-After mounting this backend, configure it using the endpoints within
-the "config/" path.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go
deleted file mode 100644
index cfeb329..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package cassandra
-
-import (
- "fmt"
- "log"
- "os"
- "sync"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/mitchellh/mapstructure"
- dockertest "gopkg.in/ory-am/dockertest.v2"
-)
-
-var (
- testImagePull sync.Once
-)
-
-func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retURL string) {
- if os.Getenv("CASSANDRA_HOST") != "" {
- return "", os.Getenv("CASSANDRA_HOST")
- }
-
- // Without this the checks for whether the container has started seem to
- // never actually pass. There's really no reason to expose the test
- // containers, so don't.
- dockertest.BindDockerToLocalhost = "yep"
-
- testImagePull.Do(func() {
- dockertest.Pull("cassandra")
- })
-
- cwd, _ := os.Getwd()
-
- cid, connErr := dockertest.ConnectToCassandra("latest", 60, 1000*time.Millisecond, func(connURL string) bool {
- // This will cause a validation to run
- resp, err := b.HandleRequest(&logical.Request{
- Storage: s,
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Data: map[string]interface{}{
- "hosts": connURL,
- "username": "cassandra",
- "password": "cassandra",
- "protocol_version": 3,
- },
- })
- if err != nil || (resp != nil && resp.IsError()) {
- // It's likely not up and running yet, so return false and try again
- return false
- }
-
- retURL = connURL
- return true
- }, []string{"-v", cwd + "/test-fixtures/:/etc/cassandra/"}...)
-
- if connErr != nil {
- if cid != "" {
- cid.KillRemove()
- }
- t.Fatalf("could not connect to database: %v", connErr)
- }
-
- return
-}
-
-func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
- err := cid.KillRemove()
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBackend_basic(t *testing.T) {
- if os.Getenv("TRAVIS") != "true" {
- t.SkipNow()
- }
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, hostname := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, hostname),
- testAccStepRole(t),
- testAccStepReadCreds(t, "test"),
- },
- })
-}
-
-func TestBackend_roleCrud(t *testing.T) {
- if os.Getenv("TRAVIS") != "true" {
- t.SkipNow()
- }
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, hostname := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, hostname),
- testAccStepRole(t),
- testAccStepRoleWithOptions(t),
- testAccStepReadRole(t, "test", testRole),
- testAccStepReadRole(t, "test2", testRole),
- testAccStepDeleteRole(t, "test"),
- testAccStepDeleteRole(t, "test2"),
- testAccStepReadRole(t, "test", ""),
- testAccStepReadRole(t, "test2", ""),
- },
- })
-}
-
-func testAccPreCheck(t *testing.T) {
- if v := os.Getenv("CASSANDRA_HOST"); v == "" {
- t.Fatal("CASSANDRA_HOST must be set for acceptance tests")
- }
-}
-
-func testAccStepConfig(t *testing.T, hostname string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Data: map[string]interface{}{
- "hosts": hostname,
- "username": "cassandra",
- "password": "cassandra",
- "protocol_version": 3,
- },
- }
-}
-
-func testAccStepRole(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/test",
- Data: map[string]interface{}{
- "creation_cql": testRole,
- },
- }
-}
-
-func testAccStepRoleWithOptions(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/test2",
- Data: map[string]interface{}{
- "creation_cql": testRole,
- "lease": "30s",
- "consistency": "All",
- },
- }
-}
-
-func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "roles/" + n,
- }
-}
-
-func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "creds/" + name,
- Check: func(resp *logical.Response) error {
- var d struct {
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- log.Printf("[WARN] Generated credentials: %v", d)
-
- return nil
- },
- }
-}
-
-func testAccStepReadRole(t *testing.T, name string, cql string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "roles/" + name,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if cql == "" {
- return nil
- }
-
- return fmt.Errorf("response is nil")
- }
-
- var d struct {
- CreationCQL string `mapstructure:"creation_cql"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.CreationCQL != cql {
- return fmt.Errorf("bad: %#v\n%#v\n%#v\n", resp, cql, d.CreationCQL)
- }
-
- return nil
- },
- }
-}
-
-const testRole = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;
-GRANT ALL PERMISSIONS ON ALL KEYSPACES TO {{username}};`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_config_connection.go
deleted file mode 100644
index e00587d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_config_connection.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package cassandra
-
-import (
- "fmt"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/tlsutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigConnection(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/connection",
- Fields: map[string]*framework.FieldSchema{
- "hosts": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Comma-separated list of hosts",
- },
-
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The username to use for connecting to the cluster",
- },
-
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The password to use for connecting to the cluster",
- },
-
- "tls": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `Whether to use TLS. If pem_bundle or pem_json are
-set, this is automatically set to true`,
- },
-
- "insecure_tls": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `Whether to use TLS but skip verification; has no
-effect if a CA certificate is provided`,
- },
-
- "tls_min_version": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "tls12",
- Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'",
- },
-
- "pem_bundle": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `PEM-format, concatenated unencrypted secret key
-and certificate, with optional CA certificate`,
- },
-
- "pem_json": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `JSON containing a PEM-format, unencrypted secret
-key and certificate, with optional CA certificate.
-The JSON output of a certificate issued with the PKI
-backend can be directly passed into this parameter.
-If both this and "pem_bundle" are specified, this will
-take precedence.`,
- },
-
- "protocol_version": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `The protocol version to use. Defaults to 2.`,
- },
-
- "connect_timeout": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 5,
- Description: `The connection timeout to use. Defaults to 5.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathConnectionRead,
- logical.UpdateOperation: b.pathConnectionWrite,
- },
-
- HelpSynopsis: pathConfigConnectionHelpSyn,
- HelpDescription: pathConfigConnectionHelpDesc,
- }
-}
-
-func (b *backend) pathConnectionRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entry, err := req.Storage.Get("config/connection")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return logical.ErrorResponse(fmt.Sprintf("Configure the DB connection with config/connection first")), nil
- }
-
- config := &sessionConfig{}
- if err := entry.DecodeJSON(config); err != nil {
- return nil, err
- }
-
- config.Password = "**********"
- if len(config.PrivateKey) > 0 {
- config.PrivateKey = "**********"
- }
-
- return &logical.Response{
- Data: structs.New(config).Map(),
- }, nil
-}
-
-func (b *backend) pathConnectionWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- hosts := data.Get("hosts").(string)
- username := data.Get("username").(string)
- password := data.Get("password").(string)
-
- switch {
- case len(hosts) == 0:
- return logical.ErrorResponse("Hosts cannot be empty"), nil
- case len(username) == 0:
- return logical.ErrorResponse("Username cannot be empty"), nil
- case len(password) == 0:
- return logical.ErrorResponse("Password cannot be empty"), nil
- }
-
- config := &sessionConfig{
- Hosts: hosts,
- Username: username,
- Password: password,
- TLS: data.Get("tls").(bool),
- InsecureTLS: data.Get("insecure_tls").(bool),
- ProtocolVersion: data.Get("protocol_version").(int),
- ConnectTimeout: data.Get("connect_timeout").(int),
- }
-
- config.TLSMinVersion = data.Get("tls_min_version").(string)
- if config.TLSMinVersion == "" {
- return logical.ErrorResponse("failed to get 'tls_min_version' value"), nil
- }
-
- var ok bool
- _, ok = tlsutil.TLSLookup[config.TLSMinVersion]
- if !ok {
- return logical.ErrorResponse("invalid 'tls_min_version'"), nil
- }
-
- if config.InsecureTLS {
- config.TLS = true
- }
-
- pemBundle := data.Get("pem_bundle").(string)
- pemJSON := data.Get("pem_json").(string)
-
- var certBundle *certutil.CertBundle
- var parsedCertBundle *certutil.ParsedCertBundle
- var err error
-
- switch {
- case len(pemJSON) != 0:
- parsedCertBundle, err = certutil.ParsePKIJSON([]byte(pemJSON))
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: %s", err)), nil
- }
- certBundle, err = parsedCertBundle.ToCertBundle()
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error marshaling PEM information: %s", err)), nil
- }
- config.Certificate = certBundle.Certificate
- config.PrivateKey = certBundle.PrivateKey
- config.IssuingCA = certBundle.IssuingCA
- config.TLS = true
-
- case len(pemBundle) != 0:
- parsedCertBundle, err = certutil.ParsePEMBundle(pemBundle)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error parsing the given PEM information: %s", err)), nil
- }
- certBundle, err = parsedCertBundle.ToCertBundle()
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error marshaling PEM information: %s", err)), nil
- }
- config.Certificate = certBundle.Certificate
- config.PrivateKey = certBundle.PrivateKey
- config.IssuingCA = certBundle.IssuingCA
- config.TLS = true
- }
-
- session, err := createSession(config, req.Storage)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("config/connection", config)
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- // Reset the DB connection
- b.ResetDB(session)
-
- return nil, nil
-}
-
-const pathConfigConnectionHelpSyn = `
-Configure the connection information to talk to Cassandra.
-`
-
-const pathConfigConnectionHelpDesc = `
-This path configures the connection information used to connect to Cassandra.
-
-"hosts" is a comma-deliniated list of hostnames in the Cassandra cluster.
-
-"username" and "password" are self-explanatory, although the given user
-must have superuser access within Cassandra. Note that since this backend
-issues username/password credentials, Cassandra must be configured to use
-PasswordAuthenticator or a similar backend for its authentication. If you wish
-to have no authorization in Cassandra and want to use TLS client certificates,
-see the PKI backend.
-
-TLS works as follows:
-
-* If "tls" is set to true, the connection will use TLS; this happens automatically if "pem_bundle", "pem_json", or "insecure_tls" is set
-
-* If "insecure_tls" is set to true, the connection will not perform verification of the server certificate; this also sets "tls" to true
-
-* If only "issuing_ca" is set in "pem_json", or the only certificate in "pem_bundle" is a CA certificate, the given CA certificate will be used for server certificate verification; otherwise the system CA certificates will be used
-
-* If "certificate" and "private_key" are set in "pem_bundle" or "pem_json", client auth will be turned on for the connection
-
-"pem_bundle" should be a PEM-concatenated bundle of a private key + client certificate, an issuing CA certificate, or both. "pem_json" should contain the same information; for convenience, the JSON format is the same as that output by the issue command from the PKI backend.
-
-When configuring the connection information, the backend will verify its
-validity.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go
deleted file mode 100644
index 98981ce..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package cassandra
-
-import (
- "fmt"
- "strings"
- "time"
-
- "github.com/gocql/gocql"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathCredsCreate(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "creds/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathCredsCreateRead,
- },
-
- HelpSynopsis: pathCredsCreateReadHelpSyn,
- HelpDescription: pathCredsCreateReadHelpDesc,
- }
-}
-
-func (b *backend) pathCredsCreateRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- // Get the role
- role, err := getRole(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", name)), nil
- }
-
- displayName := req.DisplayName
- userUUID, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
- username := fmt.Sprintf("vault_%s_%s_%s_%d", name, displayName, userUUID, time.Now().Unix())
- username = strings.Replace(username, "-", "_", -1)
- password, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
-
- // Get our connection
- session, err := b.DB(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // Set consistency
- if role.Consistency != "" {
- consistencyValue, err := gocql.ParseConsistencyWrapper(role.Consistency)
- if err != nil {
- return nil, err
- }
-
- session.SetConsistency(consistencyValue)
- }
-
- // Execute each query
- for _, query := range strutil.ParseArbitraryStringSlice(role.CreationCQL, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- err = session.Query(substQuery(query, map[string]string{
- "username": username,
- "password": password,
- })).Exec()
- if err != nil {
- for _, query := range strutil.ParseArbitraryStringSlice(role.RollbackCQL, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- session.Query(substQuery(query, map[string]string{
- "username": username,
- "password": password,
- })).Exec()
- }
- return nil, err
- }
- }
-
- // Return the secret
- resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
- "username": username,
- "password": password,
- }, map[string]interface{}{
- "username": username,
- "role": name,
- })
- resp.Secret.TTL = role.Lease
-
- return resp, nil
-}
-
-const pathCredsCreateReadHelpSyn = `
-Request database credentials for a certain role.
-`
-
-const pathCredsCreateReadHelpDesc = `
-This path creates database credentials for a certain role. The
-database credentials will be generated on demand and will be automatically
-revoked when the lease is up.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_roles.go
deleted file mode 100644
index 9d4f48b..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_roles.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package cassandra
-
-import (
- "fmt"
- "time"
-
- "github.com/fatih/structs"
- "github.com/gocql/gocql"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- defaultCreationCQL = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;`
- defaultRollbackCQL = `DROP USER '{{username}}';`
-)
-
-func pathRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role",
- },
-
- "creation_cql": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: defaultCreationCQL,
- Description: `CQL to create a user and optionally grant
-authorization. If not supplied, a default that
-creates non-superuser accounts with the built-in
-password authenticator will be used; no
-authorization grants will be configured. Separate
-statements by semicolons; use @file to load from a
-file. Valid template values are '{{username}}' and
-'{{password}}' -- the single quotes are important!`,
- },
-
- "rollback_cql": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: defaultRollbackCQL,
- Description: `CQL to roll back an account operation. This will
-be used if there is an error during execution of a
-statement passed in via the "creation_cql" parameter
-parameter. The default simply drops the user, which
-should generally be sufficient. Separate statements
-by semicolons; use @file to load from a file. Valid
-template values are '{{username}}' and
-'{{password}}' -- the single quotes are important!`,
- },
-
- "lease": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "4h",
- Description: "The lease length; defaults to 4 hours",
- },
-
- "consistency": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "Quorum",
- Description: "The consistency level for the operations; defaults to Quorum.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleRead,
- logical.UpdateOperation: b.pathRoleCreate,
- logical.DeleteOperation: b.pathRoleDelete,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func getRole(s logical.Storage, n string) (*roleEntry, error) {
- entry, err := s.Get("role/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result roleEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathRoleDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("role/" + data.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathRoleRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- role, err := getRole(req.Storage, data.Get("name").(string))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: structs.New(role).Map(),
- }, nil
-}
-
-func (b *backend) pathRoleCreate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- creationCQL := data.Get("creation_cql").(string)
-
- rollbackCQL := data.Get("rollback_cql").(string)
-
- leaseRaw := data.Get("lease").(string)
- lease, err := time.ParseDuration(leaseRaw)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error parsing lease value of %s: %s", leaseRaw, err)), nil
- }
-
- consistencyStr := data.Get("consistency").(string)
- _, err = gocql.ParseConsistencyWrapper(consistencyStr)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error parsing consistency value of %q: %v", consistencyStr, err)), nil
- }
-
- entry := &roleEntry{
- Lease: lease,
- CreationCQL: creationCQL,
- RollbackCQL: rollbackCQL,
- Consistency: consistencyStr,
- }
-
- // Store it
- entryJSON, err := logical.StorageEntryJSON("role/"+name, entry)
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entryJSON); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-type roleEntry struct {
- CreationCQL string `json:"creation_cql" structs:"creation_cql"`
- Lease time.Duration `json:"lease" structs:"lease"`
- RollbackCQL string `json:"rollback_cql" structs:"rollback_cql"`
- Consistency string `json:"consistency" structs:"consistency"`
-}
-
-const pathRoleHelpSyn = `
-Manage the roles that can be created with this backend.
-`
-
-const pathRoleHelpDesc = `
-This path lets you manage the roles that can be created with this backend.
-
-The "creation_cql" parameter customizes the CQL string used to create users
-and assign them grants. This can be a sequence of CQL queries separated by
-semicolons. Some substitution will be done to the CQL string for certain keys.
-The names of the variables must be surrounded by '{{' and '}}' to be replaced.
-Note that it is important that single quotes are used, not double quotes.
-
- * "username" - The random username generated for the DB user.
-
- * "password" - The random password generated for the DB user.
-
-If no "creation_cql" parameter is given, a default will be used:
-
-` + defaultCreationCQL + `
-
-This default should be suitable for Cassandra installations using the password
-authenticator but not configured to use authorization.
-
-Similarly, the "rollback_cql" is used if user creation fails, in the absense of
-Cassandra transactions. The default should be suitable for almost any
-instance of Cassandra:
-
-` + defaultRollbackCQL + `
-
-"lease" the lease time; if not set the mount/system defaults are used.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/secret_creds.go
deleted file mode 100644
index f894ad2..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/secret_creds.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package cassandra
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// SecretCredsType is the type of creds issued from this backend
-const SecretCredsType = "cassandra"
-
-func secretCreds(b *backend) *framework.Secret {
- return &framework.Secret{
- Type: SecretCredsType,
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username",
- },
-
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password",
- },
- },
-
- Renew: b.secretCredsRenew,
- Revoke: b.secretCredsRevoke,
- }
-}
-
-func (b *backend) secretCredsRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the lease information
- roleRaw, ok := req.Secret.InternalData["role"]
- if !ok {
- return nil, fmt.Errorf("secret is missing role internal data")
- }
- roleName, ok := roleRaw.(string)
- if !ok {
- return nil, fmt.Errorf("error converting role internal data to string")
- }
-
- role, err := getRole(req.Storage, roleName)
- if err != nil {
- return nil, fmt.Errorf("unable to load role: %s", err)
- }
-
- return framework.LeaseExtend(role.Lease, 0, b.System())(req, d)
-}
-
-func (b *backend) secretCredsRevoke(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the username from the internal data
- usernameRaw, ok := req.Secret.InternalData["username"]
- if !ok {
- return nil, fmt.Errorf("secret is missing username internal data")
- }
- username, ok := usernameRaw.(string)
- if !ok {
- return nil, fmt.Errorf("error converting username internal data to string")
- }
-
- session, err := b.DB(req.Storage)
- if err != nil {
- return nil, fmt.Errorf("error getting session")
- }
-
- err = session.Query(fmt.Sprintf("DROP USER '%s'", username)).Exec()
- if err != nil {
- return nil, fmt.Errorf("error removing user %s", username)
- }
-
- return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/test-fixtures/cassandra.yaml b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/test-fixtures/cassandra.yaml
deleted file mode 100644
index 5b12c8c..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/test-fixtures/cassandra.yaml
+++ /dev/null
@@ -1,1146 +0,0 @@
-# Cassandra storage config YAML
-
-# NOTE:
-# See http://wiki.apache.org/cassandra/StorageConfiguration for
-# full explanations of configuration directives
-# /NOTE
-
-# The name of the cluster. This is mainly used to prevent machines in
-# one logical cluster from joining another.
-cluster_name: 'Test Cluster'
-
-# This defines the number of tokens randomly assigned to this node on the ring
-# The more tokens, relative to other nodes, the larger the proportion of data
-# that this node will store. You probably want all nodes to have the same number
-# of tokens assuming they have equal hardware capability.
-#
-# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
-# and will use the initial_token as described below.
-#
-# Specifying initial_token will override this setting on the node's initial start,
-# on subsequent starts, this setting will apply even if initial token is set.
-#
-# If you already have a cluster with 1 token per node, and wish to migrate to
-# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
-num_tokens: 256
-
-# Triggers automatic allocation of num_tokens tokens for this node. The allocation
-# algorithm attempts to choose tokens in a way that optimizes replicated load over
-# the nodes in the datacenter for the replication strategy used by the specified
-# keyspace.
-#
-# The load assigned to each node will be close to proportional to its number of
-# vnodes.
-#
-# Only supported with the Murmur3Partitioner.
-# allocate_tokens_for_keyspace: KEYSPACE
-
-# initial_token allows you to specify tokens manually. While you can use it with
-# vnodes (num_tokens > 1, above) -- in which case you should provide a
-# comma-separated list -- it's primarily used when adding nodes to legacy clusters
-# that do not have vnodes enabled.
-# initial_token:
-
-# See http://wiki.apache.org/cassandra/HintedHandoff
-# May either be "true" or "false" to enable globally
-hinted_handoff_enabled: true
-
-# When hinted_handoff_enabled is true, a black list of data centers that will not
-# perform hinted handoff
-# hinted_handoff_disabled_datacenters:
-# - DC1
-# - DC2
-
-# this defines the maximum amount of time a dead host will have hints
-# generated. After it has been dead this long, new hints for it will not be
-# created until it has been seen alive and gone down again.
-max_hint_window_in_ms: 10800000 # 3 hours
-
-# Maximum throttle in KBs per second, per delivery thread. This will be
-# reduced proportionally to the number of nodes in the cluster. (If there
-# are two nodes in the cluster, each delivery thread will use the maximum
-# rate; if there are three, each will throttle to half of the maximum,
-# since we expect two nodes to be delivering hints simultaneously.)
-hinted_handoff_throttle_in_kb: 1024
-
-# Number of threads with which to deliver hints;
-# Consider increasing this number when you have multi-dc deployments, since
-# cross-dc handoff tends to be slower
-max_hints_delivery_threads: 2
-
-# Directory where Cassandra should store hints.
-# If not set, the default directory is $CASSANDRA_HOME/data/hints.
-# hints_directory: /var/lib/cassandra/hints
-
-# How often hints should be flushed from the internal buffers to disk.
-# Will *not* trigger fsync.
-hints_flush_period_in_ms: 10000
-
-# Maximum size for a single hints file, in megabytes.
-max_hints_file_size_in_mb: 128
-
-# Compression to apply to the hint files. If omitted, hints files
-# will be written uncompressed. LZ4, Snappy, and Deflate compressors
-# are supported.
-#hints_compression:
-# - class_name: LZ4Compressor
-# parameters:
-# -
-
-# Maximum throttle in KBs per second, total. This will be
-# reduced proportionally to the number of nodes in the cluster.
-batchlog_replay_throttle_in_kb: 1024
-
-# Authentication backend, implementing IAuthenticator; used to identify users
-# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
-# PasswordAuthenticator}.
-#
-# - AllowAllAuthenticator performs no checks - set it to disable authentication.
-# - PasswordAuthenticator relies on username/password pairs to authenticate
-# users. It keeps usernames and hashed passwords in system_auth.credentials table.
-# Please increase system_auth keyspace replication factor if you use this authenticator.
-# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
-authenticator: PasswordAuthenticator
-
-# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
-# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
-# CassandraAuthorizer}.
-#
-# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
-# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
-# increase system_auth keyspace replication factor if you use this authorizer.
-authorizer: CassandraAuthorizer
-
-# Part of the Authentication & Authorization backend, implementing IRoleManager; used
-# to maintain grants and memberships between roles.
-# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager,
-# which stores role information in the system_auth keyspace. Most functions of the
-# IRoleManager require an authenticated login, so unless the configured IAuthenticator
-# actually implements authentication, most of this functionality will be unavailable.
-#
-# - CassandraRoleManager stores role data in the system_auth keyspace. Please
-# increase system_auth keyspace replication factor if you use this role manager.
-role_manager: CassandraRoleManager
-
-# Validity period for roles cache (fetching granted roles can be an expensive
-# operation depending on the role manager, CassandraRoleManager is one example)
-# Granted roles are cached for authenticated sessions in AuthenticatedUser and
-# after the period specified here, become eligible for (async) reload.
-# Defaults to 2000, set to 0 to disable caching entirely.
-# Will be disabled automatically for AllowAllAuthenticator.
-roles_validity_in_ms: 2000
-
-# Refresh interval for roles cache (if enabled).
-# After this interval, cache entries become eligible for refresh. Upon next
-# access, an async reload is scheduled and the old value returned until it
-# completes. If roles_validity_in_ms is non-zero, then this must be
-# also.
-# Defaults to the same value as roles_validity_in_ms.
-# roles_update_interval_in_ms: 2000
-
-# Validity period for permissions cache (fetching permissions can be an
-# expensive operation depending on the authorizer, CassandraAuthorizer is
-# one example). Defaults to 2000, set to 0 to disable.
-# Will be disabled automatically for AllowAllAuthorizer.
-permissions_validity_in_ms: 2000
-
-# Refresh interval for permissions cache (if enabled).
-# After this interval, cache entries become eligible for refresh. Upon next
-# access, an async reload is scheduled and the old value returned until it
-# completes. If permissions_validity_in_ms is non-zero, then this must be
-# also.
-# Defaults to the same value as permissions_validity_in_ms.
-# permissions_update_interval_in_ms: 2000
-
-# Validity period for credentials cache. This cache is tightly coupled to
-# the provided PasswordAuthenticator implementation of IAuthenticator. If
-# another IAuthenticator implementation is configured, this cache will not
-# be automatically used and so the following settings will have no effect.
-# Please note, credentials are cached in their encrypted form, so while
-# activating this cache may reduce the number of queries made to the
-# underlying table, it may not bring a significant reduction in the
-# latency of individual authentication attempts.
-# Defaults to 2000, set to 0 to disable credentials caching.
-credentials_validity_in_ms: 2000
-
-# Refresh interval for credentials cache (if enabled).
-# After this interval, cache entries become eligible for refresh. Upon next
-# access, an async reload is scheduled and the old value returned until it
-# completes. If credentials_validity_in_ms is non-zero, then this must be
-# also.
-# Defaults to the same value as credentials_validity_in_ms.
-# credentials_update_interval_in_ms: 2000
-
-# The partitioner is responsible for distributing groups of rows (by
-# partition key) across nodes in the cluster. You should leave this
-# alone for new clusters. The partitioner can NOT be changed without
-# reloading all data, so when upgrading you should set this to the
-# same partitioner you were already using.
-#
-# Besides Murmur3Partitioner, partitioners included for backwards
-# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
-# OrderPreservingPartitioner.
-#
-partitioner: org.apache.cassandra.dht.Murmur3Partitioner
-
-# Directories where Cassandra should store data on disk. Cassandra
-# will spread data evenly across them, subject to the granularity of
-# the configured compaction strategy.
-# If not set, the default directory is $CASSANDRA_HOME/data/data.
-data_file_directories:
- - /var/lib/cassandra/data
-
-# commit log. when running on magnetic HDD, this should be a
-# separate spindle than the data directories.
-# If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
-commitlog_directory: /var/lib/cassandra/commitlog
-
-# Enable / disable CDC functionality on a per-node basis. This modifies the logic used
-# for write path allocation rejection (standard: never reject. cdc: reject Mutation
-# containing a CDC-enabled table if at space limit in cdc_raw_directory).
-cdc_enabled: false
-
-# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the
-# segment contains mutations for a CDC-enabled table. This should be placed on a
-# separate spindle than the data directories. If not set, the default directory is
-# $CASSANDRA_HOME/data/cdc_raw.
-# cdc_raw_directory: /var/lib/cassandra/cdc_raw
-
-# Policy for data disk failures:
-#
-# die
-# shut down gossip and client transports and kill the JVM for any fs errors or
-# single-sstable errors, so the node can be replaced.
-#
-# stop_paranoid
-# shut down gossip and client transports even for single-sstable errors,
-# kill the JVM for errors during startup.
-#
-# stop
-# shut down gossip and client transports, leaving the node effectively dead, but
-# can still be inspected via JMX, kill the JVM for errors during startup.
-#
-# best_effort
-# stop using the failed disk and respond to requests based on
-# remaining available sstables. This means you WILL see obsolete
-# data at CL.ONE!
-#
-# ignore
-# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-disk_failure_policy: stop
-
-# Policy for commit disk failures:
-#
-# die
-# shut down gossip and Thrift and kill the JVM, so the node can be replaced.
-#
-# stop
-# shut down gossip and Thrift, leaving the node effectively dead, but
-# can still be inspected via JMX.
-#
-# stop_commit
-# shutdown the commit log, letting writes collect but
-# continuing to service reads, as in pre-2.0.5 Cassandra
-#
-# ignore
-# ignore fatal errors and let the batches fail
-commit_failure_policy: stop
-
-# Maximum size of the native protocol prepared statement cache
-#
-# Valid values are either "auto" (omitting the value) or a value greater 0.
-#
-# Note that specifying a too large value will result in long running GCs and possbily
-# out-of-memory errors. Keep the value at a small fraction of the heap.
-#
-# If you constantly see "prepared statements discarded in the last minute because
-# cache limit reached" messages, the first step is to investigate the root cause
-# of these messages and check whether prepared statements are used correctly -
-# i.e. use bind markers for variable parts.
-#
-# Do only change the default value, if you really have more prepared statements than
-# fit in the cache. In most cases it is not neccessary to change this value.
-# Constantly re-preparing statements is a performance penalty.
-#
-# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
-prepared_statements_cache_size_mb:
-
-# Maximum size of the Thrift prepared statement cache
-#
-# If you do not use Thrift at all, it is safe to leave this value at "auto".
-#
-# See description of 'prepared_statements_cache_size_mb' above for more information.
-#
-# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
-thrift_prepared_statements_cache_size_mb:
-
-# Maximum size of the key cache in memory.
-#
-# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
-# minimum, sometimes more. The key cache is fairly tiny for the amount of
-# time it saves, so it's worthwhile to use it at large numbers.
-# The row cache saves even more time, but must contain the entire row,
-# so it is extremely space-intensive. It's best to only use the
-# row cache if you have hot rows or static rows.
-#
-# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
-key_cache_size_in_mb:
-
-# Duration in seconds after which Cassandra should
-# save the key cache. Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 14400 or 4 hours.
-key_cache_save_period: 14400
-
-# Number of keys from the key cache to save
-# Disabled by default, meaning all keys are going to be saved
-# key_cache_keys_to_save: 100
-
-# Row cache implementation class name. Available implementations:
-#
-# org.apache.cassandra.cache.OHCProvider
-# Fully off-heap row cache implementation (default).
-#
-# org.apache.cassandra.cache.SerializingCacheProvider
-# This is the row cache implementation availabile
-# in previous releases of Cassandra.
-# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
-
-# Maximum size of the row cache in memory.
-# Please note that OHC cache implementation requires some additional off-heap memory to manage
-# the map structures and some in-flight memory during operations before/after cache entries can be
-# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
-# Do not specify more memory that the system can afford in the worst usual situation and leave some
-# headroom for OS block level cache. Do never allow your system to swap.
-#
-# Default value is 0, to disable row caching.
-row_cache_size_in_mb: 0
-
-# Duration in seconds after which Cassandra should save the row cache.
-# Caches are saved to saved_caches_directory as specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 0 to disable saving the row cache.
-row_cache_save_period: 0
-
-# Number of keys from the row cache to save.
-# Specify 0 (which is the default), meaning all keys are going to be saved
-# row_cache_keys_to_save: 100
-
-# Maximum size of the counter cache in memory.
-#
-# Counter cache helps to reduce counter locks' contention for hot counter cells.
-# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
-# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
-# of the lock hold, helping with hot counter cell updates, but will not allow skipping
-# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
-# in memory, not the whole counter, so it's relatively cheap.
-#
-# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
-# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
-counter_cache_size_in_mb:
-
-# Duration in seconds after which Cassandra should
-# save the counter cache (keys only). Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Default is 7200 or 2 hours.
-counter_cache_save_period: 7200
-
-# Number of keys from the counter cache to save
-# Disabled by default, meaning all keys are going to be saved
-# counter_cache_keys_to_save: 100
-
-# saved caches
-# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
-saved_caches_directory: /var/lib/cassandra/saved_caches
-
-# commitlog_sync may be either "periodic" or "batch."
-#
-# When in batch mode, Cassandra won't ack writes until the commit log
-# has been fsynced to disk. It will wait
-# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
-# This window should be kept short because the writer threads will
-# be unable to do extra work while waiting. (You may need to increase
-# concurrent_writes for the same reason.)
-#
-# commitlog_sync: batch
-# commitlog_sync_batch_window_in_ms: 2
-#
-# the other option is "periodic" where writes may be acked immediately
-# and the CommitLog is simply synced every commitlog_sync_period_in_ms
-# milliseconds.
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-
-# The size of the individual commitlog file segments. A commitlog
-# segment may be archived, deleted, or recycled once all the data
-# in it (potentially from each columnfamily in the system) has been
-# flushed to sstables.
-#
-# The default size is 32, which is almost always fine, but if you are
-# archiving commitlog segments (see commitlog_archiving.properties),
-# then you probably want a finer granularity of archiving; 8 or 16 MB
-# is reasonable.
-# Max mutation size is also configurable via max_mutation_size_in_kb setting in
-# cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024.
-#
-# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
-# be set to at least twice the size of max_mutation_size_in_kb / 1024
-#
-commitlog_segment_size_in_mb: 32
-
-# Compression to apply to the commit log. If omitted, the commit log
-# will be written uncompressed. LZ4, Snappy, and Deflate compressors
-# are supported.
-# commitlog_compression:
-# - class_name: LZ4Compressor
-# parameters:
-# -
-
-# any class that implements the SeedProvider interface and has a
-# constructor that takes a Map of parameters will do.
-seed_provider:
- # Addresses of hosts that are deemed contact points.
- # Cassandra nodes use this list of hosts to find each other and learn
- # the topology of the ring. You must change this if you are running
- # multiple nodes!
- - class_name: org.apache.cassandra.locator.SimpleSeedProvider
- parameters:
- # seeds is actually a comma-delimited list of addresses.
- # Ex: ",,"
- - seeds: "172.17.0.3"
-
-# For workloads with more data than can fit in memory, Cassandra's
-# bottleneck will be reads that need to fetch data from
-# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
-# order to allow the operations to enqueue low enough in the stack
-# that the OS and drives can reorder them. Same applies to
-# "concurrent_counter_writes", since counter writes read the current
-# values before incrementing and writing them back.
-#
-# On the other hand, since writes are almost never IO bound, the ideal
-# number of "concurrent_writes" is dependent on the number of cores in
-# your system; (8 * number_of_cores) is a good rule of thumb.
-concurrent_reads: 32
-concurrent_writes: 32
-concurrent_counter_writes: 32
-
-# For materialized view writes, as there is a read involved, so this should
-# be limited by the less of concurrent reads or concurrent writes.
-concurrent_materialized_view_writes: 32
-
-# Maximum memory to use for sstable chunk cache and buffer pooling.
-# 32MB of this are reserved for pooling buffers, the rest is used as an
-# cache that holds uncompressed sstable chunks.
-# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap,
-# so is in addition to the memory allocated for heap. The cache also has on-heap
-# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size
-# if the default 64k chunk size is used).
-# Memory is only allocated when needed.
-# file_cache_size_in_mb: 512
-
-# Flag indicating whether to allocate on or off heap when the sstable buffer
-# pool is exhausted, that is when it has exceeded the maximum memory
-# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.
-
-# buffer_pool_use_heap_if_exhausted: true
-
-# The strategy for optimizing disk read
-# Possible values are:
-# ssd (for solid state disks, the default)
-# spinning (for spinning disks)
-# disk_optimization_strategy: ssd
-
-# Total permitted memory to use for memtables. Cassandra will stop
-# accepting writes when the limit is exceeded until a flush completes,
-# and will trigger a flush based on memtable_cleanup_threshold
-# If omitted, Cassandra will set both to 1/4 the size of the heap.
-# memtable_heap_space_in_mb: 2048
-# memtable_offheap_space_in_mb: 2048
-
-# Ratio of occupied non-flushing memtable size to total permitted size
-# that will trigger a flush of the largest memtable. Larger mct will
-# mean larger flushes and hence less compaction, but also less concurrent
-# flush activity which can make it difficult to keep your disks fed
-# under heavy write load.
-#
-# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
-# memtable_cleanup_threshold: 0.11
-
-# Specify the way Cassandra allocates and manages memtable memory.
-# Options are:
-#
-# heap_buffers
-# on heap nio buffers
-#
-# offheap_buffers
-# off heap (direct) nio buffers
-#
-# offheap_objects
-# off heap objects
-memtable_allocation_type: heap_buffers
-
-# Total space to use for commit logs on disk.
-#
-# If space gets above this value, Cassandra will flush every dirty CF
-# in the oldest segment and remove it. So a small total commitlog space
-# will tend to cause more flush activity on less-active columnfamilies.
-#
-# The default value is the smaller of 8192, and 1/4 of the total space
-# of the commitlog volume.
-#
-# commitlog_total_space_in_mb: 8192
-
-# This sets the amount of memtable flush writer threads. These will
-# be blocked by disk io, and each one will hold a memtable in memory
-# while blocked.
-#
-# memtable_flush_writers defaults to one per data_file_directory.
-#
-# If your data directories are backed by SSD, you can increase this, but
-# avoid having memtable_flush_writers * data_file_directories > number of cores
-#memtable_flush_writers: 1
-
-# Total space to use for change-data-capture logs on disk.
-#
-# If space gets above this value, Cassandra will throw WriteTimeoutException
-# on Mutations including tables with CDC enabled. A CDCCompactor is responsible
-# for parsing the raw CDC logs and deleting them when parsing is completed.
-#
-# The default value is the min of 4096 mb and 1/8th of the total space
-# of the drive where cdc_raw_directory resides.
-# cdc_total_space_in_mb: 4096
-
-# When we hit our cdc_raw limit and the CDCCompactor is either running behind
-# or experiencing backpressure, we check at the following interval to see if any
-# new space for cdc-tracked tables has been made available. Default to 250ms
-# cdc_free_space_check_interval_ms: 250
-
-# A fixed memory pool size in MB for for SSTable index summaries. If left
-# empty, this will default to 5% of the heap size. If the memory usage of
-# all index summaries exceeds this limit, SSTables with low read rates will
-# shrink their index summaries in order to meet this limit. However, this
-# is a best-effort process. In extreme conditions Cassandra may need to use
-# more than this amount of memory.
-index_summary_capacity_in_mb:
-
-# How frequently index summaries should be resampled. This is done
-# periodically to redistribute memory from the fixed-size pool to sstables
-# proportional their recent read rates. Setting to -1 will disable this
-# process, leaving existing index summaries at their current sampling level.
-index_summary_resize_interval_in_minutes: 60
-
-# Whether to, when doing sequential writing, fsync() at intervals in
-# order to force the operating system to flush the dirty
-# buffers. Enable this to avoid sudden dirty buffer flushing from
-# impacting read latencies. Almost always a good idea on SSDs; not
-# necessarily on platters.
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-
-# TCP port, for commands and data
-# For security reasons, you should not expose this port to the internet. Firewall it if needed.
-storage_port: 7000
-
-# SSL port, for encrypted communication. Unused unless enabled in
-# encryption_options
-# For security reasons, you should not expose this port to the internet. Firewall it if needed.
-ssl_storage_port: 7001
-
-# Address or interface to bind to and tell other Cassandra nodes to connect to.
-# You _must_ change this if you want multiple nodes to be able to communicate!
-#
-# Set listen_address OR listen_interface, not both.
-#
-# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
-# will always do the Right Thing _if_ the node is properly configured
-# (hostname, name resolution, etc), and the Right Thing is to use the
-# address associated with the hostname (it might not be).
-#
-# Setting listen_address to 0.0.0.0 is always wrong.
-#
-listen_address: 172.17.0.3
-
-# Set listen_address OR listen_interface, not both. Interfaces must correspond
-# to a single address, IP aliasing is not supported.
-# listen_interface: eth0
-
-# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
-# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
-# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
-# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
-# listen_interface_prefer_ipv6: false
-
-# Address to broadcast to other Cassandra nodes
-# Leaving this blank will set it to the same value as listen_address
-broadcast_address: 172.17.0.3
-
-# When using multiple physical network interfaces, set this
-# to true to listen on broadcast_address in addition to
-# the listen_address, allowing nodes to communicate in both
-# interfaces.
-# Ignore this property if the network configuration automatically
-# routes between the public and private networks such as EC2.
-# listen_on_broadcast_address: false
-
-# Internode authentication backend, implementing IInternodeAuthenticator;
-# used to allow/disallow connections from peer nodes.
-# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
-
-# Whether to start the native transport server.
-# Please note that the address on which the native transport is bound is the
-# same as the rpc_address. The port however is different and specified below.
-start_native_transport: true
-# port for the CQL native transport to listen for clients on
-# For security reasons, you should not expose this port to the internet. Firewall it if needed.
-native_transport_port: 9042
-# Enabling native transport encryption in client_encryption_options allows you to either use
-# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
-# standard native_transport_port.
-# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
-# for native_transport_port. Setting native_transport_port_ssl to a different value
-# from native_transport_port will use encryption for native_transport_port_ssl while
-# keeping native_transport_port unencrypted.
-# native_transport_port_ssl: 9142
-# The maximum threads for handling requests when the native transport is used.
-# This is similar to rpc_max_threads though the default differs slightly (and
-# there is no native_transport_min_threads, idle threads will always be stopped
-# after 30 seconds).
-# native_transport_max_threads: 128
-#
-# The maximum size of allowed frame. Frame (requests) larger than this will
-# be rejected as invalid. The default is 256MB. If you're changing this parameter,
-# you may want to adjust max_value_size_in_mb accordingly.
-# native_transport_max_frame_size_in_mb: 256
-
-# The maximum number of concurrent client connections.
-# The default is -1, which means unlimited.
-# native_transport_max_concurrent_connections: -1
-
-# The maximum number of concurrent client connections per source ip.
-# The default is -1, which means unlimited.
-# native_transport_max_concurrent_connections_per_ip: -1
-
-# Whether to start the thrift rpc server.
-start_rpc: false
-
-# The address or interface to bind the Thrift RPC service and native transport
-# server to.
-#
-# Set rpc_address OR rpc_interface, not both.
-#
-# Leaving rpc_address blank has the same effect as on listen_address
-# (i.e. it will be based on the configured hostname of the node).
-#
-# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
-# set broadcast_rpc_address to a value other than 0.0.0.0.
-#
-# For security reasons, you should not expose this port to the internet. Firewall it if needed.
-rpc_address: 0.0.0.0
-
-# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
-# to a single address, IP aliasing is not supported.
-# rpc_interface: eth1
-
-# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
-# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
-# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
-# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
-# rpc_interface_prefer_ipv6: false
-
-# port for Thrift to listen for clients on
-rpc_port: 9160
-
-# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
-# be set to 0.0.0.0. If left blank, this will be set to the value of
-# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
-# be set.
-broadcast_rpc_address: 172.17.0.3
-
-# enable or disable keepalive on rpc/native connections
-rpc_keepalive: true
-
-# Cassandra provides two out-of-the-box options for the RPC Server:
-#
-# sync
-# One thread per thrift connection. For a very large number of clients, memory
-# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
-# per thread, and that will correspond to your use of virtual memory (but physical memory
-# may be limited depending on use of stack space).
-#
-# hsha
-# Stands for "half synchronous, half asynchronous." All thrift clients are handled
-# asynchronously using a small number of threads that does not vary with the amount
-# of thrift clients (and thus scales well to many clients). The rpc requests are still
-# synchronous (one thread per active request). If hsha is selected then it is essential
-# that rpc_max_threads is changed from the default value of unlimited.
-#
-# The default is sync because on Windows hsha is about 30% slower. On Linux,
-# sync/hsha performance is about the same, with hsha of course using less memory.
-#
-# Alternatively, can provide your own RPC server by providing the fully-qualified class name
-# of an o.a.c.t.TServerFactory that can create an instance of it.
-rpc_server_type: sync
-
-# Uncomment rpc_min|max_thread to set request pool size limits.
-#
-# Regardless of your choice of RPC server (see above), the number of maximum requests in the
-# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
-# RPC server, it also dictates the number of clients that can be connected at all).
-#
-# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
-# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
-# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
-#
-# rpc_min_threads: 16
-# rpc_max_threads: 2048
-
-# uncomment to set socket buffer sizes on rpc connections
-# rpc_send_buff_size_in_bytes:
-# rpc_recv_buff_size_in_bytes:
-
-# Uncomment to set socket buffer size for internode communication
-# Note that when setting this, the buffer size is limited by net.core.wmem_max
-# and when not setting it it is defined by net.ipv4.tcp_wmem
-# See also:
-# /proc/sys/net/core/wmem_max
-# /proc/sys/net/core/rmem_max
-# /proc/sys/net/ipv4/tcp_wmem
-# /proc/sys/net/ipv4/tcp_wmem
-# and 'man tcp'
-# internode_send_buff_size_in_bytes:
-
-# Uncomment to set socket buffer size for internode communication
-# Note that when setting this, the buffer size is limited by net.core.wmem_max
-# and when not setting it it is defined by net.ipv4.tcp_wmem
-# internode_recv_buff_size_in_bytes:
-
-# Frame size for thrift (maximum message length).
-thrift_framed_transport_size_in_mb: 15
-
-# Set to true to have Cassandra create a hard link to each sstable
-# flushed or streamed locally in a backups/ subdirectory of the
-# keyspace data. Removing these links is the operator's
-# responsibility.
-incremental_backups: false
-
-# Whether or not to take a snapshot before each compaction. Be
-# careful using this option, since Cassandra won't clean up the
-# snapshots for you. Mostly useful if you're paranoid when there
-# is a data format change.
-snapshot_before_compaction: false
-
-# Whether or not a snapshot is taken of the data before keyspace truncation
-# or dropping of column families. The STRONGLY advised default of true
-# should be used to provide data safety. If you set this flag to false, you will
-# lose data on truncation or drop.
-auto_snapshot: true
-
-# Granularity of the collation index of rows within a partition.
-# Increase if your rows are large, or if you have a very large
-# number of rows per partition. The competing goals are these:
-#
-# - a smaller granularity means more index entries are generated
-# and looking up rows withing the partition by collation column
-# is faster
-# - but, Cassandra will keep the collation index in memory for hot
-# rows (as part of the key cache), so a larger granularity means
-# you can cache more hot rows
-column_index_size_in_kb: 64
-
-# Per sstable indexed key cache entries (the collation index in memory
-# mentioned above) exceeding this size will not be held on heap.
-# This means that only partition information is held on heap and the
-# index entries are read from disk.
-#
-# Note that this size refers to the size of the
-# serialized index information and not the size of the partition.
-column_index_cache_size_in_kb: 2
-
-# Number of simultaneous compactions to allow, NOT including
-# validation "compactions" for anti-entropy repair. Simultaneous
-# compactions can help preserve read performance in a mixed read/write
-# workload, by mitigating the tendency of small sstables to accumulate
-# during a single long running compactions. The default is usually
-# fine and if you experience problems with compaction running too
-# slowly or too fast, you should look at
-# compaction_throughput_mb_per_sec first.
-#
-# concurrent_compactors defaults to the smaller of (number of disks,
-# number of cores), with a minimum of 2 and a maximum of 8.
-#
-# If your data directories are backed by SSD, you should increase this
-# to the number of cores.
-#concurrent_compactors: 1
-
-# Throttles compaction to the given total throughput across the entire
-# system. The faster you insert data, the faster you need to compact in
-# order to keep the sstable count down, but in general, setting this to
-# 16 to 32 times the rate you are inserting data is more than sufficient.
-# Setting this to 0 disables throttling. Note that this account for all types
-# of compaction, including validation compaction.
-compaction_throughput_mb_per_sec: 16
-
-# When compacting, the replacement sstable(s) can be opened before they
-# are completely written, and used in place of the prior sstables for
-# any range that has been written. This helps to smoothly transfer reads
-# between the sstables, reducing page cache churn and keeping hot rows hot
-sstable_preemptive_open_interval_in_mb: 50
-
-# Throttles all outbound streaming file transfers on this node to the
-# given total throughput in Mbps. This is necessary because Cassandra does
-# mostly sequential IO when streaming data during bootstrap or repair, which
-# can lead to saturating the network connection and degrading rpc performance.
-# When unset, the default is 200 Mbps or 25 MB/s.
-# stream_throughput_outbound_megabits_per_sec: 200
-
-# Throttles all streaming file transfer between the datacenters,
-# this setting allows users to throttle inter dc stream throughput in addition
-# to throttling all network stream traffic as configured with
-# stream_throughput_outbound_megabits_per_sec
-# When unset, the default is 200 Mbps or 25 MB/s
-# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
-
-# How long the coordinator should wait for read operations to complete
-read_request_timeout_in_ms: 5000
-# How long the coordinator should wait for seq or index scans to complete
-range_request_timeout_in_ms: 10000
-# How long the coordinator should wait for writes to complete
-write_request_timeout_in_ms: 2000
-# How long the coordinator should wait for counter writes to complete
-counter_write_request_timeout_in_ms: 5000
-# How long a coordinator should continue to retry a CAS operation
-# that contends with other proposals for the same row
-cas_contention_timeout_in_ms: 1000
-# How long the coordinator should wait for truncates to complete
-# (This can be much longer, because unless auto_snapshot is disabled
-# we need to flush first so we can snapshot before removing the data.)
-truncate_request_timeout_in_ms: 60000
-# The default timeout for other, miscellaneous operations
-request_timeout_in_ms: 10000
-
-# Enable operation timeout information exchange between nodes to accurately
-# measure request timeouts. If disabled, replicas will assume that requests
-# were forwarded to them instantly by the coordinator, which means that
-# under overload conditions we will waste that much extra time processing
-# already-timed-out requests.
-#
-# Warning: before enabling this property make sure to ntp is installed
-# and the times are synchronized between the nodes.
-cross_node_timeout: false
-
-# Set socket timeout for streaming operation.
-# The stream session is failed if no data/ack is received by any of the participants
-# within that period, which means this should also be sufficient to stream a large
-# sstable or rebuild table indexes.
-# Default value is 86400000ms, which means stale streams timeout after 24 hours.
-# A value of zero means stream sockets should never time out.
-# streaming_socket_timeout_in_ms: 86400000
-
-# phi value that must be reached for a host to be marked down.
-# most users should never need to adjust this.
-# phi_convict_threshold: 8
-
-# endpoint_snitch -- Set this to a class that implements
-# IEndpointSnitch. The snitch has two functions:
-#
-# - it teaches Cassandra enough about your network topology to route
-# requests efficiently
-# - it allows Cassandra to spread replicas around your cluster to avoid
-# correlated failures. It does this by grouping machines into
-# "datacenters" and "racks." Cassandra will do its best not to have
-# more than one replica on the same "rack" (which may not actually
-# be a physical location)
-#
-# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH
-# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss.
-# This means that if you start with the default SimpleSnitch, which
-# locates every node on "rack1" in "datacenter1", your only options
-# if you need to add another datacenter are GossipingPropertyFileSnitch
-# (and the older PFS). From there, if you want to migrate to an
-# incompatible snitch like Ec2Snitch you can do it by adding new nodes
-# under Ec2Snitch (which will locate them in a new "datacenter") and
-# decommissioning the old ones.
-#
-# Out of the box, Cassandra provides:
-#
-# SimpleSnitch:
-# Treats Strategy order as proximity. This can improve cache
-# locality when disabling read repair. Only appropriate for
-# single-datacenter deployments.
-#
-# GossipingPropertyFileSnitch
-# This should be your go-to snitch for production use. The rack
-# and datacenter for the local node are defined in
-# cassandra-rackdc.properties and propagated to other nodes via
-# gossip. If cassandra-topology.properties exists, it is used as a
-# fallback, allowing migration from the PropertyFileSnitch.
-#
-# PropertyFileSnitch:
-# Proximity is determined by rack and data center, which are
-# explicitly configured in cassandra-topology.properties.
-#
-# Ec2Snitch:
-# Appropriate for EC2 deployments in a single Region. Loads Region
-# and Availability Zone information from the EC2 API. The Region is
-# treated as the datacenter, and the Availability Zone as the rack.
-# Only private IPs are used, so this will not work across multiple
-# Regions.
-#
-# Ec2MultiRegionSnitch:
-# Uses public IPs as broadcast_address to allow cross-region
-# connectivity. (Thus, you should set seed addresses to the public
-# IP as well.) You will need to open the storage_port or
-# ssl_storage_port on the public IP firewall. (For intra-Region
-# traffic, Cassandra will switch to the private IP after
-# establishing a connection.)
-#
-# RackInferringSnitch:
-# Proximity is determined by rack and data center, which are
-# assumed to correspond to the 3rd and 2nd octet of each node's IP
-# address, respectively. Unless this happens to match your
-# deployment conventions, this is best used as an example of
-# writing a custom Snitch class and is provided in that spirit.
-#
-# You can use a custom Snitch by setting this to the full class name
-# of the snitch, which will be assumed to be on your classpath.
-endpoint_snitch: SimpleSnitch
-
-# controls how often to perform the more expensive part of host score
-# calculation
-dynamic_snitch_update_interval_in_ms: 100
-# controls how often to reset all host scores, allowing a bad host to
-# possibly recover
-dynamic_snitch_reset_interval_in_ms: 600000
-# if set greater than zero and read_repair_chance is < 1.0, this will allow
-# 'pinning' of replicas to hosts in order to increase cache capacity.
-# The badness threshold will control how much worse the pinned host has to be
-# before the dynamic snitch will prefer other replicas over it. This is
-# expressed as a double which represents a percentage. Thus, a value of
-# 0.2 means Cassandra would continue to prefer the static snitch values
-# until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.1
-
-# request_scheduler -- Set this to a class that implements
-# RequestScheduler, which will schedule incoming client requests
-# according to the specific policy. This is useful for multi-tenancy
-# with a single Cassandra cluster.
-# NOTE: This is specifically for requests from the client and does
-# not affect inter node communication.
-# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
-# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
-# client requests to a node with a separate queue for each
-# request_scheduler_id. The scheduler is further customized by
-# request_scheduler_options as described below.
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-
-# Scheduler Options vary based on the type of scheduler
-#
-# NoScheduler
-# Has no options
-#
-# RoundRobin
-# throttle_limit
-# The throttle_limit is the number of in-flight
-# requests per client. Requests beyond
-# that limit are queued up until
-# running requests can complete.
-# The value of 80 here is twice the number of
-# concurrent_reads + concurrent_writes.
-# default_weight
-# default_weight is optional and allows for
-# overriding the default which is 1.
-# weights
-# Weights are optional and will default to 1 or the
-# overridden default_weight. The weight translates into how
-# many requests are handled during each turn of the
-# RoundRobin, based on the scheduler id.
-#
-# request_scheduler_options:
-# throttle_limit: 80
-# default_weight: 5
-# weights:
-# Keyspace1: 1
-# Keyspace2: 5
-
-# request_scheduler_id -- An identifier based on which to perform
-# the request scheduling. Currently the only valid option is keyspace.
-# request_scheduler_id: keyspace
-
-# Enable or disable inter-node encryption
-# JVM defaults for supported SSL socket protocols and cipher suites can
-# be replaced using custom encryption options. This is not recommended
-# unless you have policies in place that dictate certain settings, or
-# need to disable vulnerable ciphers or protocols in case the JVM cannot
-# be updated.
-# FIPS compliant settings can be configured at JVM level and should not
-# involve changing encryption settings here:
-# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html
-# *NOTE* No custom encryption options are enabled at the moment
-# The available internode options are : all, none, dc, rack
-#
-# If set to dc cassandra will encrypt the traffic between the DCs
-# If set to rack cassandra will encrypt the traffic between the racks
-#
-# The passwords used in these options must match the passwords used when generating
-# the keystore and truststore. For instructions on generating these files, see:
-# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
-#
-server_encryption_options:
- internode_encryption: none
- keystore: conf/.keystore
- keystore_password: cassandra
- truststore: conf/.truststore
- truststore_password: cassandra
- # More advanced defaults below:
- # protocol: TLS
- # algorithm: SunX509
- # store_type: JKS
- # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
- # require_client_auth: false
- # require_endpoint_verification: false
-
-# enable or disable client/server encryption.
-client_encryption_options:
- enabled: false
- # If enabled and optional is set to true encrypted and unencrypted connections are handled.
- optional: false
- keystore: conf/.keystore
- keystore_password: cassandra
- # require_client_auth: false
- # Set trustore and truststore_password if require_client_auth is true
- # truststore: conf/.truststore
- # truststore_password: cassandra
- # More advanced defaults below:
- # protocol: TLS
- # algorithm: SunX509
- # store_type: JKS
- # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-# internode_compression controls whether traffic between nodes is
-# compressed.
-# Can be:
-#
-# all
-# all traffic is compressed
-#
-# dc
-# traffic between different datacenters is compressed
-#
-# none
-# nothing is compressed.
-internode_compression: dc
-
-# Enable or disable tcp_nodelay for inter-dc communication.
-# Disabling it will result in larger (but fewer) network packets being sent,
-# reducing overhead from the TCP protocol itself, at the cost of increasing
-# latency if you block for cross-datacenter responses.
-inter_dc_tcp_nodelay: false
-
-# TTL for different trace types used during logging of the repair process.
-tracetype_query_ttl: 86400
-tracetype_repair_ttl: 604800
-
-# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
-# This threshold can be adjusted to minimize logging if necessary
-# gc_log_threshold_in_ms: 200
-
-# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at
-# INFO level
-# UDFs (user defined functions) are disabled by default.
-# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
-enable_user_defined_functions: false
-
-# Enables scripted UDFs (JavaScript UDFs).
-# Java UDFs are always enabled, if enable_user_defined_functions is true.
-# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
-# This option has no effect, if enable_user_defined_functions is false.
-enable_scripted_user_defined_functions: false
-
-# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
-# Lowering this value on Windows can provide much tighter latency and better throughput, however
-# some virtualized environments may see a negative performance impact from changing this setting
-# below their system default. The sysinternals 'clockres' tool can confirm your system's default
-# setting.
-windows_timer_interval: 1
-
-
-# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
-# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
-# the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys
-# can still (and should!) be in the keystore and will be used on decrypt operations
-# (to handle the case of key rotation).
-#
-# It is strongly recommended to download and install Java Cryptography Extension (JCE)
-# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
-# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
-#
-# Currently, only the following file types are supported for transparent data encryption, although
-# more are coming in future cassandra releases: commitlog, hints
-transparent_data_encryption_options:
- enabled: false
- chunk_length_kb: 64
- cipher: AES/CBC/PKCS5Padding
- key_alias: testing:1
- # CBC IV length for AES needs to be 16 bytes (which is also the default size)
- # iv_length: 16
- key_provider:
- - class_name: org.apache.cassandra.security.JKSKeyProvider
- parameters:
- - keystore: conf/.keystore
- keystore_password: cassandra
- store_type: JCEKS
- key_password: cassandra
-
-
-#####################
-# SAFETY THRESHOLDS #
-#####################
-
-# When executing a scan, within or across a partition, we need to keep the
-# tombstones seen in memory so we can return them to the coordinator, which
-# will use them to make sure other replicas also know about the deleted rows.
-# With workloads that generate a lot of tombstones, this can cause performance
-# problems and even exaust the server heap.
-# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
-# Adjust the thresholds here if you understand the dangers and want to
-# scan more tombstones anyway. These thresholds may also be adjusted at runtime
-# using the StorageService mbean.
-tombstone_warn_threshold: 1000
-tombstone_failure_threshold: 100000
-
-# Log WARN on any batch size exceeding this value. 5kb per batch by default.
-# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
-batch_size_warn_threshold_in_kb: 5
-
-# Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
-batch_size_fail_threshold_in_kb: 50
-
-# Log WARN on any batches not of type LOGGED than span across more partitions than this limit
-unlogged_batch_across_partitions_warn_threshold: 10
-
-# Log a warning when compacting partitions larger than this value
-compaction_large_partition_warning_threshold_mb: 100
-
-# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
-# Adjust the threshold based on your application throughput requirement
-# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
-gc_warn_threshold_in_ms: 1000
-
-# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
-# early. Any value size larger than this threshold will result into marking an SSTable
-# as corrupted.
-# max_value_size_in_mb: 256
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/util.go
deleted file mode 100644
index ef3d1ee..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/util.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package cassandra
-
-import (
- "crypto/tls"
- "fmt"
- "strings"
- "time"
-
- "github.com/gocql/gocql"
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/tlsutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// Query templates a query for us.
-func substQuery(tpl string, data map[string]string) string {
- for k, v := range data {
- tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
- }
-
- return tpl
-}
-
-func createSession(cfg *sessionConfig, s logical.Storage) (*gocql.Session, error) {
- clusterConfig := gocql.NewCluster(strings.Split(cfg.Hosts, ",")...)
- clusterConfig.Authenticator = gocql.PasswordAuthenticator{
- Username: cfg.Username,
- Password: cfg.Password,
- }
-
- clusterConfig.ProtoVersion = cfg.ProtocolVersion
- if clusterConfig.ProtoVersion == 0 {
- clusterConfig.ProtoVersion = 2
- }
-
- clusterConfig.Timeout = time.Duration(cfg.ConnectTimeout) * time.Second
-
- if cfg.TLS {
- var tlsConfig *tls.Config
- if len(cfg.Certificate) > 0 || len(cfg.IssuingCA) > 0 {
- if len(cfg.Certificate) > 0 && len(cfg.PrivateKey) == 0 {
- return nil, fmt.Errorf("Found certificate for TLS authentication but no private key")
- }
-
- certBundle := &certutil.CertBundle{}
- if len(cfg.Certificate) > 0 {
- certBundle.Certificate = cfg.Certificate
- certBundle.PrivateKey = cfg.PrivateKey
- }
- if len(cfg.IssuingCA) > 0 {
- certBundle.IssuingCA = cfg.IssuingCA
- }
-
- parsedCertBundle, err := certBundle.ToParsedCertBundle()
- if err != nil {
- return nil, fmt.Errorf("failed to parse certificate bundle: %s", err)
- }
-
- tlsConfig, err = parsedCertBundle.GetTLSConfig(certutil.TLSClient)
- if err != nil || tlsConfig == nil {
- return nil, fmt.Errorf("failed to get TLS configuration: tlsConfig:%#v err:%v", tlsConfig, err)
- }
- tlsConfig.InsecureSkipVerify = cfg.InsecureTLS
-
- if cfg.TLSMinVersion != "" {
- var ok bool
- tlsConfig.MinVersion, ok = tlsutil.TLSLookup[cfg.TLSMinVersion]
- if !ok {
- return nil, fmt.Errorf("invalid 'tls_min_version' in config")
- }
- } else {
- // MinVersion was not being set earlier. Reset it to
- // zero to gracefully handle upgrades.
- tlsConfig.MinVersion = 0
- }
- }
-
- clusterConfig.SslOpts = &gocql.SslOptions{
- Config: tlsConfig,
- }
- }
-
- session, err := clusterConfig.CreateSession()
- if err != nil {
- return nil, fmt.Errorf("Error creating session: %s", err)
- }
-
- // Verify the info
- err = session.Query(`LIST USERS`).Exec()
- if err != nil {
- return nil, fmt.Errorf("Error validating connection info: %s", err)
- }
-
- return session, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go
deleted file mode 100644
index 9fd09ac..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package consul
-
-import (
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Paths: []*framework.Path{
- pathConfigAccess(),
- pathListRoles(&b),
- pathRoles(),
- pathToken(&b),
- },
-
- Secrets: []*framework.Secret{
- secretToken(&b),
- },
- BackendType: logical.TypeLogical,
- }
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend_test.go
deleted file mode 100644
index b242657..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend_test.go
+++ /dev/null
@@ -1,474 +0,0 @@
-package consul
-
-import (
- "encoding/base64"
- "fmt"
- "log"
- "os"
- "reflect"
- "sync"
- "testing"
- "time"
-
- consulapi "github.com/hashicorp/consul/api"
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/mitchellh/mapstructure"
- dockertest "gopkg.in/ory-am/dockertest.v2"
-)
-
-var (
- testImagePull sync.Once
-)
-
-func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retAddress string) {
- if os.Getenv("CONSUL_ADDR") != "" {
- return "", os.Getenv("CONSUL_ADDR")
- }
-
- // Without this the checks for whether the container has started seem to
- // never actually pass. There's really no reason to expose the test
- // containers, so don't.
- dockertest.BindDockerToLocalhost = "yep"
-
- testImagePull.Do(func() {
- dockertest.Pull(dockertest.ConsulImageName)
- })
-
- try := 0
- cid, connErr := dockertest.ConnectToConsul(60, 500*time.Millisecond, func(connAddress string) bool {
- try += 1
- // Build a client and verify that the credentials work
- config := consulapi.DefaultConfig()
- config.Address = connAddress
- config.Token = dockertest.ConsulACLMasterToken
- client, err := consulapi.NewClient(config)
- if err != nil {
- if try > 50 {
- panic(err)
- }
- return false
- }
-
- _, err = client.KV().Put(&consulapi.KVPair{
- Key: "setuptest",
- Value: []byte("setuptest"),
- }, nil)
- if err != nil {
- if try > 50 {
- panic(err)
- }
- return false
- }
-
- retAddress = connAddress
- return true
- })
-
- if connErr != nil {
- t.Fatalf("could not connect to consul: %v", connErr)
- }
-
- return
-}
-
-func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
- err := cid.KillRemove()
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBackend_config_access(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "address": connURL,
- "token": dockertest.ConsulACLMasterToken,
- }
-
- confReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/access",
- Storage: config.StorageView,
- Data: connData,
- }
-
- resp, err := b.HandleRequest(confReq)
- if err != nil || (resp != nil && resp.IsError()) || resp != nil {
- t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
- }
-
- confReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(confReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
- }
-
- expected := map[string]interface{}{
- "address": connData["address"].(string),
- "scheme": "http",
- }
- if !reflect.DeepEqual(expected, resp.Data) {
- t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data)
- }
- if resp.Data["token"] != nil {
- t.Fatalf("token should not be set in the response")
- }
-}
-
-func TestBackend_basic(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "address": connURL,
- "token": dockertest.ConsulACLMasterToken,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, connData),
- testAccStepWritePolicy(t, "test", testPolicy, ""),
- testAccStepReadToken(t, "test", connData),
- },
- })
-}
-
-func TestBackend_renew_revoke(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "address": connURL,
- "token": dockertest.ConsulACLMasterToken,
- }
-
- req := &logical.Request{
- Storage: config.StorageView,
- Operation: logical.UpdateOperation,
- Path: "config/access",
- Data: connData,
- }
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- req.Path = "roles/test"
- req.Data = map[string]interface{}{
- "policy": base64.StdEncoding.EncodeToString([]byte(testPolicy)),
- "lease": "6h",
- }
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- req.Operation = logical.ReadOperation
- req.Path = "creds/test"
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("resp nil")
- }
- if resp.IsError() {
- t.Fatalf("resp is error: %v", resp.Error())
- }
-
- generatedSecret := resp.Secret
- generatedSecret.IssueTime = time.Now()
- generatedSecret.TTL = 6 * time.Hour
-
- var d struct {
- Token string `mapstructure:"token"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- t.Fatal(err)
- }
- log.Printf("[WARN] Generated token: %s", d.Token)
-
- // Build a client and verify that the credentials work
- consulapiConfig := consulapi.DefaultConfig()
- consulapiConfig.Address = connData["address"].(string)
- consulapiConfig.Token = d.Token
- client, err := consulapi.NewClient(consulapiConfig)
- if err != nil {
- t.Fatal(err)
- }
-
- log.Printf("[WARN] Verifying that the generated token works...")
- _, err = client.KV().Put(&consulapi.KVPair{
- Key: "foo",
- Value: []byte("bar"),
- }, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- req.Operation = logical.RenewOperation
- req.Secret = generatedSecret
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("got nil response from renew")
- }
-
- req.Operation = logical.RevokeOperation
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- log.Printf("[WARN] Verifying that the generated token does not work...")
- _, err = client.KV().Put(&consulapi.KVPair{
- Key: "foo",
- Value: []byte("bar"),
- }, nil)
- if err == nil {
- t.Fatal("expected error")
- }
-}
-
-func TestBackend_management(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "address": connURL,
- "token": dockertest.ConsulACLMasterToken,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, connData),
- testAccStepWriteManagementPolicy(t, "test", ""),
- testAccStepReadManagementToken(t, "test", connData),
- },
- })
-}
-
-func TestBackend_crud(t *testing.T) {
- b, _ := Factory(logical.TestBackendConfig())
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepWritePolicy(t, "test", testPolicy, ""),
- testAccStepWritePolicy(t, "test2", testPolicy, ""),
- testAccStepWritePolicy(t, "test3", testPolicy, ""),
- testAccStepReadPolicy(t, "test", testPolicy, 0),
- testAccStepListPolicy(t, []string{"test", "test2", "test3"}),
- testAccStepDeletePolicy(t, "test"),
- },
- })
-}
-
-func TestBackend_role_lease(t *testing.T) {
- b, _ := Factory(logical.TestBackendConfig())
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepWritePolicy(t, "test", testPolicy, "6h"),
- testAccStepReadPolicy(t, "test", testPolicy, 6*time.Hour),
- testAccStepDeletePolicy(t, "test"),
- },
- })
-}
-
-func testAccStepConfig(
- t *testing.T, config map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/access",
- Data: config,
- }
-}
-
-func testAccStepReadToken(
- t *testing.T, name string, conf map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "creds/" + name,
- Check: func(resp *logical.Response) error {
- var d struct {
- Token string `mapstructure:"token"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- log.Printf("[WARN] Generated token: %s", d.Token)
-
- // Build a client and verify that the credentials work
- config := consulapi.DefaultConfig()
- config.Address = conf["address"].(string)
- config.Token = d.Token
- client, err := consulapi.NewClient(config)
- if err != nil {
- return err
- }
-
- log.Printf("[WARN] Verifying that the generated token works...")
- _, err = client.KV().Put(&consulapi.KVPair{
- Key: "foo",
- Value: []byte("bar"),
- }, nil)
- if err != nil {
- return err
- }
-
- return nil
- },
- }
-}
-
-func testAccStepReadManagementToken(
- t *testing.T, name string, conf map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "creds/" + name,
- Check: func(resp *logical.Response) error {
- var d struct {
- Token string `mapstructure:"token"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- log.Printf("[WARN] Generated token: %s", d.Token)
-
- // Build a client and verify that the credentials work
- config := consulapi.DefaultConfig()
- config.Address = conf["address"].(string)
- config.Token = d.Token
- client, err := consulapi.NewClient(config)
- if err != nil {
- return err
- }
-
- log.Printf("[WARN] Verifying that the generated token works...")
- _, _, err = client.ACL().Create(&consulapi.ACLEntry{
- Type: "management",
- Name: "test2",
- }, nil)
- if err != nil {
- return err
- }
-
- return nil
- },
- }
-}
-
-func testAccStepWritePolicy(t *testing.T, name string, policy string, lease string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/" + name,
- Data: map[string]interface{}{
- "policy": base64.StdEncoding.EncodeToString([]byte(policy)),
- "lease": lease,
- },
- }
-}
-
-func testAccStepWriteManagementPolicy(t *testing.T, name string, lease string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/" + name,
- Data: map[string]interface{}{
- "token_type": "management",
- "lease": lease,
- },
- }
-}
-
-func testAccStepReadPolicy(t *testing.T, name string, policy string, lease time.Duration) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "roles/" + name,
- Check: func(resp *logical.Response) error {
- policyRaw := resp.Data["policy"].(string)
- out, err := base64.StdEncoding.DecodeString(policyRaw)
- if err != nil {
- return err
- }
- if string(out) != policy {
- return fmt.Errorf("mismatch: %s %s", out, policy)
- }
-
- leaseRaw := resp.Data["lease"].(string)
- l, err := time.ParseDuration(leaseRaw)
- if err != nil {
- return err
- }
- if l != lease {
- return fmt.Errorf("mismatch: %v %v", l, lease)
- }
- return nil
- },
- }
-}
-
-func testAccStepListPolicy(t *testing.T, names []string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ListOperation,
- Path: "roles/",
- Check: func(resp *logical.Response) error {
- respKeys := resp.Data["keys"].([]string)
- if !reflect.DeepEqual(respKeys, names) {
- return fmt.Errorf("mismatch: %#v %#v", respKeys, names)
- }
- return nil
- },
- }
-}
-
-func testAccStepDeletePolicy(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "roles/" + name,
- }
-}
-
-const testPolicy = `
-key "" {
- policy = "write"
-}
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/client.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/client.go
deleted file mode 100644
index d519a88..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/consul/client.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package consul
-
-import (
- "fmt"
-
- "github.com/hashicorp/consul/api"
- "github.com/hashicorp/vault/logical"
-)
-
-func client(s logical.Storage) (*api.Client, error, error) {
- conf, userErr, intErr := readConfigAccess(s)
- if intErr != nil {
- return nil, nil, intErr
- }
- if userErr != nil {
- return nil, userErr, nil
- }
- if conf == nil {
- return nil, nil, fmt.Errorf("no error received but no configuration found")
- }
-
- consulConf := api.DefaultNonPooledConfig()
- consulConf.Address = conf.Address
- consulConf.Scheme = conf.Scheme
- consulConf.Token = conf.Token
-
- client, err := api.NewClient(consulConf)
- return client, nil, err
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_config.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_config.go
deleted file mode 100644
index 09f0f3f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_config.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package consul
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigAccess() *framework.Path {
- return &framework.Path{
- Pattern: "config/access",
- Fields: map[string]*framework.FieldSchema{
- "address": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Consul server address",
- },
-
- "scheme": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "URI scheme for the Consul address",
-
- // https would be a better default but Consul on its own
- // defaults to HTTP access, and when HTTPS is enabled it
- // disables HTTP, so there isn't really any harm done here.
- Default: "http",
- },
-
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token for API calls",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: pathConfigAccessRead,
- logical.UpdateOperation: pathConfigAccessWrite,
- },
- }
-}
-
-func readConfigAccess(storage logical.Storage) (*accessConfig, error, error) {
- entry, err := storage.Get("config/access")
- if err != nil {
- return nil, nil, err
- }
- if entry == nil {
- return nil, fmt.Errorf(
- "Access credentials for the backend itself haven't been configured. Please configure them at the '/config/access' endpoint"),
- nil
- }
-
- conf := &accessConfig{}
- if err := entry.DecodeJSON(conf); err != nil {
- return nil, nil, fmt.Errorf("error reading consul access configuration: %s", err)
- }
-
- return conf, nil, nil
-}
-
-func pathConfigAccessRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- conf, userErr, intErr := readConfigAccess(req.Storage)
- if intErr != nil {
- return nil, intErr
- }
- if userErr != nil {
- return logical.ErrorResponse(userErr.Error()), nil
- }
- if conf == nil {
- return nil, fmt.Errorf("no user error reported but consul access configuration not found")
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "address": conf.Address,
- "scheme": conf.Scheme,
- },
- }, nil
-}
-
-func pathConfigAccessWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entry, err := logical.StorageEntryJSON("config/access", accessConfig{
- Address: data.Get("address").(string),
- Scheme: data.Get("scheme").(string),
- Token: data.Get("token").(string),
- })
- if err != nil {
- return nil, err
- }
-
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-type accessConfig struct {
- Address string `json:"address"`
- Scheme string `json:"scheme"`
- Token string `json:"token"`
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_roles.go
deleted file mode 100644
index 9b4087b..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_roles.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package consul
-
-import (
- "encoding/base64"
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathListRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
- }
-}
-
-func pathRoles() *framework.Path {
- return &framework.Path{
- Pattern: "roles/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role",
- },
-
- "policy": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Policy document, base64 encoded. Required
-for 'client' tokens.`,
- },
-
- "token_type": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "client",
- Description: `Which type of token to create: 'client'
-or 'management'. If a 'management' token,
-the "policy" parameter is not required.
-Defaults to 'client'.`,
- },
-
- "lease": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Lease time of the role.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: pathRolesRead,
- logical.UpdateOperation: pathRolesWrite,
- logical.DeleteOperation: pathRolesDelete,
- },
- }
-}
-
-func (b *backend) pathRoleList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("policy/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
-}
-
-func pathRolesRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
-
- entry, err := req.Storage.Get("policy/" + name)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result roleConfig
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- if result.TokenType == "" {
- result.TokenType = "client"
- }
-
- // Generate the response
- resp := &logical.Response{
- Data: map[string]interface{}{
- "lease": result.Lease.String(),
- "token_type": result.TokenType,
- },
- }
- if result.Policy != "" {
- resp.Data["policy"] = base64.StdEncoding.EncodeToString([]byte(result.Policy))
- }
- return resp, nil
-}
-
-func pathRolesWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- tokenType := d.Get("token_type").(string)
-
- switch tokenType {
- case "client":
- case "management":
- default:
- return logical.ErrorResponse(
- "token_type must be \"client\" or \"management\""), nil
- }
-
- name := d.Get("name").(string)
- policy := d.Get("policy").(string)
- var policyRaw []byte
- var err error
- if tokenType != "management" {
- if policy == "" {
- return logical.ErrorResponse(
- "policy cannot be empty when not using management tokens"), nil
- }
- policyRaw, err = base64.StdEncoding.DecodeString(d.Get("policy").(string))
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error decoding policy base64: %s", err)), nil
- }
- }
-
- var lease time.Duration
- leaseParam := d.Get("lease").(string)
- if leaseParam != "" {
- lease, err = time.ParseDuration(leaseParam)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "error parsing given lease of %s: %s", leaseParam, err)), nil
- }
- }
-
- entry, err := logical.StorageEntryJSON("policy/"+name, roleConfig{
- Policy: string(policyRaw),
- Lease: lease,
- TokenType: tokenType,
- })
- if err != nil {
- return nil, err
- }
-
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func pathRolesDelete(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- if err := req.Storage.Delete("policy/" + name); err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-type roleConfig struct {
- Policy string `json:"policy"`
- Lease time.Duration `json:"lease"`
- TokenType string `json:"token_type"`
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_token.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_token.go
deleted file mode 100644
index bce276d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/consul/path_token.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package consul
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/consul/api"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathToken(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "creds/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathTokenRead,
- },
- }
-}
-
-func (b *backend) pathTokenRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
-
- entry, err := req.Storage.Get("policy/" + name)
- if err != nil {
- return nil, fmt.Errorf("error retrieving role: %s", err)
- }
- if entry == nil {
- return logical.ErrorResponse(fmt.Sprintf("Role '%s' not found", name)), nil
- }
-
- var result roleConfig
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- if result.TokenType == "" {
- result.TokenType = "client"
- }
-
- // Get the consul client
- c, userErr, intErr := client(req.Storage)
- if intErr != nil {
- return nil, intErr
- }
- if userErr != nil {
- return logical.ErrorResponse(userErr.Error()), nil
- }
-
- // Generate a name for the token
- tokenName := fmt.Sprintf("Vault %s %s %d", name, req.DisplayName, time.Now().UnixNano())
-
- // Create it
- token, _, err := c.ACL().Create(&api.ACLEntry{
- Name: tokenName,
- Type: result.TokenType,
- Rules: result.Policy,
- }, nil)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- // Use the helper to create the secret
- s := b.Secret(SecretTokenType).Response(map[string]interface{}{
- "token": token,
- }, map[string]interface{}{
- "token": token,
- })
- s.Secret.TTL = result.Lease
-
- return s, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/secret_token.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/secret_token.go
deleted file mode 100644
index 3388946..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/consul/secret_token.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package consul
-
-import (
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- SecretTokenType = "token"
-)
-
-func secretToken(b *backend) *framework.Secret {
- return &framework.Secret{
- Type: SecretTokenType,
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Request token",
- },
- },
-
- Renew: b.secretTokenRenew,
- Revoke: secretTokenRevoke,
- }
-}
-
-func (b *backend) secretTokenRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- return framework.LeaseExtend(0, 0, b.System())(req, d)
-}
-
-func secretTokenRevoke(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- c, userErr, intErr := client(req.Storage)
- if intErr != nil {
- return nil, intErr
- }
- if userErr != nil {
- // Returning logical.ErrorResponse from revocation function is risky
- return nil, userErr
- }
-
- tokenRaw, ok := req.Secret.InternalData["token"]
- if !ok {
- // We return nil here because this is a pre-0.5.3 problem and there is
- // nothing we can do about it. We already can't revoke the lease
- // properly if it has been renewed and this is documented pre-0.5.3
- // behavior with a security bulletin about it.
- return nil, nil
- }
-
- _, err := c.ACL().Destroy(tokenRaw.(string), nil)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/backend.go
deleted file mode 100644
index ffc1a40..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/database/backend.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package database
-
-import (
- "fmt"
- "net/rpc"
- "strings"
- "sync"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const databaseConfigPath = "database/config/"
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend(conf)
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend(conf *logical.BackendConfig) *databaseBackend {
- var b databaseBackend
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(backendHelp),
-
- Paths: []*framework.Path{
- pathListPluginConnection(&b),
- pathConfigurePluginConnection(&b),
- pathListRoles(&b),
- pathRoles(&b),
- pathCredsCreate(&b),
- pathResetConnection(&b),
- },
-
- Secrets: []*framework.Secret{
- secretCreds(&b),
- },
- Clean: b.closeAllDBs,
- Invalidate: b.invalidate,
- BackendType: logical.TypeLogical,
- }
-
- b.logger = conf.Logger
- b.connections = make(map[string]dbplugin.Database)
- return &b
-}
-
-type databaseBackend struct {
- connections map[string]dbplugin.Database
- logger log.Logger
-
- *framework.Backend
- sync.RWMutex
-}
-
-// closeAllDBs closes all connections from all database types
-func (b *databaseBackend) closeAllDBs() {
- b.Lock()
- defer b.Unlock()
-
- for _, db := range b.connections {
- db.Close()
- }
-
- b.connections = make(map[string]dbplugin.Database)
-}
-
-// This function is used to retrieve a database object either from the cached
-// connection map. The caller of this function needs to hold the backend's read
-// lock.
-func (b *databaseBackend) getDBObj(name string) (dbplugin.Database, bool) {
- db, ok := b.connections[name]
- return db, ok
-}
-
-// This function creates a new db object from the stored configuration and
-// caches it in the connections map. The caller of this function needs to hold
-// the backend's write lock
-func (b *databaseBackend) createDBObj(s logical.Storage, name string) (dbplugin.Database, error) {
- db, ok := b.connections[name]
- if ok {
- return db, nil
- }
-
- config, err := b.DatabaseConfig(s, name)
- if err != nil {
- return nil, err
- }
-
- db, err = dbplugin.PluginFactory(config.PluginName, b.System(), b.logger)
- if err != nil {
- return nil, err
- }
-
- err = db.Initialize(config.ConnectionDetails, true)
- if err != nil {
- return nil, err
- }
-
- b.connections[name] = db
-
- return db, nil
-}
-
-func (b *databaseBackend) DatabaseConfig(s logical.Storage, name string) (*DatabaseConfig, error) {
- entry, err := s.Get(fmt.Sprintf("config/%s", name))
- if err != nil {
- return nil, fmt.Errorf("failed to read connection configuration: %s", err)
- }
- if entry == nil {
- return nil, fmt.Errorf("failed to find entry for connection with name: %s", name)
- }
-
- var config DatabaseConfig
- if err := entry.DecodeJSON(&config); err != nil {
- return nil, err
- }
-
- return &config, nil
-}
-
-func (b *databaseBackend) Role(s logical.Storage, roleName string) (*roleEntry, error) {
- entry, err := s.Get("role/" + roleName)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result roleEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *databaseBackend) invalidate(key string) {
- b.Lock()
- defer b.Unlock()
-
- switch {
- case strings.HasPrefix(key, databaseConfigPath):
- name := strings.TrimPrefix(key, databaseConfigPath)
- b.clearConnection(name)
- }
-}
-
-// clearConnection closes the database connection and
-// removes it from the b.connections map.
-func (b *databaseBackend) clearConnection(name string) {
- db, ok := b.connections[name]
- if ok {
- db.Close()
- delete(b.connections, name)
- }
-}
-
-func (b *databaseBackend) closeIfShutdown(name string, err error) {
- // Plugin has shutdown, close it so next call can reconnect.
- if err == rpc.ErrShutdown {
- b.Lock()
- b.clearConnection(name)
- b.Unlock()
- }
-}
-
-const backendHelp = `
-The database backend supports using many different databases
-as secret backends, including but not limited to:
-cassandra, mssql, mysql, postgres
-
-After mounting this backend, configure it using the endpoints within
-the "database/config/" path.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/backend_test.go
deleted file mode 100644
index d5461e2..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/database/backend_test.go
+++ /dev/null
@@ -1,753 +0,0 @@
-package database
-
-import (
- "database/sql"
- "fmt"
- "log"
- "os"
- "reflect"
- "sync"
- "testing"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- vaulthttp "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/plugins/database/postgresql"
- "github.com/hashicorp/vault/vault"
- "github.com/lib/pq"
- "github.com/mitchellh/mapstructure"
- dockertest "gopkg.in/ory-am/dockertest.v3"
-)
-
-var (
- testImagePull sync.Once
-)
-
-func preparePostgresTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cleanup func(), retURL string) {
- if os.Getenv("PG_URL") != "" {
- return func() {}, os.Getenv("PG_URL")
- }
-
- pool, err := dockertest.NewPool("")
- if err != nil {
- t.Fatalf("Failed to connect to docker: %s", err)
- }
-
- resource, err := pool.Run("postgres", "latest", []string{"POSTGRES_PASSWORD=secret", "POSTGRES_DB=database"})
- if err != nil {
- t.Fatalf("Could not start local PostgreSQL docker container: %s", err)
- }
-
- cleanup = func() {
- err := pool.Purge(resource)
- if err != nil {
- t.Fatalf("Failed to cleanup local container: %s", err)
- }
- }
-
- retURL = fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp"))
-
- // exponential backoff-retry
- if err = pool.Retry(func() error {
- // This will cause a validation to run
- resp, err := b.HandleRequest(&logical.Request{
- Storage: s,
- Operation: logical.UpdateOperation,
- Path: "config/postgresql",
- Data: map[string]interface{}{
- "plugin_name": "postgresql-database-plugin",
- "connection_url": retURL,
- },
- })
- if err != nil || (resp != nil && resp.IsError()) {
- // It's likely not up and running yet, so return error and try again
- return fmt.Errorf("err:%s resp:%#v\n", err, resp)
- }
- if resp == nil {
- t.Fatal("expected warning")
- }
-
- return nil
- }); err != nil {
- t.Fatalf("Could not connect to PostgreSQL docker container: %s", err)
- }
-
- return
-}
-
-func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
- coreConfig := &vault.CoreConfig{
- LogicalBackends: map[string]logical.Factory{
- "database": Factory,
- },
- }
-
- cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
- HandlerFunc: vaulthttp.Handler,
- })
- cluster.Start()
- cores := cluster.Cores
-
- os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
-
- sys := vault.TestDynamicSystemView(cores[0].Core)
- vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", "TestBackend_PluginMain")
-
- return cluster, sys
-}
-
-func TestBackend_PluginMain(t *testing.T) {
- if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
- return
- }
-
- caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
- if caPEM == "" {
- t.Fatal("CA cert not passed in")
- }
-
- args := []string{"--ca-cert=" + caPEM}
-
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(args)
-
- postgresql.Run(apiClientMeta.GetTLSConfig())
-}
-
-func TestBackend_config_connection(t *testing.T) {
- var resp *logical.Response
- var err error
-
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- config.System = sys
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
- defer b.Cleanup()
-
- configData := map[string]interface{}{
- "connection_url": "sample_connection_url",
- "plugin_name": "postgresql-database-plugin",
- "verify_connection": false,
- "allowed_roles": []string{"*"},
- }
-
- configReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: configData,
- }
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- expected := map[string]interface{}{
- "plugin_name": "postgresql-database-plugin",
- "connection_details": map[string]interface{}{
- "connection_url": "sample_connection_url",
- },
- "allowed_roles": []string{"*"},
- }
- configReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- delete(resp.Data["connection_details"].(map[string]interface{}), "name")
- if !reflect.DeepEqual(expected, resp.Data) {
- t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data)
- }
-
- configReq.Operation = logical.ListOperation
- configReq.Data = nil
- configReq.Path = "config/"
- resp, err = b.HandleRequest(configReq)
- if err != nil {
- t.Fatal(err)
- }
- keys := resp.Data["keys"].([]string)
- key := keys[0]
- if key != "plugin-test" {
- t.Fatalf("bad key: %q", key)
- }
-}
-
-func TestBackend_basic(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- config.System = sys
-
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
- defer b.Cleanup()
-
- cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b)
- defer cleanup()
-
- // Configure a connection
- data := map[string]interface{}{
- "connection_url": connURL,
- "plugin_name": "postgresql-database-plugin",
- "allowed_roles": []string{"plugin-role-test"},
- }
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err := b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Create a role
- data = map[string]interface{}{
- "db_name": "plugin-test",
- "creation_statements": testRole,
- "default_ttl": "5m",
- "max_ttl": "10m",
- }
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/plugin-role-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Get creds
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "creds/plugin-role-test",
- Storage: config.StorageView,
- Data: data,
- }
- credsResp, err := b.HandleRequest(req)
- if err != nil || (credsResp != nil && credsResp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, credsResp)
- }
-
- if !testCredsExist(t, credsResp, connURL) {
- t.Fatalf("Creds should exist")
- }
-
- // Revoke creds
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.RevokeOperation,
- Storage: config.StorageView,
- Secret: &logical.Secret{
- InternalData: map[string]interface{}{
- "secret_type": "creds",
- "username": credsResp.Data["username"],
- "role": "plugin-role-test",
- },
- },
- })
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- if testCredsExist(t, credsResp, connURL) {
- t.Fatalf("Creds should not exist")
- }
-
-}
-
-func TestBackend_connectionCrud(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- config.System = sys
-
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
- defer b.Cleanup()
-
- cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b)
- defer cleanup()
-
- // Configure a connection
- data := map[string]interface{}{
- "connection_url": "test",
- "plugin_name": "postgresql-database-plugin",
- "verify_connection": false,
- }
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err := b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Create a role
- data = map[string]interface{}{
- "db_name": "plugin-test",
- "creation_statements": testRole,
- "revocation_statements": defaultRevocationSQL,
- "default_ttl": "5m",
- "max_ttl": "10m",
- }
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/plugin-role-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Update the connection
- data = map[string]interface{}{
- "connection_url": connURL,
- "plugin_name": "postgresql-database-plugin",
- "allowed_roles": []string{"plugin-role-test"},
- }
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Read connection
- expected := map[string]interface{}{
- "plugin_name": "postgresql-database-plugin",
- "connection_details": map[string]interface{}{
- "connection_url": connURL,
- },
- "allowed_roles": []string{"plugin-role-test"},
- }
- req.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- delete(resp.Data["connection_details"].(map[string]interface{}), "name")
- if !reflect.DeepEqual(expected, resp.Data) {
- t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data)
- }
-
- // Reset Connection
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "reset/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Get creds
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "creds/plugin-role-test",
- Storage: config.StorageView,
- Data: data,
- }
- credsResp, err := b.HandleRequest(req)
- if err != nil || (credsResp != nil && credsResp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, credsResp)
- }
-
- if !testCredsExist(t, credsResp, connURL) {
- t.Fatalf("Creds should exist")
- }
-
- // Delete Connection
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.DeleteOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Read connection
- req.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Should be empty
- if resp != nil {
- t.Fatal("Expected response to be nil")
- }
-}
-
-func TestBackend_roleCrud(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- config.System = sys
-
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
- defer b.Cleanup()
-
- cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b)
- defer cleanup()
-
- // Configure a connection
- data := map[string]interface{}{
- "connection_url": connURL,
- "plugin_name": "postgresql-database-plugin",
- }
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err := b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Create a role
- data = map[string]interface{}{
- "db_name": "plugin-test",
- "creation_statements": testRole,
- "revocation_statements": defaultRevocationSQL,
- "default_ttl": "5m",
- "max_ttl": "10m",
- }
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/plugin-role-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Read the role
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "roles/plugin-role-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- expected := dbplugin.Statements{
- CreationStatements: testRole,
- RevocationStatements: defaultRevocationSQL,
- }
-
- var actual dbplugin.Statements
- if err := mapstructure.Decode(resp.Data, &actual); err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("Statements did not match, exepected %#v, got %#v", expected, actual)
- }
-
- // Delete the role
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.DeleteOperation,
- Path: "roles/plugin-role-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Read the role
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "roles/plugin-role-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Should be empty
- if resp != nil {
- t.Fatal("Expected response to be nil")
- }
-}
-func TestBackend_allowedRoles(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- config.System = sys
-
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
- defer b.Cleanup()
-
- cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b)
- defer cleanup()
-
- // Configure a connection
- data := map[string]interface{}{
- "connection_url": connURL,
- "plugin_name": "postgresql-database-plugin",
- }
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err := b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Create a denied and an allowed role
- data = map[string]interface{}{
- "db_name": "plugin-test",
- "creation_statements": testRole,
- "default_ttl": "5m",
- "max_ttl": "10m",
- }
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/denied",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- data = map[string]interface{}{
- "db_name": "plugin-test",
- "creation_statements": testRole,
- "default_ttl": "5m",
- "max_ttl": "10m",
- }
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/allowed",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Get creds from denied role, should fail
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "creds/denied",
- Storage: config.StorageView,
- Data: data,
- }
- credsResp, err := b.HandleRequest(req)
- if err != logical.ErrPermissionDenied {
- t.Fatalf("expected error to be:%s got:%#v\n", logical.ErrPermissionDenied, err)
- }
-
- // update connection with * allowed roles connection
- data = map[string]interface{}{
- "connection_url": connURL,
- "plugin_name": "postgresql-database-plugin",
- "allowed_roles": "*",
- }
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Get creds, should work.
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "creds/allowed",
- Storage: config.StorageView,
- Data: data,
- }
- credsResp, err = b.HandleRequest(req)
- if err != nil || (credsResp != nil && credsResp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, credsResp)
- }
-
- if !testCredsExist(t, credsResp, connURL) {
- t.Fatalf("Creds should exist")
- }
-
- // update connection with allowed roles
- data = map[string]interface{}{
- "connection_url": connURL,
- "plugin_name": "postgresql-database-plugin",
- "allowed_roles": "allow, allowed",
- }
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(req)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- // Get creds from denied role, should fail
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "creds/denied",
- Storage: config.StorageView,
- Data: data,
- }
- credsResp, err = b.HandleRequest(req)
- if err != logical.ErrPermissionDenied {
- t.Fatalf("expected error to be:%s got:%#v\n", logical.ErrPermissionDenied, err)
- }
-
- // Get creds from allowed role, should work.
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "creds/allowed",
- Storage: config.StorageView,
- Data: data,
- }
- credsResp, err = b.HandleRequest(req)
- if err != nil || (credsResp != nil && credsResp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, credsResp)
- }
-
- if !testCredsExist(t, credsResp, connURL) {
- t.Fatalf("Creds should exist")
- }
-}
-
-func testCredsExist(t *testing.T, resp *logical.Response, connURL string) bool {
- var d struct {
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- t.Fatal(err)
- }
- log.Printf("[TRACE] Generated credentials: %v", d)
- conn, err := pq.ParseURL(connURL)
-
- if err != nil {
- t.Fatal(err)
- }
-
- conn += " timezone=utc"
-
- db, err := sql.Open("postgres", conn)
- if err != nil {
- t.Fatal(err)
- }
-
- returnedRows := func() int {
- stmt, err := db.Prepare("SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege($1, 'information_schema.role_column_grants', 'select');")
- if err != nil {
- return -1
- }
- defer stmt.Close()
-
- rows, err := stmt.Query(d.Username)
- if err != nil {
- return -1
- }
- defer rows.Close()
-
- i := 0
- for rows.Next() {
- i++
- }
- return i
- }
-
- return returnedRows() == 2
-}
-
-const testRole = `
-CREATE ROLE "{{name}}" WITH
- LOGIN
- PASSWORD '{{password}}'
- VALID UNTIL '{{expiration}}';
-GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
-`
-
-const defaultRevocationSQL = `
-REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
-REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}};
-REVOKE USAGE ON SCHEMA public FROM {{name}};
-
-DROP ROLE IF EXISTS {{name}};
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/client.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/client.go
deleted file mode 100644
index 6df3948..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/client.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package dbplugin
-
-import (
- "fmt"
- "net/rpc"
- "sync"
- "time"
-
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- log "github.com/mgutz/logxi/v1"
-)
-
-// DatabasePluginClient embeds a databasePluginRPCClient and wraps it's Close
-// method to also call Kill() on the plugin.Client.
-type DatabasePluginClient struct {
- client *plugin.Client
- sync.Mutex
-
- *databasePluginRPCClient
-}
-
-func (dc *DatabasePluginClient) Close() error {
- err := dc.databasePluginRPCClient.Close()
- dc.client.Kill()
-
- return err
-}
-
-// newPluginClient returns a databaseRPCClient with a connection to a running
-// plugin. The client is wrapped in a DatabasePluginClient object to ensure the
-// plugin is killed on call of Close().
-func newPluginClient(sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger) (Database, error) {
- // pluginMap is the map of plugins we can dispense.
- var pluginMap = map[string]plugin.Plugin{
- "database": new(DatabasePlugin),
- }
-
- client, err := pluginRunner.Run(sys, pluginMap, handshakeConfig, []string{}, logger)
- if err != nil {
- return nil, err
- }
-
- // Connect via RPC
- rpcClient, err := client.Client()
- if err != nil {
- return nil, err
- }
-
- // Request the plugin
- raw, err := rpcClient.Dispense("database")
- if err != nil {
- return nil, err
- }
-
- // We should have a database type now. This feels like a normal interface
- // implementation but is in fact over an RPC connection.
- databaseRPC := raw.(*databasePluginRPCClient)
-
- // Wrap RPC implimentation in DatabasePluginClient
- return &DatabasePluginClient{
- client: client,
- databasePluginRPCClient: databaseRPC,
- }, nil
-}
-
-// ---- RPC client domain ----
-
-// databasePluginRPCClient implements Database and is used on the client to
-// make RPC calls to a plugin.
-type databasePluginRPCClient struct {
- client *rpc.Client
-}
-
-func (dr *databasePluginRPCClient) Type() (string, error) {
- var dbType string
- err := dr.client.Call("Plugin.Type", struct{}{}, &dbType)
-
- return fmt.Sprintf("plugin-%s", dbType), err
-}
-
-func (dr *databasePluginRPCClient) CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) {
- req := CreateUserRequest{
- Statements: statements,
- UsernameConfig: usernameConfig,
- Expiration: expiration,
- }
-
- var resp CreateUserResponse
- err = dr.client.Call("Plugin.CreateUser", req, &resp)
-
- return resp.Username, resp.Password, err
-}
-
-func (dr *databasePluginRPCClient) RenewUser(statements Statements, username string, expiration time.Time) error {
- req := RenewUserRequest{
- Statements: statements,
- Username: username,
- Expiration: expiration,
- }
-
- err := dr.client.Call("Plugin.RenewUser", req, &struct{}{})
-
- return err
-}
-
-func (dr *databasePluginRPCClient) RevokeUser(statements Statements, username string) error {
- req := RevokeUserRequest{
- Statements: statements,
- Username: username,
- }
-
- err := dr.client.Call("Plugin.RevokeUser", req, &struct{}{})
-
- return err
-}
-
-func (dr *databasePluginRPCClient) Initialize(conf map[string]interface{}, verifyConnection bool) error {
- req := InitializeRequest{
- Config: conf,
- VerifyConnection: verifyConnection,
- }
-
- err := dr.client.Call("Plugin.Initialize", req, &struct{}{})
-
- return err
-}
-
-func (dr *databasePluginRPCClient) Close() error {
- err := dr.client.Call("Plugin.Close", struct{}{}, &struct{}{})
-
- return err
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/databasemiddleware.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/databasemiddleware.go
deleted file mode 100644
index 87dfa6c..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/databasemiddleware.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package dbplugin
-
-import (
- "time"
-
- metrics "github.com/armon/go-metrics"
- log "github.com/mgutz/logxi/v1"
-)
-
-// ---- Tracing Middleware Domain ----
-
-// databaseTracingMiddleware wraps a implementation of Database and executes
-// trace logging on function call.
-type databaseTracingMiddleware struct {
- next Database
- logger log.Logger
-
- typeStr string
-}
-
-func (mw *databaseTracingMiddleware) Type() (string, error) {
- return mw.next.Type()
-}
-
-func (mw *databaseTracingMiddleware) CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) {
- defer func(then time.Time) {
- mw.logger.Trace("database", "operation", "CreateUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then))
- }(time.Now())
-
- mw.logger.Trace("database", "operation", "CreateUser", "status", "started", "type", mw.typeStr)
- return mw.next.CreateUser(statements, usernameConfig, expiration)
-}
-
-func (mw *databaseTracingMiddleware) RenewUser(statements Statements, username string, expiration time.Time) (err error) {
- defer func(then time.Time) {
- mw.logger.Trace("database", "operation", "RenewUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then))
- }(time.Now())
-
- mw.logger.Trace("database", "operation", "RenewUser", "status", "started", mw.typeStr)
- return mw.next.RenewUser(statements, username, expiration)
-}
-
-func (mw *databaseTracingMiddleware) RevokeUser(statements Statements, username string) (err error) {
- defer func(then time.Time) {
- mw.logger.Trace("database", "operation", "RevokeUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then))
- }(time.Now())
-
- mw.logger.Trace("database", "operation", "RevokeUser", "status", "started", "type", mw.typeStr)
- return mw.next.RevokeUser(statements, username)
-}
-
-func (mw *databaseTracingMiddleware) Initialize(conf map[string]interface{}, verifyConnection bool) (err error) {
- defer func(then time.Time) {
- mw.logger.Trace("database", "operation", "Initialize", "status", "finished", "type", mw.typeStr, "verify", verifyConnection, "err", err, "took", time.Since(then))
- }(time.Now())
-
- mw.logger.Trace("database", "operation", "Initialize", "status", "started", "type", mw.typeStr)
- return mw.next.Initialize(conf, verifyConnection)
-}
-
-func (mw *databaseTracingMiddleware) Close() (err error) {
- defer func(then time.Time) {
- mw.logger.Trace("database", "operation", "Close", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then))
- }(time.Now())
-
- mw.logger.Trace("database", "operation", "Close", "status", "started", "type", mw.typeStr)
- return mw.next.Close()
-}
-
-// ---- Metrics Middleware Domain ----
-
-// databaseMetricsMiddleware wraps an implementation of Databases and on
-// function call logs metrics about this instance.
-type databaseMetricsMiddleware struct {
- next Database
-
- typeStr string
-}
-
-func (mw *databaseMetricsMiddleware) Type() (string, error) {
- return mw.next.Type()
-}
-
-func (mw *databaseMetricsMiddleware) CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) {
- defer func(now time.Time) {
- metrics.MeasureSince([]string{"database", "CreateUser"}, now)
- metrics.MeasureSince([]string{"database", mw.typeStr, "CreateUser"}, now)
-
- if err != nil {
- metrics.IncrCounter([]string{"database", "CreateUser", "error"}, 1)
- metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser", "error"}, 1)
- }
- }(time.Now())
-
- metrics.IncrCounter([]string{"database", "CreateUser"}, 1)
- metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser"}, 1)
- return mw.next.CreateUser(statements, usernameConfig, expiration)
-}
-
-func (mw *databaseMetricsMiddleware) RenewUser(statements Statements, username string, expiration time.Time) (err error) {
- defer func(now time.Time) {
- metrics.MeasureSince([]string{"database", "RenewUser"}, now)
- metrics.MeasureSince([]string{"database", mw.typeStr, "RenewUser"}, now)
-
- if err != nil {
- metrics.IncrCounter([]string{"database", "RenewUser", "error"}, 1)
- metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser", "error"}, 1)
- }
- }(time.Now())
-
- metrics.IncrCounter([]string{"database", "RenewUser"}, 1)
- metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser"}, 1)
- return mw.next.RenewUser(statements, username, expiration)
-}
-
-func (mw *databaseMetricsMiddleware) RevokeUser(statements Statements, username string) (err error) {
- defer func(now time.Time) {
- metrics.MeasureSince([]string{"database", "RevokeUser"}, now)
- metrics.MeasureSince([]string{"database", mw.typeStr, "RevokeUser"}, now)
-
- if err != nil {
- metrics.IncrCounter([]string{"database", "RevokeUser", "error"}, 1)
- metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser", "error"}, 1)
- }
- }(time.Now())
-
- metrics.IncrCounter([]string{"database", "RevokeUser"}, 1)
- metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser"}, 1)
- return mw.next.RevokeUser(statements, username)
-}
-
-func (mw *databaseMetricsMiddleware) Initialize(conf map[string]interface{}, verifyConnection bool) (err error) {
- defer func(now time.Time) {
- metrics.MeasureSince([]string{"database", "Initialize"}, now)
- metrics.MeasureSince([]string{"database", mw.typeStr, "Initialize"}, now)
-
- if err != nil {
- metrics.IncrCounter([]string{"database", "Initialize", "error"}, 1)
- metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize", "error"}, 1)
- }
- }(time.Now())
-
- metrics.IncrCounter([]string{"database", "Initialize"}, 1)
- metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize"}, 1)
- return mw.next.Initialize(conf, verifyConnection)
-}
-
-func (mw *databaseMetricsMiddleware) Close() (err error) {
- defer func(now time.Time) {
- metrics.MeasureSince([]string{"database", "Close"}, now)
- metrics.MeasureSince([]string{"database", mw.typeStr, "Close"}, now)
-
- if err != nil {
- metrics.IncrCounter([]string{"database", "Close", "error"}, 1)
- metrics.IncrCounter([]string{"database", mw.typeStr, "Close", "error"}, 1)
- }
- }(time.Now())
-
- metrics.IncrCounter([]string{"database", "Close"}, 1)
- metrics.IncrCounter([]string{"database", mw.typeStr, "Close"}, 1)
- return mw.next.Close()
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin.go
deleted file mode 100644
index 0becc9f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package dbplugin
-
-import (
- "fmt"
- "net/rpc"
- "time"
-
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- log "github.com/mgutz/logxi/v1"
-)
-
-// Database is the interface that all database objects must implement.
-type Database interface {
- Type() (string, error)
- CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error)
- RenewUser(statements Statements, username string, expiration time.Time) error
- RevokeUser(statements Statements, username string) error
-
- Initialize(config map[string]interface{}, verifyConnection bool) error
- Close() error
-}
-
-// Statements set in role creation and passed into the database type's functions.
-type Statements struct {
- CreationStatements string `json:"creation_statments" mapstructure:"creation_statements" structs:"creation_statments"`
- RevocationStatements string `json:"revocation_statements" mapstructure:"revocation_statements" structs:"revocation_statements"`
- RollbackStatements string `json:"rollback_statements" mapstructure:"rollback_statements" structs:"rollback_statements"`
- RenewStatements string `json:"renew_statements" mapstructure:"renew_statements" structs:"renew_statements"`
-}
-
-// UsernameConfig is used to configure prefixes for the username to be
-// generated.
-type UsernameConfig struct {
- DisplayName string
- RoleName string
-}
-
-// PluginFactory is used to build plugin database types. It wraps the database
-// object in a logging and metrics middleware.
-func PluginFactory(pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) {
- // Look for plugin in the plugin catalog
- pluginRunner, err := sys.LookupPlugin(pluginName)
- if err != nil {
- return nil, err
- }
-
- var db Database
- if pluginRunner.Builtin {
- // Plugin is builtin so we can retrieve an instance of the interface
- // from the pluginRunner. Then cast it to a Database.
- dbRaw, err := pluginRunner.BuiltinFactory()
- if err != nil {
- return nil, fmt.Errorf("error getting plugin type: %s", err)
- }
-
- var ok bool
- db, ok = dbRaw.(Database)
- if !ok {
- return nil, fmt.Errorf("unsuported database type: %s", pluginName)
- }
-
- } else {
- // create a DatabasePluginClient instance
- db, err = newPluginClient(sys, pluginRunner, logger)
- if err != nil {
- return nil, err
- }
- }
-
- typeStr, err := db.Type()
- if err != nil {
- return nil, fmt.Errorf("error getting plugin type: %s", err)
- }
-
- // Wrap with metrics middleware
- db = &databaseMetricsMiddleware{
- next: db,
- typeStr: typeStr,
- }
-
- // Wrap with tracing middleware
- if logger.IsTrace() {
- db = &databaseTracingMiddleware{
- next: db,
- typeStr: typeStr,
- logger: logger,
- }
- }
-
- return db, nil
-}
-
-// handshakeConfigs are used to just do a basic handshake between
-// a plugin and host. If the handshake fails, a user friendly error is shown.
-// This prevents users from executing bad plugins or executing a plugin
-// directory. It is a UX feature, not a security feature.
-var handshakeConfig = plugin.HandshakeConfig{
- ProtocolVersion: 3,
- MagicCookieKey: "VAULT_DATABASE_PLUGIN",
- MagicCookieValue: "926a0820-aea2-be28-51d6-83cdf00e8edb",
-}
-
-// DatabasePlugin implements go-plugin's Plugin interface. It has methods for
-// retrieving a server and a client instance of the plugin.
-type DatabasePlugin struct {
- impl Database
-}
-
-func (d DatabasePlugin) Server(*plugin.MuxBroker) (interface{}, error) {
- return &databasePluginRPCServer{impl: d.impl}, nil
-}
-
-func (DatabasePlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
- return &databasePluginRPCClient{client: c}, nil
-}
-
-// ---- RPC Request Args Domain ----
-
-type InitializeRequest struct {
- Config map[string]interface{}
- VerifyConnection bool
-}
-
-type CreateUserRequest struct {
- Statements Statements
- UsernameConfig UsernameConfig
- Expiration time.Time
-}
-
-type RenewUserRequest struct {
- Statements Statements
- Username string
- Expiration time.Time
-}
-
-type RevokeUserRequest struct {
- Statements Statements
- Username string
-}
-
-// ---- RPC Response Args Domain ----
-
-type CreateUserResponse struct {
- Username string
- Password string
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin_test.go
deleted file mode 100644
index 3a78595..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin_test.go
+++ /dev/null
@@ -1,245 +0,0 @@
-package dbplugin_test
-
-import (
- "errors"
- "os"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- vaulthttp "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/plugins"
- "github.com/hashicorp/vault/vault"
- log "github.com/mgutz/logxi/v1"
-)
-
-type mockPlugin struct {
- users map[string][]string
-}
-
-func (m *mockPlugin) Type() (string, error) { return "mock", nil }
-func (m *mockPlugin) CreateUser(statements dbplugin.Statements, usernameConf dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
- err = errors.New("err")
- if usernameConf.DisplayName == "" || expiration.IsZero() {
- return "", "", err
- }
-
- if _, ok := m.users[usernameConf.DisplayName]; ok {
- return "", "", err
- }
-
- m.users[usernameConf.DisplayName] = []string{password}
-
- return usernameConf.DisplayName, "test", nil
-}
-func (m *mockPlugin) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
- err := errors.New("err")
- if username == "" || expiration.IsZero() {
- return err
- }
-
- if _, ok := m.users[username]; !ok {
- return err
- }
-
- return nil
-}
-func (m *mockPlugin) RevokeUser(statements dbplugin.Statements, username string) error {
- err := errors.New("err")
- if username == "" {
- return err
- }
-
- if _, ok := m.users[username]; !ok {
- return err
- }
-
- delete(m.users, username)
- return nil
-}
-func (m *mockPlugin) Initialize(conf map[string]interface{}, _ bool) error {
- err := errors.New("err")
- if len(conf) != 1 {
- return err
- }
-
- return nil
-}
-func (m *mockPlugin) Close() error {
- m.users = nil
- return nil
-}
-
-func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
- cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
- HandlerFunc: vaulthttp.Handler,
- })
- cluster.Start()
- cores := cluster.Cores
-
- sys := vault.TestDynamicSystemView(cores[0].Core)
- vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", "TestPlugin_Main")
-
- return cluster, sys
-}
-
-// This is not an actual test case, it's a helper function that will be executed
-// by the go-plugin client via an exec call.
-func TestPlugin_Main(t *testing.T) {
- if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
- return
- }
-
- plugin := &mockPlugin{
- users: make(map[string][]string),
- }
-
- args := []string{"--tls-skip-verify=true"}
-
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(args)
-
- plugins.Serve(plugin, apiClientMeta.GetTLSConfig())
-}
-
-func TestPlugin_Initialize(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- dbRaw, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- connectionDetails := map[string]interface{}{
- "test": 1,
- }
-
- err = dbRaw.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- err = dbRaw.Close()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestPlugin_CreateUser(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- defer db.Close()
-
- connectionDetails := map[string]interface{}{
- "test": 1,
- }
-
- err = db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- usernameConf := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- us, pw, err := db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if us != "test" || pw != "test" {
- t.Fatal("expected username and password to be 'test'")
- }
-
- // try and save the same user again to verify it saved the first time, this
- // should return an error
- _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
- if err == nil {
- t.Fatal("expected an error, user wasn't created correctly")
- }
-}
-
-func TestPlugin_RenewUser(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- defer db.Close()
-
- connectionDetails := map[string]interface{}{
- "test": 1,
- }
- err = db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- usernameConf := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- us, _, err := db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- err = db.RenewUser(dbplugin.Statements{}, us, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestPlugin_RevokeUser(t *testing.T) {
- cluster, sys := getCluster(t)
- defer cluster.Cleanup()
-
- db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- defer db.Close()
-
- connectionDetails := map[string]interface{}{
- "test": 1,
- }
- err = db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- usernameConf := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- us, _, err := db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Test default revoke statememts
- err = db.RevokeUser(dbplugin.Statements{}, us)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Try adding the same username back so we can verify it was removed
- _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/server.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/server.go
deleted file mode 100644
index 381f0ae..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/server.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package dbplugin
-
-import (
- "crypto/tls"
-
- "github.com/hashicorp/go-plugin"
-)
-
-// Serve is called from within a plugin and wraps the provided
-// Database implementation in a databasePluginRPCServer object and starts a
-// RPC server.
-func Serve(db Database, tlsProvider func() (*tls.Config, error)) {
- dbPlugin := &DatabasePlugin{
- impl: db,
- }
-
- // pluginMap is the map of plugins we can dispense.
- var pluginMap = map[string]plugin.Plugin{
- "database": dbPlugin,
- }
-
- plugin.Serve(&plugin.ServeConfig{
- HandshakeConfig: handshakeConfig,
- Plugins: pluginMap,
- TLSProvider: tlsProvider,
- })
-}
-
-// ---- RPC server domain ----
-
-// databasePluginRPCServer implements an RPC version of Database and is run
-// inside a plugin. It wraps an underlying implementation of Database.
-type databasePluginRPCServer struct {
- impl Database
-}
-
-func (ds *databasePluginRPCServer) Type(_ struct{}, resp *string) error {
- var err error
- *resp, err = ds.impl.Type()
- return err
-}
-
-func (ds *databasePluginRPCServer) CreateUser(args *CreateUserRequest, resp *CreateUserResponse) error {
- var err error
- resp.Username, resp.Password, err = ds.impl.CreateUser(args.Statements, args.UsernameConfig, args.Expiration)
-
- return err
-}
-
-func (ds *databasePluginRPCServer) RenewUser(args *RenewUserRequest, _ *struct{}) error {
- err := ds.impl.RenewUser(args.Statements, args.Username, args.Expiration)
-
- return err
-}
-
-func (ds *databasePluginRPCServer) RevokeUser(args *RevokeUserRequest, _ *struct{}) error {
- err := ds.impl.RevokeUser(args.Statements, args.Username)
-
- return err
-}
-
-func (ds *databasePluginRPCServer) Initialize(args *InitializeRequest, _ *struct{}) error {
- err := ds.impl.Initialize(args.Config, args.VerifyConnection)
-
- return err
-}
-
-func (ds *databasePluginRPCServer) Close(_ struct{}, _ *struct{}) error {
- ds.impl.Close()
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_config_connection.go
deleted file mode 100644
index d1e6cb2..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_config_connection.go
+++ /dev/null
@@ -1,294 +0,0 @@
-package database
-
-import (
- "errors"
- "fmt"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-var (
- respErrEmptyPluginName = "empty plugin name"
- respErrEmptyName = "empty name attribute given"
-)
-
-// DatabaseConfig is used by the Factory function to configure a Database
-// object.
-type DatabaseConfig struct {
- PluginName string `json:"plugin_name" structs:"plugin_name" mapstructure:"plugin_name"`
- // ConnectionDetails stores the database specific connection settings needed
- // by each database type.
- ConnectionDetails map[string]interface{} `json:"connection_details" structs:"connection_details" mapstructure:"connection_details"`
- AllowedRoles []string `json:"allowed_roles" structs:"allowed_roles" mapstructure:"allowed_roles"`
-}
-
-// pathResetConnection configures a path to reset a plugin.
-func pathResetConnection(b *databaseBackend) *framework.Path {
- return &framework.Path{
- Pattern: fmt.Sprintf("reset/%s", framework.GenericNameRegex("name")),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of this database connection",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathConnectionReset(),
- },
-
- HelpSynopsis: pathResetConnectionHelpSyn,
- HelpDescription: pathResetConnectionHelpDesc,
- }
-}
-
-// pathConnectionReset resets a plugin by closing the existing instance and
-// creating a new one.
-func (b *databaseBackend) pathConnectionReset() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
- if name == "" {
- return logical.ErrorResponse(respErrEmptyName), nil
- }
-
- // Grab the mutex lock
- b.Lock()
- defer b.Unlock()
-
- // Close plugin and delete the entry in the connections cache.
- b.clearConnection(name)
-
- // Execute plugin again, we don't need the object so throw away.
- _, err := b.createDBObj(req.Storage, name)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- }
-}
-
-// pathConfigurePluginConnection returns a configured framework.Path setup to
-// operate on plugins.
-func pathConfigurePluginConnection(b *databaseBackend) *framework.Path {
- return &framework.Path{
- Pattern: fmt.Sprintf("config/%s", framework.GenericNameRegex("name")),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of this database connection",
- },
-
- "plugin_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The name of a builtin or previously registered
- plugin known to vault. This endpoint will create an instance of
- that plugin type.`,
- },
-
- "verify_connection": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: `If true, the connection details are verified by
- actually connecting to the database. Defaults to true.`,
- },
-
- "allowed_roles": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: `Comma separated string or array of the role names
- allowed to get creds from this database connection. If empty no
- roles are allowed. If "*" all roles are allowed.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.connectionWriteHandler(),
- logical.ReadOperation: b.connectionReadHandler(),
- logical.DeleteOperation: b.connectionDeleteHandler(),
- },
-
- HelpSynopsis: pathConfigConnectionHelpSyn,
- HelpDescription: pathConfigConnectionHelpDesc,
- }
-}
-
-func pathListPluginConnection(b *databaseBackend) *framework.Path {
- return &framework.Path{
- Pattern: fmt.Sprintf("config/?$"),
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.connectionListHandler(),
- },
-
- HelpSynopsis: pathConfigConnectionHelpSyn,
- HelpDescription: pathConfigConnectionHelpDesc,
- }
-}
-
-func (b *databaseBackend) connectionListHandler() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("config/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
- }
-}
-
-// connectionReadHandler reads out the connection configuration
-func (b *databaseBackend) connectionReadHandler() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
- if name == "" {
- return logical.ErrorResponse(respErrEmptyName), nil
- }
-
- entry, err := req.Storage.Get(fmt.Sprintf("config/%s", name))
- if err != nil {
- return nil, errors.New("failed to read connection configuration")
- }
- if entry == nil {
- return nil, nil
- }
-
- var config DatabaseConfig
- if err := entry.DecodeJSON(&config); err != nil {
- return nil, err
- }
- return &logical.Response{
- Data: structs.New(config).Map(),
- }, nil
- }
-}
-
-// connectionDeleteHandler deletes the connection configuration
-func (b *databaseBackend) connectionDeleteHandler() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
- if name == "" {
- return logical.ErrorResponse(respErrEmptyName), nil
- }
-
- err := req.Storage.Delete(fmt.Sprintf("config/%s", name))
- if err != nil {
- return nil, errors.New("failed to delete connection configuration")
- }
-
- b.Lock()
- defer b.Unlock()
-
- if _, ok := b.connections[name]; ok {
- err = b.connections[name].Close()
- if err != nil {
- return nil, err
- }
-
- delete(b.connections, name)
- }
-
- return nil, nil
- }
-}
-
-// connectionWriteHandler returns a handler function for creating and updating
-// both builtin and plugin database types.
-func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- pluginName := data.Get("plugin_name").(string)
- if pluginName == "" {
- return logical.ErrorResponse(respErrEmptyPluginName), nil
- }
-
- name := data.Get("name").(string)
- if name == "" {
- return logical.ErrorResponse(respErrEmptyName), nil
- }
-
- verifyConnection := data.Get("verify_connection").(bool)
-
- allowedRoles := data.Get("allowed_roles").([]string)
-
- // Remove these entries from the data before we store it keyed under
- // ConnectionDetails.
- delete(data.Raw, "name")
- delete(data.Raw, "plugin_name")
- delete(data.Raw, "allowed_roles")
- delete(data.Raw, "verify_connection")
-
- config := &DatabaseConfig{
- ConnectionDetails: data.Raw,
- PluginName: pluginName,
- AllowedRoles: allowedRoles,
- }
-
- db, err := dbplugin.PluginFactory(config.PluginName, b.System(), b.logger)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("error creating database object: %s", err)), nil
- }
-
- err = db.Initialize(config.ConnectionDetails, verifyConnection)
- if err != nil {
- db.Close()
- return logical.ErrorResponse(fmt.Sprintf("error creating database object: %s", err)), nil
- }
-
- // Grab the mutex lock
- b.Lock()
- defer b.Unlock()
-
- // Close and remove the old connection
- b.clearConnection(name)
-
- // Save the new connection
- b.connections[name] = db
-
- // Store it
- entry, err := logical.StorageEntryJSON(fmt.Sprintf("config/%s", name), config)
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- resp := &logical.Response{}
- resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any.")
-
- return resp, nil
- }
-}
-
-const pathConfigConnectionHelpSyn = `
-Configure connection details to a database plugin.
-`
-
-const pathConfigConnectionHelpDesc = `
-This path configures the connection details used to connect to a particular
-database. This path runs the provided plugin name and passes the configured
-connection details to the plugin. See the documentation for the plugin specified
-for a full list of accepted connection details.
-
-In addition to the database specific connection details, this endpoint also
-accepts:
-
- * "plugin_name" (required) - The name of a builtin or previously registered
- plugin known to vault. This endpoint will create an instance of that
- plugin type.
-
- * "verify_connection" (default: true) - A boolean value denoting if the plugin should verify
- it is able to connect to the database using the provided connection
- details.
-`
-
-const pathResetConnectionHelpSyn = `
-Resets a database plugin.
-`
-
-const pathResetConnectionHelpDesc = `
-This path resets the database connection by closing the existing database plugin
-instance and running a new one.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_creds_create.go
deleted file mode 100644
index 6fb61a3..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_creds_create.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package database
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathCredsCreate(b *databaseBackend) *framework.Path {
- return &framework.Path{
- Pattern: "creds/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathCredsCreateRead(),
- },
-
- HelpSynopsis: pathCredsCreateReadHelpSyn,
- HelpDescription: pathCredsCreateReadHelpDesc,
- }
-}
-
-func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- // Get the role
- role, err := b.Role(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
- }
-
- dbConfig, err := b.DatabaseConfig(req.Storage, role.DBName)
- if err != nil {
- return nil, err
- }
-
- // If role name isn't in the database's allowed roles, send back a
- // permission denied.
- if !strutil.StrListContains(dbConfig.AllowedRoles, "*") && !strutil.StrListContains(dbConfig.AllowedRoles, name) {
- return nil, logical.ErrPermissionDenied
- }
-
- // Grab the read lock
- b.RLock()
- var unlockFunc func() = b.RUnlock
-
- // Get the Database object
- db, ok := b.getDBObj(role.DBName)
- if !ok {
- // Upgrade lock
- b.RUnlock()
- b.Lock()
- unlockFunc = b.Unlock
-
- // Create a new DB object
- db, err = b.createDBObj(req.Storage, role.DBName)
- if err != nil {
- unlockFunc()
- return nil, fmt.Errorf("cound not retrieve db with name: %s, got error: %s", role.DBName, err)
- }
- }
-
- expiration := time.Now().Add(role.DefaultTTL)
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: req.DisplayName,
- RoleName: name,
- }
-
- // Create the user
- username, password, err := db.CreateUser(role.Statements, usernameConfig, expiration)
- // Unlock
- unlockFunc()
- if err != nil {
- b.closeIfShutdown(role.DBName, err)
- return nil, err
- }
-
- resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
- "username": username,
- "password": password,
- }, map[string]interface{}{
- "username": username,
- "role": name,
- })
- resp.Secret.TTL = role.DefaultTTL
- return resp, nil
- }
-}
-
-const pathCredsCreateReadHelpSyn = `
-Request database credentials for a certain role.
-`
-
-const pathCredsCreateReadHelpDesc = `
-This path reads database credentials for a certain role. The
-database credentials will be generated on demand and will be automatically
-revoked when the lease is up.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_roles.go
deleted file mode 100644
index 69884cb..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_roles.go
+++ /dev/null
@@ -1,232 +0,0 @@
-package database
-
-import (
- "time"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathListRoles(b *databaseBackend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList(),
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func pathRoles(b *databaseBackend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": {
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
-
- "db_name": {
- Type: framework.TypeString,
- Description: "Name of the database this role acts on.",
- },
- "creation_statements": {
- Type: framework.TypeString,
- Description: `Specifies the database statements executed to
- create and configure a user. See the plugin's API page for more
- information on support and formatting for this parameter.`,
- },
- "revocation_statements": {
- Type: framework.TypeString,
- Description: `Specifies the database statements to be executed
- to revoke a user. See the plugin's API page for more information
- on support and formatting for this parameter.`,
- },
- "renew_statements": {
- Type: framework.TypeString,
- Description: `Specifies the database statements to be executed
- to renew a user. Not every plugin type will support this
- functionality. See the plugin's API page for more information on
- support and formatting for this parameter. `,
- },
- "rollback_statements": {
- Type: framework.TypeString,
- Description: `Specifies the database statements to be executed
- rollback a create operation in the event of an error. Not every
- plugin type will support this functionality. See the plugin's
- API page for more information on support and formatting for this
- parameter.`,
- },
-
- "default_ttl": {
- Type: framework.TypeDurationSecond,
- Description: "Default ttl for role.",
- },
-
- "max_ttl": {
- Type: framework.TypeDurationSecond,
- Description: "Maximum time a credential is valid for",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleRead(),
- logical.UpdateOperation: b.pathRoleCreate(),
- logical.DeleteOperation: b.pathRoleDelete(),
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func (b *databaseBackend) pathRoleDelete() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("role/" + data.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- }
-}
-
-func (b *databaseBackend) pathRoleRead() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- role, err := b.Role(req.Storage, data.Get("name").(string))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "db_name": role.DBName,
- "creation_statements": role.Statements.CreationStatements,
- "revocation_statements": role.Statements.RevocationStatements,
- "rollback_statements": role.Statements.RollbackStatements,
- "renew_statements": role.Statements.RenewStatements,
- "default_ttl": role.DefaultTTL.Seconds(),
- "max_ttl": role.MaxTTL.Seconds(),
- },
- }, nil
- }
-}
-
-func (b *databaseBackend) pathRoleList() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("role/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
- }
-}
-
-func (b *databaseBackend) pathRoleCreate() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
- if name == "" {
- return logical.ErrorResponse("empty role name attribute given"), nil
- }
-
- dbName := data.Get("db_name").(string)
- if dbName == "" {
- return logical.ErrorResponse("empty database name attribute given"), nil
- }
-
- // Get statements
- creationStmts := data.Get("creation_statements").(string)
- revocationStmts := data.Get("revocation_statements").(string)
- rollbackStmts := data.Get("rollback_statements").(string)
- renewStmts := data.Get("renew_statements").(string)
-
- // Get TTLs
- defaultTTLRaw := data.Get("default_ttl").(int)
- maxTTLRaw := data.Get("max_ttl").(int)
- defaultTTL := time.Duration(defaultTTLRaw) * time.Second
- maxTTL := time.Duration(maxTTLRaw) * time.Second
-
- statements := dbplugin.Statements{
- CreationStatements: creationStmts,
- RevocationStatements: revocationStmts,
- RollbackStatements: rollbackStmts,
- RenewStatements: renewStmts,
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
- DBName: dbName,
- Statements: statements,
- DefaultTTL: defaultTTL,
- MaxTTL: maxTTL,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
- }
-}
-
-type roleEntry struct {
- DBName string `json:"db_name" mapstructure:"db_name" structs:"db_name"`
- Statements dbplugin.Statements `json:"statments" mapstructure:"statements" structs:"statments"`
- DefaultTTL time.Duration `json:"default_ttl" mapstructure:"default_ttl" structs:"default_ttl"`
- MaxTTL time.Duration `json:"max_ttl" mapstructure:"max_ttl" structs:"max_ttl"`
-}
-
-const pathRoleHelpSyn = `
-Manage the roles that can be created with this backend.
-`
-
-const pathRoleHelpDesc = `
-This path lets you manage the roles that can be created with this backend.
-
-The "db_name" parameter is required and configures the name of the database
-connection to use.
-
-The "creation_statements" parameter customizes the string used to create the
-credentials. This can be a sequence of SQL queries, or other statement formats
-for a particular database type. Some substitution will be done to the statement
-strings for certain keys. The names of the variables must be surrounded by "{{"
-and "}}" to be replaced.
-
- * "name" - The random username generated for the DB user.
-
- * "password" - The random password generated for the DB user.
-
- * "expiration" - The timestamp when this user will expire.
-
-Example of a decent creation_statements for a postgresql database plugin:
-
- CREATE ROLE "{{name}}" WITH
- LOGIN
- PASSWORD '{{password}}'
- VALID UNTIL '{{expiration}}';
- GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
-
-The "revocation_statements" parameter customizes the statement string used to
-revoke a user. Example of a decent revocation_statements for a postgresql
-database plugin:
-
- REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
- REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}};
- REVOKE USAGE ON SCHEMA public FROM {{name}};
- DROP ROLE IF EXISTS {{name}};
-
-The "renew_statements" parameter customizes the statement string used to renew a
-user.
-The "rollback_statements' parameter customizes the statement string used to
-rollback a change if needed.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/secret_creds.go
deleted file mode 100644
index c3dfcb9..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/database/secret_creds.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package database
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const SecretCredsType = "creds"
-
-func secretCreds(b *databaseBackend) *framework.Secret {
- return &framework.Secret{
- Type: SecretCredsType,
- Fields: map[string]*framework.FieldSchema{},
-
- Renew: b.secretCredsRenew(),
- Revoke: b.secretCredsRevoke(),
- }
-}
-
-func (b *databaseBackend) secretCredsRenew() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Get the username from the internal data
- usernameRaw, ok := req.Secret.InternalData["username"]
- if !ok {
- return nil, fmt.Errorf("secret is missing username internal data")
- }
- username, ok := usernameRaw.(string)
-
- roleNameRaw, ok := req.Secret.InternalData["role"]
- if !ok {
- return nil, fmt.Errorf("could not find role with name: %s", req.Secret.InternalData["role"])
- }
-
- role, err := b.Role(req.Storage, roleNameRaw.(string))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, fmt.Errorf("error during renew: could not find role with name %s", req.Secret.InternalData["role"])
- }
-
- f := framework.LeaseExtend(role.DefaultTTL, role.MaxTTL, b.System())
- resp, err := f(req, data)
- if err != nil {
- return nil, err
- }
-
- // Grab the read lock
- b.RLock()
- var unlockFunc func() = b.RUnlock
-
- // Get the Database object
- db, ok := b.getDBObj(role.DBName)
- if !ok {
- // Upgrade lock
- b.RUnlock()
- b.Lock()
- unlockFunc = b.Unlock
-
- // Create a new DB object
- db, err = b.createDBObj(req.Storage, role.DBName)
- if err != nil {
- unlockFunc()
- return nil, fmt.Errorf("cound not retrieve db with name: %s, got error: %s", role.DBName, err)
- }
- }
-
- // Make sure we increase the VALID UNTIL endpoint for this user.
- if expireTime := resp.Secret.ExpirationTime(); !expireTime.IsZero() {
- err := db.RenewUser(role.Statements, username, expireTime)
- // Unlock
- unlockFunc()
- if err != nil {
- b.closeIfShutdown(role.DBName, err)
- return nil, err
- }
- }
-
- return resp, nil
- }
-}
-
-func (b *databaseBackend) secretCredsRevoke() framework.OperationFunc {
- return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Get the username from the internal data
- usernameRaw, ok := req.Secret.InternalData["username"]
- if !ok {
- return nil, fmt.Errorf("secret is missing username internal data")
- }
- username, ok := usernameRaw.(string)
-
- var resp *logical.Response
-
- roleNameRaw, ok := req.Secret.InternalData["role"]
- if !ok {
- return nil, fmt.Errorf("no role name was provided")
- }
-
- role, err := b.Role(req.Storage, roleNameRaw.(string))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, fmt.Errorf("error during revoke: could not find role with name %s", req.Secret.InternalData["role"])
- }
-
- // Grab the read lock
- b.RLock()
- var unlockFunc func() = b.RUnlock
-
- // Get our connection
- db, ok := b.getDBObj(role.DBName)
- if !ok {
- // Upgrade lock
- b.RUnlock()
- b.Lock()
- unlockFunc = b.Unlock
-
- // Create a new DB object
- db, err = b.createDBObj(req.Storage, role.DBName)
- if err != nil {
- unlockFunc()
- return nil, fmt.Errorf("cound not retrieve db with name: %s, got error: %s", role.DBName, err)
- }
- }
-
- err = db.RevokeUser(role.Statements, username)
- // Unlock
- unlockFunc()
- if err != nil {
- b.closeIfShutdown(role.DBName, err)
- return nil, err
- }
-
- return resp, nil
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go
deleted file mode 100644
index d850e8a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package mongodb
-
-import (
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "gopkg.in/mgo.v2"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *framework.Backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(backendHelp),
-
- Paths: []*framework.Path{
- pathConfigConnection(&b),
- pathConfigLease(&b),
- pathListRoles(&b),
- pathRoles(&b),
- pathCredsCreate(&b),
- },
-
- Secrets: []*framework.Secret{
- secretCreds(&b),
- },
-
- Clean: b.ResetSession,
-
- Invalidate: b.invalidate,
- BackendType: logical.TypeLogical,
- }
-
- return b.Backend
-}
-
-type backend struct {
- *framework.Backend
-
- session *mgo.Session
- lock sync.Mutex
-}
-
-// Session returns the database connection.
-func (b *backend) Session(s logical.Storage) (*mgo.Session, error) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.session != nil {
- if err := b.session.Ping(); err == nil {
- return b.session, nil
- }
- b.session.Close()
- }
-
- connConfigJSON, err := s.Get("config/connection")
- if err != nil {
- return nil, err
- }
- if connConfigJSON == nil {
- return nil, fmt.Errorf("configure the MongoDB connection with config/connection first")
- }
-
- var connConfig connectionConfig
- if err := connConfigJSON.DecodeJSON(&connConfig); err != nil {
- return nil, err
- }
-
- dialInfo, err := parseMongoURI(connConfig.URI)
- if err != nil {
- return nil, err
- }
-
- b.session, err = mgo.DialWithInfo(dialInfo)
- if err != nil {
- return nil, err
- }
- b.session.SetSyncTimeout(1 * time.Minute)
- b.session.SetSocketTimeout(1 * time.Minute)
-
- return b.session, nil
-}
-
-// ResetSession forces creation of a new connection next time Session() is called.
-func (b *backend) ResetSession() {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.session != nil {
- b.session.Close()
- }
-
- b.session = nil
-}
-
-func (b *backend) invalidate(key string) {
- switch key {
- case "config/connection":
- b.ResetSession()
- }
-}
-
-// LeaseConfig returns the lease configuration
-func (b *backend) LeaseConfig(s logical.Storage) (*configLease, error) {
- entry, err := s.Get("config/lease")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result configLease
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-const backendHelp = `
-The mongodb backend dynamically generates MongoDB credentials.
-
-After mounting this backend, configure it using the endpoints within
-the "config/" path.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend_test.go
deleted file mode 100644
index dcd4b9f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend_test.go
+++ /dev/null
@@ -1,331 +0,0 @@
-package mongodb
-
-import (
- "fmt"
- "log"
- "os"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/mitchellh/mapstructure"
- dockertest "gopkg.in/ory-am/dockertest.v2"
-)
-
-var (
- testImagePull sync.Once
-)
-
-func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retURI string) {
- if os.Getenv("MONGODB_URI") != "" {
- return "", os.Getenv("MONGODB_URI")
- }
-
- // Without this the checks for whether the container has started seem to
- // never actually pass. There's really no reason to expose the test
- // containers, so don't.
- dockertest.BindDockerToLocalhost = "yep"
-
- testImagePull.Do(func() {
- dockertest.Pull(dockertest.MongoDBImageName)
- })
-
- cid, connErr := dockertest.ConnectToMongoDB(60, 500*time.Millisecond, func(connURI string) bool {
- connURI = "mongodb://" + connURI
- // This will cause a validation to run
- resp, err := b.HandleRequest(&logical.Request{
- Storage: s,
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Data: map[string]interface{}{
- "uri": connURI,
- },
- })
- if err != nil || (resp != nil && resp.IsError()) {
- // It's likely not up and running yet, so return false and try again
- return false
- }
- if resp == nil {
- t.Fatal("expected warning")
- }
-
- retURI = connURI
- return true
- })
-
- if connErr != nil {
- t.Fatalf("could not connect to database: %v", connErr)
- }
-
- return
-}
-
-func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
- err := cid.KillRemove()
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBackend_config_connection(t *testing.T) {
- var resp *logical.Response
- var err error
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- configData := map[string]interface{}{
- "uri": "sample_connection_uri",
- "verify_connection": false,
- }
-
- configReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Storage: config.StorageView,
- Data: configData,
- }
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- configReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- if resp.Data["uri"] != configData["uri"] {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestBackend_basic(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURI := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "uri": connURI,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(connData, false),
- testAccStepRole(),
- testAccStepReadCreds("web"),
- },
- })
-}
-
-func TestBackend_roleCrud(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURI := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "uri": connURI,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(connData, false),
- testAccStepRole(),
- testAccStepReadRole("web", testDb, testMongoDBRoles),
- testAccStepDeleteRole("web"),
- testAccStepReadRole("web", "", ""),
- },
- })
-}
-
-func TestBackend_leaseWriteRead(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURI := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "uri": connURI,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(connData, false),
- testAccStepWriteLease(),
- testAccStepReadLease(),
- },
- })
-
-}
-
-func testAccStepConfig(d map[string]interface{}, expectError bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Data: d,
- ErrorOk: true,
- Check: func(resp *logical.Response) error {
- if expectError {
- if resp.Data == nil {
- return fmt.Errorf("data is nil")
- }
- var e struct {
- Error string `mapstructure:"error"`
- }
- if err := mapstructure.Decode(resp.Data, &e); err != nil {
- return err
- }
- if len(e.Error) == 0 {
- return fmt.Errorf("expected error, but write succeeded.")
- }
- return nil
- } else if resp != nil && resp.IsError() {
- return fmt.Errorf("got an error response: %v", resp.Error())
- }
- return nil
- },
- }
-}
-
-func testAccStepRole() logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/web",
- Data: map[string]interface{}{
- "db": testDb,
- "roles": testMongoDBRoles,
- },
- }
-}
-
-func testAccStepDeleteRole(n string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "roles/" + n,
- }
-}
-
-func testAccStepReadCreds(name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "creds/" + name,
- Check: func(resp *logical.Response) error {
- var d struct {
- DB string `mapstructure:"db"`
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.DB == "" {
- return fmt.Errorf("bad: %#v", resp)
- }
- if d.Username == "" {
- return fmt.Errorf("bad: %#v", resp)
- }
- if !strings.HasPrefix(d.Username, "vault-root-") {
- return fmt.Errorf("bad: %#v", resp)
- }
- if d.Password == "" {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- log.Printf("[WARN] Generated credentials: %v", d)
-
- return nil
- },
- }
-}
-
-func testAccStepReadRole(name, db, mongoDBRoles string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "roles/" + name,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if db == "" && mongoDBRoles == "" {
- return nil
- }
-
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- DB string `mapstructure:"db"`
- MongoDBRoles string `mapstructure:"roles"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.DB != db {
- return fmt.Errorf("bad: %#v", resp)
- }
- if d.MongoDBRoles != mongoDBRoles {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
-
-func testAccStepWriteLease() logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/lease",
- Data: map[string]interface{}{
- "ttl": "1h5m",
- "max_ttl": "24h",
- },
- }
-}
-
-func testAccStepReadLease() logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "config/lease",
- Check: func(resp *logical.Response) error {
- if resp.Data["ttl"].(float64) != 3900 || resp.Data["max_ttl"].(float64) != 86400 {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
-
-const testDb = "foo"
-const testMongoDBRoles = `["readWrite",{"role":"read","db":"bar"}]`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_connection.go
deleted file mode 100644
index 9f615f9..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_connection.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package mongodb
-
-import (
- "fmt"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "gopkg.in/mgo.v2"
-)
-
-func pathConfigConnection(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/connection",
- Fields: map[string]*framework.FieldSchema{
- "uri": {
- Type: framework.TypeString,
- Description: "MongoDB standard connection string (URI)",
- },
- "verify_connection": {
- Type: framework.TypeBool,
- Default: true,
- Description: `If set, uri is verified by actually connecting to the database`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathConnectionRead,
- logical.UpdateOperation: b.pathConnectionWrite,
- },
- HelpSynopsis: pathConfigConnectionHelpSyn,
- HelpDescription: pathConfigConnectionHelpDesc,
- }
-}
-
-// pathConnectionRead reads out the connection configuration
-func (b *backend) pathConnectionRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entry, err := req.Storage.Get("config/connection")
- if err != nil {
- return nil, fmt.Errorf("failed to read connection configuration")
- }
- if entry == nil {
- return nil, nil
- }
-
- var config connectionConfig
- if err := entry.DecodeJSON(&config); err != nil {
- return nil, err
- }
- return &logical.Response{
- Data: structs.New(config).Map(),
- }, nil
-}
-
-func (b *backend) pathConnectionWrite(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- uri := data.Get("uri").(string)
- if uri == "" {
- return logical.ErrorResponse("uri parameter is required"), nil
- }
-
- dialInfo, err := parseMongoURI(uri)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("invalid uri: %s", err)), nil
- }
-
- // Don't check the config if verification is disabled
- verifyConnection := data.Get("verify_connection").(bool)
- if verifyConnection {
- // Verify the config
- session, err := mgo.DialWithInfo(dialInfo)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error validating connection info: %s", err)), nil
- }
- defer session.Close()
- if err := session.Ping(); err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error validating connection info: %s", err)), nil
- }
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{
- URI: uri,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- // Reset the Session
- b.ResetSession()
-
- resp := &logical.Response{}
- resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection URI as it is, including passwords, if any.")
-
- return resp, nil
-}
-
-type connectionConfig struct {
- URI string `json:"uri" structs:"uri" mapstructure:"uri"`
-}
-
-const pathConfigConnectionHelpSyn = `
-Configure the connection string to talk to MongoDB.
-`
-
-const pathConfigConnectionHelpDesc = `
-This path configures the standard connection string (URI) used to connect to MongoDB.
-
-A MongoDB URI looks like:
-"mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]"
-
-See https://docs.mongodb.org/manual/reference/connection-string/ for detailed documentation of the URI format.
-
-When configuring the connection string, the backend will verify its validity.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_lease.go
deleted file mode 100644
index 8f5bd7b..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_config_lease.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package mongodb
-
-import (
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigLease(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/lease",
- Fields: map[string]*framework.FieldSchema{
- "ttl": {
- Type: framework.TypeDurationSecond,
- Description: "Default ttl for credentials.",
- },
-
- "max_ttl": {
- Type: framework.TypeDurationSecond,
- Description: "Maximum time a set of credentials can be valid for.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathConfigLeaseRead,
- logical.UpdateOperation: b.pathConfigLeaseWrite,
- },
-
- HelpSynopsis: pathConfigLeaseHelpSyn,
- HelpDescription: pathConfigLeaseHelpDesc,
- }
-}
-
-func (b *backend) pathConfigLeaseWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- entry, err := logical.StorageEntryJSON("config/lease", &configLease{
- TTL: time.Second * time.Duration(d.Get("ttl").(int)),
- MaxTTL: time.Second * time.Duration(d.Get("max_ttl").(int)),
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathConfigLeaseRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- leaseConfig, err := b.LeaseConfig(req.Storage)
-
- if err != nil {
- return nil, err
- }
- if leaseConfig == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "ttl": leaseConfig.TTL.Seconds(),
- "max_ttl": leaseConfig.MaxTTL.Seconds(),
- },
- }, nil
-}
-
-type configLease struct {
- TTL time.Duration
- MaxTTL time.Duration
-}
-
-const pathConfigLeaseHelpSyn = `
-Configure the default lease TTL settings for credentials
-generated by the mongodb backend.
-`
-
-const pathConfigLeaseHelpDesc = `
-This configures the default lease TTL settings used for
-credentials generated by this backend. The ttl specifies the
-duration that a set of credentials will be valid for before
-the lease must be renewed (if it is renewable), while the
-max_ttl specifies the overall maximum duration that the
-credentials will be valid regardless of lease renewals.
-
-The format for the TTL values is an integer and then unit. For
-example, the value "1h" specifies a 1-hour TTL. The longest
-supported unit is hours.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_creds_create.go
deleted file mode 100644
index de80e0a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_creds_create.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package mongodb
-
-import (
- "fmt"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathCredsCreate(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "creds/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": {
- Type: framework.TypeString,
- Description: "Name of the role to generate credentials for.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathCredsCreateRead,
- },
-
- HelpSynopsis: pathCredsCreateReadHelpSyn,
- HelpDescription: pathCredsCreateReadHelpDesc,
- }
-}
-
-func (b *backend) pathCredsCreateRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- // Get the role
- role, err := b.Role(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
- }
-
- // Determine if we have a lease configuration
- leaseConfig, err := b.LeaseConfig(req.Storage)
- if err != nil {
- return nil, err
- }
- if leaseConfig == nil {
- leaseConfig = &configLease{}
- }
-
- // Generate the username and password
- displayName := req.DisplayName
- if displayName != "" {
- displayName += "-"
- }
-
- userUUID, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
-
- username := fmt.Sprintf("vault-%s%s", displayName, userUUID)
-
- password, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
-
- // Build the user creation command
- createUserCmd := createUserCommand{
- Username: username,
- Password: password,
- Roles: role.MongoDBRoles.toStandardRolesArray(),
- }
-
- // Get our connection
- session, err := b.Session(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // Create the user
- err = session.DB(role.DB).Run(createUserCmd, nil)
- if err != nil {
- return nil, err
- }
-
- // Return the secret
- resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
- "db": role.DB,
- "username": username,
- "password": password,
- }, map[string]interface{}{
- "username": username,
- "db": role.DB,
- })
- resp.Secret.TTL = leaseConfig.TTL
- return resp, nil
-}
-
-type createUserCommand struct {
- Username string `bson:"createUser"`
- Password string `bson:"pwd"`
- Roles []interface{} `bson:"roles"`
-}
-
-const pathCredsCreateReadHelpSyn = `
-Request MongoDB database credentials for a particular role.
-`
-
-const pathCredsCreateReadHelpDesc = `
-This path reads generates MongoDB database credentials for
-a particular role. The database credentials will be
-generated on demand and will be automatically revoked when
-the lease is up.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_roles.go
deleted file mode 100644
index d67e49c..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/path_roles.go
+++ /dev/null
@@ -1,228 +0,0 @@
-package mongodb
-
-import (
- "encoding/json"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathListRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func pathRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": {
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "db": {
- Type: framework.TypeString,
- Description: "Name of the authentication database for users generated for this role.",
- },
- "roles": {
- Type: framework.TypeString,
- Description: "MongoDB roles to assign to the users generated for this role.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleRead,
- logical.UpdateOperation: b.pathRoleCreate,
- logical.DeleteOperation: b.pathRoleDelete,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func (b *backend) Role(s logical.Storage, n string) (*roleStorageEntry, error) {
- entry, err := s.Get("role/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result roleStorageEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathRoleDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("role/" + data.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathRoleRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- role, err := b.Role(req.Storage, data.Get("name").(string))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- rolesJsonBytes, err := json.Marshal(role.MongoDBRoles.toStandardRolesArray())
- if err != nil {
- return nil, err
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "db": role.DB,
- "roles": string(rolesJsonBytes),
- },
- }, nil
-}
-
-func (b *backend) pathRoleList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("role/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
-}
-
-func (b *backend) pathRoleCreate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
-
- name := data.Get("name").(string)
- if name == "" {
- return logical.ErrorResponse("Missing name"), nil
- }
-
- roleDB := data.Get("db").(string)
- if roleDB == "" {
- return logical.ErrorResponse("db parameter is required"), nil
- }
-
- // Example roles JSON:
- //
- // [ "readWrite", { "role": "readWrite", "db": "test" } ]
- //
- // For storage, we convert such an array into a homogeneous array of role documents like:
- //
- // [ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ]
- //
- var roles []mongodbRole
- rolesJson := []byte(data.Get("roles").(string))
- if len(rolesJson) > 0 {
- var rolesArray []interface{}
- err := json.Unmarshal(rolesJson, &rolesArray)
- if err != nil {
- return nil, err
- }
- for _, rawRole := range rolesArray {
- switch role := rawRole.(type) {
- case string:
- roles = append(roles, mongodbRole{Role: role})
- case map[string]interface{}:
- if db, ok := role["db"].(string); ok {
- if roleName, ok := role["role"].(string); ok {
- roles = append(roles, mongodbRole{Role: roleName, DB: db})
- }
- }
- }
- }
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("role/"+name, &roleStorageEntry{
- DB: roleDB,
- MongoDBRoles: roles,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (roles mongodbRoles) toStandardRolesArray() []interface{} {
- // Convert array of role documents like:
- //
- // [ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ]
- //
- // into a "standard" MongoDB roles array containing both strings and role documents:
- //
- // [ "readWrite", { "role": "readWrite", "db": "test" } ]
- //
- // MongoDB's createUser command accepts the latter.
- //
- var standardRolesArray []interface{}
- for _, role := range roles {
- if role.DB == "" {
- standardRolesArray = append(standardRolesArray, role.Role)
- } else {
- standardRolesArray = append(standardRolesArray, role)
- }
- }
- return standardRolesArray
-}
-
-type roleStorageEntry struct {
- DB string `json:"db"`
- MongoDBRoles mongodbRoles `json:"roles"`
-}
-
-type mongodbRole struct {
- Role string `json:"role" bson:"role"`
- DB string `json:"db" bson:"db"`
-}
-
-type mongodbRoles []mongodbRole
-
-const pathRoleHelpSyn = `
-Manage the roles used to generate MongoDB credentials.
-`
-
-const pathRoleHelpDesc = `
-This path lets you manage the roles used to generate MongoDB credentials.
-
-The "db" parameter specifies the authentication database for users
-generated for a given role.
-
-The "roles" parameter specifies the MongoDB roles that should be assigned
-to users created for a given role. Just like when creating a user directly
-using db.createUser, the roles JSON array can specify both built-in roles
-and user-defined roles for both the database the user is created in and
-for other databases.
-
-For example, the following roles JSON array grants the "readWrite"
-permission on both the user's authentication database and the "test"
-database:
-
-[ "readWrite", { "role": "readWrite", "db": "test" } ]
-
-Please consult the MongoDB documentation for more
-details on Role-Based Access Control in MongoDB.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/secret_creds.go
deleted file mode 100644
index 6131910..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/secret_creds.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package mongodb
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "gopkg.in/mgo.v2"
-)
-
-const SecretCredsType = "creds"
-
-func secretCreds(b *backend) *framework.Secret {
- return &framework.Secret{
- Type: SecretCredsType,
- Fields: map[string]*framework.FieldSchema{
- "username": {
- Type: framework.TypeString,
- Description: "Username",
- },
-
- "password": {
- Type: framework.TypeString,
- Description: "Password",
- },
- },
-
- Renew: b.secretCredsRenew,
- Revoke: b.secretCredsRevoke,
- }
-}
-
-func (b *backend) secretCredsRenew(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the lease information
- leaseConfig, err := b.LeaseConfig(req.Storage)
- if err != nil {
- return nil, err
- }
- if leaseConfig == nil {
- leaseConfig = &configLease{}
- }
-
- f := framework.LeaseExtend(leaseConfig.TTL, leaseConfig.MaxTTL, b.System())
- return f(req, d)
-}
-
-func (b *backend) secretCredsRevoke(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the username from the internal data
- usernameRaw, ok := req.Secret.InternalData["username"]
- if !ok {
- return nil, fmt.Errorf("secret is missing username internal data")
- }
- username, ok := usernameRaw.(string)
- if !ok {
- return nil, fmt.Errorf("username internal data is not a string")
- }
-
- // Get the db from the internal data
- dbRaw, ok := req.Secret.InternalData["db"]
- if !ok {
- return nil, fmt.Errorf("secret is missing db internal data")
- }
- db, ok := dbRaw.(string)
- if !ok {
- return nil, fmt.Errorf("db internal data is not a string")
- }
-
- // Get our connection
- session, err := b.Session(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // Drop the user
- err = session.DB(db).RemoveUser(username)
- if err != nil && err != mgo.ErrNotFound {
- return nil, err
- }
-
- return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/util.go
deleted file mode 100644
index 209feb0..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/util.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package mongodb
-
-import (
- "crypto/tls"
- "errors"
- "net"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "gopkg.in/mgo.v2"
-)
-
-// Unfortunately, mgo doesn't support the ssl parameter in its MongoDB URI parsing logic, so we have to handle that
-// ourselves. See https://github.com/go-mgo/mgo/issues/84
-func parseMongoURI(rawUri string) (*mgo.DialInfo, error) {
- uri, err := url.Parse(rawUri)
- if err != nil {
- return nil, err
- }
-
- info := mgo.DialInfo{
- Addrs: strings.Split(uri.Host, ","),
- Database: strings.TrimPrefix(uri.Path, "/"),
- Timeout: 10 * time.Second,
- }
-
- if uri.User != nil {
- info.Username = uri.User.Username()
- info.Password, _ = uri.User.Password()
- }
-
- query := uri.Query()
- for key, values := range query {
- var value string
- if len(values) > 0 {
- value = values[0]
- }
-
- switch key {
- case "authSource":
- info.Source = value
- case "authMechanism":
- info.Mechanism = value
- case "gssapiServiceName":
- info.Service = value
- case "replicaSet":
- info.ReplicaSetName = value
- case "maxPoolSize":
- poolLimit, err := strconv.Atoi(value)
- if err != nil {
- return nil, errors.New("bad value for maxPoolSize: " + value)
- }
- info.PoolLimit = poolLimit
- case "ssl":
- ssl, err := strconv.ParseBool(value)
- if err != nil {
- return nil, errors.New("bad value for ssl: " + value)
- }
- if ssl {
- info.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
- return tls.Dial("tcp", addr.String(), &tls.Config{})
- }
- }
- case "connect":
- if value == "direct" {
- info.Direct = true
- break
- }
- if value == "replicaSet" {
- break
- }
- fallthrough
- default:
- return nil, errors.New("unsupported connection URL option: " + key + "=" + value)
- }
- }
-
- return &info, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go
deleted file mode 100644
index ccd981b..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package mssql
-
-import (
- "database/sql"
- "fmt"
- "strings"
- "sync"
-
- _ "github.com/denisenkom/go-mssqldb"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(backendHelp),
-
- Paths: []*framework.Path{
- pathConfigConnection(&b),
- pathConfigLease(&b),
- pathListRoles(&b),
- pathRoles(&b),
- pathCredsCreate(&b),
- },
-
- Secrets: []*framework.Secret{
- secretCreds(&b),
- },
-
- Invalidate: b.invalidate,
- Clean: b.ResetDB,
- BackendType: logical.TypeLogical,
- }
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-
- db *sql.DB
- defaultDb string
- lock sync.Mutex
-}
-
-// DB returns the default database connection.
-func (b *backend) DB(s logical.Storage) (*sql.DB, error) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- // If we already have a DB, we got it!
- if b.db != nil {
- if err := b.db.Ping(); err == nil {
- return b.db, nil
- }
- // If the ping was unsuccessful, close it and ignore errors as we'll be
- // reestablishing anyways
- b.db.Close()
- }
-
- // Otherwise, attempt to make connection
- entry, err := s.Get("config/connection")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, fmt.Errorf("configure the DB connection with config/connection first")
- }
-
- var connConfig connectionConfig
- if err := entry.DecodeJSON(&connConfig); err != nil {
- return nil, err
- }
- connString := connConfig.ConnectionString
-
- db, err := sql.Open("sqlserver", connString)
- if err != nil {
- return nil, err
- }
-
- // Set some connection pool settings. We don't need much of this,
- // since the request rate shouldn't be high.
- db.SetMaxOpenConns(connConfig.MaxOpenConnections)
-
- stmt, err := db.Prepare("SELECT db_name();")
- if err != nil {
- return nil, err
- }
- defer stmt.Close()
-
- err = stmt.QueryRow().Scan(&b.defaultDb)
- if err != nil {
- return nil, err
- }
-
- b.db = db
- return b.db, nil
-}
-
-// ResetDB forces a connection next time DB() is called.
-func (b *backend) ResetDB() {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.db != nil {
- b.db.Close()
- }
-
- b.db = nil
-}
-
-func (b *backend) invalidate(key string) {
- switch key {
- case "config/connection":
- b.ResetDB()
- }
-}
-
-// LeaseConfig returns the lease configuration
-func (b *backend) LeaseConfig(s logical.Storage) (*configLease, error) {
- entry, err := s.Get("config/lease")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result configLease
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-const backendHelp = `
-The MSSQL backend dynamically generates database users.
-
-After mounting this backend, configure it using the endpoints within
-the "config/" path.
-
-This backend does not support Azure SQL Databases.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend_test.go
deleted file mode 100644
index 329aeac..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend_test.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package mssql
-
-import (
- "fmt"
- "log"
- "os"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/mitchellh/mapstructure"
-)
-
-func TestBackend_config_connection(t *testing.T) {
- var resp *logical.Response
- var err error
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- configData := map[string]interface{}{
- "connection_string": "sample_connection_string",
- "max_open_connections": 7,
- "verify_connection": false,
- }
-
- configReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Storage: config.StorageView,
- Data: configData,
- }
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- configReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- delete(configData, "verify_connection")
- if !reflect.DeepEqual(configData, resp.Data) {
- t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data)
- }
-}
-
-func TestBackend_basic(t *testing.T) {
- b, _ := Factory(logical.TestBackendConfig())
-
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- PreCheck: func() { testAccPreCheck(t) },
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t),
- testAccStepRole(t),
- testAccStepReadCreds(t, "web"),
- },
- })
-}
-
-func TestBackend_roleCrud(t *testing.T) {
- b := Backend()
-
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- PreCheck: func() { testAccPreCheck(t) },
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t),
- testAccStepRole(t),
- testAccStepReadRole(t, "web", testRoleSQL),
- testAccStepDeleteRole(t, "web"),
- testAccStepReadRole(t, "web", ""),
- },
- })
-}
-
-func TestBackend_leaseWriteRead(t *testing.T) {
- b := Backend()
-
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- PreCheck: func() { testAccPreCheck(t) },
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t),
- testAccStepWriteLease(t),
- testAccStepReadLease(t),
- },
- })
-
-}
-
-func testAccPreCheck(t *testing.T) {
- if v := os.Getenv("MSSQL_DSN"); v == "" {
- t.Fatal("MSSQL_DSN must be set for acceptance tests")
- }
-}
-
-func testAccStepConfig(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Data: map[string]interface{}{
- "connection_string": os.Getenv("MSSQL_DSN"),
- },
- }
-}
-
-func testAccStepRole(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/web",
- Data: map[string]interface{}{
- "sql": testRoleSQL,
- },
- }
-}
-
-func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "roles/" + n,
- }
-}
-
-func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "creds/" + name,
- Check: func(resp *logical.Response) error {
- var d struct {
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- log.Printf("[WARN] Generated credentials: %v", d)
-
- return nil
- },
- }
-}
-
-func testAccStepReadRole(t *testing.T, name, sql string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "roles/" + name,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if sql == "" {
- return nil
- }
-
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- SQL string `mapstructure:"sql"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.SQL != sql {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
-
-func testAccStepWriteLease(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/lease",
- Data: map[string]interface{}{
- "ttl": "1h5m",
- "max_ttl": "24h",
- },
- }
-}
-
-func testAccStepReadLease(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "config/lease",
- Check: func(resp *logical.Response) error {
- if resp.Data["ttl"] != "1h5m0s" || resp.Data["max_ttl"] != "24h0m0s" {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
-
-const testRoleSQL = `
-CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';
-CREATE USER [{{name}}] FOR LOGIN [{{name}}];
-GRANT SELECT ON SCHEMA::dbo TO [{{name}}]
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_connection.go
deleted file mode 100644
index 5caed56..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_connection.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package mssql
-
-import (
- "database/sql"
- "fmt"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigConnection(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/connection",
- Fields: map[string]*framework.FieldSchema{
- "connection_string": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "DB connection parameters",
- },
- "max_open_connections": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: "Maximum number of open connections to database",
- },
- "verify_connection": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: "If set, connection_string is verified by actually connecting to the database",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathConnectionWrite,
- logical.ReadOperation: b.pathConnectionRead,
- },
-
- HelpSynopsis: pathConfigConnectionHelpSyn,
- HelpDescription: pathConfigConnectionHelpDesc,
- }
-}
-
-// pathConnectionRead reads out the connection configuration
-func (b *backend) pathConnectionRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entry, err := req.Storage.Get("config/connection")
- if err != nil {
- return nil, fmt.Errorf("failed to read connection configuration")
- }
- if entry == nil {
- return nil, nil
- }
-
- var config connectionConfig
- if err := entry.DecodeJSON(&config); err != nil {
- return nil, err
- }
- return &logical.Response{
- Data: structs.New(config).Map(),
- }, nil
-}
-
-// pathConnectionWrite stores the connection configuration
-func (b *backend) pathConnectionWrite(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- connString := data.Get("connection_string").(string)
-
- maxOpenConns := data.Get("max_open_connections").(int)
- if maxOpenConns == 0 {
- maxOpenConns = 2
- }
-
- // Don't check the connection_string if verification is disabled
- verifyConnection := data.Get("verify_connection").(bool)
- if verifyConnection {
- // Verify the string
- db, err := sql.Open("mssql", connString)
-
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error validating connection info: %s", err)), nil
- }
- defer db.Close()
- if err := db.Ping(); err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error validating connection info: %s", err)), nil
- }
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{
- ConnectionString: connString,
- MaxOpenConnections: maxOpenConns,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- // Reset the DB connection
- b.ResetDB()
-
- resp := &logical.Response{}
- resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection string as it is, including passwords, if any.")
-
- return resp, nil
-}
-
-type connectionConfig struct {
- ConnectionString string `json:"connection_string" structs:"connection_string" mapstructure:"connection_string"`
- MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"`
-}
-
-const pathConfigConnectionHelpSyn = `
-Configure the connection string to talk to Microsoft Sql Server.
-`
-
-const pathConfigConnectionHelpDesc = `
-This path configures the connection string used to connect to Sql Server.
-The value of the string is a Data Source Name (DSN). An example is
-using "server=;port=;user id=;password=;database=;app name=vault;"
-
-When configuring the connection string, the backend will verify its validity.
-If the database is not available when setting the connection string, set the
-"verify_connection" option to false.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_lease.go
deleted file mode 100644
index f4ab92e..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_config_lease.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package mssql
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigLease(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/lease",
- Fields: map[string]*framework.FieldSchema{
- "ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Default ttl for roles.",
- },
-
- "ttl_max": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Deprecated: use "max_ttl" instead. Maximum
-time a credential is valid for.`,
- },
-
- "max_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Maximum time a credential is valid for.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathConfigLeaseRead,
- logical.UpdateOperation: b.pathConfigLeaseWrite,
- },
-
- HelpSynopsis: pathConfigLeaseHelpSyn,
- HelpDescription: pathConfigLeaseHelpDesc,
- }
-}
-
-func (b *backend) pathConfigLeaseWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- ttlRaw := d.Get("ttl").(string)
- ttlMaxRaw := d.Get("max_ttl").(string)
- if len(ttlMaxRaw) == 0 {
- ttlMaxRaw = d.Get("ttl_max").(string)
- }
-
- ttl, err := time.ParseDuration(ttlRaw)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Invalid ttl: %s", err)), nil
- }
- ttlMax, err := time.ParseDuration(ttlMaxRaw)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Invalid max_ttl: %s", err)), nil
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("config/lease", &configLease{
- TTL: ttl,
- TTLMax: ttlMax,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathConfigLeaseRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- leaseConfig, err := b.LeaseConfig(req.Storage)
-
- if err != nil {
- return nil, err
- }
- if leaseConfig == nil {
- return nil, nil
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "ttl": leaseConfig.TTL.String(),
- "ttl_max": leaseConfig.TTLMax.String(),
- "max_ttl": leaseConfig.TTLMax.String(),
- },
- }
- resp.AddWarning("The field ttl_max is deprecated and will be removed in a future release. Use max_ttl instead.")
-
- return resp, nil
-}
-
-type configLease struct {
- TTL time.Duration
- TTLMax time.Duration
-}
-
-const pathConfigLeaseHelpSyn = `
-Configure the default lease ttl for generated credentials.
-`
-
-const pathConfigLeaseHelpDesc = `
-This configures the default lease ttl used for credentials
-generated by this backend. The ttl specifies the duration that a
-credential will be valid for, as well as the maximum session for
-a set of credentials.
-
-The format for the ttl is "1h" or integer and then unit. The longest
-unit is hour.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_creds_create.go
deleted file mode 100644
index f9baae7..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_creds_create.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package mssql
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathCredsCreate(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "creds/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathCredsCreateRead,
- },
-
- HelpSynopsis: pathCredsCreateHelpSyn,
- HelpDescription: pathCredsCreateHelpDesc,
- }
-}
-
-func (b *backend) pathCredsCreateRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- // Get the role
- role, err := b.Role(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
- }
-
- // Determine if we have a lease configuration
- leaseConfig, err := b.LeaseConfig(req.Storage)
- if err != nil {
- return nil, err
- }
- if leaseConfig == nil {
- leaseConfig = &configLease{}
- }
-
- // Generate our username and password
- displayName := req.DisplayName
- if len(displayName) > 10 {
- displayName = displayName[:10]
- }
- userUUID, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
- username := fmt.Sprintf("%s-%s", displayName, userUUID)
- password, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
-
- // Get our handle
- db, err := b.DB(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return nil, err
- }
- defer tx.Rollback()
-
- // Always reset database to default db of connection. Since it is in a
- // transaction, all statements will be on the same connection in the pool.
- roleSQL := fmt.Sprintf("USE [%s]; %s", b.defaultDb, role.SQL)
-
- // Execute each query
- for _, query := range strutil.ParseArbitraryStringSlice(roleSQL, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := tx.Prepare(Query(query, map[string]string{
- "name": username,
- "password": password,
- }))
- if err != nil {
- return nil, err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return nil, err
- }
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return nil, err
- }
-
- // Return the secret
- resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
- "username": username,
- "password": password,
- }, map[string]interface{}{
- "username": username,
- })
- resp.Secret.TTL = leaseConfig.TTL
- return resp, nil
-}
-
-const pathCredsCreateHelpSyn = `
-Request database credentials for a certain role.
-`
-
-const pathCredsCreateHelpDesc = `
-This path reads database credentials for a certain role. The
-database credentials will be generated on demand and will be automatically
-revoked when the lease is up.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_roles.go
deleted file mode 100644
index 4229444..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/path_roles.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package mssql
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathListRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func pathRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
-
- "sql": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "SQL string to create a role. See help for more info.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleRead,
- logical.UpdateOperation: b.pathRoleCreate,
- logical.DeleteOperation: b.pathRoleDelete,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
- entry, err := s.Get("role/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result roleEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathRoleDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("role/" + data.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathRoleRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- role, err := b.Role(req.Storage, data.Get("name").(string))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "sql": role.SQL,
- },
- }, nil
-}
-
-func (b *backend) pathRoleList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("role/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
-}
-
-func (b *backend) pathRoleCreate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
- sql := data.Get("sql").(string)
-
- // Get our connection
- db, err := b.DB(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // Test the query by trying to prepare it
- for _, query := range strutil.ParseArbitraryStringSlice(sql, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := db.Prepare(Query(query, map[string]string{
- "name": "foo",
- "password": "bar",
- }))
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error testing query: %s", err)), nil
- }
- stmt.Close()
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
- SQL: sql,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-type roleEntry struct {
- SQL string `json:"sql"`
-}
-
-const pathRoleHelpSyn = `
-Manage the roles that can be created with this backend.
-`
-
-const pathRoleHelpDesc = `
-This path lets you manage the roles that can be created with this backend.
-
-The "sql" parameter customizes the SQL string used to create the login to
-the server. The parameter can be a sequence of SQL queries, each semi-colon
-seperated. Some substitution will be done to the SQL string for certain keys.
-The names of the variables must be surrounded by "{{" and "}}" to be replaced.
-
- * "name" - The random username generated for the DB user.
-
- * "password" - The random password generated for the DB user.
-
-Example SQL query to use:
-
- CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';
- CREATE USER [{{name}}] FROM LOGIN [{{name}}];
- GRANT SELECT, UPDATE, DELETE, INSERT on SCHEMA::dbo TO [{{name}}];
-
-Please see the Microsoft SQL Server manual on the GRANT command to learn how to
-do more fine grained access.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/secret_creds.go
deleted file mode 100644
index b870c59..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/secret_creds.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package mssql
-
-import (
- "database/sql"
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const SecretCredsType = "creds"
-
-func secretCreds(b *backend) *framework.Secret {
- return &framework.Secret{
- Type: SecretCredsType,
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username",
- },
-
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password",
- },
- },
-
- Renew: b.secretCredsRenew,
- Revoke: b.secretCredsRevoke,
- }
-}
-
-func (b *backend) secretCredsRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the lease information
- leaseConfig, err := b.LeaseConfig(req.Storage)
- if err != nil {
- return nil, err
- }
- if leaseConfig == nil {
- leaseConfig = &configLease{}
- }
-
- f := framework.LeaseExtend(leaseConfig.TTL, leaseConfig.TTLMax, b.System())
- return f(req, d)
-}
-
-func (b *backend) secretCredsRevoke(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the username from the internal data
- usernameRaw, ok := req.Secret.InternalData["username"]
- if !ok {
- return nil, fmt.Errorf("secret is missing username internal data")
- }
- username, ok := usernameRaw.(string)
-
- // Get our connection
- db, err := b.DB(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // First disable server login
- disableStmt, err := db.Prepare(fmt.Sprintf("ALTER LOGIN [%s] DISABLE;", username))
- if err != nil {
- return nil, err
- }
- defer disableStmt.Close()
- if _, err := disableStmt.Exec(); err != nil {
- return nil, err
- }
-
- // Query for sessions for the login so that we can kill any outstanding
- // sessions. There cannot be any active sessions before we drop the logins
- // This isn't done in a transaction because even if we fail along the way,
- // we want to remove as much access as possible
- sessionStmt, err := db.Prepare(fmt.Sprintf(
- "SELECT session_id FROM sys.dm_exec_sessions WHERE login_name = '%s';", username))
- if err != nil {
- return nil, err
- }
- defer sessionStmt.Close()
-
- sessionRows, err := sessionStmt.Query()
- if err != nil {
- return nil, err
- }
- defer sessionRows.Close()
-
- var revokeStmts []string
- for sessionRows.Next() {
- var sessionID int
- err = sessionRows.Scan(&sessionID)
- if err != nil {
- return nil, err
- }
- revokeStmts = append(revokeStmts, fmt.Sprintf("KILL %d;", sessionID))
- }
-
- // Query for database users using undocumented stored procedure for now since
- // it is the easiest way to get this information;
- // we need to drop the database users before we can drop the login and the role
- // This isn't done in a transaction because even if we fail along the way,
- // we want to remove as much access as possible
- stmt, err := db.Prepare(fmt.Sprintf("EXEC master.dbo.sp_msloginmappings '%s';", username))
- if err != nil {
- return nil, err
- }
- defer stmt.Close()
-
- rows, err := stmt.Query()
- if err != nil {
- return nil, err
- }
- defer rows.Close()
-
- for rows.Next() {
- var loginName, dbName, qUsername string
- var aliasName sql.NullString
- err = rows.Scan(&loginName, &dbName, &qUsername, &aliasName)
- if err != nil {
- return nil, err
- }
- revokeStmts = append(revokeStmts, fmt.Sprintf(dropUserSQL, dbName, username, username))
- }
-
- // we do not stop on error, as we want to remove as
- // many permissions as possible right now
- var lastStmtError error
- for _, query := range revokeStmts {
- stmt, err := db.Prepare(query)
- if err != nil {
- lastStmtError = err
- continue
- }
- defer stmt.Close()
- _, err = stmt.Exec()
- if err != nil {
- lastStmtError = err
- }
- }
-
- // can't drop if not all database users are dropped
- if rows.Err() != nil {
- return nil, fmt.Errorf("cound not generate sql statements for all rows: %s", rows.Err())
- }
- if lastStmtError != nil {
- return nil, fmt.Errorf("could not perform all sql statements: %s", lastStmtError)
- }
-
- // Drop this login
- stmt, err = db.Prepare(fmt.Sprintf(dropLoginSQL, username, username))
- if err != nil {
- return nil, err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-const dropUserSQL = `
-USE [%s]
-IF EXISTS
- (SELECT name
- FROM sys.database_principals
- WHERE name = N'%s')
-BEGIN
- DROP USER [%s]
-END
-`
-
-const dropLoginSQL = `
-IF EXISTS
- (SELECT name
- FROM master.sys.server_principals
- WHERE name = N'%s')
-BEGIN
- DROP LOGIN [%s]
-END
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/util.go
deleted file mode 100644
index 362cbd3..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/util.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package mssql
-
-import (
- "fmt"
- "strings"
-)
-
-// SplitSQL is used to split a series of SQL statements
-func SplitSQL(sql string) []string {
- parts := strings.Split(sql, ";")
- out := make([]string, 0, len(parts))
- for _, p := range parts {
- clean := strings.TrimSpace(p)
- if len(clean) > 0 {
- out = append(out, clean)
- }
- }
- return out
-}
-
-// Query templates a query for us.
-func Query(tpl string, data map[string]string) string {
- for k, v := range data {
- tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
- }
-
- return tpl
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go
deleted file mode 100644
index a89cc49..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package mysql
-
-import (
- "database/sql"
- "fmt"
- "strings"
- "sync"
-
- _ "github.com/go-sql-driver/mysql"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(backendHelp),
-
- Paths: []*framework.Path{
- pathConfigConnection(&b),
- pathConfigLease(&b),
- pathListRoles(&b),
- pathRoles(&b),
- pathRoleCreate(&b),
- },
-
- Secrets: []*framework.Secret{
- secretCreds(&b),
- },
-
- Invalidate: b.invalidate,
- Clean: b.ResetDB,
- BackendType: logical.TypeLogical,
- }
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-
- db *sql.DB
- lock sync.Mutex
-}
-
-// DB returns the database connection.
-func (b *backend) DB(s logical.Storage) (*sql.DB, error) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- // If we already have a DB, we got it!
- if b.db != nil {
- if err := b.db.Ping(); err == nil {
- return b.db, nil
- }
- // If the ping was unsuccessful, close it and ignore errors as we'll be
- // reestablishing anyways
- b.db.Close()
- }
-
- // Otherwise, attempt to make connection
- entry, err := s.Get("config/connection")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil,
- fmt.Errorf("configure the DB connection with config/connection first")
- }
-
- var connConfig connectionConfig
- if err := entry.DecodeJSON(&connConfig); err != nil {
- return nil, err
- }
-
- conn := connConfig.ConnectionURL
- if len(conn) == 0 {
- conn = connConfig.ConnectionString
- }
-
- b.db, err = sql.Open("mysql", conn)
- if err != nil {
- return nil, err
- }
-
- // Set some connection pool settings. We don't need much of this,
- // since the request rate shouldn't be high.
- b.db.SetMaxOpenConns(connConfig.MaxOpenConnections)
- b.db.SetMaxIdleConns(connConfig.MaxIdleConnections)
-
- return b.db, nil
-}
-
-// ResetDB forces a connection next time DB() is called.
-func (b *backend) ResetDB() {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.db != nil {
- b.db.Close()
- }
-
- b.db = nil
-}
-
-func (b *backend) invalidate(key string) {
- switch key {
- case "config/connection":
- b.ResetDB()
- }
-}
-
-// Lease returns the lease information
-func (b *backend) Lease(s logical.Storage) (*configLease, error) {
- entry, err := s.Get("config/lease")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result configLease
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-const backendHelp = `
-The MySQL backend dynamically generates database users.
-
-After mounting this backend, configure it using the endpoints within
-the "config/" path.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend_test.go
deleted file mode 100644
index 2a0a1fc..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend_test.go
+++ /dev/null
@@ -1,368 +0,0 @@
-package mysql
-
-import (
- "fmt"
- "log"
- "os"
- "reflect"
- "sync"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/mitchellh/mapstructure"
- dockertest "gopkg.in/ory-am/dockertest.v2"
-)
-
-var (
- testImagePull sync.Once
-)
-
-func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retURL string) {
- if os.Getenv("MYSQL_DSN") != "" {
- return "", os.Getenv("MYSQL_DSN")
- }
-
- // Without this the checks for whether the container has started seem to
- // never actually pass. There's really no reason to expose the test
- // containers, so don't.
- dockertest.BindDockerToLocalhost = "yep"
-
- testImagePull.Do(func() {
- dockertest.Pull("mysql")
- })
-
- cid, connErr := dockertest.ConnectToMySQL(60, 500*time.Millisecond, func(connURL string) bool {
- // This will cause a validation to run
- resp, err := b.HandleRequest(&logical.Request{
- Storage: s,
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Data: map[string]interface{}{
- "connection_url": connURL,
- },
- })
- if err != nil || (resp != nil && resp.IsError()) {
- // It's likely not up and running yet, so return false and try again
- return false
- }
- if resp == nil {
- t.Fatal("expected warning")
- }
-
- retURL = connURL
- return true
- })
-
- if connErr != nil {
- t.Fatalf("could not connect to database: %v", connErr)
- }
-
- return
-}
-
-func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
- err := cid.KillRemove()
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBackend_config_connection(t *testing.T) {
- var resp *logical.Response
- var err error
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- configData := map[string]interface{}{
- "value": "",
- "connection_url": "sample_connection_url",
- "max_open_connections": 9,
- "max_idle_connections": 7,
- "verify_connection": false,
- }
-
- configReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Storage: config.StorageView,
- Data: configData,
- }
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- configReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- delete(configData, "verify_connection")
- if !reflect.DeepEqual(configData, resp.Data) {
- t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data)
- }
-}
-
-func TestBackend_basic(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "connection_url": connURL,
- }
-
- // for wildcard based mysql user
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, connData, false),
- testAccStepRole(t, true),
- testAccStepReadCreds(t, "web"),
- },
- })
-}
-
-func TestBackend_basicHostRevoke(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "connection_url": connURL,
- }
-
- // for host based mysql user
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, connData, false),
- testAccStepRole(t, false),
- testAccStepReadCreds(t, "web"),
- },
- })
-}
-
-func TestBackend_roleCrud(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "connection_url": connURL,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, connData, false),
- // test SQL with wildcard based user
- testAccStepRole(t, true),
- testAccStepReadRole(t, "web", testRoleWildCard),
- testAccStepDeleteRole(t, "web"),
- // test SQL with host based user
- testAccStepRole(t, false),
- testAccStepReadRole(t, "web", testRoleHost),
- testAccStepDeleteRole(t, "web"),
- },
- })
-}
-
-func TestBackend_leaseWriteRead(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "connection_url": connURL,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, connData, false),
- testAccStepWriteLease(t),
- testAccStepReadLease(t),
- },
- })
-
-}
-
-func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Data: d,
- ErrorOk: true,
- Check: func(resp *logical.Response) error {
- if expectError {
- if resp.Data == nil {
- return fmt.Errorf("data is nil")
- }
- var e struct {
- Error string `mapstructure:"error"`
- }
- if err := mapstructure.Decode(resp.Data, &e); err != nil {
- return err
- }
- if len(e.Error) == 0 {
- return fmt.Errorf("expected error, but write succeeded")
- }
- return nil
- } else if resp != nil && resp.IsError() {
- return fmt.Errorf("got an error response: %v", resp.Error())
- }
- return nil
- },
- }
-}
-
-func testAccStepRole(t *testing.T, wildCard bool) logicaltest.TestStep {
-
- pathData := make(map[string]interface{})
- if wildCard == true {
- pathData = map[string]interface{}{
- "sql": testRoleWildCard,
- }
- } else {
- pathData = map[string]interface{}{
- "sql": testRoleHost,
- "revocation_sql": testRevocationSQL,
- }
- }
-
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/web",
- Data: pathData,
- }
-
-}
-
-func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "roles/" + n,
- }
-}
-
-func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "creds/" + name,
- Check: func(resp *logical.Response) error {
- var d struct {
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- log.Printf("[WARN] Generated credentials: %v", d)
-
- return nil
- },
- }
-}
-
-func testAccStepReadRole(t *testing.T, name string, sql string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "roles/" + name,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if sql == "" {
- return nil
- }
-
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- SQL string `mapstructure:"sql"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.SQL != sql {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
-
-func testAccStepWriteLease(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/lease",
- Data: map[string]interface{}{
- "lease": "1h5m",
- "lease_max": "24h",
- },
- }
-}
-
-func testAccStepReadLease(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "config/lease",
- Check: func(resp *logical.Response) error {
- if resp.Data["lease"] != "1h5m0s" || resp.Data["lease_max"] != "24h0m0s" {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
-
-const testRoleWildCard = `
-CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
-GRANT SELECT ON *.* TO '{{name}}'@'%';
-`
-const testRoleHost = `
-CREATE USER '{{name}}'@'10.1.1.2' IDENTIFIED BY '{{password}}';
-GRANT SELECT ON *.* TO '{{name}}'@'10.1.1.2';
-`
-const testRevocationSQL = `
-REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'10.1.1.2';
-DROP USER '{{name}}'@'10.1.1.2';
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_connection.go
deleted file mode 100644
index 8dd44dd..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_connection.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package mysql
-
-import (
- "database/sql"
- "fmt"
-
- "github.com/fatih/structs"
- _ "github.com/go-sql-driver/mysql"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigConnection(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/connection",
- Fields: map[string]*framework.FieldSchema{
- "connection_url": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "DB connection string",
- },
- "value": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `DB connection string. Use 'connection_url' instead.
-This name is deprecated.`,
- },
- "max_open_connections": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: "Maximum number of open connections to database",
- },
- "max_idle_connections": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: "Maximum number of idle connections to the database; a zero uses the value of max_open_connections and a negative value disables idle connections. If larger than max_open_connections it will be reduced to the same size.",
- },
- "verify_connection": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: "If set, connection_url is verified by actually connecting to the database",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathConnectionWrite,
- logical.ReadOperation: b.pathConnectionRead,
- },
-
- HelpSynopsis: pathConfigConnectionHelpSyn,
- HelpDescription: pathConfigConnectionHelpDesc,
- }
-}
-
-// pathConnectionRead reads out the connection configuration
-func (b *backend) pathConnectionRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entry, err := req.Storage.Get("config/connection")
- if err != nil {
- return nil, fmt.Errorf("failed to read connection configuration")
- }
- if entry == nil {
- return nil, nil
- }
-
- var config connectionConfig
- if err := entry.DecodeJSON(&config); err != nil {
- return nil, err
- }
- return &logical.Response{
- Data: structs.New(config).Map(),
- }, nil
-}
-
-func (b *backend) pathConnectionWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- connValue := data.Get("value").(string)
- connURL := data.Get("connection_url").(string)
- if connURL == "" {
- if connValue == "" {
- return logical.ErrorResponse("the connection_url parameter must be supplied"), nil
- } else {
- connURL = connValue
- }
- }
-
- maxOpenConns := data.Get("max_open_connections").(int)
- if maxOpenConns == 0 {
- maxOpenConns = 2
- }
-
- maxIdleConns := data.Get("max_idle_connections").(int)
- if maxIdleConns == 0 {
- maxIdleConns = maxOpenConns
- }
- if maxIdleConns > maxOpenConns {
- maxIdleConns = maxOpenConns
- }
-
- // Don't check the connection_url if verification is disabled
- verifyConnection := data.Get("verify_connection").(bool)
- if verifyConnection {
- // Verify the string
- db, err := sql.Open("mysql", connURL)
-
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "error validating connection info: %s", err)), nil
- }
- defer db.Close()
- if err := db.Ping(); err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "error validating connection info: %s", err)), nil
- }
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{
- ConnectionURL: connURL,
- MaxOpenConnections: maxOpenConns,
- MaxIdleConnections: maxIdleConns,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- // Reset the DB connection
- b.ResetDB()
-
- resp := &logical.Response{}
- resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection URL as it is, including passwords, if any.")
-
- return resp, nil
-}
-
-type connectionConfig struct {
- ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"`
- // Deprecate "value" in coming releases
- ConnectionString string `json:"value" structs:"value" mapstructure:"value"`
- MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"`
- MaxIdleConnections int `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"`
-}
-
-const pathConfigConnectionHelpSyn = `
-Configure the connection string to talk to MySQL.
-`
-
-const pathConfigConnectionHelpDesc = `
-This path configures the connection string used to connect to MySQL. The value
-of the string is a Data Source Name (DSN). An example is using
-"username:password@protocol(address)/dbname?param=value"
-
-For example, RDS may look like:
-"id:password@tcp(your-amazonaws-uri.com:3306)/dbname"
-
-When configuring the connection string, the backend will verify its validity.
-If the database is not available when setting the connection URL, set the
-"verify_connection" option to false.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_lease.go
deleted file mode 100644
index f386b60..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_config_lease.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package mysql
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigLease(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/lease",
- Fields: map[string]*framework.FieldSchema{
- "lease": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Default lease for roles.",
- },
-
- "lease_max": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Maximum time a credential is valid for.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathLeaseRead,
- logical.UpdateOperation: b.pathLeaseWrite,
- },
-
- HelpSynopsis: pathConfigLeaseHelpSyn,
- HelpDescription: pathConfigLeaseHelpDesc,
- }
-}
-
-func (b *backend) pathLeaseWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- leaseRaw := d.Get("lease").(string)
- leaseMaxRaw := d.Get("lease_max").(string)
-
- lease, err := time.ParseDuration(leaseRaw)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Invalid lease: %s", err)), nil
- }
- leaseMax, err := time.ParseDuration(leaseMaxRaw)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Invalid lease: %s", err)), nil
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("config/lease", &configLease{
- Lease: lease,
- LeaseMax: leaseMax,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathLeaseRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- lease, err := b.Lease(req.Storage)
-
- if err != nil {
- return nil, err
- }
- if lease == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "lease": lease.Lease.String(),
- "lease_max": lease.LeaseMax.String(),
- },
- }, nil
-}
-
-type configLease struct {
- Lease time.Duration
- LeaseMax time.Duration
-}
-
-const pathConfigLeaseHelpSyn = `
-Configure the default lease information for generated credentials.
-`
-
-const pathConfigLeaseHelpDesc = `
-This configures the default lease information used for credentials
-generated by this backend. The lease specifies the duration that a
-credential will be valid for, as well as the maximum session for
-a set of credentials.
-
-The format for the lease is "1h" or integer and then unit. The longest
-unit is hour.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_role_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_role_create.go
deleted file mode 100644
index 7e11657..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_role_create.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package mysql
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- _ "github.com/lib/pq"
-)
-
-func pathRoleCreate(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "creds/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleCreateRead,
- },
-
- HelpSynopsis: pathRoleCreateReadHelpSyn,
- HelpDescription: pathRoleCreateReadHelpDesc,
- }
-}
-
-func (b *backend) pathRoleCreateRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- // Get the role
- role, err := b.Role(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
- }
-
- // Determine if we have a lease
- lease, err := b.Lease(req.Storage)
- if err != nil {
- return nil, err
- }
- if lease == nil {
- lease = &configLease{}
- }
-
- // Generate our username and password. The username will be a
- // concatenation of:
- //
- // - the role name, truncated to role.rolenameLength (default 4)
- // - the token display name, truncated to role.displaynameLength (default 4)
- // - a UUID
- //
- // the entire contactenated string is then truncated to role.usernameLength,
- // which by default is 16 due to limitations in older but still-prevalant
- // versions of MySQL.
- roleName := name
- if len(roleName) > role.RolenameLength {
- roleName = roleName[:role.RolenameLength]
- }
- displayName := req.DisplayName
- if len(displayName) > role.DisplaynameLength {
- displayName = displayName[:role.DisplaynameLength]
- }
- userUUID, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
- username := fmt.Sprintf("%s-%s-%s", roleName, displayName, userUUID)
- if len(username) > role.UsernameLength {
- username = username[:role.UsernameLength]
- }
- password, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
-
- // Get our handle
- db, err := b.DB(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return nil, err
- }
- defer tx.Rollback()
-
- // Execute each query
- for _, query := range strutil.ParseArbitraryStringSlice(role.SQL, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := tx.Prepare(Query(query, map[string]string{
- "name": username,
- "password": password,
- }))
- if err != nil {
- return nil, err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return nil, err
- }
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return nil, err
- }
-
- // Return the secret
- resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
- "username": username,
- "password": password,
- }, map[string]interface{}{
- "username": username,
- "role": name,
- })
- resp.Secret.TTL = lease.Lease
- return resp, nil
-}
-
-const pathRoleCreateReadHelpSyn = `
-Request database credentials for a certain role.
-`
-
-const pathRoleCreateReadHelpDesc = `
-This path reads database credentials for a certain role. The
-database credentials will be generated on demand and will be automatically
-revoked when the lease is up.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_roles.go
deleted file mode 100644
index 97feccd..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/path_roles.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package mysql
-
-import (
- "fmt"
- "strings"
-
- _ "github.com/go-sql-driver/mysql"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathListRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func pathRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": {
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
-
- "sql": {
- Type: framework.TypeString,
- Description: "SQL string to create a user. See help for more info.",
- },
-
- "revocation_sql": {
- Type: framework.TypeString,
- Description: "SQL string to revoke a user. See help for more info.",
- },
-
- "username_length": {
- Type: framework.TypeInt,
- Description: "number of characters to truncate generated mysql usernames to (default 16)",
- Default: 16,
- },
-
- "rolename_length": {
- Type: framework.TypeInt,
- Description: "number of characters to truncate the rolename portion of generated mysql usernames to (default 4)",
- Default: 4,
- },
-
- "displayname_length": {
- Type: framework.TypeInt,
- Description: "number of characters to truncate the displayname portion of generated mysql usernames to (default 4)",
- Default: 4,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleRead,
- logical.UpdateOperation: b.pathRoleCreate,
- logical.DeleteOperation: b.pathRoleDelete,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
- entry, err := s.Get("role/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- // Set defaults to handle upgrade cases
- result := roleEntry{
- UsernameLength: 16,
- RolenameLength: 4,
- DisplaynameLength: 4,
- }
-
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathRoleDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("role/" + data.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathRoleRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- role, err := b.Role(req.Storage, data.Get("name").(string))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "sql": role.SQL,
- "revocation_sql": role.RevocationSQL,
- },
- }, nil
-}
-
-func (b *backend) pathRoleList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("role/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
-}
-
-func (b *backend) pathRoleCreate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- // Get our connection
- db, err := b.DB(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // Test the query by trying to prepare it
- sql := data.Get("sql").(string)
- for _, query := range strutil.ParseArbitraryStringSlice(sql, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := db.Prepare(Query(query, map[string]string{
- "name": "foo",
- "password": "bar",
- }))
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error testing query: %s", err)), nil
- }
- stmt.Close()
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
- SQL: sql,
- RevocationSQL: data.Get("revocation_sql").(string),
- UsernameLength: data.Get("username_length").(int),
- DisplaynameLength: data.Get("displayname_length").(int),
- RolenameLength: data.Get("rolename_length").(int),
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-type roleEntry struct {
- SQL string `json:"sql" mapstructure:"sql" structs:"sql"`
- RevocationSQL string `json:"revocation_sql" mapstructure:"revocation_sql" structs:"revocation_sql"`
- UsernameLength int `json:"username_length" mapstructure:"username_length" structs:"username_length"`
- DisplaynameLength int `json:"displayname_length" mapstructure:"displayname_length" structs:"displayname_length"`
- RolenameLength int `json:"rolename_length" mapstructure:"rolename_length" structs:"rolename_length"`
-}
-
-const pathRoleHelpSyn = `
-Manage the roles that can be created with this backend.
-`
-
-const pathRoleHelpDesc = `
-This path lets you manage the roles that can be created with this backend.
-
-The "sql" parameter customizes the SQL string used to create the role.
-This can be a sequence of SQL queries, each semi-colon seperated. Some
-substitution will be done to the SQL string for certain keys.
-The names of the variables must be surrounded by "{{" and "}}" to be replaced.
-
- * "name" - The random username generated for the DB user.
-
- * "password" - The random password generated for the DB user.
-
-Example of a decent SQL query to use:
-
- CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
- GRANT ALL ON db1.* TO '{{name}}'@'%';
-
-Note the above user would be able to access anything in db1. Please see the MySQL
-manual on the GRANT command to learn how to do more fine grained access.
-
-The "rolename_length" parameter determines how many characters of the role name
-will be used in creating the generated mysql username; the default is 4.
-
-The "displayname_length" parameter determines how many characters of the token
-display name will be used in creating the generated mysql username; the default
-is 4.
-
-The "username_length" parameter determines how many total characters the
-generated username (including the role name, token display name and the uuid
-portion) will be truncated to. Versions of MySQL prior to 5.7.8 are limited to
-16 characters total (see
-http://dev.mysql.com/doc/refman/5.7/en/user-names.html) so that is the default;
-for versions >=5.7.8 it is safe to increase this to 32.
-
-For best readability in MySQL process lists, we recommend using MySQL 5.7.8 or
-later, setting "username_length" to 32 and setting both "rolename_length" and
-"displayname_length" to 8. However due the the prevalence of older versions of
-MySQL in general deployment, the defaults are currently tuned for a
-username_length of 16.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go
deleted file mode 100644
index 27c3bf8..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package mysql
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const SecretCredsType = "creds"
-
-// defaultRevocationSQL is a default SQL statement for revoking a user. Revoking
-// permissions for the user is done before the drop, because MySQL explicitly
-// documents that open user connections will not be closed. By revoking all
-// grants, at least we ensure that the open connection is useless. Dropping the
-// user will only affect the next connection.
-const defaultRevocationSQL = `
-REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%';
-DROP USER '{{name}}'@'%'
-`
-
-func secretCreds(b *backend) *framework.Secret {
- return &framework.Secret{
- Type: SecretCredsType,
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username",
- },
-
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password",
- },
- },
-
- Renew: b.secretCredsRenew,
- Revoke: b.secretCredsRevoke,
- }
-}
-
-func (b *backend) secretCredsRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the lease information
- lease, err := b.Lease(req.Storage)
- if err != nil {
- return nil, err
- }
- if lease == nil {
- lease = &configLease{}
- }
-
- f := framework.LeaseExtend(lease.Lease, lease.LeaseMax, b.System())
- return f(req, d)
-}
-
-func (b *backend) secretCredsRevoke(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- var resp *logical.Response
-
- // Get the username from the internal data
- usernameRaw, ok := req.Secret.InternalData["username"]
- if !ok {
- return nil, fmt.Errorf("secret is missing username internal data")
- }
- username, ok := usernameRaw.(string)
- if !ok {
- return nil, fmt.Errorf("usernameRaw is not a string")
- }
-
- // Get our connection
- db, err := b.DB(req.Storage)
- if err != nil {
- return nil, err
- }
-
- roleName := ""
- roleNameRaw, ok := req.Secret.InternalData["role"]
- if ok {
- roleName = roleNameRaw.(string)
- }
-
- var role *roleEntry
- if roleName != "" {
- role, err = b.Role(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
- }
-
- // Use a default SQL statement for revocation if one cannot be fetched from the role
- revocationSQL := defaultRevocationSQL
-
- if role != nil && role.RevocationSQL != "" {
- revocationSQL = role.RevocationSQL
- } else {
- if resp == nil {
- resp = &logical.Response{}
- }
- resp.AddWarning(fmt.Sprintf("Role %q cannot be found. Using default SQL for revoking user.", roleName))
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return nil, err
- }
- defer tx.Rollback()
-
- for _, query := range strutil.ParseArbitraryStringSlice(revocationSQL, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- // This is not a prepared statement because not all commands are supported
- // 1295: This command is not supported in the prepared statement protocol yet
- // Reference https://mariadb.com/kb/en/mariadb/prepare-statement/
- query = strings.Replace(query, "{{name}}", username, -1)
- _, err = tx.Exec(query)
- if err != nil {
- return nil, err
- }
-
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return nil, err
- }
-
- return resp, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/util.go
deleted file mode 100644
index 313264f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/util.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package mysql
-
-import (
- "fmt"
- "strings"
-)
-
-// Query templates a query for us.
-func Query(tpl string, data map[string]string) string {
- for k, v := range data {
- tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
- }
-
- return tpl
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go
deleted file mode 100644
index bf5168d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package pki
-
-import (
- "strings"
- "sync"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// Factory creates a new backend implementing the logical.Backend interface
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-// Backend returns a new Backend framework struct
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(backendHelp),
-
- PathsSpecial: &logical.Paths{
- Unauthenticated: []string{
- "cert/*",
- "ca/pem",
- "ca_chain",
- "ca",
- "crl/pem",
- "crl",
- },
-
- LocalStorage: []string{
- "revoked/",
- "crl",
- "certs/",
- },
-
- Root: []string{
- "root",
- "root/sign-self-issued",
- },
- },
-
- Paths: []*framework.Path{
- pathListRoles(&b),
- pathRoles(&b),
- pathGenerateRoot(&b),
- pathSignIntermediate(&b),
- pathSignSelfIssued(&b),
- pathDeleteRoot(&b),
- pathGenerateIntermediate(&b),
- pathSetSignedIntermediate(&b),
- pathConfigCA(&b),
- pathConfigCRL(&b),
- pathConfigURLs(&b),
- pathSignVerbatim(&b),
- pathSign(&b),
- pathIssue(&b),
- pathRotateCRL(&b),
- pathFetchCA(&b),
- pathFetchCAChain(&b),
- pathFetchCRL(&b),
- pathFetchCRLViaCertPath(&b),
- pathFetchValid(&b),
- pathFetchListCerts(&b),
- pathRevoke(&b),
- pathTidy(&b),
- },
-
- Secrets: []*framework.Secret{
- secretCerts(&b),
- },
-
- BackendType: logical.TypeLogical,
- }
-
- b.crlLifetime = time.Hour * 72
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-
- crlLifetime time.Duration
- revokeStorageLock sync.RWMutex
-}
-
-const backendHelp = `
-The PKI backend dynamically generates X509 server and client certificates.
-
-After mounting this backend, configure the CA using the "pem_bundle" endpoint within
-the "config/" path.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go
deleted file mode 100644
index 7a32ec2..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go
+++ /dev/null
@@ -1,2841 +0,0 @@
-package pki
-
-import (
- "bytes"
- "crypto"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/rsa"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/base64"
- "encoding/pem"
- "fmt"
- "math"
- "math/big"
- mathrand "math/rand"
- "net"
- "os"
- "reflect"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/strutil"
- vaulthttp "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/mapstructure"
-)
-
-var (
- stepCount = 0
- serialUnderTest string
- parsedKeyUsageUnderTest int
-)
-
-// Performs basic tests on CA functionality
-// Uses the RSA CA key
-func TestBackend_RSAKey(t *testing.T) {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 32
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{},
- }
-
- stepCount = len(testCase.Steps)
-
- intdata := map[string]interface{}{}
- reqdata := map[string]interface{}{}
- testCase.Steps = append(testCase.Steps, generateCATestingSteps(t, rsaCACert, rsaCAKey, ecCACert, intdata, reqdata)...)
-
- logicaltest.Test(t, testCase)
-}
-
-// Performs basic tests on CA functionality
-// Uses the EC CA key
-func TestBackend_ECKey(t *testing.T) {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 32
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{},
- }
-
- stepCount = len(testCase.Steps)
-
- intdata := map[string]interface{}{}
- reqdata := map[string]interface{}{}
- testCase.Steps = append(testCase.Steps, generateCATestingSteps(t, ecCACert, ecCAKey, rsaCACert, intdata, reqdata)...)
-
- logicaltest.Test(t, testCase)
-}
-
-func TestBackend_CSRValues(t *testing.T) {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 32
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{},
- }
-
- stepCount = len(testCase.Steps)
-
- intdata := map[string]interface{}{}
- reqdata := map[string]interface{}{}
- testCase.Steps = append(testCase.Steps, generateCSRSteps(t, ecCACert, ecCAKey, intdata, reqdata)...)
-
- logicaltest.Test(t, testCase)
-}
-
-func TestBackend_URLsCRUD(t *testing.T) {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 32
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{},
- }
-
- stepCount = len(testCase.Steps)
-
- intdata := map[string]interface{}{}
- reqdata := map[string]interface{}{}
- testCase.Steps = append(testCase.Steps, generateURLSteps(t, ecCACert, ecCAKey, intdata, reqdata)...)
-
- logicaltest.Test(t, testCase)
-}
-
-// Generates and tests steps that walk through the various possibilities
-// of role flags to ensure that they are properly restricted
-// Uses the RSA CA key
-func TestBackend_RSARoles(t *testing.T) {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 32
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: map[string]interface{}{
- "pem_bundle": rsaCAKey + rsaCACert,
- },
- },
- },
- }
-
- stepCount = len(testCase.Steps)
-
- testCase.Steps = append(testCase.Steps, generateRoleSteps(t, false)...)
- if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 {
- for i, v := range testCase.Steps {
- fmt.Printf("Step %d:\n%+v\n\n", i+stepCount, v)
- }
- }
-
- logicaltest.Test(t, testCase)
-}
-
-// Generates and tests steps that walk through the various possibilities
-// of role flags to ensure that they are properly restricted
-// Uses the RSA CA key
-func TestBackend_RSARoles_CSR(t *testing.T) {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 32
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: map[string]interface{}{
- "pem_bundle": rsaCAKey + rsaCACert + rsaCAChain,
- },
- },
- },
- }
-
- stepCount = len(testCase.Steps)
-
- testCase.Steps = append(testCase.Steps, generateRoleSteps(t, true)...)
- if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 {
- for i, v := range testCase.Steps {
- fmt.Printf("Step %d:\n%+v\n\n", i+stepCount, v)
- }
- }
-
- logicaltest.Test(t, testCase)
-}
-
-// Generates and tests steps that walk through the various possibilities
-// of role flags to ensure that they are properly restricted
-// Uses the EC CA key
-func TestBackend_ECRoles(t *testing.T) {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 32
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: map[string]interface{}{
- "pem_bundle": ecCAKey + ecCACert,
- },
- },
- },
- }
-
- stepCount = len(testCase.Steps)
-
- testCase.Steps = append(testCase.Steps, generateRoleSteps(t, false)...)
- if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 {
- for i, v := range testCase.Steps {
- fmt.Printf("Step %d:\n%+v\n\n", i+stepCount, v)
- }
- }
-
- logicaltest.Test(t, testCase)
-}
-
-// Generates and tests steps that walk through the various possibilities
-// of role flags to ensure that they are properly restricted
-// Uses the EC CA key
-func TestBackend_ECRoles_CSR(t *testing.T) {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 32
- b, err := Factory(&logical.BackendConfig{
- Logger: nil,
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
- if err != nil {
- t.Fatalf("Unable to create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: map[string]interface{}{
- "pem_bundle": ecCAKey + ecCACert,
- },
- },
- },
- }
-
- stepCount = len(testCase.Steps)
-
- testCase.Steps = append(testCase.Steps, generateRoleSteps(t, true)...)
- if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 {
- for i, v := range testCase.Steps {
- fmt.Printf("Step %d:\n%+v\n\n", i+stepCount, v)
- }
- }
-
- logicaltest.Test(t, testCase)
-}
-
-// Performs some validity checking on the returned bundles
-func checkCertsAndPrivateKey(keyType string, key crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration, certBundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) {
- parsedCertBundle, err := certBundle.ToParsedCertBundle()
- if err != nil {
- return nil, fmt.Errorf("Error parsing cert bundle: %s", err)
- }
-
- if key != nil {
- switch keyType {
- case "rsa":
- parsedCertBundle.PrivateKeyType = certutil.RSAPrivateKey
- parsedCertBundle.PrivateKey = key
- parsedCertBundle.PrivateKeyBytes = x509.MarshalPKCS1PrivateKey(key.(*rsa.PrivateKey))
- case "ec":
- parsedCertBundle.PrivateKeyType = certutil.ECPrivateKey
- parsedCertBundle.PrivateKey = key
- parsedCertBundle.PrivateKeyBytes, err = x509.MarshalECPrivateKey(key.(*ecdsa.PrivateKey))
- if err != nil {
- return nil, fmt.Errorf("Error parsing EC key: %s", err)
- }
- }
- }
-
- switch {
- case parsedCertBundle.Certificate == nil:
- return nil, fmt.Errorf("Did not find a certificate in the cert bundle")
- case len(parsedCertBundle.CAChain) == 0 || parsedCertBundle.CAChain[0].Certificate == nil:
- return nil, fmt.Errorf("Did not find a CA in the cert bundle")
- case parsedCertBundle.PrivateKey == nil:
- return nil, fmt.Errorf("Did not find a private key in the cert bundle")
- case parsedCertBundle.PrivateKeyType == certutil.UnknownPrivateKey:
- return nil, fmt.Errorf("Could not figure out type of private key")
- }
-
- switch {
- case parsedCertBundle.PrivateKeyType == certutil.RSAPrivateKey && keyType != "rsa":
- fallthrough
- case parsedCertBundle.PrivateKeyType == certutil.ECPrivateKey && keyType != "ec":
- return nil, fmt.Errorf("Given key type does not match type found in bundle")
- }
-
- cert := parsedCertBundle.Certificate
-
- if usage != cert.KeyUsage {
- return nil, fmt.Errorf("Expected usage of %#v, got %#v; ext usage is %#v", usage, cert.KeyUsage, cert.ExtKeyUsage)
- }
-
- // There should only be one ext usage type, because only one is requested
- // in the tests
- if len(cert.ExtKeyUsage) != 1 {
- return nil, fmt.Errorf("Got wrong size key usage in generated cert; expected 1, values are %#v", cert.ExtKeyUsage)
- }
- switch extUsage {
- case x509.ExtKeyUsageEmailProtection:
- if cert.ExtKeyUsage[0] != x509.ExtKeyUsageEmailProtection {
- return nil, fmt.Errorf("Bad extended key usage")
- }
- case x509.ExtKeyUsageServerAuth:
- if cert.ExtKeyUsage[0] != x509.ExtKeyUsageServerAuth {
- return nil, fmt.Errorf("Bad extended key usage")
- }
- case x509.ExtKeyUsageClientAuth:
- if cert.ExtKeyUsage[0] != x509.ExtKeyUsageClientAuth {
- return nil, fmt.Errorf("Bad extended key usage")
- }
- case x509.ExtKeyUsageCodeSigning:
- if cert.ExtKeyUsage[0] != x509.ExtKeyUsageCodeSigning {
- return nil, fmt.Errorf("Bad extended key usage")
- }
- }
-
- // 40 seconds since we add 30 second slack for clock skew
- if math.Abs(float64(time.Now().Unix()-cert.NotBefore.Unix())) > 40 {
- return nil, fmt.Errorf("Validity period starts out of range")
- }
- if !cert.NotBefore.Before(time.Now().Add(-10 * time.Second)) {
- return nil, fmt.Errorf("Validity period not far enough in the past")
- }
-
- if math.Abs(float64(time.Now().Add(validity).Unix()-cert.NotAfter.Unix())) > 20 {
- return nil, fmt.Errorf("Certificate validity end: %s; expected within 20 seconds of %s", cert.NotAfter.Format(time.RFC3339), time.Now().Add(validity).Format(time.RFC3339))
- }
-
- return parsedCertBundle, nil
-}
-
-func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep {
- expected := urlEntries{
- IssuingCertificates: []string{
- "http://example.com/ca1",
- "http://example.com/ca2",
- },
- CRLDistributionPoints: []string{
- "http://example.com/crl1",
- "http://example.com/crl2",
- },
- OCSPServers: []string{
- "http://example.com/ocsp1",
- "http://example.com/ocsp2",
- },
- }
- csrTemplate := x509.CertificateRequest{
- Subject: pkix.Name{
- CommonName: "my@example.com",
- },
- }
-
- priv1024, _ := rsa.GenerateKey(rand.Reader, 1024)
- csr1024, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, priv1024)
- csrPem1024 := pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE REQUEST",
- Bytes: csr1024,
- })
-
- priv2048, _ := rsa.GenerateKey(rand.Reader, 2048)
- csr2048, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, priv2048)
- csrPem2048 := pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE REQUEST",
- Bytes: csr2048,
- })
-
- ret := []logicaltest.TestStep{
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/generate/exported",
- Data: map[string]interface{}{
- "common_name": "Root Cert",
- "ttl": "180h",
- },
- Check: func(resp *logical.Response) error {
- if resp.Secret != nil && resp.Secret.LeaseID != "" {
- return fmt.Errorf("root returned with a lease")
- }
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/urls",
- Data: map[string]interface{}{
- "issuing_certificates": strings.Join(expected.IssuingCertificates, ","),
- "crl_distribution_points": strings.Join(expected.CRLDistributionPoints, ","),
- "ocsp_servers": strings.Join(expected.OCSPServers, ","),
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "config/urls",
- Check: func(resp *logical.Response) error {
- if resp.Data == nil {
- return fmt.Errorf("no data returned")
- }
- var entries urlEntries
- err := mapstructure.Decode(resp.Data, &entries)
- if err != nil {
- return err
- }
-
- if !reflect.DeepEqual(entries, expected) {
- return fmt.Errorf("expected urls\n%#v\ndoes not match provided\n%#v\n", expected, entries)
- }
-
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/sign-intermediate",
- Data: map[string]interface{}{
- "common_name": "Intermediate Cert",
- "csr": string(csrPem1024),
- "format": "der",
- },
- ErrorOk: true,
- Check: func(resp *logical.Response) error {
- if !resp.IsError() {
- return fmt.Errorf("expected an error response but did not get one")
- }
- if !strings.Contains(resp.Data["error"].(string), "2048") {
- return fmt.Errorf("recieved an error but not about a 1024-bit key, error was: %s", resp.Data["error"].(string))
- }
-
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/sign-intermediate",
- Data: map[string]interface{}{
- "common_name": "Intermediate Cert",
- "csr": string(csrPem2048),
- "format": "der",
- },
- Check: func(resp *logical.Response) error {
- certString := resp.Data["certificate"].(string)
- if certString == "" {
- return fmt.Errorf("no certificate returned")
- }
- if resp.Secret != nil && resp.Secret.LeaseID != "" {
- return fmt.Errorf("signed intermediate returned with a lease")
- }
- certBytes, _ := base64.StdEncoding.DecodeString(certString)
- certs, err := x509.ParseCertificates(certBytes)
- if err != nil {
- return fmt.Errorf("returned cert cannot be parsed: %v", err)
- }
- if len(certs) != 1 {
- return fmt.Errorf("unexpected returned length of certificates: %d", len(certs))
- }
- cert := certs[0]
-
- switch {
- case !reflect.DeepEqual(expected.IssuingCertificates, cert.IssuingCertificateURL):
- return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL)
- case !reflect.DeepEqual(expected.CRLDistributionPoints, cert.CRLDistributionPoints):
- return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints)
- case !reflect.DeepEqual(expected.OCSPServers, cert.OCSPServer):
- return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.OCSPServers, cert.OCSPServer)
- case !reflect.DeepEqual([]string{"Intermediate Cert"}, cert.DNSNames):
- return fmt.Errorf("expected\n%#v\ngot\n%#v\n", []string{"Intermediate Cert"}, cert.DNSNames)
- }
-
- return nil
- },
- },
-
- // Same as above but exclude adding to sans
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/sign-intermediate",
- Data: map[string]interface{}{
- "common_name": "Intermediate Cert",
- "csr": string(csrPem2048),
- "format": "der",
- "exclude_cn_from_sans": true,
- },
- Check: func(resp *logical.Response) error {
- certString := resp.Data["certificate"].(string)
- if certString == "" {
- return fmt.Errorf("no certificate returned")
- }
- if resp.Secret != nil && resp.Secret.LeaseID != "" {
- return fmt.Errorf("signed intermediate returned with a lease")
- }
- certBytes, _ := base64.StdEncoding.DecodeString(certString)
- certs, err := x509.ParseCertificates(certBytes)
- if err != nil {
- return fmt.Errorf("returned cert cannot be parsed: %v", err)
- }
- if len(certs) != 1 {
- return fmt.Errorf("unexpected returned length of certificates: %d", len(certs))
- }
- cert := certs[0]
-
- switch {
- case !reflect.DeepEqual(expected.IssuingCertificates, cert.IssuingCertificateURL):
- return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL)
- case !reflect.DeepEqual(expected.CRLDistributionPoints, cert.CRLDistributionPoints):
- return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints)
- case !reflect.DeepEqual(expected.OCSPServers, cert.OCSPServer):
- return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.OCSPServers, cert.OCSPServer)
- case !reflect.DeepEqual([]string(nil), cert.DNSNames):
- return fmt.Errorf("expected\n%#v\ngot\n%#v\n", []string(nil), cert.DNSNames)
- }
-
- return nil
- },
- },
- }
- return ret
-}
-
-func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep {
- csrTemplate := x509.CertificateRequest{
- Subject: pkix.Name{
- Country: []string{"MyCountry"},
- PostalCode: []string{"MyPostalCode"},
- SerialNumber: "MySerialNumber",
- CommonName: "my@example.com",
- },
- DNSNames: []string{
- "name1.example.com",
- "name2.example.com",
- "name3.example.com",
- },
- EmailAddresses: []string{
- "name1@example.com",
- "name2@example.com",
- "name3@example.com",
- },
- IPAddresses: []net.IP{
- net.ParseIP("::ff:1:2:3:4"),
- net.ParseIP("::ff:5:6:7:8"),
- },
- }
-
- priv, _ := rsa.GenerateKey(rand.Reader, 2048)
- csr, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, priv)
- csrPem := pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE REQUEST",
- Bytes: csr,
- })
-
- ret := []logicaltest.TestStep{
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/generate/exported",
- Data: map[string]interface{}{
- "common_name": "Root Cert",
- "ttl": "180h",
- "max_path_length": 0,
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/sign-intermediate",
- Data: map[string]interface{}{
- "use_csr_values": true,
- "csr": string(csrPem),
- "format": "der",
- },
- ErrorOk: true,
- },
-
- logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "root",
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/generate/exported",
- Data: map[string]interface{}{
- "common_name": "Root Cert",
- "ttl": "180h",
- "max_path_length": 1,
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/sign-intermediate",
- Data: map[string]interface{}{
- "use_csr_values": true,
- "csr": string(csrPem),
- "format": "der",
- },
- Check: func(resp *logical.Response) error {
- certString := resp.Data["certificate"].(string)
- if certString == "" {
- return fmt.Errorf("no certificate returned")
- }
- certBytes, _ := base64.StdEncoding.DecodeString(certString)
- certs, err := x509.ParseCertificates(certBytes)
- if err != nil {
- return fmt.Errorf("returned cert cannot be parsed: %v", err)
- }
- if len(certs) != 1 {
- return fmt.Errorf("unexpected returned length of certificates: %d", len(certs))
- }
- cert := certs[0]
-
- if cert.MaxPathLen != 0 {
- return fmt.Errorf("max path length of %d does not match the requested of 3", cert.MaxPathLen)
- }
- if !cert.MaxPathLenZero {
- return fmt.Errorf("max path length zero is not set")
- }
-
- // We need to set these as they are filled in with unparsed values in the final cert
- csrTemplate.Subject.Names = cert.Subject.Names
- csrTemplate.Subject.ExtraNames = cert.Subject.ExtraNames
-
- switch {
- case !reflect.DeepEqual(cert.Subject, csrTemplate.Subject):
- return fmt.Errorf("cert subject\n%#v\ndoes not match csr subject\n%#v\n", cert.Subject, csrTemplate.Subject)
- case !reflect.DeepEqual(cert.DNSNames, csrTemplate.DNSNames):
- return fmt.Errorf("cert dns names\n%#v\ndoes not match csr dns names\n%#v\n", cert.DNSNames, csrTemplate.DNSNames)
- case !reflect.DeepEqual(cert.EmailAddresses, csrTemplate.EmailAddresses):
- return fmt.Errorf("cert email addresses\n%#v\ndoes not match csr email addresses\n%#v\n", cert.EmailAddresses, csrTemplate.EmailAddresses)
- case !reflect.DeepEqual(cert.IPAddresses, csrTemplate.IPAddresses):
- return fmt.Errorf("cert ip addresses\n%#v\ndoes not match csr ip addresses\n%#v\n", cert.IPAddresses, csrTemplate.IPAddresses)
- }
- return nil
- },
- },
- }
- return ret
-}
-
-// Generates steps to test out CA configuration -- certificates + CRL expiry,
-// and ensure that the certificates are readable after storing them
-func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep {
- setSerialUnderTest := func(req *logical.Request) error {
- req.Path = serialUnderTest
- return nil
- }
-
- ret := []logicaltest.TestStep{
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: map[string]interface{}{
- "pem_bundle": caKey + caCert,
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/crl",
- Data: map[string]interface{}{
- "expiry": "16h",
- },
- },
-
- // Ensure we can fetch it back via unauthenticated means, in various formats
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "cert/ca",
- Unauthenticated: true,
- Check: func(resp *logical.Response) error {
- if resp.Data["certificate"].(string) != caCert {
- return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", resp.Data["certificate"].(string), caCert)
- }
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "ca/pem",
- Unauthenticated: true,
- Check: func(resp *logical.Response) error {
- rawBytes := resp.Data["http_raw_body"].([]byte)
- if string(rawBytes) != caCert {
- return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", string(rawBytes), caCert)
- }
- if resp.Data["http_content_type"].(string) != "application/pkix-cert" {
- return fmt.Errorf("Expected application/pkix-cert as content-type, but got %s", resp.Data["http_content_type"].(string))
- }
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "ca",
- Unauthenticated: true,
- Check: func(resp *logical.Response) error {
- rawBytes := resp.Data["http_raw_body"].([]byte)
- pemBytes := pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE",
- Bytes: rawBytes,
- })
- if string(pemBytes) != caCert {
- return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", string(pemBytes), caCert)
- }
- if resp.Data["http_content_type"].(string) != "application/pkix-cert" {
- return fmt.Errorf("Expected application/pkix-cert as content-type, but got %s", resp.Data["http_content_type"].(string))
- }
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "config/crl",
- Check: func(resp *logical.Response) error {
- if resp.Data["expiry"].(string) != "16h" {
- return fmt.Errorf("CRL lifetimes do not match (got %s)", resp.Data["expiry"].(string))
- }
- return nil
- },
- },
-
- // Ensure that both parts of the PEM bundle are required
- // Here, just the cert
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: map[string]interface{}{
- "pem_bundle": caCert,
- },
- ErrorOk: true,
- },
-
- // Here, just the key
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: map[string]interface{}{
- "pem_bundle": caKey,
- },
- ErrorOk: true,
- },
-
- // Ensure we can fetch it back via unauthenticated means, in various formats
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "cert/ca",
- Unauthenticated: true,
- Check: func(resp *logical.Response) error {
- if resp.Data["certificate"].(string) != caCert {
- return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", resp.Data["certificate"].(string), caCert)
- }
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "ca/pem",
- Unauthenticated: true,
- Check: func(resp *logical.Response) error {
- rawBytes := resp.Data["http_raw_body"].([]byte)
- if string(rawBytes) != caCert {
- return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", string(rawBytes), caCert)
- }
- if resp.Data["http_content_type"].(string) != "application/pkix-cert" {
- return fmt.Errorf("Expected application/pkix-cert as content-type, but got %s", resp.Data["http_content_type"].(string))
- }
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "ca",
- Unauthenticated: true,
- Check: func(resp *logical.Response) error {
- rawBytes := resp.Data["http_raw_body"].([]byte)
- pemBytes := pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE",
- Bytes: rawBytes,
- })
- if string(pemBytes) != caCert {
- return fmt.Errorf("CA certificate:\n%s\ndoes not match original:\n%s\n", string(pemBytes), caCert)
- }
- if resp.Data["http_content_type"].(string) != "application/pkix-cert" {
- return fmt.Errorf("Expected application/pkix-cert as content-type, but got %s", resp.Data["http_content_type"].(string))
- }
- return nil
- },
- },
-
- // Test a bunch of generation stuff
- logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "root",
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/generate/exported",
- Data: map[string]interface{}{
- "common_name": "Root Cert",
- "ttl": "180h",
- },
- Check: func(resp *logical.Response) error {
- intdata["root"] = resp.Data["certificate"].(string)
- intdata["rootkey"] = resp.Data["private_key"].(string)
- reqdata["pem_bundle"] = intdata["root"].(string) + "\n" + intdata["rootkey"].(string)
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "intermediate/generate/exported",
- Data: map[string]interface{}{
- "common_name": "Intermediate Cert",
- },
- Check: func(resp *logical.Response) error {
- intdata["intermediatecsr"] = resp.Data["csr"].(string)
- intdata["intermediatekey"] = resp.Data["private_key"].(string)
- return nil
- },
- },
-
- // Re-load the root key in so we can sign it
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- delete(reqdata, "pem_bundle")
- delete(reqdata, "ttl")
- reqdata["csr"] = intdata["intermediatecsr"].(string)
- reqdata["common_name"] = "Intermediate Cert"
- reqdata["ttl"] = "10s"
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/sign-intermediate",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- delete(reqdata, "csr")
- delete(reqdata, "common_name")
- delete(reqdata, "ttl")
- intdata["intermediatecert"] = resp.Data["certificate"].(string)
- reqdata["serial_number"] = resp.Data["serial_number"].(string)
- reqdata["rsa_int_serial_number"] = resp.Data["serial_number"].(string)
- reqdata["certificate"] = resp.Data["certificate"].(string)
- reqdata["pem_bundle"] = intdata["intermediatekey"].(string) + "\n" + resp.Data["certificate"].(string)
- return nil
- },
- },
-
- // First load in this way to populate the private key
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- delete(reqdata, "pem_bundle")
- return nil
- },
- },
-
- // Now test setting the intermediate, signed CA cert
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "intermediate/set-signed",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- delete(reqdata, "certificate")
-
- serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
-
- return nil
- },
- },
-
- // We expect to find a zero revocation time
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- PreFlight: setSerialUnderTest,
- Check: func(resp *logical.Response) error {
- if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
- return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
- }
-
- if resp.Data["revocation_time"].(int64) != 0 {
- return fmt.Errorf("expected a zero revocation time")
- }
-
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "revoke",
- Data: reqdata,
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "crl",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- crlBytes := resp.Data["http_raw_body"].([]byte)
- certList, err := x509.ParseCRL(crlBytes)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- revokedList := certList.TBSCertList.RevokedCertificates
- if len(revokedList) != 1 {
- t.Fatalf("length of revoked list not 1; %d", len(revokedList))
- }
- revokedString := certutil.GetHexFormatted(revokedList[0].SerialNumber.Bytes(), ":")
- if revokedString != reqdata["serial_number"].(string) {
- t.Fatalf("got serial %s, expecting %s", revokedString, reqdata["serial_number"].(string))
- }
- delete(reqdata, "serial_number")
- return nil
- },
- },
-
- // Do it all again, with EC keys and DER format
- logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "root",
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/generate/exported",
- Data: map[string]interface{}{
- "common_name": "Root Cert",
- "ttl": "180h",
- "key_type": "ec",
- "key_bits": 384,
- "format": "der",
- },
- Check: func(resp *logical.Response) error {
- certBytes, _ := base64.StdEncoding.DecodeString(resp.Data["certificate"].(string))
- certPem := pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE",
- Bytes: certBytes,
- })
- keyBytes, _ := base64.StdEncoding.DecodeString(resp.Data["private_key"].(string))
- keyPem := pem.EncodeToMemory(&pem.Block{
- Type: "EC PRIVATE KEY",
- Bytes: keyBytes,
- })
- intdata["root"] = string(certPem)
- intdata["rootkey"] = string(keyPem)
- reqdata["pem_bundle"] = string(certPem) + "\n" + string(keyPem)
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "intermediate/generate/exported",
- Data: map[string]interface{}{
- "format": "der",
- "key_type": "ec",
- "key_bits": 384,
- "common_name": "Intermediate Cert",
- },
- Check: func(resp *logical.Response) error {
- csrBytes, _ := base64.StdEncoding.DecodeString(resp.Data["csr"].(string))
- csrPem := pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE REQUEST",
- Bytes: csrBytes,
- })
- keyBytes, _ := base64.StdEncoding.DecodeString(resp.Data["private_key"].(string))
- keyPem := pem.EncodeToMemory(&pem.Block{
- Type: "EC PRIVATE KEY",
- Bytes: keyBytes,
- })
- intdata["intermediatecsr"] = string(csrPem)
- intdata["intermediatekey"] = string(keyPem)
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- delete(reqdata, "pem_bundle")
- delete(reqdata, "ttl")
- reqdata["csr"] = intdata["intermediatecsr"].(string)
- reqdata["common_name"] = "Intermediate Cert"
- reqdata["ttl"] = "10s"
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "root/sign-intermediate",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- delete(reqdata, "csr")
- delete(reqdata, "common_name")
- delete(reqdata, "ttl")
- intdata["intermediatecert"] = resp.Data["certificate"].(string)
- reqdata["serial_number"] = resp.Data["serial_number"].(string)
- reqdata["ec_int_serial_number"] = resp.Data["serial_number"].(string)
- reqdata["certificate"] = resp.Data["certificate"].(string)
- reqdata["pem_bundle"] = intdata["intermediatekey"].(string) + "\n" + resp.Data["certificate"].(string)
- return nil
- },
- },
-
- // First load in this way to populate the private key
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- delete(reqdata, "pem_bundle")
- return nil
- },
- },
-
- // Now test setting the intermediate, signed CA cert
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "intermediate/set-signed",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- delete(reqdata, "certificate")
-
- serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string)
-
- return nil
- },
- },
-
- // We expect to find a zero revocation time
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- PreFlight: setSerialUnderTest,
- Check: func(resp *logical.Response) error {
- if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
- return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
- }
-
- if resp.Data["revocation_time"].(int64) != 0 {
- return fmt.Errorf("expected a zero revocation time")
- }
-
- return nil
- },
- },
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "revoke",
- Data: reqdata,
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "crl",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- crlBytes := resp.Data["http_raw_body"].([]byte)
- certList, err := x509.ParseCRL(crlBytes)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- revokedList := certList.TBSCertList.RevokedCertificates
- if len(revokedList) != 2 {
- t.Fatalf("length of revoked list not 2; %d", len(revokedList))
- }
- found := false
- for _, revEntry := range revokedList {
- revokedString := certutil.GetHexFormatted(revEntry.SerialNumber.Bytes(), ":")
- if revokedString == reqdata["serial_number"].(string) {
- found = true
- }
- }
- if !found {
- t.Fatalf("did not find %s in CRL", reqdata["serial_number"].(string))
- }
- delete(reqdata, "serial_number")
-
- serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
-
- return nil
- },
- },
-
- // Make sure both serial numbers we expect to find are found
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- PreFlight: setSerialUnderTest,
- Check: func(resp *logical.Response) error {
- if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
- return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
- }
-
- if resp.Data["revocation_time"].(int64) == 0 {
- return fmt.Errorf("expected a non-zero revocation time")
- }
-
- serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string)
-
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- PreFlight: setSerialUnderTest,
- Check: func(resp *logical.Response) error {
- if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
- return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
- }
-
- if resp.Data["revocation_time"].(int64) == 0 {
- return fmt.Errorf("expected a non-zero revocation time")
- }
-
- // Give time for the certificates to pass the safety buffer
- t.Logf("Sleeping for 15 seconds to allow safety buffer time to pass before testing tidying")
- time.Sleep(15 * time.Second)
-
- serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
-
- return nil
- },
- },
-
- // This shouldn't do anything since the safety buffer is too long
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "tidy",
- Data: map[string]interface{}{
- "safety_buffer": "3h",
- "tidy_cert_store": true,
- "tidy_revocation_list": true,
- },
- },
-
- // We still expect to find these
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- PreFlight: setSerialUnderTest,
- Check: func(resp *logical.Response) error {
- if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
- return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
- }
-
- serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string)
-
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- PreFlight: setSerialUnderTest,
- Check: func(resp *logical.Response) error {
- if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
- return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
- }
-
- serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
-
- return nil
- },
- },
-
- // Both should appear in the CRL
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "crl",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- crlBytes := resp.Data["http_raw_body"].([]byte)
- certList, err := x509.ParseCRL(crlBytes)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- revokedList := certList.TBSCertList.RevokedCertificates
- if len(revokedList) != 2 {
- t.Fatalf("length of revoked list not 2; %d", len(revokedList))
- }
- foundRsa := false
- foundEc := false
- for _, revEntry := range revokedList {
- revokedString := certutil.GetHexFormatted(revEntry.SerialNumber.Bytes(), ":")
- if revokedString == reqdata["rsa_int_serial_number"].(string) {
- foundRsa = true
- }
- if revokedString == reqdata["ec_int_serial_number"].(string) {
- foundEc = true
- }
- }
- if !foundRsa || !foundEc {
- t.Fatalf("did not find an expected entry in CRL")
- }
-
- return nil
- },
- },
-
- // This shouldn't do anything since the boolean values default to false
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "tidy",
- Data: map[string]interface{}{
- "safety_buffer": "1s",
- },
- },
-
- // We still expect to find these
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- PreFlight: setSerialUnderTest,
- Check: func(resp *logical.Response) error {
- if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
- return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
- }
-
- serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string)
-
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- PreFlight: setSerialUnderTest,
- Check: func(resp *logical.Response) error {
- if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
- return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
- }
-
- serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
-
- return nil
- },
- },
-
- // This should remove the values since the safety buffer is short
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "tidy",
- Data: map[string]interface{}{
- "safety_buffer": "1s",
- "tidy_cert_store": true,
- "tidy_revocation_list": true,
- },
- },
-
- // We do *not* expect to find these
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- PreFlight: setSerialUnderTest,
- Check: func(resp *logical.Response) error {
- if resp != nil {
- return fmt.Errorf("expected no response")
- }
-
- serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string)
-
- return nil
- },
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- PreFlight: setSerialUnderTest,
- Check: func(resp *logical.Response) error {
- if resp != nil {
- return fmt.Errorf("expected no response")
- }
-
- serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
-
- return nil
- },
- },
-
- // Both should be gone from the CRL
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "crl",
- Data: reqdata,
- Check: func(resp *logical.Response) error {
- crlBytes := resp.Data["http_raw_body"].([]byte)
- certList, err := x509.ParseCRL(crlBytes)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- revokedList := certList.TBSCertList.RevokedCertificates
- if len(revokedList) != 0 {
- t.Fatalf("length of revoked list not 0; %d", len(revokedList))
- }
-
- return nil
- },
- },
- }
-
- return ret
-}
-
-// Generates steps to test out various role permutations
-func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep {
- roleVals := roleEntry{
- MaxTTL: "12h",
- KeyType: "rsa",
- KeyBits: 2048,
- }
- issueVals := certutil.IssueData{}
- ret := []logicaltest.TestStep{}
-
- roleTestStep := logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/test",
- }
- var issueTestStep logicaltest.TestStep
- if useCSRs {
- issueTestStep = logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "sign/test",
- }
- } else {
- issueTestStep = logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "issue/test",
- }
- }
-
- generatedRSAKeys := map[int]crypto.Signer{}
- generatedECKeys := map[int]crypto.Signer{}
-
- /*
- // For the number of tests being run, a seed of 1 has been tested
- // to hit all of the various values below. However, for normal
- // testing we use a randomized time for maximum fuzziness.
- */
- var seed int64 = 1
- fixedSeed := os.Getenv("VAULT_PKITESTS_FIXED_SEED")
- if len(fixedSeed) == 0 {
- seed = time.Now().UnixNano()
- } else {
- var err error
- seed, err = strconv.ParseInt(fixedSeed, 10, 64)
- if err != nil {
- t.Fatalf("error parsing fixed seed of %s: %v", fixedSeed, err)
- }
- }
- mathRand := mathrand.New(mathrand.NewSource(seed))
- t.Logf("seed under test: %v", seed)
-
- // Used by tests not toggling common names to turn off the behavior of random key bit fuzziness
- keybitSizeRandOff := false
-
- genericErrorOkCheck := func(resp *logical.Response) error {
- if resp.IsError() {
- return nil
- }
- return fmt.Errorf("Expected an error, but did not seem to get one")
- }
-
- // Adds tests with the currently configured issue/role information
- addTests := func(testCheck logicaltest.TestCheckFunc) {
- stepCount++
- //t.Logf("test step %d\nrole vals: %#v\n", stepCount, roleVals)
- stepCount++
- //t.Logf("test step %d\nissue vals: %#v\n", stepCount, issueTestStep)
- roleTestStep.Data = structs.New(roleVals).Map()
- roleTestStep.Data["generate_lease"] = false
- ret = append(ret, roleTestStep)
- issueTestStep.Data = structs.New(issueVals).Map()
- switch {
- case issueTestStep.ErrorOk:
- issueTestStep.Check = genericErrorOkCheck
- case testCheck != nil:
- issueTestStep.Check = testCheck
- default:
- issueTestStep.Check = nil
- }
- ret = append(ret, issueTestStep)
- }
-
- getOuCheck := func(role roleEntry) logicaltest.TestCheckFunc {
- var certBundle certutil.CertBundle
- return func(resp *logical.Response) error {
- err := mapstructure.Decode(resp.Data, &certBundle)
- if err != nil {
- return err
- }
- parsedCertBundle, err := certBundle.ToParsedCertBundle()
- if err != nil {
- return fmt.Errorf("Error checking generated certificate: %s", err)
- }
- cert := parsedCertBundle.Certificate
-
- expected := strutil.ParseDedupLowercaseAndSortStrings(role.OU, ",")
- if !reflect.DeepEqual(cert.Subject.OrganizationalUnit, expected) {
- return fmt.Errorf("Error: returned certificate has OU of %s but %s was specified in the role.", cert.Subject.OrganizationalUnit, expected)
- }
- return nil
- }
- }
-
- getOrganizationCheck := func(role roleEntry) logicaltest.TestCheckFunc {
- var certBundle certutil.CertBundle
- return func(resp *logical.Response) error {
- err := mapstructure.Decode(resp.Data, &certBundle)
- if err != nil {
- return err
- }
- parsedCertBundle, err := certBundle.ToParsedCertBundle()
- if err != nil {
- return fmt.Errorf("Error checking generated certificate: %s", err)
- }
- cert := parsedCertBundle.Certificate
-
- expected := strutil.ParseDedupLowercaseAndSortStrings(role.Organization, ",")
- if !reflect.DeepEqual(cert.Subject.Organization, expected) {
- return fmt.Errorf("Error: returned certificate has Organization of %s but %s was specified in the role.", cert.Subject.Organization, expected)
- }
- return nil
- }
- }
-
- // Returns a TestCheckFunc that performs various validity checks on the
- // returned certificate information, mostly within checkCertsAndPrivateKey
- getCnCheck := func(name string, role roleEntry, key crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration) logicaltest.TestCheckFunc {
- var certBundle certutil.CertBundle
- return func(resp *logical.Response) error {
- err := mapstructure.Decode(resp.Data, &certBundle)
- if err != nil {
- return err
- }
- parsedCertBundle, err := checkCertsAndPrivateKey(role.KeyType, key, usage, extUsage, validity, &certBundle)
- if err != nil {
- return fmt.Errorf("Error checking generated certificate: %s", err)
- }
- cert := parsedCertBundle.Certificate
- if cert.Subject.CommonName != name {
- return fmt.Errorf("Error: returned certificate has CN of %s but %s was requested", cert.Subject.CommonName, name)
- }
- if strings.Contains(cert.Subject.CommonName, "@") {
- if len(cert.DNSNames) != 0 || len(cert.EmailAddresses) != 1 {
- return fmt.Errorf("Error: found more than one DNS SAN or not one Email SAN but only one was requested, cert.DNSNames = %#v, cert.EmailAddresses = %#v", cert.DNSNames, cert.EmailAddresses)
- }
- } else {
- if len(cert.DNSNames) != 1 || len(cert.EmailAddresses) != 0 {
- return fmt.Errorf("Error: found more than one Email SAN or not one DNS SAN but only one was requested, cert.DNSNames = %#v, cert.EmailAddresses = %#v", cert.DNSNames, cert.EmailAddresses)
- }
- }
- var retName string
- if len(cert.DNSNames) > 0 {
- retName = cert.DNSNames[0]
- }
- if len(cert.EmailAddresses) > 0 {
- retName = cert.EmailAddresses[0]
- }
- if retName != name {
- return fmt.Errorf("Error: returned certificate has a DNS SAN of %s but %s was requested", retName, name)
- }
- return nil
- }
- }
-
- // Common names to test with the various role flags toggled
- var commonNames struct {
- Localhost bool `structs:"localhost"`
- BareDomain bool `structs:"example.com"`
- SecondDomain bool `structs:"foobar.com"`
- SubDomain bool `structs:"foo.example.com"`
- Wildcard bool `structs:"*.example.com"`
- SubSubdomain bool `structs:"foo.bar.example.com"`
- SubSubdomainWildcard bool `structs:"*.bar.example.com"`
- GlobDomain bool `structs:"fooexample.com"`
- NonHostname bool `structs:"daɪˈɛrɨsɨs"`
- AnyHost bool `structs:"porkslap.beer"`
- }
-
- // Adds a series of tests based on the current selection of
- // allowed common names; contains some (seeded) randomness
- //
- // This allows for a variety of common names to be tested in various
- // combinations with allowed toggles of the role
- addCnTests := func() {
- cnMap := structs.New(commonNames).Map()
- for name, allowedInt := range cnMap {
- roleVals.KeyType = "rsa"
- roleVals.KeyBits = 2048
- if mathRand.Int()%2 == 1 {
- roleVals.KeyType = "ec"
- roleVals.KeyBits = 224
- }
-
- roleVals.ServerFlag = false
- roleVals.ClientFlag = false
- roleVals.CodeSigningFlag = false
- roleVals.EmailProtectionFlag = false
-
- var usage string
- if mathRand.Int()%2 == 1 {
- usage = usage + ",DigitalSignature"
- }
- if mathRand.Int()%2 == 1 {
- usage = usage + ",ContentCoMmitment"
- }
- if mathRand.Int()%2 == 1 {
- usage = usage + ",KeyEncipherment"
- }
- if mathRand.Int()%2 == 1 {
- usage = usage + ",DataEncipherment"
- }
- if mathRand.Int()%2 == 1 {
- usage = usage + ",KeyAgreemEnt"
- }
- if mathRand.Int()%2 == 1 {
- usage = usage + ",CertSign"
- }
- if mathRand.Int()%2 == 1 {
- usage = usage + ",CRLSign"
- }
- if mathRand.Int()%2 == 1 {
- usage = usage + ",EncipherOnly"
- }
- if mathRand.Int()%2 == 1 {
- usage = usage + ",DecipherOnly"
- }
-
- roleVals.KeyUsage = usage
- parsedKeyUsage := parseKeyUsages(roleVals.KeyUsage)
- if parsedKeyUsage == 0 && usage != "" {
- panic("parsed key usages was zero")
- }
- parsedKeyUsageUnderTest = parsedKeyUsage
-
- var extUsage x509.ExtKeyUsage
- i := mathRand.Int() % 4
- switch {
- case i == 0:
- extUsage = x509.ExtKeyUsageEmailProtection
- roleVals.EmailProtectionFlag = true
- case i == 1:
- extUsage = x509.ExtKeyUsageServerAuth
- roleVals.ServerFlag = true
- case i == 2:
- extUsage = x509.ExtKeyUsageClientAuth
- roleVals.ClientFlag = true
- default:
- extUsage = x509.ExtKeyUsageCodeSigning
- roleVals.CodeSigningFlag = true
- }
-
- allowed := allowedInt.(bool)
- issueVals.CommonName = name
- if roleVals.EmailProtectionFlag {
- if !strings.HasPrefix(name, "*") {
- issueVals.CommonName = "user@" + issueVals.CommonName
- }
- }
-
- issueTestStep.ErrorOk = !allowed
-
- validity, _ := time.ParseDuration(roleVals.MaxTTL)
-
- var testBitSize int
-
- if useCSRs {
- rsaKeyBits := []int{2048, 4096}
- ecKeyBits := []int{224, 256, 384, 521}
-
- var privKey crypto.Signer
- var ok bool
- switch roleVals.KeyType {
- case "rsa":
- roleVals.KeyBits = rsaKeyBits[mathRand.Int()%2]
-
- // If we don't expect an error already, randomly choose a
- // key size and expect an error if it's less than the role
- // setting
- testBitSize = roleVals.KeyBits
- if !keybitSizeRandOff && !issueTestStep.ErrorOk {
- testBitSize = rsaKeyBits[mathRand.Int()%2]
- }
-
- if testBitSize < roleVals.KeyBits {
- issueTestStep.ErrorOk = true
- }
-
- privKey, ok = generatedRSAKeys[testBitSize]
- if !ok {
- privKey, _ = rsa.GenerateKey(rand.Reader, testBitSize)
- generatedRSAKeys[testBitSize] = privKey
- }
-
- case "ec":
- roleVals.KeyBits = ecKeyBits[mathRand.Int()%4]
-
- var curve elliptic.Curve
-
- // If we don't expect an error already, randomly choose a
- // key size and expect an error if it's less than the role
- // setting
- testBitSize = roleVals.KeyBits
- if !keybitSizeRandOff && !issueTestStep.ErrorOk {
- testBitSize = ecKeyBits[mathRand.Int()%4]
- }
-
- switch testBitSize {
- case 224:
- curve = elliptic.P224()
- case 256:
- curve = elliptic.P256()
- case 384:
- curve = elliptic.P384()
- case 521:
- curve = elliptic.P521()
- }
-
- if curve.Params().BitSize < roleVals.KeyBits {
- issueTestStep.ErrorOk = true
- }
-
- privKey, ok = generatedECKeys[testBitSize]
- if !ok {
- privKey, _ = ecdsa.GenerateKey(curve, rand.Reader)
- generatedECKeys[testBitSize] = privKey
- }
- }
- templ := &x509.CertificateRequest{
- Subject: pkix.Name{
- CommonName: issueVals.CommonName,
- },
- }
- csr, err := x509.CreateCertificateRequest(rand.Reader, templ, privKey)
- if err != nil {
- t.Fatalf("Error creating certificate request: %s", err)
- }
- block := pem.Block{
- Type: "CERTIFICATE REQUEST",
- Bytes: csr,
- }
- issueVals.CSR = strings.TrimSpace(string(pem.EncodeToMemory(&block)))
-
- addTests(getCnCheck(issueVals.CommonName, roleVals, privKey, x509.KeyUsage(parsedKeyUsage), extUsage, validity))
- } else {
- addTests(getCnCheck(issueVals.CommonName, roleVals, nil, x509.KeyUsage(parsedKeyUsage), extUsage, validity))
- }
- }
- }
-
- // Common Name tests
- {
- // common_name not provided
- issueVals.CommonName = ""
- issueTestStep.ErrorOk = true
- addTests(nil)
-
- // Nothing is allowed
- addCnTests()
-
- roleVals.AllowLocalhost = true
- commonNames.Localhost = true
- addCnTests()
-
- roleVals.AllowedDomains = "foobar.com"
- addCnTests()
-
- roleVals.AllowedDomains = "example.com"
- roleVals.AllowSubdomains = true
- commonNames.SubDomain = true
- commonNames.Wildcard = true
- commonNames.SubSubdomain = true
- commonNames.SubSubdomainWildcard = true
- addCnTests()
-
- roleVals.AllowedDomains = "foobar.com,example.com"
- commonNames.SecondDomain = true
- roleVals.AllowBareDomains = true
- commonNames.BareDomain = true
- addCnTests()
-
- roleVals.AllowedDomains = "foobar.com,*example.com"
- roleVals.AllowGlobDomains = true
- commonNames.GlobDomain = true
- addCnTests()
-
- roleVals.AllowAnyName = true
- roleVals.EnforceHostnames = true
- commonNames.AnyHost = true
- addCnTests()
-
- roleVals.EnforceHostnames = false
- commonNames.NonHostname = true
- addCnTests()
-
- // Ensure that we end up with acceptable key sizes since they won't be
- // toggled any longer
- keybitSizeRandOff = true
- addCnTests()
- }
- // OU tests
- {
- roleVals.OU = "foo"
- addTests(getOuCheck(roleVals))
-
- roleVals.OU = "foo,bar"
- addTests(getOuCheck(roleVals))
- }
- // Organization tests
- {
- roleVals.Organization = "system:masters"
- addTests(getOrganizationCheck(roleVals))
-
- roleVals.Organization = "foo,bar"
- addTests(getOrganizationCheck(roleVals))
- }
- // IP SAN tests
- {
- roleVals.UseCSRSANs = true
- roleVals.AllowIPSANs = false
- issueTestStep.ErrorOk = false
- addTests(nil)
-
- roleVals.UseCSRSANs = false
- issueVals.IPSANs = "127.0.0.1,::1"
- issueTestStep.ErrorOk = true
- addTests(nil)
-
- roleVals.AllowIPSANs = true
- issueTestStep.ErrorOk = false
- addTests(nil)
-
- issueVals.IPSANs = "foobar"
- issueTestStep.ErrorOk = true
- addTests(nil)
-
- issueTestStep.ErrorOk = false
- issueVals.IPSANs = ""
- }
-
- // Lease tests
- {
- roleTestStep.ErrorOk = true
- roleVals.Lease = ""
- roleVals.MaxTTL = ""
- addTests(nil)
-
- roleVals.Lease = "12h"
- roleVals.MaxTTL = "6h"
- addTests(nil)
-
- roleTestStep.ErrorOk = false
- roleVals.TTL = ""
- roleVals.MaxTTL = "12h"
- }
-
- // Listing test
- ret = append(ret, logicaltest.TestStep{
- Operation: logical.ListOperation,
- Path: "roles/",
- Check: func(resp *logical.Response) error {
- if resp.Data == nil {
- return fmt.Errorf("nil data")
- }
-
- keysRaw, ok := resp.Data["keys"]
- if !ok {
- return fmt.Errorf("no keys found")
- }
-
- keys, ok := keysRaw.([]string)
- if !ok {
- return fmt.Errorf("could not convert keys to a string list")
- }
-
- if len(keys) != 1 {
- return fmt.Errorf("unexpected keys length of %d", len(keys))
- }
-
- if keys[0] != "test" {
- return fmt.Errorf("unexpected key value of %s", keys[0])
- }
-
- return nil
- },
- })
-
- return ret
-}
-
-func TestBackend_PathFetchCertList(t *testing.T) {
- // create the backend
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b := Backend()
- err := b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // generate root
- rootData := map[string]interface{}{
- "common_name": "test.com",
- "ttl": "6h",
- }
-
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "root/generate/internal",
- Storage: storage,
- Data: rootData,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to generate root, %#v", resp)
- }
- if err != nil {
- t.Fatal(err)
- }
-
- // config urls
- urlsData := map[string]interface{}{
- "issuing_certificates": "http://127.0.0.1:8200/v1/pki/ca",
- "crl_distribution_points": "http://127.0.0.1:8200/v1/pki/crl",
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/urls",
- Storage: storage,
- Data: urlsData,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to config urls, %#v", resp)
- }
- if err != nil {
- t.Fatal(err)
- }
-
- // create a role entry
- roleData := map[string]interface{}{
- "allowed_domains": "test.com",
- "allow_subdomains": "true",
- "max_ttl": "4h",
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/test-example",
- Storage: storage,
- Data: roleData,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create a role, %#v", resp)
- }
- if err != nil {
- t.Fatal(err)
- }
-
- // issue some certs
- i := 1
- for i < 10 {
- certData := map[string]interface{}{
- "common_name": "example.test.com",
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "issue/test-example",
- Storage: storage,
- Data: certData,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to issue a cert, %#v", resp)
- }
- if err != nil {
- t.Fatal(err)
- }
-
- i = i + 1
- }
-
- // list certs
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ListOperation,
- Path: "certs",
- Storage: storage,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to list certs, %#v", resp)
- }
- if err != nil {
- t.Fatal(err)
- }
- // check that the root and 9 additional certs are all listed
- if len(resp.Data["keys"].([]string)) != 10 {
- t.Fatalf("failed to list all 10 certs")
- }
-
- // list certs/
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ListOperation,
- Path: "certs/",
- Storage: storage,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to list certs, %#v", resp)
- }
- if err != nil {
- t.Fatal(err)
- }
- // check that the root and 9 additional certs are all listed
- if len(resp.Data["keys"].([]string)) != 10 {
- t.Fatalf("failed to list all 10 certs")
- }
-}
-
-func TestBackend_SignVerbatim(t *testing.T) {
- // create the backend
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b := Backend()
- err := b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // generate root
- rootData := map[string]interface{}{
- "common_name": "test.com",
- "ttl": "172800",
- }
-
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "root/generate/internal",
- Storage: storage,
- Data: rootData,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to generate root, %#v", *resp)
- }
- if err != nil {
- t.Fatal(err)
- }
-
- // create a CSR and key
- key, err := rsa.GenerateKey(rand.Reader, 2048)
- if err != nil {
- t.Fatal(err)
- }
- csrReq := &x509.CertificateRequest{
- Subject: pkix.Name{
- CommonName: "foo.bar.com",
- },
- }
- csr, err := x509.CreateCertificateRequest(rand.Reader, csrReq, key)
- if err != nil {
- t.Fatal(err)
- }
- if len(csr) == 0 {
- t.Fatal("generated csr is empty")
- }
- pemCSR := pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE REQUEST",
- Bytes: csr,
- })
- if len(pemCSR) == 0 {
- t.Fatal("pem csr is empty")
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sign-verbatim",
- Storage: storage,
- Data: map[string]interface{}{
- "csr": string(pemCSR),
- },
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to sign-verbatim basic CSR: %#v", *resp)
- }
- if err != nil {
- t.Fatal(err)
- }
- if resp.Secret != nil {
- t.Fatal("secret is not nil")
- }
-
- // create a role entry; we use this to check that sign-verbatim when used with a role is still honoring TTLs
- roleData := map[string]interface{}{
- "ttl": "4h",
- "max_ttl": "8h",
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/test",
- Storage: storage,
- Data: roleData,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create a role, %#v", *resp)
- }
- if err != nil {
- t.Fatal(err)
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sign-verbatim/test",
- Storage: storage,
- Data: map[string]interface{}{
- "csr": string(pemCSR),
- "ttl": "5h",
- },
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to sign-verbatim ttl'd CSR: %#v", *resp)
- }
- if err != nil {
- t.Fatal(err)
- }
- if resp.Secret != nil {
- t.Fatal("got a lease when we should not have")
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sign-verbatim/test",
- Storage: storage,
- Data: map[string]interface{}{
- "csr": string(pemCSR),
- "ttl": "12h",
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil && resp.IsError() {
- t.Fatalf(resp.Error().Error())
- }
- if resp.Data == nil || resp.Data["certificate"] == nil {
- t.Fatal("did not get expected data")
- }
- certString := resp.Data["certificate"].(string)
- block, _ := pem.Decode([]byte(certString))
- if block == nil {
- t.Fatal("nil pem block")
- }
- certs, err := x509.ParseCertificates(block.Bytes)
- if err != nil {
- t.Fatal(err)
- }
- if len(certs) != 1 {
- t.Fatalf("expected a single cert, got %d", len(certs))
- }
- cert := certs[0]
- if math.Abs(float64(time.Now().Add(12*time.Hour).Unix()-cert.NotAfter.Unix())) < 10 {
- t.Fatalf("sign-verbatim did not properly cap validiaty period on signed CSR")
- }
-
- // now check that if we set generate-lease it takes it from the role and the TTLs match
- roleData = map[string]interface{}{
- "ttl": "4h",
- "max_ttl": "8h",
- "generate_lease": true,
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/test",
- Storage: storage,
- Data: roleData,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to create a role, %#v", *resp)
- }
- if err != nil {
- t.Fatal(err)
- }
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sign-verbatim/test",
- Storage: storage,
- Data: map[string]interface{}{
- "csr": string(pemCSR),
- "ttl": "5h",
- },
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to sign-verbatim role-leased CSR: %#v", *resp)
- }
- if err != nil {
- t.Fatal(err)
- }
- if resp.Secret == nil {
- t.Fatalf("secret is nil, response is %#v", *resp)
- }
- if math.Abs(float64(resp.Secret.TTL-(5*time.Hour))) > float64(5*time.Hour) {
- t.Fatalf("ttl not default; wanted %v, got %v", b.System().DefaultLeaseTTL(), resp.Secret.TTL)
- }
-}
-
-func TestBackend_Root_Idempotentcy(t *testing.T) {
- coreConfig := &vault.CoreConfig{
- LogicalBackends: map[string]logical.Factory{
- "pki": Factory,
- },
- }
- cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
- HandlerFunc: vaulthttp.Handler,
- })
- cluster.Start()
- defer cluster.Cleanup()
-
- client := cluster.Cores[0].Client
- var err error
- err = client.Sys().Mount("pki", &api.MountInput{
- Type: "pki",
- Config: api.MountConfigInput{
- DefaultLeaseTTL: "16h",
- MaxLeaseTTL: "32h",
- },
- })
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{
- "common_name": "myvault.com",
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected ca info")
- }
- resp, err = client.Logical().Read("pki/cert/ca_chain")
- if err != nil {
- t.Fatalf("error reading ca_chain: %v", err)
- }
-
- r1Data := resp.Data
-
- // Try again, make sure it's a 204 and same CA
- resp, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{
- "common_name": "myvault.com",
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil {
- t.Fatal("expected no ca info")
- }
- resp, err = client.Logical().Read("pki/cert/ca_chain")
- if err != nil {
- t.Fatalf("error reading ca_chain: %v", err)
- }
- r2Data := resp.Data
- if !reflect.DeepEqual(r1Data, r2Data) {
- t.Fatal("got different ca certs")
- }
-
- resp, err = client.Logical().Delete("pki/root")
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil {
- t.Fatal("expected nil response")
- }
- // Make sure it behaves the same
- resp, err = client.Logical().Delete("pki/root")
- if err != nil {
- t.Fatal(err)
- }
- if resp != nil {
- t.Fatal("expected nil response")
- }
-
- _, err = client.Logical().Read("pki/cert/ca_chain")
- if err == nil {
- t.Fatal("expected error")
- }
-
- resp, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{
- "common_name": "myvault.com",
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected ca info")
- }
-
- _, err = client.Logical().Read("pki/cert/ca_chain")
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBackend_Permitted_DNS_Domains(t *testing.T) {
- coreConfig := &vault.CoreConfig{
- LogicalBackends: map[string]logical.Factory{
- "pki": Factory,
- },
- }
- cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
- HandlerFunc: vaulthttp.Handler,
- })
- cluster.Start()
- defer cluster.Cleanup()
-
- client := cluster.Cores[0].Client
- var err error
- err = client.Sys().Mount("root", &api.MountInput{
- Type: "pki",
- Config: api.MountConfigInput{
- DefaultLeaseTTL: "16h",
- MaxLeaseTTL: "32h",
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- err = client.Sys().Mount("int", &api.MountInput{
- Type: "pki",
- Config: api.MountConfigInput{
- DefaultLeaseTTL: "4h",
- MaxLeaseTTL: "20h",
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Logical().Write("root/roles/example", map[string]interface{}{
- "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com",
- "allow_bare_domains": true,
- "allow_subdomains": true,
- "max_ttl": "2h",
- })
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Logical().Write("int/roles/example", map[string]interface{}{
- "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com",
- "allow_subdomains": true,
- "allow_bare_domains": true,
- "max_ttl": "2h",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // Direct issuing from root
- _, err = client.Logical().Write("root/root/generate/internal", map[string]interface{}{
- "ttl": "40h",
- "common_name": "myvault.com",
- "permitted_dns_domains": []string{"foobar.com", ".zipzap.com"},
- })
- if err != nil {
- t.Fatal(err)
- }
-
- clientKey, err := rsa.GenerateKey(rand.Reader, 2048)
- if err != nil {
- t.Fatal(err)
- }
-
- path := "root/"
- checkIssue := func(valid bool, args ...interface{}) {
- argMap := map[string]interface{}{}
- var currString string
- for i, arg := range args {
- if i%2 == 0 {
- currString = arg.(string)
- } else {
- argMap[currString] = arg
- }
- }
- _, err = client.Logical().Write(path+"issue/example", argMap)
- switch {
- case valid && err != nil:
- t.Fatal(err)
- case !valid && err == nil:
- t.Fatal("expected error")
- }
-
- csr, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{
- Subject: pkix.Name{
- CommonName: argMap["common_name"].(string),
- },
- }, clientKey)
- if err != nil {
- t.Fatal(err)
- }
- delete(argMap, "common_name")
- argMap["csr"] = string(pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE REQUEST",
- Bytes: csr,
- }))
-
- _, err = client.Logical().Write(path+"sign/example", argMap)
- switch {
- case valid && err != nil:
- t.Fatal(err)
- case !valid && err == nil:
- t.Fatal("expected error")
- }
- }
-
- // Check issuing and signing against root's permitted domains
- checkIssue(false, "common_name", "zipzap.com")
- checkIssue(false, "common_name", "host.foobar.com")
- checkIssue(true, "common_name", "host.zipzap.com")
- checkIssue(true, "common_name", "foobar.com")
-
- // Verify that root also won't issue an intermediate outside of its permitted domains
- resp, err := client.Logical().Write("int/intermediate/generate/internal", map[string]interface{}{
- "common_name": "issuer.abc.com",
- })
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{
- "common_name": "issuer.abc.com",
- "csr": resp.Data["csr"],
- "permitted_dns_domains": []string{"abc.com", ".xyz.com"},
- "ttl": "5h",
- })
- if err == nil {
- t.Fatal("expected error")
- }
- _, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{
- "use_csr_values": true,
- "csr": resp.Data["csr"],
- "permitted_dns_domains": []string{"abc.com", ".xyz.com"},
- "ttl": "5h",
- })
- if err == nil {
- t.Fatal("expected error")
- }
-
- // Sign a valid intermediate
- resp, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{
- "common_name": "issuer.zipzap.com",
- "csr": resp.Data["csr"],
- "permitted_dns_domains": []string{"abc.com", ".xyz.com"},
- "ttl": "5h",
- })
- if err != nil {
- t.Fatal(err)
- }
- resp, err = client.Logical().Write("int/intermediate/set-signed", map[string]interface{}{
- "certificate": resp.Data["certificate"],
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // Check enforcement with the intermediate's set values
- path = "int/"
- checkIssue(false, "common_name", "host.abc.com")
- checkIssue(false, "common_name", "xyz.com")
- checkIssue(true, "common_name", "abc.com")
- checkIssue(true, "common_name", "host.xyz.com")
-}
-
-func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) {
- coreConfig := &vault.CoreConfig{
- LogicalBackends: map[string]logical.Factory{
- "pki": Factory,
- },
- }
- cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
- HandlerFunc: vaulthttp.Handler,
- })
- cluster.Start()
- defer cluster.Cleanup()
-
- client := cluster.Cores[0].Client
- var err error
- err = client.Sys().Mount("root", &api.MountInput{
- Type: "pki",
- Config: api.MountConfigInput{
- DefaultLeaseTTL: "16h",
- MaxLeaseTTL: "60h",
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- err = client.Sys().Mount("int", &api.MountInput{
- Type: "pki",
- Config: api.MountConfigInput{
- DefaultLeaseTTL: "4h",
- MaxLeaseTTL: "20h",
- },
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // Direct issuing from root
- _, err = client.Logical().Write("root/root/generate/internal", map[string]interface{}{
- "ttl": "40h",
- "common_name": "myvault.com",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Logical().Write("root/roles/test", map[string]interface{}{
- "allow_bare_domains": true,
- "allow_subdomains": true,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err := client.Logical().Write("int/intermediate/generate/internal", map[string]interface{}{
- "common_name": "myint.com",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- csr := resp.Data["csr"]
-
- _, err = client.Logical().Write("root/sign/test", map[string]interface{}{
- "common_name": "myint.com",
- "csr": csr,
- "ttl": "60h",
- })
- if err == nil {
- t.Fatal("expected error")
- }
-
- _, err = client.Logical().Write("root/sign-verbatim/test", map[string]interface{}{
- "common_name": "myint.com",
- "csr": csr,
- "ttl": "60h",
- })
- if err == nil {
- t.Fatal("expected error")
- }
-
- resp, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{
- "common_name": "myint.com",
- "csr": csr,
- "ttl": "60h",
- })
- if err != nil {
- t.Fatalf("got error: %v", err)
- }
- if resp == nil {
- t.Fatal("got nil response")
- }
- if len(resp.Warnings) == 0 {
- t.Fatalf("expected warnings, got %#v", *resp)
- }
-}
-
-func TestBackend_SignSelfIssued(t *testing.T) {
- // create the backend
- config := logical.TestBackendConfig()
- storage := &logical.InmemStorage{}
- config.StorageView = storage
-
- b := Backend()
- err := b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // generate root
- rootData := map[string]interface{}{
- "common_name": "test.com",
- "ttl": "172800",
- }
-
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "root/generate/internal",
- Storage: storage,
- Data: rootData,
- })
- if resp != nil && resp.IsError() {
- t.Fatalf("failed to generate root, %#v", *resp)
- }
- if err != nil {
- t.Fatal(err)
- }
-
- key, err := rsa.GenerateKey(rand.Reader, 2048)
- if err != nil {
- t.Fatal(err)
- }
-
- getSelfSigned := func(subject, issuer *x509.Certificate) (string, *x509.Certificate) {
- selfSigned, err := x509.CreateCertificate(rand.Reader, subject, issuer, key.Public(), key)
- if err != nil {
- t.Fatal(err)
- }
- cert, err := x509.ParseCertificate(selfSigned)
- if err != nil {
- t.Fatal(err)
- }
- pemSS := pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE",
- Bytes: selfSigned,
- })
- return string(pemSS), cert
- }
-
- template := &x509.Certificate{
- Subject: pkix.Name{
- CommonName: "foo.bar.com",
- },
- SerialNumber: big.NewInt(1234),
- IsCA: false,
- BasicConstraintsValid: true,
- }
-
- ss, _ := getSelfSigned(template, template)
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "root/sign-self-issued",
- Storage: storage,
- Data: map[string]interface{}{
- "certificate": ss,
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("got nil response")
- }
- if !resp.IsError() {
- t.Fatalf("expected error due to non-CA; got: %#v", *resp)
- }
-
- // Set CA to true, but leave issuer alone
- template.IsCA = true
-
- issuer := &x509.Certificate{
- Subject: pkix.Name{
- CommonName: "bar.foo.com",
- },
- SerialNumber: big.NewInt(2345),
- IsCA: true,
- BasicConstraintsValid: true,
- }
- ss, ssCert := getSelfSigned(template, issuer)
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "root/sign-self-issued",
- Storage: storage,
- Data: map[string]interface{}{
- "certificate": ss,
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("got nil response")
- }
- if !resp.IsError() {
- t.Fatalf("expected error due to different issuer; cert info is\nIssuer\n%#v\nSubject\n%#v\n", ssCert.Issuer, ssCert.Subject)
- }
-
- ss, ssCert = getSelfSigned(template, template)
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "root/sign-self-issued",
- Storage: storage,
- Data: map[string]interface{}{
- "certificate": ss,
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("got nil response")
- }
- if resp.IsError() {
- t.Fatalf("error in response: %s", resp.Error().Error())
- }
-
- newCertString := resp.Data["certificate"].(string)
- block, _ := pem.Decode([]byte(newCertString))
- newCert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- t.Fatal(err)
- }
-
- signingBundle, err := fetchCAInfo(&logical.Request{Storage: storage})
- if err != nil {
- t.Fatal(err)
- }
- if reflect.DeepEqual(newCert.Subject, newCert.Issuer) {
- t.Fatal("expected different subject/issuer")
- }
- if !reflect.DeepEqual(newCert.Issuer, signingBundle.Certificate.Subject) {
- t.Fatalf("expected matching issuer/CA subject\n\nIssuer:\n%#v\nSubject:\n%#v\n", newCert.Issuer, signingBundle.Certificate.Subject)
- }
- if bytes.Equal(newCert.AuthorityKeyId, newCert.SubjectKeyId) {
- t.Fatal("expected different authority/subject")
- }
- if !bytes.Equal(newCert.AuthorityKeyId, signingBundle.Certificate.SubjectKeyId) {
- t.Fatal("expected authority on new cert to be same as signing subject")
- }
- if newCert.Subject.CommonName != "foo.bar.com" {
- t.Fatalf("unexpected common name on new cert: %s", newCert.Subject.CommonName)
- }
-}
-
-const (
- rsaCAKey string = `-----BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAmPQlK7xD5p+E8iLQ8XlVmll5uU2NKMxKY3UF5tbh+0vkc+Fy
-XmutLxxXAyYRPoztZ1g7ocr8XBFYsQPK26TFc3TzrLL7bBEYHQArd8M+VUHjziB7
-zwwpbV7tG8WPqIScDKMNncavDcT8sDg3DUqb8/zWkBD8WEYmsVr1VfKY5pFdxIZU
-kHP3/MkDpGmfrED9K5qPu17dIHTL2VYi4KxKhtIryapZTk6vDwRNfIYJD23QbQnt
-Si1j0X9MTRUf3BIcd0Ch60aGvv0VSL+1NTafsZQD+z1RY/zNp9IUHz5bNIiePZ6l
-JrlddodAAXZ4sN1CMetf4bA2RXssxBEIb5FyiQIDAQABAoIBAGMScSk9DvZJCUIV
-zyU6JHqPzkp6sx5kBSMa37HAKiwt4lI1C3GhaVIEl0/Qzoannfa8rhOEeaXhDoPK
-IxHWTpcUf+mzHSvIfsf6Hi2655stzLLtU4SvKf5P6GF+vCi5jKKa0u0JjsXqfIpg
-Pzh6xT1q3kf+2JUNC28Brbv4IZXmPmqWwu21VN+t3GsMGYgOnEOzBjXMhvNnm9kN
-kznV9Y2y0UIcT4dhbe2VRs4Dp8dGEyrFM7/Ovb3hIJrTkPcxjBbL5eMqpXnIkiW2
-7NyPMWFvX2lGnGdZ1Erh65SVtMjnHFwnSJ8jD+x9RAH9c1LQrYASws3MvMV8Bdzg
-2iljNqECgYEAw3Ow0clLx2alj9qFXcS2ap1lUCJxXZ9UiIU5lOcPxpCpHPloua14
-46rj2EJ9SD1L2kyB5gCq4nGK5uUIx37AJryy1SGzUmtmIVxQLnm6XK6zKnTBk0gx
-gevS6D7fHLDiVGGl3oGw4evibUFCk7dFOb/I/uBRb1zyaJrqOIlDS7UCgYEAyFYi
-RYQbYJJ0k18fUWDKy/P/Rl7uy9D67Qa9+wxoYN2Kh/aQwnNxYHAbwG7Pupd0oGcW
-Yl4bgUliAX3IFGs/cCkPJAIHzwWBPjUDhsJ020TGxKfL4SWP9OaxOpN5TOAixvBY
-ar9aSaKEl7QShmzc/Dknxu58LcoZUwI82pKIGAUCgYAxaHJ/ZcpxOsKJjez+2jZe
-1zEAQ+SyjQ96f2sh+BMl1/XYLDhMD80qiE2WoqA2/b/KDGMd+Hc6TQeW/LjubV03
-raXreNxy7lFgB40BYqY4vbTu+5rfl3VkaW/kY9hU0WY1fIXIrLJBOjb/9WpWGxM1
-2QR/YcdURoPE67xf1FsdrQKBgE8KdNEakzah8e6nLBMOblTTutcH4410sVvNOi2P
-sqrtHZgRNwIRTB0xfjGJRtomoXQb2CANYyq6SjmuZ79upQPan0ekqXILiPeDMRX9
-KN/OHeI/FdiJ2mdUkX476zLih7YX47qSLsw4m7nC6UAyOWomHsSFGWdzglRW4K2X
-/KwFAoGAYQUEWhXp5vpKzAly1ivSH9+sGC59Cujdy50oJSjaw9J+W1fM5WO9z+MH
-CoEpRt8epIgvCBBP2IM7uJUu8i2jQgJ/rrn3NTJgZn2UEPzyxUxbuWnSyueyUsD6
-uhTwBDf8LWOpvdZHMI4CPZ5WJwxAGkvde9xtlzuZUSAlyI2X8m0=
------END RSA PRIVATE KEY-----
-`
- rsaCACert string = `-----BEGIN CERTIFICATE-----
-MIIDljCCAn6gAwIBAgIUQVapfgyAeDH9rAmpw3PQrhMcjRMwDQYJKoZIhvcNAQEL
-BQAwMzExMC8GA1UEAxMoVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIEF1
-dGhvcml0eTAeFw0xNjA4MDcyMjUzNTRaFw0yNjA3MjQxMDU0MjRaMDcxNTAzBgNV
-BAMTLFZhdWx0IFRlc3RpbmcgSW50ZXJtZWRpYXRlIFN1YiBTdWIgQXV0aG9yaXR5
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmPQlK7xD5p+E8iLQ8XlV
-mll5uU2NKMxKY3UF5tbh+0vkc+FyXmutLxxXAyYRPoztZ1g7ocr8XBFYsQPK26TF
-c3TzrLL7bBEYHQArd8M+VUHjziB7zwwpbV7tG8WPqIScDKMNncavDcT8sDg3DUqb
-8/zWkBD8WEYmsVr1VfKY5pFdxIZUkHP3/MkDpGmfrED9K5qPu17dIHTL2VYi4KxK
-htIryapZTk6vDwRNfIYJD23QbQntSi1j0X9MTRUf3BIcd0Ch60aGvv0VSL+1NTaf
-sZQD+z1RY/zNp9IUHz5bNIiePZ6lJrlddodAAXZ4sN1CMetf4bA2RXssxBEIb5Fy
-iQIDAQABo4GdMIGaMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
-A1UdDgQWBBRMeQTX9VkLqb1wzrvN/vFG09yhUTAfBgNVHSMEGDAWgBR0Oq2VTUBE
-dOm6a1sKJTvdZMV5LjA3BgNVHREEMDAugixWYXVsdCBUZXN0aW5nIEludGVybWVk
-aWF0ZSBTdWIgU3ViIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAQEAagYM7uFa
-tUziraBkuU7cIyX83y7lYFsDhUse2hkpqmgO14oEOwFsDox1Jg2QGt4FEfJoCOXf
-oCZZN8XmaWdSrfgs1nDmtE0xwXiX1z7JuJZ+Ygt3dcRHO1zs5tmuHLxrvMnKfIfG
-bsGmES4mknt0qQ7tGhpyC+KgEmcVL1QQJXNjzCrw5iQ9sgvQt+oCqV28pxOUSYkq
-FdrozmNdJwMgVADywiY/FqYJWgkixlFHQkPR7eiXwpahON+zRMk1JSgr/8N8fRDj
-aqVBRppPzVU9joUME0vOc8cK3VozNe4iRkKNZFelHU2NPPJSDjRLVH9tJ7jPVOEA
-/k6w2PwdoRom7Q==
------END CERTIFICATE-----
-`
-
- rsaCAChain string = `-----BEGIN CERTIFICATE-----
-MIIDijCCAnKgAwIBAgIUOiGo/1EOhRhuupTRGDYnqdALk/swDQYJKoZIhvcNAQEL
-BQAwLzEtMCsGA1UEAxMkVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgQXV0aG9y
-aXR5MB4XDTE2MDgwNzIyNTA1MloXDTI2MDcyODE0NTEyMlowMzExMC8GA1UEAxMo
-VmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIEF1dGhvcml0eTCCASIwDQYJ
-KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMTPRQREwW3BEifNcm0XElMRB0GNTXHr
-XCuNoFVsVBlIEsNVQkka+SHZcmNBdEcZLBXP/W3tBT82B48GVN8jyxAGfYZ5hoOQ
-ed3GVft1A7lAnxcGvf5e9kfecKDcBB4G4rBhqdDNcAtklS2hV4uZUcVcEJKggpsQ
-a1wZkCn8eg6sqEYG/SxPouwL52PblxIN+Dd57sBeqx4qdL297XR8LuLkxqftwUCZ
-l2iFBnSDID/06ZmHDXA38I0n3jT2ZGjgPGFnIFKxRGq1vpVc3F5ga8qk+u66ybBu
-xWHzINQrrryjELbl2YBTr6i0R9HnZle6OPcXMWp0JuGjtDC1xb5NmnkCAwEAAaOB
-mTCBljAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
-dDqtlU1ARHTpumtbCiU73WTFeS4wHwYDVR0jBBgwFoAU+UO/nrlKr4COZCxLZSY/
-ul+YMvMwMwYDVR0RBCwwKoIoVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3Vi
-IEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAQEAjgCuTXsLFkf0DVkfSsKReNwI
-U/yBcP8Ttbx/ltanJGIVfD5TZoCnNTWm6RkML29ohfxI27sHTUhj+/6Ba0MRiLeI
-FXdclXmHOU2dTHlrmUa0m/4cb5uYoiiEnpmyWL5k94fqPOZAvJcFHnP3db4vsaUW
-47YcOvJbPSJqFXZHadqnsf3Fur5NCeTkIk6yZSvwTaZJT0JIWcqfE5LK3mYAMMC3
-iPaIa1cYqOZhWx9ilQfW6u6WxWeOphGuDIusP7Q4qc2Dr9sekyD59dfIYsroK5TP
-QVJb69nIYINpYdg3l3VNmmkY4G30N9QNs6acaH49rYzLcRX6tLBgPklO6d+TPA==
------END CERTIFICATE-----
------BEGIN CERTIFICATE-----
-MIIDejCCAmKgAwIBAgIUULdIdrdK4Y8d+XM9fuOpDlNcJIYwDQYJKoZIhvcNAQEL
-BQAwJzElMCMGA1UEAxMcVmF1bHQgVGVzdGluZyBSb290IEF1dGhvcml0eTAeFw0x
-NjA4MDcyMjUwNTFaFw0yNjA4MDExODUxMjFaMC8xLTArBgNVBAMTJFZhdWx0IFRl
-c3RpbmcgSW50ZXJtZWRpYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBANXa6U+MDiUrryeZeGxgkmAZdrm9wCKz/6SmxYSebKr8aZwD
-nfbsPLRFxU6BXp9Nc6pP7e8HLBv6PtFTQG389zxOBwAHxZQvUsFESumUd64oTLRG
-J+AErTh7rtSWbLZsgDtQVvpx+6mKkvm53f/aKcq+DbqAFOg6slYOaQix0ZvP/qL0
-iWGIPr1JZk9uBJOUuIUBJdbsgTk+KQqJL9M6up8bCnM0noCafwrNKwZWtsbkfOZE
-OLSycdzCEBeHejpHTIU0vgAkdj63oEy2AbK3hMPxKzNthL3DX6W0tssoVgL//92i
-oSfpDTxiXqqdr+J3accpsAvA+F+D2TqaxdAfjLcCAwEAAaOBlTCBkjAOBgNVHQ8B
-Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+UO/nrlKr4COZCxL
-ZSY/ul+YMvMwHwYDVR0jBBgwFoAUA3jY4OUWi1Y7zQgM7S9QeXjNgIQwLwYDVR0R
-BCgwJoIkVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgQXV0aG9yaXR5MA0GCSqG
-SIb3DQEBCwUAA4IBAQA9VJt92LsOOegtAx35rr41LSSfPWB2SCKg0fphL2gMPO5y
-fE2u8O5TF5dJWJ1fF3cg9/XK30Ohdl1/ujfHbVcX+O6Sb3cgwKTQQOhTdsAZKFRT
-RPHaf/Ja8uqAXMITApxOp7YiYQwukwZr+OsKi66s+zhlfI790PoQbC3UJvgmNDmv
-V5oP63mw4yhqRNPn4NOjzoC/hJSIM0AIdRB1nx2rsSUw0P354R1j9gO43L/Lj33S
-NEaPmw+SC3Tbcx4yxeKnTvGdu3sw/ndmZkCjaq5jxgTy9FONqT45TPJOyk29o5gl
-+AVQz5fD2M3C1L/sZIPH2OQbXxePHcsvUZVgaKyk
------END CERTIFICATE-----
-`
-
- ecCAKey string = `-----BEGIN EC PRIVATE KEY-----
-MIGkAgEBBDBP/t89wrC0RFVs0N+jiRuGPptoxI1Iyu42/PzzZWMKYnO7yCWFG/Qv
-zC8cRa8PDqegBwYFK4EEACKhZANiAAQI9e8n9RD6gOd5YpWpDi5AoPbskxQSogxx
-dYFzzHwS0RYIucmlcJ2CuJQNc+9E4dUCMsYr2cAnCgA4iUHzGaje3Fa4O667LVH1
-imAyAj5nbfSd89iNzg4XNPkFjuVNBlE=
------END EC PRIVATE KEY-----
-`
-
- ecCACert string = `-----BEGIN CERTIFICATE-----
-MIIDHzCCAqSgAwIBAgIUEQ4L+8Xl9+/uxU3MMCrd3Bw0HMcwCgYIKoZIzj0EAwIw
-XzEjMCEGA1UEAxMaVmF1bHQgRUMgdGVzdGluZyByb290IGNlcnQxODA2BgNVBAUT
-Lzk3MzY2MDk3NDQ1ODU2MDI3MDY5MDQ0MTkxNjIxODI4NjI0NjM0NTI5MTkzMTU5
-MB4XDTE1MTAwNTE2MzAwMFoXDTM1MDkzMDE2MzAwMFowXzEjMCEGA1UEAxMaVmF1
-bHQgRUMgdGVzdGluZyByb290IGNlcnQxODA2BgNVBAUTLzk3MzY2MDk3NDQ1ODU2
-MDI3MDY5MDQ0MTkxNjIxODI4NjI0NjM0NTI5MTkzMTU5MHYwEAYHKoZIzj0CAQYF
-K4EEACIDYgAECPXvJ/UQ+oDneWKVqQ4uQKD27JMUEqIMcXWBc8x8EtEWCLnJpXCd
-griUDXPvROHVAjLGK9nAJwoAOIlB8xmo3txWuDuuuy1R9YpgMgI+Z230nfPYjc4O
-FzT5BY7lTQZRo4IBHzCCARswDgYDVR0PAQH/BAQDAgGuMBMGA1UdJQQMMAoGCCsG
-AQUFBwMJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFCIBqs15CiKuj7vqmIW5
-L07WSeLhMB8GA1UdIwQYMBaAFCIBqs15CiKuj7vqmIW5L07WSeLhMEIGCCsGAQUF
-BwEBBDYwNDAyBggrBgEFBQcwAoYmaHR0cDovL3ZhdWx0LmV4YW1wbGUuY29tL3Yx
-L3Jvb3Rwa2kvY2EwJQYDVR0RBB4wHIIaVmF1bHQgRUMgdGVzdGluZyByb290IGNl
-cnQwOAYDVR0fBDEwLzAtoCugKYYnaHR0cDovL3ZhdWx0LmV4YW1wbGUuY29tL3Yx
-L3Jvb3Rwa2kvY3JsMAoGCCqGSM49BAMCA2kAMGYCMQDRrxXskBtXjuZ1tUTk+qae
-3bNVE1oeTDJhe0m3KN7qTykSGslxfEjlv83GYXziiv0CMQDsqu1U9uXPn3ezSbgG
-O30prQ/sanDzNAeJhftoGtNPJDspwx0fzclHvKIhgl3JVUc=
------END CERTIFICATE-----
-`
-)
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go
deleted file mode 100644
index 7a6deda..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package pki
-
-import (
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func (b *backend) getGenerationParams(
- data *framework.FieldData,
-) (exported bool, format string, role *roleEntry, errorResp *logical.Response) {
- exportedStr := data.Get("exported").(string)
- switch exportedStr {
- case "exported":
- exported = true
- case "internal":
- default:
- errorResp = logical.ErrorResponse(
- `The "exported" path parameter must be "internal" or "exported"`)
- return
- }
-
- format = getFormat(data)
- if format == "" {
- errorResp = logical.ErrorResponse(
- `The "format" path parameter must be "pem", "der", or "pem_bundle"`)
- return
- }
-
- role = &roleEntry{
- TTL: (time.Duration(data.Get("ttl").(int)) * time.Second).String(),
- KeyType: data.Get("key_type").(string),
- KeyBits: data.Get("key_bits").(int),
- AllowLocalhost: true,
- AllowAnyName: true,
- AllowIPSANs: true,
- EnforceHostnames: false,
- }
-
- if role.KeyType == "rsa" && role.KeyBits < 2048 {
- errorResp = logical.ErrorResponse("RSA keys < 2048 bits are unsafe and not supported")
- return
- }
-
- errorResp = validateKeyTypeLength(role.KeyType, role.KeyBits)
-
- return
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go
deleted file mode 100644
index b4bb381..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go
+++ /dev/null
@@ -1,1185 +0,0 @@
-package pki
-
-import (
- "bytes"
- "crypto/ecdsa"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/asn1"
- "encoding/pem"
- "fmt"
- "net"
- "regexp"
- "strings"
- "time"
-
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/ryanuber/go-glob"
-)
-
-type certExtKeyUsage int
-
-const (
- serverExtKeyUsage certExtKeyUsage = 1 << iota
- clientExtKeyUsage
- codeSigningExtKeyUsage
- emailProtectionExtKeyUsage
-)
-
-type creationBundle struct {
- CommonName string
- OU []string
- Organization []string
- DNSNames []string
- EmailAddresses []string
- IPAddresses []net.IP
- IsCA bool
- KeyType string
- KeyBits int
- SigningBundle *caInfoBundle
- NotAfter time.Time
- KeyUsage x509.KeyUsage
- ExtKeyUsage certExtKeyUsage
-
- // Only used when signing a CA cert
- UseCSRValues bool
- PermittedDNSDomains []string
-
- // URLs to encode into the certificate
- URLs *urlEntries
-
- // The maximum path length to encode
- MaxPathLength int
-}
-
-type caInfoBundle struct {
- certutil.ParsedCertBundle
- URLs *urlEntries
-}
-
-func (b *caInfoBundle) GetCAChain() []*certutil.CertBlock {
- chain := []*certutil.CertBlock{}
-
- // Include issuing CA in Chain, not including Root Authority
- if (len(b.Certificate.AuthorityKeyId) > 0 &&
- !bytes.Equal(b.Certificate.AuthorityKeyId, b.Certificate.SubjectKeyId)) ||
- (len(b.Certificate.AuthorityKeyId) == 0 &&
- !bytes.Equal(b.Certificate.RawIssuer, b.Certificate.RawSubject)) {
-
- chain = append(chain, &certutil.CertBlock{
- Certificate: b.Certificate,
- Bytes: b.CertificateBytes,
- })
- if b.CAChain != nil && len(b.CAChain) > 0 {
- chain = append(chain, b.CAChain...)
- }
- }
-
- return chain
-}
-
-var (
- hostnameRegex = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`)
- oidExtensionBasicConstraints = []int{2, 5, 29, 19}
-)
-
-func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
- for _, e := range extensions {
- if e.Id.Equal(oid) {
- return true
- }
- }
- return false
-}
-
-func getFormat(data *framework.FieldData) string {
- format := data.Get("format").(string)
- switch format {
- case "pem":
- case "der":
- case "pem_bundle":
- default:
- format = ""
- }
- return format
-}
-
-func validateKeyTypeLength(keyType string, keyBits int) *logical.Response {
- switch keyType {
- case "rsa":
- switch keyBits {
- case 2048:
- case 4096:
- case 8192:
- default:
- return logical.ErrorResponse(fmt.Sprintf(
- "unsupported bit length for RSA key: %d", keyBits))
- }
- case "ec":
- switch keyBits {
- case 224:
- case 256:
- case 384:
- case 521:
- default:
- return logical.ErrorResponse(fmt.Sprintf(
- "unsupported bit length for EC key: %d", keyBits))
- }
- default:
- return logical.ErrorResponse(fmt.Sprintf(
- "unknown key type %s", keyType))
- }
-
- return nil
-}
-
-// Fetches the CA info. Unlike other certificates, the CA info is stored
-// in the backend as a CertBundle, because we are storing its private key
-func fetchCAInfo(req *logical.Request) (*caInfoBundle, error) {
- bundleEntry, err := req.Storage.Get("config/ca_bundle")
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch local CA certificate/key: %v", err)}
- }
- if bundleEntry == nil {
- return nil, errutil.UserError{Err: "backend must be configured with a CA certificate/key"}
- }
-
- var bundle certutil.CertBundle
- if err := bundleEntry.DecodeJSON(&bundle); err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode local CA certificate/key: %v", err)}
- }
-
- parsedBundle, err := bundle.ToParsedCertBundle()
- if err != nil {
- return nil, errutil.InternalError{Err: err.Error()}
- }
-
- if parsedBundle.Certificate == nil {
- return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"}
- }
-
- caInfo := &caInfoBundle{*parsedBundle, nil}
-
- entries, err := getURLs(req)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch URL information: %v", err)}
- }
- if entries == nil {
- entries = &urlEntries{
- IssuingCertificates: []string{},
- CRLDistributionPoints: []string{},
- OCSPServers: []string{},
- }
- }
- caInfo.URLs = entries
-
- return caInfo, nil
-}
-
-// Allows fetching certificates from the backend; it handles the slightly
-// separate pathing for CA, CRL, and revoked certificates.
-func fetchCertBySerial(req *logical.Request, prefix, serial string) (*logical.StorageEntry, error) {
- var path, legacyPath string
- var err error
- var certEntry *logical.StorageEntry
-
- hyphenSerial := normalizeSerial(serial)
- colonSerial := strings.Replace(strings.ToLower(serial), "-", ":", -1)
-
- switch {
- // Revoked goes first as otherwise ca/crl get hardcoded paths which fail if
- // we actually want revocation info
- case strings.HasPrefix(prefix, "revoked/"):
- legacyPath = "revoked/" + colonSerial
- path = "revoked/" + hyphenSerial
- case serial == "ca":
- path = "ca"
- case serial == "crl":
- path = "crl"
- default:
- legacyPath = "certs/" + colonSerial
- path = "certs/" + hyphenSerial
- }
-
- certEntry, err = req.Storage.Get(path)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching certificate %s: %s", serial, err)}
- }
- if certEntry != nil {
- if certEntry.Value == nil || len(certEntry.Value) == 0 {
- return nil, errutil.InternalError{Err: fmt.Sprintf("returned certificate bytes for serial %s were empty", serial)}
- }
- return certEntry, nil
- }
-
- // If legacyPath is unset, it's going to be a CA or CRL; return immediately
- if legacyPath == "" {
- return nil, nil
- }
-
- // Retrieve the old-style path
- certEntry, err = req.Storage.Get(legacyPath)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching certificate %s: %s", serial, err)}
- }
- if certEntry == nil {
- return nil, nil
- }
- if certEntry.Value == nil || len(certEntry.Value) == 0 {
- return nil, errutil.InternalError{Err: fmt.Sprintf("returned certificate bytes for serial %s were empty", serial)}
- }
-
- // Update old-style paths to new-style paths
- certEntry.Key = path
- if err = req.Storage.Put(certEntry); err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("error saving certificate with serial %s to new location", serial)}
- }
- if err = req.Storage.Delete(legacyPath); err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("error deleting certificate with serial %s from old location", serial)}
- }
-
- return certEntry, nil
-}
-
-// Given a set of requested names for a certificate, verifies that all of them
-// match the various toggles set in the role for controlling issuance.
-// If one does not pass, it is returned in the string argument.
-func validateNames(req *logical.Request, names []string, role *roleEntry) string {
- for _, name := range names {
- sanitizedName := name
- emailDomain := name
- isEmail := false
- isWildcard := false
-
- // If it has an @, assume it is an email address and separate out the
- // user from the hostname portion so that we can act on the hostname.
- // Note that this matches behavior from the alt_names parameter. If it
- // ends up being problematic for users, I guess that could be separated
- // into dns_names and email_names in the future to be explicit, but I
- // don't think this is likely.
- if strings.Contains(name, "@") {
- splitEmail := strings.Split(name, "@")
- if len(splitEmail) != 2 {
- return name
- }
- sanitizedName = splitEmail[1]
- emailDomain = splitEmail[1]
- isEmail = true
- }
-
- // If we have an asterisk as the first part of the domain name, mark it
- // as wildcard and set the sanitized name to the remainder of the
- // domain
- if strings.HasPrefix(sanitizedName, "*.") {
- sanitizedName = sanitizedName[2:]
- isWildcard = true
- }
-
- // Email addresses using wildcard domain names do not make sense
- if isEmail && isWildcard {
- return name
- }
-
- // AllowAnyName is checked after this because EnforceHostnames still
- // applies when allowing any name. Also, we check the sanitized name to
- // ensure that we are not either checking a full email address or a
- // wildcard prefix.
- if role.EnforceHostnames {
- if !hostnameRegex.MatchString(sanitizedName) {
- return name
- }
- }
-
- // Self-explanatory
- if role.AllowAnyName {
- continue
- }
-
- // The following blocks all work the same basic way:
- // 1) If a role allows a certain class of base (localhost, token
- // display name, role-configured domains), perform further tests
- //
- // 2) If there is a perfect match on either the name itself or it's an
- // email address with a perfect match on the hostname portion, allow it
- //
- // 3) If subdomains are allowed, we check based on the sanitized name;
- // note that if not a wildcard, will be equivalent to the email domain
- // for email checks, and we already checked above for both a wildcard
- // and email address being present in the same name
- // 3a) First we check for a non-wildcard subdomain, as in .
- // 3b) Then we check if it's a wildcard and the base domain is a match
- //
- // Variances are noted in-line
-
- if role.AllowLocalhost {
- if name == "localhost" ||
- name == "localdomain" ||
- (isEmail && emailDomain == "localhost") ||
- (isEmail && emailDomain == "localdomain") {
- continue
- }
-
- if role.AllowSubdomains {
- // It is possible, if unlikely, to have a subdomain of "localhost"
- if strings.HasSuffix(sanitizedName, ".localhost") ||
- (isWildcard && sanitizedName == "localhost") {
- continue
- }
-
- // A subdomain of "localdomain" is also not entirely uncommon
- if strings.HasSuffix(sanitizedName, ".localdomain") ||
- (isWildcard && sanitizedName == "localdomain") {
- continue
- }
- }
- }
-
- if role.AllowTokenDisplayName {
- if name == req.DisplayName {
- continue
- }
-
- if role.AllowSubdomains {
- if isEmail {
- // If it's an email address, we need to parse the token
- // display name in order to do a proper comparison of the
- // subdomain
- if strings.Contains(req.DisplayName, "@") {
- splitDisplay := strings.Split(req.DisplayName, "@")
- if len(splitDisplay) == 2 {
- // Compare the sanitized name against the hostname
- // portion of the email address in the roken
- // display name
- if strings.HasSuffix(sanitizedName, "."+splitDisplay[1]) {
- continue
- }
- }
- }
- }
-
- if strings.HasSuffix(sanitizedName, "."+req.DisplayName) ||
- (isWildcard && sanitizedName == req.DisplayName) {
- continue
- }
- }
- }
-
- if role.AllowedDomains != "" {
- valid := false
- for _, currDomain := range strings.Split(role.AllowedDomains, ",") {
- // If there is, say, a trailing comma, ignore it
- if currDomain == "" {
- continue
- }
-
- // First, allow an exact match of the base domain if that role flag
- // is enabled
- if role.AllowBareDomains &&
- (name == currDomain ||
- (isEmail && emailDomain == currDomain)) {
- valid = true
- break
- }
-
- if role.AllowSubdomains {
- if strings.HasSuffix(sanitizedName, "."+currDomain) ||
- (isWildcard && sanitizedName == currDomain) {
- valid = true
- break
- }
- }
-
- if role.AllowGlobDomains &&
- strings.Contains(currDomain, "*") &&
- glob.Glob(currDomain, name) {
- valid = true
- break
- }
- }
- if valid {
- continue
- }
- }
-
- //panic(fmt.Sprintf("\nName is %s\nRole is\n%#v\n", name, role))
- return name
- }
-
- return ""
-}
-
-func generateCert(b *backend,
- role *roleEntry,
- signingBundle *caInfoBundle,
- isCA bool,
- req *logical.Request,
- data *framework.FieldData) (*certutil.ParsedCertBundle, error) {
-
- if role.KeyType == "rsa" && role.KeyBits < 2048 {
- return nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"}
- }
-
- creationBundle, err := generateCreationBundle(b, role, signingBundle, nil, req, data)
- if err != nil {
- return nil, err
- }
-
- if isCA {
- creationBundle.IsCA = isCA
-
- creationBundle.PermittedDNSDomains = data.Get("permitted_dns_domains").([]string)
-
- if signingBundle == nil {
- // Generating a self-signed root certificate
- entries, err := getURLs(req)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch URL information: %v", err)}
- }
- if entries == nil {
- entries = &urlEntries{
- IssuingCertificates: []string{},
- CRLDistributionPoints: []string{},
- OCSPServers: []string{},
- }
- }
- creationBundle.URLs = entries
-
- if role.MaxPathLength == nil {
- creationBundle.MaxPathLength = -1
- } else {
- creationBundle.MaxPathLength = *role.MaxPathLength
- }
- }
- }
-
- parsedBundle, err := createCertificate(creationBundle)
- if err != nil {
- return nil, err
- }
-
- return parsedBundle, nil
-}
-
-// N.B.: This is only meant to be used for generating intermediate CAs.
-// It skips some sanity checks.
-func generateIntermediateCSR(b *backend,
- role *roleEntry,
- signingBundle *caInfoBundle,
- req *logical.Request,
- data *framework.FieldData) (*certutil.ParsedCSRBundle, error) {
-
- creationBundle, err := generateCreationBundle(b, role, signingBundle, nil, req, data)
- if err != nil {
- return nil, err
- }
-
- parsedBundle, err := createCSR(creationBundle)
- if err != nil {
- return nil, err
- }
-
- return parsedBundle, nil
-}
-
-func signCert(b *backend,
- role *roleEntry,
- signingBundle *caInfoBundle,
- isCA bool,
- useCSRValues bool,
- req *logical.Request,
- data *framework.FieldData) (*certutil.ParsedCertBundle, error) {
-
- csrString := data.Get("csr").(string)
- if csrString == "" {
- return nil, errutil.UserError{Err: fmt.Sprintf("\"csr\" is empty")}
- }
-
- pemBytes := []byte(csrString)
- pemBlock, pemBytes := pem.Decode(pemBytes)
- if pemBlock == nil {
- return nil, errutil.UserError{Err: "csr contains no data"}
- }
- csr, err := x509.ParseCertificateRequest(pemBlock.Bytes)
- if err != nil {
- return nil, errutil.UserError{Err: fmt.Sprintf("certificate request could not be parsed: %v", err)}
- }
-
- switch role.KeyType {
- case "rsa":
- // Verify that the key matches the role type
- if csr.PublicKeyAlgorithm != x509.RSA {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "role requires keys of type %s",
- role.KeyType)}
- }
- pubKey, ok := csr.PublicKey.(*rsa.PublicKey)
- if !ok {
- return nil, errutil.UserError{Err: "could not parse CSR's public key"}
- }
-
- // Verify that the key is at least 2048 bits
- if pubKey.N.BitLen() < 2048 {
- return nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"}
- }
-
- // Verify that the bit size is at least the size specified in the role
- if pubKey.N.BitLen() < role.KeyBits {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "role requires a minimum of a %d-bit key, but CSR's key is %d bits",
- role.KeyBits,
- pubKey.N.BitLen())}
- }
-
- case "ec":
- // Verify that the key matches the role type
- if csr.PublicKeyAlgorithm != x509.ECDSA {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "role requires keys of type %s",
- role.KeyType)}
- }
- pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey)
- if !ok {
- return nil, errutil.UserError{Err: "could not parse CSR's public key"}
- }
-
- // Verify that the bit size is at least the size specified in the role
- if pubKey.Params().BitSize < role.KeyBits {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "role requires a minimum of a %d-bit key, but CSR's key is %d bits",
- role.KeyBits,
- pubKey.Params().BitSize)}
- }
-
- case "any":
- // We only care about running RSA < 2048 bit checks, so if not RSA
- // break out
- if csr.PublicKeyAlgorithm != x509.RSA {
- break
- }
-
- // Run RSA < 2048 bit checks
- pubKey, ok := csr.PublicKey.(*rsa.PublicKey)
- if !ok {
- return nil, errutil.UserError{Err: "could not parse CSR's public key"}
- }
- if pubKey.N.BitLen() < 2048 {
- return nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"}
- }
-
- }
-
- creationBundle, err := generateCreationBundle(b, role, signingBundle, csr, req, data)
- if err != nil {
- return nil, err
- }
-
- creationBundle.IsCA = isCA
- creationBundle.UseCSRValues = useCSRValues
-
- if isCA {
- creationBundle.PermittedDNSDomains = data.Get("permitted_dns_domains").([]string)
- }
-
- parsedBundle, err := signCertificate(creationBundle, csr)
- if err != nil {
- return nil, err
- }
-
- return parsedBundle, nil
-}
-
-// generateCreationBundle is a shared function that reads parameters supplied
-// from the various endpoints and generates a creationBundle with the
-// parameters that can be used to issue or sign
-func generateCreationBundle(b *backend,
- role *roleEntry,
- signingBundle *caInfoBundle,
- csr *x509.CertificateRequest,
- req *logical.Request,
- data *framework.FieldData) (*creationBundle, error) {
- var err error
- var ok bool
-
- // Read in names -- CN, DNS and email addresses
- var cn string
- dnsNames := []string{}
- emailAddresses := []string{}
- {
- if csr != nil && role.UseCSRCommonName {
- cn = csr.Subject.CommonName
- }
- if cn == "" {
- cn = data.Get("common_name").(string)
- if cn == "" {
- return nil, errutil.UserError{Err: `the common_name field is required, or must be provided in a CSR with "use_csr_common_name" set to true`}
- }
- }
-
- if csr != nil && role.UseCSRSANs {
- dnsNames = csr.DNSNames
- emailAddresses = csr.EmailAddresses
- }
-
- if !data.Get("exclude_cn_from_sans").(bool) {
- if strings.Contains(cn, "@") {
- // Note: emails are not disallowed if the role's email protection
- // flag is false, because they may well be included for
- // informational purposes; it is up to the verifying party to
- // ensure that email addresses in a subject alternate name can be
- // used for the purpose for which they are presented
- emailAddresses = append(emailAddresses, cn)
- } else {
- dnsNames = append(dnsNames, cn)
- }
- }
-
- if csr == nil || !role.UseCSRSANs {
- cnAltRaw, ok := data.GetOk("alt_names")
- if ok {
- cnAlt := strutil.ParseDedupLowercaseAndSortStrings(cnAltRaw.(string), ",")
- for _, v := range cnAlt {
- if strings.Contains(v, "@") {
- emailAddresses = append(emailAddresses, v)
- } else {
- dnsNames = append(dnsNames, v)
- }
- }
- }
- }
-
- // Check the CN. This ensures that the CN is checked even if it's
- // excluded from SANs.
- badName := validateNames(req, []string{cn}, role)
- if len(badName) != 0 {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "common name %s not allowed by this role", badName)}
- }
-
- // Check for bad email and/or DNS names
- badName = validateNames(req, dnsNames, role)
- if len(badName) != 0 {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "subject alternate name %s not allowed by this role", badName)}
- }
-
- badName = validateNames(req, emailAddresses, role)
- if len(badName) != 0 {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "email address %s not allowed by this role", badName)}
- }
- }
-
- // Get and verify any IP SANs
- ipAddresses := []net.IP{}
- var ipAltInt interface{}
- {
- if csr != nil && role.UseCSRSANs {
- if len(csr.IPAddresses) > 0 {
- if !role.AllowIPSANs {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "IP Subject Alternative Names are not allowed in this role, but was provided some via CSR")}
- }
- ipAddresses = csr.IPAddresses
- }
- } else {
- ipAltInt, ok = data.GetOk("ip_sans")
- if ok {
- ipAlt := ipAltInt.(string)
- if len(ipAlt) != 0 {
- if !role.AllowIPSANs {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "IP Subject Alternative Names are not allowed in this role, but was provided %s", ipAlt)}
- }
- for _, v := range strings.Split(ipAlt, ",") {
- parsedIP := net.ParseIP(v)
- if parsedIP == nil {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "the value '%s' is not a valid IP address", v)}
- }
- ipAddresses = append(ipAddresses, parsedIP)
- }
- }
- }
- }
- }
-
- // Set OU (organizationalUnit) values if specified in the role
- ou := []string{}
- {
- if role.OU != "" {
- ou = strutil.RemoveDuplicates(strutil.ParseStringSlice(role.OU, ","), false)
- }
- }
-
- // Set O (organization) values if specified in the role
- organization := []string{}
- {
- if role.Organization != "" {
- organization = strutil.RemoveDuplicates(strutil.ParseStringSlice(role.Organization, ","), false)
- }
- }
-
- // Get the TTL and verify it against the max allowed
- var ttl time.Duration
- var maxTTL time.Duration
- var notAfter time.Time
- {
- ttl = time.Duration(data.Get("ttl").(int)) * time.Second
-
- if ttl == 0 {
- if role.TTL != "" {
- ttl, err = parseutil.ParseDurationSecond(role.TTL)
- if err != nil {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "invalid role ttl: %s", err)}
- }
- }
- }
-
- if role.MaxTTL != "" {
- maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL)
- if err != nil {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "invalid role max_ttl: %s", err)}
- }
- }
-
- if ttl == 0 {
- ttl = b.System().DefaultLeaseTTL()
- }
- if maxTTL == 0 {
- maxTTL = b.System().MaxLeaseTTL()
- }
- if ttl > maxTTL {
- ttl = maxTTL
- }
-
- notAfter = time.Now().Add(ttl)
-
- // If it's not self-signed, verify that the issued certificate won't be
- // valid past the lifetime of the CA certificate
- if signingBundle != nil &&
- notAfter.After(signingBundle.Certificate.NotAfter) && !role.AllowExpirationPastCA {
-
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "cannot satisfy request, as TTL is beyond the expiration of the CA certificate")}
- }
- }
-
- // Build up usages
- var extUsage certExtKeyUsage
- {
- if role.ServerFlag {
- extUsage = extUsage | serverExtKeyUsage
- }
- if role.ClientFlag {
- extUsage = extUsage | clientExtKeyUsage
- }
- if role.CodeSigningFlag {
- extUsage = extUsage | codeSigningExtKeyUsage
- }
- if role.EmailProtectionFlag {
- extUsage = extUsage | emailProtectionExtKeyUsage
- }
- }
-
- creationBundle := &creationBundle{
- CommonName: cn,
- OU: ou,
- Organization: organization,
- DNSNames: dnsNames,
- EmailAddresses: emailAddresses,
- IPAddresses: ipAddresses,
- KeyType: role.KeyType,
- KeyBits: role.KeyBits,
- SigningBundle: signingBundle,
- NotAfter: notAfter,
- KeyUsage: x509.KeyUsage(parseKeyUsages(role.KeyUsage)),
- ExtKeyUsage: extUsage,
- }
-
- // Don't deal with URLs or max path length if it's self-signed, as these
- // normally come from the signing bundle
- if signingBundle == nil {
- return creationBundle, nil
- }
-
- // This will have been read in from the getURLs function
- creationBundle.URLs = signingBundle.URLs
-
- // If the max path length in the role is not nil, it was specified at
- // generation time with the max_path_length parameter; otherwise derive it
- // from the signing certificate
- if role.MaxPathLength != nil {
- creationBundle.MaxPathLength = *role.MaxPathLength
- } else {
- switch {
- case signingBundle.Certificate.MaxPathLen < 0:
- creationBundle.MaxPathLength = -1
- case signingBundle.Certificate.MaxPathLen == 0 &&
- signingBundle.Certificate.MaxPathLenZero:
- // The signing function will ensure that we do not issue a CA cert
- creationBundle.MaxPathLength = 0
- default:
- // If this takes it to zero, we handle this case later if
- // necessary
- creationBundle.MaxPathLength = signingBundle.Certificate.MaxPathLen - 1
- }
- }
-
- return creationBundle, nil
-}
-
-// addKeyUsages adds approrpiate key usages to the template given the creation
-// information
-func addKeyUsages(creationInfo *creationBundle, certTemplate *x509.Certificate) {
- if creationInfo.IsCA {
- certTemplate.KeyUsage = x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign)
- return
- }
-
- certTemplate.KeyUsage = creationInfo.KeyUsage
-
- if creationInfo.ExtKeyUsage&serverExtKeyUsage != 0 {
- certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageServerAuth)
- }
- if creationInfo.ExtKeyUsage&clientExtKeyUsage != 0 {
- certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageClientAuth)
- }
- if creationInfo.ExtKeyUsage&codeSigningExtKeyUsage != 0 {
- certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageCodeSigning)
- }
- if creationInfo.ExtKeyUsage&emailProtectionExtKeyUsage != 0 {
- certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageEmailProtection)
- }
-}
-
-// Performs the heavy lifting of creating a certificate. Returns
-// a fully-filled-in ParsedCertBundle.
-func createCertificate(creationInfo *creationBundle) (*certutil.ParsedCertBundle, error) {
- var err error
- result := &certutil.ParsedCertBundle{}
-
- serialNumber, err := certutil.GenerateSerialNumber()
- if err != nil {
- return nil, err
- }
-
- if err := certutil.GeneratePrivateKey(creationInfo.KeyType,
- creationInfo.KeyBits,
- result); err != nil {
- return nil, err
- }
-
- subjKeyID, err := certutil.GetSubjKeyID(result.PrivateKey)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("error getting subject key ID: %s", err)}
- }
-
- subject := pkix.Name{
- CommonName: creationInfo.CommonName,
- OrganizationalUnit: creationInfo.OU,
- Organization: creationInfo.Organization,
- }
-
- certTemplate := &x509.Certificate{
- SerialNumber: serialNumber,
- Subject: subject,
- NotBefore: time.Now().Add(-30 * time.Second),
- NotAfter: creationInfo.NotAfter,
- IsCA: false,
- SubjectKeyId: subjKeyID,
- DNSNames: creationInfo.DNSNames,
- EmailAddresses: creationInfo.EmailAddresses,
- IPAddresses: creationInfo.IPAddresses,
- }
-
- // Add this before calling addKeyUsages
- if creationInfo.SigningBundle == nil {
- certTemplate.IsCA = true
- }
-
- // This will only be filled in from the generation paths
- if len(creationInfo.PermittedDNSDomains) > 0 {
- certTemplate.PermittedDNSDomains = creationInfo.PermittedDNSDomains
- certTemplate.PermittedDNSDomainsCritical = true
- }
-
- addKeyUsages(creationInfo, certTemplate)
-
- certTemplate.IssuingCertificateURL = creationInfo.URLs.IssuingCertificates
- certTemplate.CRLDistributionPoints = creationInfo.URLs.CRLDistributionPoints
- certTemplate.OCSPServer = creationInfo.URLs.OCSPServers
-
- var certBytes []byte
- if creationInfo.SigningBundle != nil {
- switch creationInfo.SigningBundle.PrivateKeyType {
- case certutil.RSAPrivateKey:
- certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
- case certutil.ECPrivateKey:
- certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
- }
-
- caCert := creationInfo.SigningBundle.Certificate
- certTemplate.AuthorityKeyId = caCert.SubjectKeyId
-
- err = checkPermittedDNSDomains(certTemplate, caCert)
- if err != nil {
- return nil, errutil.UserError{Err: err.Error()}
- }
-
- certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, result.PrivateKey.Public(), creationInfo.SigningBundle.PrivateKey)
- } else {
- // Creating a self-signed root
- if creationInfo.MaxPathLength == 0 {
- certTemplate.MaxPathLen = 0
- certTemplate.MaxPathLenZero = true
- } else {
- certTemplate.MaxPathLen = creationInfo.MaxPathLength
- }
-
- switch creationInfo.KeyType {
- case "rsa":
- certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
- case "ec":
- certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
- }
-
- certTemplate.AuthorityKeyId = subjKeyID
- certTemplate.BasicConstraintsValid = true
- certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey)
- }
-
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)}
- }
-
- result.CertificateBytes = certBytes
- result.Certificate, err = x509.ParseCertificate(certBytes)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)}
- }
-
- if creationInfo.SigningBundle != nil {
- if len(creationInfo.SigningBundle.Certificate.AuthorityKeyId) > 0 &&
- !bytes.Equal(creationInfo.SigningBundle.Certificate.AuthorityKeyId, creationInfo.SigningBundle.Certificate.SubjectKeyId) {
-
- result.CAChain = []*certutil.CertBlock{
- &certutil.CertBlock{
- Certificate: creationInfo.SigningBundle.Certificate,
- Bytes: creationInfo.SigningBundle.CertificateBytes,
- },
- }
- result.CAChain = append(result.CAChain, creationInfo.SigningBundle.CAChain...)
- }
- }
-
- return result, nil
-}
-
-// Creates a CSR. This is currently only meant for use when
-// generating an intermediate certificate.
-func createCSR(creationInfo *creationBundle) (*certutil.ParsedCSRBundle, error) {
- var err error
- result := &certutil.ParsedCSRBundle{}
-
- if err := certutil.GeneratePrivateKey(creationInfo.KeyType,
- creationInfo.KeyBits,
- result); err != nil {
- return nil, err
- }
-
- // Like many root CAs, other information is ignored
- subject := pkix.Name{
- CommonName: creationInfo.CommonName,
- }
-
- csrTemplate := &x509.CertificateRequest{
- Subject: subject,
- DNSNames: creationInfo.DNSNames,
- EmailAddresses: creationInfo.EmailAddresses,
- IPAddresses: creationInfo.IPAddresses,
- }
-
- switch creationInfo.KeyType {
- case "rsa":
- csrTemplate.SignatureAlgorithm = x509.SHA256WithRSA
- case "ec":
- csrTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
- }
-
- csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, result.PrivateKey)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)}
- }
-
- result.CSRBytes = csr
- result.CSR, err = x509.ParseCertificateRequest(csr)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %v", err)}
- }
-
- return result, nil
-}
-
-// Performs the heavy lifting of generating a certificate from a CSR.
-// Returns a ParsedCertBundle sans private keys.
-func signCertificate(creationInfo *creationBundle,
- csr *x509.CertificateRequest) (*certutil.ParsedCertBundle, error) {
- switch {
- case creationInfo == nil:
- return nil, errutil.UserError{Err: "nil creation info given to signCertificate"}
- case creationInfo.SigningBundle == nil:
- return nil, errutil.UserError{Err: "nil signing bundle given to signCertificate"}
- case csr == nil:
- return nil, errutil.UserError{Err: "nil csr given to signCertificate"}
- }
-
- err := csr.CheckSignature()
- if err != nil {
- return nil, errutil.UserError{Err: "request signature invalid"}
- }
-
- result := &certutil.ParsedCertBundle{}
-
- serialNumber, err := certutil.GenerateSerialNumber()
- if err != nil {
- return nil, err
- }
-
- marshaledKey, err := x509.MarshalPKIXPublicKey(csr.PublicKey)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)}
- }
- subjKeyID := sha1.Sum(marshaledKey)
-
- caCert := creationInfo.SigningBundle.Certificate
-
- subject := pkix.Name{
- CommonName: creationInfo.CommonName,
- OrganizationalUnit: creationInfo.OU,
- Organization: creationInfo.Organization,
- }
-
- certTemplate := &x509.Certificate{
- SerialNumber: serialNumber,
- Subject: subject,
- NotBefore: time.Now().Add(-30 * time.Second),
- NotAfter: creationInfo.NotAfter,
- SubjectKeyId: subjKeyID[:],
- AuthorityKeyId: caCert.SubjectKeyId,
- }
-
- switch creationInfo.SigningBundle.PrivateKeyType {
- case certutil.RSAPrivateKey:
- certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
- case certutil.ECPrivateKey:
- certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
- }
-
- if creationInfo.UseCSRValues {
- certTemplate.Subject = csr.Subject
-
- certTemplate.DNSNames = csr.DNSNames
- certTemplate.EmailAddresses = csr.EmailAddresses
- certTemplate.IPAddresses = csr.IPAddresses
-
- certTemplate.ExtraExtensions = csr.Extensions
- } else {
- certTemplate.DNSNames = creationInfo.DNSNames
- certTemplate.EmailAddresses = creationInfo.EmailAddresses
- certTemplate.IPAddresses = creationInfo.IPAddresses
- }
-
- addKeyUsages(creationInfo, certTemplate)
-
- var certBytes []byte
-
- certTemplate.IssuingCertificateURL = creationInfo.URLs.IssuingCertificates
- certTemplate.CRLDistributionPoints = creationInfo.URLs.CRLDistributionPoints
- certTemplate.OCSPServer = creationInfo.SigningBundle.URLs.OCSPServers
-
- if creationInfo.IsCA {
- certTemplate.BasicConstraintsValid = true
- certTemplate.IsCA = true
-
- if creationInfo.SigningBundle.Certificate.MaxPathLen == 0 &&
- creationInfo.SigningBundle.Certificate.MaxPathLenZero {
- return nil, errutil.UserError{Err: "signing certificate has a max path length of zero, and cannot issue further CA certificates"}
- }
-
- certTemplate.MaxPathLen = creationInfo.MaxPathLength
- if certTemplate.MaxPathLen == 0 {
- certTemplate.MaxPathLenZero = true
- }
- }
-
- if len(creationInfo.PermittedDNSDomains) > 0 {
- certTemplate.PermittedDNSDomains = creationInfo.PermittedDNSDomains
- certTemplate.PermittedDNSDomainsCritical = true
- }
- err = checkPermittedDNSDomains(certTemplate, caCert)
- if err != nil {
- return nil, errutil.UserError{Err: err.Error()}
- }
-
- certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, csr.PublicKey, creationInfo.SigningBundle.PrivateKey)
-
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)}
- }
-
- result.CertificateBytes = certBytes
- result.Certificate, err = x509.ParseCertificate(certBytes)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)}
- }
-
- result.CAChain = creationInfo.SigningBundle.GetCAChain()
-
- return result, nil
-}
-
-func checkPermittedDNSDomains(template, ca *x509.Certificate) error {
- if len(ca.PermittedDNSDomains) == 0 {
- return nil
- }
-
- namesToCheck := map[string]struct{}{
- template.Subject.CommonName: struct{}{},
- }
- for _, name := range template.DNSNames {
- namesToCheck[name] = struct{}{}
- }
-
- var badName string
-NameCheck:
- for name := range namesToCheck {
- for _, perm := range ca.PermittedDNSDomains {
- switch {
- case strings.HasPrefix(perm, ".") && strings.HasSuffix(name, perm):
- // .example.com matches my.host.example.com and
- // host.example.com but does not match example.com
- break NameCheck
- case perm == name:
- break NameCheck
- }
- }
- badName = name
- break
- }
-
- if badName == "" {
- return nil
- }
-
- return fmt.Errorf("name %q disallowed by CA's permitted DNS domains", badName)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util_test.go
deleted file mode 100644
index 068a0a6..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package pki
-
-import (
- "fmt"
- "testing"
-
- "strings"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestPki_FetchCertBySerial(t *testing.T) {
- storage := &logical.InmemStorage{}
-
- cases := map[string]struct {
- Req *logical.Request
- Prefix string
- Serial string
- }{
- "valid cert": {
- &logical.Request{
- Storage: storage,
- },
- "certs/",
- "00:00:00:00:00:00:00:00",
- },
- "revoked cert": {
- &logical.Request{
- Storage: storage,
- },
- "revoked/",
- "11:11:11:11:11:11:11:11",
- },
- }
-
- // Test for colon-based paths in storage
- for name, tc := range cases {
- storageKey := fmt.Sprintf("%s%s", tc.Prefix, tc.Serial)
- err := storage.Put(&logical.StorageEntry{
- Key: storageKey,
- Value: []byte("some data"),
- })
- if err != nil {
- t.Fatalf("error writing to storage on %s colon-based storage path: %s", name, err)
- }
-
- certEntry, err := fetchCertBySerial(tc.Req, tc.Prefix, tc.Serial)
- if err != nil {
- t.Fatalf("error on %s for colon-based storage path: %s", name, err)
- }
-
- // Check for non-nil on valid/revoked certs
- if certEntry == nil {
- t.Fatalf("nil on %s for colon-based storage path", name)
- }
-
- // Ensure that cert serials are converted/updated after fetch
- expectedKey := tc.Prefix + normalizeSerial(tc.Serial)
- se, err := storage.Get(expectedKey)
- if err != nil {
- t.Fatalf("error on %s for colon-based storage path:%s", name, err)
- }
- if strings.Compare(expectedKey, se.Key) != 0 {
- t.Fatalf("expected: %s, got: %s", expectedKey, certEntry.Key)
- }
- }
-
- // Reset storage
- storage = &logical.InmemStorage{}
-
- // Test for hyphen-base paths in storage
- for name, tc := range cases {
- storageKey := tc.Prefix + normalizeSerial(tc.Serial)
- err := storage.Put(&logical.StorageEntry{
- Key: storageKey,
- Value: []byte("some data"),
- })
- if err != nil {
- t.Fatalf("error writing to storage on %s hyphen-based storage path: %s", name, err)
- }
-
- certEntry, err := fetchCertBySerial(tc.Req, tc.Prefix, tc.Serial)
- if err != nil || certEntry == nil {
- t.Fatalf("error on %s for hyphen-based storage path: err: %v, entry: %v", name, err, certEntry)
- }
- }
-
- noConvCases := map[string]struct {
- Req *logical.Request
- Prefix string
- Serial string
- }{
- "ca": {
- &logical.Request{
- Storage: storage,
- },
- "",
- "ca",
- },
- "crl": {
- &logical.Request{
- Storage: storage,
- },
- "",
- "crl",
- },
- }
-
- // Test for ca and crl case
- for name, tc := range noConvCases {
- err := storage.Put(&logical.StorageEntry{
- Key: tc.Serial,
- Value: []byte("some data"),
- })
- if err != nil {
- t.Fatalf("error writing to storage on %s: %s", name, err)
- }
-
- certEntry, err := fetchCertBySerial(tc.Req, tc.Prefix, tc.Serial)
- if err != nil || certEntry == nil {
- t.Fatalf("error on %s: err: %v, entry: %v", name, err, certEntry)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/crl_util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/crl_util.go
deleted file mode 100644
index c40e759..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/crl_util.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package pki
-
-import (
- "crypto/rand"
- "crypto/x509"
- "crypto/x509/pkix"
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/logical"
-)
-
-type revocationInfo struct {
- CertificateBytes []byte `json:"certificate_bytes"`
- RevocationTime int64 `json:"revocation_time"`
- RevocationTimeUTC time.Time `json:"revocation_time_utc"`
-}
-
-// Revokes a cert, and tries to be smart about error recovery
-func revokeCert(b *backend, req *logical.Request, serial string, fromLease bool) (*logical.Response, error) {
- // As this backend is self-contained and this function does not hook into
- // third parties to manage users or resources, if the mount is tainted,
- // revocation doesn't matter anyways -- the CRL that would be written will
- // be immediately blown away by the view being cleared. So we can simply
- // fast path a successful exit.
- if b.System().Tainted() {
- return nil, nil
- }
-
- alreadyRevoked := false
- var revInfo revocationInfo
-
- revEntry, err := fetchCertBySerial(req, "revoked/", serial)
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- return logical.ErrorResponse(err.Error()), nil
- case errutil.InternalError:
- return nil, err
- }
- }
- if revEntry != nil {
- // Set the revocation info to the existing values
- alreadyRevoked = true
- err = revEntry.DecodeJSON(&revInfo)
- if err != nil {
- return nil, fmt.Errorf("Error decoding existing revocation info")
- }
- }
-
- if !alreadyRevoked {
- certEntry, err := fetchCertBySerial(req, "certs/", serial)
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- return logical.ErrorResponse(err.Error()), nil
- case errutil.InternalError:
- return nil, err
- }
- }
- if certEntry == nil {
- return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found", serial)), nil
- }
-
- cert, err := x509.ParseCertificate(certEntry.Value)
- if err != nil {
- return nil, fmt.Errorf("Error parsing certificate: %s", err)
- }
- if cert == nil {
- return nil, fmt.Errorf("Got a nil certificate")
- }
-
- if cert.NotAfter.Before(time.Now()) {
- return nil, nil
- }
-
- // Compatibility: Don't revoke CAs if they had leases. New CAs going
- // forward aren't issued leases.
- if cert.IsCA && fromLease {
- return nil, nil
- }
-
- currTime := time.Now()
- revInfo.CertificateBytes = certEntry.Value
- revInfo.RevocationTime = currTime.Unix()
- revInfo.RevocationTimeUTC = currTime.UTC()
-
- revEntry, err = logical.StorageEntryJSON("revoked/"+normalizeSerial(serial), revInfo)
- if err != nil {
- return nil, fmt.Errorf("Error creating revocation entry")
- }
-
- err = req.Storage.Put(revEntry)
- if err != nil {
- return nil, fmt.Errorf("Error saving revoked certificate to new location")
- }
-
- }
-
- crlErr := buildCRL(b, req)
- switch crlErr.(type) {
- case errutil.UserError:
- return logical.ErrorResponse(fmt.Sprintf("Error during CRL building: %s", crlErr)), nil
- case errutil.InternalError:
- return nil, fmt.Errorf("Error encountered during CRL building: %s", crlErr)
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "revocation_time": revInfo.RevocationTime,
- },
- }
- if !revInfo.RevocationTimeUTC.IsZero() {
- resp.Data["revocation_time_rfc3339"] = revInfo.RevocationTimeUTC.Format(time.RFC3339Nano)
- }
- return resp, nil
-}
-
-// Builds a CRL by going through the list of revoked certificates and building
-// a new CRL with the stored revocation times and serial numbers.
-func buildCRL(b *backend, req *logical.Request) error {
- revokedSerials, err := req.Storage.List("revoked/")
- if err != nil {
- return errutil.InternalError{Err: fmt.Sprintf("Error fetching list of revoked certs: %s", err)}
- }
-
- revokedCerts := []pkix.RevokedCertificate{}
- var revInfo revocationInfo
- for _, serial := range revokedSerials {
- revokedEntry, err := req.Storage.Get("revoked/" + serial)
- if err != nil {
- return errutil.InternalError{Err: fmt.Sprintf("Unable to fetch revoked cert with serial %s: %s", serial, err)}
- }
- if revokedEntry == nil {
- return errutil.InternalError{Err: fmt.Sprintf("Revoked certificate entry for serial %s is nil", serial)}
- }
- if revokedEntry.Value == nil || len(revokedEntry.Value) == 0 {
- // TODO: In this case, remove it and continue? How likely is this to
- // happen? Alternately, could skip it entirely, or could implement a
- // delete function so that there is a way to remove these
- return errutil.InternalError{Err: fmt.Sprintf("Found revoked serial but actual certificate is empty")}
- }
-
- err = revokedEntry.DecodeJSON(&revInfo)
- if err != nil {
- return errutil.InternalError{Err: fmt.Sprintf("Error decoding revocation entry for serial %s: %s", serial, err)}
- }
-
- revokedCert, err := x509.ParseCertificate(revInfo.CertificateBytes)
- if err != nil {
- return errutil.InternalError{Err: fmt.Sprintf("Unable to parse stored revoked certificate with serial %s: %s", serial, err)}
- }
-
- // NOTE: We have to change this to UTC time because the CRL standard
- // mandates it but Go will happily encode the CRL without this.
- newRevCert := pkix.RevokedCertificate{
- SerialNumber: revokedCert.SerialNumber,
- }
- if !revInfo.RevocationTimeUTC.IsZero() {
- newRevCert.RevocationTime = revInfo.RevocationTimeUTC
- } else {
- newRevCert.RevocationTime = time.Unix(revInfo.RevocationTime, 0).UTC()
- }
- revokedCerts = append(revokedCerts, newRevCert)
- }
-
- signingBundle, caErr := fetchCAInfo(req)
- switch caErr.(type) {
- case errutil.UserError:
- return errutil.UserError{Err: fmt.Sprintf("Could not fetch the CA certificate: %s", caErr)}
- case errutil.InternalError:
- return errutil.InternalError{Err: fmt.Sprintf("Error fetching CA certificate: %s", caErr)}
- }
-
- crlLifetime := b.crlLifetime
- crlInfo, err := b.CRL(req.Storage)
- if err != nil {
- return errutil.InternalError{Err: fmt.Sprintf("Error fetching CRL config information: %s", err)}
- }
- if crlInfo != nil {
- crlDur, err := time.ParseDuration(crlInfo.Expiry)
- if err != nil {
- return errutil.InternalError{Err: fmt.Sprintf("Error parsing CRL duration of %s", crlInfo.Expiry)}
- }
- crlLifetime = crlDur
- }
-
- crlBytes, err := signingBundle.Certificate.CreateCRL(rand.Reader, signingBundle.PrivateKey, revokedCerts, time.Now(), time.Now().Add(crlLifetime))
- if err != nil {
- return errutil.InternalError{Err: fmt.Sprintf("Error creating new CRL: %s", err)}
- }
-
- err = req.Storage.Put(&logical.StorageEntry{
- Key: "crl",
- Value: crlBytes,
- })
- if err != nil {
- return errutil.InternalError{Err: fmt.Sprintf("Error storing CRL: %s", err)}
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go
deleted file mode 100644
index 52adf10..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package pki
-
-import "github.com/hashicorp/vault/logical/framework"
-
-// addIssueAndSignCommonFields adds fields common to both CA and non-CA issuing
-// and signing
-func addIssueAndSignCommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
- fields["exclude_cn_from_sans"] = &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: `If true, the Common Name will not be
-included in DNS or Email Subject Alternate Names.
-Defaults to false (CN is included).`,
- }
-
- fields["format"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "pem",
- Description: `Format for returned data. Can be "pem", "der",
-or "pem_bundle". If "pem_bundle" any private
-key and issuing cert will be appended to the
-certificate pem. Defaults to "pem".`,
- }
-
- fields["ip_sans"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The requested IP SANs, if any, in a
-comma-delimited list`,
- }
-
- return fields
-}
-
-// addNonCACommonFields adds fields with help text specific to non-CA
-// certificate issuing and signing
-func addNonCACommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
- fields = addIssueAndSignCommonFields(fields)
-
- fields["role"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The desired role with configuration for this
-request`,
- }
-
- fields["common_name"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The requested common name; if you want more than
-one, specify the alternative names in the
-alt_names map. If email protection is enabled
-in the role, this may be an email address.`,
- }
-
- fields["alt_names"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The requested Subject Alternative Names, if any,
-in a comma-delimited list. If email protection
-is enabled for the role, this may contain
-email addresses.`,
- }
-
- fields["ttl"] = &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `The requested Time To Live for the certificate;
-sets the expiration date. If not specified
-the role default, backend default, or system
-default TTL is used, in that order. Cannot
-be later than the role max TTL.`,
- }
-
- return fields
-}
-
-// addCACommonFields adds fields with help text specific to CA
-// certificate issuing and signing
-func addCACommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
- fields = addIssueAndSignCommonFields(fields)
-
- fields["alt_names"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The requested Subject Alternative Names, if any,
-in a comma-delimited list. May contain both
-DNS names and email addresses.`,
- }
-
- fields["common_name"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The requested common name; if you want more than
-one, specify the alternative names in the alt_names
-map. If not specified when signing, the common
-name will be taken from the CSR; other names
-must still be specified in alt_names or ip_sans.`,
- }
-
- fields["ttl"] = &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `The requested Time To Live for the certificate;
-sets the expiration date. If not specified
-the role default, backend default, or system
-default TTL is used, in that order. Cannot
-be larger than the mount max TTL. Note:
-this only has an effect when generating
-a CA cert or signing a CA cert, not when
-generating a CSR for an intermediate CA.`,
- }
-
- return fields
-}
-
-// addCAKeyGenerationFields adds fields with help text specific to CA key
-// generation and exporting
-func addCAKeyGenerationFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
- fields["exported"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Must be "internal" or "exported". If set to
-"exported", the generated private key will be
-returned. This is your *only* chance to retrieve
-the private key!`,
- }
-
- fields["key_bits"] = &framework.FieldSchema{
- Type: framework.TypeInt,
- Default: 2048,
- Description: `The number of bits to use. You will almost
-certainly want to change this if you adjust
-the key_type.`,
- }
-
- fields["key_type"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "rsa",
- Description: `The type of key to use; defaults to RSA. "rsa"
-and "ec" are the only valid values.`,
- }
-
- return fields
-}
-
-// addCAIssueFields adds fields common to CA issuing, e.g. when returning
-// an actual certificate
-func addCAIssueFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
- fields["max_path_length"] = &framework.FieldSchema{
- Type: framework.TypeInt,
- Default: -1,
- Description: "The maximum allowable path length",
- }
-
- fields["permitted_dns_domains"] = &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: `Domains for which this certificate is allowed to sign or issue child certificates. If set, all DNS names (subject and alt) on child certs must be exact matches or subsets of the given domains (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`,
- }
-
- return fields
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go
deleted file mode 100644
index 347ac01..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package pki
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigCA(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/ca",
- Fields: map[string]*framework.FieldSchema{
- "pem_bundle": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `PEM-format, concatenated unencrypted
-secret key and certificate.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathCAWrite,
- },
-
- HelpSynopsis: pathConfigCAHelpSyn,
- HelpDescription: pathConfigCAHelpDesc,
- }
-}
-
-func (b *backend) pathCAWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- pemBundle := data.Get("pem_bundle").(string)
-
- parsedBundle, err := certutil.ParsePEMBundle(pemBundle)
- if err != nil {
- switch err.(type) {
- case errutil.InternalError:
- return nil, err
- default:
- return logical.ErrorResponse(err.Error()), nil
- }
- }
-
- if parsedBundle.PrivateKey == nil ||
- parsedBundle.PrivateKeyType == certutil.UnknownPrivateKey {
- return logical.ErrorResponse("private key not found in the PEM bundle"), nil
- }
-
- if parsedBundle.Certificate == nil {
- return logical.ErrorResponse("no certificate found in the PEM bundle"), nil
- }
-
- if !parsedBundle.Certificate.IsCA {
- return logical.ErrorResponse("the given certificate is not marked for CA use and cannot be used with this backend"), nil
- }
-
- cb, err := parsedBundle.ToCertBundle()
- if err != nil {
- return nil, fmt.Errorf("error converting raw values into cert bundle: %s", err)
- }
-
- entry, err := logical.StorageEntryJSON("config/ca_bundle", cb)
- if err != nil {
- return nil, err
- }
- err = req.Storage.Put(entry)
- if err != nil {
- return nil, err
- }
-
- // For ease of later use, also store just the certificate at a known
- // location, plus a fresh CRL
- entry.Key = "ca"
- entry.Value = parsedBundle.CertificateBytes
- err = req.Storage.Put(entry)
- if err != nil {
- return nil, err
- }
-
- err = buildCRL(b, req)
-
- return nil, err
-}
-
-const pathConfigCAHelpSyn = `
-Set the CA certificate and private key used for generated credentials.
-`
-
-const pathConfigCAHelpDesc = `
-This sets the CA information used for credentials generated by this
-by this mount. This must be a PEM-format, concatenated unencrypted
-secret key and certificate.
-
-For security reasons, the secret key cannot be retrieved later.
-`
-
-const pathConfigCAGenerateHelpSyn = `
-Generate a new CA certificate and private key used for signing.
-`
-
-const pathConfigCAGenerateHelpDesc = `
-This path generates a CA certificate and private key to be used for
-credentials generated by this mount. The path can either
-end in "internal" or "exported"; this controls whether the
-unencrypted private key is exported after generation. This will
-be your only chance to export the private key; for security reasons
-it cannot be read or exported later.
-
-If the "type" option is set to "self-signed", the generated
-certificate will be a self-signed root CA. Otherwise, this mount
-will act as an intermediate CA; a CSR will be returned, to be signed
-by your chosen CA (which could be another mount of this backend).
-Note that the CRL path will be set to this mount's CRL path; if you
-need further customization it is recommended that you create a CSR
-separately and get it signed. Either way, use the "config/ca/set"
-endpoint to load the signed certificate into Vault.
-`
-
-const pathConfigCASignHelpSyn = `
-Generate a signed CA certificate from a CSR.
-`
-
-const pathConfigCASignHelpDesc = `
-This path generates a CA certificate to be used for credentials
-generated by the certificate's destination mount.
-
-Use the "config/ca/set" endpoint to load the signed certificate
-into Vault another Vault mount.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_crl.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_crl.go
deleted file mode 100644
index 36db15d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_crl.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package pki
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// CRLConfig holds basic CRL configuration information
-type crlConfig struct {
- Expiry string `json:"expiry" mapstructure:"expiry" structs:"expiry"`
-}
-
-func pathConfigCRL(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/crl",
- Fields: map[string]*framework.FieldSchema{
- "expiry": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The amount of time the generated CRL should be
-valid; defaults to 72 hours`,
- Default: "72h",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathCRLRead,
- logical.UpdateOperation: b.pathCRLWrite,
- },
-
- HelpSynopsis: pathConfigCRLHelpSyn,
- HelpDescription: pathConfigCRLHelpDesc,
- }
-}
-
-func (b *backend) CRL(s logical.Storage) (*crlConfig, error) {
- entry, err := s.Get("config/crl")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result crlConfig
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathCRLRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- config, err := b.CRL(req.Storage)
- if err != nil {
- return nil, err
- }
- if config == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "expiry": config.Expiry,
- },
- }, nil
-}
-
-func (b *backend) pathCRLWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- expiry := d.Get("expiry").(string)
-
- _, err := time.ParseDuration(expiry)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Given expiry could not be decoded: %s", err)), nil
- }
-
- config := &crlConfig{
- Expiry: expiry,
- }
-
- entry, err := logical.StorageEntryJSON("config/crl", config)
- if err != nil {
- return nil, err
- }
- err = req.Storage.Put(entry)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-const pathConfigCRLHelpSyn = `
-Configure the CRL expiration.
-`
-
-const pathConfigCRLHelpDesc = `
-This endpoint allows configuration of the CRL lifetime.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_urls.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_urls.go
deleted file mode 100644
index 42ce1ca..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_urls.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package pki
-
-import (
- "fmt"
- "strings"
-
- "github.com/asaskevich/govalidator"
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigURLs(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/urls",
- Fields: map[string]*framework.FieldSchema{
- "issuing_certificates": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Comma-separated list of URLs to be used
-for the issuing certificate attribute`,
- },
-
- "crl_distribution_points": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Comma-separated list of URLs to be used
-for the CRL distribution points attribute`,
- },
-
- "ocsp_servers": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Comma-separated list of URLs to be used
-for the OCSP servers attribute`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathWriteURL,
- logical.ReadOperation: b.pathReadURL,
- },
-
- HelpSynopsis: pathConfigURLsHelpSyn,
- HelpDescription: pathConfigURLsHelpDesc,
- }
-}
-
-func validateURLs(urls []string) string {
- for _, curr := range urls {
- if !govalidator.IsURL(curr) {
- return curr
- }
- }
-
- return ""
-}
-
-func getURLs(req *logical.Request) (*urlEntries, error) {
- entry, err := req.Storage.Get("urls")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var entries urlEntries
- if err := entry.DecodeJSON(&entries); err != nil {
- return nil, err
- }
-
- return &entries, nil
-}
-
-func writeURLs(req *logical.Request, entries *urlEntries) error {
- entry, err := logical.StorageEntryJSON("urls", entries)
- if err != nil {
- return err
- }
- if entry == nil {
- return fmt.Errorf("Unable to marshal entry into JSON")
- }
-
- err = req.Storage.Put(entry)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (b *backend) pathReadURL(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entries, err := getURLs(req)
- if err != nil {
- return nil, err
- }
- if entries == nil {
- return nil, nil
- }
-
- resp := &logical.Response{
- Data: structs.New(entries).Map(),
- }
-
- return resp, nil
-}
-
-func (b *backend) pathWriteURL(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entries, err := getURLs(req)
- if err != nil {
- return nil, err
- }
- if entries == nil {
- entries = &urlEntries{
- IssuingCertificates: []string{},
- CRLDistributionPoints: []string{},
- OCSPServers: []string{},
- }
- }
-
- if urlsInt, ok := data.GetOk("issuing_certificates"); ok {
- splitURLs := strings.Split(urlsInt.(string), ",")
- entries.IssuingCertificates = splitURLs
- if badURL := validateURLs(entries.IssuingCertificates); badURL != "" {
- return logical.ErrorResponse(fmt.Sprintf(
- "invalid URL found in issuing certificates: %s", badURL)), nil
- }
- }
- if urlsInt, ok := data.GetOk("crl_distribution_points"); ok {
- splitURLs := strings.Split(urlsInt.(string), ",")
- entries.CRLDistributionPoints = splitURLs
- if badURL := validateURLs(entries.CRLDistributionPoints); badURL != "" {
- return logical.ErrorResponse(fmt.Sprintf(
- "invalid URL found in CRL distribution points: %s", badURL)), nil
- }
- }
- if urlsInt, ok := data.GetOk("ocsp_servers"); ok {
- splitURLs := strings.Split(urlsInt.(string), ",")
- entries.OCSPServers = splitURLs
- if badURL := validateURLs(entries.OCSPServers); badURL != "" {
- return logical.ErrorResponse(fmt.Sprintf(
- "invalid URL found in OCSP servers: %s", badURL)), nil
- }
- }
-
- return nil, writeURLs(req, entries)
-}
-
-type urlEntries struct {
- IssuingCertificates []string `json:"issuing_certificates" structs:"issuing_certificates" mapstructure:"issuing_certificates"`
- CRLDistributionPoints []string `json:"crl_distribution_points" structs:"crl_distribution_points" mapstructure:"crl_distribution_points"`
- OCSPServers []string `json:"ocsp_servers" structs:"ocsp_servers" mapstructure:"ocsp_servers"`
-}
-
-const pathConfigURLsHelpSyn = `
-Set the URLs for the issuing CA, CRL distribution points, and OCSP servers.
-`
-
-const pathConfigURLsHelpDesc = `
-This path allows you to set the issuing CA, CRL distribution points, and
-OCSP server URLs that will be encoded into issued certificates. If these
-values are not set, no such information will be encoded in the issued
-certificates. To delete URLs, simply re-set the appropriate value with an
-empty string.
-
-Multiple URLs can be specified for each type; use commas to separate them.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go
deleted file mode 100644
index cf71b4c..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go
+++ /dev/null
@@ -1,270 +0,0 @@
-package pki
-
-import (
- "encoding/pem"
- "fmt"
-
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// Returns the CA in raw format
-func pathFetchCA(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `ca(/pem)?`,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathFetchRead,
- },
-
- HelpSynopsis: pathFetchHelpSyn,
- HelpDescription: pathFetchHelpDesc,
- }
-}
-
-// Returns the CA chain
-func pathFetchCAChain(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `(cert/)?ca_chain`,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathFetchRead,
- },
-
- HelpSynopsis: pathFetchHelpSyn,
- HelpDescription: pathFetchHelpDesc,
- }
-}
-
-// Returns the CRL in raw format
-func pathFetchCRL(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `crl(/pem)?`,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathFetchRead,
- },
-
- HelpSynopsis: pathFetchHelpSyn,
- HelpDescription: pathFetchHelpDesc,
- }
-}
-
-// Returns any valid (non-revoked) cert. Since "ca" fits the pattern, this path
-// also handles returning the CA cert in a non-raw format.
-func pathFetchValid(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `cert/(?P[0-9A-Fa-f-:]+)`,
- Fields: map[string]*framework.FieldSchema{
- "serial": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Certificate serial number, in colon- or
-hyphen-separated octal`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathFetchRead,
- },
-
- HelpSynopsis: pathFetchHelpSyn,
- HelpDescription: pathFetchHelpDesc,
- }
-}
-
-// This returns the CRL in a non-raw format
-func pathFetchCRLViaCertPath(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `cert/crl`,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathFetchRead,
- },
-
- HelpSynopsis: pathFetchHelpSyn,
- HelpDescription: pathFetchHelpDesc,
- }
-}
-
-// This returns the list of serial numbers for certs
-func pathFetchListCerts(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "certs/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathFetchCertList,
- },
-
- HelpSynopsis: pathFetchHelpSyn,
- HelpDescription: pathFetchHelpDesc,
- }
-}
-
-func (b *backend) pathFetchCertList(req *logical.Request, data *framework.FieldData) (response *logical.Response, retErr error) {
- entries, err := req.Storage.List("certs/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
-}
-
-func (b *backend) pathFetchRead(req *logical.Request, data *framework.FieldData) (response *logical.Response, retErr error) {
- var serial, pemType, contentType string
- var certEntry, revokedEntry *logical.StorageEntry
- var funcErr error
- var certificate []byte
- var revocationTime int64
- response = &logical.Response{
- Data: map[string]interface{}{},
- }
-
- // Some of these need to return raw and some non-raw;
- // this is basically handled by setting contentType or not.
- // Errors don't cause an immediate exit, because the raw
- // paths still need to return raw output.
-
- switch {
- case req.Path == "ca" || req.Path == "ca/pem":
- serial = "ca"
- contentType = "application/pkix-cert"
- if req.Path == "ca/pem" {
- pemType = "CERTIFICATE"
- }
- case req.Path == "ca_chain" || req.Path == "cert/ca_chain":
- serial = "ca_chain"
- if req.Path == "ca_chain" {
- contentType = "application/pkix-cert"
- }
- case req.Path == "crl" || req.Path == "crl/pem":
- serial = "crl"
- contentType = "application/pkix-crl"
- if req.Path == "crl/pem" {
- pemType = "X509 CRL"
- }
- case req.Path == "cert/crl":
- serial = "crl"
- pemType = "X509 CRL"
- default:
- serial = data.Get("serial").(string)
- pemType = "CERTIFICATE"
- }
- if len(serial) == 0 {
- response = logical.ErrorResponse("The serial number must be provided")
- goto reply
- }
-
- if serial == "ca_chain" {
- caInfo, err := fetchCAInfo(req)
- switch err.(type) {
- case errutil.UserError:
- response = logical.ErrorResponse(err.Error())
- goto reply
- case errutil.InternalError:
- retErr = err
- goto reply
- }
-
- caChain := caInfo.GetCAChain()
- for _, ca := range caChain {
- block := pem.Block{
- Type: "CERTIFICATE",
- Bytes: ca.Bytes,
- }
- certificate = append(certificate, pem.EncodeToMemory(&block)...)
- }
- goto reply
- }
-
- certEntry, funcErr = fetchCertBySerial(req, req.Path, serial)
- if funcErr != nil {
- switch funcErr.(type) {
- case errutil.UserError:
- response = logical.ErrorResponse(funcErr.Error())
- goto reply
- case errutil.InternalError:
- retErr = funcErr
- goto reply
- }
- }
- if certEntry == nil {
- response = nil
- goto reply
- }
-
- certificate = certEntry.Value
-
- if len(pemType) != 0 {
- block := pem.Block{
- Type: pemType,
- Bytes: certEntry.Value,
- }
- certificate = pem.EncodeToMemory(&block)
- }
-
- revokedEntry, funcErr = fetchCertBySerial(req, "revoked/", serial)
- if funcErr != nil {
- switch funcErr.(type) {
- case errutil.UserError:
- response = logical.ErrorResponse(funcErr.Error())
- goto reply
- case errutil.InternalError:
- retErr = funcErr
- goto reply
- }
- }
- if revokedEntry != nil {
- var revInfo revocationInfo
- err := revokedEntry.DecodeJSON(&revInfo)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error decoding revocation entry for serial %s: %s", serial, err)), nil
- }
- revocationTime = revInfo.RevocationTime
- }
-
-reply:
- switch {
- case len(contentType) != 0:
- response = &logical.Response{
- Data: map[string]interface{}{
- logical.HTTPContentType: contentType,
- logical.HTTPRawBody: certificate,
- }}
- if retErr != nil {
- if b.Logger().IsWarn() {
- b.Logger().Warn("Possible error, but cannot return in raw response. Note that an empty CA probably means none was configured, and an empty CRL is possibly correct", "error", retErr)
- }
- }
- retErr = nil
- if len(certificate) > 0 {
- response.Data[logical.HTTPStatusCode] = 200
- } else {
- response.Data[logical.HTTPStatusCode] = 204
- }
- case retErr != nil:
- response = nil
- return
- case response == nil:
- return
- case response.IsError():
- return response, nil
- default:
- response.Data["certificate"] = string(certificate)
- response.Data["revocation_time"] = revocationTime
- }
-
- return
-}
-
-const pathFetchHelpSyn = `
-Fetch a CA, CRL, CA Chain, or non-revoked certificate.
-`
-
-const pathFetchHelpDesc = `
-This allows certificates to be fetched. If using the fetch/ prefix any non-revoked certificate can be fetched.
-
-Using "ca" or "crl" as the value fetches the appropriate information in DER encoding. Add "/pem" to either to get PEM encoding.
-
-Using "ca_chain" as the value fetches the certificate authority trust chain in PEM encoding.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go
deleted file mode 100644
index 2073621..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package pki
-
-import (
- "encoding/base64"
- "fmt"
-
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathGenerateIntermediate(b *backend) *framework.Path {
- ret := &framework.Path{
- Pattern: "intermediate/generate/" + framework.GenericNameRegex("exported"),
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathGenerateIntermediate,
- },
-
- HelpSynopsis: pathGenerateIntermediateHelpSyn,
- HelpDescription: pathGenerateIntermediateHelpDesc,
- }
-
- ret.Fields = addCACommonFields(map[string]*framework.FieldSchema{})
- ret.Fields = addCAKeyGenerationFields(ret.Fields)
-
- return ret
-}
-
-func pathSetSignedIntermediate(b *backend) *framework.Path {
- ret := &framework.Path{
- Pattern: "intermediate/set-signed",
-
- Fields: map[string]*framework.FieldSchema{
- "certificate": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `PEM-format certificate. This must be a CA
-certificate with a public key matching the
-previously-generated key from the generation
-endpoint.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathSetSignedIntermediate,
- },
-
- HelpSynopsis: pathSetSignedIntermediateHelpSyn,
- HelpDescription: pathSetSignedIntermediateHelpDesc,
- }
-
- return ret
-}
-
-func (b *backend) pathGenerateIntermediate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- var err error
-
- exported, format, role, errorResp := b.getGenerationParams(data)
- if errorResp != nil {
- return errorResp, nil
- }
-
- var resp *logical.Response
- parsedBundle, err := generateIntermediateCSR(b, role, nil, req, data)
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- return logical.ErrorResponse(err.Error()), nil
- case errutil.InternalError:
- return nil, err
- }
- }
-
- csrb, err := parsedBundle.ToCSRBundle()
- if err != nil {
- return nil, fmt.Errorf("Error converting raw CSR bundle to CSR bundle: %s", err)
- }
-
- resp = &logical.Response{
- Data: map[string]interface{}{},
- }
-
- switch format {
- case "pem":
- resp.Data["csr"] = csrb.CSR
- if exported {
- resp.Data["private_key"] = csrb.PrivateKey
- resp.Data["private_key_type"] = csrb.PrivateKeyType
- }
-
- case "pem_bundle":
- resp.Data["csr"] = csrb.CSR
- if exported {
- resp.Data["csr"] = fmt.Sprintf("%s\n%s", csrb.PrivateKey, csrb.CSR)
- resp.Data["private_key"] = csrb.PrivateKey
- resp.Data["private_key_type"] = csrb.PrivateKeyType
- }
-
- case "der":
- resp.Data["csr"] = base64.StdEncoding.EncodeToString(parsedBundle.CSRBytes)
- if exported {
- resp.Data["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes)
- resp.Data["private_key_type"] = csrb.PrivateKeyType
- }
- }
-
- cb := &certutil.CertBundle{}
- cb.PrivateKey = csrb.PrivateKey
- cb.PrivateKeyType = csrb.PrivateKeyType
-
- entry, err := logical.StorageEntryJSON("config/ca_bundle", cb)
- if err != nil {
- return nil, err
- }
- err = req.Storage.Put(entry)
- if err != nil {
- return nil, err
- }
-
- return resp, nil
-}
-
-func (b *backend) pathSetSignedIntermediate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- cert := data.Get("certificate").(string)
-
- if cert == "" {
- return logical.ErrorResponse("no certificate provided in the \"certificate\" parameter"), nil
- }
-
- inputBundle, err := certutil.ParsePEMBundle(cert)
- if err != nil {
- switch err.(type) {
- case errutil.InternalError:
- return nil, err
- default:
- return logical.ErrorResponse(err.Error()), nil
- }
- }
-
- if inputBundle.Certificate == nil {
- return logical.ErrorResponse("supplied certificate could not be successfully parsed"), nil
- }
-
- cb := &certutil.CertBundle{}
- entry, err := req.Storage.Get("config/ca_bundle")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return logical.ErrorResponse("could not find any existing entry with a private key"), nil
- }
-
- err = entry.DecodeJSON(cb)
- if err != nil {
- return nil, err
- }
-
- if len(cb.PrivateKey) == 0 || cb.PrivateKeyType == "" {
- return logical.ErrorResponse("could not find an existing private key"), nil
- }
-
- parsedCB, err := cb.ToParsedCertBundle()
- if err != nil {
- return nil, err
- }
- if parsedCB.PrivateKey == nil {
- return nil, fmt.Errorf("saved key could not be parsed successfully")
- }
-
- inputBundle.PrivateKey = parsedCB.PrivateKey
- inputBundle.PrivateKeyType = parsedCB.PrivateKeyType
- inputBundle.PrivateKeyBytes = parsedCB.PrivateKeyBytes
-
- if !inputBundle.Certificate.IsCA {
- return logical.ErrorResponse("the given certificate is not marked for CA use and cannot be used with this backend"), nil
- }
-
- if err := inputBundle.Verify(); err != nil {
- return nil, fmt.Errorf("verification of parsed bundle failed: %s", err)
- }
-
- cb, err = inputBundle.ToCertBundle()
- if err != nil {
- return nil, fmt.Errorf("error converting raw values into cert bundle: %s", err)
- }
-
- entry, err = logical.StorageEntryJSON("config/ca_bundle", cb)
- if err != nil {
- return nil, err
- }
- err = req.Storage.Put(entry)
- if err != nil {
- return nil, err
- }
-
- entry.Key = "certs/" + normalizeSerial(cb.SerialNumber)
- entry.Value = inputBundle.CertificateBytes
- err = req.Storage.Put(entry)
- if err != nil {
- return nil, err
- }
-
- // For ease of later use, also store just the certificate at a known
- // location
- entry.Key = "ca"
- entry.Value = inputBundle.CertificateBytes
- err = req.Storage.Put(entry)
- if err != nil {
- return nil, err
- }
-
- // Build a fresh CRL
- err = buildCRL(b, req)
-
- return nil, err
-}
-
-const pathGenerateIntermediateHelpSyn = `
-Generate a new CSR and private key used for signing.
-`
-
-const pathGenerateIntermediateHelpDesc = `
-See the API documentation for more information.
-`
-
-const pathSetSignedIntermediateHelpSyn = `
-Provide the signed intermediate CA cert.
-`
-
-const pathSetSignedIntermediateHelpDesc = `
-See the API documentation for more information.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go
deleted file mode 100644
index d7b0c36..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go
+++ /dev/null
@@ -1,317 +0,0 @@
-package pki
-
-import (
- "encoding/base64"
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathIssue(b *backend) *framework.Path {
- ret := &framework.Path{
- Pattern: "issue/" + framework.GenericNameRegex("role"),
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathIssue,
- },
-
- HelpSynopsis: pathIssueHelpSyn,
- HelpDescription: pathIssueHelpDesc,
- }
-
- ret.Fields = addNonCACommonFields(map[string]*framework.FieldSchema{})
- return ret
-}
-
-func pathSign(b *backend) *framework.Path {
- ret := &framework.Path{
- Pattern: "sign/" + framework.GenericNameRegex("role"),
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathSign,
- },
-
- HelpSynopsis: pathSignHelpSyn,
- HelpDescription: pathSignHelpDesc,
- }
-
- ret.Fields = addNonCACommonFields(map[string]*framework.FieldSchema{})
-
- ret.Fields["csr"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: `PEM-format CSR to be signed.`,
- }
-
- return ret
-}
-
-func pathSignVerbatim(b *backend) *framework.Path {
- ret := &framework.Path{
- Pattern: "sign-verbatim" + framework.OptionalParamRegex("role"),
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathSignVerbatim,
- },
-
- HelpSynopsis: pathSignHelpSyn,
- HelpDescription: pathSignHelpDesc,
- }
-
- ret.Fields = addNonCACommonFields(map[string]*framework.FieldSchema{})
-
- ret.Fields["csr"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: `PEM-format CSR to be signed. Values will be
-taken verbatim from the CSR, except for
-basic constraints.`,
- }
-
- return ret
-}
-
-// pathIssue issues a certificate and private key from given parameters,
-// subject to role restrictions
-func (b *backend) pathIssue(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role").(string)
-
- // Get the role
- role, err := b.getRole(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", roleName)), nil
- }
-
- return b.pathIssueSignCert(req, data, role, false, false)
-}
-
-// pathSign issues a certificate from a submitted CSR, subject to role
-// restrictions
-func (b *backend) pathSign(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role").(string)
-
- // Get the role
- role, err := b.getRole(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", roleName)), nil
- }
-
- return b.pathIssueSignCert(req, data, role, true, false)
-}
-
-// pathSignVerbatim issues a certificate from a submitted CSR, *not* subject to
-// role restrictions
-func (b *backend) pathSignVerbatim(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
-
- roleName := data.Get("role").(string)
-
- // Get the role if one was specified
- role, err := b.getRole(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
-
- ttl := b.System().DefaultLeaseTTL()
- maxTTL := b.System().MaxLeaseTTL()
-
- entry := &roleEntry{
- TTL: ttl.String(),
- MaxTTL: maxTTL.String(),
- AllowLocalhost: true,
- AllowAnyName: true,
- AllowIPSANs: true,
- EnforceHostnames: false,
- KeyType: "any",
- UseCSRCommonName: true,
- UseCSRSANs: true,
- GenerateLease: new(bool),
- }
-
- if role != nil {
- if role.TTL != "" {
- entry.TTL = role.TTL
- }
- if role.MaxTTL != "" {
- entry.MaxTTL = role.MaxTTL
- }
- entry.NoStore = role.NoStore
- }
-
- *entry.GenerateLease = false
- if role != nil && role.GenerateLease != nil {
- *entry.GenerateLease = *role.GenerateLease
- }
-
- return b.pathIssueSignCert(req, data, entry, true, true)
-}
-
-func (b *backend) pathIssueSignCert(
- req *logical.Request, data *framework.FieldData, role *roleEntry, useCSR, useCSRValues bool) (*logical.Response, error) {
- format := getFormat(data)
- if format == "" {
- return logical.ErrorResponse(
- `The "format" path parameter must be "pem", "der", or "pem_bundle"`), nil
- }
-
- var caErr error
- signingBundle, caErr := fetchCAInfo(req)
- switch caErr.(type) {
- case errutil.UserError:
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "Could not fetch the CA certificate (was one set?): %s", caErr)}
- case errutil.InternalError:
- return nil, errutil.InternalError{Err: fmt.Sprintf(
- "Error fetching CA certificate: %s", caErr)}
- }
-
- var parsedBundle *certutil.ParsedCertBundle
- var err error
- if useCSR {
- parsedBundle, err = signCert(b, role, signingBundle, false, useCSRValues, req, data)
- } else {
- parsedBundle, err = generateCert(b, role, signingBundle, false, req, data)
- }
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- return logical.ErrorResponse(err.Error()), nil
- case errutil.InternalError:
- return nil, err
- }
- }
-
- signingCB, err := signingBundle.ToCertBundle()
- if err != nil {
- return nil, fmt.Errorf("Error converting raw signing bundle to cert bundle: %s", err)
- }
-
- cb, err := parsedBundle.ToCertBundle()
- if err != nil {
- return nil, fmt.Errorf("Error converting raw cert bundle to cert bundle: %s", err)
- }
-
- respData := map[string]interface{}{
- "serial_number": cb.SerialNumber,
- }
-
- switch format {
- case "pem":
- respData["issuing_ca"] = signingCB.Certificate
- respData["certificate"] = cb.Certificate
- if cb.CAChain != nil && len(cb.CAChain) > 0 {
- respData["ca_chain"] = cb.CAChain
- }
- if !useCSR {
- respData["private_key"] = cb.PrivateKey
- respData["private_key_type"] = cb.PrivateKeyType
- }
-
- case "pem_bundle":
- respData["issuing_ca"] = signingCB.Certificate
- respData["certificate"] = cb.ToPEMBundle()
- if cb.CAChain != nil && len(cb.CAChain) > 0 {
- respData["ca_chain"] = cb.CAChain
- }
- if !useCSR {
- respData["private_key"] = cb.PrivateKey
- respData["private_key_type"] = cb.PrivateKeyType
- }
-
- case "der":
- respData["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes)
- respData["issuing_ca"] = base64.StdEncoding.EncodeToString(signingBundle.CertificateBytes)
-
- var caChain []string
- for _, caCert := range parsedBundle.CAChain {
- caChain = append(caChain, base64.StdEncoding.EncodeToString(caCert.Bytes))
- }
- if caChain != nil && len(caChain) > 0 {
- respData["ca_chain"] = caChain
- }
-
- if !useCSR {
- respData["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes)
- respData["private_key_type"] = cb.PrivateKeyType
- }
- }
-
- var resp *logical.Response
- switch {
- case role.GenerateLease == nil:
- return nil, fmt.Errorf("generate lease in role is nil")
- case *role.GenerateLease == false:
- // If lease generation is disabled do not populate `Secret` field in
- // the response
- resp = &logical.Response{
- Data: respData,
- }
- default:
- resp = b.Secret(SecretCertsType).Response(
- respData,
- map[string]interface{}{
- "serial_number": cb.SerialNumber,
- })
- resp.Secret.TTL = parsedBundle.Certificate.NotAfter.Sub(time.Now())
- }
-
- if !role.NoStore {
- err = req.Storage.Put(&logical.StorageEntry{
- Key: "certs/" + normalizeSerial(cb.SerialNumber),
- Value: parsedBundle.CertificateBytes,
- })
- if err != nil {
- return nil, fmt.Errorf("unable to store certificate locally: %v", err)
- }
- }
-
- if useCSR {
- if role.UseCSRCommonName && data.Get("common_name").(string) != "" {
- resp.AddWarning("the common_name field was provided but the role is set with \"use_csr_common_name\" set to true")
- }
- if role.UseCSRSANs && data.Get("alt_names").(string) != "" {
- resp.AddWarning("the alt_names field was provided but the role is set with \"use_csr_sans\" set to true")
- }
- }
-
- return resp, nil
-}
-
-const pathIssueHelpSyn = `
-Request a certificate using a certain role with the provided details.
-`
-
-const pathIssueHelpDesc = `
-This path allows requesting a certificate to be issued according to the
-policy of the given role. The certificate will only be issued if the
-requested details are allowed by the role policy.
-
-This path returns a certificate and a private key. If you want a workflow
-that does not expose a private key, generate a CSR locally and use the
-sign path instead.
-`
-
-const pathSignHelpSyn = `
-Request certificates using a certain role with the provided details.
-`
-
-const pathSignHelpDesc = `
-This path allows requesting certificates to be issued according to the
-policy of the given role. The certificate will only be issued if the
-requested common name is allowed by the role policy.
-
-This path requires a CSR; if you want Vault to generate a private key
-for you, use the issue path instead.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_revoke.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_revoke.go
deleted file mode 100644
index 2911995..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_revoke.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package pki
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathRevoke(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `revoke`,
- Fields: map[string]*framework.FieldSchema{
- "serial_number": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Certificate serial number, in colon- or
-hyphen-separated octal`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRevokeWrite,
- },
-
- HelpSynopsis: pathRevokeHelpSyn,
- HelpDescription: pathRevokeHelpDesc,
- }
-}
-
-func pathRotateCRL(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `crl/rotate`,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRotateCRLRead,
- },
-
- HelpSynopsis: pathRotateCRLHelpSyn,
- HelpDescription: pathRotateCRLHelpDesc,
- }
-}
-
-func (b *backend) pathRevokeWrite(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- serial := data.Get("serial_number").(string)
- if len(serial) == 0 {
- return logical.ErrorResponse("The serial number must be provided"), nil
- }
-
- // We store and identify by lowercase colon-separated hex, but other
- // utilities use dashes and/or uppercase, so normalize
- serial = strings.Replace(strings.ToLower(serial), "-", ":", -1)
-
- b.revokeStorageLock.Lock()
- defer b.revokeStorageLock.Unlock()
-
- return revokeCert(b, req, serial, false)
-}
-
-func (b *backend) pathRotateCRLRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.revokeStorageLock.RLock()
- defer b.revokeStorageLock.RUnlock()
-
- crlErr := buildCRL(b, req)
- switch crlErr.(type) {
- case errutil.UserError:
- return logical.ErrorResponse(fmt.Sprintf("Error during CRL building: %s", crlErr)), nil
- case errutil.InternalError:
- return nil, fmt.Errorf("Error encountered during CRL building: %s", crlErr)
- default:
- return &logical.Response{
- Data: map[string]interface{}{
- "success": true,
- },
- }, nil
- }
-}
-
-const pathRevokeHelpSyn = `
-Revoke a certificate by serial number.
-`
-
-const pathRevokeHelpDesc = `
-This allows certificates to be revoked using its serial number. A root token is required.
-`
-
-const pathRotateCRLHelpSyn = `
-Force a rebuild of the CRL.
-`
-
-const pathRotateCRLHelpDesc = `
-Force a rebuild of the CRL. This can be used to remove expired certificates from it if no certificates have been revoked. A root token is required.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go
deleted file mode 100644
index 96d0197..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go
+++ /dev/null
@@ -1,546 +0,0 @@
-package pki
-
-import (
- "crypto/x509"
- "fmt"
- "strings"
- "time"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathListRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
-
- HelpSynopsis: pathListRolesHelpSyn,
- HelpDescription: pathListRolesHelpDesc,
- }
-}
-
-func pathRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role",
- },
-
- "ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: "",
- Description: `The lease duration if no specific lease duration is
-requested. The lease duration controls the expiration
-of certificates issued by this backend. Defaults to
-the value of max_ttl.`,
- },
-
- "max_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: "The maximum allowed lease duration",
- },
-
- "allow_localhost": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: `Whether to allow "localhost" as a valid common
-name in a request`,
- },
-
- "allowed_domains": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: `If set, clients can request certificates for
-subdomains directly beneath these domains, including
-the wildcard subdomains. See the documentation for more
-information. This parameter accepts a comma-separated list
-of domains.`,
- },
-
- "allow_bare_domains": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: `If set, clients can request certificates
-for the base domains themselves, e.g. "example.com".
-This is a separate option as in some cases this can
-be considered a security threat.`,
- },
-
- "allow_subdomains": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: `If set, clients can request certificates for
-subdomains of the CNs allowed by the other role options,
-including wildcard subdomains. See the documentation for
-more information.`,
- },
-
- "allow_glob_domains": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: `If set, domains specified in "allowed_domains"
-can include glob patterns, e.g. "ftp*.example.com". See
-the documentation for more information.`,
- },
-
- "allow_any_name": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: `If set, clients can request certificates for
-any CN they like. See the documentation for more
-information.`,
- },
-
- "enforce_hostnames": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: `If set, only valid host names are allowed for
-CN and SANs. Defaults to true.`,
- },
-
- "allow_ip_sans": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: `If set, IP Subject Alternative Names are allowed.
-Any valid IP is accepted.`,
- },
-
- "server_flag": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: `If set, certificates are flagged for server auth use.
-Defaults to true.`,
- },
-
- "client_flag": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: `If set, certificates are flagged for client auth use.
-Defaults to true.`,
- },
-
- "code_signing_flag": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: `If set, certificates are flagged for code signing
-use. Defaults to false.`,
- },
-
- "email_protection_flag": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: `If set, certificates are flagged for email
-protection use. Defaults to false.`,
- },
-
- "key_type": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "rsa",
- Description: `The type of key to use; defaults to RSA. "rsa"
-and "ec" are the only valid values.`,
- },
-
- "key_bits": &framework.FieldSchema{
- Type: framework.TypeInt,
- Default: 2048,
- Description: `The number of bits to use. You will almost
-certainly want to change this if you adjust
-the key_type.`,
- },
-
- "key_usage": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "DigitalSignature,KeyAgreement,KeyEncipherment",
- Description: `A comma-separated set of key usages (not extended
-key usages). Valid values can be found at
-https://golang.org/pkg/crypto/x509/#KeyUsage
--- simply drop the "KeyUsage" part of the name.
-To remove all key usages from being set, set
-this value to an empty string.`,
- },
-
- "use_csr_common_name": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: `If set, when used with a signing profile,
-the common name in the CSR will be used. This
-does *not* include any requested Subject Alternative
-Names. Defaults to true.`,
- },
-
- "use_csr_sans": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: `If set, when used with a signing profile,
-the SANs in the CSR will be used. This does *not*
-include the Common Name (cn). Defaults to true.`,
- },
-
- "ou": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: `If set, the OU (OrganizationalUnit) will be set to
-this value in certificates issued by this role.`,
- },
-
- "organization": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: `If set, the O (Organization) will be set to
-this value in certificates issued by this role.`,
- },
-
- "generate_lease": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: `
-If set, certificates issued/signed against this role will have Vault leases
-attached to them. Defaults to "false". Certificates can be added to the CRL by
-"vault revoke " when certificates are associated with leases. It can
-also be done using the "pki/revoke" endpoint. However, when lease generation is
-disabled, invoking "pki/revoke" would be the only way to add the certificates
-to the CRL. When large number of certificates are generated with long
-lifetimes, it is recommended that lease generation be disabled, as large amount of
-leases adversely affect the startup time of Vault.`,
- },
- "no_store": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: `
-If set, certificates issued/signed against this role will not be stored in the
-in the storage backend. This can improve performance when issuing large numbers
-of certificates. However, certificates issued in this way cannot be enumerated
-or revoked, so this option is recommended only for certificates that are
-non-sensitive, or extremely short-lived. This option implies a value of "false"
-for "generate_lease".`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleRead,
- logical.UpdateOperation: b.pathRoleCreate,
- logical.DeleteOperation: b.pathRoleDelete,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func (b *backend) getRole(s logical.Storage, n string) (*roleEntry, error) {
- entry, err := s.Get("role/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result roleEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- // Migrate existing saved entries and save back if changed
- modified := false
- if len(result.TTL) == 0 && len(result.Lease) != 0 {
- result.TTL = result.Lease
- result.Lease = ""
- modified = true
- }
- if len(result.MaxTTL) == 0 && len(result.LeaseMax) != 0 {
- result.MaxTTL = result.LeaseMax
- result.LeaseMax = ""
- modified = true
- }
- if result.AllowBaseDomain {
- result.AllowBaseDomain = false
- result.AllowBareDomains = true
- modified = true
- }
- if result.AllowedBaseDomain != "" {
- found := false
- allowedDomains := strings.Split(result.AllowedDomains, ",")
- if len(allowedDomains) != 0 {
- for _, v := range allowedDomains {
- if v == result.AllowedBaseDomain {
- found = true
- break
- }
- }
- }
- if !found {
- if result.AllowedDomains == "" {
- result.AllowedDomains = result.AllowedBaseDomain
- } else {
- result.AllowedDomains += "," + result.AllowedBaseDomain
- }
- }
- result.AllowedBaseDomain = ""
- modified = true
- }
-
- // Upgrade generate_lease in role
- if result.GenerateLease == nil {
- // All the new roles will have GenerateLease always set to a value. A
- // nil value indicates that this role needs an upgrade. Set it to
- // `true` to not alter its current behavior.
- result.GenerateLease = new(bool)
- *result.GenerateLease = true
- modified = true
- }
-
- if modified {
- jsonEntry, err := logical.StorageEntryJSON("role/"+n, &result)
- if err != nil {
- return nil, err
- }
- if err := s.Put(jsonEntry); err != nil {
- return nil, err
- }
- }
-
- return &result, nil
-}
-
-func (b *backend) pathRoleDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("role/" + data.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathRoleRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("name").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role name"), nil
- }
-
- role, err := b.getRole(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- hasMax := true
- if len(role.MaxTTL) == 0 {
- role.MaxTTL = "(system default)"
- hasMax = false
- }
- if len(role.TTL) == 0 {
- if hasMax {
- role.TTL = "(system default, capped to role max)"
- } else {
- role.TTL = "(system default)"
- }
- }
-
- resp := &logical.Response{
- Data: structs.New(role).Map(),
- }
-
- if resp.Data == nil {
- return nil, fmt.Errorf("error converting role data to response")
- }
-
- // These values are deprecated and the entries are migrated on read
- delete(resp.Data, "lease")
- delete(resp.Data, "lease_max")
- delete(resp.Data, "allowed_base_domain")
-
- return resp, nil
-}
-
-func (b *backend) pathRoleList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("role/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
-}
-
-func (b *backend) pathRoleCreate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- var err error
- name := data.Get("name").(string)
-
- entry := &roleEntry{
- MaxTTL: data.Get("max_ttl").(string),
- TTL: (time.Duration(data.Get("ttl").(int)) * time.Second).String(),
- AllowLocalhost: data.Get("allow_localhost").(bool),
- AllowedDomains: data.Get("allowed_domains").(string),
- AllowBareDomains: data.Get("allow_bare_domains").(bool),
- AllowSubdomains: data.Get("allow_subdomains").(bool),
- AllowGlobDomains: data.Get("allow_glob_domains").(bool),
- AllowAnyName: data.Get("allow_any_name").(bool),
- EnforceHostnames: data.Get("enforce_hostnames").(bool),
- AllowIPSANs: data.Get("allow_ip_sans").(bool),
- ServerFlag: data.Get("server_flag").(bool),
- ClientFlag: data.Get("client_flag").(bool),
- CodeSigningFlag: data.Get("code_signing_flag").(bool),
- EmailProtectionFlag: data.Get("email_protection_flag").(bool),
- KeyType: data.Get("key_type").(string),
- KeyBits: data.Get("key_bits").(int),
- UseCSRCommonName: data.Get("use_csr_common_name").(bool),
- UseCSRSANs: data.Get("use_csr_sans").(bool),
- KeyUsage: data.Get("key_usage").(string),
- OU: data.Get("ou").(string),
- Organization: data.Get("organization").(string),
- GenerateLease: new(bool),
- NoStore: data.Get("no_store").(bool),
- }
-
- // no_store implies generate_lease := false
- if entry.NoStore {
- *entry.GenerateLease = false
- } else {
- *entry.GenerateLease = data.Get("generate_lease").(bool)
- }
-
- if entry.KeyType == "rsa" && entry.KeyBits < 2048 {
- return logical.ErrorResponse("RSA keys < 2048 bits are unsafe and not supported"), nil
- }
-
- var maxTTL time.Duration
- maxSystemTTL := b.System().MaxLeaseTTL()
- if len(entry.MaxTTL) == 0 {
- maxTTL = maxSystemTTL
- } else {
- maxTTL, err = parseutil.ParseDurationSecond(entry.MaxTTL)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Invalid max ttl: %s", err)), nil
- }
- }
- if maxTTL > maxSystemTTL {
- return logical.ErrorResponse("Requested max TTL is higher than backend maximum"), nil
- }
-
- ttl := b.System().DefaultLeaseTTL()
- if len(entry.TTL) != 0 {
- ttl, err = parseutil.ParseDurationSecond(entry.TTL)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Invalid ttl: %s", err)), nil
- }
- }
- if ttl > maxTTL {
- // If they are using the system default, cap it to the role max;
- // if it was specified on the command line, make it an error
- if len(entry.TTL) == 0 {
- ttl = maxTTL
- } else {
- return logical.ErrorResponse(
- `"ttl" value must be less than "max_ttl" and/or backend default max lease TTL value`,
- ), nil
- }
- }
-
- // Persist clamped TTLs
- entry.TTL = ttl.String()
- entry.MaxTTL = maxTTL.String()
-
- if errResp := validateKeyTypeLength(entry.KeyType, entry.KeyBits); errResp != nil {
- return errResp, nil
- }
-
- // Store it
- jsonEntry, err := logical.StorageEntryJSON("role/"+name, entry)
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(jsonEntry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func parseKeyUsages(input string) int {
- var parsedKeyUsages x509.KeyUsage
- splitKeyUsage := strings.Split(input, ",")
- for _, k := range splitKeyUsage {
- switch strings.ToLower(strings.TrimSpace(k)) {
- case "digitalsignature":
- parsedKeyUsages |= x509.KeyUsageDigitalSignature
- case "contentcommitment":
- parsedKeyUsages |= x509.KeyUsageContentCommitment
- case "keyencipherment":
- parsedKeyUsages |= x509.KeyUsageKeyEncipherment
- case "dataencipherment":
- parsedKeyUsages |= x509.KeyUsageDataEncipherment
- case "keyagreement":
- parsedKeyUsages |= x509.KeyUsageKeyAgreement
- case "certsign":
- parsedKeyUsages |= x509.KeyUsageCertSign
- case "crlsign":
- parsedKeyUsages |= x509.KeyUsageCRLSign
- case "encipheronly":
- parsedKeyUsages |= x509.KeyUsageEncipherOnly
- case "decipheronly":
- parsedKeyUsages |= x509.KeyUsageDecipherOnly
- }
- }
-
- return int(parsedKeyUsages)
-}
-
-type roleEntry struct {
- LeaseMax string `json:"lease_max" structs:"lease_max" mapstructure:"lease_max"`
- Lease string `json:"lease" structs:"lease" mapstructure:"lease"`
- MaxTTL string `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
- TTL string `json:"ttl" structs:"ttl" mapstructure:"ttl"`
- AllowLocalhost bool `json:"allow_localhost" structs:"allow_localhost" mapstructure:"allow_localhost"`
- AllowedBaseDomain string `json:"allowed_base_domain" structs:"allowed_base_domain" mapstructure:"allowed_base_domain"`
- AllowedDomains string `json:"allowed_domains" structs:"allowed_domains" mapstructure:"allowed_domains"`
- AllowBaseDomain bool `json:"allow_base_domain" structs:"allow_base_domain" mapstructure:"allow_base_domain"`
- AllowBareDomains bool `json:"allow_bare_domains" structs:"allow_bare_domains" mapstructure:"allow_bare_domains"`
- AllowTokenDisplayName bool `json:"allow_token_displayname" structs:"allow_token_displayname" mapstructure:"allow_token_displayname"`
- AllowSubdomains bool `json:"allow_subdomains" structs:"allow_subdomains" mapstructure:"allow_subdomains"`
- AllowGlobDomains bool `json:"allow_glob_domains" structs:"allow_glob_domains" mapstructure:"allow_glob_domains"`
- AllowAnyName bool `json:"allow_any_name" structs:"allow_any_name" mapstructure:"allow_any_name"`
- EnforceHostnames bool `json:"enforce_hostnames" structs:"enforce_hostnames" mapstructure:"enforce_hostnames"`
- AllowIPSANs bool `json:"allow_ip_sans" structs:"allow_ip_sans" mapstructure:"allow_ip_sans"`
- ServerFlag bool `json:"server_flag" structs:"server_flag" mapstructure:"server_flag"`
- ClientFlag bool `json:"client_flag" structs:"client_flag" mapstructure:"client_flag"`
- CodeSigningFlag bool `json:"code_signing_flag" structs:"code_signing_flag" mapstructure:"code_signing_flag"`
- EmailProtectionFlag bool `json:"email_protection_flag" structs:"email_protection_flag" mapstructure:"email_protection_flag"`
- UseCSRCommonName bool `json:"use_csr_common_name" structs:"use_csr_common_name" mapstructure:"use_csr_common_name"`
- UseCSRSANs bool `json:"use_csr_sans" structs:"use_csr_sans" mapstructure:"use_csr_sans"`
- KeyType string `json:"key_type" structs:"key_type" mapstructure:"key_type"`
- KeyBits int `json:"key_bits" structs:"key_bits" mapstructure:"key_bits"`
- MaxPathLength *int `json:",omitempty" structs:"max_path_length,omitempty" mapstructure:"max_path_length"`
- KeyUsage string `json:"key_usage" structs:"key_usage" mapstructure:"key_usage"`
- OU string `json:"ou" structs:"ou" mapstructure:"ou"`
- Organization string `json:"organization" structs:"organization" mapstructure:"organization"`
- GenerateLease *bool `json:"generate_lease,omitempty" structs:"generate_lease,omitempty"`
- NoStore bool `json:"no_store" structs:"no_store" mapstructure:"no_store"`
-
- // Used internally for signing intermediates
- AllowExpirationPastCA bool
-}
-
-const pathListRolesHelpSyn = `List the existing roles in this backend`
-
-const pathListRolesHelpDesc = `Roles will be listed by the role name.`
-
-const pathRoleHelpSyn = `Manage the roles that can be created with this backend.`
-
-const pathRoleHelpDesc = `This path lets you manage the roles that can be created with this backend.`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go
deleted file mode 100644
index bd0aa90..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go
+++ /dev/null
@@ -1,310 +0,0 @@
-package pki
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
- "github.com/mitchellh/mapstructure"
-)
-
-func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
-
- var err error
- b := Backend()
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
- return b, config.StorageView
-}
-
-func TestPki_RoleGenerateLease(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- roleData := map[string]interface{}{
- "allowed_domains": "myvault.com",
- "ttl": "5h",
- }
-
- roleReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/testrole",
- Storage: storage,
- Data: roleData,
- }
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- // generate_lease cannot be nil. It either has to be set during role
- // creation or has to be filled in by the upgrade code
- generateLease := resp.Data["generate_lease"].(*bool)
- if generateLease == nil {
- t.Fatalf("generate_lease should not be nil")
- }
-
- // By default, generate_lease should be `false`
- if *generateLease {
- t.Fatalf("generate_lease should not be set by default")
- }
-
- // role.GenerateLease will be nil after the decode
- var role roleEntry
- err = mapstructure.Decode(resp.Data, &role)
- if err != nil {
- t.Fatal(err)
- }
-
- // Make it explicit
- role.GenerateLease = nil
-
- entry, err := logical.StorageEntryJSON("role/testrole", role)
- if err != nil {
- t.Fatal(err)
- }
- if err := storage.Put(entry); err != nil {
- t.Fatal(err)
- }
-
- // Reading should upgrade generate_lease
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- generateLease = resp.Data["generate_lease"].(*bool)
- if generateLease == nil {
- t.Fatalf("generate_lease should not be nil")
- }
-
- // Upgrade should set generate_lease to `true`
- if !*generateLease {
- t.Fatalf("generate_lease should be set after an upgrade")
- }
-
- // Make sure that setting generate_lease to `true` works properly
- roleReq.Operation = logical.UpdateOperation
- roleReq.Path = "roles/testrole2"
- roleReq.Data["generate_lease"] = true
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- generateLease = resp.Data["generate_lease"].(*bool)
- if generateLease == nil {
- t.Fatalf("generate_lease should not be nil")
- }
- if !*generateLease {
- t.Fatalf("generate_lease should have been set")
- }
-}
-
-func TestPki_RoleNoStore(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- roleData := map[string]interface{}{
- "allowed_domains": "myvault.com",
- "ttl": "5h",
- }
-
- roleReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/testrole",
- Storage: storage,
- Data: roleData,
- }
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- // By default, no_store should be `false`
- noStore := resp.Data["no_store"].(bool)
- if noStore {
- t.Fatalf("no_store should not be set by default")
- }
-
- // Make sure that setting no_store to `true` works properly
- roleReq.Operation = logical.UpdateOperation
- roleReq.Path = "roles/testrole_nostore"
- roleReq.Data["no_store"] = true
- roleReq.Data["allowed_domain"] = "myvault.com"
- roleReq.Data["allow_subdomains"] = true
- roleReq.Data["ttl"] = "5h"
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- roleReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- noStore = resp.Data["no_store"].(bool)
- if !noStore {
- t.Fatalf("no_store should have been set to true")
- }
-
- // issue a certificate and test that it's not stored
- caData := map[string]interface{}{
- "common_name": "myvault.com",
- "ttl": "5h",
- "ip_sans": "127.0.0.1",
- }
- caReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "root/generate/internal",
- Storage: storage,
- Data: caData,
- }
- resp, err = b.HandleRequest(caReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- issueData := map[string]interface{}{
- "common_name": "cert.myvault.com",
- "format": "pem",
- "ip_sans": "127.0.0.1",
- "ttl": "1h",
- }
- issueReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "issue/testrole_nostore",
- Storage: storage,
- Data: issueData,
- }
-
- resp, err = b.HandleRequest(issueReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- // list certs
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ListOperation,
- Path: "certs",
- Storage: storage,
- })
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
- if len(resp.Data["keys"].([]string)) != 1 {
- t.Fatalf("Only the CA certificate should be stored: %#v", resp)
- }
-}
-
-func TestPki_CertsLease(t *testing.T) {
- var resp *logical.Response
- var err error
- b, storage := createBackendWithStorage(t)
-
- caData := map[string]interface{}{
- "common_name": "myvault.com",
- "ttl": "5h",
- "ip_sans": "127.0.0.1",
- }
-
- caReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "root/generate/internal",
- Storage: storage,
- Data: caData,
- }
-
- resp, err = b.HandleRequest(caReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- roleData := map[string]interface{}{
- "allowed_domains": "myvault.com",
- "allow_subdomains": true,
- "ttl": "2h",
- }
-
- roleReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/testrole",
- Storage: storage,
- Data: roleData,
- }
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- issueData := map[string]interface{}{
- "common_name": "cert.myvault.com",
- "format": "pem",
- "ip_sans": "127.0.0.1",
- }
- issueReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "issue/testrole",
- Storage: storage,
- Data: issueData,
- }
-
- resp, err = b.HandleRequest(issueReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- if resp.Secret != nil {
- t.Fatalf("expected a response that does not contain a secret")
- }
-
- // Turn on the lease generation and issue a certificate. The response
- // should have a `Secret` object populated.
- roleData["generate_lease"] = true
-
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- resp, err = b.HandleRequest(issueReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v resp: %#v", err, resp)
- }
-
- if resp.Secret == nil {
- t.Fatalf("expected a response that contains a secret")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go
deleted file mode 100644
index 438c92e..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go
+++ /dev/null
@@ -1,458 +0,0 @@
-package pki
-
-import (
- "crypto/rand"
- "crypto/x509"
- "encoding/base64"
- "encoding/pem"
- "fmt"
- "reflect"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathGenerateRoot(b *backend) *framework.Path {
- ret := &framework.Path{
- Pattern: "root/generate/" + framework.GenericNameRegex("exported"),
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathCAGenerateRoot,
- },
-
- HelpSynopsis: pathGenerateRootHelpSyn,
- HelpDescription: pathGenerateRootHelpDesc,
- }
-
- ret.Fields = addCACommonFields(map[string]*framework.FieldSchema{})
- ret.Fields = addCAKeyGenerationFields(ret.Fields)
- ret.Fields = addCAIssueFields(ret.Fields)
-
- return ret
-}
-
-func pathDeleteRoot(b *backend) *framework.Path {
- ret := &framework.Path{
- Pattern: "root",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.DeleteOperation: b.pathCADeleteRoot,
- },
-
- HelpSynopsis: pathDeleteRootHelpSyn,
- HelpDescription: pathDeleteRootHelpDesc,
- }
-
- return ret
-}
-
-func pathSignIntermediate(b *backend) *framework.Path {
- ret := &framework.Path{
- Pattern: "root/sign-intermediate",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathCASignIntermediate,
- },
-
- HelpSynopsis: pathSignIntermediateHelpSyn,
- HelpDescription: pathSignIntermediateHelpDesc,
- }
-
- ret.Fields = addCACommonFields(map[string]*framework.FieldSchema{})
- ret.Fields = addCAIssueFields(ret.Fields)
-
- ret.Fields["csr"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: `PEM-format CSR to be signed.`,
- }
-
- ret.Fields["use_csr_values"] = &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: `If true, then:
-1) Subject information, including names and alternate
-names, will be preserved from the CSR rather than
-using values provided in the other parameters to
-this path;
-2) Any key usages requested in the CSR will be
-added to the basic set of key usages used for CA
-certs signed by this path; for instance,
-the non-repudiation flag.`,
- }
-
- return ret
-}
-
-func pathSignSelfIssued(b *backend) *framework.Path {
- ret := &framework.Path{
- Pattern: "root/sign-self-issued",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathCASignSelfIssued,
- },
-
- Fields: map[string]*framework.FieldSchema{
- "certificate": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `PEM-format self-issued certificate to be signed.`,
- },
- },
-
- HelpSynopsis: pathSignSelfIssuedHelpSyn,
- HelpDescription: pathSignSelfIssuedHelpDesc,
- }
-
- return ret
-}
-
-func (b *backend) pathCADeleteRoot(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return nil, req.Storage.Delete("config/ca_bundle")
-}
-
-func (b *backend) pathCAGenerateRoot(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- var err error
-
- entry, err := req.Storage.Get("config/ca_bundle")
- if err != nil {
- return nil, err
- }
- if entry != nil {
- return nil, nil
- }
-
- exported, format, role, errorResp := b.getGenerationParams(data)
- if errorResp != nil {
- return errorResp, nil
- }
-
- maxPathLengthIface, ok := data.GetOk("max_path_length")
- if ok {
- maxPathLength := maxPathLengthIface.(int)
- role.MaxPathLength = &maxPathLength
- }
-
- parsedBundle, err := generateCert(b, role, nil, true, req, data)
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- return logical.ErrorResponse(err.Error()), nil
- case errutil.InternalError:
- return nil, err
- }
- }
-
- cb, err := parsedBundle.ToCertBundle()
- if err != nil {
- return nil, fmt.Errorf("error converting raw cert bundle to cert bundle: %s", err)
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "expiration": int64(parsedBundle.Certificate.NotAfter.Unix()),
- "serial_number": cb.SerialNumber,
- },
- }
-
- switch format {
- case "pem":
- resp.Data["certificate"] = cb.Certificate
- resp.Data["issuing_ca"] = cb.Certificate
- if exported {
- resp.Data["private_key"] = cb.PrivateKey
- resp.Data["private_key_type"] = cb.PrivateKeyType
- }
-
- case "pem_bundle":
- resp.Data["issuing_ca"] = cb.Certificate
-
- if exported {
- resp.Data["private_key"] = cb.PrivateKey
- resp.Data["private_key_type"] = cb.PrivateKeyType
- resp.Data["certificate"] = fmt.Sprintf("%s\n%s", cb.PrivateKey, cb.Certificate)
- } else {
- resp.Data["certificate"] = cb.Certificate
- }
-
- case "der":
- resp.Data["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes)
- resp.Data["issuing_ca"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes)
- if exported {
- resp.Data["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes)
- resp.Data["private_key_type"] = cb.PrivateKeyType
- }
- }
-
- // Store it as the CA bundle
- entry, err = logical.StorageEntryJSON("config/ca_bundle", cb)
- if err != nil {
- return nil, err
- }
- err = req.Storage.Put(entry)
- if err != nil {
- return nil, err
- }
-
- // Also store it as just the certificate identified by serial number, so it
- // can be revoked
- err = req.Storage.Put(&logical.StorageEntry{
- Key: "certs/" + normalizeSerial(cb.SerialNumber),
- Value: parsedBundle.CertificateBytes,
- })
- if err != nil {
- return nil, fmt.Errorf("Unable to store certificate locally: %v", err)
- }
-
- // For ease of later use, also store just the certificate at a known
- // location
- entry.Key = "ca"
- entry.Value = parsedBundle.CertificateBytes
- err = req.Storage.Put(entry)
- if err != nil {
- return nil, err
- }
-
- // Build a fresh CRL
- err = buildCRL(b, req)
- if err != nil {
- return nil, err
- }
-
- if parsedBundle.Certificate.MaxPathLen == 0 {
- resp.AddWarning("Max path length of the generated certificate is zero. This certificate cannot be used to issue intermediate CA certificates.")
- }
-
- return resp, nil
-}
-
-func (b *backend) pathCASignIntermediate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- var err error
-
- format := getFormat(data)
- if format == "" {
- return logical.ErrorResponse(
- `The "format" path parameter must be "pem" or "der"`,
- ), nil
- }
-
- role := &roleEntry{
- TTL: (time.Duration(data.Get("ttl").(int)) * time.Second).String(),
- AllowLocalhost: true,
- AllowAnyName: true,
- AllowIPSANs: true,
- EnforceHostnames: false,
- KeyType: "any",
- AllowExpirationPastCA: true,
- }
-
- if cn := data.Get("common_name").(string); len(cn) == 0 {
- role.UseCSRCommonName = true
- }
-
- var caErr error
- signingBundle, caErr := fetchCAInfo(req)
- switch caErr.(type) {
- case errutil.UserError:
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "could not fetch the CA certificate (was one set?): %s", caErr)}
- case errutil.InternalError:
- return nil, errutil.InternalError{Err: fmt.Sprintf(
- "error fetching CA certificate: %s", caErr)}
- }
-
- useCSRValues := data.Get("use_csr_values").(bool)
-
- maxPathLengthIface, ok := data.GetOk("max_path_length")
- if ok {
- maxPathLength := maxPathLengthIface.(int)
- role.MaxPathLength = &maxPathLength
- }
-
- parsedBundle, err := signCert(b, role, signingBundle, true, useCSRValues, req, data)
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- return logical.ErrorResponse(err.Error()), nil
- case errutil.InternalError:
- return nil, err
- }
- }
-
- if err := parsedBundle.Verify(); err != nil {
- return nil, fmt.Errorf("verification of parsed bundle failed: %s", err)
- }
-
- signingCB, err := signingBundle.ToCertBundle()
- if err != nil {
- return nil, fmt.Errorf("Error converting raw signing bundle to cert bundle: %s", err)
- }
-
- cb, err := parsedBundle.ToCertBundle()
- if err != nil {
- return nil, fmt.Errorf("Error converting raw cert bundle to cert bundle: %s", err)
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "expiration": int64(parsedBundle.Certificate.NotAfter.Unix()),
- "serial_number": cb.SerialNumber,
- },
- }
-
- if signingBundle.Certificate.NotAfter.Before(parsedBundle.Certificate.NotAfter) {
- resp.AddWarning("The expiration time for the signed certificate is after the CA's expiration time. If the new certificate is not treated as a root, validation paths with the certificate past the issuing CA's expiration time will fail.")
- }
-
- switch format {
- case "pem":
- resp.Data["certificate"] = cb.Certificate
- resp.Data["issuing_ca"] = signingCB.Certificate
- if cb.CAChain != nil && len(cb.CAChain) > 0 {
- resp.Data["ca_chain"] = cb.CAChain
- }
-
- case "pem_bundle":
- resp.Data["certificate"] = cb.ToPEMBundle()
- resp.Data["issuing_ca"] = signingCB.Certificate
- if cb.CAChain != nil && len(cb.CAChain) > 0 {
- resp.Data["ca_chain"] = cb.CAChain
- }
-
- case "der":
- resp.Data["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes)
- resp.Data["issuing_ca"] = base64.StdEncoding.EncodeToString(signingBundle.CertificateBytes)
-
- var caChain []string
- for _, caCert := range parsedBundle.CAChain {
- caChain = append(caChain, base64.StdEncoding.EncodeToString(caCert.Bytes))
- }
- if caChain != nil && len(caChain) > 0 {
- resp.Data["ca_chain"] = cb.CAChain
- }
- }
-
- err = req.Storage.Put(&logical.StorageEntry{
- Key: "certs/" + normalizeSerial(cb.SerialNumber),
- Value: parsedBundle.CertificateBytes,
- })
- if err != nil {
- return nil, fmt.Errorf("Unable to store certificate locally: %v", err)
- }
-
- if parsedBundle.Certificate.MaxPathLen == 0 {
- resp.AddWarning("Max path length of the signed certificate is zero. This certificate cannot be used to issue intermediate CA certificates.")
- }
-
- return resp, nil
-}
-
-func (b *backend) pathCASignSelfIssued(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- var err error
-
- certPem := data.Get("certificate").(string)
- block, _ := pem.Decode([]byte(certPem))
- if block == nil || len(block.Bytes) == 0 {
- return logical.ErrorResponse("certificate could not be PEM-decoded"), nil
- }
- certs, err := x509.ParseCertificates(block.Bytes)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("error parsing certificate: %s", err)), nil
- }
- if len(certs) != 1 {
- return logical.ErrorResponse(fmt.Sprintf("%d certificates found in PEM file, expected 1", len(certs))), nil
- }
-
- cert := certs[0]
- if !cert.IsCA {
- return logical.ErrorResponse("given certificate is not a CA certificate"), nil
- }
- if !reflect.DeepEqual(cert.Issuer, cert.Subject) {
- return logical.ErrorResponse("given certificate is not self-issued"), nil
- }
-
- var caErr error
- signingBundle, caErr := fetchCAInfo(req)
- switch caErr.(type) {
- case errutil.UserError:
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "could not fetch the CA certificate (was one set?): %s", caErr)}
- case errutil.InternalError:
- return nil, errutil.InternalError{Err: fmt.Sprintf(
- "error fetching CA certificate: %s", caErr)}
- }
-
- signingCB, err := signingBundle.ToCertBundle()
- if err != nil {
- return nil, fmt.Errorf("Error converting raw signing bundle to cert bundle: %s", err)
- }
-
- urls := &urlEntries{}
- if signingBundle.URLs != nil {
- urls = signingBundle.URLs
- }
- cert.IssuingCertificateURL = urls.IssuingCertificates
- cert.CRLDistributionPoints = urls.CRLDistributionPoints
- cert.OCSPServer = urls.OCSPServers
-
- newCert, err := x509.CreateCertificate(rand.Reader, cert, signingBundle.Certificate, cert.PublicKey, signingBundle.PrivateKey)
- if err != nil {
- return nil, errwrap.Wrapf("error signing self-issued certificate: {{err}}", err)
- }
- if len(newCert) == 0 {
- return nil, fmt.Errorf("nil cert was created when signing self-issued certificate")
- }
- pemCert := pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE",
- Bytes: newCert,
- })
-
- return &logical.Response{
- Data: map[string]interface{}{
- "certificate": string(pemCert),
- "issuing_ca": signingCB.Certificate,
- },
- }, nil
-}
-
-const pathGenerateRootHelpSyn = `
-Generate a new CA certificate and private key used for signing.
-`
-
-const pathGenerateRootHelpDesc = `
-See the API documentation for more information.
-`
-
-const pathDeleteRootHelpSyn = `
-Deletes the root CA key to allow a new one to be generated.
-`
-
-const pathDeleteRootHelpDesc = `
-See the API documentation for more information.
-`
-
-const pathSignIntermediateHelpSyn = `
-Issue an intermediate CA certificate based on the provided CSR.
-`
-
-const pathSignIntermediateHelpDesc = `
-see the API documentation for more information.
-`
-
-const pathSignSelfIssuedHelpSyn = `
-Signs another CA's self-issued certificate.
-`
-
-const pathSignSelfIssuedHelpDesc = `
-Signs another CA's self-issued certificate. This is most often used for rolling roots; unless you know you need this you probably want to use sign-intermediate instead.
-
-Note that this is a very privileged operation and should be extremely restricted in terms of who is allowed to use it. All values will be taken directly from the incoming certificate and only verification that it is self-issued will be performed.
-
-Configured URLs for CRLs/OCSP/etc. will be copied over and the issuer will be this mount's CA cert. Other than that, all other values will be used verbatim.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_tidy.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_tidy.go
deleted file mode 100644
index 386ff0f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_tidy.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package pki
-
-import (
- "crypto/x509"
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathTidy(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "tidy",
- Fields: map[string]*framework.FieldSchema{
- "tidy_cert_store": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `Set to true to enable tidying up
-the certificate store`,
- Default: false,
- },
-
- "tidy_revocation_list": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `Set to true to enable tidying up
-the revocation list`,
- Default: false,
- },
-
- "safety_buffer": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: `The amount of extra time that must have passed
-beyond certificate expiration before it is removed
-from the backend storage and/or revocation list.
-Defaults to 72 hours.`,
- Default: 259200, //72h, but TypeDurationSecond currently requires defaults to be int
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathTidyWrite,
- },
-
- HelpSynopsis: pathTidyHelpSyn,
- HelpDescription: pathTidyHelpDesc,
- }
-}
-
-func (b *backend) pathTidyWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- safetyBuffer := d.Get("safety_buffer").(int)
- tidyCertStore := d.Get("tidy_cert_store").(bool)
- tidyRevocationList := d.Get("tidy_revocation_list").(bool)
-
- bufferDuration := time.Duration(safetyBuffer) * time.Second
-
- if tidyCertStore {
- serials, err := req.Storage.List("certs/")
- if err != nil {
- return nil, fmt.Errorf("error fetching list of certs: %s", err)
- }
-
- for _, serial := range serials {
- certEntry, err := req.Storage.Get("certs/" + serial)
- if err != nil {
- return nil, fmt.Errorf("error fetching certificate %s: %s", serial, err)
- }
-
- if certEntry == nil {
- return nil, fmt.Errorf("certificate entry for serial %s is nil", serial)
- }
-
- if certEntry.Value == nil || len(certEntry.Value) == 0 {
- return nil, fmt.Errorf("found entry for serial %s but actual certificate is empty", serial)
- }
-
- cert, err := x509.ParseCertificate(certEntry.Value)
- if err != nil {
- return nil, fmt.Errorf("unable to parse stored certificate with serial %s: %s", serial, err)
- }
-
- if time.Now().After(cert.NotAfter.Add(bufferDuration)) {
- if err := req.Storage.Delete("certs/" + serial); err != nil {
- return nil, fmt.Errorf("error deleting serial %s from storage: %s", serial, err)
- }
- }
- }
- }
-
- if tidyRevocationList {
- b.revokeStorageLock.Lock()
- defer b.revokeStorageLock.Unlock()
-
- tidiedRevoked := false
-
- revokedSerials, err := req.Storage.List("revoked/")
- if err != nil {
- return nil, fmt.Errorf("error fetching list of revoked certs: %s", err)
- }
-
- var revInfo revocationInfo
- for _, serial := range revokedSerials {
- revokedEntry, err := req.Storage.Get("revoked/" + serial)
- if err != nil {
- return nil, fmt.Errorf("unable to fetch revoked cert with serial %s: %s", serial, err)
- }
- if revokedEntry == nil {
- return nil, fmt.Errorf("revoked certificate entry for serial %s is nil", serial)
- }
- if revokedEntry.Value == nil || len(revokedEntry.Value) == 0 {
- // TODO: In this case, remove it and continue? How likely is this to
- // happen? Alternately, could skip it entirely, or could implement a
- // delete function so that there is a way to remove these
- return nil, fmt.Errorf("found revoked serial but actual certificate is empty")
- }
-
- err = revokedEntry.DecodeJSON(&revInfo)
- if err != nil {
- return nil, fmt.Errorf("error decoding revocation entry for serial %s: %s", serial, err)
- }
-
- revokedCert, err := x509.ParseCertificate(revInfo.CertificateBytes)
- if err != nil {
- return nil, fmt.Errorf("unable to parse stored revoked certificate with serial %s: %s", serial, err)
- }
-
- if time.Now().After(revokedCert.NotAfter.Add(bufferDuration)) {
- if err := req.Storage.Delete("revoked/" + serial); err != nil {
- return nil, fmt.Errorf("error deleting serial %s from revoked list: %s", serial, err)
- }
- tidiedRevoked = true
- }
- }
-
- if tidiedRevoked {
- if err := buildCRL(b, req); err != nil {
- return nil, err
- }
- }
- }
-
- return nil, nil
-}
-
-const pathTidyHelpSyn = `
-Tidy up the backend by removing expired certificates, revocation information,
-or both.
-`
-
-const pathTidyHelpDesc = `
-This endpoint allows expired certificates and/or revocation information to be
-removed from the backend, freeing up storage and shortening CRLs.
-
-For safety, this function is a noop if called without parameters; cleanup from
-normal certificate storage must be enabled with 'tidy_cert_store' and cleanup
-from revocation information must be enabled with 'tidy_revocation_list'.
-
-The 'safety_buffer' parameter is useful to ensure that clock skew amongst your
-hosts cannot lead to a certificate being removed from the CRL while it is still
-considered valid by other hosts (for instance, if their clocks are a few
-minutes behind). The 'safety_buffer' parameter can be an integer number of
-seconds or a string duration like "72h".
-
-All certificates and/or revocation information currently stored in the backend
-will be checked when this endpoint is hit. The expiration of the
-certificate/revocation information of each certificate being held in
-certificate storage or in revocation infomation will then be checked. If the
-current time, minus the value of 'safety_buffer', is greater than the
-expiration, it will be removed.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/secret_certs.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/secret_certs.go
deleted file mode 100644
index 32f6f42..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/secret_certs.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package pki
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// SecretCertsType is the name used to identify this type
-const SecretCertsType = "pki"
-
-func secretCerts(b *backend) *framework.Secret {
- return &framework.Secret{
- Type: SecretCertsType,
- Fields: map[string]*framework.FieldSchema{
- "certificate": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The PEM-encoded concatenated certificate and
-issuing certificate authority`,
- },
- "private_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The PEM-encoded private key for the certificate",
- },
- "serial": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The serial number of the certificate, for handy
-reference`,
- },
- },
-
- Revoke: b.secretCredsRevoke,
- }
-}
-
-func (b *backend) secretCredsRevoke(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- if req.Secret == nil {
- return nil, fmt.Errorf("secret is nil in request")
- }
-
- serialInt, ok := req.Secret.InternalData["serial_number"]
- if !ok {
- return nil, fmt.Errorf("could not find serial in internal secret data")
- }
-
- b.revokeStorageLock.Lock()
- defer b.revokeStorageLock.Unlock()
-
- return revokeCert(b, req, serialInt.(string), true)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/util.go
deleted file mode 100644
index 3dffb53..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/util.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package pki
-
-import "strings"
-
-func normalizeSerial(serial string) string {
- return strings.Replace(strings.ToLower(serial), ":", "-", -1)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go
deleted file mode 100644
index 4a689f8..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package postgresql
-
-import (
- "database/sql"
- "fmt"
- "strings"
- "sync"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend(conf)
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend(conf *logical.BackendConfig) *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(backendHelp),
-
- Paths: []*framework.Path{
- pathConfigConnection(&b),
- pathConfigLease(&b),
- pathListRoles(&b),
- pathRoles(&b),
- pathRoleCreate(&b),
- },
-
- Secrets: []*framework.Secret{
- secretCreds(&b),
- },
-
- Clean: b.ResetDB,
- Invalidate: b.invalidate,
- BackendType: logical.TypeLogical,
- }
-
- b.logger = conf.Logger
- return &b
-}
-
-type backend struct {
- *framework.Backend
-
- db *sql.DB
- lock sync.Mutex
-
- logger log.Logger
-}
-
-// DB returns the database connection.
-func (b *backend) DB(s logical.Storage) (*sql.DB, error) {
- b.logger.Trace("postgres/db: enter")
- defer b.logger.Trace("postgres/db: exit")
-
- b.lock.Lock()
- defer b.lock.Unlock()
-
- // If we already have a DB, we got it!
- if b.db != nil {
- if err := b.db.Ping(); err == nil {
- return b.db, nil
- }
- // If the ping was unsuccessful, close it and ignore errors as we'll be
- // reestablishing anyways
- b.db.Close()
- }
-
- // Otherwise, attempt to make connection
- entry, err := s.Get("config/connection")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil,
- fmt.Errorf("configure the DB connection with config/connection first")
- }
-
- var connConfig connectionConfig
- if err := entry.DecodeJSON(&connConfig); err != nil {
- return nil, err
- }
-
- conn := connConfig.ConnectionURL
- if len(conn) == 0 {
- conn = connConfig.ConnectionString
- }
-
- // Ensure timezone is set to UTC for all the conenctions
- if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") {
- if strings.Contains(conn, "?") {
- conn += "&timezone=utc"
- } else {
- conn += "?timezone=utc"
- }
- } else {
- conn += " timezone=utc"
- }
-
- b.db, err = sql.Open("postgres", conn)
- if err != nil {
- return nil, err
- }
-
- // Set some connection pool settings. We don't need much of this,
- // since the request rate shouldn't be high.
- b.db.SetMaxOpenConns(connConfig.MaxOpenConnections)
- b.db.SetMaxIdleConns(connConfig.MaxIdleConnections)
-
- return b.db, nil
-}
-
-// ResetDB forces a connection next time DB() is called.
-func (b *backend) ResetDB() {
- b.logger.Trace("postgres/resetdb: enter")
- defer b.logger.Trace("postgres/resetdb: exit")
-
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.db != nil {
- b.db.Close()
- }
-
- b.db = nil
-}
-
-func (b *backend) invalidate(key string) {
- switch key {
- case "config/connection":
- b.ResetDB()
- }
-}
-
-// Lease returns the lease information
-func (b *backend) Lease(s logical.Storage) (*configLease, error) {
- entry, err := s.Get("config/lease")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result configLease
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-const backendHelp = `
-The PostgreSQL backend dynamically generates database users.
-
-After mounting this backend, configure it using the endpoints within
-the "config/" path.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend_test.go
deleted file mode 100644
index 5559a80..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend_test.go
+++ /dev/null
@@ -1,620 +0,0 @@
-package postgresql
-
-import (
- "database/sql"
- "encoding/json"
- "fmt"
- "log"
- "os"
- "path"
- "reflect"
- "sync"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/lib/pq"
- "github.com/mitchellh/mapstructure"
- dockertest "gopkg.in/ory-am/dockertest.v2"
-)
-
-var (
- testImagePull sync.Once
-)
-
-func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retURL string) {
- if os.Getenv("PG_URL") != "" {
- return "", os.Getenv("PG_URL")
- }
-
- // Without this the checks for whether the container has started seem to
- // never actually pass. There's really no reason to expose the test
- // containers, so don't.
- dockertest.BindDockerToLocalhost = "yep"
-
- testImagePull.Do(func() {
- dockertest.Pull("postgres")
- })
-
- cid, connErr := dockertest.ConnectToPostgreSQL(60, 500*time.Millisecond, func(connURL string) bool {
- // This will cause a validation to run
- resp, err := b.HandleRequest(&logical.Request{
- Storage: s,
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Data: map[string]interface{}{
- "connection_url": connURL,
- },
- })
- if err != nil || (resp != nil && resp.IsError()) {
- // It's likely not up and running yet, so return false and try again
- return false
- }
- if resp == nil {
- t.Fatal("expected warning")
- }
-
- retURL = connURL
- return true
- })
-
- if connErr != nil {
- t.Fatalf("could not connect to database: %v", connErr)
- }
-
- return
-}
-
-func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
- err := cid.KillRemove()
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBackend_config_connection(t *testing.T) {
- var resp *logical.Response
- var err error
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- configData := map[string]interface{}{
- "connection_url": "sample_connection_url",
- "value": "",
- "max_open_connections": 9,
- "max_idle_connections": 7,
- "verify_connection": false,
- }
-
- configReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Storage: config.StorageView,
- Data: configData,
- }
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- configReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%s resp:%#v\n", err, resp)
- }
-
- delete(configData, "verify_connection")
- if !reflect.DeepEqual(configData, resp.Data) {
- t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data)
- }
-}
-
-func TestBackend_basic(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "connection_url": connURL,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, connData, false),
- testAccStepCreateRole(t, "web", testRole, false),
- testAccStepReadCreds(t, b, config.StorageView, "web", connURL),
- },
- })
-}
-
-func TestBackend_roleCrud(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "connection_url": connURL,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, connData, false),
- testAccStepCreateRole(t, "web", testRole, false),
- testAccStepReadRole(t, "web", testRole),
- testAccStepDeleteRole(t, "web"),
- testAccStepReadRole(t, "web", ""),
- },
- })
-}
-
-func TestBackend_BlockStatements(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "connection_url": connURL,
- }
-
- jsonBlockStatement, err := json.Marshal(testBlockStatementRoleSlice)
- if err != nil {
- t.Fatal(err)
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, connData, false),
- // This will also validate the query
- testAccStepCreateRole(t, "web-block", testBlockStatementRole, true),
- testAccStepCreateRole(t, "web-block", string(jsonBlockStatement), false),
- },
- })
-}
-
-func TestBackend_roleReadOnly(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "connection_url": connURL,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, connData, false),
- testAccStepCreateRole(t, "web", testRole, false),
- testAccStepCreateRole(t, "web-readonly", testReadOnlyRole, false),
- testAccStepReadRole(t, "web-readonly", testReadOnlyRole),
- testAccStepCreateTable(t, b, config.StorageView, "web", connURL),
- testAccStepReadCreds(t, b, config.StorageView, "web-readonly", connURL),
- testAccStepDropTable(t, b, config.StorageView, "web", connURL),
- testAccStepDeleteRole(t, "web-readonly"),
- testAccStepDeleteRole(t, "web"),
- testAccStepReadRole(t, "web-readonly", ""),
- },
- })
-}
-
-func TestBackend_roleReadOnly_revocationSQL(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- cid, connURL := prepareTestContainer(t, config.StorageView, b)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- connData := map[string]interface{}{
- "connection_url": connURL,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t, connData, false),
- testAccStepCreateRoleWithRevocationSQL(t, "web", testRole, defaultRevocationSQL, false),
- testAccStepCreateRoleWithRevocationSQL(t, "web-readonly", testReadOnlyRole, defaultRevocationSQL, false),
- testAccStepReadRole(t, "web-readonly", testReadOnlyRole),
- testAccStepCreateTable(t, b, config.StorageView, "web", connURL),
- testAccStepReadCreds(t, b, config.StorageView, "web-readonly", connURL),
- testAccStepDropTable(t, b, config.StorageView, "web", connURL),
- testAccStepDeleteRole(t, "web-readonly"),
- testAccStepDeleteRole(t, "web"),
- testAccStepReadRole(t, "web-readonly", ""),
- },
- })
-}
-
-func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Data: d,
- ErrorOk: true,
- Check: func(resp *logical.Response) error {
- if expectError {
- if resp.Data == nil {
- return fmt.Errorf("data is nil")
- }
- var e struct {
- Error string `mapstructure:"error"`
- }
- if err := mapstructure.Decode(resp.Data, &e); err != nil {
- return err
- }
- if len(e.Error) == 0 {
- return fmt.Errorf("expected error, but write succeeded.")
- }
- return nil
- } else if resp != nil && resp.IsError() {
- return fmt.Errorf("got an error response: %v", resp.Error())
- }
- return nil
- },
- }
-}
-
-func testAccStepCreateRole(t *testing.T, name string, sql string, expectFail bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: path.Join("roles", name),
- Data: map[string]interface{}{
- "sql": sql,
- },
- ErrorOk: expectFail,
- }
-}
-
-func testAccStepCreateRoleWithRevocationSQL(t *testing.T, name, sql, revocationSQL string, expectFail bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: path.Join("roles", name),
- Data: map[string]interface{}{
- "sql": sql,
- "revocation_sql": revocationSQL,
- },
- ErrorOk: expectFail,
- }
-}
-
-func testAccStepDeleteRole(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: path.Join("roles", name),
- }
-}
-
-func testAccStepReadCreds(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: path.Join("creds", name),
- Check: func(resp *logical.Response) error {
- var d struct {
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- log.Printf("[TRACE] Generated credentials: %v", d)
- conn, err := pq.ParseURL(connURL)
-
- if err != nil {
- t.Fatal(err)
- }
-
- conn += " timezone=utc"
-
- db, err := sql.Open("postgres", conn)
- if err != nil {
- t.Fatal(err)
- }
-
- returnedRows := func() int {
- stmt, err := db.Prepare("SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege($1, 'information_schema.role_column_grants', 'select');")
- if err != nil {
- return -1
- }
- defer stmt.Close()
-
- rows, err := stmt.Query(d.Username)
- if err != nil {
- return -1
- }
- defer rows.Close()
-
- i := 0
- for rows.Next() {
- i++
- }
- return i
- }
-
- // minNumPermissions is the minimum number of permissions that will always be present.
- const minNumPermissions = 2
-
- userRows := returnedRows()
- if userRows < minNumPermissions {
- t.Fatalf("did not get expected number of rows, got %d", userRows)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.RevokeOperation,
- Storage: s,
- Secret: &logical.Secret{
- InternalData: map[string]interface{}{
- "secret_type": "creds",
- "username": d.Username,
- "role": name,
- },
- },
- })
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.IsError() {
- return fmt.Errorf("Error on resp: %#v", *resp)
- }
- }
-
- userRows = returnedRows()
- // User shouldn't exist so returnedRows() should encounter an error and exit with -1
- if userRows != -1 {
- t.Fatalf("did not get expected number of rows, got %d", userRows)
- }
-
- return nil
- },
- }
-}
-
-func testAccStepCreateTable(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: path.Join("creds", name),
- Check: func(resp *logical.Response) error {
- var d struct {
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- log.Printf("[TRACE] Generated credentials: %v", d)
- conn, err := pq.ParseURL(connURL)
-
- if err != nil {
- t.Fatal(err)
- }
-
- conn += " timezone=utc"
-
- db, err := sql.Open("postgres", conn)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = db.Exec("CREATE TABLE test (id SERIAL PRIMARY KEY);")
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.RevokeOperation,
- Storage: s,
- Secret: &logical.Secret{
- InternalData: map[string]interface{}{
- "secret_type": "creds",
- "username": d.Username,
- },
- },
- })
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.IsError() {
- return fmt.Errorf("Error on resp: %#v", *resp)
- }
- }
-
- return nil
- },
- }
-}
-
-func testAccStepDropTable(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: path.Join("creds", name),
- Check: func(resp *logical.Response) error {
- var d struct {
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- log.Printf("[TRACE] Generated credentials: %v", d)
- conn, err := pq.ParseURL(connURL)
-
- if err != nil {
- t.Fatal(err)
- }
-
- conn += " timezone=utc"
-
- db, err := sql.Open("postgres", conn)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = db.Exec("DROP TABLE test;")
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.RevokeOperation,
- Storage: s,
- Secret: &logical.Secret{
- InternalData: map[string]interface{}{
- "secret_type": "creds",
- "username": d.Username,
- },
- },
- })
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.IsError() {
- return fmt.Errorf("Error on resp: %#v", *resp)
- }
- }
-
- return nil
- },
- }
-}
-
-func testAccStepReadRole(t *testing.T, name string, sql string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "roles/" + name,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if sql == "" {
- return nil
- }
-
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- SQL string `mapstructure:"sql"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.SQL != sql {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- return nil
- },
- }
-}
-
-const testRole = `
-CREATE ROLE "{{name}}" WITH
- LOGIN
- PASSWORD '{{password}}'
- VALID UNTIL '{{expiration}}';
-GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
-`
-
-const testReadOnlyRole = `
-CREATE ROLE "{{name}}" WITH
- LOGIN
- PASSWORD '{{password}}'
- VALID UNTIL '{{expiration}}';
-GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";
-GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}";
-`
-
-const testBlockStatementRole = `
-DO $$
-BEGIN
- IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
- CREATE ROLE "foo-role";
- CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
- ALTER ROLE "foo-role" SET search_path = foo;
- GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
- GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
- END IF;
-END
-$$
-
-CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';
-GRANT "foo-role" TO "{{name}}";
-ALTER ROLE "{{name}}" SET search_path = foo;
-GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";
-`
-
-var testBlockStatementRoleSlice = []string{
- `
-DO $$
-BEGIN
- IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
- CREATE ROLE "foo-role";
- CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
- ALTER ROLE "foo-role" SET search_path = foo;
- GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
- GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
- END IF;
-END
-$$
-`,
- `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`,
- `GRANT "foo-role" TO "{{name}}";`,
- `ALTER ROLE "{{name}}" SET search_path = foo;`,
- `GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`,
-}
-
-const defaultRevocationSQL = `
-REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
-REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}};
-REVOKE USAGE ON SCHEMA public FROM {{name}};
-
-DROP ROLE IF EXISTS {{name}};
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_connection.go
deleted file mode 100644
index 577c296..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_connection.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package postgresql
-
-import (
- "database/sql"
- "fmt"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- _ "github.com/lib/pq"
-)
-
-func pathConfigConnection(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/connection",
- Fields: map[string]*framework.FieldSchema{
- "connection_url": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "DB connection string",
- },
-
- "value": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `DB connection string. Use 'connection_url' instead.
-This will be deprecated.`,
- },
-
- "verify_connection": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: `If set, connection_url is verified by actually connecting to the database`,
- },
-
- "max_open_connections": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `Maximum number of open connections to the database;
-a zero uses the default value of two and a
-negative value means unlimited`,
- },
-
- "max_idle_connections": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `Maximum number of idle connections to the database;
-a zero uses the value of max_open_connections
-and a negative value disables idle connections.
-If larger than max_open_connections it will be
-reduced to the same size.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathConnectionWrite,
- logical.ReadOperation: b.pathConnectionRead,
- },
-
- HelpSynopsis: pathConfigConnectionHelpSyn,
- HelpDescription: pathConfigConnectionHelpDesc,
- }
-}
-
-// pathConnectionRead reads out the connection configuration
-func (b *backend) pathConnectionRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entry, err := req.Storage.Get("config/connection")
- if err != nil {
- return nil, fmt.Errorf("failed to read connection configuration")
- }
- if entry == nil {
- return nil, nil
- }
-
- var config connectionConfig
- if err := entry.DecodeJSON(&config); err != nil {
- return nil, err
- }
- return &logical.Response{
- Data: structs.New(config).Map(),
- }, nil
-}
-
-func (b *backend) pathConnectionWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- connValue := data.Get("value").(string)
- connURL := data.Get("connection_url").(string)
- if connURL == "" {
- if connValue == "" {
- return logical.ErrorResponse("connection_url parameter must be supplied"), nil
- } else {
- connURL = connValue
- }
- }
-
- maxOpenConns := data.Get("max_open_connections").(int)
- if maxOpenConns == 0 {
- maxOpenConns = 2
- }
-
- maxIdleConns := data.Get("max_idle_connections").(int)
- if maxIdleConns == 0 {
- maxIdleConns = maxOpenConns
- }
- if maxIdleConns > maxOpenConns {
- maxIdleConns = maxOpenConns
- }
-
- // Don't check the connection_url if verification is disabled
- verifyConnection := data.Get("verify_connection").(bool)
- if verifyConnection {
- // Verify the string
- db, err := sql.Open("postgres", connURL)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error validating connection info: %s", err)), nil
- }
- defer db.Close()
- if err := db.Ping(); err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error validating connection info: %s", err)), nil
- }
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{
- ConnectionString: connValue,
- ConnectionURL: connURL,
- MaxOpenConnections: maxOpenConns,
- MaxIdleConnections: maxIdleConns,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- // Reset the DB connection
- b.ResetDB()
-
- resp := &logical.Response{}
- resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection string or URL as it is, including passwords, if any.")
-
- return resp, nil
-}
-
-type connectionConfig struct {
- ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"`
- // Deprecate "value" in coming releases
- ConnectionString string `json:"value" structs:"value" mapstructure:"value"`
- MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"`
- MaxIdleConnections int `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"`
-}
-
-const pathConfigConnectionHelpSyn = `
-Configure the connection string to talk to PostgreSQL.
-`
-
-const pathConfigConnectionHelpDesc = `
-This path configures the connection string used to connect to PostgreSQL.
-The value of the string can be a URL, or a PG style string in the
-format of "user=foo host=bar" etc.
-
-The URL looks like:
-"postgresql://user:pass@host:port/dbname"
-
-When configuring the connection string, the backend will verify its validity.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_lease.go
deleted file mode 100644
index 4bc55c8..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_config_lease.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package postgresql
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigLease(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/lease",
- Fields: map[string]*framework.FieldSchema{
- "lease": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Default lease for roles.",
- },
-
- "lease_max": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Maximum time a credential is valid for.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathLeaseRead,
- logical.UpdateOperation: b.pathLeaseWrite,
- },
-
- HelpSynopsis: pathConfigLeaseHelpSyn,
- HelpDescription: pathConfigLeaseHelpDesc,
- }
-}
-
-func (b *backend) pathLeaseWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- leaseRaw := d.Get("lease").(string)
- leaseMaxRaw := d.Get("lease_max").(string)
-
- lease, err := time.ParseDuration(leaseRaw)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Invalid lease: %s", err)), nil
- }
- leaseMax, err := time.ParseDuration(leaseMaxRaw)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Invalid lease: %s", err)), nil
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("config/lease", &configLease{
- Lease: lease,
- LeaseMax: leaseMax,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathLeaseRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- lease, err := b.Lease(req.Storage)
-
- if err != nil {
- return nil, err
- }
- if lease == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "lease": lease.Lease.String(),
- "lease_max": lease.LeaseMax.String(),
- },
- }, nil
-}
-
-type configLease struct {
- Lease time.Duration
- LeaseMax time.Duration
-}
-
-const pathConfigLeaseHelpSyn = `
-Configure the default lease information for generated credentials.
-`
-
-const pathConfigLeaseHelpDesc = `
-This configures the default lease information used for credentials
-generated by this backend. The lease specifies the duration that a
-credential will be valid for, as well as the maximum session for
-a set of credentials.
-
-The format for the lease is "1h" or integer and then unit. The longest
-unit is hour.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_role_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_role_create.go
deleted file mode 100644
index 5ca92c4..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_role_create.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package postgresql
-
-import (
- "fmt"
- "strings"
- "time"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- _ "github.com/lib/pq"
-)
-
-func pathRoleCreate(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "creds/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleCreateRead,
- },
-
- HelpSynopsis: pathRoleCreateReadHelpSyn,
- HelpDescription: pathRoleCreateReadHelpDesc,
- }
-}
-
-func (b *backend) pathRoleCreateRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.logger.Trace("postgres/pathRoleCreateRead: enter")
- defer b.logger.Trace("postgres/pathRoleCreateRead: exit")
-
- name := data.Get("name").(string)
-
- // Get the role
- b.logger.Trace("postgres/pathRoleCreateRead: getting role")
- role, err := b.Role(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
- }
-
- // Determine if we have a lease
- b.logger.Trace("postgres/pathRoleCreateRead: getting lease")
- lease, err := b.Lease(req.Storage)
- if err != nil {
- return nil, err
- }
- // Unlike some other backends we need a lease here (can't leave as 0 and
- // let core fill it in) because Postgres also expires users as a safety
- // measure, so cannot be zero
- if lease == nil {
- lease = &configLease{
- Lease: b.System().DefaultLeaseTTL(),
- }
- }
-
- // Generate the username, password and expiration. PG limits user to 63 characters
- displayName := req.DisplayName
- if len(displayName) > 26 {
- displayName = displayName[:26]
- }
- userUUID, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
- username := fmt.Sprintf("%s-%s", displayName, userUUID)
- if len(username) > 63 {
- username = username[:63]
- }
- password, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
- expiration := time.Now().
- Add(lease.Lease).
- Format("2006-01-02 15:04:05-0700")
-
- // Get our handle
- b.logger.Trace("postgres/pathRoleCreateRead: getting database handle")
- db, err := b.DB(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // Start a transaction
- b.logger.Trace("postgres/pathRoleCreateRead: starting transaction")
- tx, err := db.Begin()
- if err != nil {
- return nil, err
- }
- defer func() {
- b.logger.Trace("postgres/pathRoleCreateRead: rolling back transaction")
- tx.Rollback()
- }()
-
- // Execute each query
- for _, query := range strutil.ParseArbitraryStringSlice(role.SQL, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- b.logger.Trace("postgres/pathRoleCreateRead: preparing statement")
- stmt, err := tx.Prepare(Query(query, map[string]string{
- "name": username,
- "password": password,
- "expiration": expiration,
- }))
- if err != nil {
- return nil, err
- }
- defer stmt.Close()
- b.logger.Trace("postgres/pathRoleCreateRead: executing statement")
- if _, err := stmt.Exec(); err != nil {
- return nil, err
- }
- }
-
- // Commit the transaction
-
- b.logger.Trace("postgres/pathRoleCreateRead: committing transaction")
- if err := tx.Commit(); err != nil {
- return nil, err
- }
-
- // Return the secret
-
- b.logger.Trace("postgres/pathRoleCreateRead: generating secret")
- resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
- "username": username,
- "password": password,
- }, map[string]interface{}{
- "username": username,
- "role": name,
- })
- resp.Secret.TTL = lease.Lease
- return resp, nil
-}
-
-const pathRoleCreateReadHelpSyn = `
-Request database credentials for a certain role.
-`
-
-const pathRoleCreateReadHelpDesc = `
-This path reads database credentials for a certain role. The
-database credentials will be generated on demand and will be automatically
-revoked when the lease is up.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_roles.go
deleted file mode 100644
index dc0aaf0..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/path_roles.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package postgresql
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathListRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func pathRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": {
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
-
- "sql": {
- Type: framework.TypeString,
- Description: "SQL string to create a user. See help for more info.",
- },
-
- "revocation_sql": {
- Type: framework.TypeString,
- Description: `SQL statements to be executed to revoke a user. Must be a semicolon-separated
-string, a base64-encoded semicolon-separated string, a serialized JSON string
-array, or a base64-encoded serialized JSON string array. The '{{name}}' value
-will be substituted.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleRead,
- logical.UpdateOperation: b.pathRoleCreate,
- logical.DeleteOperation: b.pathRoleDelete,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
- entry, err := s.Get("role/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result roleEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathRoleDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("role/" + data.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathRoleRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- role, err := b.Role(req.Storage, data.Get("name").(string))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "sql": role.SQL,
- "revocation_sql": role.RevocationSQL,
- },
- }, nil
-}
-
-func (b *backend) pathRoleList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("role/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
-}
-
-func (b *backend) pathRoleCreate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
- sql := data.Get("sql").(string)
-
- // Get our connection
- db, err := b.DB(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // Test the query by trying to prepare it
- for _, query := range strutil.ParseArbitraryStringSlice(sql, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := db.Prepare(Query(query, map[string]string{
- "name": "foo",
- "password": "bar",
- "expiration": "",
- }))
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error testing query: %s", err)), nil
- }
- stmt.Close()
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
- SQL: sql,
- RevocationSQL: data.Get("revocation_sql").(string),
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-type roleEntry struct {
- SQL string `json:"sql" mapstructure:"sql" structs:"sql"`
- RevocationSQL string `json:"revocation_sql" mapstructure:"revocation_sql" structs:"revocation_sql"`
-}
-
-const pathRoleHelpSyn = `
-Manage the roles that can be created with this backend.
-`
-
-const pathRoleHelpDesc = `
-This path lets you manage the roles that can be created with this backend.
-
-The "sql" parameter customizes the SQL string used to create the role.
-This can be a sequence of SQL queries. Some substitution will be done to the
-SQL string for certain keys. The names of the variables must be surrounded
-by "{{" and "}}" to be replaced.
-
- * "name" - The random username generated for the DB user.
-
- * "password" - The random password generated for the DB user.
-
- * "expiration" - The timestamp when this user will expire.
-
-Example of a decent SQL query to use:
-
- CREATE ROLE "{{name}}" WITH
- LOGIN
- PASSWORD '{{password}}'
- VALID UNTIL '{{expiration}}';
- GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
-
-Note the above user would be able to access everything in schema public.
-For more complex GRANT clauses, see the PostgreSQL manual.
-
-The "revocation_sql" parameter customizes the SQL string used to revoke a user.
-Example of a decent revocation SQL query to use:
-
- REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
- REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}};
- REVOKE USAGE ON SCHEMA public FROM {{name}};
- DROP ROLE IF EXISTS {{name}};
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/query.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/query.go
deleted file mode 100644
index e4f7f59..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/query.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package postgresql
-
-import (
- "fmt"
- "strings"
-)
-
-// Query templates a query for us.
-func Query(tpl string, data map[string]string) string {
- for k, v := range data {
- tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
- }
-
- return tpl
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go
deleted file mode 100644
index 9c5010a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go
+++ /dev/null
@@ -1,274 +0,0 @@
-package postgresql
-
-import (
- "database/sql"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/lib/pq"
-)
-
-const SecretCredsType = "creds"
-
-func secretCreds(b *backend) *framework.Secret {
- return &framework.Secret{
- Type: SecretCredsType,
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username",
- },
-
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password",
- },
- },
-
- Renew: b.secretCredsRenew,
- Revoke: b.secretCredsRevoke,
- }
-}
-
-func (b *backend) secretCredsRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the username from the internal data
- usernameRaw, ok := req.Secret.InternalData["username"]
- if !ok {
- return nil, fmt.Errorf("secret is missing username internal data")
- }
- username, ok := usernameRaw.(string)
- if !ok {
- return nil, fmt.Errorf("usernameRaw is not a string")
- }
- // Get our connection
- db, err := b.DB(req.Storage)
- if err != nil {
- return nil, err
- }
-
- // Get the lease information
- lease, err := b.Lease(req.Storage)
- if err != nil {
- return nil, err
- }
- if lease == nil {
- lease = &configLease{}
- }
-
- f := framework.LeaseExtend(lease.Lease, lease.LeaseMax, b.System())
- resp, err := f(req, d)
- if err != nil {
- return nil, err
- }
-
- // Make sure we increase the VALID UNTIL endpoint for this user.
- if expireTime := resp.Secret.ExpirationTime(); !expireTime.IsZero() {
- expiration := expireTime.Format("2006-01-02 15:04:05-0700")
-
- query := fmt.Sprintf(
- "ALTER ROLE %s VALID UNTIL '%s';",
- pq.QuoteIdentifier(username),
- expiration)
- stmt, err := db.Prepare(query)
- if err != nil {
- return nil, err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return nil, err
- }
- }
-
- return resp, nil
-}
-
-func (b *backend) secretCredsRevoke(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the username from the internal data
- usernameRaw, ok := req.Secret.InternalData["username"]
- if !ok {
- return nil, fmt.Errorf("secret is missing username internal data")
- }
- username, ok := usernameRaw.(string)
- if !ok {
- return nil, fmt.Errorf("usernameRaw is not a string")
- }
- var revocationSQL string
- var resp *logical.Response
-
- roleNameRaw, ok := req.Secret.InternalData["role"]
- if ok {
- role, err := b.Role(req.Storage, roleNameRaw.(string))
- if err != nil {
- return nil, err
- }
- if role == nil {
- if resp == nil {
- resp = &logical.Response{}
- }
- resp.AddWarning(fmt.Sprintf("Role %q cannot be found. Using default revocation SQL.", roleNameRaw.(string)))
- } else {
- revocationSQL = role.RevocationSQL
- }
- }
-
- // Get our connection
- db, err := b.DB(req.Storage)
- if err != nil {
- return nil, err
- }
-
- switch revocationSQL {
-
- // This is the default revocation logic. If revocation SQL is provided it
- // is simply executed as-is.
- case "":
- // Check if the role exists
- var exists bool
- err = db.QueryRow("SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists)
- if err != nil && err != sql.ErrNoRows {
- return nil, err
- }
-
- if exists == false {
- return resp, nil
- }
-
- // Query for permissions; we need to revoke permissions before we can drop
- // the role
- // This isn't done in a transaction because even if we fail along the way,
- // we want to remove as much access as possible
- stmt, err := db.Prepare("SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;")
- if err != nil {
- return nil, err
- }
- defer stmt.Close()
-
- rows, err := stmt.Query(username)
- if err != nil {
- return nil, err
- }
- defer rows.Close()
-
- const initialNumRevocations = 16
- revocationStmts := make([]string, 0, initialNumRevocations)
- for rows.Next() {
- var schema string
- err = rows.Scan(&schema)
- if err != nil {
- // keep going; remove as many permissions as possible right now
- continue
- }
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`,
- pq.QuoteIdentifier(schema),
- pq.QuoteIdentifier(username)))
-
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE USAGE ON SCHEMA %s FROM %s;`,
- pq.QuoteIdentifier(schema),
- pq.QuoteIdentifier(username)))
- }
-
- // for good measure, revoke all privileges and usage on schema public
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`,
- pq.QuoteIdentifier(username)))
-
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- "REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM %s;",
- pq.QuoteIdentifier(username)))
-
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- "REVOKE USAGE ON SCHEMA public FROM %s;",
- pq.QuoteIdentifier(username)))
-
- // get the current database name so we can issue a REVOKE CONNECT for
- // this username
- var dbname sql.NullString
- if err := db.QueryRow("SELECT current_database();").Scan(&dbname); err != nil {
- return nil, err
- }
-
- if dbname.Valid {
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE CONNECT ON DATABASE %s FROM %s;`,
- pq.QuoteIdentifier(dbname.String),
- pq.QuoteIdentifier(username)))
- }
-
- // again, here, we do not stop on error, as we want to remove as
- // many permissions as possible right now
- var lastStmtError error
- for _, query := range revocationStmts {
- stmt, err := db.Prepare(query)
- if err != nil {
- lastStmtError = err
- continue
- }
- defer stmt.Close()
- _, err = stmt.Exec()
- if err != nil {
- lastStmtError = err
- }
- }
-
- // can't drop if not all privileges are revoked
- if rows.Err() != nil {
- return nil, fmt.Errorf("could not generate revocation statements for all rows: %s", rows.Err())
- }
- if lastStmtError != nil {
- return nil, fmt.Errorf("could not perform all revocation statements: %s", lastStmtError)
- }
-
- // Drop this user
- stmt, err = db.Prepare(fmt.Sprintf(
- `DROP ROLE IF EXISTS %s;`, pq.QuoteIdentifier(username)))
- if err != nil {
- return nil, err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return nil, err
- }
-
- // We have revocation SQL, execute directly, within a transaction
- default:
- tx, err := db.Begin()
- if err != nil {
- return nil, err
- }
- defer func() {
- tx.Rollback()
- }()
-
- for _, query := range strutil.ParseArbitraryStringSlice(revocationSQL, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := tx.Prepare(Query(query, map[string]string{
- "name": username,
- }))
- if err != nil {
- return nil, err
- }
- defer stmt.Close()
-
- if _, err := stmt.Exec(); err != nil {
- return nil, err
- }
- }
-
- if err := tx.Commit(); err != nil {
- return nil, err
- }
- }
-
- return resp, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go
deleted file mode 100644
index 1e3f1ec..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package rabbitmq
-
-import (
- "fmt"
- "strings"
- "sync"
-
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/michaelklishin/rabbit-hole"
-)
-
-// Factory creates and configures the backend
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-// Creates a new backend with all the paths and secrets belonging to it
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(backendHelp),
-
- Paths: []*framework.Path{
- pathConfigConnection(&b),
- pathConfigLease(&b),
- pathListRoles(&b),
- pathCreds(&b),
- pathRoles(&b),
- },
-
- Secrets: []*framework.Secret{
- secretCreds(&b),
- },
-
- Clean: b.resetClient,
- Invalidate: b.invalidate,
- BackendType: logical.TypeLogical,
- }
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-
- client *rabbithole.Client
- lock sync.RWMutex
-}
-
-// DB returns the database connection.
-func (b *backend) Client(s logical.Storage) (*rabbithole.Client, error) {
- b.lock.RLock()
-
- // If we already have a client, return it
- if b.client != nil {
- b.lock.RUnlock()
- return b.client, nil
- }
-
- b.lock.RUnlock()
-
- // Otherwise, attempt to make connection
- entry, err := s.Get("config/connection")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, fmt.Errorf("configure the client connection with config/connection first")
- }
-
- var connConfig connectionConfig
- if err := entry.DecodeJSON(&connConfig); err != nil {
- return nil, err
- }
-
- b.lock.Lock()
- defer b.lock.Unlock()
-
- // If the client was creted during the lock switch, return it
- if b.client != nil {
- return b.client, nil
- }
-
- b.client, err = rabbithole.NewClient(connConfig.URI, connConfig.Username, connConfig.Password)
- if err != nil {
- return nil, err
- }
- // Use a default pooled transport so there would be no leaked file descriptors
- b.client.SetTransport(cleanhttp.DefaultPooledTransport())
-
- return b.client, nil
-}
-
-// resetClient forces a connection next time Client() is called.
-func (b *backend) resetClient() {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- b.client = nil
-}
-
-func (b *backend) invalidate(key string) {
- switch key {
- case "config/connection":
- b.resetClient()
- }
-}
-
-// Lease returns the lease information
-func (b *backend) Lease(s logical.Storage) (*configLease, error) {
- entry, err := s.Get("config/lease")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result configLease
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-const backendHelp = `
-The RabbitMQ backend dynamically generates RabbitMQ users.
-
-After mounting this backend, configure it using the endpoints within
-the "config/" path.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend_test.go
deleted file mode 100644
index 41a45eb..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend_test.go
+++ /dev/null
@@ -1,218 +0,0 @@
-package rabbitmq
-
-import (
- "fmt"
- "log"
- "os"
- "testing"
-
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/michaelklishin/rabbit-hole"
- "github.com/mitchellh/mapstructure"
-)
-
-// Set the following env vars for the below test case to work.
-//
-// RABBITMQ_CONNECTION_URI
-// RABBITMQ_USERNAME
-// RABBITMQ_PASSWORD
-func TestBackend_basic(t *testing.T) {
- if os.Getenv(logicaltest.TestEnvVar) == "" {
- t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar))
- return
- }
- b, _ := Factory(logical.TestBackendConfig())
-
- logicaltest.Test(t, logicaltest.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t),
- testAccStepRole(t),
- testAccStepReadCreds(t, b, "web"),
- },
- })
-
-}
-
-func TestBackend_roleCrud(t *testing.T) {
- if os.Getenv(logicaltest.TestEnvVar) == "" {
- t.Skip(fmt.Sprintf("Acceptance tests skipped unless env '%s' set", logicaltest.TestEnvVar))
- return
- }
- b, _ := Factory(logical.TestBackendConfig())
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepConfig(t),
- testAccStepRole(t),
- testAccStepReadRole(t, "web", "administrator", `{"/": {"configure": ".*", "write": ".*", "read": ".*"}}`),
- testAccStepDeleteRole(t, "web"),
- testAccStepReadRole(t, "web", "", ""),
- },
- })
-}
-
-const (
- envRabbitMQConnectionURI = "RABBITMQ_CONNECTION_URI"
- envRabbitMQUsername = "RABBITMQ_USERNAME"
- envRabbitMQPassword = "RABBITMQ_PASSWORD"
-)
-
-func testAccPreCheck(t *testing.T) {
- if uri := os.Getenv(envRabbitMQConnectionURI); uri == "" {
- t.Fatalf(fmt.Sprintf("%s must be set for acceptance tests", envRabbitMQConnectionURI))
- }
- if username := os.Getenv(envRabbitMQUsername); username == "" {
- t.Fatalf(fmt.Sprintf("%s must be set for acceptance tests", envRabbitMQUsername))
- }
- if password := os.Getenv(envRabbitMQPassword); password == "" {
- t.Fatalf(fmt.Sprintf("%s must be set for acceptance tests", envRabbitMQPassword))
- }
-}
-
-func testAccStepConfig(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/connection",
- Data: map[string]interface{}{
- "connection_uri": os.Getenv(envRabbitMQConnectionURI),
- "username": os.Getenv(envRabbitMQUsername),
- "password": os.Getenv(envRabbitMQPassword),
- },
- }
-}
-
-func testAccStepRole(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/web",
- Data: map[string]interface{}{
- "tags": "administrator",
- "vhosts": `{"/": {"configure": ".*", "write": ".*", "read": ".*"}}`,
- },
- }
-}
-
-func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "roles/" + n,
- }
-}
-
-func testAccStepReadCreds(t *testing.T, b logical.Backend, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "creds/" + name,
- Check: func(resp *logical.Response) error {
- var d struct {
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- log.Printf("[WARN] Generated credentials: %v", d)
-
- uri := os.Getenv(envRabbitMQConnectionURI)
-
- client, err := rabbithole.NewClient(uri, d.Username, d.Password)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.ListVhosts()
- if err != nil {
- t.Fatalf("unable to list vhosts with generated credentials: %s", err)
- }
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.RevokeOperation,
- Secret: &logical.Secret{
- InternalData: map[string]interface{}{
- "secret_type": "creds",
- "username": d.Username,
- },
- },
- })
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.IsError() {
- return fmt.Errorf("Error on resp: %#v", *resp)
- }
- }
-
- client, err = rabbithole.NewClient(uri, d.Username, d.Password)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.ListVhosts()
- if err == nil {
- t.Fatalf("expected to fail listing vhosts: %s", err)
- }
-
- return nil
- },
- }
-}
-
-func testAccStepReadRole(t *testing.T, name, tags, rawVHosts string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "roles/" + name,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if tags == "" && rawVHosts == "" {
- return nil
- }
-
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- Tags string `mapstructure:"tags"`
- VHosts map[string]vhostPermission `mapstructure:"vhosts"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.Tags != tags {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var vhosts map[string]vhostPermission
- if err := jsonutil.DecodeJSON([]byte(rawVHosts), &vhosts); err != nil {
- return fmt.Errorf("bad expected vhosts %#v: %s", vhosts, err)
- }
-
- for host, permission := range vhosts {
- actualPermission, ok := d.VHosts[host]
- if !ok {
- return fmt.Errorf("expected vhost: %s", host)
- }
-
- if actualPermission.Configure != permission.Configure {
- return fmt.Errorf("expected permission %s to be %s, got %s", "configure", permission.Configure, actualPermission.Configure)
- }
-
- if actualPermission.Write != permission.Write {
- return fmt.Errorf("expected permission %s to be %s, got %s", "write", permission.Write, actualPermission.Write)
- }
-
- if actualPermission.Read != permission.Read {
- return fmt.Errorf("expected permission %s to be %s, got %s", "read", permission.Read, actualPermission.Read)
- }
- }
-
- return nil
- },
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_connection.go
deleted file mode 100644
index 53f392c..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_connection.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package rabbitmq
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/michaelklishin/rabbit-hole"
-)
-
-func pathConfigConnection(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/connection",
- Fields: map[string]*framework.FieldSchema{
- "connection_uri": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "RabbitMQ Management URI",
- },
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username of a RabbitMQ management administrator",
- },
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password of the provided RabbitMQ management user",
- },
- "verify_connection": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: `If set, connection_uri is verified by actually connecting to the RabbitMQ management API`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathConnectionUpdate,
- },
-
- HelpSynopsis: pathConfigConnectionHelpSyn,
- HelpDescription: pathConfigConnectionHelpDesc,
- }
-}
-
-func (b *backend) pathConnectionUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- uri := data.Get("connection_uri").(string)
- if uri == "" {
- return logical.ErrorResponse("missing connection_uri"), nil
- }
-
- username := data.Get("username").(string)
- if username == "" {
- return logical.ErrorResponse("missing username"), nil
- }
-
- password := data.Get("password").(string)
- if password == "" {
- return logical.ErrorResponse("missing password"), nil
- }
-
- // Don't check the connection_url if verification is disabled
- verifyConnection := data.Get("verify_connection").(bool)
- if verifyConnection {
- // Create RabbitMQ management client
- client, err := rabbithole.NewClient(uri, username, password)
- if err != nil {
- return nil, fmt.Errorf("failed to create client: %s", err)
- }
-
- // Verify that configured credentials is capable of listing
- if _, err = client.ListUsers(); err != nil {
- return nil, fmt.Errorf("failed to validate the connection: %s", err)
- }
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{
- URI: uri,
- Username: username,
- Password: password,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- // Reset the client connection
- b.resetClient()
-
- return nil, nil
-}
-
-// connectionConfig contains the information required to make a connection to a RabbitMQ node
-type connectionConfig struct {
- // URI of the RabbitMQ server
- URI string `json:"connection_uri"`
-
- // Username which has 'administrator' tag attached to it
- Username string `json:"username"`
-
- // Password for the Username
- Password string `json:"password"`
-}
-
-const pathConfigConnectionHelpSyn = `
-Configure the connection URI, username, and password to talk to RabbitMQ management HTTP API.
-`
-
-const pathConfigConnectionHelpDesc = `
-This path configures the connection properties used to connect to RabbitMQ management HTTP API.
-The "connection_uri" parameter is a string that is used to connect to the API. The "username"
-and "password" parameters are strings that are used as credentials to the API. The "verify_connection"
-parameter is a boolean that is used to verify whether the provided connection URI, username, and password
-are valid.
-
-The URI looks like:
-"http://localhost:15672"
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease.go
deleted file mode 100644
index 25a9f48..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package rabbitmq
-
-import (
- "time"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathConfigLease(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/lease",
- Fields: map[string]*framework.FieldSchema{
- "ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: "Duration before which the issued credentials needs renewal",
- },
- "max_ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: `Duration after which the issued credentials should not be allowed to be renewed`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathLeaseRead,
- logical.UpdateOperation: b.pathLeaseUpdate,
- },
-
- HelpSynopsis: pathConfigLeaseHelpSyn,
- HelpDescription: pathConfigLeaseHelpDesc,
- }
-}
-
-// Sets the lease configuration parameters
-func (b *backend) pathLeaseUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entry, err := logical.StorageEntryJSON("config/lease", &configLease{
- TTL: time.Second * time.Duration(d.Get("ttl").(int)),
- MaxTTL: time.Second * time.Duration(d.Get("max_ttl").(int)),
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// Returns the lease configuration parameters
-func (b *backend) pathLeaseRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- lease, err := b.Lease(req.Storage)
- if err != nil {
- return nil, err
- }
- if lease == nil {
- return nil, nil
- }
-
- lease.TTL = lease.TTL / time.Second
- lease.MaxTTL = lease.MaxTTL / time.Second
-
- return &logical.Response{
- Data: structs.New(lease).Map(),
- }, nil
-}
-
-// Lease configuration information for the secrets issued by this backend
-type configLease struct {
- TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
- MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
-}
-
-var pathConfigLeaseHelpSyn = "Configure the lease parameters for generated credentials"
-
-var pathConfigLeaseHelpDesc = `
-Sets the ttl and max_ttl values for the secrets to be issued by this backend.
-Both ttl and max_ttl takes in an integer number of seconds as input as well as
-inputs like "1h".
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go
deleted file mode 100644
index 4182fd4..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package rabbitmq
-
-import (
- "testing"
- "time"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestBackend_config_lease_RU(t *testing.T) {
- var resp *logical.Response
- var err error
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b := Backend()
- if err = b.Setup(config); err != nil {
- t.Fatal(err)
- }
-
- configData := map[string]interface{}{
- "ttl": "10h",
- "max_ttl": "20h",
- }
- configReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/lease",
- Storage: config.StorageView,
- Data: configData,
- }
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: resp: %#v\nerr:%s", resp, err)
- }
- if resp != nil {
- t.Fatal("expected a nil response")
- }
-
- configReq.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(configReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: resp: %#v\nerr:%s", resp, err)
- }
- if resp == nil {
- t.Fatal("expected a response")
- }
-
- if resp.Data["ttl"].(time.Duration) != 36000 {
- t.Fatalf("bad: ttl: expected:36000 actual:%d", resp.Data["ttl"].(time.Duration))
- }
- if resp.Data["max_ttl"].(time.Duration) != 72000 {
- t.Fatalf("bad: ttl: expected:72000 actual:%d", resp.Data["ttl"].(time.Duration))
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_role_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_role_create.go
deleted file mode 100644
index 240eb2a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_role_create.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package rabbitmq
-
-import (
- "fmt"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/michaelklishin/rabbit-hole"
-)
-
-func pathCreds(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "creds/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathCredsRead,
- },
-
- HelpSynopsis: pathRoleCreateReadHelpSyn,
- HelpDescription: pathRoleCreateReadHelpDesc,
- }
-}
-
-// Issues the credential based on the role name
-func (b *backend) pathCredsRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- if name == "" {
- return logical.ErrorResponse("missing name"), nil
- }
-
- // Get the role
- role, err := b.Role(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
- }
-
- // Ensure username is unique
- uuidVal, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
- username := fmt.Sprintf("%s-%s", req.DisplayName, uuidVal)
-
- password, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
-
- // Get the client configuration
- client, err := b.Client(req.Storage)
- if err != nil {
- return nil, err
- }
- if client == nil {
- return logical.ErrorResponse("failed to get the client"), nil
- }
-
- // Register the generated credentials in the backend, with the RabbitMQ server
- if _, err = client.PutUser(username, rabbithole.UserSettings{
- Password: password,
- Tags: role.Tags,
- }); err != nil {
- return nil, fmt.Errorf("failed to create a new user with the generated credentials")
- }
-
- // If the role had vhost permissions specified, assign those permissions
- // to the created username for respective vhosts.
- for vhost, permission := range role.VHosts {
- if _, err := client.UpdatePermissionsIn(vhost, username, rabbithole.Permissions{
- Configure: permission.Configure,
- Write: permission.Write,
- Read: permission.Read,
- }); err != nil {
- // Delete the user because it's in an unknown state
- if _, rmErr := client.DeleteUser(username); rmErr != nil {
- return nil, fmt.Errorf("failed to delete user:%s, err: %s. %s", username, err, rmErr)
- }
- return nil, fmt.Errorf("failed to update permissions to the %s user. err:%s", username, err)
- }
- }
-
- // Return the secret
- resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
- "username": username,
- "password": password,
- }, map[string]interface{}{
- "username": username,
- })
-
- // Determine if we have a lease
- lease, err := b.Lease(req.Storage)
- if err != nil {
- return nil, err
- }
- if lease != nil {
- resp.Secret.TTL = lease.TTL
- }
-
- return resp, nil
-}
-
-const pathRoleCreateReadHelpSyn = `
-Request RabbitMQ credentials for a certain role.
-`
-
-const pathRoleCreateReadHelpDesc = `
-This path reads RabbitMQ credentials for a certain role. The
-RabbitMQ credentials will be generated on demand and will be automatically
-revoked when the lease is up.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_roles.go
deleted file mode 100644
index bb03d3d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_roles.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package rabbitmq
-
-import (
- "fmt"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathListRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/?$",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func pathRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role.",
- },
- "tags": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Comma-separated list of tags for this role.",
- },
- "vhosts": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "A map of virtual hosts to permissions.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleRead,
- logical.UpdateOperation: b.pathRoleUpdate,
- logical.DeleteOperation: b.pathRoleDelete,
- },
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-// Reads the role configuration from the storage
-func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
- entry, err := s.Get("role/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result roleEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-// Deletes an existing role
-func (b *backend) pathRoleDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- if name == "" {
- return logical.ErrorResponse("missing name"), nil
- }
-
- return nil, req.Storage.Delete("role/" + name)
-}
-
-// Reads an existing role
-func (b *backend) pathRoleRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- if name == "" {
- return logical.ErrorResponse("missing name"), nil
- }
-
- role, err := b.Role(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: structs.New(role).Map(),
- }, nil
-}
-
-// Lists all the roles registered with the backend
-func (b *backend) pathRoleList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- roles, err := req.Storage.List("role/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(roles), nil
-}
-
-// Registers a new role with the backend
-func (b *backend) pathRoleUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- if name == "" {
- return logical.ErrorResponse("missing name"), nil
- }
-
- tags := d.Get("tags").(string)
- rawVHosts := d.Get("vhosts").(string)
-
- if tags == "" && rawVHosts == "" {
- return logical.ErrorResponse("both tags and vhosts not specified"), nil
- }
-
- var vhosts map[string]vhostPermission
- if len(rawVHosts) > 0 {
- if err := jsonutil.DecodeJSON([]byte(rawVHosts), &vhosts); err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to unmarshal vhosts: %s", err)), nil
- }
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
- Tags: tags,
- VHosts: vhosts,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// Role that defines the capabilities of the credentials issued against it
-type roleEntry struct {
- Tags string `json:"tags" structs:"tags" mapstructure:"tags"`
- VHosts map[string]vhostPermission `json:"vhosts" structs:"vhosts" mapstructure:"vhosts"`
-}
-
-// Structure representing the permissions of a vhost
-type vhostPermission struct {
- Configure string `json:"configure" structs:"configure" mapstructure:"configure"`
- Write string `json:"write" structs:"write" mapstructure:"write"`
- Read string `json:"read" structs:"read" mapstructure:"read"`
-}
-
-const pathRoleHelpSyn = `
-Manage the roles that can be created with this backend.
-`
-
-const pathRoleHelpDesc = `
-This path lets you manage the roles that can be created with this backend.
-
-The "tags" parameter customizes the tags used to create the role.
-This is a comma separated list of strings. The "vhosts" parameter customizes
-the virtual hosts that this user will be associated with. This is a JSON object
-passed as a string in the form:
-{
- "vhostOne": {
- "configure": ".*",
- "write": ".*",
- "read": ".*"
- },
- "vhostTwo": {
- "configure": ".*",
- "write": ".*",
- "read": ".*"
- }
-}
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/secret_creds.go
deleted file mode 100644
index f59bb1a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/secret_creds.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package rabbitmq
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// SecretCredsType is the key for this backend's secrets.
-const SecretCredsType = "creds"
-
-func secretCreds(b *backend) *framework.Secret {
- return &framework.Secret{
- Type: SecretCredsType,
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "RabbitMQ username",
- },
- "password": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Password for the RabbitMQ username",
- },
- },
- Renew: b.secretCredsRenew,
- Revoke: b.secretCredsRevoke,
- }
-}
-
-// Renew the previously issued secret
-func (b *backend) secretCredsRenew(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the lease information
- lease, err := b.Lease(req.Storage)
- if err != nil {
- return nil, err
- }
- if lease == nil {
- lease = &configLease{}
- }
-
- return framework.LeaseExtend(lease.TTL, lease.MaxTTL, b.System())(req, d)
-}
-
-// Revoke the previously issued secret
-func (b *backend) secretCredsRevoke(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // Get the username from the internal data
- usernameRaw, ok := req.Secret.InternalData["username"]
- if !ok {
- return nil, fmt.Errorf("secret is missing username internal data")
- }
- username := usernameRaw.(string)
-
- // Get our connection
- client, err := b.Client(req.Storage)
- if err != nil {
- return nil, err
- }
-
- if _, err = client.DeleteUser(username); err != nil {
- return nil, fmt.Errorf("could not delete user: %s", err)
- }
-
- return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go
deleted file mode 100644
index c14685d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package ssh
-
-import (
- "strings"
- "sync"
-
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-type backend struct {
- *framework.Backend
- view logical.Storage
- salt *salt.Salt
- saltMutex sync.RWMutex
-}
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b, err := Backend(conf)
- if err != nil {
- return nil, err
- }
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend(conf *logical.BackendConfig) (*backend, error) {
- var b backend
- b.view = conf.StorageView
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(backendHelp),
-
- PathsSpecial: &logical.Paths{
- Unauthenticated: []string{
- "verify",
- "public_key",
- },
-
- LocalStorage: []string{
- "otp/",
- },
- },
-
- Paths: []*framework.Path{
- pathConfigZeroAddress(&b),
- pathKeys(&b),
- pathListRoles(&b),
- pathRoles(&b),
- pathCredsCreate(&b),
- pathLookup(&b),
- pathVerify(&b),
- pathConfigCA(&b),
- pathSign(&b),
- pathFetchPublicKey(&b),
- },
-
- Secrets: []*framework.Secret{
- secretDynamicKey(&b),
- secretOTP(&b),
- },
-
- Invalidate: b.invalidate,
- BackendType: logical.TypeLogical,
- }
- return &b, nil
-}
-
-func (b *backend) Salt() (*salt.Salt, error) {
- b.saltMutex.RLock()
- if b.salt != nil {
- defer b.saltMutex.RUnlock()
- return b.salt, nil
- }
- b.saltMutex.RUnlock()
- b.saltMutex.Lock()
- defer b.saltMutex.Unlock()
- if b.salt != nil {
- return b.salt, nil
- }
- salt, err := salt.NewSalt(b.view, &salt.Config{
- HashFunc: salt.SHA256Hash,
- Location: salt.DefaultLocation,
- })
- if err != nil {
- return nil, err
- }
- b.salt = salt
- return salt, nil
-}
-
-func (b *backend) invalidate(key string) {
- switch key {
- case salt.DefaultLocation:
- b.saltMutex.Lock()
- defer b.saltMutex.Unlock()
- b.salt = nil
- }
-}
-
-const backendHelp = `
-The SSH backend generates credentials allowing clients to establish SSH
-connections to remote hosts.
-
-There are three variants of the backend, which generate different types of
-credentials: dynamic keys, One-Time Passwords (OTPs) and certificate authority. The desired behavior
-is role-specific and chosen at role creation time with the 'key_type'
-parameter.
-
-Please see the backend documentation for a thorough description of both
-types. The Vault team strongly recommends the OTP type.
-
-After mounting this backend, before generating credentials, configure the
-backend's lease behavior using the 'config/lease' endpoint and create roles
-using the 'roles/' endpoint.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go
deleted file mode 100644
index 139d24a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go
+++ /dev/null
@@ -1,1052 +0,0 @@
-package ssh
-
-import (
- "fmt"
- "reflect"
- "testing"
- "time"
-
- "golang.org/x/crypto/ssh"
-
- "encoding/base64"
- "errors"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/mapstructure"
-)
-
-// Before the following tests are run, a username going by the name 'vaultssh' has
-// to be created and its ~/.ssh/authorized_keys file should contain the below key.
-//
-// ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9i+hFxZHGo6KblVme4zrAcJstR6I0PTJozW286X4WyvPnkMYDQ5mnhEYC7UWCvjoTWbPEXPX7NjhRtwQTGD67bV+lrxgfyzK1JZbUXK4PwgKJvQD+XyyWYMzDgGSQY61KUSqCxymSm/9NZkPU3ElaQ9xQuTzPpztM4ROfb8f2Yv6/ZESZsTo0MTAkp8Pcy+WkioI/uJ1H7zqs0EA4OMY4aDJRu0UtP4rTVeYNEAuRXdX+eH4aW3KMvhzpFTjMbaJHJXlEeUm2SaX5TNQyTOvghCeQILfYIL/Ca2ij8iwCmulwdV6eQGfd4VDu40PvSnmfoaE38o6HaPnX0kUcnKiT
-
-const (
- testIP = "127.0.0.1"
- testUserName = "vaultssh"
- testAdminUser = "vaultssh"
- testOTPKeyType = "otp"
- testDynamicKeyType = "dynamic"
- testCIDRList = "127.0.0.1/32"
- testDynamicRoleName = "testDynamicRoleName"
- testOTPRoleName = "testOTPRoleName"
- testKeyName = "testKeyName"
- testSharedPrivateKey = `
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAvYvoRcWRxqOim5VZnuM6wHCbLUeiND0yaM1tvOl+Fsrz55DG
-A0OZp4RGAu1Fgr46E1mzxFz1+zY4UbcEExg+u21fpa8YH8sytSWW1FyuD8ICib0A
-/l8slmDMw4BkkGOtSlEqgscpkpv/TWZD1NxJWkPcULk8z6c7TOETn2/H9mL+v2RE
-mbE6NDEwJKfD3MvlpIqCP7idR+86rNBAODjGOGgyUbtFLT+K01XmDRALkV3V/nh+
-GltyjL4c6RU4zG2iRyV5RHlJtkml+UzUMkzr4IQnkCC32CC/wmtoo/IsAprpcHVe
-nkBn3eFQ7uND70p5n6GhN/KOh2j519JFHJyokwIDAQABAoIBAHX7VOvBC3kCN9/x
-+aPdup84OE7Z7MvpX6w+WlUhXVugnmsAAVDczhKoUc/WktLLx2huCGhsmKvyVuH+
-MioUiE+vx75gm3qGx5xbtmOfALVMRLopjCnJYf6EaFA0ZeQ+NwowNW7Lu0PHmAU8
-Z3JiX8IwxTz14DU82buDyewO7v+cEr97AnERe3PUcSTDoUXNaoNxjNpEJkKREY6h
-4hAY676RT/GsRcQ8tqe/rnCqPHNd7JGqL+207FK4tJw7daoBjQyijWuB7K5chSal
-oPInylM6b13ASXuOAOT/2uSUBWmFVCZPDCmnZxy2SdnJGbsJAMl7Ma3MUlaGvVI+
-Tfh1aQkCgYEA4JlNOabTb3z42wz6mz+Nz3JRwbawD+PJXOk5JsSnV7DtPtfgkK9y
-6FTQdhnozGWShAvJvc+C4QAihs9AlHXoaBY5bEU7R/8UK/pSqwzam+MmxmhVDV7G
-IMQPV0FteoXTaJSikhZ88mETTegI2mik+zleBpVxvfdhE5TR+lq8Br0CgYEA2AwJ
-CUD5CYUSj09PluR0HHqamWOrJkKPFPwa+5eiTTCzfBBxImYZh7nXnWuoviXC0sg2
-AuvCW+uZ48ygv/D8gcz3j1JfbErKZJuV+TotK9rRtNIF5Ub7qysP7UjyI7zCssVM
-kuDd9LfRXaB/qGAHNkcDA8NxmHW3gpln4CFdSY8CgYANs4xwfercHEWaJ1qKagAe
-rZyrMpffAEhicJ/Z65lB0jtG4CiE6w8ZeUMWUVJQVcnwYD+4YpZbX4S7sJ0B8Ydy
-AhkSr86D/92dKTIt2STk6aCN7gNyQ1vW198PtaAWH1/cO2UHgHOy3ZUt5X/Uwxl9
-cex4flln+1Viumts2GgsCQKBgCJH7psgSyPekK5auFdKEr5+Gc/jB8I/Z3K9+g4X
-5nH3G1PBTCJYLw7hRzw8W/8oALzvddqKzEFHphiGXK94Lqjt/A4q1OdbCrhiE68D
-My21P/dAKB1UYRSs9Y8CNyHCjuZM9jSMJ8vv6vG/SOJPsnVDWVAckAbQDvlTHC9t
-O98zAoGAcbW6uFDkrv0XMCpB9Su3KaNXOR0wzag+WIFQRXCcoTvxVi9iYfUReQPi
-oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F
-+B6f4RoPdSXj24JHPg/ioRxjaj094UXJxua2yfkcecGNEuBQHSs=
------END RSA PRIVATE KEY-----
-`
- // Public half of `privateKey`, identical to how it would be fed in from a file
- publicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDArgK0ilRRfk8E7HIsjz5l3BuxmwpDd8DHRCVfOhbZ4gOSVxjEOOqBwWGjygdboBIZwFXmwDlU6sWX0hBJAgpQz0Cjvbjxtq/NjkvATrYPgnrXUhTaEn2eQO0PsqRNSFH46SK/oJfTp0q8/WgojxWJ2L7FUV8PO8uIk49DzqAqPV7WXU63vFsjx+3WQOX/ILeQvHCvaqs3dWjjzEoDudRWCOdUqcHEOshV9azIzPrXlQVzRV3QAKl6u7pC+/Secorpwt6IHpMKoVPGiR0tMMuNOVH8zrAKzIxPGfy2WmNDpJopbXMTvSOGAqNcp49O4SKOQl9Fzfq2HEevJamKLrMB dummy@example.com
-`
- publicKey2 = `AAAAB3NzaC1yc2EAAAADAQABAAABAQDArgK0ilRRfk8E7HIsjz5l3BuxmwpDd8DHRCVfOhbZ4gOSVxjEOOqBwWGjygdboBIZwFXmwDlU6sWX0hBJAgpQz0Cjvbjxtq/NjkvATrYPgnrXUhTaEn2eQO0PsqRNSFH46SK/oJfTp0q8/WgojxWJ2L7FUV8PO8uIk49DzqAqPV7WXU63vFsjx+3WQOX/ILeQvHCvaqs3dWjjzEoDudRWCOdUqcHEOshV9azIzPrXlQVzRV3QAKl6u7pC+/Secorpwt6IHpMKoVPGiR0tMMuNOVH8zrAKzIxPGfy2WmNDpJopbXMTvSOGAqNcp49O4SKOQl9Fzfq2HEevJamKLrMB
-`
- privateKey = `-----BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAwK4CtIpUUX5PBOxyLI8+ZdwbsZsKQ3fAx0QlXzoW2eIDklcY
-xDjqgcFho8oHW6ASGcBV5sA5VOrFl9IQSQIKUM9Ao7248bavzY5LwE62D4J611IU
-2hJ9nkDtD7KkTUhR+Okiv6CX06dKvP1oKI8Vidi+xVFfDzvLiJOPQ86gKj1e1l1O
-t7xbI8ft1kDl/yC3kLxwr2qrN3Vo48xKA7nUVgjnVKnBxDrIVfWsyMz615UFc0Vd
-0ACperu6Qvv0nnKK6cLeiB6TCqFTxokdLTDLjTlR/M6wCsyMTxn8tlpjQ6SaKW1z
-E70jhgKjXKePTuEijkJfRc36thxHryWpii6zAQIDAQABAoIBAA/DrPD8iF2KigiL
-F+RRa/eFhLaJStOuTpV/G9eotwnolgY5Hguf5H/tRIHUG7oBZLm6pMyWWZp7AuOj
-CjYO9q0Z5939vc349nVI+SWoyviF4msPiik1bhWulja8lPjFu/8zg+ZNy15Dx7ei
-vAzleAupMiKOv8pNSB/KguQ3WZ9a9bcQcoFQ2Foru6mXpLJ03kghVRlkqvQ7t5cA
-n11d2Hiipq9mleESr0c+MUPKLBX/neaWfGA4xgJTjIYjZi6avmYc/Ox3sQ9aLq2J
-tH0D4HVUZvaU28hn+jhbs64rRFbu++qQMe3vNvi/Q/iqcYU4b6tgDNzm/JFRTS/W
-njiz4mkCgYEA44CnQVmonN6qQ0AgNNlBY5+RX3wwBJZ1AaxpzwDRylAt2vlVUA0n
-YY4RW4J4+RMRKwHwjxK5RRmHjsIJx+nrpqihW3fte3ev5F2A9Wha4dzzEHxBY6IL
-362T/x2f+vYk6tV+uTZSUPHsuELH26mitbBVFNB/00nbMNdEc2bO5FMCgYEA2NCw
-ubt+g2bRkkT/Qf8gIM8ZDpZbARt6onqxVcWkQFT16ZjbsBWUrH1Xi7alv9+lwYLJ
-ckY/XDX4KeU19HabeAbpyy6G9Q2uBSWZlJbjl7QNhdLeuzV82U1/r8fy6Uu3gQnU
-WSFx2GesRpSmZpqNKMs5ksqteZ9Yjg1EIgXdINsCgYBIn9REt1NtKGOf7kOZu1T1
-cYXdvm4xuLoHW7u3OiK+e9P3mCqU0G4m5UxDMyZdFKohWZAqjCaamWi9uNGYgOMa
-I7DG20TzaiS7OOIm9TY17eul8pSJMrypnealxRZB7fug/6Bhjaa/cktIEwFr7P4l
-E/JFH73+fBA9yipu0H3xQwKBgHmiwrLAZF6VrVcxDD9bQQwHA5iyc4Wwg+Fpkdl7
-0wUgZQHTdtRXlxwaCaZhJqX5c4WXuSo6DMvPn1TpuZZXgCsbPch2ZtJOBWXvzTSW
-XkK6iaedQMWoYU2L8+mK9FU73EwxVodWgwcUSosiVCRV6oGLWdZnjGEiK00uVh38
-Si1nAoGBAL47wWinv1cDTnh5mm0mybz3oI2a6V9aIYCloQ/EFcvtahyR/gyB8qNF
-lObH9Faf0WGdnACZvTz22U9gWhw79S0SpDV31tC5Kl8dXHFiZ09vYUKkYmSd/kms
-SeKWrUkryx46LVf6NMhkyYmRqCEjBwfOozzezi5WbiJy6nn54GQt
------END RSA PRIVATE KEY-----
-`
-)
-
-func TestBackend_allowed_users(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Initialize()
- if err != nil {
- t.Fatal(err)
- }
-
- roleData := map[string]interface{}{
- "key_type": "otp",
- "default_user": "ubuntu",
- "cidr_list": "52.207.235.245/16",
- "allowed_users": "test",
- }
-
- roleReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/role1",
- Storage: config.StorageView,
- Data: roleData,
- }
-
- resp, err := b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) || resp != nil {
- t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
- }
-
- credsData := map[string]interface{}{
- "ip": "52.207.235.245",
- "username": "ubuntu",
- }
- credsReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Storage: config.StorageView,
- Path: "creds/role1",
- Data: credsData,
- }
-
- resp, err = b.HandleRequest(credsReq)
- if err != nil || (resp != nil && resp.IsError()) || resp == nil {
- t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
- }
- if resp.Data["key"] == "" ||
- resp.Data["key_type"] != "otp" ||
- resp.Data["ip"] != "52.207.235.245" ||
- resp.Data["username"] != "ubuntu" {
- t.Fatalf("failed to create credential: resp:%#v", resp)
- }
-
- credsData["username"] = "test"
- resp, err = b.HandleRequest(credsReq)
- if err != nil || (resp != nil && resp.IsError()) || resp == nil {
- t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
- }
- if resp.Data["key"] == "" ||
- resp.Data["key_type"] != "otp" ||
- resp.Data["ip"] != "52.207.235.245" ||
- resp.Data["username"] != "test" {
- t.Fatalf("failed to create credential: resp:%#v", resp)
- }
-
- credsData["username"] = "random"
- resp, err = b.HandleRequest(credsReq)
- if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
- t.Fatalf("expected failure: resp:%#v err:%s", resp, err)
- }
-
- delete(roleData, "allowed_users")
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) || resp != nil {
- t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
- }
-
- credsData["username"] = "ubuntu"
- resp, err = b.HandleRequest(credsReq)
- if err != nil || (resp != nil && resp.IsError()) || resp == nil {
- t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
- }
- if resp.Data["key"] == "" ||
- resp.Data["key_type"] != "otp" ||
- resp.Data["ip"] != "52.207.235.245" ||
- resp.Data["username"] != "ubuntu" {
- t.Fatalf("failed to create credential: resp:%#v", resp)
- }
-
- credsData["username"] = "test"
- resp, err = b.HandleRequest(credsReq)
- if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
- t.Fatalf("expected failure: resp:%#v err:%s", resp, err)
- }
-
- roleData["allowed_users"] = "*"
- resp, err = b.HandleRequest(roleReq)
- if err != nil || (resp != nil && resp.IsError()) || resp != nil {
- t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
- }
-
- resp, err = b.HandleRequest(credsReq)
- if err != nil || (resp != nil && resp.IsError()) || resp == nil {
- t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
- }
- if resp.Data["key"] == "" ||
- resp.Data["key_type"] != "otp" ||
- resp.Data["ip"] != "52.207.235.245" ||
- resp.Data["username"] != "test" {
- t.Fatalf("failed to create credential: resp:%#v", resp)
- }
-}
-
-func testingFactory(conf *logical.BackendConfig) (logical.Backend, error) {
- _, err := vault.StartSSHHostTestServer()
- if err != nil {
- panic(fmt.Sprintf("error starting mock server:%s", err))
- }
- defaultLeaseTTLVal := 2 * time.Minute
- maxLeaseTTLVal := 10 * time.Minute
- return Factory(&logical.BackendConfig{
- Logger: nil,
- StorageView: &logical.InmemStorage{},
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- },
- })
-}
-
-func TestSSHBackend_Lookup(t *testing.T) {
- testOTPRoleData := map[string]interface{}{
- "key_type": testOTPKeyType,
- "default_user": testUserName,
- "cidr_list": testCIDRList,
- }
- testDynamicRoleData := map[string]interface{}{
- "key_type": testDynamicKeyType,
- "key": testKeyName,
- "admin_user": testAdminUser,
- "default_user": testAdminUser,
- "cidr_list": testCIDRList,
- }
- data := map[string]interface{}{
- "ip": testIP,
- }
- resp1 := []string(nil)
- resp2 := []string{testOTPRoleName}
- resp3 := []string{testDynamicRoleName, testOTPRoleName}
- resp4 := []string{testDynamicRoleName}
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Factory: testingFactory,
- Steps: []logicaltest.TestStep{
- testLookupRead(t, data, resp1),
- testRoleWrite(t, testOTPRoleName, testOTPRoleData),
- testLookupRead(t, data, resp2),
- testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
- testRoleWrite(t, testDynamicRoleName, testDynamicRoleData),
- testLookupRead(t, data, resp3),
- testRoleDelete(t, testOTPRoleName),
- testLookupRead(t, data, resp4),
- testRoleDelete(t, testDynamicRoleName),
- testLookupRead(t, data, resp1),
- },
- })
-}
-
-func TestSSHBackend_DynamicKeyCreate(t *testing.T) {
- testDynamicRoleData := map[string]interface{}{
- "key_type": testDynamicKeyType,
- "key": testKeyName,
- "admin_user": testAdminUser,
- "default_user": testAdminUser,
- "cidr_list": testCIDRList,
- }
- data := map[string]interface{}{
- "username": testUserName,
- "ip": testIP,
- }
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Factory: testingFactory,
- Steps: []logicaltest.TestStep{
- testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
- testRoleWrite(t, testDynamicRoleName, testDynamicRoleData),
- testCredsWrite(t, testDynamicRoleName, data, false),
- },
- })
-}
-
-func TestSSHBackend_OTPRoleCrud(t *testing.T) {
- testOTPRoleData := map[string]interface{}{
- "key_type": testOTPKeyType,
- "default_user": testUserName,
- "cidr_list": testCIDRList,
- }
- respOTPRoleData := map[string]interface{}{
- "key_type": testOTPKeyType,
- "port": 22,
- "default_user": testUserName,
- "cidr_list": testCIDRList,
- }
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Factory: testingFactory,
- Steps: []logicaltest.TestStep{
- testRoleWrite(t, testOTPRoleName, testOTPRoleData),
- testRoleRead(t, testOTPRoleName, respOTPRoleData),
- testRoleDelete(t, testOTPRoleName),
- testRoleRead(t, testOTPRoleName, nil),
- },
- })
-}
-
-func TestSSHBackend_DynamicRoleCrud(t *testing.T) {
- testDynamicRoleData := map[string]interface{}{
- "key_type": testDynamicKeyType,
- "key": testKeyName,
- "admin_user": testAdminUser,
- "default_user": testAdminUser,
- "cidr_list": testCIDRList,
- }
- respDynamicRoleData := map[string]interface{}{
- "cidr_list": testCIDRList,
- "port": 22,
- "install_script": DefaultPublicKeyInstallScript,
- "key_bits": 1024,
- "key": testKeyName,
- "admin_user": testUserName,
- "default_user": testUserName,
- "key_type": testDynamicKeyType,
- }
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Factory: testingFactory,
- Steps: []logicaltest.TestStep{
- testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
- testRoleWrite(t, testDynamicRoleName, testDynamicRoleData),
- testRoleRead(t, testDynamicRoleName, respDynamicRoleData),
- testRoleDelete(t, testDynamicRoleName),
- testRoleRead(t, testDynamicRoleName, nil),
- },
- })
-}
-
-func TestSSHBackend_NamedKeysCrud(t *testing.T) {
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Factory: testingFactory,
- Steps: []logicaltest.TestStep{
- testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
- testNamedKeysDelete(t),
- },
- })
-}
-
-func TestSSHBackend_OTPCreate(t *testing.T) {
- testOTPRoleData := map[string]interface{}{
- "key_type": testOTPKeyType,
- "default_user": testUserName,
- "cidr_list": testCIDRList,
- }
- data := map[string]interface{}{
- "username": testUserName,
- "ip": testIP,
- }
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Factory: testingFactory,
- Steps: []logicaltest.TestStep{
- testRoleWrite(t, testOTPRoleName, testOTPRoleData),
- testCredsWrite(t, testOTPRoleName, data, false),
- },
- })
-}
-
-func TestSSHBackend_VerifyEcho(t *testing.T) {
- verifyData := map[string]interface{}{
- "otp": api.VerifyEchoRequest,
- }
- expectedData := map[string]interface{}{
- "message": api.VerifyEchoResponse,
- }
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Factory: testingFactory,
- Steps: []logicaltest.TestStep{
- testVerifyWrite(t, verifyData, expectedData),
- },
- })
-}
-
-func TestSSHBackend_ConfigZeroAddressCRUD(t *testing.T) {
- testOTPRoleData := map[string]interface{}{
- "key_type": testOTPKeyType,
- "default_user": testUserName,
- "cidr_list": testCIDRList,
- }
- testDynamicRoleData := map[string]interface{}{
- "key_type": testDynamicKeyType,
- "key": testKeyName,
- "admin_user": testAdminUser,
- "default_user": testAdminUser,
- "cidr_list": testCIDRList,
- }
- req1 := map[string]interface{}{
- "roles": testOTPRoleName,
- }
- resp1 := map[string]interface{}{
- "roles": []string{testOTPRoleName},
- }
- req2 := map[string]interface{}{
- "roles": fmt.Sprintf("%s,%s", testOTPRoleName, testDynamicRoleName),
- }
- resp2 := map[string]interface{}{
- "roles": []string{testOTPRoleName, testDynamicRoleName},
- }
- resp3 := map[string]interface{}{
- "roles": []string{},
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Factory: testingFactory,
- Steps: []logicaltest.TestStep{
- testRoleWrite(t, testOTPRoleName, testOTPRoleData),
- testConfigZeroAddressWrite(t, req1),
- testConfigZeroAddressRead(t, resp1),
- testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
- testRoleWrite(t, testDynamicRoleName, testDynamicRoleData),
- testConfigZeroAddressWrite(t, req2),
- testConfigZeroAddressRead(t, resp2),
- testRoleDelete(t, testDynamicRoleName),
- testConfigZeroAddressRead(t, resp1),
- testRoleDelete(t, testOTPRoleName),
- testConfigZeroAddressRead(t, resp3),
- testConfigZeroAddressDelete(t),
- },
- })
-}
-
-func TestSSHBackend_CredsForZeroAddressRoles(t *testing.T) {
- dynamicRoleData := map[string]interface{}{
- "key_type": testDynamicKeyType,
- "key": testKeyName,
- "admin_user": testAdminUser,
- "default_user": testAdminUser,
- }
- otpRoleData := map[string]interface{}{
- "key_type": testOTPKeyType,
- "default_user": testUserName,
- }
- data := map[string]interface{}{
- "username": testUserName,
- "ip": testIP,
- }
- req1 := map[string]interface{}{
- "roles": testOTPRoleName,
- }
- req2 := map[string]interface{}{
- "roles": fmt.Sprintf("%s,%s", testOTPRoleName, testDynamicRoleName),
- }
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Factory: testingFactory,
- Steps: []logicaltest.TestStep{
- testRoleWrite(t, testOTPRoleName, otpRoleData),
- testCredsWrite(t, testOTPRoleName, data, true),
- testConfigZeroAddressWrite(t, req1),
- testCredsWrite(t, testOTPRoleName, data, false),
- testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
- testRoleWrite(t, testDynamicRoleName, dynamicRoleData),
- testCredsWrite(t, testDynamicRoleName, data, true),
- testConfigZeroAddressWrite(t, req2),
- testCredsWrite(t, testDynamicRoleName, data, false),
- testConfigZeroAddressDelete(t),
- testCredsWrite(t, testOTPRoleName, data, true),
- testCredsWrite(t, testDynamicRoleName, data, true),
- },
- })
-}
-
-func TestBackend_AbleToRetrievePublicKey(t *testing.T) {
-
- config := logical.TestBackendConfig()
-
- b, err := Factory(config)
- if err != nil {
- t.Fatalf("Cannot create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- configCaStep(),
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "public_key",
- Unauthenticated: true,
-
- Check: func(resp *logical.Response) error {
-
- key := string(resp.Data["http_raw_body"].([]byte))
-
- if key != publicKey {
- return fmt.Errorf("public_key incorrect. Expected %v, actual %v", publicKey, key)
- }
-
- return nil
- },
- },
- },
- }
-
- logicaltest.Test(t, testCase)
-}
-
-func TestBackend_AbleToAutoGenerateSigningKeys(t *testing.T) {
-
- config := logical.TestBackendConfig()
-
- b, err := Factory(config)
- if err != nil {
- t.Fatalf("Cannot create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- },
-
- logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "public_key",
- Unauthenticated: true,
-
- Check: func(resp *logical.Response) error {
-
- key := string(resp.Data["http_raw_body"].([]byte))
-
- if key == "" {
- return fmt.Errorf("public_key empty. Expected not empty, actual %s", key)
- }
-
- return nil
- },
- },
- },
- }
-
- logicaltest.Test(t, testCase)
-}
-
-func TestBackend_ValidPrincipalsValidatedForHostCertificates(t *testing.T) {
- config := logical.TestBackendConfig()
-
- b, err := Factory(config)
- if err != nil {
- t.Fatalf("Cannot create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- configCaStep(),
-
- createRoleStep("testing", map[string]interface{}{
- "key_type": "ca",
- "allow_host_certificates": true,
- "allowed_domains": "example.com,example.org",
- "allow_subdomains": true,
- "default_critical_options": map[string]interface{}{
- "option": "value",
- },
- "default_extensions": map[string]interface{}{
- "extension": "extended",
- },
- }),
-
- signCertificateStep("testing", "vault-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.HostCert, []string{"dummy.example.org", "second.example.com"}, map[string]string{
- "option": "value",
- }, map[string]string{
- "extension": "extended",
- },
- 2*time.Hour, map[string]interface{}{
- "public_key": publicKey2,
- "ttl": "2h",
- "cert_type": "host",
- "valid_principals": "dummy.example.org,second.example.com",
- }),
- },
- }
-
- logicaltest.Test(t, testCase)
-}
-
-func TestBackend_OptionsOverrideDefaults(t *testing.T) {
- config := logical.TestBackendConfig()
-
- b, err := Factory(config)
- if err != nil {
- t.Fatalf("Cannot create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- configCaStep(),
-
- createRoleStep("testing", map[string]interface{}{
- "key_type": "ca",
- "allowed_users": "tuber",
- "default_user": "tuber",
- "allow_user_certificates": true,
- "allowed_critical_options": "option,secondary",
- "allowed_extensions": "extension,additional",
- "default_critical_options": map[string]interface{}{
- "option": "value",
- },
- "default_extensions": map[string]interface{}{
- "extension": "extended",
- },
- }),
-
- signCertificateStep("testing", "vault-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.UserCert, []string{"tuber"}, map[string]string{
- "secondary": "value",
- }, map[string]string{
- "additional": "value",
- }, 2*time.Hour, map[string]interface{}{
- "public_key": publicKey2,
- "ttl": "2h",
- "critical_options": map[string]interface{}{
- "secondary": "value",
- },
- "extensions": map[string]interface{}{
- "additional": "value",
- },
- }),
- },
- }
-
- logicaltest.Test(t, testCase)
-}
-
-func TestBackend_CustomKeyIDFormat(t *testing.T) {
- config := logical.TestBackendConfig()
-
- b, err := Factory(config)
- if err != nil {
- t.Fatalf("Cannot create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- configCaStep(),
-
- createRoleStep("customrole", map[string]interface{}{
- "key_type": "ca",
- "key_id_format": "{{role_name}}-{{token_display_name}}-{{public_key_hash}}",
- "allowed_users": "tuber",
- "default_user": "tuber",
- "allow_user_certificates": true,
- "allowed_critical_options": "option,secondary",
- "allowed_extensions": "extension,additional",
- "default_critical_options": map[string]interface{}{
- "option": "value",
- },
- "default_extensions": map[string]interface{}{
- "extension": "extended",
- },
- }),
-
- signCertificateStep("customrole", "customrole-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.UserCert, []string{"tuber"}, map[string]string{
- "secondary": "value",
- }, map[string]string{
- "additional": "value",
- }, 2*time.Hour, map[string]interface{}{
- "public_key": publicKey2,
- "ttl": "2h",
- "critical_options": map[string]interface{}{
- "secondary": "value",
- },
- "extensions": map[string]interface{}{
- "additional": "value",
- },
- }),
- },
- }
-
- logicaltest.Test(t, testCase)
-}
-
-func TestBackend_DisallowUserProvidedKeyIDs(t *testing.T) {
- config := logical.TestBackendConfig()
-
- b, err := Factory(config)
- if err != nil {
- t.Fatalf("Cannot create backend: %s", err)
- }
-
- testCase := logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- configCaStep(),
-
- createRoleStep("testing", map[string]interface{}{
- "key_type": "ca",
- "allow_user_key_ids": false,
- "allow_user_certificates": true,
- }),
- logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "sign/testing",
- Data: map[string]interface{}{
- "public_key": publicKey2,
- "key_id": "override",
- },
- ErrorOk: true,
- Check: func(resp *logical.Response) error {
- if resp.Data["error"] != "setting key_id is not allowed by role" {
- return errors.New("Custom user key id was allowed even when 'allow_user_key_ids' is false.")
- }
- return nil
- },
- },
- },
- }
-
- logicaltest.Test(t, testCase)
-}
-
-func configCaStep() logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/ca",
- Data: map[string]interface{}{
- "public_key": publicKey,
- "private_key": privateKey,
- },
- }
-}
-
-func createRoleStep(name string, parameters map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.CreateOperation,
- Path: "roles/" + name,
- Data: parameters,
- }
-}
-
-func signCertificateStep(
- role, keyId string, certType int, validPrincipals []string,
- criticalOptionPermissions, extensionPermissions map[string]string,
- ttl time.Duration,
- requestParameters map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "sign/" + role,
- Data: requestParameters,
-
- Check: func(resp *logical.Response) error {
-
- serialNumber := resp.Data["serial_number"].(string)
- if serialNumber == "" {
- return errors.New("No serial number in response")
- }
-
- signedKey := strings.TrimSpace(resp.Data["signed_key"].(string))
- if signedKey == "" {
- return errors.New("No signed key in response")
- }
-
- key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1])
-
- parsedKey, err := ssh.ParsePublicKey(key)
- if err != nil {
- return err
- }
-
- return validateSSHCertificate(parsedKey.(*ssh.Certificate), keyId, certType, validPrincipals, criticalOptionPermissions, extensionPermissions, ttl)
- },
- }
-}
-
-func validateSSHCertificate(cert *ssh.Certificate, keyId string, certType int, validPrincipals []string, criticalOptionPermissions, extensionPermissions map[string]string,
- ttl time.Duration) error {
-
- if cert.KeyId != keyId {
- return fmt.Errorf("Incorrect KeyId: %v, wanted %v", cert.KeyId, keyId)
- }
-
- if cert.CertType != uint32(certType) {
- return fmt.Errorf("Incorrect CertType: %v", cert.CertType)
- }
-
- if time.Unix(int64(cert.ValidAfter), 0).After(time.Now()) {
- return fmt.Errorf("Incorrect ValidAfter: %v", cert.ValidAfter)
- }
-
- if time.Unix(int64(cert.ValidBefore), 0).Before(time.Now()) {
- return fmt.Errorf("Incorrect ValidBefore: %v", cert.ValidBefore)
- }
-
- actualTtl := time.Unix(int64(cert.ValidBefore), 0).Add(-30 * time.Second).Sub(time.Unix(int64(cert.ValidAfter), 0))
- if actualTtl != ttl {
- return fmt.Errorf("Incorrect ttl: expected: %v, actualL %v", ttl, actualTtl)
- }
-
- if !reflect.DeepEqual(cert.ValidPrincipals, validPrincipals) {
- return fmt.Errorf("Incorrect ValidPrincipals: expected: %#v actual: %#v", validPrincipals, cert.ValidPrincipals)
- }
-
- publicSigningKey, err := getSigningPublicKey()
- if err != nil {
- return err
- }
- if !reflect.DeepEqual(cert.SignatureKey, publicSigningKey) {
- return fmt.Errorf("Incorrect SignatureKey: %v", cert.SignatureKey)
- }
-
- if cert.Signature == nil {
- return fmt.Errorf("Incorrect Signature: %v", cert.Signature)
- }
-
- if !reflect.DeepEqual(cert.Permissions.Extensions, extensionPermissions) {
- return fmt.Errorf("Incorrect Permissions.Extensions: Expected: %v, Actual: %v", extensionPermissions, cert.Permissions.Extensions)
- }
-
- if !reflect.DeepEqual(cert.Permissions.CriticalOptions, criticalOptionPermissions) {
- return fmt.Errorf("Incorrect Permissions.CriticalOptions: %v", cert.Permissions.CriticalOptions)
- }
-
- return nil
-}
-
-func getSigningPublicKey() (ssh.PublicKey, error) {
- key, err := base64.StdEncoding.DecodeString(strings.Split(publicKey, " ")[1])
- if err != nil {
- return nil, err
- }
-
- parsedKey, err := ssh.ParsePublicKey(key)
- if err != nil {
- return nil, err
- }
-
- return parsedKey, nil
-}
-
-func testConfigZeroAddressDelete(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "config/zeroaddress",
- }
-}
-
-func testConfigZeroAddressWrite(t *testing.T, data map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "config/zeroaddress",
- Data: data,
- }
-}
-
-func testConfigZeroAddressRead(t *testing.T, expected map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "config/zeroaddress",
- Check: func(resp *logical.Response) error {
- var d zeroAddressRoles
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- var ex zeroAddressRoles
- if err := mapstructure.Decode(expected, &ex); err != nil {
- return err
- }
-
- if !reflect.DeepEqual(d, ex) {
- return fmt.Errorf("Response mismatch:\nActual:%#v\nExpected:%#v", d, ex)
- }
-
- return nil
- },
- }
-}
-
-func testVerifyWrite(t *testing.T, data map[string]interface{}, expected map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: fmt.Sprintf("verify"),
- Data: data,
- Check: func(resp *logical.Response) error {
- var ac api.SSHVerifyResponse
- if err := mapstructure.Decode(resp.Data, &ac); err != nil {
- return err
- }
- var ex api.SSHVerifyResponse
- if err := mapstructure.Decode(expected, &ex); err != nil {
- return err
- }
-
- if !reflect.DeepEqual(ac, ex) {
- return fmt.Errorf("Invalid response")
- }
- return nil
- },
- }
-}
-
-func testNamedKeysWrite(t *testing.T, name, key string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: fmt.Sprintf("keys/%s", name),
- Data: map[string]interface{}{
- "key": key,
- },
- }
-}
-
-func testNamedKeysDelete(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: fmt.Sprintf("keys/%s", testKeyName),
- }
-}
-
-func testLookupRead(t *testing.T, data map[string]interface{}, expected []string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "lookup",
- Data: data,
- Check: func(resp *logical.Response) error {
- if resp.Data == nil || resp.Data["roles"] == nil {
- return fmt.Errorf("Missing roles information")
- }
- if !reflect.DeepEqual(resp.Data["roles"].([]string), expected) {
- return fmt.Errorf("Invalid response: \nactual:%#v\nexpected:%#v", resp.Data["roles"].([]string), expected)
- }
- return nil
- },
- }
-}
-
-func testRoleWrite(t *testing.T, name string, data map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "roles/" + name,
- Data: data,
- }
-}
-
-func testRoleRead(t *testing.T, roleName string, expected map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "roles/" + roleName,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if expected == nil {
- return nil
- }
- return fmt.Errorf("bad: %#v", resp)
- }
- var d sshRole
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return fmt.Errorf("error decoding response:%s", err)
- }
- if roleName == testOTPRoleName {
- if d.KeyType != expected["key_type"] || d.DefaultUser != expected["default_user"] || d.CIDRList != expected["cidr_list"] {
- return fmt.Errorf("data mismatch. bad: %#v", resp)
- }
- } else {
- if d.AdminUser != expected["admin_user"] || d.CIDRList != expected["cidr_list"] || d.KeyName != expected["key"] || d.KeyType != expected["key_type"] {
- return fmt.Errorf("data mismatch. bad: %#v", resp)
- }
- }
- return nil
- },
- }
-}
-
-func testRoleDelete(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "roles/" + name,
- }
-}
-
-func testCredsWrite(t *testing.T, roleName string, data map[string]interface{}, expectError bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: fmt.Sprintf("creds/%s", roleName),
- Data: data,
- ErrorOk: true,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- return fmt.Errorf("response is nil")
- }
- if resp.Data == nil {
- return fmt.Errorf("data is nil")
- }
- if expectError {
- var e struct {
- Error string `mapstructure:"error"`
- }
- if err := mapstructure.Decode(resp.Data, &e); err != nil {
- return err
- }
- if len(e.Error) == 0 {
- return fmt.Errorf("expected error, but write succeeded.")
- }
- return nil
- }
- if roleName == testDynamicRoleName {
- var d struct {
- Key string `mapstructure:"key"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- if d.Key == "" {
- return fmt.Errorf("Generated key is an empty string")
- }
- // Checking only for a parsable key
- _, err := ssh.ParsePrivateKey([]byte(d.Key))
- if err != nil {
- return fmt.Errorf("Generated key is invalid")
- }
- } else {
- if resp.Data["key_type"] != KeyTypeOTP {
- return fmt.Errorf("Incorrect key_type")
- }
- if resp.Data["key"] == nil {
- return fmt.Errorf("Invalid key")
- }
- }
- return nil
- },
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/communicator.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/communicator.go
deleted file mode 100644
index 3ab86fa..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/communicator.go
+++ /dev/null
@@ -1,350 +0,0 @@
-package ssh
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "os"
- "path/filepath"
-
- log "github.com/mgutz/logxi/v1"
-
- "golang.org/x/crypto/ssh"
- "golang.org/x/crypto/ssh/agent"
-)
-
-type comm struct {
- client *ssh.Client
- config *SSHCommConfig
- conn net.Conn
- address string
-}
-
-// SSHCommConfig is the structure used to configure the SSH communicator.
-type SSHCommConfig struct {
- // The configuration of the Go SSH connection
- SSHConfig *ssh.ClientConfig
-
- // Connection returns a new connection. The current connection
- // in use will be closed as part of the Close method, or in the
- // case an error occurs.
- Connection func() (net.Conn, error)
-
- // Pty, if true, will request a pty from the remote end.
- Pty bool
-
- // DisableAgent, if true, will not forward the SSH agent.
- DisableAgent bool
-
- // Logger for output
- Logger log.Logger
-}
-
-// Creates a new communicator implementation over SSH. This takes
-// an already existing TCP connection and SSH configuration.
-func SSHCommNew(address string, config *SSHCommConfig) (result *comm, err error) {
- // Establish an initial connection and connect
- result = &comm{
- config: config,
- address: address,
- }
-
- if err = result.reconnect(); err != nil {
- result = nil
- return
- }
-
- return
-}
-
-func (c *comm) Close() error {
- var err error
- if c.conn != nil {
- err = c.conn.Close()
- }
- c.conn = nil
- c.client = nil
- return err
-}
-
-func (c *comm) Upload(path string, input io.Reader, fi *os.FileInfo) error {
- // The target directory and file for talking the SCP protocol
- target_dir := filepath.Dir(path)
- target_file := filepath.Base(path)
-
- // On windows, filepath.Dir uses backslash separators (ie. "\tmp").
- // This does not work when the target host is unix. Switch to forward slash
- // which works for unix and windows
- target_dir = filepath.ToSlash(target_dir)
-
- scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error {
- return scpUploadFile(target_file, input, w, stdoutR, fi)
- }
-
- return c.scpSession("scp -vt "+target_dir, scpFunc)
-}
-
-func (c *comm) NewSession() (session *ssh.Session, err error) {
- if c.client == nil {
- err = errors.New("client not available")
- } else {
- session, err = c.client.NewSession()
- }
-
- if err != nil {
- c.config.Logger.Error("ssh session open error, attempting reconnect", "error", err)
- if err := c.reconnect(); err != nil {
- c.config.Logger.Error("reconnect attempt failed", "error", err)
- return nil, err
- }
-
- return c.client.NewSession()
- }
-
- return session, nil
-}
-
-func (c *comm) reconnect() error {
- // Close previous connection.
- if c.conn != nil {
- c.Close()
- }
-
- var err error
- c.conn, err = c.config.Connection()
- if err != nil {
- // Explicitly set this to the REAL nil. Connection() can return
- // a nil implementation of net.Conn which will make the
- // "if c.conn == nil" check fail above. Read here for more information
- // on this psychotic language feature:
- //
- // http://golang.org/doc/faq#nil_error
- c.conn = nil
- c.config.Logger.Error("reconnection error", "error", err)
- return err
- }
-
- sshConn, sshChan, req, err := ssh.NewClientConn(c.conn, c.address, c.config.SSHConfig)
- if err != nil {
- c.config.Logger.Error("handshake error", "error", err)
- c.Close()
- return err
- }
- if sshConn != nil {
- c.client = ssh.NewClient(sshConn, sshChan, req)
- }
- c.connectToAgent()
-
- return nil
-}
-
-func (c *comm) connectToAgent() {
- if c.client == nil {
- return
- }
-
- if c.config.DisableAgent {
- return
- }
-
- // open connection to the local agent
- socketLocation := os.Getenv("SSH_AUTH_SOCK")
- if socketLocation == "" {
- return
- }
- agentConn, err := net.Dial("unix", socketLocation)
- if err != nil {
- c.config.Logger.Error("could not connect to local agent socket", "socket_path", socketLocation)
- return
- }
- defer agentConn.Close()
-
- // create agent and add in auth
- forwardingAgent := agent.NewClient(agentConn)
- if forwardingAgent == nil {
- c.config.Logger.Error("could not create agent client")
- return
- }
-
- // add callback for forwarding agent to SSH config
- // XXX - might want to handle reconnects appending multiple callbacks
- auth := ssh.PublicKeysCallback(forwardingAgent.Signers)
- c.config.SSHConfig.Auth = append(c.config.SSHConfig.Auth, auth)
- agent.ForwardToAgent(c.client, forwardingAgent)
-
- // Setup a session to request agent forwarding
- session, err := c.NewSession()
- if err != nil {
- return
- }
- defer session.Close()
-
- err = agent.RequestAgentForwarding(session)
- if err != nil {
- c.config.Logger.Error("error requesting agent forwarding", "error", err)
- return
- }
- return
-}
-
-func (c *comm) scpSession(scpCommand string, f func(io.Writer, *bufio.Reader) error) error {
- session, err := c.NewSession()
- if err != nil {
- return err
- }
- defer session.Close()
-
- // Get a pipe to stdin so that we can send data down
- stdinW, err := session.StdinPipe()
- if err != nil {
- return err
- }
-
- // We only want to close once, so we nil w after we close it,
- // and only close in the defer if it hasn't been closed already.
- defer func() {
- if stdinW != nil {
- stdinW.Close()
- }
- }()
-
- // Get a pipe to stdout so that we can get responses back
- stdoutPipe, err := session.StdoutPipe()
- if err != nil {
- return err
- }
- stdoutR := bufio.NewReader(stdoutPipe)
-
- // Set stderr to a bytes buffer
- stderr := new(bytes.Buffer)
- session.Stderr = stderr
-
- // Start the sink mode on the other side
- if err := session.Start(scpCommand); err != nil {
- return err
- }
-
- // Call our callback that executes in the context of SCP. We ignore
- // EOF errors if they occur because it usually means that SCP prematurely
- // ended on the other side.
- if err := f(stdinW, stdoutR); err != nil && err != io.EOF {
- return err
- }
-
- // Close the stdin, which sends an EOF, and then set w to nil so that
- // our defer func doesn't close it again since that is unsafe with
- // the Go SSH package.
- stdinW.Close()
- stdinW = nil
-
- // Wait for the SCP connection to close, meaning it has consumed all
- // our data and has completed. Or has errored.
- err = session.Wait()
- if err != nil {
- if exitErr, ok := err.(*ssh.ExitError); ok {
- // Otherwise, we have an ExitErorr, meaning we can just read
- // the exit status
- c.config.Logger.Error("got non-zero exit status", "exit_status", exitErr.ExitStatus())
-
- // If we exited with status 127, it means SCP isn't available.
- // Return a more descriptive error for that.
- if exitErr.ExitStatus() == 127 {
- return errors.New(
- "SCP failed to start. This usually means that SCP is not\n" +
- "properly installed on the remote system.")
- }
- }
-
- return err
- }
- return nil
-}
-
-// checkSCPStatus checks that a prior command sent to SCP completed
-// successfully. If it did not complete successfully, an error will
-// be returned.
-func checkSCPStatus(r *bufio.Reader) error {
- code, err := r.ReadByte()
- if err != nil {
- return err
- }
-
- if code != 0 {
- // Treat any non-zero (really 1 and 2) as fatal errors
- message, _, err := r.ReadLine()
- if err != nil {
- return fmt.Errorf("Error reading error message: %s", err)
- }
-
- return errors.New(string(message))
- }
-
- return nil
-}
-
-func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, fi *os.FileInfo) error {
- var mode os.FileMode
- var size int64
-
- if fi != nil && (*fi).Mode().IsRegular() {
- mode = (*fi).Mode().Perm()
- size = (*fi).Size()
- } else {
- // Create a temporary file where we can copy the contents of the src
- // so that we can determine the length, since SCP is length-prefixed.
- tf, err := ioutil.TempFile("", "vault-ssh-upload")
- if err != nil {
- return fmt.Errorf("Error creating temporary file for upload: %s", err)
- }
- defer os.Remove(tf.Name())
- defer tf.Close()
-
- mode = 0644
-
- if _, err := io.Copy(tf, src); err != nil {
- return err
- }
-
- // Sync the file so that the contents are definitely on disk, then
- // read the length of it.
- if err := tf.Sync(); err != nil {
- return fmt.Errorf("Error creating temporary file for upload: %s", err)
- }
-
- // Seek the file to the beginning so we can re-read all of it
- if _, err := tf.Seek(0, 0); err != nil {
- return fmt.Errorf("Error creating temporary file for upload: %s", err)
- }
-
- tfi, err := tf.Stat()
- if err != nil {
- return fmt.Errorf("Error creating temporary file for upload: %s", err)
- }
-
- size = tfi.Size()
- src = tf
- }
-
- // Start the protocol
- perms := fmt.Sprintf("C%04o", mode)
-
- fmt.Fprintln(w, perms, size, dst)
- if err := checkSCPStatus(r); err != nil {
- return err
- }
-
- if _, err := io.CopyN(w, src, size); err != nil {
- return err
- }
-
- fmt.Fprint(w, "\x00")
- if err := checkSCPStatus(r); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/linux_install_script.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/linux_install_script.go
deleted file mode 100644
index 2c944c4..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/linux_install_script.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package ssh
-
-const (
- // This is a constant representing a script to install and uninstall public
- // key in remote hosts.
- DefaultPublicKeyInstallScript = `
-#!/bin/bash
-#
-# This is a default script which installs or uninstalls an RSA public key to/from
-# authorized_keys file in a typical linux machine.
-#
-# If the platform differs or if the binaries used in this script are not available
-# in target machine, use the 'install_script' parameter with 'roles/' endpoint to
-# register a custom script (applicable for Dynamic type only).
-#
-# Vault server runs this script on the target machine with the following params:
-#
-# $1:INSTALL_OPTION: "install" or "uninstall"
-#
-# $2:PUBLIC_KEY_FILE: File name containing public key to be installed. Vault server
-# uses UUID as name to avoid collisions with public keys generated for other requests.
-#
-# $3:AUTH_KEYS_FILE: Absolute path of the authorized_keys file.
-# Currently, vault uses /home//.ssh/authorized_keys as the path.
-#
-# [Note: This script will be run by Vault using the registered admin username.
-# Notice that some commands below are run as 'sudo'. For graceful execution of
-# this script there should not be any password prompts. So, disable password
-# prompt for the admin username registered with Vault.
-
-set -e
-
-# Storing arguments into variables, to increase readability of the script.
-INSTALL_OPTION=$1
-PUBLIC_KEY_FILE=$2
-AUTH_KEYS_FILE=$3
-
-# Delete the public key file and the temporary file
-function cleanup
-{
- rm -f "$PUBLIC_KEY_FILE" temp_$PUBLIC_KEY_FILE
-}
-
-# 'cleanup' will be called if the script ends or if any command fails.
-trap cleanup EXIT
-
-# Return if the option is anything other than 'install' or 'uninstall'.
-if [ "$INSTALL_OPTION" != "install" ] && [ "$INSTALL_OPTION" != "uninstall" ]; then
- exit 1
-fi
-
-# Create the .ssh directory and authorized_keys file if it does not exist
-SSH_DIR=$(dirname $AUTH_KEYS_FILE)
-sudo mkdir -p "$SSH_DIR"
-sudo touch "$AUTH_KEYS_FILE"
-
-# Remove the key from authorized_keys file if it is already present.
-# This step is common for both install and uninstall. Note that grep's
-# return code is ignored, thus if grep fails all keys will be removed
-# rather than none and it fails secure
-sudo grep -vFf "$PUBLIC_KEY_FILE" "$AUTH_KEYS_FILE" > temp_$PUBLIC_KEY_FILE || true
-cat temp_$PUBLIC_KEY_FILE | sudo tee "$AUTH_KEYS_FILE"
-
-# Append the new public key to authorized_keys file
-if [ "$INSTALL_OPTION" == "install" ]; then
- cat "$PUBLIC_KEY_FILE" | sudo tee --append "$AUTH_KEYS_FILE"
-fi
-`
-)
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca.go
deleted file mode 100644
index 37300b7..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca.go
+++ /dev/null
@@ -1,286 +0,0 @@
-package ssh
-
-import (
- "crypto/rand"
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "fmt"
-
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "golang.org/x/crypto/ssh"
-)
-
-const (
- caPublicKey = "ca_public_key"
- caPrivateKey = "ca_private_key"
- caPublicKeyStoragePath = "config/ca_public_key"
- caPublicKeyStoragePathDeprecated = "public_key"
- caPrivateKeyStoragePath = "config/ca_private_key"
- caPrivateKeyStoragePathDeprecated = "config/ca_bundle"
-)
-
-type keyStorageEntry struct {
- Key string `json:"key" structs:"key" mapstructure:"key"`
-}
-
-func pathConfigCA(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/ca",
- Fields: map[string]*framework.FieldSchema{
- "private_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Private half of the SSH key that will be used to sign certificates.`,
- },
- "public_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Public half of the SSH key that will be used to sign certificates.`,
- },
- "generate_signing_key": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `Generate SSH key pair internally rather than use the private_key and public_key fields.`,
- Default: true,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathConfigCAUpdate,
- logical.DeleteOperation: b.pathConfigCADelete,
- logical.ReadOperation: b.pathConfigCARead,
- },
-
- HelpSynopsis: `Set the SSH private key used for signing certificates.`,
- HelpDescription: `This sets the CA information used for certificates generated by this
-by this mount. The fields must be in the standard private and public SSH format.
-
-For security reasons, the private key cannot be retrieved later.
-
-Read operations will return the public key, if already stored/generated.`,
- }
-}
-
-func (b *backend) pathConfigCARead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- publicKeyEntry, err := caKey(req.Storage, caPublicKey)
- if err != nil {
- return nil, fmt.Errorf("failed to read CA public key: %v", err)
- }
-
- if publicKeyEntry == nil {
- return logical.ErrorResponse("keys haven't been configured yet"), nil
- }
-
- response := &logical.Response{
- Data: map[string]interface{}{
- "public_key": publicKeyEntry.Key,
- },
- }
-
- return response, nil
-}
-
-func (b *backend) pathConfigCADelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if err := req.Storage.Delete(caPrivateKeyStoragePath); err != nil {
- return nil, err
- }
- if err := req.Storage.Delete(caPublicKeyStoragePath); err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-func caKey(storage logical.Storage, keyType string) (*keyStorageEntry, error) {
- var path, deprecatedPath string
- switch keyType {
- case caPrivateKey:
- path = caPrivateKeyStoragePath
- deprecatedPath = caPrivateKeyStoragePathDeprecated
- case caPublicKey:
- path = caPublicKeyStoragePath
- deprecatedPath = caPublicKeyStoragePathDeprecated
- default:
- return nil, fmt.Errorf("unrecognized key type %q", keyType)
- }
-
- entry, err := storage.Get(path)
- if err != nil {
- return nil, fmt.Errorf("failed to read CA key of type %q: %v", keyType, err)
- }
-
- if entry == nil {
- // If the entry is not found, look at an older path. If found, upgrade
- // it.
- entry, err = storage.Get(deprecatedPath)
- if err != nil {
- return nil, err
- }
- if entry != nil {
- entry, err = logical.StorageEntryJSON(path, keyStorageEntry{
- Key: string(entry.Value),
- })
- if err != nil {
- return nil, err
- }
- if err := storage.Put(entry); err != nil {
- return nil, err
- }
- if err = storage.Delete(deprecatedPath); err != nil {
- return nil, err
- }
- }
- }
- if entry == nil {
- return nil, nil
- }
-
- var keyEntry keyStorageEntry
- if err := entry.DecodeJSON(&keyEntry); err != nil {
- return nil, err
- }
-
- return &keyEntry, nil
-}
-
-func (b *backend) pathConfigCAUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- var err error
- publicKey := data.Get("public_key").(string)
- privateKey := data.Get("private_key").(string)
-
- var generateSigningKey bool
-
- generateSigningKeyRaw, ok := data.GetOk("generate_signing_key")
- switch {
- // explicitly set true
- case ok && generateSigningKeyRaw.(bool):
- if publicKey != "" || privateKey != "" {
- return logical.ErrorResponse("public_key and private_key must not be set when generate_signing_key is set to true"), nil
- }
-
- generateSigningKey = true
-
- // explicitly set to false, or not set and we have both a public and private key
- case ok, publicKey != "" && privateKey != "":
- if publicKey == "" {
- return logical.ErrorResponse("missing public_key"), nil
- }
-
- if privateKey == "" {
- return logical.ErrorResponse("missing private_key"), nil
- }
-
- _, err := ssh.ParsePrivateKey([]byte(privateKey))
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Unable to parse private_key as an SSH private key: %v", err)), nil
- }
-
- _, err = parsePublicSSHKey(publicKey)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Unable to parse public_key as an SSH public key: %v", err)), nil
- }
-
- // not set and no public/private key provided so generate
- case publicKey == "" && privateKey == "":
- generateSigningKey = true
-
- // not set, but one or the other supplied
- default:
- return logical.ErrorResponse("only one of public_key and private_key set; both must be set to use, or both must be blank to auto-generate"), nil
- }
-
- if generateSigningKey {
- publicKey, privateKey, err = generateSSHKeyPair()
- if err != nil {
- return nil, err
- }
- }
-
- if publicKey == "" || privateKey == "" {
- return nil, fmt.Errorf("failed to generate or parse the keys")
- }
-
- publicKeyEntry, err := caKey(req.Storage, caPublicKey)
- if err != nil {
- return nil, fmt.Errorf("failed to read CA public key: %v", err)
- }
-
- privateKeyEntry, err := caKey(req.Storage, caPrivateKey)
- if err != nil {
- return nil, fmt.Errorf("failed to read CA private key: %v", err)
- }
-
- if (publicKeyEntry != nil && publicKeyEntry.Key != "") || (privateKeyEntry != nil && privateKeyEntry.Key != "") {
- return nil, fmt.Errorf("keys are already configured; delete them before reconfiguring")
- }
-
- entry, err := logical.StorageEntryJSON(caPublicKeyStoragePath, &keyStorageEntry{
- Key: publicKey,
- })
- if err != nil {
- return nil, err
- }
-
- // Save the public key
- err = req.Storage.Put(entry)
- if err != nil {
- return nil, err
- }
-
- entry, err = logical.StorageEntryJSON(caPrivateKeyStoragePath, &keyStorageEntry{
- Key: privateKey,
- })
- if err != nil {
- return nil, err
- }
-
- // Save the private key
- err = req.Storage.Put(entry)
- if err != nil {
- var mErr *multierror.Error
-
- mErr = multierror.Append(mErr, fmt.Errorf("failed to store CA private key: %v", err))
-
- // If storing private key fails, the corresponding public key should be
- // removed
- if delErr := req.Storage.Delete(caPublicKeyStoragePath); delErr != nil {
- mErr = multierror.Append(mErr, fmt.Errorf("failed to cleanup CA public key: %v", delErr))
- return nil, mErr
- }
-
- return nil, err
- }
-
- if generateSigningKey {
- response := &logical.Response{
- Data: map[string]interface{}{
- "public_key": publicKey,
- },
- }
-
- return response, nil
- }
-
- return nil, nil
-}
-
-func generateSSHKeyPair() (string, string, error) {
- privateSeed, err := rsa.GenerateKey(rand.Reader, 4096)
- if err != nil {
- return "", "", err
- }
-
- privateBlock := &pem.Block{
- Type: "RSA PRIVATE KEY",
- Headers: nil,
- Bytes: x509.MarshalPKCS1PrivateKey(privateSeed),
- }
-
- public, err := ssh.NewPublicKey(&privateSeed.PublicKey)
- if err != nil {
- return "", "", err
- }
-
- return string(ssh.MarshalAuthorizedKey(public)), string(pem.EncodeToMemory(privateBlock)), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go
deleted file mode 100644
index 250ab4f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package ssh
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestSSH_ConfigCAStorageUpgrade(t *testing.T) {
- var err error
-
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
-
- b, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
-
- err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // Store at an older path
- err = config.StorageView.Put(&logical.StorageEntry{
- Key: caPrivateKeyStoragePathDeprecated,
- Value: []byte(privateKey),
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // Reading it should return the key as well as upgrade the storage path
- privateKeyEntry, err := caKey(config.StorageView, caPrivateKey)
- if err != nil {
- t.Fatal(err)
- }
- if privateKeyEntry == nil || privateKeyEntry.Key == "" {
- t.Fatalf("failed to read the stored private key")
- }
-
- entry, err := config.StorageView.Get(caPrivateKeyStoragePathDeprecated)
- if err != nil {
- t.Fatal(err)
- }
- if entry != nil {
- t.Fatalf("bad: expected a nil entry after upgrade")
- }
-
- entry, err = config.StorageView.Get(caPrivateKeyStoragePath)
- if err != nil {
- t.Fatal(err)
- }
- if entry == nil {
- t.Fatalf("bad: expected a non-nil entry after upgrade")
- }
-
- // Store at an older path
- err = config.StorageView.Put(&logical.StorageEntry{
- Key: caPublicKeyStoragePathDeprecated,
- Value: []byte(publicKey),
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // Reading it should return the key as well as upgrade the storage path
- publicKeyEntry, err := caKey(config.StorageView, caPublicKey)
- if err != nil {
- t.Fatal(err)
- }
- if publicKeyEntry == nil || publicKeyEntry.Key == "" {
- t.Fatalf("failed to read the stored public key")
- }
-
- entry, err = config.StorageView.Get(caPublicKeyStoragePathDeprecated)
- if err != nil {
- t.Fatal(err)
- }
- if entry != nil {
- t.Fatalf("bad: expected a nil entry after upgrade")
- }
-
- entry, err = config.StorageView.Get(caPublicKeyStoragePath)
- if err != nil {
- t.Fatal(err)
- }
- if entry == nil {
- t.Fatalf("bad: expected a non-nil entry after upgrade")
- }
-}
-
-func TestSSH_ConfigCAUpdateDelete(t *testing.T) {
- var resp *logical.Response
- var err error
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
-
- b, err := Factory(config)
- if err != nil {
- t.Fatalf("Cannot create backend: %s", err)
- }
-
- caReq := &logical.Request{
- Path: "config/ca",
- Operation: logical.UpdateOperation,
- Storage: config.StorageView,
- }
-
- // Auto-generate the keys
- resp, err = b.HandleRequest(caReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v, resp:%v", err, resp)
- }
-
- // Fail to overwrite it
- resp, err = b.HandleRequest(caReq)
- if err == nil {
- t.Fatalf("expected an error")
- }
-
- caReq.Operation = logical.DeleteOperation
- // Delete the configured keys
- resp, err = b.HandleRequest(caReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v, resp:%v", err, resp)
- }
-
- caReq.Operation = logical.UpdateOperation
- caReq.Data = map[string]interface{}{
- "public_key": publicKey,
- "private_key": privateKey,
- }
-
- // Successfully create a new one
- resp, err = b.HandleRequest(caReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v, resp:%v", err, resp)
- }
-
- // Fail to overwrite it
- resp, err = b.HandleRequest(caReq)
- if err == nil {
- t.Fatalf("expected an error")
- }
-
- caReq.Operation = logical.DeleteOperation
- // Delete the configured keys
- resp, err = b.HandleRequest(caReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v, resp:%v", err, resp)
- }
-
- caReq.Operation = logical.UpdateOperation
- caReq.Data = nil
-
- // Successfully create a new one
- resp, err = b.HandleRequest(caReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("bad: err: %v, resp:%v", err, resp)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_zeroaddress.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_zeroaddress.go
deleted file mode 100644
index ab6b838..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_zeroaddress.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package ssh
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// Structure to hold roles that are allowed to accept any IP address.
-type zeroAddressRoles struct {
- Roles []string `json:"roles" mapstructure:"roles"`
-}
-
-func pathConfigZeroAddress(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "config/zeroaddress",
- Fields: map[string]*framework.FieldSchema{
- "roles": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `[Required] Comma separated list of role names which
- allows credentials to be requested for any IP address. CIDR blocks
- previously registered under these roles will be ignored.`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathConfigZeroAddressWrite,
- logical.ReadOperation: b.pathConfigZeroAddressRead,
- logical.DeleteOperation: b.pathConfigZeroAddressDelete,
- },
- HelpSynopsis: pathConfigZeroAddressSyn,
- HelpDescription: pathConfigZeroAddressDesc,
- }
-}
-
-func (b *backend) pathConfigZeroAddressDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("config/zeroaddress")
- if err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-func (b *backend) pathConfigZeroAddressRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entry, err := b.getZeroAddressRoles(req.Storage)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "roles": entry.Roles,
- },
- }, nil
-}
-
-func (b *backend) pathConfigZeroAddressWrite(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- roleNames := d.Get("roles").(string)
- if roleNames == "" {
- return logical.ErrorResponse("Missing roles"), nil
- }
-
- // Check if the roles listed actually exist in the backend
- roles := strings.Split(roleNames, ",")
- for _, item := range roles {
- role, err := b.getRole(req.Storage, item)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("Role %q does not exist", item)), nil
- }
- }
-
- err := b.putZeroAddressRoles(req.Storage, roles)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// Stores the given list of roles at zeroaddress endpoint
-func (b *backend) putZeroAddressRoles(s logical.Storage, roles []string) error {
- entry, err := logical.StorageEntryJSON("config/zeroaddress", &zeroAddressRoles{
- Roles: roles,
- })
- if err != nil {
- return err
- }
- if err := s.Put(entry); err != nil {
- return err
- }
- return nil
-}
-
-// Retrieves the list of roles from the zeroaddress endpoint.
-func (b *backend) getZeroAddressRoles(s logical.Storage) (*zeroAddressRoles, error) {
- entry, err := s.Get("config/zeroaddress")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result zeroAddressRoles
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-// Removes a role from the list of roles present in config/zeroaddress path
-func (b *backend) removeZeroAddressRole(s logical.Storage, roleName string) error {
- zeroAddressEntry, err := b.getZeroAddressRoles(s)
- if err != nil {
- return err
- }
- if zeroAddressEntry == nil {
- return nil
- }
-
- err = zeroAddressEntry.remove(roleName)
- if err != nil {
- return err
- }
-
- return b.putZeroAddressRoles(s, zeroAddressEntry.Roles)
-}
-
-// Removes a given role from the comma separated string
-func (r *zeroAddressRoles) remove(roleName string) error {
- var index int
- for i, role := range r.Roles {
- if role == roleName {
- index = i
- break
- }
- }
- length := len(r.Roles)
- if index >= length || index < 0 {
- return fmt.Errorf("invalid index [%d]", index)
- }
- // If slice has zero or one item, remove the item by setting slice to nil.
- if length < 2 {
- r.Roles = nil
- return nil
- }
-
- // Last item to be deleted
- if length-1 == index {
- r.Roles = r.Roles[:length-1]
- return nil
- }
-
- // Delete the item by appending all items except the one at index
- r.Roles = append(r.Roles[:index], r.Roles[index+1:]...)
- return nil
-}
-
-const pathConfigZeroAddressSyn = `
-Assign zero address as default CIDR block for select roles.
-`
-
-const pathConfigZeroAddressDesc = `
-Administrator can choose to make a select few registered roles to accept any IP
-address, overriding the CIDR blocks registered during creation of roles. This
-doesn't mean that the credentials are created for any IP address. Clients who
-have access to these roles are trusted to make valid requests. Access to these
-roles should be controlled using Vault policies. It is recommended that all the
-roles that are allowed to accept any IP address should have an explicit policy
-of deny for unintended clients.
-
-This is a root authenticated endpoint. If backend is mounted at 'ssh' then use
-the endpoint 'ssh/config/zeroaddress' to provide the list of allowed roles.
-After mounting the backend, use 'path-help' for additional information.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go
deleted file mode 100644
index 53d55ed..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go
+++ /dev/null
@@ -1,331 +0,0 @@
-package ssh
-
-import (
- "fmt"
- "net"
- "strings"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-type sshOTP struct {
- Username string `json:"username" structs:"username" mapstructure:"username"`
- IP string `json:"ip" structs:"ip" mapstructure:"ip"`
- RoleName string `json:"role_name" structs:"role_name" mapstructure:"role_name"`
-}
-
-func pathCredsCreate(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "creds/" + framework.GenericNameRegex("role"),
- Fields: map[string]*framework.FieldSchema{
- "role": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "[Required] Name of the role",
- },
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "[Optional] Username in remote host",
- },
- "ip": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "[Required] IP of the remote host",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathCredsCreateWrite,
- },
- HelpSynopsis: pathCredsCreateHelpSyn,
- HelpDescription: pathCredsCreateHelpDesc,
- }
-}
-
-func (b *backend) pathCredsCreateWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- roleName := d.Get("role").(string)
- if roleName == "" {
- return logical.ErrorResponse("Missing role"), nil
- }
-
- ipRaw := d.Get("ip").(string)
- if ipRaw == "" {
- return logical.ErrorResponse("Missing ip"), nil
- }
-
- role, err := b.getRole(req.Storage, roleName)
- if err != nil {
- return nil, fmt.Errorf("error retrieving role: %v", err)
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("Role %q not found", roleName)), nil
- }
-
- // username is an optional parameter.
- username := d.Get("username").(string)
-
- // Set the default username
- if username == "" {
- if role.DefaultUser == "" {
- return logical.ErrorResponse("No default username registered. Use 'username' option"), nil
- }
- username = role.DefaultUser
- }
-
- if role.AllowedUsers != "" {
- // Check if the username is present in allowed users list.
- err := validateUsername(username, role.AllowedUsers)
-
- // If username is not present in allowed users list, check if it
- // is the default username in the role. If neither is true, then
- // that username is not allowed to generate a credential.
- if err != nil && username != role.DefaultUser {
- return logical.ErrorResponse("Username is not present is allowed users list"), nil
- }
- } else if username != role.DefaultUser {
- return logical.ErrorResponse("Username has to be either in allowed users list or has to be a default username"), nil
- }
-
- // Validate the IP address
- ipAddr := net.ParseIP(ipRaw)
- if ipAddr == nil {
- return logical.ErrorResponse(fmt.Sprintf("Invalid IP %q", ipRaw)), nil
- }
-
- // Check if the IP belongs to the registered list of CIDR blocks under the role
- ip := ipAddr.String()
-
- zeroAddressEntry, err := b.getZeroAddressRoles(req.Storage)
- if err != nil {
- return nil, fmt.Errorf("error retrieving zero-address roles: %v", err)
- }
- var zeroAddressRoles []string
- if zeroAddressEntry != nil {
- zeroAddressRoles = zeroAddressEntry.Roles
- }
-
- err = validateIP(ip, roleName, role.CIDRList, role.ExcludeCIDRList, zeroAddressRoles)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error validating IP: %v", err)), nil
- }
-
- var result *logical.Response
- if role.KeyType == KeyTypeOTP {
- // Generate an OTP
- otp, err := b.GenerateOTPCredential(req, &sshOTP{
- Username: username,
- IP: ip,
- RoleName: roleName,
- })
- if err != nil {
- return nil, err
- }
-
- // Return the information relevant to user of OTP type and save
- // the data required for later use in the internal section of secret.
- // In this case, saving just the OTP is sufficient since there is
- // no need to establish connection with the remote host.
- result = b.Secret(SecretOTPType).Response(map[string]interface{}{
- "key_type": role.KeyType,
- "key": otp,
- "username": username,
- "ip": ip,
- "port": role.Port,
- }, map[string]interface{}{
- "otp": otp,
- })
- } else if role.KeyType == KeyTypeDynamic {
- // Generate an RSA key pair. This also installs the newly generated
- // public key in the remote host.
- dynamicPublicKey, dynamicPrivateKey, err := b.GenerateDynamicCredential(req, role, username, ip)
- if err != nil {
- return nil, err
- }
-
- // Return the information relevant to user of dynamic type and save
- // information required for later use in internal section of secret.
- result = b.Secret(SecretDynamicKeyType).Response(map[string]interface{}{
- "key": dynamicPrivateKey,
- "key_type": role.KeyType,
- "username": username,
- "ip": ip,
- "port": role.Port,
- }, map[string]interface{}{
- "admin_user": role.AdminUser,
- "username": username,
- "ip": ip,
- "host_key_name": role.KeyName,
- "dynamic_public_key": dynamicPublicKey,
- "port": role.Port,
- "install_script": role.InstallScript,
- })
- } else {
- return nil, fmt.Errorf("key type unknown")
- }
-
- return result, nil
-}
-
-// Generates a RSA key pair and installs it in the remote target
-func (b *backend) GenerateDynamicCredential(req *logical.Request, role *sshRole, username, ip string) (string, string, error) {
- // Fetch the host key to be used for dynamic key installation
- keyEntry, err := req.Storage.Get(fmt.Sprintf("keys/%s", role.KeyName))
- if err != nil {
- return "", "", fmt.Errorf("key %q not found. err: %v", role.KeyName, err)
- }
-
- if keyEntry == nil {
- return "", "", fmt.Errorf("key %q not found", role.KeyName)
- }
-
- var hostKey sshHostKey
- if err := keyEntry.DecodeJSON(&hostKey); err != nil {
- return "", "", fmt.Errorf("error reading the host key: %v", err)
- }
-
- // Generate a new RSA key pair with the given key length.
- dynamicPublicKey, dynamicPrivateKey, err := generateRSAKeys(role.KeyBits)
- if err != nil {
- return "", "", fmt.Errorf("error generating key: %v", err)
- }
-
- if len(role.KeyOptionSpecs) != 0 {
- dynamicPublicKey = fmt.Sprintf("%s %s", role.KeyOptionSpecs, dynamicPublicKey)
- }
-
- // Add the public key to authorized_keys file in target machine
- err = b.installPublicKeyInTarget(role.AdminUser, username, ip, role.Port, hostKey.Key, dynamicPublicKey, role.InstallScript, true)
- if err != nil {
- return "", "", fmt.Errorf("failed to add public key to authorized_keys file in target: %v", err)
- }
- return dynamicPublicKey, dynamicPrivateKey, nil
-}
-
-// Generates a UUID OTP and its salted value based on the salt of the backend.
-func (b *backend) GenerateSaltedOTP() (string, string, error) {
- str, err := uuid.GenerateUUID()
- if err != nil {
- return "", "", err
- }
- salt, err := b.Salt()
- if err != nil {
- return "", "", err
- }
-
- return str, salt.SaltID(str), nil
-}
-
-// Generates an UUID OTP and creates an entry for the same in storage backend with its salted string.
-func (b *backend) GenerateOTPCredential(req *logical.Request, sshOTPEntry *sshOTP) (string, error) {
- otp, otpSalted, err := b.GenerateSaltedOTP()
- if err != nil {
- return "", err
- }
-
- // Check if there is an entry already created for the newly generated OTP.
- entry, err := b.getOTP(req.Storage, otpSalted)
-
- // If entry already exists for the OTP, make sure that new OTP is not
- // replacing an existing one by recreating new ones until an unused
- // OTP is generated. It is very unlikely that this is the case and this
- // code is just for safety.
- for err == nil && entry != nil {
- otp, otpSalted, err = b.GenerateSaltedOTP()
- if err != nil {
- return "", err
- }
- entry, err = b.getOTP(req.Storage, otpSalted)
- if err != nil {
- return "", err
- }
- }
-
- // Store an entry for the salt of OTP.
- newEntry, err := logical.StorageEntryJSON("otp/"+otpSalted, sshOTPEntry)
- if err != nil {
- return "", err
- }
- if err := req.Storage.Put(newEntry); err != nil {
- return "", err
- }
- return otp, nil
-}
-
-// ValidateIP first checks if the role belongs to the list of privileged
-// roles that could allow any IP address and if there is a match, IP is
-// accepted immediately. If not, IP is searched in the allowed CIDR blocks
-// registered with the role. If there is a match, then it is searched in the
-// excluded CIDR blocks and if IP is found there as well, an error is returned.
-// IP is valid only if it is encompassed by allowed CIDR blocks and not by
-// excluded CIDR blocks.
-func validateIP(ip, roleName, cidrList, excludeCidrList string, zeroAddressRoles []string) error {
- // Search IP in the zero-address list
- for _, role := range zeroAddressRoles {
- if roleName == role {
- return nil
- }
- }
-
- // Search IP in allowed CIDR blocks
- ipMatched, err := cidrListContainsIP(ip, cidrList)
- if err != nil {
- return err
- }
- if !ipMatched {
- return fmt.Errorf("IP does not belong to role")
- }
-
- if len(excludeCidrList) == 0 {
- return nil
- }
-
- // Search IP in exclude list
- ipMatched, err = cidrListContainsIP(ip, excludeCidrList)
- if err != nil {
- return err
- }
- if ipMatched {
- return fmt.Errorf("IP does not belong to role")
- }
-
- return nil
-}
-
-// Checks if the username supplied by the user is present in the list of
-// allowed users registered which creation of role.
-func validateUsername(username, allowedUsers string) error {
- if allowedUsers == "" {
- return fmt.Errorf("username not in allowed users list")
- }
-
- // Role was explicitly configured to allow any username.
- if allowedUsers == "*" {
- return nil
- }
-
- userList := strings.Split(allowedUsers, ",")
- for _, user := range userList {
- if strings.TrimSpace(user) == username {
- return nil
- }
- }
-
- return fmt.Errorf("username not in allowed users list")
-}
-
-const pathCredsCreateHelpSyn = `
-Creates a credential for establishing SSH connection with the remote host.
-`
-
-const pathCredsCreateHelpDesc = `
-This path will generate a new key for establishing SSH session with
-target host. The key can either be a long lived dynamic key or a One
-Time Password (OTP), using 'key_type' parameter being 'dynamic' or
-'otp' respectively. For dynamic keys, a named key should be supplied.
-Create named key using the 'keys/' endpoint, and this represents the
-shared SSH key of target host. If this backend is mounted at 'ssh',
-then "ssh/creds/web" would generate a key for 'web' role.
-
-Keys will have a lease associated with them. The access keys can be
-revoked by using the lease ID.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_fetch.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_fetch.go
deleted file mode 100644
index 1b59794..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_fetch.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package ssh
-
-import (
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathFetchPublicKey(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `public_key`,
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathFetchPublicKey,
- },
-
- HelpSynopsis: `Retrieve the public key.`,
- HelpDescription: `This allows the public key, that this backend has been configured with, to be fetched.`,
- }
-}
-
-func (b *backend) pathFetchPublicKey(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- publicKeyEntry, err := caKey(req.Storage, caPublicKey)
- if err != nil {
- return nil, err
- }
- if publicKeyEntry == nil || publicKeyEntry.Key == "" {
- return nil, nil
- }
-
- response := &logical.Response{
- Data: map[string]interface{}{
- logical.HTTPContentType: "text/plain",
- logical.HTTPRawBody: []byte(publicKeyEntry.Key),
- logical.HTTPStatusCode: 200,
- },
- }
-
- return response, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_keys.go
deleted file mode 100644
index aa0f444..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_keys.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package ssh
-
-import (
- "fmt"
-
- "golang.org/x/crypto/ssh"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-type sshHostKey struct {
- Key string `json:"key"`
-}
-
-func pathKeys(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "keys/" + framework.GenericNameRegex("key_name"),
- Fields: map[string]*framework.FieldSchema{
- "key_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "[Required] Name of the key",
- },
- "key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "[Required] SSH private key with super user privileges in host",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathKeysWrite,
- logical.DeleteOperation: b.pathKeysDelete,
- },
- HelpSynopsis: pathKeysSyn,
- HelpDescription: pathKeysDesc,
- }
-}
-
-func (b *backend) getKey(s logical.Storage, n string) (*sshHostKey, error) {
- entry, err := s.Get("keys/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result sshHostKey
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
- return &result, nil
-}
-
-func (b *backend) pathKeysDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- keyName := d.Get("key_name").(string)
- keyPath := fmt.Sprintf("keys/%s", keyName)
- err := req.Storage.Delete(keyPath)
- if err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-func (b *backend) pathKeysWrite(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- keyName := d.Get("key_name").(string)
- if keyName == "" {
- return logical.ErrorResponse("Missing key_name"), nil
- }
-
- keyString := d.Get("key").(string)
-
- // Check if the key provided is infact a private key
- signer, err := ssh.ParsePrivateKey([]byte(keyString))
- if err != nil || signer == nil {
- return logical.ErrorResponse("Invalid key"), nil
- }
-
- if keyString == "" {
- return logical.ErrorResponse("Missing key"), nil
- }
-
- keyPath := fmt.Sprintf("keys/%s", keyName)
-
- // Store the key
- entry, err := logical.StorageEntryJSON(keyPath, map[string]interface{}{
- "key": keyString,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-const pathKeysSyn = `
-Register a shared private key with Vault.
-`
-
-const pathKeysDesc = `
-Vault uses this key to install and uninstall dynamic keys in remote hosts. This
-key should have sudoer privileges in remote hosts. This enables installing keys
-for unprivileged usernames.
-
-If this backend is mounted as "ssh", then the endpoint for registering shared
-key is "ssh/keys/". The name given here can be associated with any number
-of roles via the endpoint "ssh/roles/".
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_lookup.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_lookup.go
deleted file mode 100644
index f253258..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_lookup.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package ssh
-
-import (
- "fmt"
- "net"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathLookup(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "lookup",
- Fields: map[string]*framework.FieldSchema{
- "ip": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "[Required] IP address of remote host",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathLookupWrite,
- },
- HelpSynopsis: pathLookupSyn,
- HelpDescription: pathLookupDesc,
- }
-}
-
-func (b *backend) pathLookupWrite(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- ipAddr := d.Get("ip").(string)
- if ipAddr == "" {
- return logical.ErrorResponse("Missing ip"), nil
- }
- ip := net.ParseIP(ipAddr)
- if ip == nil {
- return logical.ErrorResponse(fmt.Sprintf("Invalid IP %q", ip.String())), nil
- }
-
- // Get all the roles created in the backend.
- keys, err := req.Storage.List("roles/")
- if err != nil {
- return nil, err
- }
-
- // Look for roles which has CIDR blocks that encompasses the given IP
- // and create a list out of it.
- var matchingRoles []string
- for _, role := range keys {
- if contains, _ := roleContainsIP(req.Storage, role, ip.String()); contains {
- matchingRoles = append(matchingRoles, role)
- }
- }
-
- // Add roles that are allowed to accept any IP address.
- zeroAddressEntry, err := b.getZeroAddressRoles(req.Storage)
- if err != nil {
- return nil, err
- }
- if zeroAddressEntry != nil {
- matchingRoles = append(matchingRoles, zeroAddressEntry.Roles...)
- }
-
- // This list may potentially reveal more information than it is supposed to.
- // The roles for which the client is not authorized to will also be displayed.
- // However, if the client tries to use the role for which the client is not
- // authenticated, it will fail. It is not a problem. In a way this can be
- // viewed as a feature. The client can ask for permissions to be given for
- // a specific role if things are not working!
- //
- // Ideally, the role names should be filtered and only the roles which
- // the client is authorized to see, should be returned.
- return &logical.Response{
- Data: map[string]interface{}{
- "roles": matchingRoles,
- },
- }, nil
-}
-
-const pathLookupSyn = `
-List all the roles associated with the given IP address.
-`
-
-const pathLookupDesc = `
-The IP address for which the key is requested, is searched in the CIDR blocks
-registered with vault using the 'roles' endpoint. Keys can be generated only by
-specifying the 'role' name. The roles that can be used to generate the key for
-a particular IP, are listed via this endpoint. For example, if this backend is
-mounted at "ssh", then "ssh/lookup" lists the roles associated with keys can be
-generated for a target IP, if the CIDR block encompassing the IP is registered
-with vault.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go
deleted file mode 100644
index 6be96b6..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go
+++ /dev/null
@@ -1,630 +0,0 @@
-package ssh
-
-import (
- "fmt"
- "strings"
-
- "time"
-
- "github.com/hashicorp/vault/helper/cidrutil"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- KeyTypeOTP = "otp"
- KeyTypeDynamic = "dynamic"
- KeyTypeCA = "ca"
-)
-
-// Structure that represents a role in SSH backend. This is a common role structure
-// for both OTP and Dynamic roles. Not all the fields are mandatory for both type.
-// Some are applicable for one and not for other. It doesn't matter.
-type sshRole struct {
- KeyType string `mapstructure:"key_type" json:"key_type"`
- KeyName string `mapstructure:"key" json:"key"`
- KeyBits int `mapstructure:"key_bits" json:"key_bits"`
- AdminUser string `mapstructure:"admin_user" json:"admin_user"`
- DefaultUser string `mapstructure:"default_user" json:"default_user"`
- CIDRList string `mapstructure:"cidr_list" json:"cidr_list"`
- ExcludeCIDRList string `mapstructure:"exclude_cidr_list" json:"exclude_cidr_list"`
- Port int `mapstructure:"port" json:"port"`
- InstallScript string `mapstructure:"install_script" json:"install_script"`
- AllowedUsers string `mapstructure:"allowed_users" json:"allowed_users"`
- AllowedDomains string `mapstructure:"allowed_domains" json:"allowed_domains"`
- KeyOptionSpecs string `mapstructure:"key_option_specs" json:"key_option_specs"`
- MaxTTL string `mapstructure:"max_ttl" json:"max_ttl"`
- TTL string `mapstructure:"ttl" json:"ttl"`
- DefaultCriticalOptions map[string]string `mapstructure:"default_critical_options" json:"default_critical_options"`
- DefaultExtensions map[string]string `mapstructure:"default_extensions" json:"default_extensions"`
- AllowedCriticalOptions string `mapstructure:"allowed_critical_options" json:"allowed_critical_options"`
- AllowedExtensions string `mapstructure:"allowed_extensions" json:"allowed_extensions"`
- AllowUserCertificates bool `mapstructure:"allow_user_certificates" json:"allow_user_certificates"`
- AllowHostCertificates bool `mapstructure:"allow_host_certificates" json:"allow_host_certificates"`
- AllowBareDomains bool `mapstructure:"allow_bare_domains" json:"allow_bare_domains"`
- AllowSubdomains bool `mapstructure:"allow_subdomains" json:"allow_subdomains"`
- AllowUserKeyIDs bool `mapstructure:"allow_user_key_ids" json:"allow_user_key_ids"`
- KeyIDFormat string `mapstructure:"key_id_format" json:"key_id_format"`
-}
-
-func pathListRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathRoleList,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func pathRoles(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "roles/" + framework.GenericNameRegex("role"),
- Fields: map[string]*framework.FieldSchema{
- "role": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Required for all types]
- Name of the role being created.`,
- },
- "key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Required for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type]
- Name of the registered key in Vault. Before creating the role, use the
- 'keys/' endpoint to create a named key.`,
- },
- "admin_user": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Required for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type]
- Admin user at remote host. The shared key being registered should be
- for this user and should have root privileges. Everytime a dynamic
- credential is being generated for other users, Vault uses this admin
- username to login to remote host and install the generated credential
- for the other user.`,
- },
- "default_user": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Required for Dynamic type] [Required for OTP type] [Optional for CA type]
- Default username for which a credential will be generated.
- When the endpoint 'creds/' is used without a username, this
- value will be used as default username.`,
- },
- "cidr_list": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type]
- Comma separated list of CIDR blocks for which the role is applicable for.
- CIDR blocks can belong to more than one role.`,
- },
- "exclude_cidr_list": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type]
- Comma separated list of CIDR blocks. IP addresses belonging to these blocks are not
- accepted by the role. This is particularly useful when big CIDR blocks are being used
- by the role and certain parts of it needs to be kept out.`,
- },
- "port": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `
- [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type]
- Port number for SSH connection. Default is '22'. Port number does not
- play any role in creation of OTP. For 'otp' type, this is just a way
- to inform client about the port number to use. Port number will be
- returned to client by Vault server along with OTP.`,
- },
- "key_type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Required for all types]
- Type of key used to login to hosts. It can be either 'otp', 'dynamic' or 'ca'.
- 'otp' type requires agent to be installed in remote hosts.`,
- },
- "key_bits": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `
- [Optional for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type]
- Length of the RSA dynamic key in bits. It is 1024 by default or it can be 2048.`,
- },
- "install_script": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Optional for Dynamic type] [Not-applicable for OTP type] [Not applicable for CA type]
- Script used to install and uninstall public keys in the target machine.
- The inbuilt default install script will be for Linux hosts. For sample
- script, refer the project documentation website.`,
- },
- "allowed_users": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Optional for all types] [Works differently for CA type]
- If this option is not specified, or is '*', client can request a
- credential for any valid user at the remote host, including the
- admin user. If only certain usernames are to be allowed, then
- this list enforces it. If this field is set, then credentials
- can only be created for default_user and usernames present in
- this list. Setting this option will enable all the users with
- access to this role to fetch credentials for all other usernames
- in this list. Use with caution. N.B.: with the CA type, an empty
- list means that no users are allowed; explicitly specify '*' to
- allow any user.
- `,
- },
- "allowed_domains": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
- If this option is not specified, client can request for a signed certificate for any
- valid host. If only certain domains are allowed, then this list enforces it.
- `,
- },
- "key_option_specs": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Optional for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type]
- Comma separated option specifications which will be prefixed to RSA key in
- authorized_keys file. Options should be valid and comply with authorized_keys
- file format and should not contain spaces.
- `,
- },
- "ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
- The lease duration if no specific lease duration is
- requested. The lease duration controls the expiration
- of certificates issued by this backend. Defaults to
- the value of max_ttl.`,
- },
- "max_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
- The maximum allowed lease duration
- `,
- },
- "allowed_critical_options": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
- A comma-separated list of critical options that certificates can have when signed.
- To allow any critical options, set this to an empty string.
- `,
- },
- "allowed_extensions": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
- A comma-separated list of extensions that certificates can have when signed.
- To allow any extensions, set this to an empty string.
- `,
- },
- "default_critical_options": &framework.FieldSchema{
- Type: framework.TypeMap,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type]
- [Optional for CA type] Critical options certificates should
- have if none are provided when signing. This field takes in key
- value pairs in JSON format. Note that these are not restricted
- by "allowed_critical_options". Defaults to none.
- `,
- },
- "default_extensions": &framework.FieldSchema{
- Type: framework.TypeMap,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type]
- [Optional for CA type] Extensions certificates should have if
- none are provided when signing. This field takes in key value
- pairs in JSON format. Note that these are not restricted by
- "allowed_extensions". Defaults to none.
- `,
- },
- "allow_user_certificates": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
- If set, certificates are allowed to be signed for use as a 'user'.
- `,
- Default: false,
- },
- "allow_host_certificates": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
- If set, certificates are allowed to be signed for use as a 'host'.
- `,
- Default: false,
- },
- "allow_bare_domains": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
- If set, host certificates that are requested are allowed to use the base domains listed in
- "allowed_domains", e.g. "example.com".
- This is a separate option as in some cases this can be considered a security threat.
- `,
- },
- "allow_subdomains": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
- If set, host certificates that are requested are allowed to use subdomains of those listed in "allowed_domains".
- `,
- },
- "allow_user_key_ids": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
- If true, users can override the key ID for a signed certificate with the "key_id" field.
- When false, the key ID will always be the token display name.
- The key ID is logged by the SSH server and can be useful for auditing.
- `,
- },
- "key_id_format": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
- [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
- When supplied, this value specifies a custom format for the key id of a signed certificate.
- The following variables are availble for use: '{{token_display_name}}' - The display name of
- the token used to make the request. '{{role_name}}' - The name of the role signing the request.
- '{{public_key_hash}}' - A SHA256 checksum of the public key that is being signed.
- `,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathRoleRead,
- logical.UpdateOperation: b.pathRoleWrite,
- logical.DeleteOperation: b.pathRoleDelete,
- },
-
- HelpSynopsis: pathRoleHelpSyn,
- HelpDescription: pathRoleHelpDesc,
- }
-}
-
-func (b *backend) pathRoleWrite(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- roleName := d.Get("role").(string)
- if roleName == "" {
- return logical.ErrorResponse("missing role name"), nil
- }
-
- // Allowed users is an optional field, applicable for both OTP and Dynamic types.
- allowedUsers := d.Get("allowed_users").(string)
-
- // Validate the CIDR blocks
- cidrList := d.Get("cidr_list").(string)
- if cidrList != "" {
- valid, err := cidrutil.ValidateCIDRListString(cidrList, ",")
- if err != nil {
- return nil, fmt.Errorf("failed to validate cidr_list: %v", err)
- }
- if !valid {
- return logical.ErrorResponse("failed to validate cidr_list"), nil
- }
- }
-
- // Validate the excluded CIDR blocks
- excludeCidrList := d.Get("exclude_cidr_list").(string)
- if excludeCidrList != "" {
- valid, err := cidrutil.ValidateCIDRListString(excludeCidrList, ",")
- if err != nil {
- return nil, fmt.Errorf("failed to validate exclude_cidr_list entry: %v", err)
- }
- if !valid {
- return logical.ErrorResponse(fmt.Sprintf("failed to validate exclude_cidr_list entry: %v", err)), nil
- }
- }
-
- port := d.Get("port").(int)
- if port == 0 {
- port = 22
- }
-
- keyType := d.Get("key_type").(string)
- if keyType == "" {
- return logical.ErrorResponse("missing key type"), nil
- }
- keyType = strings.ToLower(keyType)
-
- var roleEntry sshRole
- if keyType == KeyTypeOTP {
- defaultUser := d.Get("default_user").(string)
- if defaultUser == "" {
- return logical.ErrorResponse("missing default user"), nil
- }
-
- // Admin user is not used if OTP key type is used because there is
- // no need to login to remote machine.
- adminUser := d.Get("admin_user").(string)
- if adminUser != "" {
- return logical.ErrorResponse("admin user not required for OTP type"), nil
- }
-
- // Below are the only fields used from the role structure for OTP type.
- roleEntry = sshRole{
- DefaultUser: defaultUser,
- CIDRList: cidrList,
- ExcludeCIDRList: excludeCidrList,
- KeyType: KeyTypeOTP,
- Port: port,
- AllowedUsers: allowedUsers,
- }
- } else if keyType == KeyTypeDynamic {
- defaultUser := d.Get("default_user").(string)
- if defaultUser == "" {
- return logical.ErrorResponse("missing default user"), nil
- }
- // Key name is required by dynamic type and not by OTP type.
- keyName := d.Get("key").(string)
- if keyName == "" {
- return logical.ErrorResponse("missing key name"), nil
- }
- keyEntry, err := req.Storage.Get(fmt.Sprintf("keys/%s", keyName))
- if err != nil || keyEntry == nil {
- return logical.ErrorResponse(fmt.Sprintf("invalid 'key': %q", keyName)), nil
- }
-
- installScript := d.Get("install_script").(string)
- keyOptionSpecs := d.Get("key_option_specs").(string)
-
- // Setting the default script here. The script will install the
- // generated public key in the authorized_keys file of linux host.
- if installScript == "" {
- installScript = DefaultPublicKeyInstallScript
- }
-
- adminUser := d.Get("admin_user").(string)
- if adminUser == "" {
- return logical.ErrorResponse("missing admin username"), nil
- }
-
- // This defaults to 1024 and it can also be 2048.
- keyBits := d.Get("key_bits").(int)
- if keyBits != 0 && keyBits != 1024 && keyBits != 2048 {
- return logical.ErrorResponse("invalid key_bits field"), nil
- }
-
- // If user has not set this field, default it to 1024
- if keyBits == 0 {
- keyBits = 1024
- }
-
- // Store all the fields required by dynamic key type
- roleEntry = sshRole{
- KeyName: keyName,
- AdminUser: adminUser,
- DefaultUser: defaultUser,
- CIDRList: cidrList,
- ExcludeCIDRList: excludeCidrList,
- Port: port,
- KeyType: KeyTypeDynamic,
- KeyBits: keyBits,
- InstallScript: installScript,
- AllowedUsers: allowedUsers,
- KeyOptionSpecs: keyOptionSpecs,
- }
- } else if keyType == KeyTypeCA {
- role, errorResponse := b.createCARole(allowedUsers, d.Get("default_user").(string), d)
- if errorResponse != nil {
- return errorResponse, nil
- }
- roleEntry = *role
- } else {
- return logical.ErrorResponse("invalid key type"), nil
- }
-
- entry, err := logical.StorageEntryJSON(fmt.Sprintf("roles/%s", roleName), roleEntry)
- if err != nil {
- return nil, err
- }
-
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework.FieldData) (*sshRole, *logical.Response) {
- role := &sshRole{
- MaxTTL: data.Get("max_ttl").(string),
- TTL: data.Get("ttl").(string),
- AllowedCriticalOptions: data.Get("allowed_critical_options").(string),
- AllowedExtensions: data.Get("allowed_extensions").(string),
- AllowUserCertificates: data.Get("allow_user_certificates").(bool),
- AllowHostCertificates: data.Get("allow_host_certificates").(bool),
- AllowedUsers: allowedUsers,
- AllowedDomains: data.Get("allowed_domains").(string),
- DefaultUser: defaultUser,
- AllowBareDomains: data.Get("allow_bare_domains").(bool),
- AllowSubdomains: data.Get("allow_subdomains").(bool),
- AllowUserKeyIDs: data.Get("allow_user_key_ids").(bool),
- KeyIDFormat: data.Get("key_id_format").(string),
- KeyType: KeyTypeCA,
- }
-
- if !role.AllowUserCertificates && !role.AllowHostCertificates {
- return nil, logical.ErrorResponse("Either 'allow_user_certificates' or 'allow_host_certificates' must be set to 'true'")
- }
-
- defaultCriticalOptions := convertMapToStringValue(data.Get("default_critical_options").(map[string]interface{}))
- defaultExtensions := convertMapToStringValue(data.Get("default_extensions").(map[string]interface{}))
-
- var maxTTL time.Duration
- maxSystemTTL := b.System().MaxLeaseTTL()
- if len(role.MaxTTL) == 0 {
- maxTTL = maxSystemTTL
- } else {
- var err error
- maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL)
- if err != nil {
- return nil, logical.ErrorResponse(fmt.Sprintf(
- "Invalid max ttl: %s", err))
- }
- }
- if maxTTL > maxSystemTTL {
- return nil, logical.ErrorResponse("Requested max TTL is higher than backend maximum")
- }
-
- ttl := b.System().DefaultLeaseTTL()
- if len(role.TTL) != 0 {
- var err error
- ttl, err = parseutil.ParseDurationSecond(role.TTL)
- if err != nil {
- return nil, logical.ErrorResponse(fmt.Sprintf(
- "Invalid ttl: %s", err))
- }
- }
- if ttl > maxTTL {
- // If they are using the system default, cap it to the role max;
- // if it was specified on the command line, make it an error
- if len(role.TTL) == 0 {
- ttl = maxTTL
- } else {
- return nil, logical.ErrorResponse(
- `"ttl" value must be less than "max_ttl" and/or backend default max lease TTL value`,
- )
- }
- }
-
- // Persist clamped TTLs
- role.TTL = ttl.String()
- role.MaxTTL = maxTTL.String()
- role.DefaultCriticalOptions = defaultCriticalOptions
- role.DefaultExtensions = defaultExtensions
-
- return role, nil
-}
-
-func (b *backend) getRole(s logical.Storage, n string) (*sshRole, error) {
- entry, err := s.Get("roles/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result sshRole
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathRoleList(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("roles/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
-}
-
-func (b *backend) pathRoleRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- role, err := b.getRole(req.Storage, d.Get("role").(string))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- // Return information should be based on the key type of the role
- if role.KeyType == KeyTypeOTP {
- return &logical.Response{
- Data: map[string]interface{}{
- "default_user": role.DefaultUser,
- "cidr_list": role.CIDRList,
- "exclude_cidr_list": role.ExcludeCIDRList,
- "key_type": role.KeyType,
- "port": role.Port,
- "allowed_users": role.AllowedUsers,
- },
- }, nil
- } else if role.KeyType == KeyTypeCA {
- return &logical.Response{
- Data: map[string]interface{}{
- "allowed_users": role.AllowedUsers,
- "allowed_domains": role.AllowedDomains,
- "default_user": role.DefaultUser,
- "max_ttl": role.MaxTTL,
- "ttl": role.TTL,
- "allowed_critical_options": role.AllowedCriticalOptions,
- "allowed_extensions": role.AllowedExtensions,
- "allow_user_certificates": role.AllowUserCertificates,
- "allow_host_certificates": role.AllowHostCertificates,
- "allow_bare_domains": role.AllowBareDomains,
- "allow_subdomains": role.AllowSubdomains,
- "allow_user_key_ids": role.AllowUserKeyIDs,
- "key_id_format": role.KeyIDFormat,
- "key_type": role.KeyType,
- "default_critical_options": role.DefaultCriticalOptions,
- "default_extensions": role.DefaultExtensions,
- },
- }, nil
- } else {
- return &logical.Response{
- Data: map[string]interface{}{
- "key": role.KeyName,
- "admin_user": role.AdminUser,
- "default_user": role.DefaultUser,
- "cidr_list": role.CIDRList,
- "exclude_cidr_list": role.ExcludeCIDRList,
- "port": role.Port,
- "key_type": role.KeyType,
- "key_bits": role.KeyBits,
- "allowed_users": role.AllowedUsers,
- "key_option_specs": role.KeyOptionSpecs,
- // Returning install script will make the output look messy.
- // But this is one way for clients to see the script that is
- // being used to install the key. If there is some problem,
- // the script can be modified and configured by clients.
- "install_script": role.InstallScript,
- },
- }, nil
- }
-}
-
-func (b *backend) pathRoleDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- roleName := d.Get("role").(string)
-
- // If the role was given privilege to accept any IP address, there will
- // be an entry for this role in zero-address roles list. Before the role
- // is removed, the entry in the list has to be removed.
- err := b.removeZeroAddressRole(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
-
- err = req.Storage.Delete(fmt.Sprintf("roles/%s", roleName))
- if err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-const pathRoleHelpSyn = `
-Manage the 'roles' that can be created with this backend.
-`
-
-const pathRoleHelpDesc = `
-This path allows you to manage the roles that are used to generate credentials.
-
-Role takes a 'key_type' parameter that decides what type of credential this role
-can generate. If remote hosts have Vault SSH Agent installed, an 'otp' type can
-be used, otherwise 'dynamic' type can be used.
-
-If the backend is mounted at "ssh" and the role is created at "ssh/roles/web",
-then a user could request for a credential at "ssh/creds/web" for an IP that
-belongs to the role. The credential will be for the 'default_user' registered
-with the role. There is also an optional parameter 'username' for 'creds/' endpoint.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go
deleted file mode 100644
index 4d62f4a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go
+++ /dev/null
@@ -1,429 +0,0 @@
-package ssh
-
-import (
- "crypto/rand"
- "crypto/sha256"
- "errors"
- "fmt"
- "strconv"
- "strings"
- "time"
-
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "golang.org/x/crypto/ssh"
-)
-
-type creationBundle struct {
- KeyId string
- ValidPrincipals []string
- PublicKey ssh.PublicKey
- CertificateType uint32
- TTL time.Duration
- Signer ssh.Signer
- Role *sshRole
- CriticalOptions map[string]string
- Extensions map[string]string
-}
-
-func pathSign(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "sign/" + framework.GenericNameRegex("role"),
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathSign,
- },
-
- Fields: map[string]*framework.FieldSchema{
- "role": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The desired role with configuration for this request.`,
- },
- "ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The requested Time To Live for the SSH certificate;
-sets the expiration date. If not specified
-the role default, backend default, or system
-default TTL is used, in that order. Cannot
-be later than the role max TTL.`,
- },
- "public_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `SSH public key that should be signed.`,
- },
- "valid_principals": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Valid principals, either usernames or hostnames, that the certificate should be signed for.`,
- },
- "cert_type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Type of certificate to be created; either "user" or "host".`,
- Default: "user",
- },
- "key_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Key id that the created certificate should have. If not specified, the display name of the token will be used.`,
- },
- "critical_options": &framework.FieldSchema{
- Type: framework.TypeMap,
- Description: `Critical options that the certificate should be signed for.`,
- },
- "extensions": &framework.FieldSchema{
- Type: framework.TypeMap,
- Description: `Extensions that the certificate should be signed for.`,
- },
- },
-
- HelpSynopsis: `Request signing an SSH key using a certain role with the provided details.`,
- HelpDescription: `This path allows SSH keys to be signed according to the policy of the given role.`,
- }
-}
-
-func (b *backend) pathSign(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- roleName := data.Get("role").(string)
-
- // Get the role
- role, err := b.getRole(req.Storage, roleName)
- if err != nil {
- return nil, err
- }
- if role == nil {
- return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", roleName)), nil
- }
-
- return b.pathSignCertificate(req, data, role)
-}
-
-func (b *backend) pathSignCertificate(req *logical.Request, data *framework.FieldData, role *sshRole) (*logical.Response, error) {
- publicKey := data.Get("public_key").(string)
- if publicKey == "" {
- return logical.ErrorResponse("missing public_key"), nil
- }
-
- userPublicKey, err := parsePublicSSHKey(publicKey)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to parse public_key as SSH key: %s", err)), nil
- }
-
- // Note that these various functions always return "user errors" so we pass
- // them as 4xx values
- keyId, err := b.calculateKeyId(data, req, role, userPublicKey)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- certificateType, err := b.calculateCertificateType(data, role)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- var parsedPrincipals []string
- if certificateType == ssh.HostCert {
- parsedPrincipals, err = b.calculateValidPrincipals(data, "", role.AllowedDomains, validateValidPrincipalForHosts(role))
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
- } else {
- parsedPrincipals, err = b.calculateValidPrincipals(data, role.DefaultUser, role.AllowedUsers, strutil.StrListContains)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
- }
-
- ttl, err := b.calculateTTL(data, role)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- criticalOptions, err := b.calculateCriticalOptions(data, role)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- extensions, err := b.calculateExtensions(data, role)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- privateKeyEntry, err := caKey(req.Storage, caPrivateKey)
- if err != nil {
- return nil, fmt.Errorf("failed to read CA private key: %v", err)
- }
- if privateKeyEntry == nil || privateKeyEntry.Key == "" {
- return nil, fmt.Errorf("failed to read CA private key")
- }
-
- signer, err := ssh.ParsePrivateKey([]byte(privateKeyEntry.Key))
- if err != nil {
- return nil, fmt.Errorf("failed to parse stored CA private key: %v", err)
- }
-
- cBundle := creationBundle{
- KeyId: keyId,
- PublicKey: userPublicKey,
- Signer: signer,
- ValidPrincipals: parsedPrincipals,
- TTL: ttl,
- CertificateType: certificateType,
- Role: role,
- CriticalOptions: criticalOptions,
- Extensions: extensions,
- }
-
- certificate, err := cBundle.sign()
- if err != nil {
- return nil, err
- }
-
- signedSSHCertificate := ssh.MarshalAuthorizedKey(certificate)
- if len(signedSSHCertificate) == 0 {
- return nil, fmt.Errorf("error marshaling signed certificate")
- }
-
- response := &logical.Response{
- Data: map[string]interface{}{
- "serial_number": strconv.FormatUint(certificate.Serial, 16),
- "signed_key": string(signedSSHCertificate),
- },
- }
-
- return response, nil
-}
-
-func (b *backend) calculateValidPrincipals(data *framework.FieldData, defaultPrincipal, principalsAllowedByRole string, validatePrincipal func([]string, string) bool) ([]string, error) {
- validPrincipals := ""
- validPrincipalsRaw, ok := data.GetOk("valid_principals")
- if ok {
- validPrincipals = validPrincipalsRaw.(string)
- } else {
- validPrincipals = defaultPrincipal
- }
-
- parsedPrincipals := strutil.RemoveDuplicates(strutil.ParseStringSlice(validPrincipals, ","), false)
- allowedPrincipals := strutil.RemoveDuplicates(strutil.ParseStringSlice(principalsAllowedByRole, ","), false)
- switch {
- case len(parsedPrincipals) == 0:
- // There is nothing to process
- return nil, nil
- case len(allowedPrincipals) == 0:
- // User has requested principals to be set, but role is not configured
- // with any principals
- return nil, fmt.Errorf("role is not configured to allow any principles")
- default:
- // Role was explicitly configured to allow any principal.
- if principalsAllowedByRole == "*" {
- return parsedPrincipals, nil
- }
-
- for _, principal := range parsedPrincipals {
- if !validatePrincipal(allowedPrincipals, principal) {
- return nil, fmt.Errorf("%v is not a valid value for valid_principals", principal)
- }
- }
- return parsedPrincipals, nil
- }
-}
-
-func validateValidPrincipalForHosts(role *sshRole) func([]string, string) bool {
- return func(allowedPrincipals []string, validPrincipal string) bool {
- for _, allowedPrincipal := range allowedPrincipals {
- if allowedPrincipal == validPrincipal && role.AllowBareDomains {
- return true
- }
- if role.AllowSubdomains && strings.HasSuffix(validPrincipal, "."+allowedPrincipal) {
- return true
- }
- }
-
- return false
- }
-}
-
-func (b *backend) calculateCertificateType(data *framework.FieldData, role *sshRole) (uint32, error) {
- requestedCertificateType := data.Get("cert_type").(string)
-
- var certificateType uint32
- switch requestedCertificateType {
- case "user":
- if !role.AllowUserCertificates {
- return 0, errors.New("cert_type 'user' is not allowed by role")
- }
- certificateType = ssh.UserCert
- case "host":
- if !role.AllowHostCertificates {
- return 0, errors.New("cert_type 'host' is not allowed by role")
- }
- certificateType = ssh.HostCert
- default:
- return 0, errors.New("cert_type must be either 'user' or 'host'")
- }
-
- return certificateType, nil
-}
-
-func (b *backend) calculateKeyId(data *framework.FieldData, req *logical.Request, role *sshRole, pubKey ssh.PublicKey) (string, error) {
- reqId := data.Get("key_id").(string)
-
- if reqId != "" {
- if !role.AllowUserKeyIDs {
- return "", fmt.Errorf("setting key_id is not allowed by role")
- }
- return reqId, nil
- }
-
- keyIDFormat := "vault-{{token_display_name}}-{{public_key_hash}}"
- if req.DisplayName == "" {
- keyIDFormat = "vault-{{public_key_hash}}"
- }
-
- if role.KeyIDFormat != "" {
- keyIDFormat = role.KeyIDFormat
- }
-
- keyID := substQuery(keyIDFormat, map[string]string{
- "token_display_name": req.DisplayName,
- "role_name": data.Get("role").(string),
- "public_key_hash": fmt.Sprintf("%x", sha256.Sum256(pubKey.Marshal())),
- })
-
- return keyID, nil
-}
-
-func (b *backend) calculateCriticalOptions(data *framework.FieldData, role *sshRole) (map[string]string, error) {
- unparsedCriticalOptions := data.Get("critical_options").(map[string]interface{})
- if len(unparsedCriticalOptions) == 0 {
- return role.DefaultCriticalOptions, nil
- }
-
- criticalOptions := convertMapToStringValue(unparsedCriticalOptions)
-
- if role.AllowedCriticalOptions != "" {
- notAllowedOptions := []string{}
- allowedCriticalOptions := strings.Split(role.AllowedCriticalOptions, ",")
-
- for option := range criticalOptions {
- if !strutil.StrListContains(allowedCriticalOptions, option) {
- notAllowedOptions = append(notAllowedOptions, option)
- }
- }
-
- if len(notAllowedOptions) != 0 {
- return nil, fmt.Errorf("Critical options not on allowed list: %v", notAllowedOptions)
- }
- }
-
- return criticalOptions, nil
-}
-
-func (b *backend) calculateExtensions(data *framework.FieldData, role *sshRole) (map[string]string, error) {
- unparsedExtensions := data.Get("extensions").(map[string]interface{})
- if len(unparsedExtensions) == 0 {
- return role.DefaultExtensions, nil
- }
-
- extensions := convertMapToStringValue(unparsedExtensions)
-
- if role.AllowedExtensions != "" {
- notAllowed := []string{}
- allowedExtensions := strings.Split(role.AllowedExtensions, ",")
-
- for extension := range extensions {
- if !strutil.StrListContains(allowedExtensions, extension) {
- notAllowed = append(notAllowed, extension)
- }
- }
-
- if len(notAllowed) != 0 {
- return nil, fmt.Errorf("extensions %v are not on allowed list", notAllowed)
- }
- }
-
- return extensions, nil
-}
-
-func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.Duration, error) {
-
- var ttl, maxTTL time.Duration
- var ttlField string
- ttlFieldInt, ok := data.GetOk("ttl")
- if !ok {
- ttlField = role.TTL
- } else {
- ttlField = ttlFieldInt.(string)
- }
-
- if len(ttlField) == 0 {
- ttl = b.System().DefaultLeaseTTL()
- } else {
- var err error
- ttl, err = parseutil.ParseDurationSecond(ttlField)
- if err != nil {
- return 0, fmt.Errorf("invalid requested ttl: %s", err)
- }
- }
-
- if len(role.MaxTTL) == 0 {
- maxTTL = b.System().MaxLeaseTTL()
- } else {
- var err error
- maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL)
- if err != nil {
- return 0, fmt.Errorf("invalid requested max ttl: %s", err)
- }
- }
-
- if ttl > maxTTL {
- // Don't error if they were using system defaults, only error if
- // they specifically chose a bad TTL
- if len(ttlField) == 0 {
- ttl = maxTTL
- } else {
- return 0, fmt.Errorf("ttl is larger than maximum allowed (%d)", maxTTL/time.Second)
- }
- }
-
- return ttl, nil
-}
-
-func (b *creationBundle) sign() (retCert *ssh.Certificate, retErr error) {
- defer func() {
- if r := recover(); r != nil {
- errMsg, ok := r.(string)
- if ok {
- retCert = nil
- retErr = errors.New(errMsg)
- }
- }
- }()
-
- serialNumber, err := certutil.GenerateSerialNumber()
- if err != nil {
- return nil, err
- }
-
- now := time.Now()
-
- certificate := &ssh.Certificate{
- Serial: serialNumber.Uint64(),
- Key: b.PublicKey,
- KeyId: b.KeyId,
- ValidPrincipals: b.ValidPrincipals,
- ValidAfter: uint64(now.Add(-30 * time.Second).In(time.UTC).Unix()),
- ValidBefore: uint64(now.Add(b.TTL).In(time.UTC).Unix()),
- CertType: b.CertificateType,
- Permissions: ssh.Permissions{
- CriticalOptions: b.CriticalOptions,
- Extensions: b.Extensions,
- },
- }
-
- err = certificate.SignCert(rand.Reader, b.Signer)
- if err != nil {
- return nil, fmt.Errorf("failed to generate signed SSH key")
- }
-
- return certificate, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go
deleted file mode 100644
index 1c5e453..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package ssh
-
-import (
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathVerify(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "verify",
- Fields: map[string]*framework.FieldSchema{
- "otp": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "[Required] One-Time-Key that needs to be validated",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathVerifyWrite,
- },
- HelpSynopsis: pathVerifyHelpSyn,
- HelpDescription: pathVerifyHelpDesc,
- }
-}
-
-func (b *backend) getOTP(s logical.Storage, n string) (*sshOTP, error) {
- entry, err := s.Get("otp/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result sshOTP
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathVerifyWrite(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- otp := d.Get("otp").(string)
-
- // If OTP is not a UUID and a string matching VerifyEchoRequest, then the
- // response will be VerifyEchoResponse. This is used by agent to check if
- // connection to Vault server is proper.
- if otp == api.VerifyEchoRequest {
- return &logical.Response{
- Data: map[string]interface{}{
- "message": api.VerifyEchoResponse,
- },
- }, nil
- }
-
- // Create the salt of OTP because entry would have been create with the
- // salt and not directly of the OTP. Salt will yield the same value which
- // because the seed is the same, the backend salt.
- salt, err := b.Salt()
- if err != nil {
- return nil, err
- }
- otpSalted := salt.SaltID(otp)
-
- // Return nil if there is no entry found for the OTP
- otpEntry, err := b.getOTP(req.Storage, otpSalted)
- if err != nil {
- return nil, err
- }
- if otpEntry == nil {
- return logical.ErrorResponse("OTP not found"), nil
- }
-
- // Delete the OTP if found. This is what makes the key an OTP.
- err = req.Storage.Delete("otp/" + otpSalted)
- if err != nil {
- return nil, err
- }
-
- // Return username and IP only if there were no problems uptill this point.
- return &logical.Response{
- Data: map[string]interface{}{
- "username": otpEntry.Username,
- "ip": otpEntry.IP,
- "role_name": otpEntry.RoleName,
- },
- }, nil
-}
-
-const pathVerifyHelpSyn = `
-Validate the OTP provided by Vault SSH Agent.
-`
-
-const pathVerifyHelpDesc = `
-This path will be used by Vault SSH Agent runnin in the remote hosts. The OTP
-provided by the client is sent to Vault for validation by the agent. If Vault
-finds an entry for the OTP, it responds with the username and IP it is associated
-with. Agent uses this information to authenticate the client. Vault deletes the
-OTP after validating it once.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_dynamic_key.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_dynamic_key.go
deleted file mode 100644
index bbe9df2..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_dynamic_key.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package ssh
-
-import (
- "fmt"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/mitchellh/mapstructure"
-)
-
-const SecretDynamicKeyType = "secret_dynamic_key_type"
-
-func secretDynamicKey(b *backend) *framework.Secret {
- return &framework.Secret{
- Type: SecretDynamicKeyType,
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Username in host",
- },
- "ip": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "IP address of host",
- },
- },
-
- Renew: b.secretDynamicKeyRenew,
- Revoke: b.secretDynamicKeyRevoke,
- }
-}
-
-func (b *backend) secretDynamicKeyRenew(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- f := framework.LeaseExtend(0, 0, b.System())
- return f(req, d)
-}
-
-func (b *backend) secretDynamicKeyRevoke(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- type sec struct {
- AdminUser string `mapstructure:"admin_user"`
- Username string `mapstructure:"username"`
- IP string `mapstructure:"ip"`
- HostKeyName string `mapstructure:"host_key_name"`
- DynamicPublicKey string `mapstructure:"dynamic_public_key"`
- InstallScript string `mapstructure:"install_script"`
- Port int `mapstructure:"port"`
- }
-
- intSec := &sec{}
- err := mapstructure.Decode(req.Secret.InternalData, intSec)
- if err != nil {
- return nil, errwrap.Wrapf("secret internal data could not be decoded: {{err}}", err)
- }
-
- // Fetch the host key using the key name
- hostKey, err := b.getKey(req.Storage, intSec.HostKeyName)
- if err != nil {
- return nil, fmt.Errorf("key %q not found error: %v", intSec.HostKeyName, err)
- }
- if hostKey == nil {
- return nil, fmt.Errorf("key %q not found", intSec.HostKeyName)
- }
-
- // Remove the public key from authorized_keys file in target machine
- // The last param 'false' indicates that the key should be uninstalled.
- err = b.installPublicKeyInTarget(intSec.AdminUser, intSec.Username, intSec.IP, intSec.Port, hostKey.Key, intSec.DynamicPublicKey, intSec.InstallScript, false)
- if err != nil {
- return nil, fmt.Errorf("error removing public key from authorized_keys file in target")
- }
- return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go
deleted file mode 100644
index cc8872b..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package ssh
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const SecretOTPType = "secret_otp_type"
-
-func secretOTP(b *backend) *framework.Secret {
- return &framework.Secret{
- Type: SecretOTPType,
- Fields: map[string]*framework.FieldSchema{
- "otp": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "One time password",
- },
- },
-
- Revoke: b.secretOTPRevoke,
- }
-}
-
-func (b *backend) secretOTPRevoke(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- otpRaw, ok := req.Secret.InternalData["otp"]
- if !ok {
- return nil, fmt.Errorf("secret is missing internal data")
- }
- otp, ok := otpRaw.(string)
- if !ok {
- return nil, fmt.Errorf("secret is missing internal data")
- }
-
- salt, err := b.Salt()
- if err != nil {
- return nil, err
- }
- err = req.Storage.Delete("otp/" + salt.SaltID(otp))
- if err != nil {
- return nil, err
- }
- return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go
deleted file mode 100644
index 106c740..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go
+++ /dev/null
@@ -1,223 +0,0 @@
-package ssh
-
-import (
- "bytes"
- "crypto/rand"
- "crypto/rsa"
- "crypto/x509"
- "encoding/base64"
- "encoding/pem"
- "fmt"
- "net"
- "strings"
- "time"
-
- "github.com/hashicorp/vault/logical"
-
- log "github.com/mgutz/logxi/v1"
- "golang.org/x/crypto/ssh"
-)
-
-// Creates a new RSA key pair with the given key length. The private key will be
-// of pem format and the public key will be of OpenSSH format.
-func generateRSAKeys(keyBits int) (publicKeyRsa string, privateKeyRsa string, err error) {
- privateKey, err := rsa.GenerateKey(rand.Reader, keyBits)
- if err != nil {
- return "", "", fmt.Errorf("error generating RSA key-pair: %v", err)
- }
-
- privateKeyRsa = string(pem.EncodeToMemory(&pem.Block{
- Type: "RSA PRIVATE KEY",
- Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
- }))
-
- sshPublicKey, err := ssh.NewPublicKey(privateKey.Public())
- if err != nil {
- return "", "", fmt.Errorf("error generating RSA key-pair: %v", err)
- }
- publicKeyRsa = "ssh-rsa " + base64.StdEncoding.EncodeToString(sshPublicKey.Marshal())
- return
-}
-
-// Public key and the script to install the key are uploaded to remote machine.
-// Public key is either added or removed from authorized_keys file using the
-// script. Default script is for a Linux machine and hence the path of the
-// authorized_keys file is hard coded to resemble Linux.
-//
-// The last param 'install' if false, uninstalls the key.
-func (b *backend) installPublicKeyInTarget(adminUser, username, ip string, port int, hostkey, dynamicPublicKey, installScript string, install bool) error {
- // Transfer the newly generated public key to remote host under a random
- // file name. This is to avoid name collisions from other requests.
- _, publicKeyFileName, err := b.GenerateSaltedOTP()
- if err != nil {
- return err
- }
-
- comm, err := createSSHComm(b.Logger(), adminUser, ip, port, hostkey)
- if err != nil {
- return err
- }
- defer comm.Close()
-
- err = comm.Upload(publicKeyFileName, bytes.NewBufferString(dynamicPublicKey), nil)
- if err != nil {
- return fmt.Errorf("error uploading public key: %v", err)
- }
-
- // Transfer the script required to install or uninstall the key to the remote
- // host under a random file name as well. This is to avoid name collisions
- // from other requests.
- scriptFileName := fmt.Sprintf("%s.sh", publicKeyFileName)
- err = comm.Upload(scriptFileName, bytes.NewBufferString(installScript), nil)
- if err != nil {
- return fmt.Errorf("error uploading install script: %v", err)
- }
-
- // Create a session to run remote command that triggers the script to install
- // or uninstall the key.
- session, err := comm.NewSession()
- if err != nil {
- return fmt.Errorf("unable to create SSH Session using public keys: %v", err)
- }
- if session == nil {
- return fmt.Errorf("invalid session object")
- }
- defer session.Close()
-
- authKeysFileName := fmt.Sprintf("/home/%s/.ssh/authorized_keys", username)
-
- var installOption string
- if install {
- installOption = "install"
- } else {
- installOption = "uninstall"
- }
-
- // Give execute permissions to install script, run and delete it.
- chmodCmd := fmt.Sprintf("chmod +x %s", scriptFileName)
- scriptCmd := fmt.Sprintf("./%s %s %s %s", scriptFileName, installOption, publicKeyFileName, authKeysFileName)
- rmCmd := fmt.Sprintf("rm -f %s", scriptFileName)
- targetCmd := fmt.Sprintf("%s;%s;%s", chmodCmd, scriptCmd, rmCmd)
-
- session.Run(targetCmd)
- return nil
-}
-
-// Takes an IP address and role name and checks if the IP is part
-// of CIDR blocks belonging to the role.
-func roleContainsIP(s logical.Storage, roleName string, ip string) (bool, error) {
- if roleName == "" {
- return false, fmt.Errorf("missing role name")
- }
-
- if ip == "" {
- return false, fmt.Errorf("missing ip")
- }
-
- roleEntry, err := s.Get(fmt.Sprintf("roles/%s", roleName))
- if err != nil {
- return false, fmt.Errorf("error retrieving role %v", err)
- }
- if roleEntry == nil {
- return false, fmt.Errorf("role %q not found", roleName)
- }
-
- var role sshRole
- if err := roleEntry.DecodeJSON(&role); err != nil {
- return false, fmt.Errorf("error decoding role %q", roleName)
- }
-
- if matched, err := cidrListContainsIP(ip, role.CIDRList); err != nil {
- return false, err
- } else {
- return matched, nil
- }
-}
-
-// Returns true if the IP supplied by the user is part of the comma
-// separated CIDR blocks
-func cidrListContainsIP(ip, cidrList string) (bool, error) {
- if len(cidrList) == 0 {
- return false, fmt.Errorf("IP does not belong to role")
- }
- for _, item := range strings.Split(cidrList, ",") {
- _, cidrIPNet, err := net.ParseCIDR(item)
- if err != nil {
- return false, fmt.Errorf("invalid CIDR entry %q", item)
- }
- if cidrIPNet.Contains(net.ParseIP(ip)) {
- return true, nil
- }
- }
- return false, nil
-}
-
-func createSSHComm(logger log.Logger, username, ip string, port int, hostkey string) (*comm, error) {
- signer, err := ssh.ParsePrivateKey([]byte(hostkey))
- if err != nil {
- return nil, err
- }
-
- clientConfig := &ssh.ClientConfig{
- User: username,
- Auth: []ssh.AuthMethod{
- ssh.PublicKeys(signer),
- },
- HostKeyCallback: ssh.InsecureIgnoreHostKey(),
- }
-
- connfunc := func() (net.Conn, error) {
- c, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", ip, port), 15*time.Second)
- if err != nil {
- return nil, err
- }
-
- if tcpConn, ok := c.(*net.TCPConn); ok {
- tcpConn.SetKeepAlive(true)
- tcpConn.SetKeepAlivePeriod(5 * time.Second)
- }
-
- return c, nil
- }
- config := &SSHCommConfig{
- SSHConfig: clientConfig,
- Connection: connfunc,
- Pty: false,
- DisableAgent: true,
- Logger: logger,
- }
-
- return SSHCommNew(fmt.Sprintf("%s:%d", ip, port), config)
-}
-
-func parsePublicSSHKey(key string) (ssh.PublicKey, error) {
- keyParts := strings.Split(key, " ")
- if len(keyParts) > 1 {
- // Someone has sent the 'full' public key rather than just the base64 encoded part that the ssh library wants
- key = keyParts[1]
- }
-
- decodedKey, err := base64.StdEncoding.DecodeString(key)
- if err != nil {
- return nil, err
- }
-
- return ssh.ParsePublicKey([]byte(decodedKey))
-}
-
-func convertMapToStringValue(initial map[string]interface{}) map[string]string {
- result := map[string]string{}
- for key, value := range initial {
- result[key] = fmt.Sprintf("%v", value)
- }
- return result
-}
-
-// Serve a template processor for custom format inputs
-func substQuery(tpl string, data map[string]string) string {
- for k, v := range data {
- tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
- }
-
- return tpl
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend.go
deleted file mode 100644
index 936b46b..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package totp
-
-import (
- "strings"
- "time"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- cache "github.com/patrickmn/go-cache"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(backendHelp),
-
- Paths: []*framework.Path{
- pathListKeys(&b),
- pathKeys(&b),
- pathCode(&b),
- },
-
- Secrets: []*framework.Secret{},
- BackendType: logical.TypeLogical,
- }
-
- b.usedCodes = cache.New(0, 30*time.Second)
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
-
- usedCodes *cache.Cache
-}
-
-const backendHelp = `
-The TOTP backend dynamically generates time-based one-time use passwords.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend_test.go
deleted file mode 100644
index a3304c2..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend_test.go
+++ /dev/null
@@ -1,1131 +0,0 @@
-package totp
-
-import (
- "fmt"
- "log"
- "net/url"
- "path"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/logical"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/mitchellh/mapstructure"
- otplib "github.com/pquerna/otp"
- totplib "github.com/pquerna/otp/totp"
-)
-
-func createKey() (string, error) {
- keyUrl, err := totplib.Generate(totplib.GenerateOpts{
- Issuer: "Vault",
- AccountName: "Test",
- })
-
- key := keyUrl.Secret()
-
- return key, err
-}
-
-func generateCode(key string, period uint, digits otplib.Digits, algorithm otplib.Algorithm) (string, error) {
- // Generate password using totp library
- totpToken, err := totplib.GenerateCodeCustom(key, time.Now(), totplib.ValidateOpts{
- Period: period,
- Digits: digits,
- Algorithm: algorithm,
- })
-
- return totpToken, err
-}
-
-func TestBackend_readCredentialsDefaultValues(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // Generate a new shared key
- key, _ := createKey()
-
- keyData := map[string]interface{}{
- "key": key,
- "generate": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "",
- "account_name": "",
- "digits": otplib.DigitsSix,
- "period": 30,
- "algorithm": otplib.AlgorithmSHA1,
- "key": key,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- testAccStepReadCreds(t, b, config.StorageView, "test", expected),
- },
- })
-}
-
-func TestBackend_readCredentialsEightDigitsThirtySecondPeriod(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // Generate a new shared key
- key, _ := createKey()
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "key": key,
- "digits": 8,
- "generate": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "digits": otplib.DigitsEight,
- "period": 30,
- "algorithm": otplib.AlgorithmSHA1,
- "key": key,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- testAccStepReadCreds(t, b, config.StorageView, "test", expected),
- },
- })
-}
-
-func TestBackend_readCredentialsSixDigitsNinetySecondPeriod(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // Generate a new shared key
- key, _ := createKey()
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "key": key,
- "period": 90,
- "generate": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "digits": otplib.DigitsSix,
- "period": 90,
- "algorithm": otplib.AlgorithmSHA1,
- "key": key,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- testAccStepReadCreds(t, b, config.StorageView, "test", expected),
- },
- })
-}
-
-func TestBackend_readCredentialsSHA256(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // Generate a new shared key
- key, _ := createKey()
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "key": key,
- "algorithm": "SHA256",
- "generate": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "digits": otplib.DigitsSix,
- "period": 30,
- "algorithm": otplib.AlgorithmSHA256,
- "key": key,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- testAccStepReadCreds(t, b, config.StorageView, "test", expected),
- },
- })
-}
-
-func TestBackend_readCredentialsSHA512(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // Generate a new shared key
- key, _ := createKey()
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "key": key,
- "algorithm": "SHA512",
- "generate": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "digits": otplib.DigitsSix,
- "period": 30,
- "algorithm": otplib.AlgorithmSHA512,
- "key": key,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- testAccStepReadCreds(t, b, config.StorageView, "test", expected),
- },
- })
-}
-
-func TestBackend_keyCrudDefaultValues(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- key, _ := createKey()
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "key": key,
- "generate": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "digits": otplib.DigitsSix,
- "period": 30,
- "algorithm": otplib.AlgorithmSHA1,
- "key": key,
- }
-
- code, _ := generateCode(key, 30, otplib.DigitsSix, otplib.AlgorithmSHA1)
- invalidCode := "12345678"
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- testAccStepValidateCode(t, "test", code, true, false),
- // Next step should fail because it should be in the used cache
- testAccStepValidateCode(t, "test", code, false, true),
- testAccStepValidateCode(t, "test", invalidCode, false, false),
- testAccStepDeleteKey(t, "test"),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_createKeyMissingKeyValue(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "generate": false,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_createKeyInvalidKeyValue(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "key": "1",
- "generate": false,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_createKeyInvalidAlgorithm(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // Generate a new shared key
- key, _ := createKey()
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "key": key,
- "algorithm": "BADALGORITHM",
- "generate": false,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_createKeyInvalidPeriod(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // Generate a new shared key
- key, _ := createKey()
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "key": key,
- "period": -1,
- "generate": false,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_createKeyInvalidDigits(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- // Generate a new shared key
- key, _ := createKey()
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "key": key,
- "digits": 20,
- "generate": false,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_generatedKeyDefaultValues(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "generate": true,
- "key_size": 20,
- "exported": true,
- "qr_size": 200,
- }
-
- expected := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "digits": otplib.DigitsSix,
- "period": 30,
- "algorithm": otplib.AlgorithmSHA1,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- },
- })
-}
-
-func TestBackend_generatedKeyDefaultValuesNoQR(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "generate": true,
- "key_size": 20,
- "exported": true,
- "qr_size": 0,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- },
- })
-}
-
-func TestBackend_generatedKeyNonDefaultKeySize(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "generate": true,
- "key_size": 10,
- "exported": true,
- "qr_size": 200,
- }
-
- expected := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "digits": otplib.DigitsSix,
- "period": 30,
- "algorithm": otplib.AlgorithmSHA1,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- },
- })
-}
-
-func TestBackend_urlPassedNonGeneratedKeyInvalidPeriod(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=AZ"
-
- keyData := map[string]interface{}{
- "url": urlString,
- "generate": false,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_urlPassedNonGeneratedKeyInvalidDigits(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=Q&period=60"
-
- keyData := map[string]interface{}{
- "url": urlString,
- "generate": false,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_urlPassedNonGeneratedKeyIssuerInFirstPosition(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60"
-
- keyData := map[string]interface{}{
- "url": urlString,
- "generate": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "test@email.com",
- "digits": otplib.DigitsSix,
- "period": 60,
- "algorithm": otplib.AlgorithmSHA512,
- "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- testAccStepReadCreds(t, b, config.StorageView, "test", expected),
- },
- })
-}
-
-func TestBackend_urlPassedNonGeneratedKeyIssuerInQueryString(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- urlString := "otpauth://totp/test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60&issuer=Vault"
-
- keyData := map[string]interface{}{
- "url": urlString,
- "generate": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "test@email.com",
- "digits": otplib.DigitsSix,
- "period": 60,
- "algorithm": otplib.AlgorithmSHA512,
- "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- testAccStepReadCreds(t, b, config.StorageView, "test", expected),
- },
- })
-}
-
-func TestBackend_urlPassedNonGeneratedKeyMissingIssuer(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- urlString := "otpauth://totp/test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60"
-
- keyData := map[string]interface{}{
- "url": urlString,
- "generate": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "",
- "account_name": "test@email.com",
- "digits": otplib.DigitsSix,
- "period": 60,
- "algorithm": otplib.AlgorithmSHA512,
- "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- testAccStepReadCreds(t, b, config.StorageView, "test", expected),
- },
- })
-}
-
-func TestBackend_urlPassedNonGeneratedKeyMissingAccountName(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- urlString := "otpauth://totp/Vault:?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60"
-
- keyData := map[string]interface{}{
- "url": urlString,
- "generate": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "",
- "digits": otplib.DigitsSix,
- "period": 60,
- "algorithm": otplib.AlgorithmSHA512,
- "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- testAccStepReadCreds(t, b, config.StorageView, "test", expected),
- },
- })
-}
-
-func TestBackend_urlPassedNonGeneratedKeyMissingAccountNameandIssuer(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- urlString := "otpauth://totp/?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60"
-
- keyData := map[string]interface{}{
- "url": urlString,
- "generate": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "",
- "account_name": "",
- "digits": otplib.DigitsSix,
- "period": 60,
- "algorithm": otplib.AlgorithmSHA512,
- "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- testAccStepReadCreds(t, b, config.StorageView, "test", expected),
- },
- })
-}
-
-func TestBackend_generatedKeyInvalidSkew(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "skew": "2",
- "generate": true,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_generatedKeyInvalidQRSize(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "qr_size": "-100",
- "generate": true,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_generatedKeyInvalidKeySize(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "Test",
- "key_size": "-100",
- "generate": true,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_generatedKeyMissingAccountName(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "generate": true,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_generatedKeyMissingIssuer(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "account_name": "test@email.com",
- "generate": true,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_invalidURLValue(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "url": "notaurl",
- "generate": false,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_urlAndGenerateTrue(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "url": "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60",
- "generate": true,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_keyAndGenerateTrue(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
- "generate": true,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, true),
- testAccStepReadKey(t, "test", nil),
- },
- })
-}
-
-func TestBackend_generatedKeyExportedFalse(t *testing.T) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
- b, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-
- keyData := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "test@email.com",
- "generate": true,
- "exported": false,
- }
-
- expected := map[string]interface{}{
- "issuer": "Vault",
- "account_name": "test@email.com",
- "digits": otplib.DigitsSix,
- "period": 30,
- "algorithm": otplib.AlgorithmSHA1,
- }
-
- logicaltest.Test(t, logicaltest.TestCase{
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepCreateKey(t, "test", keyData, false),
- testAccStepReadKey(t, "test", expected),
- },
- })
-}
-
-func testAccStepCreateKey(t *testing.T, name string, keyData map[string]interface{}, expectFail bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: path.Join("keys", name),
- Data: keyData,
- ErrorOk: expectFail,
- Check: func(resp *logical.Response) error {
- //Skip this if the key is not generated by vault or if the test is expected to fail
- if !keyData["generate"].(bool) || expectFail {
- return nil
- }
-
- // Check to see if barcode and url were returned if exported is false
- if !keyData["exported"].(bool) {
- if resp != nil {
- t.Fatalf("data was returned when exported was set to false")
- }
- return nil
- }
-
- // Check to see if a barcode was returned when qr_size is zero
- if keyData["qr_size"].(int) == 0 {
- if _, exists := resp.Data["barcode"]; exists {
- t.Fatalf("a barcode was returned when qr_size was set to zero")
- }
- return nil
- }
-
- var d struct {
- Url string `mapstructure:"url"`
- Barcode string `mapstructure:"barcode"`
- }
-
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- //Check to see if barcode and url are returned
- if d.Barcode == "" {
- t.Fatalf("a barcode was not returned for a generated key")
- }
-
- if d.Url == "" {
- t.Fatalf("a url was not returned for a generated key")
- }
-
- //Parse url
- urlObject, err := url.Parse(d.Url)
-
- if err != nil {
- t.Fatal("an error occured while parsing url string")
- }
-
- //Set up query object
- urlQuery := urlObject.Query()
-
- //Read secret
- urlSecret := urlQuery.Get("secret")
-
- //Check key length
- keySize := keyData["key_size"].(int)
- correctSecretStringSize := (keySize / 5) * 8
- actualSecretStringSize := len(urlSecret)
-
- if actualSecretStringSize != correctSecretStringSize {
- t.Fatal("incorrect key string length")
- }
-
- return nil
- },
- }
-}
-
-func testAccStepDeleteKey(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: path.Join("keys", name),
- }
-}
-
-func testAccStepReadCreds(t *testing.T, b logical.Backend, s logical.Storage, name string, validation map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: path.Join("code", name),
- Check: func(resp *logical.Response) error {
- var d struct {
- Code string `mapstructure:"code"`
- }
-
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- log.Printf("[TRACE] Generated credentials: %v", d)
-
- period := validation["period"].(int)
- key := validation["key"].(string)
- algorithm := validation["algorithm"].(otplib.Algorithm)
- digits := validation["digits"].(otplib.Digits)
-
- valid, _ := totplib.ValidateCustom(d.Code, key, time.Now(), totplib.ValidateOpts{
- Period: uint(period),
- Skew: 1,
- Digits: digits,
- Algorithm: algorithm,
- })
-
- if !valid {
- t.Fatalf("generated code isn't valid")
- }
-
- return nil
- },
- }
-}
-
-func testAccStepReadKey(t *testing.T, name string, expected map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "keys/" + name,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- if expected == nil {
- return nil
- }
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- Issuer string `mapstructure:"issuer"`
- AccountName string `mapstructure:"account_name"`
- Period uint `mapstructure:"period"`
- Algorithm string `mapstructure:"algorithm"`
- Digits otplib.Digits `mapstructure:"digits"`
- }
-
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- var keyAlgorithm otplib.Algorithm
- switch d.Algorithm {
- case "SHA1":
- keyAlgorithm = otplib.AlgorithmSHA1
- case "SHA256":
- keyAlgorithm = otplib.AlgorithmSHA256
- case "SHA512":
- keyAlgorithm = otplib.AlgorithmSHA512
- }
-
- period := expected["period"].(int)
-
- switch {
- case d.Issuer != expected["issuer"]:
- return fmt.Errorf("issuer should equal: %s", expected["issuer"])
- case d.AccountName != expected["account_name"]:
- return fmt.Errorf("account_name should equal: %s", expected["account_name"])
- case d.Period != uint(period):
- return fmt.Errorf("period should equal: %d", expected["period"])
- case keyAlgorithm != expected["algorithm"]:
- return fmt.Errorf("algorithm should equal: %s", expected["algorithm"])
- case d.Digits != expected["digits"]:
- return fmt.Errorf("digits should equal: %d", expected["digits"])
- }
- return nil
- },
- }
-}
-
-func testAccStepValidateCode(t *testing.T, name string, code string, valid, expectError bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "code/" + name,
- Data: map[string]interface{}{
- "code": code,
- },
- ErrorOk: expectError,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- return fmt.Errorf("bad: %#v", resp)
- }
-
- var d struct {
- Valid bool `mapstructure:"valid"`
- }
-
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- switch valid {
- case true:
- if d.Valid != true {
- return fmt.Errorf("code was not valid: %s", code)
- }
-
- default:
- if d.Valid != false {
- return fmt.Errorf("code was incorrectly validated: %s", code)
- }
- }
- return nil
- },
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_code.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_code.go
deleted file mode 100644
index ebc3d47..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_code.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package totp
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- otplib "github.com/pquerna/otp"
- totplib "github.com/pquerna/otp/totp"
-)
-
-func pathCode(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "code/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the key.",
- },
- "code": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "TOTP code to be validated.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathReadCode,
- logical.UpdateOperation: b.pathValidateCode,
- },
-
- HelpSynopsis: pathCodeHelpSyn,
- HelpDescription: pathCodeHelpDesc,
- }
-}
-
-func (b *backend) pathReadCode(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- // Get the key
- key, err := b.Key(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if key == nil {
- return logical.ErrorResponse(fmt.Sprintf("unknown key: %s", name)), nil
- }
-
- // Generate password using totp library
- totpToken, err := totplib.GenerateCodeCustom(key.Key, time.Now(), totplib.ValidateOpts{
- Period: key.Period,
- Digits: key.Digits,
- Algorithm: key.Algorithm,
- })
- if err != nil {
- return nil, err
- }
-
- // Return the secret
- return &logical.Response{
- Data: map[string]interface{}{
- "code": totpToken,
- },
- }, nil
-}
-
-func (b *backend) pathValidateCode(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
- code := data.Get("code").(string)
-
- // Enforce input value requirements
- if code == "" {
- return logical.ErrorResponse("the code value is required"), nil
- }
-
- // Get the key's stored values
- key, err := b.Key(req.Storage, name)
- if err != nil {
- return nil, err
- }
- if key == nil {
- return logical.ErrorResponse(fmt.Sprintf("unknown key: %s", name)), nil
- }
-
- usedName := fmt.Sprintf("%s_%s", name, code)
-
- _, ok := b.usedCodes.Get(usedName)
- if ok {
- return logical.ErrorResponse("code already used; wait until the next time period"), nil
- }
-
- valid, err := totplib.ValidateCustom(code, key.Key, time.Now(), totplib.ValidateOpts{
- Period: key.Period,
- Skew: key.Skew,
- Digits: key.Digits,
- Algorithm: key.Algorithm,
- })
- if err != nil && err != otplib.ErrValidateInputInvalidLength {
- return logical.ErrorResponse("an error occured while validating the code"), err
- }
-
- // Take the key skew, add two for behind and in front, and multiple that by
- // the period to cover the full possibility of the validity of the key
- err = b.usedCodes.Add(usedName, nil, time.Duration(
- int64(time.Second)*
- int64(key.Period)*
- int64((2+key.Skew))))
- if err != nil {
- return nil, errwrap.Wrapf("error adding code to used cache: {{err}}", err)
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "valid": valid,
- },
- }, nil
-}
-
-const pathCodeHelpSyn = `
-Request time-based one-time use password or validate a password for a certain key .
-`
-const pathCodeHelpDesc = `
-This path generates and validates time-based one-time use passwords for a certain key.
-
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_keys.go
deleted file mode 100644
index 3f36aef..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_keys.go
+++ /dev/null
@@ -1,424 +0,0 @@
-package totp
-
-import (
- "bytes"
- "encoding/base32"
- "encoding/base64"
- "fmt"
- "image/png"
- "net/url"
- "strconv"
- "strings"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- otplib "github.com/pquerna/otp"
- totplib "github.com/pquerna/otp/totp"
-)
-
-func pathListKeys(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "keys/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathKeyList,
- },
-
- HelpSynopsis: pathKeyHelpSyn,
- HelpDescription: pathKeyHelpDesc,
- }
-}
-
-func pathKeys(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "keys/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": {
- Type: framework.TypeString,
- Description: "Name of the key.",
- },
-
- "generate": {
- Type: framework.TypeBool,
- Default: false,
- Description: "Determines if a key should be generated by Vault or if a key is being passed from another service.",
- },
-
- "exported": {
- Type: framework.TypeBool,
- Default: true,
- Description: "Determines if a QR code and url are returned upon generating a key. Only used if generate is true.",
- },
-
- "key_size": {
- Type: framework.TypeInt,
- Default: 20,
- Description: "Determines the size in bytes of the generated key. Only used if generate is true.",
- },
-
- "key": {
- Type: framework.TypeString,
- Description: "The shared master key used to generate a TOTP token. Only used if generate is false.",
- },
-
- "issuer": {
- Type: framework.TypeString,
- Description: `The name of the key's issuing organization. Required if generate is true.`,
- },
-
- "account_name": {
- Type: framework.TypeString,
- Description: `The name of the account associated with the key. Required if generate is true.`,
- },
-
- "period": {
- Type: framework.TypeDurationSecond,
- Default: 30,
- Description: `The length of time used to generate a counter for the TOTP token calculation.`,
- },
-
- "algorithm": {
- Type: framework.TypeString,
- Default: "SHA1",
- Description: `The hashing algorithm used to generate the TOTP token. Options include SHA1, SHA256 and SHA512.`,
- },
-
- "digits": {
- Type: framework.TypeInt,
- Default: 6,
- Description: `The number of digits in the generated TOTP token. This value can either be 6 or 8.`,
- },
-
- "skew": {
- Type: framework.TypeInt,
- Default: 1,
- Description: `The number of delay periods that are allowed when validating a TOTP token. This value can either be 0 or 1. Only used if generate is true.`,
- },
-
- "qr_size": {
- Type: framework.TypeInt,
- Default: 200,
- Description: `The pixel size of the generated square QR code. Only used if generate is true and exported is true. If this value is 0, a QR code will not be returned.`,
- },
-
- "url": {
- Type: framework.TypeString,
- Description: `A TOTP url string containing all of the parameters for key setup. Only used if generate is false.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathKeyRead,
- logical.UpdateOperation: b.pathKeyCreate,
- logical.DeleteOperation: b.pathKeyDelete,
- },
-
- HelpSynopsis: pathKeyHelpSyn,
- HelpDescription: pathKeyHelpDesc,
- }
-}
-
-func (b *backend) Key(s logical.Storage, n string) (*keyEntry, error) {
- entry, err := s.Get("key/" + n)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result keyEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- return &result, nil
-}
-
-func (b *backend) pathKeyDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- err := req.Storage.Delete("key/" + data.Get("name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathKeyRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- key, err := b.Key(req.Storage, data.Get("name").(string))
- if err != nil {
- return nil, err
- }
- if key == nil {
- return nil, nil
- }
-
- // Translate algorithm back to string
- algorithm := key.Algorithm.String()
-
- // Return values of key
- return &logical.Response{
- Data: map[string]interface{}{
- "issuer": key.Issuer,
- "account_name": key.AccountName,
- "period": key.Period,
- "algorithm": algorithm,
- "digits": key.Digits,
- },
- }, nil
-}
-
-func (b *backend) pathKeyList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("key/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
-}
-
-func (b *backend) pathKeyCreate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
- generate := data.Get("generate").(bool)
- exported := data.Get("exported").(bool)
- keyString := data.Get("key").(string)
- issuer := data.Get("issuer").(string)
- accountName := data.Get("account_name").(string)
- period := data.Get("period").(int)
- algorithm := data.Get("algorithm").(string)
- digits := data.Get("digits").(int)
- skew := data.Get("skew").(int)
- qrSize := data.Get("qr_size").(int)
- keySize := data.Get("key_size").(int)
- inputURL := data.Get("url").(string)
-
- if generate {
- if keyString != "" {
- return logical.ErrorResponse("a key should not be passed if generate is true"), nil
- }
- if inputURL != "" {
- return logical.ErrorResponse("a url should not be passed if generate is true"), nil
- }
- }
-
- // Read parameters from url if given
- if inputURL != "" {
- //Parse url
- urlObject, err := url.Parse(inputURL)
- if err != nil {
- return logical.ErrorResponse("an error occured while parsing url string"), err
- }
-
- //Set up query object
- urlQuery := urlObject.Query()
- path := strings.TrimPrefix(urlObject.Path, "/")
- index := strings.Index(path, ":")
-
- //Read issuer
- urlIssuer := urlQuery.Get("issuer")
- if urlIssuer != "" {
- issuer = urlIssuer
- } else {
- if index != -1 {
- issuer = path[:index]
- }
- }
-
- //Read account name
- if index == -1 {
- accountName = path
- } else {
- accountName = path[index+1:]
- }
-
- //Read key string
- keyString = urlQuery.Get("secret")
-
- //Read period
- periodQuery := urlQuery.Get("period")
- if periodQuery != "" {
- periodInt, err := strconv.Atoi(periodQuery)
- if err != nil {
- return logical.ErrorResponse("an error occured while parsing period value in url"), err
- }
- period = periodInt
- }
-
- //Read digits
- digitsQuery := urlQuery.Get("digits")
- if digitsQuery != "" {
- digitsInt, err := strconv.Atoi(digitsQuery)
- if err != nil {
- return logical.ErrorResponse("an error occured while parsing digits value in url"), err
- }
- digits = digitsInt
- }
-
- //Read algorithm
- algorithmQuery := urlQuery.Get("algorithm")
- if algorithmQuery != "" {
- algorithm = algorithmQuery
- }
- }
-
- // Translate digits and algorithm to a format the totp library understands
- var keyDigits otplib.Digits
- switch digits {
- case 6:
- keyDigits = otplib.DigitsSix
- case 8:
- keyDigits = otplib.DigitsEight
- default:
- return logical.ErrorResponse("the digits value can only be 6 or 8"), nil
- }
-
- var keyAlgorithm otplib.Algorithm
- switch algorithm {
- case "SHA1":
- keyAlgorithm = otplib.AlgorithmSHA1
- case "SHA256":
- keyAlgorithm = otplib.AlgorithmSHA256
- case "SHA512":
- keyAlgorithm = otplib.AlgorithmSHA512
- default:
- return logical.ErrorResponse("the algorithm value is not valid"), nil
- }
-
- // Enforce input value requirements
- if period <= 0 {
- return logical.ErrorResponse("the period value must be greater than zero"), nil
- }
-
- switch skew {
- case 0:
- case 1:
- default:
- return logical.ErrorResponse("the skew value must be 0 or 1"), nil
- }
-
- // QR size can be zero but it shouldn't be negative
- if qrSize < 0 {
- return logical.ErrorResponse("the qr_size value must be greater than or equal to zero"), nil
- }
-
- if keySize <= 0 {
- return logical.ErrorResponse("the key_size value must be greater than zero"), nil
- }
-
- // Period, Skew and Key Size need to be unsigned ints
- uintPeriod := uint(period)
- uintSkew := uint(skew)
- uintKeySize := uint(keySize)
-
- var response *logical.Response
-
- switch generate {
- case true:
- // If the key is generated, Account Name and Issuer are required.
- if accountName == "" {
- return logical.ErrorResponse("the account_name value is required for generated keys"), nil
- }
-
- if issuer == "" {
- return logical.ErrorResponse("the issuer value is required for generated keys"), nil
- }
-
- // Generate a new key
- keyObject, err := totplib.Generate(totplib.GenerateOpts{
- Issuer: issuer,
- AccountName: accountName,
- Period: uintPeriod,
- Digits: keyDigits,
- Algorithm: keyAlgorithm,
- SecretSize: uintKeySize,
- })
- if err != nil {
- return logical.ErrorResponse("an error occured while generating a key"), err
- }
-
- // Get key string value
- keyString = keyObject.Secret()
-
- // Skip returning the QR code and url if exported is set to false
- if exported {
- // Prepare the url and barcode
- urlString := keyObject.String()
-
- // Don't include QR code is size is set to zero
- if qrSize == 0 {
- response = &logical.Response{
- Data: map[string]interface{}{
- "url": urlString,
- },
- }
- } else {
- barcode, err := keyObject.Image(qrSize, qrSize)
- if err != nil {
- return logical.ErrorResponse("an error occured while generating a QR code image"), err
- }
-
- var buff bytes.Buffer
- png.Encode(&buff, barcode)
- b64Barcode := base64.StdEncoding.EncodeToString(buff.Bytes())
- response = &logical.Response{
- Data: map[string]interface{}{
- "url": urlString,
- "barcode": b64Barcode,
- },
- }
- }
- }
- default:
- if keyString == "" {
- return logical.ErrorResponse("the key value is required"), nil
- }
-
- _, err := base32.StdEncoding.DecodeString(keyString)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "invalid key value: %s", err)), nil
- }
- }
-
- // Store it
- entry, err := logical.StorageEntryJSON("key/"+name, &keyEntry{
- Key: keyString,
- Issuer: issuer,
- AccountName: accountName,
- Period: uintPeriod,
- Algorithm: keyAlgorithm,
- Digits: keyDigits,
- Skew: uintSkew,
- })
- if err != nil {
- return nil, err
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-type keyEntry struct {
- Key string `json:"key" mapstructure:"key" structs:"key"`
- Issuer string `json:"issuer" mapstructure:"issuer" structs:"issuer"`
- AccountName string `json:"account_name" mapstructure:"account_name" structs:"account_name"`
- Period uint `json:"period" mapstructure:"period" structs:"period"`
- Algorithm otplib.Algorithm `json:"algorithm" mapstructure:"algorithm" structs:"algorithm"`
- Digits otplib.Digits `json:"digits" mapstructure:"digits" structs:"digits"`
- Skew uint `json:"skew" mapstructure:"skew" structs:"skew"`
-}
-
-const pathKeyHelpSyn = `
-Manage the keys that can be created with this backend.
-`
-
-const pathKeyHelpDesc = `
-This path lets you manage the keys that can be created with this backend.
-
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go
deleted file mode 100644
index db85ba1..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package transit
-
-import (
- "strings"
-
- "github.com/hashicorp/vault/helper/keysutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend(conf)
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func Backend(conf *logical.BackendConfig) *backend {
- var b backend
- b.Backend = &framework.Backend{
- Paths: []*framework.Path{
- // Rotate/Config needs to come before Keys
- // as the handler is greedy
- b.pathConfig(),
- b.pathRotate(),
- b.pathRewrap(),
- b.pathKeys(),
- b.pathListKeys(),
- b.pathExportKeys(),
- b.pathEncrypt(),
- b.pathDecrypt(),
- b.pathDatakey(),
- b.pathRandom(),
- b.pathHash(),
- b.pathHMAC(),
- b.pathSign(),
- b.pathVerify(),
- },
-
- Secrets: []*framework.Secret{},
- Invalidate: b.invalidate,
- BackendType: logical.TypeLogical,
- }
-
- b.lm = keysutil.NewLockManager(conf.System.CachingDisabled())
-
- return &b
-}
-
-type backend struct {
- *framework.Backend
- lm *keysutil.LockManager
-}
-
-func (b *backend) invalidate(key string) {
- if b.Logger().IsTrace() {
- b.Logger().Trace("transit: invalidating key", "key", key)
- }
- switch {
- case strings.HasPrefix(key, "policy/"):
- name := strings.TrimPrefix(key, "policy/")
- b.lm.InvalidatePolicy(name)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go
deleted file mode 100644
index a9c27bc..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go
+++ /dev/null
@@ -1,1093 +0,0 @@
-package transit
-
-import (
- "encoding/base64"
- "fmt"
- "math/rand"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-
- uuid "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/keysutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- logicaltest "github.com/hashicorp/vault/logical/testing"
- "github.com/mitchellh/mapstructure"
-)
-
-const (
- testPlaintext = "the quick brown fox"
-)
-
-func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
- config := logical.TestBackendConfig()
- config.StorageView = &logical.InmemStorage{}
-
- b := Backend(config)
- if b == nil {
- t.Fatalf("failed to create backend")
- }
- err := b.Backend.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
- return b, config.StorageView
-}
-
-func TestBackend_basic(t *testing.T) {
- decryptData := make(map[string]interface{})
- logicaltest.Test(t, logicaltest.TestCase{
- Factory: Factory,
- Steps: []logicaltest.TestStep{
- testAccStepListPolicy(t, "test", true),
- testAccStepWritePolicy(t, "test", false),
- testAccStepListPolicy(t, "test", false),
- testAccStepReadPolicy(t, "test", false, false),
- testAccStepEncrypt(t, "test", testPlaintext, decryptData),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepEncrypt(t, "test", "", decryptData),
- testAccStepDecrypt(t, "test", "", decryptData),
- testAccStepDeleteNotDisabledPolicy(t, "test"),
- testAccStepEnableDeletion(t, "test"),
- testAccStepDeletePolicy(t, "test"),
- testAccStepWritePolicy(t, "test", false),
- testAccStepEnableDeletion(t, "test"),
- testAccStepDisableDeletion(t, "test"),
- testAccStepDeleteNotDisabledPolicy(t, "test"),
- testAccStepEnableDeletion(t, "test"),
- testAccStepDeletePolicy(t, "test"),
- testAccStepReadPolicy(t, "test", true, false),
- },
- })
-}
-
-func TestBackend_upsert(t *testing.T) {
- decryptData := make(map[string]interface{})
- logicaltest.Test(t, logicaltest.TestCase{
- Factory: Factory,
- Steps: []logicaltest.TestStep{
- testAccStepReadPolicy(t, "test", true, false),
- testAccStepListPolicy(t, "test", true),
- testAccStepEncryptUpsert(t, "test", testPlaintext, decryptData),
- testAccStepListPolicy(t, "test", false),
- testAccStepReadPolicy(t, "test", false, false),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- },
- })
-}
-
-func TestBackend_datakey(t *testing.T) {
- dataKeyInfo := make(map[string]interface{})
- logicaltest.Test(t, logicaltest.TestCase{
- Factory: Factory,
- Steps: []logicaltest.TestStep{
- testAccStepListPolicy(t, "test", true),
- testAccStepWritePolicy(t, "test", false),
- testAccStepListPolicy(t, "test", false),
- testAccStepReadPolicy(t, "test", false, false),
- testAccStepWriteDatakey(t, "test", false, 256, dataKeyInfo),
- testAccStepDecryptDatakey(t, "test", dataKeyInfo),
- testAccStepWriteDatakey(t, "test", true, 128, dataKeyInfo),
- },
- })
-}
-
-func TestBackend_rotation(t *testing.T) {
- decryptData := make(map[string]interface{})
- encryptHistory := make(map[int]map[string]interface{})
- logicaltest.Test(t, logicaltest.TestCase{
- Factory: Factory,
- Steps: []logicaltest.TestStep{
- testAccStepListPolicy(t, "test", true),
- testAccStepWritePolicy(t, "test", false),
- testAccStepListPolicy(t, "test", false),
- testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 0, encryptHistory),
- testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 1, encryptHistory),
- testAccStepRotate(t, "test"), // now v2
- testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 2, encryptHistory),
- testAccStepRotate(t, "test"), // now v3
- testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 3, encryptHistory),
- testAccStepRotate(t, "test"), // now v4
- testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 4, encryptHistory),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 99, encryptHistory),
- testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 2, encryptHistory),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 3, encryptHistory),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 99, encryptHistory),
- testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepDeleteNotDisabledPolicy(t, "test"),
- testAccStepAdjustPolicyMinDecryption(t, "test", 3),
- testAccStepAdjustPolicyMinEncryption(t, "test", 4),
- testAccStepReadPolicyWithVersions(t, "test", false, false, 3, 4),
- testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory),
- testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory),
- testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 2, encryptHistory),
- testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 3, encryptHistory),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepAdjustPolicyMinDecryption(t, "test", 1),
- testAccStepReadPolicyWithVersions(t, "test", false, false, 1, 4),
- testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepLoadVX(t, "test", decryptData, 2, encryptHistory),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepRewrap(t, "test", decryptData, 4),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepEnableDeletion(t, "test"),
- testAccStepDeletePolicy(t, "test"),
- testAccStepReadPolicy(t, "test", true, false),
- testAccStepListPolicy(t, "test", true),
- },
- })
-}
-
-func TestBackend_basic_derived(t *testing.T) {
- decryptData := make(map[string]interface{})
- logicaltest.Test(t, logicaltest.TestCase{
- Factory: Factory,
- Steps: []logicaltest.TestStep{
- testAccStepListPolicy(t, "test", true),
- testAccStepWritePolicy(t, "test", true),
- testAccStepListPolicy(t, "test", false),
- testAccStepReadPolicy(t, "test", false, true),
- testAccStepEncryptContext(t, "test", testPlaintext, "my-cool-context", decryptData),
- testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepEnableDeletion(t, "test"),
- testAccStepDeletePolicy(t, "test"),
- testAccStepReadPolicy(t, "test", true, true),
- },
- })
-}
-
-func testAccStepWritePolicy(t *testing.T, name string, derived bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "keys/" + name,
- Data: map[string]interface{}{
- "derived": derived,
- },
- }
-}
-
-func testAccStepListPolicy(t *testing.T, name string, expectNone bool) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ListOperation,
- Path: "keys",
- Check: func(resp *logical.Response) error {
- if resp == nil {
- return fmt.Errorf("missing response")
- }
- if expectNone {
- keysRaw, ok := resp.Data["keys"]
- if ok || keysRaw != nil {
- return fmt.Errorf("response data when expecting none")
- }
- return nil
- }
- if len(resp.Data) == 0 {
- return fmt.Errorf("no data returned")
- }
-
- var d struct {
- Keys []string `mapstructure:"keys"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- if len(d.Keys) > 0 && d.Keys[0] != name {
- return fmt.Errorf("bad name: %#v", d)
- }
- if len(d.Keys) != 1 {
- return fmt.Errorf("only 1 key expected, %d returned", len(d.Keys))
- }
- return nil
- },
- }
-}
-
-func testAccStepAdjustPolicyMinDecryption(t *testing.T, name string, minVer int) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "keys/" + name + "/config",
- Data: map[string]interface{}{
- "min_decryption_version": minVer,
- },
- }
-}
-func testAccStepAdjustPolicyMinEncryption(t *testing.T, name string, minVer int) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "keys/" + name + "/config",
- Data: map[string]interface{}{
- "min_encryption_version": minVer,
- },
- }
-}
-
-func testAccStepDisableDeletion(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "keys/" + name + "/config",
- Data: map[string]interface{}{
- "deletion_allowed": false,
- },
- }
-}
-
-func testAccStepEnableDeletion(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "keys/" + name + "/config",
- Data: map[string]interface{}{
- "deletion_allowed": true,
- },
- }
-}
-
-func testAccStepDeletePolicy(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "keys/" + name,
- }
-}
-
-func testAccStepDeleteNotDisabledPolicy(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.DeleteOperation,
- Path: "keys/" + name,
- ErrorOk: true,
- Check: func(resp *logical.Response) error {
- if resp == nil {
- return fmt.Errorf("Got nil response instead of error")
- }
- if resp.IsError() {
- return nil
- }
- return fmt.Errorf("expected error but did not get one")
- },
- }
-}
-
-func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool) logicaltest.TestStep {
- return testAccStepReadPolicyWithVersions(t, name, expectNone, derived, 1, 0)
-}
-
-func testAccStepReadPolicyWithVersions(t *testing.T, name string, expectNone, derived bool, minDecryptionVersion int, minEncryptionVersion int) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "keys/" + name,
- Check: func(resp *logical.Response) error {
- if resp == nil && !expectNone {
- return fmt.Errorf("missing response")
- } else if expectNone {
- if resp != nil {
- return fmt.Errorf("response when expecting none")
- }
- return nil
- }
- var d struct {
- Name string `mapstructure:"name"`
- Key []byte `mapstructure:"key"`
- Keys map[string]int64 `mapstructure:"keys"`
- Type string `mapstructure:"type"`
- Derived bool `mapstructure:"derived"`
- KDF string `mapstructure:"kdf"`
- DeletionAllowed bool `mapstructure:"deletion_allowed"`
- ConvergentEncryption bool `mapstructure:"convergent_encryption"`
- MinDecryptionVersion int `mapstructure:"min_decryption_version"`
- MinEncryptionVersion int `mapstructure:"min_encryption_version"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.Name != name {
- return fmt.Errorf("bad name: %#v", d)
- }
- if d.Type != keysutil.KeyType(keysutil.KeyType_AES256_GCM96).String() {
- return fmt.Errorf("bad key type: %#v", d)
- }
- // Should NOT get a key back
- if d.Key != nil {
- return fmt.Errorf("bad: %#v", d)
- }
- if d.Keys == nil {
- return fmt.Errorf("bad: %#v", d)
- }
- if d.MinDecryptionVersion != minDecryptionVersion {
- return fmt.Errorf("bad: %#v", d)
- }
- if d.MinEncryptionVersion != minEncryptionVersion {
- return fmt.Errorf("bad: %#v", d)
- }
- if d.DeletionAllowed == true {
- return fmt.Errorf("bad: %#v", d)
- }
- if d.Derived != derived {
- return fmt.Errorf("bad: %#v", d)
- }
- if derived && d.KDF != "hkdf_sha256" {
- return fmt.Errorf("bad: %#v", d)
- }
- return nil
- },
- }
-}
-
-func testAccStepEncrypt(
- t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "encrypt/" + name,
- Data: map[string]interface{}{
- "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)),
- },
- Check: func(resp *logical.Response) error {
- var d struct {
- Ciphertext string `mapstructure:"ciphertext"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- if d.Ciphertext == "" {
- return fmt.Errorf("missing ciphertext")
- }
- decryptData["ciphertext"] = d.Ciphertext
- return nil
- },
- }
-}
-
-func testAccStepEncryptUpsert(
- t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.CreateOperation,
- Path: "encrypt/" + name,
- Data: map[string]interface{}{
- "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)),
- },
- Check: func(resp *logical.Response) error {
- var d struct {
- Ciphertext string `mapstructure:"ciphertext"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- if d.Ciphertext == "" {
- return fmt.Errorf("missing ciphertext")
- }
- decryptData["ciphertext"] = d.Ciphertext
- return nil
- },
- }
-}
-
-func testAccStepEncryptContext(
- t *testing.T, name, plaintext, context string, decryptData map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "encrypt/" + name,
- Data: map[string]interface{}{
- "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)),
- "context": base64.StdEncoding.EncodeToString([]byte(context)),
- },
- Check: func(resp *logical.Response) error {
- var d struct {
- Ciphertext string `mapstructure:"ciphertext"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- if d.Ciphertext == "" {
- return fmt.Errorf("missing ciphertext")
- }
- decryptData["ciphertext"] = d.Ciphertext
- decryptData["context"] = base64.StdEncoding.EncodeToString([]byte(context))
- return nil
- },
- }
-}
-
-func testAccStepDecrypt(
- t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "decrypt/" + name,
- Data: decryptData,
- Check: func(resp *logical.Response) error {
- var d struct {
- Plaintext string `mapstructure:"plaintext"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- // Decode the base64
- plainRaw, err := base64.StdEncoding.DecodeString(d.Plaintext)
- if err != nil {
- return err
- }
-
- if string(plainRaw) != plaintext {
- return fmt.Errorf("plaintext mismatch: %s expect: %s, decryptData was %#v", plainRaw, plaintext, decryptData)
- }
- return nil
- },
- }
-}
-
-func testAccStepRewrap(
- t *testing.T, name string, decryptData map[string]interface{}, expectedVer int) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "rewrap/" + name,
- Data: decryptData,
- Check: func(resp *logical.Response) error {
- var d struct {
- Ciphertext string `mapstructure:"ciphertext"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- if d.Ciphertext == "" {
- return fmt.Errorf("missing ciphertext")
- }
- splitStrings := strings.Split(d.Ciphertext, ":")
- verString := splitStrings[1][1:]
- ver, err := strconv.Atoi(verString)
- if err != nil {
- return fmt.Errorf("Error pulling out version from verString '%s', ciphertext was %s", verString, d.Ciphertext)
- }
- if ver != expectedVer {
- return fmt.Errorf("Did not get expected version")
- }
- decryptData["ciphertext"] = d.Ciphertext
- return nil
- },
- }
-}
-
-func testAccStepEncryptVX(
- t *testing.T, name, plaintext string, decryptData map[string]interface{},
- ver int, encryptHistory map[int]map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "encrypt/" + name,
- Data: map[string]interface{}{
- "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)),
- },
- Check: func(resp *logical.Response) error {
- var d struct {
- Ciphertext string `mapstructure:"ciphertext"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- if d.Ciphertext == "" {
- return fmt.Errorf("missing ciphertext")
- }
- splitStrings := strings.Split(d.Ciphertext, ":")
- splitStrings[1] = "v" + strconv.Itoa(ver)
- ciphertext := strings.Join(splitStrings, ":")
- decryptData["ciphertext"] = ciphertext
- encryptHistory[ver] = map[string]interface{}{
- "ciphertext": ciphertext,
- }
- return nil
- },
- }
-}
-
-func testAccStepLoadVX(
- t *testing.T, name string, decryptData map[string]interface{},
- ver int, encryptHistory map[int]map[string]interface{}) logicaltest.TestStep {
- // This is really a no-op to allow us to do data manip in the check function
- return logicaltest.TestStep{
- Operation: logical.ReadOperation,
- Path: "keys/" + name,
- Check: func(resp *logical.Response) error {
- decryptData["ciphertext"] = encryptHistory[ver]["ciphertext"].(string)
- return nil
- },
- }
-}
-
-func testAccStepDecryptExpectFailure(
- t *testing.T, name, plaintext string, decryptData map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "decrypt/" + name,
- Data: decryptData,
- ErrorOk: true,
- Check: func(resp *logical.Response) error {
- if !resp.IsError() {
- return fmt.Errorf("expected error")
- }
- return nil
- },
- }
-}
-
-func testAccStepRotate(t *testing.T, name string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "keys/" + name + "/rotate",
- }
-}
-
-func testAccStepWriteDatakey(t *testing.T, name string,
- noPlaintext bool, bits int,
- dataKeyInfo map[string]interface{}) logicaltest.TestStep {
- data := map[string]interface{}{}
- subPath := "plaintext"
- if noPlaintext {
- subPath = "wrapped"
- }
- if bits != 256 {
- data["bits"] = bits
- }
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "datakey/" + subPath + "/" + name,
- Data: data,
- Check: func(resp *logical.Response) error {
- var d struct {
- Plaintext string `mapstructure:"plaintext"`
- Ciphertext string `mapstructure:"ciphertext"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
- if noPlaintext && len(d.Plaintext) != 0 {
- return fmt.Errorf("received plaintxt when we disabled it")
- }
- if !noPlaintext {
- if len(d.Plaintext) == 0 {
- return fmt.Errorf("did not get plaintext when we expected it")
- }
- dataKeyInfo["plaintext"] = d.Plaintext
- plainBytes, err := base64.StdEncoding.DecodeString(d.Plaintext)
- if err != nil {
- return fmt.Errorf("could not base64 decode plaintext string '%s'", d.Plaintext)
- }
- if len(plainBytes)*8 != bits {
- return fmt.Errorf("returned key does not have correct bit length")
- }
- }
- dataKeyInfo["ciphertext"] = d.Ciphertext
- return nil
- },
- }
-}
-
-func testAccStepDecryptDatakey(t *testing.T, name string,
- dataKeyInfo map[string]interface{}) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "decrypt/" + name,
- Data: dataKeyInfo,
- Check: func(resp *logical.Response) error {
- var d struct {
- Plaintext string `mapstructure:"plaintext"`
- }
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
- return err
- }
-
- if d.Plaintext != dataKeyInfo["plaintext"].(string) {
- return fmt.Errorf("plaintext mismatch: got '%s', expected '%s', decryptData was %#v", d.Plaintext, dataKeyInfo["plaintext"].(string), resp.Data)
- }
- return nil
- },
- }
-}
-
-func TestKeyUpgrade(t *testing.T) {
- key, _ := uuid.GenerateRandomBytes(32)
- p := &keysutil.Policy{
- Name: "test",
- Key: key,
- Type: keysutil.KeyType_AES256_GCM96,
- }
-
- p.MigrateKeyToKeysMap()
-
- if p.Key != nil ||
- p.Keys == nil ||
- len(p.Keys) != 1 ||
- !reflect.DeepEqual(p.Keys[1].Key, key) {
- t.Errorf("bad key migration, result is %#v", p.Keys)
- }
-}
-
-func TestDerivedKeyUpgrade(t *testing.T) {
- storage := &logical.InmemStorage{}
- key, _ := uuid.GenerateRandomBytes(32)
- context, _ := uuid.GenerateRandomBytes(32)
-
- p := &keysutil.Policy{
- Name: "test",
- Key: key,
- Type: keysutil.KeyType_AES256_GCM96,
- Derived: true,
- }
-
- p.MigrateKeyToKeysMap()
- p.Upgrade(storage) // Need to run the upgrade code to make the migration stick
-
- if p.KDF != keysutil.Kdf_hmac_sha256_counter {
- t.Fatalf("bad KDF value by default; counter val is %d, KDF val is %d, policy is %#v", keysutil.Kdf_hmac_sha256_counter, p.KDF, *p)
- }
-
- derBytesOld, err := p.DeriveKey(context, 1)
- if err != nil {
- t.Fatal(err)
- }
-
- derBytesOld2, err := p.DeriveKey(context, 1)
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(derBytesOld, derBytesOld2) {
- t.Fatal("mismatch of same context alg")
- }
-
- p.KDF = keysutil.Kdf_hkdf_sha256
- if p.NeedsUpgrade() {
- t.Fatal("expected no upgrade needed")
- }
-
- derBytesNew, err := p.DeriveKey(context, 1)
- if err != nil {
- t.Fatal(err)
- }
-
- derBytesNew2, err := p.DeriveKey(context, 1)
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(derBytesNew, derBytesNew2) {
- t.Fatal("mismatch of same context alg")
- }
-
- if reflect.DeepEqual(derBytesOld, derBytesNew) {
- t.Fatal("match of different context alg")
- }
-}
-
-func TestConvergentEncryption(t *testing.T) {
- testConvergentEncryptionCommon(t, 0)
- testConvergentEncryptionCommon(t, 2)
-}
-
-func testConvergentEncryptionCommon(t *testing.T, ver int) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/testkeynonderived",
- Data: map[string]interface{}{
- "derived": false,
- "convergent_encryption": true,
- },
- }
-
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if !resp.IsError() {
- t.Fatalf("bad: expected error response, got %#v", *resp)
- }
-
- p := &keysutil.Policy{
- Name: "testkey",
- Type: keysutil.KeyType_AES256_GCM96,
- Derived: true,
- ConvergentEncryption: true,
- ConvergentVersion: ver,
- }
-
- err = p.Rotate(storage)
- if err != nil {
- t.Fatal(err)
- }
-
- // First, test using an invalid length of nonce -- this is only used for v1 convergent
- req.Path = "encrypt/testkey"
- if ver < 2 {
- req.Data = map[string]interface{}{
- "plaintext": "emlwIHphcA==", // "zip zap"
- "nonce": "Zm9vIGJhcg==", // "foo bar"
- "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
- }
- resp, err = b.HandleRequest(req)
- if err == nil {
- t.Fatal("expected error, got nil")
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if !resp.IsError() {
- t.Fatalf("expected error response, got %#v", *resp)
- }
-
- // Ensure we fail if we do not provide a nonce
- req.Data = map[string]interface{}{
- "plaintext": "emlwIHphcA==", // "zip zap"
- "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
- }
- resp, err = b.HandleRequest(req)
- if err == nil && (resp == nil || !resp.IsError()) {
- t.Fatal("expected error response")
- }
- }
-
- // Now test encrypting the same value twice
- req.Data = map[string]interface{}{
- "plaintext": "emlwIHphcA==", // "zip zap"
- "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee"
- "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
- }
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.IsError() {
- t.Fatalf("got error response: %#v", *resp)
- }
- ciphertext1 := resp.Data["ciphertext"].(string)
-
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.IsError() {
- t.Fatalf("got error response: %#v", *resp)
- }
- ciphertext2 := resp.Data["ciphertext"].(string)
-
- if ciphertext1 != ciphertext2 {
- t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext1, ciphertext2)
- }
-
- // For sanity, also check a different nonce value...
- req.Data = map[string]interface{}{
- "plaintext": "emlwIHphcA==", // "zip zap"
- "nonce": "dHdvdGhyZWVmb3Vy", // "twothreefour"
- "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
- }
- if ver < 2 {
- req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour"
- } else {
- req.Data["context"] = "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOldandSdd7S"
- }
-
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.IsError() {
- t.Fatalf("got error response: %#v", *resp)
- }
- ciphertext3 := resp.Data["ciphertext"].(string)
-
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.IsError() {
- t.Fatalf("got error response: %#v", *resp)
- }
- ciphertext4 := resp.Data["ciphertext"].(string)
-
- if ciphertext3 != ciphertext4 {
- t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext3, ciphertext4)
- }
- if ciphertext1 == ciphertext3 {
- t.Fatalf("expected different ciphertexts")
- }
-
- // ...and a different context value
- req.Data = map[string]interface{}{
- "plaintext": "emlwIHphcA==", // "zip zap"
- "nonce": "dHdvdGhyZWVmb3Vy", // "twothreefour"
- "context": "qV4h9iQyvn+raODOer4JNAsOhkXBwdT4HZ677Ql4KLqXSU+Jk4C/fXBWbv6xkSYT",
- }
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.IsError() {
- t.Fatalf("got error response: %#v", *resp)
- }
- ciphertext5 := resp.Data["ciphertext"].(string)
-
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.IsError() {
- t.Fatalf("got error response: %#v", *resp)
- }
- ciphertext6 := resp.Data["ciphertext"].(string)
-
- if ciphertext5 != ciphertext6 {
- t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext5, ciphertext6)
- }
- if ciphertext1 == ciphertext5 {
- t.Fatalf("expected different ciphertexts")
- }
- if ciphertext3 == ciphertext5 {
- t.Fatalf("expected different ciphertexts")
- }
-
- // Finally, check operations on empty values
- // First, check without setting a plaintext at all
- req.Data = map[string]interface{}{
- "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee"
- "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
- }
- resp, err = b.HandleRequest(req)
- if err == nil {
- t.Fatal("expected error, got nil")
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if !resp.IsError() {
- t.Fatalf("expected error response, got: %#v", *resp)
- }
-
- // Now set plaintext to empty
- req.Data = map[string]interface{}{
- "plaintext": "",
- "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee"
- "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
- }
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.IsError() {
- t.Fatalf("got error response: %#v", *resp)
- }
- ciphertext7 := resp.Data["ciphertext"].(string)
-
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.IsError() {
- t.Fatalf("got error response: %#v", *resp)
- }
- ciphertext8 := resp.Data["ciphertext"].(string)
-
- if ciphertext7 != ciphertext8 {
- t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext7, ciphertext8)
- }
-}
-
-func TestPolicyFuzzing(t *testing.T) {
- var be *backend
- sysView := logical.TestSystemView()
-
- be = Backend(&logical.BackendConfig{
- System: sysView,
- })
- testPolicyFuzzingCommon(t, be)
-
- sysView.CachingDisabledVal = true
- be = Backend(&logical.BackendConfig{
- System: sysView,
- })
- testPolicyFuzzingCommon(t, be)
-}
-
-func testPolicyFuzzingCommon(t *testing.T, be *backend) {
- storage := &logical.InmemStorage{}
- wg := sync.WaitGroup{}
-
- funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"}
- //keys := []string{"test1", "test2", "test3", "test4", "test5"}
- keys := []string{"test1", "test2", "test3"}
-
- // This is the goroutine loop
- doFuzzy := func(id int) {
- // Check for panics, otherwise notify we're done
- defer func() {
- if err := recover(); err != nil {
- t.Fatalf("got a panic: %v", err)
- }
- wg.Done()
- }()
-
- // Holds the latest encrypted value for each key
- latestEncryptedText := map[string]string{}
-
- startTime := time.Now()
- req := &logical.Request{
- Storage: storage,
- Data: map[string]interface{}{},
- }
- fd := &framework.FieldData{}
-
- var chosenFunc, chosenKey string
-
- //t.Errorf("Starting %d", id)
- for {
- // Stop after 10 seconds
- if time.Now().Sub(startTime) > 10*time.Second {
- return
- }
-
- // Pick a function and a key
- chosenFunc = funcs[rand.Int()%len(funcs)]
- chosenKey = keys[rand.Int()%len(keys)]
-
- fd.Raw = map[string]interface{}{
- "name": chosenKey,
- }
- fd.Schema = be.pathKeys().Fields
-
- // Try to write the key to make sure it exists
- _, err := be.pathPolicyWrite(req, fd)
- if err != nil {
- t.Fatalf("got an error: %v", err)
- }
-
- switch chosenFunc {
- // Encrypt our plaintext and store the result
- case "encrypt":
- //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
- fd.Raw["plaintext"] = base64.StdEncoding.EncodeToString([]byte(testPlaintext))
- fd.Schema = be.pathEncrypt().Fields
- resp, err := be.pathEncryptWrite(req, fd)
- if err != nil {
- t.Fatalf("got an error: %v, resp is %#v", err, *resp)
- }
- latestEncryptedText[chosenKey] = resp.Data["ciphertext"].(string)
-
- // Rotate to a new key version
- case "rotate":
- //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
- fd.Schema = be.pathRotate().Fields
- resp, err := be.pathRotateWrite(req, fd)
- if err != nil {
- t.Fatalf("got an error: %v, resp is %#v, chosenKey is %s", err, *resp, chosenKey)
- }
-
- // Decrypt the ciphertext and compare the result
- case "decrypt":
- //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
- ct := latestEncryptedText[chosenKey]
- if ct == "" {
- continue
- }
-
- fd.Raw["ciphertext"] = ct
- fd.Schema = be.pathDecrypt().Fields
- resp, err := be.pathDecryptWrite(req, fd)
- if err != nil {
- // This could well happen since the min version is jumping around
- if resp.Data["error"].(string) == keysutil.ErrTooOld {
- continue
- }
- t.Fatalf("got an error: %v, resp is %#v, ciphertext was %s, chosenKey is %s, id is %d", err, *resp, ct, chosenKey, id)
- }
- ptb64 := resp.Data["plaintext"].(string)
- pt, err := base64.StdEncoding.DecodeString(ptb64)
- if err != nil {
- t.Fatalf("got an error decoding base64 plaintext: %v", err)
- return
- }
- if string(pt) != testPlaintext {
- t.Fatalf("got bad plaintext back: %s", pt)
- }
-
- // Change the min version, which also tests the archive functionality
- case "change_min_version":
- //t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
- resp, err := be.pathPolicyRead(req, fd)
- if err != nil {
- t.Fatalf("got an error reading policy %s: %v", chosenKey, err)
- }
- latestVersion := resp.Data["latest_version"].(int)
-
- // keys start at version 1 so we want [1, latestVersion] not [0, latestVersion)
- setVersion := (rand.Int() % latestVersion) + 1
- fd.Raw["min_decryption_version"] = setVersion
- fd.Schema = be.pathConfig().Fields
- resp, err = be.pathConfigWrite(req, fd)
- if err != nil {
- t.Fatalf("got an error setting min decryption version: %v", err)
- }
- }
- }
- }
-
- // Spawn 1000 of these workers for 10 seconds
- for i := 0; i < 1000; i++ {
- wg.Add(1)
- go doFuzzy(i)
- }
-
- // Wait for them all to finish
- wg.Wait()
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go
deleted file mode 100644
index 7cbd513..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package transit
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func (b *backend) pathConfig() *framework.Path {
- return &framework.Path{
- Pattern: "keys/" + framework.GenericNameRegex("name") + "/config",
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the key",
- },
-
- "min_decryption_version": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `If set, the minimum version of the key allowed
-to be decrypted. For signing keys, the minimum
-version allowed to be used for verification.`,
- },
-
- "min_encryption_version": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `If set, the minimum version of the key allowed
-to be used for encryption; or for signing keys,
-to be used for signing. If set to zero, only
-the latest version of the key is allowed.`,
- },
-
- "deletion_allowed": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Whether to allow deletion of the key",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathConfigWrite,
- },
-
- HelpSynopsis: pathConfigHelpSyn,
- HelpDescription: pathConfigHelpDesc,
- }
-}
-
-func (b *backend) pathConfigWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
-
- // Check if the policy already exists before we lock everything
- p, lock, err := b.lm.GetPolicyExclusive(req.Storage, name)
- if lock != nil {
- defer lock.Unlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return logical.ErrorResponse(
- fmt.Sprintf("no existing key named %s could be found", name)),
- logical.ErrInvalidRequest
- }
-
- resp := &logical.Response{}
-
- persistNeeded := false
-
- minDecryptionVersionRaw, ok := d.GetOk("min_decryption_version")
- if ok {
- minDecryptionVersion := minDecryptionVersionRaw.(int)
-
- if minDecryptionVersion < 0 {
- return logical.ErrorResponse("min decryption version cannot be negative"), nil
- }
-
- if minDecryptionVersion == 0 {
- minDecryptionVersion = 1
- resp.AddWarning("since Vault 0.3, transit key numbering starts at 1; forcing minimum to 1")
- }
-
- if minDecryptionVersion != p.MinDecryptionVersion {
- if minDecryptionVersion > p.LatestVersion {
- return logical.ErrorResponse(
- fmt.Sprintf("cannot set min decryption version of %d, latest key version is %d", minDecryptionVersion, p.LatestVersion)), nil
- }
- p.MinDecryptionVersion = minDecryptionVersion
- persistNeeded = true
- }
- }
-
- minEncryptionVersionRaw, ok := d.GetOk("min_encryption_version")
- if ok {
- minEncryptionVersion := minEncryptionVersionRaw.(int)
-
- if minEncryptionVersion < 0 {
- return logical.ErrorResponse("min encryption version cannot be negative"), nil
- }
-
- if minEncryptionVersion != p.MinEncryptionVersion {
- if minEncryptionVersion > p.LatestVersion {
- return logical.ErrorResponse(
- fmt.Sprintf("cannot set min encryption version of %d, latest key version is %d", minEncryptionVersion, p.LatestVersion)), nil
- }
- p.MinEncryptionVersion = minEncryptionVersion
- persistNeeded = true
- }
- }
-
- // Check here to get the final picture after the logic on each
- // individually. MinDecryptionVersion will always be 1 or above.
- if p.MinEncryptionVersion > 0 &&
- p.MinEncryptionVersion < p.MinDecryptionVersion {
- return logical.ErrorResponse(
- fmt.Sprintf("cannot set min encryption/decryption values; min encryption version of %d must be greater than or equal to min decryption version of %d", p.MinEncryptionVersion, p.MinDecryptionVersion)), nil
- }
-
- allowDeletionInt, ok := d.GetOk("deletion_allowed")
- if ok {
- allowDeletion := allowDeletionInt.(bool)
- if allowDeletion != p.DeletionAllowed {
- p.DeletionAllowed = allowDeletion
- persistNeeded = true
- }
- }
-
- // Add this as a guard here before persisting since we now require the min
- // decryption version to start at 1; even if it's not explicitly set here,
- // force the upgrade
- if p.MinDecryptionVersion == 0 {
- p.MinDecryptionVersion = 1
- persistNeeded = true
- }
-
- if !persistNeeded {
- return nil, nil
- }
-
- if len(resp.Warnings) == 0 {
- return nil, p.Persist(req.Storage)
- }
-
- return resp, p.Persist(req.Storage)
-}
-
-const pathConfigHelpSyn = `Configure a named encryption key`
-
-const pathConfigHelpDesc = `
-This path is used to configure the named key. Currently, this
-supports adjusting the minimum version of the key allowed to
-be used for decryption via the min_decryption_version paramter.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config_test.go
deleted file mode 100644
index 6819710..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config_test.go
+++ /dev/null
@@ -1,223 +0,0 @@
-package transit
-
-import (
- "strconv"
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestTransit_ConfigSettings(t *testing.T) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- doReq := func(req *logical.Request) *logical.Response {
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("got err:\n%#v\nreq:\n%#v\n", err, *req)
- }
- return resp
- }
- doErrReq := func(req *logical.Request) {
- resp, err := b.HandleRequest(req)
- if err == nil {
- if resp == nil || !resp.IsError() {
- t.Fatalf("expected error; req:\n%#v\n", *req)
- }
- }
- }
-
- // First create a key
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/aes",
- Data: map[string]interface{}{
- "derived": true,
- },
- }
- doReq(req)
-
- req.Path = "keys/ed"
- req.Data["type"] = "ed25519"
- doReq(req)
-
- delete(req.Data, "derived")
-
- req.Path = "keys/p256"
- req.Data["type"] = "ecdsa-p256"
- doReq(req)
-
- delete(req.Data, "type")
-
- req.Path = "keys/aes/rotate"
- doReq(req)
- doReq(req)
- doReq(req)
- doReq(req)
-
- req.Path = "keys/ed/rotate"
- doReq(req)
- doReq(req)
- doReq(req)
- doReq(req)
-
- req.Path = "keys/p256/rotate"
- doReq(req)
- doReq(req)
- doReq(req)
- doReq(req)
-
- req.Path = "keys/aes/config"
- // Too high
- req.Data["min_decryption_version"] = 7
- doErrReq(req)
- // Too low
- req.Data["min_decryption_version"] = -1
- doErrReq(req)
-
- delete(req.Data, "min_decryption_version")
- // Too high
- req.Data["min_encryption_version"] = 7
- doErrReq(req)
- // Too low
- req.Data["min_encryption_version"] = 7
- doErrReq(req)
-
- // Not allowed, cannot decrypt
- req.Data["min_decryption_version"] = 3
- req.Data["min_encryption_version"] = 2
- doErrReq(req)
-
- // Allowed
- req.Data["min_decryption_version"] = 2
- req.Data["min_encryption_version"] = 3
- doReq(req)
- req.Path = "keys/ed/config"
- doReq(req)
- req.Path = "keys/p256/config"
- doReq(req)
-
- req.Data = map[string]interface{}{
- "plaintext": "abcd",
- "context": "abcd",
- }
-
- maxKeyVersion := 5
- key := "aes"
-
- testHMAC := func(ver int, valid bool) {
- req.Path = "hmac/" + key
- delete(req.Data, "hmac")
- if ver == maxKeyVersion {
- delete(req.Data, "key_version")
- } else {
- req.Data["key_version"] = ver
- }
-
- if !valid {
- doErrReq(req)
- return
- }
-
- resp := doReq(req)
- ct := resp.Data["hmac"].(string)
- if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) {
- t.Fatal("wrong hmac version")
- }
-
- req.Path = "verify/" + key
- delete(req.Data, "key_version")
- req.Data["hmac"] = resp.Data["hmac"]
- doReq(req)
- }
-
- testEncryptDecrypt := func(ver int, valid bool) {
- req.Path = "encrypt/" + key
- delete(req.Data, "ciphertext")
- if ver == maxKeyVersion {
- delete(req.Data, "key_version")
- } else {
- req.Data["key_version"] = ver
- }
-
- if !valid {
- doErrReq(req)
- return
- }
-
- resp := doReq(req)
- ct := resp.Data["ciphertext"].(string)
- if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) {
- t.Fatal("wrong encryption version")
- }
-
- req.Path = "decrypt/" + key
- delete(req.Data, "key_version")
- req.Data["ciphertext"] = resp.Data["ciphertext"]
- doReq(req)
- }
- testEncryptDecrypt(5, true)
- testEncryptDecrypt(4, true)
- testEncryptDecrypt(3, true)
- testEncryptDecrypt(2, false)
- testHMAC(5, true)
- testHMAC(4, true)
- testHMAC(3, true)
- testHMAC(2, false)
-
- delete(req.Data, "plaintext")
- req.Data["input"] = "abcd"
- key = "ed"
- testSignVerify := func(ver int, valid bool) {
- req.Path = "sign/" + key
- delete(req.Data, "signature")
- if ver == maxKeyVersion {
- delete(req.Data, "key_version")
- } else {
- req.Data["key_version"] = ver
- }
-
- if !valid {
- doErrReq(req)
- return
- }
-
- resp := doReq(req)
- ct := resp.Data["signature"].(string)
- if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) {
- t.Fatal("wrong signature version")
- }
-
- req.Path = "verify/" + key
- delete(req.Data, "key_version")
- req.Data["signature"] = resp.Data["signature"]
- doReq(req)
- }
- testSignVerify(5, true)
- testSignVerify(4, true)
- testSignVerify(3, true)
- testSignVerify(2, false)
- testHMAC(5, true)
- testHMAC(4, true)
- testHMAC(3, true)
- testHMAC(2, false)
-
- delete(req.Data, "context")
- key = "p256"
- testSignVerify(5, true)
- testSignVerify(4, true)
- testSignVerify(3, true)
- testSignVerify(2, false)
- testHMAC(5, true)
- testHMAC(4, true)
- testHMAC(3, true)
- testHMAC(2, false)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go
deleted file mode 100644
index 7af1a03..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package transit
-
-import (
- "crypto/rand"
- "encoding/base64"
- "fmt"
-
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func (b *backend) pathDatakey() *framework.Path {
- return &framework.Path{
- Pattern: "datakey/" + framework.GenericNameRegex("plaintext") + "/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The backend key used for encrypting the data key",
- },
-
- "plaintext": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `"plaintext" will return the key in both plaintext and
-ciphertext; "wrapped" will return the ciphertext only.`,
- },
-
- "context": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Context for key derivation. Required for derived keys.",
- },
-
- "nonce": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Nonce for when convergent encryption v1 is used (only in Vault 0.6.1)",
- },
-
- "bits": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `Number of bits for the key; currently 128, 256,
-and 512 bits are supported. Defaults to 256.`,
- Default: 256,
- },
-
- "key_version": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `The version of the Vault key to use for
-encryption of the data key. Must be 0 (for latest)
-or a value greater than or equal to the
-min_encryption_version configured on the key.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathDatakeyWrite,
- },
-
- HelpSynopsis: pathDatakeyHelpSyn,
- HelpDescription: pathDatakeyHelpDesc,
- }
-}
-
-func (b *backend) pathDatakeyWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- ver := d.Get("key_version").(int)
-
- plaintext := d.Get("plaintext").(string)
- plaintextAllowed := false
- switch plaintext {
- case "plaintext":
- plaintextAllowed = true
- case "wrapped":
- default:
- return logical.ErrorResponse("Invalid path, must be 'plaintext' or 'wrapped'"), logical.ErrInvalidRequest
- }
-
- var err error
-
- // Decode the context if any
- contextRaw := d.Get("context").(string)
- var context []byte
- if len(contextRaw) != 0 {
- context, err = base64.StdEncoding.DecodeString(contextRaw)
- if err != nil {
- return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest
- }
- }
-
- // Decode the nonce if any
- nonceRaw := d.Get("nonce").(string)
- var nonce []byte
- if len(nonceRaw) != 0 {
- nonce, err = base64.StdEncoding.DecodeString(nonceRaw)
- if err != nil {
- return logical.ErrorResponse("failed to base64-decode nonce"), logical.ErrInvalidRequest
- }
- }
-
- // Get the policy
- p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
- }
-
- newKey := make([]byte, 32)
- bits := d.Get("bits").(int)
- switch bits {
- case 512:
- newKey = make([]byte, 64)
- case 256:
- case 128:
- newKey = make([]byte, 16)
- default:
- return logical.ErrorResponse("invalid bit length"), logical.ErrInvalidRequest
- }
- _, err = rand.Read(newKey)
- if err != nil {
- return nil, err
- }
-
- ciphertext, err := p.Encrypt(ver, context, nonce, base64.StdEncoding.EncodeToString(newKey))
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- case errutil.InternalError:
- return nil, err
- default:
- return nil, err
- }
- }
-
- if ciphertext == "" {
- return nil, fmt.Errorf("empty ciphertext returned")
- }
-
- // Generate the response
- resp := &logical.Response{
- Data: map[string]interface{}{
- "ciphertext": ciphertext,
- },
- }
-
- if plaintextAllowed {
- resp.Data["plaintext"] = base64.StdEncoding.EncodeToString(newKey)
- }
-
- return resp, nil
-}
-
-const pathDatakeyHelpSyn = `Generate a data key`
-
-const pathDatakeyHelpDesc = `
-This path can be used to generate a data key: a random
-key of a certain length that can be used for encryption
-and decryption, protected by the named backend key. 128, 256,
-or 512 bits can be specified; if not specified, the default
-is 256 bits. Call with the the "wrapped" path to prevent the
-(base64-encoded) plaintext key from being returned along with
-the encrypted key, the "plaintext" path returns both.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go
deleted file mode 100644
index 9750beb..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package transit
-
-import (
- "encoding/base64"
- "fmt"
-
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/mitchellh/mapstructure"
-)
-
-func (b *backend) pathDecrypt() *framework.Path {
- return &framework.Path{
- Pattern: "decrypt/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the policy",
- },
-
- "ciphertext": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
-The ciphertext to decrypt, provided as returned by encrypt.`,
- },
-
- "context": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
-Base64 encoded context for key derivation. Required if key derivation is
-enabled.`,
- },
-
- "nonce": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
-Base64 encoded nonce value used during encryption. Must be provided if
-convergent encryption is enabled for this key and the key was generated with
-Vault 0.6.1. Not required for keys created in 0.6.2+.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathDecryptWrite,
- },
-
- HelpSynopsis: pathDecryptHelpSyn,
- HelpDescription: pathDecryptHelpDesc,
- }
-}
-
-func (b *backend) pathDecryptWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- batchInputRaw := d.Raw["batch_input"]
- var batchInputItems []BatchRequestItem
- var err error
- if batchInputRaw != nil {
- err = mapstructure.Decode(batchInputRaw, &batchInputItems)
- if err != nil {
- return nil, fmt.Errorf("failed to parse batch input: %v", err)
- }
-
- if len(batchInputItems) == 0 {
- return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest
- }
- } else {
- ciphertext := d.Get("ciphertext").(string)
- if len(ciphertext) == 0 {
- return logical.ErrorResponse("missing ciphertext to decrypt"), logical.ErrInvalidRequest
- }
-
- batchInputItems = make([]BatchRequestItem, 1)
- batchInputItems[0] = BatchRequestItem{
- Ciphertext: ciphertext,
- Context: d.Get("context").(string),
- Nonce: d.Get("nonce").(string),
- }
- }
-
- batchResponseItems := make([]BatchResponseItem, len(batchInputItems))
- contextSet := len(batchInputItems[0].Context) != 0
-
- for i, item := range batchInputItems {
- if (len(item.Context) == 0 && contextSet) || (len(item.Context) != 0 && !contextSet) {
- return logical.ErrorResponse("context should be set either in all the request blocks or in none"), logical.ErrInvalidRequest
- }
-
- if item.Ciphertext == "" {
- batchResponseItems[i].Error = "missing ciphertext to decrypt"
- continue
- }
-
- // Decode the context
- if len(item.Context) != 0 {
- batchInputItems[i].DecodedContext, err = base64.StdEncoding.DecodeString(item.Context)
- if err != nil {
- batchResponseItems[i].Error = err.Error()
- continue
- }
- }
-
- // Decode the nonce
- if len(item.Nonce) != 0 {
- batchInputItems[i].DecodedNonce, err = base64.StdEncoding.DecodeString(item.Nonce)
- if err != nil {
- batchResponseItems[i].Error = err.Error()
- continue
- }
- }
- }
-
- // Get the policy
- p, lock, err := b.lm.GetPolicyShared(req.Storage, d.Get("name").(string))
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
- }
-
- for i, item := range batchInputItems {
- if batchResponseItems[i].Error != "" {
- continue
- }
-
- plaintext, err := p.Decrypt(item.DecodedContext, item.DecodedNonce, item.Ciphertext)
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- batchResponseItems[i].Error = err.Error()
- continue
- default:
- return nil, err
- }
- }
- batchResponseItems[i].Plaintext = plaintext
- }
-
- resp := &logical.Response{}
- if batchInputRaw != nil {
- resp.Data = map[string]interface{}{
- "batch_results": batchResponseItems,
- }
- } else {
- if batchResponseItems[0].Error != "" {
- return logical.ErrorResponse(batchResponseItems[0].Error), logical.ErrInvalidRequest
- }
- resp.Data = map[string]interface{}{
- "plaintext": batchResponseItems[0].Plaintext,
- }
- }
-
- return resp, nil
-}
-
-const pathDecryptHelpSyn = `Decrypt a ciphertext value using a named key`
-
-const pathDecryptHelpDesc = `
-This path uses the named key from the request path to decrypt a user
-provided ciphertext. The plaintext is returned base64 encoded.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt_test.go
deleted file mode 100644
index e0d4b6e..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package transit
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// Case1: If batch decryption input is not base64 encoded, it should fail.
-func TestTransit_BatchDecryptionCase1(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, s := createBackendWithStorage(t)
-
- batchEncryptionInput := []interface{}{
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
- map[string]interface{}{"plaintext": "Cg=="},
- }
-
- batchEncryptionData := map[string]interface{}{
- "batch_input": batchEncryptionInput,
- }
-
- batchEncryptionReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: batchEncryptionData,
- }
- resp, err = b.HandleRequest(batchEncryptionReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchDecryptionData := map[string]interface{}{
- "batch_input": resp.Data["batch_results"],
- }
-
- batchDecryptionReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "decrypt/upserted_key",
- Storage: s,
- Data: batchDecryptionData,
- }
- resp, err = b.HandleRequest(batchDecryptionReq)
- if err == nil {
- t.Fatalf("expected an error")
- }
-}
-
-// Case2: Normal case of batch decryption
-func TestTransit_BatchDecryptionCase2(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, s := createBackendWithStorage(t)
-
- batchEncryptionInput := []interface{}{
- map[string]interface{}{"plaintext": "Cg=="},
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
- }
- batchEncryptionData := map[string]interface{}{
- "batch_input": batchEncryptionInput,
- }
-
- batchEncryptionReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: batchEncryptionData,
- }
- resp, err = b.HandleRequest(batchEncryptionReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
- batchDecryptionInput := make([]interface{}, len(batchResponseItems))
- for i, item := range batchResponseItems {
- batchDecryptionInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext}
- }
- batchDecryptionData := map[string]interface{}{
- "batch_input": batchDecryptionInput,
- }
-
- batchDecryptionReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "decrypt/upserted_key",
- Storage: s,
- Data: batchDecryptionData,
- }
- resp, err = b.HandleRequest(batchDecryptionReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchDecryptionResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
-
- plaintext1 := "dGhlIHF1aWNrIGJyb3duIGZveA=="
- plaintext2 := "Cg=="
- for _, item := range batchDecryptionResponseItems {
- if item.Plaintext != plaintext1 && item.Plaintext != plaintext2 {
- t.Fatalf("bad: plaintext: %q", item.Plaintext)
- }
- }
-}
-
-// Case3: Test batch decryption with a derived key
-func TestTransit_BatchDecryptionCase3(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, s := createBackendWithStorage(t)
-
- policyData := map[string]interface{}{
- "derived": true,
- }
-
- policyReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "keys/existing_key",
- Storage: s,
- Data: policyData,
- }
-
- resp, err = b.HandleRequest(policyReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchInput := []interface{}{
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dGVzdGNvbnRleHQ="},
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dGVzdGNvbnRleHQ="},
- }
-
- batchData := map[string]interface{}{
- "batch_input": batchInput,
- }
- batchReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "encrypt/existing_key",
- Storage: s,
- Data: batchData,
- }
- resp, err = b.HandleRequest(batchReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchDecryptionInputItems := resp.Data["batch_results"].([]BatchResponseItem)
-
- batchDecryptionInput := make([]interface{}, len(batchDecryptionInputItems))
- for i, item := range batchDecryptionInputItems {
- batchDecryptionInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext, "context": "dGVzdGNvbnRleHQ="}
- }
-
- batchDecryptionData := map[string]interface{}{
- "batch_input": batchDecryptionInput,
- }
-
- batchDecryptionReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "decrypt/existing_key",
- Storage: s,
- Data: batchDecryptionData,
- }
- resp, err = b.HandleRequest(batchDecryptionReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchDecryptionResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
-
- plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
- for _, item := range batchDecryptionResponseItems {
- if item.Plaintext != plaintext {
- t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, item.Plaintext)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go
deleted file mode 100644
index 3b60198..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go
+++ /dev/null
@@ -1,302 +0,0 @@
-package transit
-
-import (
- "encoding/base64"
- "fmt"
- "sync"
-
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/helper/keysutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/mitchellh/mapstructure"
-)
-
-// BatchRequestItem represents a request item for batch processing
-type BatchRequestItem struct {
- // Context for key derivation. This is required for derived keys.
- Context string `json:"context" structs:"context" mapstructure:"context"`
-
- // DecodedContext is the base64 decoded version of Context
- DecodedContext []byte
-
- // Plaintext for encryption
- Plaintext string `json:"plaintext" structs:"plaintext" mapstructure:"plaintext"`
-
- // Ciphertext for decryption
- Ciphertext string `json:"ciphertext" structs:"ciphertext" mapstructure:"ciphertext"`
-
- // Nonce to be used when v1 convergent encryption is used
- Nonce string `json:"nonce" structs:"nonce" mapstructure:"nonce"`
-
- // The key version to be used for encryption
- KeyVersion int `json:"key_version" structs:"key_version" mapstructure:"key_version"`
-
- // DecodedNonce is the base64 decoded version of Nonce
- DecodedNonce []byte
-}
-
-// BatchResponseItem represents a response item for batch processing
-type BatchResponseItem struct {
- // Ciphertext for the plaintext present in the corresponding batch
- // request item
- Ciphertext string `json:"ciphertext,omitempty" structs:"ciphertext" mapstructure:"ciphertext"`
-
- // Plaintext for the ciphertext present in the corresponsding batch
- // request item
- Plaintext string `json:"plaintext,omitempty" structs:"plaintext" mapstructure:"plaintext"`
-
- // Error, if set represents a failure encountered while encrypting a
- // corresponding batch request item
- Error string `json:"error,omitempty" structs:"error" mapstructure:"error"`
-}
-
-func (b *backend) pathEncrypt() *framework.Path {
- return &framework.Path{
- Pattern: "encrypt/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the policy",
- },
-
- "plaintext": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Base64 encoded plaintext value to be encrypted",
- },
-
- "context": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Base64 encoded context for key derivation. Required if key derivation is enabled",
- },
-
- "nonce": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `
-Base64 encoded nonce value. Must be provided if convergent encryption is
-enabled for this key and the key was generated with Vault 0.6.1. Not required
-for keys created in 0.6.2+. The value must be exactly 96 bits (12 bytes) long
-and the user must ensure that for any given context (and thus, any given
-encryption key) this nonce value is **never reused**.
-`,
- },
-
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "aes256-gcm96",
- Description: `
-This parameter is required when encryption key is expected to be created.
-When performing an upsert operation, the type of key to create. Currently,
-"aes256-gcm96" (symmetric) is the only type supported. Defaults to
-"aes256-gcm96".`,
- },
-
- "convergent_encryption": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `
-This parameter will only be used when a key is expected to be created. Whether
-to support convergent encryption. This is only supported when using a key with
-key derivation enabled and will require all requests to carry both a context
-and 96-bit (12-byte) nonce. The given nonce will be used in place of a randomly
-generated nonce. As a result, when the same context and nonce are supplied, the
-same ciphertext is generated. It is *very important* when using this mode that
-you ensure that all nonces are unique for a given context. Failing to do so
-will severely impact the ciphertext's security.`,
- },
-
- "key_version": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `The version of the key to use for encryption.
-Must be 0 (for latest) or a value greater than or equal
-to the min_encryption_version configured on the key.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.CreateOperation: b.pathEncryptWrite,
- logical.UpdateOperation: b.pathEncryptWrite,
- },
-
- ExistenceCheck: b.pathEncryptExistenceCheck,
-
- HelpSynopsis: pathEncryptHelpSyn,
- HelpDescription: pathEncryptHelpDesc,
- }
-}
-
-func (b *backend) pathEncryptExistenceCheck(
- req *logical.Request, d *framework.FieldData) (bool, error) {
- name := d.Get("name").(string)
- p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return false, err
- }
- return p != nil, nil
-}
-
-func (b *backend) pathEncryptWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- var err error
-
- batchInputRaw := d.Raw["batch_input"]
- var batchInputItems []BatchRequestItem
- if batchInputRaw != nil {
- err = mapstructure.Decode(batchInputRaw, &batchInputItems)
- if err != nil {
- return nil, fmt.Errorf("failed to parse batch input: %v", err)
- }
-
- if len(batchInputItems) == 0 {
- return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest
- }
- } else {
- valueRaw, ok := d.GetOk("plaintext")
- if !ok {
- return logical.ErrorResponse("missing plaintext to encrypt"), logical.ErrInvalidRequest
- }
-
- batchInputItems = make([]BatchRequestItem, 1)
- batchInputItems[0] = BatchRequestItem{
- Plaintext: valueRaw.(string),
- Context: d.Get("context").(string),
- Nonce: d.Get("nonce").(string),
- KeyVersion: d.Get("key_version").(int),
- }
- }
-
- batchResponseItems := make([]BatchResponseItem, len(batchInputItems))
- contextSet := len(batchInputItems[0].Context) != 0
-
- // Before processing the batch request items, get the policy. If the
- // policy is supposed to be upserted, then determine if 'derived' is to
- // be set or not, based on the presence of 'context' field in all the
- // input items.
- for i, item := range batchInputItems {
- if (len(item.Context) == 0 && contextSet) || (len(item.Context) != 0 && !contextSet) {
- return logical.ErrorResponse("context should be set either in all the request blocks or in none"), logical.ErrInvalidRequest
- }
-
- _, err := base64.StdEncoding.DecodeString(item.Plaintext)
- if err != nil {
- batchResponseItems[i].Error = "failed to base64-decode plaintext"
- continue
- }
-
- // Decode the context
- if len(item.Context) != 0 {
- batchInputItems[i].DecodedContext, err = base64.StdEncoding.DecodeString(item.Context)
- if err != nil {
- batchResponseItems[i].Error = err.Error()
- continue
- }
- }
-
- // Decode the nonce
- if len(item.Nonce) != 0 {
- batchInputItems[i].DecodedNonce, err = base64.StdEncoding.DecodeString(item.Nonce)
- if err != nil {
- batchResponseItems[i].Error = err.Error()
- continue
- }
- }
- }
-
- // Get the policy
- var p *keysutil.Policy
- var lock *sync.RWMutex
- var upserted bool
- if req.Operation == logical.CreateOperation {
- convergent := d.Get("convergent_encryption").(bool)
- if convergent && !contextSet {
- return logical.ErrorResponse("convergent encryption requires derivation to be enabled, so context is required"), nil
- }
-
- polReq := keysutil.PolicyRequest{
- Storage: req.Storage,
- Name: name,
- Derived: contextSet,
- Convergent: convergent,
- }
-
- keyType := d.Get("type").(string)
- switch keyType {
- case "aes256-gcm96":
- polReq.KeyType = keysutil.KeyType_AES256_GCM96
- case "ecdsa-p256":
- return logical.ErrorResponse(fmt.Sprintf("key type %v not supported for this operation", keyType)), logical.ErrInvalidRequest
- default:
- return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest
- }
-
- p, lock, upserted, err = b.lm.GetPolicyUpsert(polReq)
-
- } else {
- p, lock, err = b.lm.GetPolicyShared(req.Storage, name)
- }
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
- }
-
- // Process batch request items. If encryption of any request
- // item fails, respectively mark the error in the response
- // collection and continue to process other items.
- for i, item := range batchInputItems {
- if batchResponseItems[i].Error != "" {
- continue
- }
-
- ciphertext, err := p.Encrypt(item.KeyVersion, item.DecodedContext, item.DecodedNonce, item.Plaintext)
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- batchResponseItems[i].Error = err.Error()
- continue
- default:
- return nil, err
- }
- }
-
- if ciphertext == "" {
- return nil, fmt.Errorf("empty ciphertext returned for input item %d", i)
- }
-
- batchResponseItems[i].Ciphertext = ciphertext
- }
-
- resp := &logical.Response{}
- if batchInputRaw != nil {
- resp.Data = map[string]interface{}{
- "batch_results": batchResponseItems,
- }
- } else {
- if batchResponseItems[0].Error != "" {
- return logical.ErrorResponse(batchResponseItems[0].Error), logical.ErrInvalidRequest
- }
- resp.Data = map[string]interface{}{
- "ciphertext": batchResponseItems[0].Ciphertext,
- }
- }
-
- if req.Operation == logical.CreateOperation && !upserted {
- resp.AddWarning("Attempted creation of the key during the encrypt operation, but it was created beforehand")
- }
- return resp, nil
-}
-
-const pathEncryptHelpSyn = `Encrypt a plaintext value or a batch of plaintext
-blocks using a named key`
-
-const pathEncryptHelpDesc = `
-This path uses the named key from the request path to encrypt a user provided
-plaintext or a batch of plaintext blocks. The plaintext must be base64 encoded.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt_test.go
deleted file mode 100644
index 6ab20db..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt_test.go
+++ /dev/null
@@ -1,547 +0,0 @@
-package transit
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
- "github.com/mitchellh/mapstructure"
-)
-
-// Case1: Ensure that batch encryption did not affect the normal flow of
-// encrypting the plaintext with a pre-existing key.
-func TestTransit_BatchEncryptionCase1(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, s := createBackendWithStorage(t)
-
- // Create the policy
- policyReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "keys/existing_key",
- Storage: s,
- }
- resp, err = b.HandleRequest(policyReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
-
- encData := map[string]interface{}{
- "plaintext": plaintext,
- }
-
- encReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "encrypt/existing_key",
- Storage: s,
- Data: encData,
- }
- resp, err = b.HandleRequest(encReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- ciphertext := resp.Data["ciphertext"]
-
- decData := map[string]interface{}{
- "ciphertext": ciphertext,
- }
- decReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "decrypt/existing_key",
- Storage: s,
- Data: decData,
- }
- resp, err = b.HandleRequest(decReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["plaintext"] != plaintext {
- t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
- }
-}
-
-// Case2: Ensure that batch encryption did not affect the normal flow of
-// encrypting the plaintext with the key upserted.
-func TestTransit_BatchEncryptionCase2(t *testing.T) {
- var resp *logical.Response
- var err error
- b, s := createBackendWithStorage(t)
-
- // Upsert the key and encrypt the data
- plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
-
- encData := map[string]interface{}{
- "plaintext": plaintext,
- }
-
- encReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: encData,
- }
- resp, err = b.HandleRequest(encReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- ciphertext := resp.Data["ciphertext"]
- decData := map[string]interface{}{
- "ciphertext": ciphertext,
- }
-
- policyReq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "keys/upserted_key",
- Storage: s,
- }
-
- resp, err = b.HandleRequest(policyReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- decReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "decrypt/upserted_key",
- Storage: s,
- Data: decData,
- }
- resp, err = b.HandleRequest(decReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["plaintext"] != plaintext {
- t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
- }
-}
-
-// Case3: If batch encryption input is not base64 encoded, it should fail.
-func TestTransit_BatchEncryptionCase3(t *testing.T) {
- var err error
-
- b, s := createBackendWithStorage(t)
-
- batchInput := `[{"plaintext":"dGhlIHF1aWNrIGJyb3duIGZveA=="}]`
- batchData := map[string]interface{}{
- "batch_input": batchInput,
- }
-
- batchReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: batchData,
- }
- _, err = b.HandleRequest(batchReq)
- if err == nil {
- t.Fatal("expected an error")
- }
-}
-
-// Case4: Test batch encryption with an existing key
-func TestTransit_BatchEncryptionCase4(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, s := createBackendWithStorage(t)
-
- policyReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "keys/existing_key",
- Storage: s,
- }
- resp, err = b.HandleRequest(policyReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchInput := []interface{}{
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
- }
-
- batchData := map[string]interface{}{
- "batch_input": batchInput,
- }
- batchReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "encrypt/existing_key",
- Storage: s,
- Data: batchData,
- }
- resp, err = b.HandleRequest(batchReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
-
- decReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "decrypt/existing_key",
- Storage: s,
- }
-
- plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
-
- for _, item := range batchResponseItems {
- decReq.Data = map[string]interface{}{
- "ciphertext": item.Ciphertext,
- }
- resp, err = b.HandleRequest(decReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["plaintext"] != plaintext {
- t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
- }
- }
-}
-
-// Case5: Test batch encryption with an existing derived key
-func TestTransit_BatchEncryptionCase5(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, s := createBackendWithStorage(t)
-
- policyData := map[string]interface{}{
- "derived": true,
- }
-
- policyReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "keys/existing_key",
- Storage: s,
- Data: policyData,
- }
-
- resp, err = b.HandleRequest(policyReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchInput := []interface{}{
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
- }
-
- batchData := map[string]interface{}{
- "batch_input": batchInput,
- }
-
- batchReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "encrypt/existing_key",
- Storage: s,
- Data: batchData,
- }
- resp, err = b.HandleRequest(batchReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
-
- decReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "decrypt/existing_key",
- Storage: s,
- }
-
- plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
-
- for _, item := range batchResponseItems {
- decReq.Data = map[string]interface{}{
- "ciphertext": item.Ciphertext,
- "context": "dmlzaGFsCg==",
- }
- resp, err = b.HandleRequest(decReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["plaintext"] != plaintext {
- t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
- }
- }
-}
-
-// Case6: Test batch encryption with an upserted non-derived key
-func TestTransit_BatchEncryptionCase6(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, s := createBackendWithStorage(t)
-
- batchInput := []interface{}{
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
- }
-
- batchData := map[string]interface{}{
- "batch_input": batchInput,
- }
- batchReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: batchData,
- }
- resp, err = b.HandleRequest(batchReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
-
- decReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "decrypt/upserted_key",
- Storage: s,
- }
-
- plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
-
- for _, responseItem := range batchResponseItems {
- var item BatchResponseItem
- if err := mapstructure.Decode(responseItem, &item); err != nil {
- t.Fatal(err)
- }
- decReq.Data = map[string]interface{}{
- "ciphertext": item.Ciphertext,
- }
- resp, err = b.HandleRequest(decReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["plaintext"] != plaintext {
- t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
- }
- }
-}
-
-// Case7: Test batch encryption with an upserted derived key
-func TestTransit_BatchEncryptionCase7(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, s := createBackendWithStorage(t)
-
- batchInput := []interface{}{
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
- }
-
- batchData := map[string]interface{}{
- "batch_input": batchInput,
- }
- batchReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: batchData,
- }
- resp, err = b.HandleRequest(batchReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
-
- decReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "decrypt/upserted_key",
- Storage: s,
- }
-
- plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
-
- for _, item := range batchResponseItems {
- decReq.Data = map[string]interface{}{
- "ciphertext": item.Ciphertext,
- "context": "dmlzaGFsCg==",
- }
- resp, err = b.HandleRequest(decReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["plaintext"] != plaintext {
- t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"])
- }
- }
-}
-
-// Case8: If plaintext is not base64 encoded, encryption should fail
-func TestTransit_BatchEncryptionCase8(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, s := createBackendWithStorage(t)
-
- // Create the policy
- policyReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "keys/existing_key",
- Storage: s,
- }
- resp, err = b.HandleRequest(policyReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchInput := []interface{}{
- map[string]interface{}{"plaintext": "simple_plaintext"},
- }
- batchData := map[string]interface{}{
- "batch_input": batchInput,
- }
- batchReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "encrypt/existing_key",
- Storage: s,
- Data: batchData,
- }
- resp, err = b.HandleRequest(batchReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- plaintext := "simple plaintext"
-
- encData := map[string]interface{}{
- "plaintext": plaintext,
- }
-
- encReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "encrypt/existing_key",
- Storage: s,
- Data: encData,
- }
- resp, err = b.HandleRequest(encReq)
- if err == nil {
- t.Fatal("expected an error")
- }
-}
-
-// Case9: If both plaintext and batch inputs are supplied, plaintext should be
-// ignored.
-func TestTransit_BatchEncryptionCase9(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, s := createBackendWithStorage(t)
-
- batchInput := []interface{}{
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
- }
- plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
- batchData := map[string]interface{}{
- "batch_input": batchInput,
- "plaintext": plaintext,
- }
- batchReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: batchData,
- }
- resp, err = b.HandleRequest(batchReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- _, ok := resp.Data["ciphertext"]
- if ok {
- t.Fatal("ciphertext field should not be set")
- }
-}
-
-// Case10: Inconsistent presence of 'context' in batch input should be caught
-func TestTransit_BatchEncryptionCase10(t *testing.T) {
- var err error
-
- b, s := createBackendWithStorage(t)
-
- batchInput := []interface{}{
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
- }
-
- batchData := map[string]interface{}{
- "batch_input": batchInput,
- }
-
- batchReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: batchData,
- }
- _, err = b.HandleRequest(batchReq)
- if err == nil {
- t.Fatalf("expected an error")
- }
-}
-
-// Case11: Incorrect inputs for context and nonce should not fail the operation
-func TestTransit_BatchEncryptionCase11(t *testing.T) {
- var err error
-
- b, s := createBackendWithStorage(t)
-
- batchInput := []interface{}{
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="},
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "not-encoded"},
- }
-
- batchData := map[string]interface{}{
- "batch_input": batchInput,
- }
- batchReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: batchData,
- }
- _, err = b.HandleRequest(batchReq)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// Case12: Invalid batch input
-func TestTransit_BatchEncryptionCase12(t *testing.T) {
- var err error
- b, s := createBackendWithStorage(t)
-
- batchInput := []interface{}{
- map[string]interface{}{},
- "unexpected_interface",
- }
-
- batchData := map[string]interface{}{
- "batch_input": batchInput,
- }
- batchReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: batchData,
- }
- _, err = b.HandleRequest(batchReq)
- if err == nil {
- t.Fatalf("expected an error")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go
deleted file mode 100644
index a18db91..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package transit
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/x509"
- "encoding/base64"
- "encoding/pem"
- "errors"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/hashicorp/vault/helper/keysutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- exportTypeEncryptionKey = "encryption-key"
- exportTypeSigningKey = "signing-key"
- exportTypeHMACKey = "hmac-key"
-)
-
-func (b *backend) pathExportKeys() *framework.Path {
- return &framework.Path{
- Pattern: "export/" + framework.GenericNameRegex("type") + "/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("version"),
- Fields: map[string]*framework.FieldSchema{
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Type of key to export (encryption-key, signing-key, hmac-key)",
- },
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the key",
- },
- "version": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Version of the key",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathPolicyExportRead,
- },
-
- HelpSynopsis: pathExportHelpSyn,
- HelpDescription: pathExportHelpDesc,
- }
-}
-
-func (b *backend) pathPolicyExportRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- exportType := d.Get("type").(string)
- name := d.Get("name").(string)
- version := d.Get("version").(string)
-
- switch exportType {
- case exportTypeEncryptionKey:
- case exportTypeSigningKey:
- case exportTypeHMACKey:
- default:
- return logical.ErrorResponse(fmt.Sprintf("invalid export type: %s", exportType)), logical.ErrInvalidRequest
- }
-
- p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return nil, nil
- }
-
- if !p.Exportable {
- return logical.ErrorResponse("key is not exportable"), nil
- }
-
- switch exportType {
- case exportTypeEncryptionKey:
- if !p.Type.EncryptionSupported() {
- return logical.ErrorResponse("encryption not supported for the key"), logical.ErrInvalidRequest
- }
- case exportTypeSigningKey:
- if !p.Type.SigningSupported() {
- return logical.ErrorResponse("signing not supported for the key"), logical.ErrInvalidRequest
- }
- }
-
- retKeys := map[string]string{}
- switch version {
- case "":
- for k, v := range p.Keys {
- exportKey, err := getExportKey(p, &v, exportType)
- if err != nil {
- return nil, err
- }
- retKeys[strconv.Itoa(k)] = exportKey
- }
-
- default:
- var versionValue int
- if version == "latest" {
- versionValue = p.LatestVersion
- } else {
- version = strings.TrimPrefix(version, "v")
- versionValue, err = strconv.Atoi(version)
- if err != nil {
- return logical.ErrorResponse("invalid key version"), logical.ErrInvalidRequest
- }
- }
-
- if versionValue < p.MinDecryptionVersion {
- return logical.ErrorResponse("version for export is below minimun decryption version"), logical.ErrInvalidRequest
- }
- key, ok := p.Keys[versionValue]
- if !ok {
- return logical.ErrorResponse("version does not exist or cannot be found"), logical.ErrInvalidRequest
- }
-
- exportKey, err := getExportKey(p, &key, exportType)
- if err != nil {
- return nil, err
- }
-
- retKeys[strconv.Itoa(versionValue)] = exportKey
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "name": p.Name,
- "type": p.Type.String(),
- "keys": retKeys,
- },
- }
-
- return resp, nil
-}
-
-func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType string) (string, error) {
- if policy == nil {
- return "", errors.New("nil policy provided")
- }
-
- switch exportType {
- case exportTypeHMACKey:
- return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.HMACKey)), nil
-
- case exportTypeEncryptionKey:
- switch policy.Type {
- case keysutil.KeyType_AES256_GCM96:
- return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil
- }
-
- case exportTypeSigningKey:
- switch policy.Type {
- case keysutil.KeyType_ECDSA_P256:
- ecKey, err := keyEntryToECPrivateKey(key, elliptic.P256())
- if err != nil {
- return "", err
- }
- return ecKey, nil
-
- case keysutil.KeyType_ED25519:
- return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil
- }
- }
-
- return "", fmt.Errorf("unknown key type %v", policy.Type)
-}
-
-func keyEntryToECPrivateKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, error) {
- if k == nil {
- return "", errors.New("nil KeyEntry provided")
- }
-
- privKey := &ecdsa.PrivateKey{
- PublicKey: ecdsa.PublicKey{
- Curve: curve,
- X: k.EC_X,
- Y: k.EC_Y,
- },
- D: k.EC_D,
- }
- ecder, err := x509.MarshalECPrivateKey(privKey)
- if err != nil {
- return "", err
- }
- if ecder == nil {
- return "", errors.New("No data returned when marshalling to private key")
- }
-
- block := pem.Block{
- Type: "EC PRIVATE KEY",
- Bytes: ecder,
- }
- return strings.TrimSpace(string(pem.EncodeToMemory(&block))), nil
-}
-
-const pathExportHelpSyn = `Export named encryption or signing key`
-
-const pathExportHelpDesc = `
-This path is used to export the named keys that are configured as
-exportable.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go
deleted file mode 100644
index 314653c..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go
+++ /dev/null
@@ -1,414 +0,0 @@
-package transit
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestTransit_Export_KeyVersion_ExportsCorrectVersion(t *testing.T) {
- verifyExportsCorrectVersion(t, "encryption-key", "aes256-gcm96")
- verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p256")
- verifyExportsCorrectVersion(t, "signing-key", "ed25519")
- verifyExportsCorrectVersion(t, "hmac-key", "aes256-gcm96")
- verifyExportsCorrectVersion(t, "hmac-key", "ecdsa-p256")
- verifyExportsCorrectVersion(t, "hmac-key", "ed25519")
-}
-
-func verifyExportsCorrectVersion(t *testing.T, exportType, keyType string) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- // First create a key, v1
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo",
- }
- req.Data = map[string]interface{}{
- "exportable": true,
- "type": keyType,
- }
- _, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- verifyVersion := func(versionRequest string, expectedVersion int) {
- req := &logical.Request{
- Storage: storage,
- Operation: logical.ReadOperation,
- Path: fmt.Sprintf("export/%s/foo/%s", exportType, versionRequest),
- }
- rsp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- typRaw, ok := rsp.Data["type"]
- if !ok {
- t.Fatal("no type returned from export")
- }
- typ, ok := typRaw.(string)
- if !ok {
- t.Fatalf("could not find key type, resp data is %#v", rsp.Data)
- }
- if typ != keyType {
- t.Fatalf("key type mismatch; %q vs %q", typ, keyType)
- }
-
- keysRaw, ok := rsp.Data["keys"]
- if !ok {
- t.Fatal("could not find keys value")
- }
- keys, ok := keysRaw.(map[string]string)
- if !ok {
- t.Fatal("could not cast to keys map")
- }
- if len(keys) != 1 {
- t.Fatal("unexpected number of keys found")
- }
-
- for k, _ := range keys {
- if k != strconv.Itoa(expectedVersion) {
- t.Fatalf("expected version %q, received version %q", strconv.Itoa(expectedVersion), k)
- }
- }
- }
-
- verifyVersion("v1", 1)
- verifyVersion("1", 1)
- verifyVersion("latest", 1)
-
- req.Path = "keys/foo/rotate"
- // v2
- _, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- verifyVersion("v1", 1)
- verifyVersion("1", 1)
- verifyVersion("v2", 2)
- verifyVersion("2", 2)
- verifyVersion("latest", 2)
-
- // v3
- _, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- verifyVersion("v1", 1)
- verifyVersion("1", 1)
- verifyVersion("v3", 3)
- verifyVersion("3", 3)
- verifyVersion("latest", 3)
-}
-
-func TestTransit_Export_ValidVersionsOnly(t *testing.T) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- // First create a key, v1
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo",
- }
- req.Data = map[string]interface{}{
- "exportable": true,
- }
- _, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- req.Path = "keys/foo/rotate"
- // v2
- _, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- // v3
- _, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- verifyExport := func(validVersions []int) {
- req = &logical.Request{
- Storage: storage,
- Operation: logical.ReadOperation,
- Path: "export/encryption-key/foo",
- }
- rsp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if _, ok := rsp.Data["keys"]; !ok {
- t.Error("no keys returned from export")
- }
-
- keys, ok := rsp.Data["keys"].(map[string]string)
- if !ok {
- t.Error("could not cast to keys object")
- }
- if len(keys) != len(validVersions) {
- t.Errorf("expected %d key count, received %d", len(validVersions), len(keys))
- }
- for _, version := range validVersions {
- if _, ok := keys[strconv.Itoa(version)]; !ok {
- t.Errorf("expecting to find key version %d, not found", version)
- }
- }
- }
-
- verifyExport([]int{1, 2, 3})
-
- req = &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo/config",
- }
- req.Data = map[string]interface{}{
- "min_decryption_version": 3,
- }
- _, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- verifyExport([]int{3})
-
- req = &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo/config",
- }
- req.Data = map[string]interface{}{
- "min_decryption_version": 2,
- }
- _, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- verifyExport([]int{2, 3})
-
- req = &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo/rotate",
- }
- // v4
- _, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- verifyExport([]int{2, 3, 4})
-}
-
-func TestTransit_Export_KeysNotMarkedExportable_ReturnsError(t *testing.T) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo",
- }
- req.Data = map[string]interface{}{
- "exportable": false,
- }
- _, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- req = &logical.Request{
- Storage: storage,
- Operation: logical.ReadOperation,
- Path: "export/encryption-key/foo",
- }
- rsp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if !rsp.IsError() {
- t.Fatal("Key not marked as exportble but was exported.")
- }
-}
-
-func TestTransit_Export_SigningDoesNotSupportSigning_ReturnsError(t *testing.T) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo",
- }
- req.Data = map[string]interface{}{
- "exportable": true,
- "type": "aes256-gcm96",
- }
- _, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- req = &logical.Request{
- Storage: storage,
- Operation: logical.ReadOperation,
- Path: "export/signing-key/foo",
- }
- _, err = b.HandleRequest(req)
- if err == nil {
- t.Fatal("Key does not support signing but was exported without error.")
- }
-}
-
-func TestTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testing.T) {
- testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p256")
- testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ed25519")
-}
-
-func testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testing.T, keyType string) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo",
- }
- req.Data = map[string]interface{}{
- "exportable": true,
- "type": keyType,
- }
- _, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- req = &logical.Request{
- Storage: storage,
- Operation: logical.ReadOperation,
- Path: "export/encryption-key/foo",
- }
- _, err = b.HandleRequest(req)
- if err == nil {
- t.Fatal("Key does not support encryption but was exported without error.")
- }
-}
-
-func TestTransit_Export_KeysDoesNotExist_ReturnsNotFound(t *testing.T) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- req := &logical.Request{
- Storage: storage,
- Operation: logical.ReadOperation,
- Path: "export/encryption-key/foo",
- }
- rsp, err := b.HandleRequest(req)
-
- if !(rsp == nil && err == nil) {
- t.Fatal("Key does not exist but does not return not found")
- }
-}
-
-func TestTransit_Export_EncryptionKey_DoesNotExportHMACKey(t *testing.T) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo",
- }
- req.Data = map[string]interface{}{
- "exportable": true,
- "type": "aes256-gcm96",
- }
- _, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- req = &logical.Request{
- Storage: storage,
- Operation: logical.ReadOperation,
- Path: "export/encryption-key/foo",
- }
- encryptionKeyRsp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- req.Path = "export/hmac-key/foo"
- hmacKeyRsp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- encryptionKeys, ok := encryptionKeyRsp.Data["keys"].(map[string]string)
- if !ok {
- t.Error("could not cast to keys object")
- }
- hmacKeys, ok := hmacKeyRsp.Data["keys"].(map[string]string)
- if !ok {
- t.Error("could not cast to keys object")
- }
- if len(hmacKeys) != len(encryptionKeys) {
- t.Errorf("hmac (%d) and encyryption (%d) key count don't match",
- len(hmacKeys), len(encryptionKeys))
- }
-
- if reflect.DeepEqual(encryptionKeyRsp.Data, hmacKeyRsp.Data) {
- t.Fatal("Encryption key data matched hmac key data")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash.go
deleted file mode 100644
index 566ac52..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package transit
-
-import (
- "crypto/sha256"
- "crypto/sha512"
- "encoding/base64"
- "encoding/hex"
- "fmt"
- "hash"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func (b *backend) pathHash() *framework.Path {
- return &framework.Path{
- Pattern: "hash" + framework.OptionalParamRegex("urlalgorithm"),
- Fields: map[string]*framework.FieldSchema{
- "input": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The base64-encoded input data",
- },
-
- "algorithm": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "sha2-256",
- Description: `Algorithm to use (POST body parameter). Valid values are:
-
-* sha2-224
-* sha2-256
-* sha2-384
-* sha2-512
-
-Defaults to "sha2-256".`,
- },
-
- "urlalgorithm": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Algorithm to use (POST URL parameter)`,
- },
-
- "format": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "hex",
- Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "hex".`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathHashWrite,
- },
-
- HelpSynopsis: pathHashHelpSyn,
- HelpDescription: pathHashHelpDesc,
- }
-}
-
-func (b *backend) pathHashWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- inputB64 := d.Get("input").(string)
- format := d.Get("format").(string)
- algorithm := d.Get("urlalgorithm").(string)
- if algorithm == "" {
- algorithm = d.Get("algorithm").(string)
- }
-
- input, err := base64.StdEncoding.DecodeString(inputB64)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
- }
-
- switch format {
- case "hex":
- case "base64":
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil
- }
-
- var hf hash.Hash
- switch algorithm {
- case "sha2-224":
- hf = sha256.New224()
- case "sha2-256":
- hf = sha256.New()
- case "sha2-384":
- hf = sha512.New384()
- case "sha2-512":
- hf = sha512.New()
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
- }
- hf.Write(input)
- retBytes := hf.Sum(nil)
-
- var retStr string
- switch format {
- case "hex":
- retStr = hex.EncodeToString(retBytes)
- case "base64":
- retStr = base64.StdEncoding.EncodeToString(retBytes)
- }
-
- // Generate the response
- resp := &logical.Response{
- Data: map[string]interface{}{
- "sum": retStr,
- },
- }
- return resp, nil
-}
-
-const pathHashHelpSyn = `Generate a hash sum for input data`
-
-const pathHashHelpDesc = `
-Generates a hash sum of the given algorithm against the given input data.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash_test.go
deleted file mode 100644
index d59976d..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hash_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package transit
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestTransit_Hash(t *testing.T) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "hash",
- Data: map[string]interface{}{
- "input": "dGhlIHF1aWNrIGJyb3duIGZveA==",
- },
- }
-
- doRequest := func(req *logical.Request, errExpected bool, expected string) {
- resp, err := b.HandleRequest(req)
- if err != nil && !errExpected {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if errExpected {
- if !resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- return
- }
- if resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- sum, ok := resp.Data["sum"]
- if !ok {
- t.Fatal("no sum key found in returned data")
- }
- if sum.(string) != expected {
- t.Fatal("mismatched hashes")
- }
- }
-
- // Test defaults -- sha2-256
- doRequest(req, false, "9ecb36561341d18eb65484e833efea61edc74b84cf5e6ae1b81c63533e25fc8f")
-
- // Test algorithm selection in the path
- req.Path = "hash/sha2-224"
- doRequest(req, false, "ea074a96cabc5a61f8298a2c470f019074642631a49e1c5e2f560865")
-
- // Reset and test algorithm selection in the data
- req.Path = "hash"
- req.Data["algorithm"] = "sha2-224"
- doRequest(req, false, "ea074a96cabc5a61f8298a2c470f019074642631a49e1c5e2f560865")
-
- req.Data["algorithm"] = "sha2-384"
- doRequest(req, false, "15af9ec8be783f25c583626e9491dbf129dd6dd620466fdf05b3a1d0bb8381d30f4d3ec29f923ff1e09a0f6b337365a6")
-
- req.Data["algorithm"] = "sha2-512"
- doRequest(req, false, "d9d380f29b97ad6a1d92e987d83fa5a02653301e1006dd2bcd51afa59a9147e9caedaf89521abc0f0b682adcd47fb512b8343c834a32f326fe9bef00542ce887")
-
- // Test returning as base64
- req.Data["format"] = "base64"
- doRequest(req, false, "2dOA8puXrWodkumH2D+loCZTMB4QBt0rzVGvpZqRR+nK7a+JUhq8DwtoKtzUf7USuDQ8g0oy8yb+m+8AVCzohw==")
-
- // Test bad input/format/algorithm
- req.Data["format"] = "base92"
- doRequest(req, true, "")
-
- req.Data["format"] = "hex"
- req.Data["algorithm"] = "foobar"
- doRequest(req, true, "")
-
- req.Data["algorithm"] = "sha2-256"
- req.Data["input"] = "foobar"
- doRequest(req, true, "")
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go
deleted file mode 100644
index 0a4ba19..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go
+++ /dev/null
@@ -1,230 +0,0 @@
-package transit
-
-import (
- "crypto/hmac"
- "crypto/sha256"
- "crypto/sha512"
- "encoding/base64"
- "fmt"
- "hash"
- "strconv"
- "strings"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func (b *backend) pathHMAC() *framework.Path {
- return &framework.Path{
- Pattern: "hmac/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The key to use for the HMAC function",
- },
-
- "input": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The base64-encoded input data",
- },
-
- "algorithm": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "sha2-256",
- Description: `Algorithm to use (POST body parameter). Valid values are:
-
-* sha2-224
-* sha2-256
-* sha2-384
-* sha2-512
-
-Defaults to "sha2-256".`,
- },
-
- "urlalgorithm": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Algorithm to use (POST URL parameter)`,
- },
-
- "key_version": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `The version of the key to use for generating the HMAC.
-Must be 0 (for latest) or a value greater than or equal
-to the min_encryption_version configured on the key.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathHMACWrite,
- },
-
- HelpSynopsis: pathHMACHelpSyn,
- HelpDescription: pathHMACHelpDesc,
- }
-}
-
-func (b *backend) pathHMACWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- ver := d.Get("key_version").(int)
- inputB64 := d.Get("input").(string)
- algorithm := d.Get("urlalgorithm").(string)
- if algorithm == "" {
- algorithm = d.Get("algorithm").(string)
- }
-
- input, err := base64.StdEncoding.DecodeString(inputB64)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
- }
-
- // Get the policy
- p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
- }
-
- switch {
- case ver == 0:
- // Allowed, will use latest; set explicitly here to ensure the string
- // is generated properly
- ver = p.LatestVersion
- case ver == p.LatestVersion:
- // Allowed
- case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion:
- return logical.ErrorResponse("cannot generate HMAC: version is too old (disallowed by policy)"), logical.ErrInvalidRequest
- }
-
- key, err := p.HMACKey(ver)
- if err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
- if key == nil {
- return nil, fmt.Errorf("HMAC key value could not be computed")
- }
-
- var hf hash.Hash
- switch algorithm {
- case "sha2-224":
- hf = hmac.New(sha256.New224, key)
- case "sha2-256":
- hf = hmac.New(sha256.New, key)
- case "sha2-384":
- hf = hmac.New(sha512.New384, key)
- case "sha2-512":
- hf = hmac.New(sha512.New, key)
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
- }
- hf.Write(input)
- retBytes := hf.Sum(nil)
-
- retStr := base64.StdEncoding.EncodeToString(retBytes)
- retStr = fmt.Sprintf("vault:v%s:%s", strconv.Itoa(ver), retStr)
-
- // Generate the response
- resp := &logical.Response{
- Data: map[string]interface{}{
- "hmac": retStr,
- },
- }
- return resp, nil
-}
-
-func (b *backend) pathHMACVerify(
- req *logical.Request, d *framework.FieldData, verificationHMAC string) (*logical.Response, error) {
-
- name := d.Get("name").(string)
- inputB64 := d.Get("input").(string)
- algorithm := d.Get("urlalgorithm").(string)
- if algorithm == "" {
- algorithm = d.Get("algorithm").(string)
- }
-
- input, err := base64.StdEncoding.DecodeString(inputB64)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
- }
-
- // Verify the prefix
- if !strings.HasPrefix(verificationHMAC, "vault:v") {
- return logical.ErrorResponse("invalid HMAC to verify: no prefix"), logical.ErrInvalidRequest
- }
-
- splitVerificationHMAC := strings.SplitN(strings.TrimPrefix(verificationHMAC, "vault:v"), ":", 2)
- if len(splitVerificationHMAC) != 2 {
- return logical.ErrorResponse("invalid HMAC: wrong number of fields"), logical.ErrInvalidRequest
- }
-
- ver, err := strconv.Atoi(splitVerificationHMAC[0])
- if err != nil {
- return logical.ErrorResponse("invalid HMAC: version number could not be decoded"), logical.ErrInvalidRequest
- }
-
- verBytes, err := base64.StdEncoding.DecodeString(splitVerificationHMAC[1])
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("unable to decode verification HMAC as base64: %s", err)), logical.ErrInvalidRequest
- }
-
- // Get the policy
- p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
- }
-
- if ver > p.LatestVersion {
- return logical.ErrorResponse("invalid HMAC: version is too new"), logical.ErrInvalidRequest
- }
-
- if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion {
- return logical.ErrorResponse("cannot verify HMAC: version is too old (disallowed by policy)"), logical.ErrInvalidRequest
- }
-
- key, err := p.HMACKey(ver)
- if err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
- if key == nil {
- return nil, fmt.Errorf("HMAC key value could not be computed")
- }
-
- var hf hash.Hash
- switch algorithm {
- case "sha2-224":
- hf = hmac.New(sha256.New224, key)
- case "sha2-256":
- hf = hmac.New(sha256.New, key)
- case "sha2-384":
- hf = hmac.New(sha512.New384, key)
- case "sha2-512":
- hf = hmac.New(sha512.New, key)
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
- }
- hf.Write(input)
- retBytes := hf.Sum(nil)
-
- return &logical.Response{
- Data: map[string]interface{}{
- "valid": hmac.Equal(retBytes, verBytes),
- },
- }, nil
-}
-
-const pathHMACHelpSyn = `Generate an HMAC for input data using the named key`
-
-const pathHMACHelpDesc = `
-Generates an HMAC sum of the given algorithm and key against the given input data.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac_test.go
deleted file mode 100644
index 1dfeb9b..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac_test.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package transit
-
-import (
- "fmt"
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestTransit_HMAC(t *testing.T) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- // First create a key
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo",
- }
- _, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- // Now, change the key value to something we control
- p, lock, err := b.lm.GetPolicyShared(storage, "foo")
- if err != nil {
- t.Fatal(err)
- }
- // We don't care as we're the only one using this
- lock.RUnlock()
- keyEntry := p.Keys[p.LatestVersion]
- keyEntry.HMACKey = []byte("01234567890123456789012345678901")
- p.Keys[p.LatestVersion] = keyEntry
- if err = p.Persist(storage); err != nil {
- t.Fatal(err)
- }
-
- req.Path = "hmac/foo"
- req.Data = map[string]interface{}{
- "input": "dGhlIHF1aWNrIGJyb3duIGZveA==",
- }
-
- doRequest := func(req *logical.Request, errExpected bool, expected string) {
- path := req.Path
- defer func() { req.Path = path }()
-
- resp, err := b.HandleRequest(req)
- if err != nil && !errExpected {
- panic(fmt.Sprintf("%v", err))
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if errExpected {
- if !resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- return
- }
- if resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- value, ok := resp.Data["hmac"]
- if !ok {
- t.Fatalf("no hmac key found in returned data, got resp data %#v", resp.Data)
- }
- if value.(string) != expected {
- panic(fmt.Sprintf("mismatched hashes; expected %s, got resp data %#v", expected, resp.Data))
- }
-
- // Now verify
- req.Path = strings.Replace(req.Path, "hmac", "verify", -1)
- req.Data["hmac"] = value.(string)
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatalf("%v: %v", err, resp)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.Data["valid"].(bool) == false {
- panic(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp))
- }
- }
-
- // Comparisons are against values generated via openssl
-
- // Test defaults -- sha2-256
- doRequest(req, false, "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=")
-
- // Test algorithm selection in the path
- req.Path = "hmac/foo/sha2-224"
- doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==")
-
- // Reset and test algorithm selection in the data
- req.Path = "hmac/foo"
- req.Data["algorithm"] = "sha2-224"
- doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==")
-
- req.Data["algorithm"] = "sha2-384"
- doRequest(req, false, "vault:v1:jDB9YXdPjpmr29b1JCIEJO93IydlKVfD9mA2EO9OmJtJQg3QAV5tcRRRb7IQGW9p")
-
- req.Data["algorithm"] = "sha2-512"
- doRequest(req, false, "vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==")
-
- // Test returning as base64
- req.Data["format"] = "base64"
- doRequest(req, false, "vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==")
-
- req.Data["algorithm"] = "foobar"
- doRequest(req, true, "")
-
- req.Data["algorithm"] = "sha2-256"
- req.Data["input"] = "foobar"
- doRequest(req, true, "")
- req.Data["input"] = "dGhlIHF1aWNrIGJyb3duIGZveA=="
-
- // Rotate
- err = p.Rotate(storage)
- if err != nil {
- t.Fatal(err)
- }
- keyEntry = p.Keys[2]
- // Set to another value we control
- keyEntry.HMACKey = []byte("12345678901234567890123456789012")
- p.Keys[2] = keyEntry
- if err = p.Persist(storage); err != nil {
- t.Fatal(err)
- }
-
- doRequest(req, false, "vault:v2:Dt+mO/B93kuWUbGMMobwUNX5Wodr6dL3JH4DMfpQ0kw=")
-
- // Verify a previous version
- req.Path = "verify/foo"
-
- req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4="
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("%v: %v", err, resp)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.Data["valid"].(bool) == false {
- t.Fatalf("error validating hmac\nreq\n%#v\nresp\n%#v", *req, *resp)
- }
-
- // Try a bad value
- req.Data["hmac"] = "vault:v1:UcBvm4VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4="
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatalf("%v: %v", err, resp)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.Data["valid"].(bool) {
- t.Fatalf("expected error validating hmac")
- }
-
- // Set min decryption version, attempt to verify
- p.MinDecryptionVersion = 2
- if err = p.Persist(storage); err != nil {
- t.Fatal(err)
- }
-
- req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4="
- resp, err = b.HandleRequest(req)
- if err == nil {
- t.Fatalf("expected an error, got response %#v", resp)
- }
- if err != logical.ErrInvalidRequest {
- t.Fatalf("expected invalid request error, got %v", err)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go
deleted file mode 100644
index ad9a918..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go
+++ /dev/null
@@ -1,285 +0,0 @@
-package transit
-
-import (
- "crypto/elliptic"
- "encoding/base64"
- "fmt"
- "strconv"
- "time"
-
- "golang.org/x/crypto/ed25519"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/helper/keysutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func (b *backend) pathListKeys() *framework.Path {
- return &framework.Path{
- Pattern: "keys/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathKeysList,
- },
-
- HelpSynopsis: pathPolicyHelpSyn,
- HelpDescription: pathPolicyHelpDesc,
- }
-}
-
-func (b *backend) pathKeys() *framework.Path {
- return &framework.Path{
- Pattern: "keys/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the key",
- },
-
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "aes256-gcm96",
- Description: `The type of key to create. Currently,
-"aes256-gcm96" (symmetric) and "ecdsa-p256" (asymmetric), and
-'ed25519' (asymmetric) are supported. Defaults to "aes256-gcm96".`,
- },
-
- "derived": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `Enables key derivation mode. This
-allows for per-transaction unique
-keys for encryption operations.`,
- },
-
- "convergent_encryption": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `Whether to support convergent encryption.
-This is only supported when using a key with
-key derivation enabled and will require all
-requests to carry both a context and 96-bit
-(12-byte) nonce. The given nonce will be used
-in place of a randomly generated nonce. As a
-result, when the same context and nonce are
-supplied, the same ciphertext is generated. It
-is *very important* when using this mode that
-you ensure that all nonces are unique for a
-given context. Failing to do so will severely
-impact the ciphertext's security.`,
- },
-
- "exportable": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: `Enables keys to be exportable.
-This allows for all the valid keys
-in the key ring to be exported.`,
- },
-
- "context": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Base64 encoded context for key derivation.
-When reading a key with key derivation enabled,
-if the key type supports public keys, this will
-return the public key for the given context.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathPolicyWrite,
- logical.DeleteOperation: b.pathPolicyDelete,
- logical.ReadOperation: b.pathPolicyRead,
- },
-
- HelpSynopsis: pathPolicyHelpSyn,
- HelpDescription: pathPolicyHelpDesc,
- }
-}
-
-func (b *backend) pathKeysList(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entries, err := req.Storage.List("policy/")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(entries), nil
-}
-
-func (b *backend) pathPolicyWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- derived := d.Get("derived").(bool)
- convergent := d.Get("convergent_encryption").(bool)
- keyType := d.Get("type").(string)
- exportable := d.Get("exportable").(bool)
-
- if !derived && convergent {
- return logical.ErrorResponse("convergent encryption requires derivation to be enabled"), nil
- }
-
- polReq := keysutil.PolicyRequest{
- Storage: req.Storage,
- Name: name,
- Derived: derived,
- Convergent: convergent,
- Exportable: exportable,
- }
- switch keyType {
- case "aes256-gcm96":
- polReq.KeyType = keysutil.KeyType_AES256_GCM96
- case "ecdsa-p256":
- polReq.KeyType = keysutil.KeyType_ECDSA_P256
- case "ed25519":
- polReq.KeyType = keysutil.KeyType_ED25519
- default:
- return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest
- }
-
- p, lock, upserted, err := b.lm.GetPolicyUpsert(polReq)
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return nil, fmt.Errorf("error generating key: returned policy was nil")
- }
-
- resp := &logical.Response{}
- if !upserted {
- resp.AddWarning(fmt.Sprintf("key %s already existed", name))
- }
-
- return nil, nil
-}
-
-// Built-in helper type for returning asymmetric keys
-type asymKey struct {
- Name string `json:"name" structs:"name" mapstructure:"name"`
- PublicKey string `json:"public_key" structs:"public_key" mapstructure:"public_key"`
- CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
-}
-
-func (b *backend) pathPolicyRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
-
- p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return nil, nil
- }
-
- // Return the response
- resp := &logical.Response{
- Data: map[string]interface{}{
- "name": p.Name,
- "type": p.Type.String(),
- "derived": p.Derived,
- "deletion_allowed": p.DeletionAllowed,
- "min_decryption_version": p.MinDecryptionVersion,
- "min_encryption_version": p.MinEncryptionVersion,
- "latest_version": p.LatestVersion,
- "exportable": p.Exportable,
- "supports_encryption": p.Type.EncryptionSupported(),
- "supports_decryption": p.Type.DecryptionSupported(),
- "supports_signing": p.Type.SigningSupported(),
- "supports_derivation": p.Type.DerivationSupported(),
- },
- }
-
- if p.Derived {
- switch p.KDF {
- case keysutil.Kdf_hmac_sha256_counter:
- resp.Data["kdf"] = "hmac-sha256-counter"
- resp.Data["kdf_mode"] = "hmac-sha256-counter"
- case keysutil.Kdf_hkdf_sha256:
- resp.Data["kdf"] = "hkdf_sha256"
- }
- resp.Data["convergent_encryption"] = p.ConvergentEncryption
- if p.ConvergentEncryption {
- resp.Data["convergent_encryption_version"] = p.ConvergentVersion
- }
- }
-
- contextRaw := d.Get("context").(string)
- var context []byte
- if len(contextRaw) != 0 {
- context, err = base64.StdEncoding.DecodeString(contextRaw)
- if err != nil {
- return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest
- }
- }
-
- switch p.Type {
- case keysutil.KeyType_AES256_GCM96:
- retKeys := map[string]int64{}
- for k, v := range p.Keys {
- retKeys[strconv.Itoa(k)] = v.DeprecatedCreationTime
- }
- resp.Data["keys"] = retKeys
-
- case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ED25519:
- retKeys := map[string]map[string]interface{}{}
- for k, v := range p.Keys {
- key := asymKey{
- PublicKey: v.FormattedPublicKey,
- CreationTime: v.CreationTime,
- }
- if key.CreationTime.IsZero() {
- key.CreationTime = time.Unix(v.DeprecatedCreationTime, 0)
- }
-
- switch p.Type {
- case keysutil.KeyType_ECDSA_P256:
- key.Name = elliptic.P256().Params().Name
- case keysutil.KeyType_ED25519:
- if p.Derived {
- if len(context) == 0 {
- key.PublicKey = ""
- } else {
- derived, err := p.DeriveKey(context, k)
- if err != nil {
- return nil, fmt.Errorf("failed to derive key to return public component")
- }
- pubKey := ed25519.PrivateKey(derived).Public().(ed25519.PublicKey)
- key.PublicKey = base64.StdEncoding.EncodeToString(pubKey)
- }
- }
- key.Name = "ed25519"
- }
-
- retKeys[strconv.Itoa(k)] = structs.New(key).Map()
- }
- resp.Data["keys"] = retKeys
- }
-
- return resp, nil
-}
-
-func (b *backend) pathPolicyDelete(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
-
- // Delete does its own locking
- err := b.lm.DeletePolicy(req.Storage, name)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("error deleting policy %s: %s", name, err)), err
- }
-
- return nil, nil
-}
-
-const pathPolicyHelpSyn = `Managed named encryption keys`
-
-const pathPolicyHelpDesc = `
-This path is used to manage the named keys that are available.
-Doing a write with no value against a new named key will create
-it using a randomly generated key.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys_test.go
deleted file mode 100644
index 7a87fdd..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package transit_test
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/builtin/audit/file"
- "github.com/hashicorp/vault/builtin/logical/transit"
- vaulthttp "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/vault"
-)
-
-func TestTransit_Issue_2958(t *testing.T) {
- coreConfig := &vault.CoreConfig{
- LogicalBackends: map[string]logical.Factory{
- "transit": transit.Factory,
- },
- AuditBackends: map[string]audit.Factory{
- "file": file.Factory,
- },
- }
-
- cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
- HandlerFunc: vaulthttp.Handler,
- })
- cluster.Start()
- defer cluster.Cleanup()
-
- cores := cluster.Cores
-
- vault.TestWaitActive(t, cores[0].Core)
-
- client := cores[0].Client
-
- err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{
- Type: "file",
- Options: map[string]string{
- "file_path": "/dev/null",
- },
- })
- if err != nil {
- t.Fatal(err)
- }
-
- err = client.Sys().Mount("transit", &api.MountInput{
- Type: "transit",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Logical().Write("transit/keys/foo", map[string]interface{}{
- "type": "ecdsa-p256",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Logical().Write("transit/keys/bar", map[string]interface{}{
- "type": "ed25519",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Logical().Read("transit/keys/foo")
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Logical().Read("transit/keys/bar")
- if err != nil {
- t.Fatal(err)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random.go
deleted file mode 100644
index f9190b7..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package transit
-
-import (
- "encoding/base64"
- "encoding/hex"
- "fmt"
- "strconv"
-
- uuid "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func (b *backend) pathRandom() *framework.Path {
- return &framework.Path{
- Pattern: "random" + framework.OptionalParamRegex("urlbytes"),
- Fields: map[string]*framework.FieldSchema{
- "urlbytes": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The number of bytes to generate (POST URL parameter)",
- },
-
- "bytes": &framework.FieldSchema{
- Type: framework.TypeInt,
- Default: 32,
- Description: "The number of bytes to generate (POST body parameter). Defaults to 32 (256 bits).",
- },
-
- "format": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "base64",
- Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "base64".`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRandomWrite,
- },
-
- HelpSynopsis: pathRandomHelpSyn,
- HelpDescription: pathRandomHelpDesc,
- }
-}
-
-func (b *backend) pathRandomWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- bytes := 0
- var err error
- strBytes := d.Get("urlbytes").(string)
- if strBytes != "" {
- bytes, err = strconv.Atoi(strBytes)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("error parsing url-set byte count: %s", err)), nil
- }
- } else {
- bytes = d.Get("bytes").(int)
- }
- format := d.Get("format").(string)
-
- if bytes < 1 {
- return logical.ErrorResponse(`"bytes" cannot be less than 1`), nil
- }
-
- switch format {
- case "hex":
- case "base64":
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil
- }
-
- randBytes, err := uuid.GenerateRandomBytes(bytes)
- if err != nil {
- return nil, err
- }
-
- var retStr string
- switch format {
- case "hex":
- retStr = hex.EncodeToString(randBytes)
- case "base64":
- retStr = base64.StdEncoding.EncodeToString(randBytes)
- }
-
- // Generate the response
- resp := &logical.Response{
- Data: map[string]interface{}{
- "random_bytes": retStr,
- },
- }
- return resp, nil
-}
-
-const pathRandomHelpSyn = `Generate random bytes`
-
-const pathRandomHelpDesc = `
-This function can be used to generate high-entropy random bytes.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random_test.go
deleted file mode 100644
index a2711ce..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_random_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package transit
-
-import (
- "encoding/base64"
- "encoding/hex"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestTransit_Random(t *testing.T) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "random",
- Data: map[string]interface{}{},
- }
-
- doRequest := func(req *logical.Request, errExpected bool, format string, numBytes int) {
- getResponse := func() []byte {
- resp, err := b.HandleRequest(req)
- if err != nil && !errExpected {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if errExpected {
- if !resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- return nil
- }
- if resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- if _, ok := resp.Data["random_bytes"]; !ok {
- t.Fatal("no random_bytes found in response")
- }
-
- outputStr := resp.Data["random_bytes"].(string)
- var outputBytes []byte
- switch format {
- case "base64":
- outputBytes, err = base64.StdEncoding.DecodeString(outputStr)
- case "hex":
- outputBytes, err = hex.DecodeString(outputStr)
- default:
- t.Fatal("unknown format")
- }
- if err != nil {
- t.Fatal(err)
- }
-
- return outputBytes
- }
-
- rand1 := getResponse()
- // Expected error
- if rand1 == nil {
- return
- }
- rand2 := getResponse()
- if len(rand1) != numBytes || len(rand2) != numBytes {
- t.Fatal("length of output random bytes not what is exepcted")
- }
- if reflect.DeepEqual(rand1, rand2) {
- t.Fatal("found identical ouputs")
- }
- }
-
- // Test defaults
- doRequest(req, false, "base64", 32)
-
- // Test size selection in the path
- req.Path = "random/24"
- req.Data["format"] = "hex"
- doRequest(req, false, "hex", 24)
-
- // Test bad input/format
- req.Path = "random"
- req.Data["format"] = "base92"
- doRequest(req, true, "", 0)
-
- req.Data["format"] = "hex"
- req.Data["bytes"] = -1
- doRequest(req, true, "", 0)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go
deleted file mode 100644
index 81e811a..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package transit
-
-import (
- "encoding/base64"
- "fmt"
-
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/mitchellh/mapstructure"
-)
-
-func (b *backend) pathRewrap() *framework.Path {
- return &framework.Path{
- Pattern: "rewrap/" + framework.GenericNameRegex("name"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the key",
- },
-
- "ciphertext": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Ciphertext value to rewrap",
- },
-
- "context": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Base64 encoded context for key derivation. Required for derived keys.",
- },
-
- "nonce": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Nonce for when convergent encryption is used",
- },
-
- "key_version": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `The version of the key to use for encryption.
-Must be 0 (for latest) or a value greater than or equal
-to the min_encryption_version configured on the key.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRewrapWrite,
- },
-
- HelpSynopsis: pathRewrapHelpSyn,
- HelpDescription: pathRewrapHelpDesc,
- }
-}
-
-func (b *backend) pathRewrapWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- batchInputRaw := d.Raw["batch_input"]
- var batchInputItems []BatchRequestItem
- var err error
- if batchInputRaw != nil {
- err = mapstructure.Decode(batchInputRaw, &batchInputItems)
- if err != nil {
- return nil, fmt.Errorf("failed to parse batch input: %v", err)
- }
-
- if len(batchInputItems) == 0 {
- return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest
- }
- } else {
- ciphertext := d.Get("ciphertext").(string)
- if len(ciphertext) == 0 {
- return logical.ErrorResponse("missing ciphertext to decrypt"), logical.ErrInvalidRequest
- }
-
- batchInputItems = make([]BatchRequestItem, 1)
- batchInputItems[0] = BatchRequestItem{
- Ciphertext: ciphertext,
- Context: d.Get("context").(string),
- Nonce: d.Get("nonce").(string),
- KeyVersion: d.Get("key_version").(int),
- }
- }
-
- batchResponseItems := make([]BatchResponseItem, len(batchInputItems))
- contextSet := len(batchInputItems[0].Context) != 0
-
- for i, item := range batchInputItems {
- if (len(item.Context) == 0 && contextSet) || (len(item.Context) != 0 && !contextSet) {
- return logical.ErrorResponse("context should be set either in all the request blocks or in none"), logical.ErrInvalidRequest
- }
-
- if item.Ciphertext == "" {
- batchResponseItems[i].Error = "missing ciphertext to decrypt"
- continue
- }
-
- // Decode the context
- if len(item.Context) != 0 {
- batchInputItems[i].DecodedContext, err = base64.StdEncoding.DecodeString(item.Context)
- if err != nil {
- batchResponseItems[i].Error = err.Error()
- continue
- }
- }
-
- // Decode the nonce
- if len(item.Nonce) != 0 {
- batchInputItems[i].DecodedNonce, err = base64.StdEncoding.DecodeString(item.Nonce)
- if err != nil {
- batchResponseItems[i].Error = err.Error()
- continue
- }
- }
- }
-
- // Get the policy
- p, lock, err := b.lm.GetPolicyShared(req.Storage, d.Get("name").(string))
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
- }
-
- for i, item := range batchInputItems {
- if batchResponseItems[i].Error != "" {
- continue
- }
-
- plaintext, err := p.Decrypt(item.DecodedContext, item.DecodedNonce, item.Ciphertext)
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- batchResponseItems[i].Error = err.Error()
- continue
- default:
- return nil, err
- }
- }
-
- ciphertext, err := p.Encrypt(item.KeyVersion, item.DecodedContext, item.DecodedNonce, plaintext)
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- batchResponseItems[i].Error = err.Error()
- continue
- case errutil.InternalError:
- return nil, err
- default:
- return nil, err
- }
- }
-
- if ciphertext == "" {
- return nil, fmt.Errorf("empty ciphertext returned for input item %d", i)
- }
-
- batchResponseItems[i].Ciphertext = ciphertext
- }
-
- resp := &logical.Response{}
- if batchInputRaw != nil {
- resp.Data = map[string]interface{}{
- "batch_results": batchResponseItems,
- }
- } else {
- if batchResponseItems[0].Error != "" {
- return logical.ErrorResponse(batchResponseItems[0].Error), logical.ErrInvalidRequest
- }
- resp.Data = map[string]interface{}{
- "ciphertext": batchResponseItems[0].Ciphertext,
- }
- }
-
- return resp, nil
-}
-
-const pathRewrapHelpSyn = `Rewrap ciphertext`
-
-const pathRewrapHelpDesc = `
-After key rotation, this function can be used to rewrap the given ciphertext or
-a batch of given ciphertext blocks with the latest version of the named key.
-If the given ciphertext is already using the latest version of the key, this
-function is a no-op.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap_test.go
deleted file mode 100644
index ae4d002..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap_test.go
+++ /dev/null
@@ -1,294 +0,0 @@
-package transit
-
-import (
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// Check the normal flow of rewrap
-func TestTransit_BatchRewrapCase1(t *testing.T) {
- var resp *logical.Response
- var err error
- b, s := createBackendWithStorage(t)
-
- // Upsert the key and encrypt the data
- plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
-
- encData := map[string]interface{}{
- "plaintext": plaintext,
- }
-
- // Create a key and encrypt a plaintext
- encReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: encData,
- }
- resp, err = b.HandleRequest(encReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Cache the ciphertext
- ciphertext := resp.Data["ciphertext"]
- if !strings.HasPrefix(ciphertext.(string), "vault:v1") {
- t.Fatalf("bad: ciphertext version: expected: 'vault:v1', actual: %s", ciphertext)
- }
-
- rewrapData := map[string]interface{}{
- "ciphertext": ciphertext,
- }
-
- // Read the policy and check if the latest version is 1
- policyReq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "keys/upserted_key",
- Storage: s,
- }
-
- resp, err = b.HandleRequest(policyReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["latest_version"] != 1 {
- t.Fatalf("bad: latest_version: expected: 1, actual: %d", resp.Data["latest_version"])
- }
-
- rotateReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "keys/upserted_key/rotate",
- Storage: s,
- }
- resp, err = b.HandleRequest(rotateReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Read the policy again and the latest version is 2
- resp, err = b.HandleRequest(policyReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["latest_version"] != 2 {
- t.Fatalf("bad: latest_version: expected: 2, actual: %d", resp.Data["latest_version"])
- }
-
- // Rewrap the ciphertext and check that they are different
- rewrapReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "rewrap/upserted_key",
- Storage: s,
- Data: rewrapData,
- }
-
- resp, err = b.HandleRequest(rewrapReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if ciphertext.(string) == resp.Data["ciphertext"].(string) {
- t.Fatalf("bad: ciphertexts are same before and after rewrap")
- }
-
- if !strings.HasPrefix(resp.Data["ciphertext"].(string), "vault:v2") {
- t.Fatalf("bad: ciphertext version: expected: 'vault:v2', actual: %s", resp.Data["ciphertext"].(string))
- }
-}
-
-// Check the normal flow of rewrap with upserted key
-func TestTransit_BatchRewrapCase2(t *testing.T) {
- var resp *logical.Response
- var err error
- b, s := createBackendWithStorage(t)
-
- // Upsert the key and encrypt the data
- plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA=="
-
- encData := map[string]interface{}{
- "plaintext": plaintext,
- "context": "dmlzaGFsCg==",
- }
-
- // Create a key and encrypt a plaintext
- encReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: encData,
- }
- resp, err = b.HandleRequest(encReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Cache the ciphertext
- ciphertext := resp.Data["ciphertext"]
- if !strings.HasPrefix(ciphertext.(string), "vault:v1") {
- t.Fatalf("bad: ciphertext version: expected: 'vault:v1', actual: %s", ciphertext)
- }
-
- rewrapData := map[string]interface{}{
- "ciphertext": ciphertext,
- "context": "dmlzaGFsCg==",
- }
-
- // Read the policy and check if the latest version is 1
- policyReq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "keys/upserted_key",
- Storage: s,
- }
-
- resp, err = b.HandleRequest(policyReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["latest_version"] != 1 {
- t.Fatalf("bad: latest_version: expected: 1, actual: %d", resp.Data["latest_version"])
- }
-
- rotateReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "keys/upserted_key/rotate",
- Storage: s,
- }
- resp, err = b.HandleRequest(rotateReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- // Read the policy again and the latest version is 2
- resp, err = b.HandleRequest(policyReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if resp.Data["latest_version"] != 2 {
- t.Fatalf("bad: latest_version: expected: 2, actual: %d", resp.Data["latest_version"])
- }
-
- // Rewrap the ciphertext and check that they are different
- rewrapReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "rewrap/upserted_key",
- Storage: s,
- Data: rewrapData,
- }
-
- resp, err = b.HandleRequest(rewrapReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- if ciphertext.(string) == resp.Data["ciphertext"].(string) {
- t.Fatalf("bad: ciphertexts are same before and after rewrap")
- }
-
- if !strings.HasPrefix(resp.Data["ciphertext"].(string), "vault:v2") {
- t.Fatalf("bad: ciphertext version: expected: 'vault:v2', actual: %s", resp.Data["ciphertext"].(string))
- }
-}
-
-// Batch encrypt plaintexts, rotate the keys and rewrap all the ciphertexts
-func TestTransit_BatchRewrapCase3(t *testing.T) {
- var resp *logical.Response
- var err error
-
- b, s := createBackendWithStorage(t)
-
- batchEncryptionInput := []interface{}{
- map[string]interface{}{"plaintext": "dmlzaGFsCg=="},
- map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="},
- }
- batchEncryptionData := map[string]interface{}{
- "batch_input": batchEncryptionInput,
- }
- batchReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "encrypt/upserted_key",
- Storage: s,
- Data: batchEncryptionData,
- }
- resp, err = b.HandleRequest(batchReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchEncryptionResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
-
- batchRewrapInput := make([]interface{}, len(batchEncryptionResponseItems))
- for i, item := range batchEncryptionResponseItems {
- batchRewrapInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext}
- }
-
- batchRewrapData := map[string]interface{}{
- "batch_input": batchRewrapInput,
- }
-
- rotateReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "keys/upserted_key/rotate",
- Storage: s,
- }
- resp, err = b.HandleRequest(rotateReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- rewrapReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "rewrap/upserted_key",
- Storage: s,
- Data: batchRewrapData,
- }
-
- resp, err = b.HandleRequest(rewrapReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- batchRewrapResponseItems := resp.Data["batch_results"].([]BatchResponseItem)
-
- if len(batchRewrapResponseItems) != len(batchEncryptionResponseItems) {
- t.Fatalf("bad: length of input and output or rewrap are not matching; expected: %d, actual: %d", len(batchEncryptionResponseItems), len(batchRewrapResponseItems))
- }
-
- decReq := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "decrypt/upserted_key",
- Storage: s,
- }
-
- for i, eItem := range batchEncryptionResponseItems {
- rItem := batchRewrapResponseItems[i]
-
- if eItem.Ciphertext == rItem.Ciphertext {
- t.Fatalf("bad: rewrap input and output are the same")
- }
-
- if !strings.HasPrefix(rItem.Ciphertext, "vault:v2") {
- t.Fatalf("bad: invalid version of ciphertext in rewrap response; expected: 'vault:v2', actual: %s", rItem.Ciphertext)
- }
-
- decReq.Data = map[string]interface{}{
- "ciphertext": rItem.Ciphertext,
- }
-
- resp, err = b.HandleRequest(decReq)
- if err != nil || (resp != nil && resp.IsError()) {
- t.Fatalf("err:%v resp:%#v", err, resp)
- }
-
- plaintext1 := "dGhlIHF1aWNrIGJyb3duIGZveA=="
- plaintext2 := "dmlzaGFsCg=="
- if resp.Data["plaintext"] != plaintext1 && resp.Data["plaintext"] != plaintext2 {
- t.Fatalf("bad: plaintext. Expected: %q or %q, Actual: %q", plaintext1, plaintext2, resp.Data["plaintext"])
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rotate.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rotate.go
deleted file mode 100644
index 743fcf2..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rotate.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package transit
-
-import (
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func (b *backend) pathRotate() *framework.Path {
- return &framework.Path{
- Pattern: "keys/" + framework.GenericNameRegex("name") + "/rotate",
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the key",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRotateWrite,
- },
-
- HelpSynopsis: pathRotateHelpSyn,
- HelpDescription: pathRotateHelpDesc,
- }
-}
-
-func (b *backend) pathRotateWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
-
- // Get the policy
- p, lock, err := b.lm.GetPolicyExclusive(req.Storage, name)
- if lock != nil {
- defer lock.Unlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return logical.ErrorResponse("key not found"), logical.ErrInvalidRequest
- }
-
- // Rotate the policy
- err = p.Rotate(req.Storage)
-
- return nil, err
-}
-
-const pathRotateHelpSyn = `Rotate named encryption key`
-
-const pathRotateHelpDesc = `
-This path is used to rotate the named key. After rotation,
-new encryption requests using this name will use the new key,
-but decryption will still be supported for older versions.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go
deleted file mode 100644
index 549ae05..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go
+++ /dev/null
@@ -1,310 +0,0 @@
-package transit
-
-import (
- "crypto/sha256"
- "crypto/sha512"
- "encoding/base64"
- "fmt"
- "hash"
-
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func (b *backend) pathSign() *framework.Path {
- return &framework.Path{
- Pattern: "sign/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The key to use",
- },
-
- "input": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The base64-encoded input data",
- },
-
- "context": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Base64 encoded context for key derivation. Required if key
-derivation is enabled; currently only available with ed25519 keys.`,
- },
-
- "algorithm": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "sha2-256",
- Description: `Hash algorithm to use (POST body parameter). Valid values are:
-
-* sha2-224
-* sha2-256
-* sha2-384
-* sha2-512
-
-Defaults to "sha2-256". Not valid for all key types,
-including ed25519.`,
- },
-
- "urlalgorithm": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Hash algorithm to use (POST URL parameter)`,
- },
-
- "key_version": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: `The version of the key to use for signing.
-Must be 0 (for latest) or a value greater than or equal
-to the min_encryption_version configured on the key.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathSignWrite,
- },
-
- HelpSynopsis: pathSignHelpSyn,
- HelpDescription: pathSignHelpDesc,
- }
-}
-
-func (b *backend) pathVerify() *framework.Path {
- return &framework.Path{
- Pattern: "verify/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"),
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The key to use",
- },
-
- "context": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Base64 encoded context for key derivation. Required if key
-derivation is enabled; currently only available with ed25519 keys.`,
- },
-
- "signature": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The signature, including vault header/key version",
- },
-
- "hmac": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The HMAC, including vault header/key version",
- },
-
- "input": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The base64-encoded input data to verify",
- },
-
- "urlalgorithm": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Hash algorithm to use (POST URL parameter)`,
- },
-
- "algorithm": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "sha2-256",
- Description: `Hash algorithm to use (POST body parameter). Valid values are:
-
-* sha2-224
-* sha2-256
-* sha2-384
-* sha2-512
-
-Defaults to "sha2-256". Not valid for all key types.`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathVerifyWrite,
- },
-
- HelpSynopsis: pathVerifyHelpSyn,
- HelpDescription: pathVerifyHelpDesc,
- }
-}
-
-func (b *backend) pathSignWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("name").(string)
- ver := d.Get("key_version").(int)
- inputB64 := d.Get("input").(string)
- algorithm := d.Get("urlalgorithm").(string)
- if algorithm == "" {
- algorithm = d.Get("algorithm").(string)
- }
-
- input, err := base64.StdEncoding.DecodeString(inputB64)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
- }
-
- // Get the policy
- p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
- }
-
- if !p.Type.SigningSupported() {
- return logical.ErrorResponse(fmt.Sprintf("key type %v does not support signing", p.Type)), logical.ErrInvalidRequest
- }
-
- contextRaw := d.Get("context").(string)
- var context []byte
- if len(contextRaw) != 0 {
- context, err = base64.StdEncoding.DecodeString(contextRaw)
- if err != nil {
- return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest
- }
- }
-
- if p.Type.HashSignatureInput() {
- var hf hash.Hash
- switch algorithm {
- case "sha2-224":
- hf = sha256.New224()
- case "sha2-256":
- hf = sha256.New()
- case "sha2-384":
- hf = sha512.New384()
- case "sha2-512":
- hf = sha512.New()
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
- }
- hf.Write(input)
- input = hf.Sum(nil)
- }
-
- sig, err := p.Sign(ver, context, input)
- if err != nil {
- return nil, err
- }
- if sig == nil {
- return nil, fmt.Errorf("signature could not be computed")
- }
-
- // Generate the response
- resp := &logical.Response{
- Data: map[string]interface{}{
- "signature": sig.Signature,
- },
- }
-
- if len(sig.PublicKey) > 0 {
- resp.Data["public_key"] = sig.PublicKey
- }
-
- return resp, nil
-}
-
-func (b *backend) pathVerifyWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- sig := d.Get("signature").(string)
- hmac := d.Get("hmac").(string)
- switch {
- case sig != "" && hmac != "":
- return logical.ErrorResponse("provide one of 'signature' or 'hmac'"), logical.ErrInvalidRequest
-
- case sig == "" && hmac == "":
- return logical.ErrorResponse("neither a 'signature' nor an 'hmac' were given to verify"), logical.ErrInvalidRequest
-
- case hmac != "":
- return b.pathHMACVerify(req, d, hmac)
- }
-
- name := d.Get("name").(string)
- inputB64 := d.Get("input").(string)
- algorithm := d.Get("urlalgorithm").(string)
- if algorithm == "" {
- algorithm = d.Get("algorithm").(string)
- }
-
- input, err := base64.StdEncoding.DecodeString(inputB64)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
- }
-
- // Get the policy
- p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- return nil, err
- }
- if p == nil {
- return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
- }
-
- if !p.Type.SigningSupported() {
- return logical.ErrorResponse(fmt.Sprintf("key type %v does not support verification", p.Type)), logical.ErrInvalidRequest
- }
-
- contextRaw := d.Get("context").(string)
- var context []byte
- if len(contextRaw) != 0 {
- context, err = base64.StdEncoding.DecodeString(contextRaw)
- if err != nil {
- return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest
- }
- }
-
- if p.Type.HashSignatureInput() {
- var hf hash.Hash
- switch algorithm {
- case "sha2-224":
- hf = sha256.New224()
- case "sha2-256":
- hf = sha256.New()
- case "sha2-384":
- hf = sha512.New384()
- case "sha2-512":
- hf = sha512.New()
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
- }
- hf.Write(input)
- input = hf.Sum(nil)
- }
-
- valid, err := p.VerifySignature(context, input, sig)
- if err != nil {
- switch err.(type) {
- case errutil.UserError:
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- case errutil.InternalError:
- return nil, err
- default:
- return nil, err
- }
- }
-
- // Generate the response
- resp := &logical.Response{
- Data: map[string]interface{}{
- "valid": valid,
- },
- }
- return resp, nil
-}
-
-const pathSignHelpSyn = `Generate a signature for input data using the named key`
-
-const pathSignHelpDesc = `
-Generates a signature of the input data using the named key and the given hash algorithm.
-`
-const pathVerifyHelpSyn = `Verify a signature or HMAC for input data created using the named key`
-
-const pathVerifyHelpDesc = `
-Verifies a signature or HMAC of the input data using the named key and the given hash algorithm.
-`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go
deleted file mode 100644
index 4abdad6..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package transit
-
-import (
- "encoding/base64"
- "strings"
- "testing"
-
- "golang.org/x/crypto/ed25519"
-
- "github.com/hashicorp/vault/logical"
- "github.com/mitchellh/mapstructure"
-)
-
-func TestTransit_SignVerify_P256(t *testing.T) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- // First create a key
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo",
- Data: map[string]interface{}{
- "type": "ecdsa-p256",
- },
- }
- _, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- // Now, change the key value to something we control
- p, lock, err := b.lm.GetPolicyShared(storage, "foo")
- if err != nil {
- t.Fatal(err)
- }
- // We don't care as we're the only one using this
- lock.RUnlock()
-
- // Useful code to output a key for openssl verification
- /*
- {
- key := p.Keys[p.LatestVersion]
- keyBytes, _ := x509.MarshalECPrivateKey(&ecdsa.PrivateKey{
- PublicKey: ecdsa.PublicKey{
- Curve: elliptic.P256(),
- X: key.X,
- Y: key.Y,
- },
- D: key.D,
- })
- pemBlock := &pem.Block{
- Type: "EC PRIVATE KEY",
- Bytes: keyBytes,
- }
- pemBytes := pem.EncodeToMemory(pemBlock)
- t.Fatalf("X: %s, Y: %s, D: %s, marshaled: %s", key.X.Text(16), key.Y.Text(16), key.D.Text(16), string(pemBytes))
- }
- */
-
- keyEntry := p.Keys[p.LatestVersion]
- _, ok := keyEntry.EC_X.SetString("7336010a6da5935113d26d9ea4bb61b3b8d102c9a8083ed432f9b58fd7e80686", 16)
- if !ok {
- t.Fatal("could not set X")
- }
- _, ok = keyEntry.EC_Y.SetString("4040aa31864691a8a9e7e3ec9250e85425b797ad7be34ba8df62bfbad45ebb0e", 16)
- if !ok {
- t.Fatal("could not set Y")
- }
- _, ok = keyEntry.EC_D.SetString("99e5569be8683a2691dfc560ca9dfa71e887867a3af60635a08a3e3655aba3ef", 16)
- if !ok {
- t.Fatal("could not set D")
- }
- p.Keys[p.LatestVersion] = keyEntry
- if err = p.Persist(storage); err != nil {
- t.Fatal(err)
- }
- req.Data = map[string]interface{}{
- "input": "dGhlIHF1aWNrIGJyb3duIGZveA==",
- }
-
- signRequest := func(req *logical.Request, errExpected bool, postpath string) string {
- req.Path = "sign/foo" + postpath
- resp, err := b.HandleRequest(req)
- if err != nil && !errExpected {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if errExpected {
- if !resp.IsError() {
- t.Fatalf("bad: should have gotten error response: %#v", *resp)
- }
- return ""
- }
- if resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- value, ok := resp.Data["signature"]
- if !ok {
- t.Fatalf("no signature key found in returned data, got resp data %#v", resp.Data)
- }
- return value.(string)
- }
-
- verifyRequest := func(req *logical.Request, errExpected bool, postpath, sig string) {
- req.Path = "verify/foo" + postpath
- req.Data["signature"] = sig
- resp, err := b.HandleRequest(req)
- if err != nil && !errExpected {
- t.Fatalf("got error: %v, sig was %v", err, sig)
- }
- if errExpected {
- if resp != nil && !resp.IsError() {
- t.Fatalf("bad: should have gotten error response: %#v", *resp)
- }
- return
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- value, ok := resp.Data["valid"]
- if !ok {
- t.Fatalf("no valid key found in returned data, got resp data %#v", resp.Data)
- }
- if !value.(bool) && !errExpected {
- t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp)
- }
- }
-
- // Comparisons are against values generated via openssl
-
- // Test defaults -- sha2-256
- sig := signRequest(req, false, "")
- verifyRequest(req, false, "", sig)
-
- // Test a bad signature
- verifyRequest(req, true, "", sig[0:len(sig)-2])
-
- // Test a signature generated with the same key by openssl
- sig = `vault:v1:MEUCIAgnEl9V8P305EBAlz68Nq4jZng5fE8k6MactcnlUw9dAiEAvJVePg3dazW6MaW7lRAVtEz82QJDVmR98tXCl8Pc7DA=`
- verifyRequest(req, false, "", sig)
-
- // Test algorithm selection in the path
- sig = signRequest(req, false, "/sha2-224")
- verifyRequest(req, false, "/sha2-224", sig)
-
- // Reset and test algorithm selection in the data
- req.Data["algorithm"] = "sha2-224"
- sig = signRequest(req, false, "")
- verifyRequest(req, false, "", sig)
-
- req.Data["algorithm"] = "sha2-384"
- sig = signRequest(req, false, "")
- verifyRequest(req, false, "", sig)
-
- // Test 512 and save sig for later to ensure we can't validate once min
- // decryption version is set
- req.Data["algorithm"] = "sha2-512"
- sig = signRequest(req, false, "")
- verifyRequest(req, false, "", sig)
-
- v1sig := sig
-
- // Test bad algorithm
- req.Data["algorithm"] = "foobar"
- signRequest(req, true, "")
-
- // Test bad input
- req.Data["algorithm"] = "sha2-256"
- req.Data["input"] = "foobar"
- signRequest(req, true, "")
-
- // Rotate and set min decryption version
- err = p.Rotate(storage)
- if err != nil {
- t.Fatal(err)
- }
- err = p.Rotate(storage)
- if err != nil {
- t.Fatal(err)
- }
-
- p.MinDecryptionVersion = 2
- if err = p.Persist(storage); err != nil {
- t.Fatal(err)
- }
-
- req.Data["input"] = "dGhlIHF1aWNrIGJyb3duIGZveA=="
- req.Data["algorithm"] = "sha2-256"
- // Make sure signing still works fine
- sig = signRequest(req, false, "")
- verifyRequest(req, false, "", sig)
- // Now try the v1
- verifyRequest(req, true, "", v1sig)
-}
-
-func TestTransit_SignVerify_ED25519(t *testing.T) {
- var b *backend
- sysView := logical.TestSystemView()
- storage := &logical.InmemStorage{}
-
- b = Backend(&logical.BackendConfig{
- StorageView: storage,
- System: sysView,
- })
-
- // First create a key
- req := &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/foo",
- Data: map[string]interface{}{
- "type": "ed25519",
- },
- }
- _, err := b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- // Now create a derived key"
- req = &logical.Request{
- Storage: storage,
- Operation: logical.UpdateOperation,
- Path: "keys/bar",
- Data: map[string]interface{}{
- "type": "ed25519",
- "derived": true,
- },
- }
- _, err = b.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- // Get the keys for later
- fooP, lock, err := b.lm.GetPolicyShared(storage, "foo")
- if err != nil {
- t.Fatal(err)
- }
- // We don't care as we're the only one using this
- lock.RUnlock()
-
- barP, lock, err := b.lm.GetPolicyShared(storage, "bar")
- if err != nil {
- t.Fatal(err)
- }
- lock.RUnlock()
-
- signRequest := func(req *logical.Request, errExpected bool, postpath string) string {
- // Delete any key that exists in the request
- delete(req.Data, "public_key")
- req.Path = "sign/" + postpath
- resp, err := b.HandleRequest(req)
- if err != nil && !errExpected {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if errExpected {
- if !resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- return ""
- }
- if resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- value, ok := resp.Data["signature"]
- if !ok {
- t.Fatalf("no signature key found in returned data, got resp data %#v", resp.Data)
- }
- // memoize any pubic key
- if key, ok := resp.Data["public_key"]; ok {
- req.Data["public_key"] = key
- }
- return value.(string)
- }
-
- verifyRequest := func(req *logical.Request, errExpected bool, postpath, sig string) {
- req.Path = "verify/" + postpath
- req.Data["signature"] = sig
- resp, err := b.HandleRequest(req)
- if err != nil && !errExpected {
- t.Fatalf("got error: %v, sig was %v", err, sig)
- }
- if errExpected {
- if resp != nil && !resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- return
- }
- if resp == nil {
- t.Fatal("expected non-nil response")
- }
- if resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
- }
- value, ok := resp.Data["valid"]
- if !ok {
- t.Fatalf("no valid key found in returned data, got resp data %#v", resp.Data)
- }
- if !value.(bool) && !errExpected {
- t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp)
- }
-
- if pubKeyRaw, ok := req.Data["public_key"]; ok {
- input, _ := base64.StdEncoding.DecodeString(req.Data["input"].(string))
- splitSig := strings.Split(sig, ":")
- signature, _ := base64.StdEncoding.DecodeString(splitSig[2])
- if !ed25519.Verify(ed25519.PublicKey(pubKeyRaw.([]byte)), input, signature) && !errExpected {
- t.Fatal("invalid signature")
- }
-
- keyReadReq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "keys/" + postpath,
- }
- keyReadResp, err := b.HandleRequest(keyReadReq)
- if err != nil {
- t.Fatal(err)
- }
- val := keyReadResp.Data["keys"].(map[string]map[string]interface{})[strings.TrimPrefix(splitSig[1], "v")]
- var ak asymKey
- if err := mapstructure.Decode(val, &ak); err != nil {
- t.Fatal(err)
- }
- if ak.PublicKey != "" {
- t.Fatal("got non-empty public key")
- }
- keyReadReq.Data = map[string]interface{}{
- "context": "abcd",
- }
- keyReadResp, err = b.HandleRequest(keyReadReq)
- if err != nil {
- t.Fatal(err)
- }
- val = keyReadResp.Data["keys"].(map[string]map[string]interface{})[strings.TrimPrefix(splitSig[1], "v")]
- if err := mapstructure.Decode(val, &ak); err != nil {
- t.Fatal(err)
- }
- if ak.PublicKey != base64.StdEncoding.EncodeToString(pubKeyRaw.([]byte)) {
- t.Fatalf("got incorrect public key; got %q, expected %q\nasymKey struct is\n%#v", ak.PublicKey, pubKeyRaw, ak)
- }
- }
- }
-
- req.Data = map[string]interface{}{
- "input": "dGhlIHF1aWNrIGJyb3duIGZveA==",
- "context": "abcd",
- }
-
- // Test defaults
- sig := signRequest(req, false, "foo")
- verifyRequest(req, false, "foo", sig)
-
- sig = signRequest(req, false, "bar")
- verifyRequest(req, false, "bar", sig)
-
- // Test a bad signature
- verifyRequest(req, true, "foo", sig[0:len(sig)-2])
- verifyRequest(req, true, "bar", sig[0:len(sig)-2])
-
- v1sig := sig
-
- // Rotate and set min decryption version
- err = fooP.Rotate(storage)
- if err != nil {
- t.Fatal(err)
- }
- err = fooP.Rotate(storage)
- if err != nil {
- t.Fatal(err)
- }
- fooP.MinDecryptionVersion = 2
- if err = fooP.Persist(storage); err != nil {
- t.Fatal(err)
- }
- err = barP.Rotate(storage)
- if err != nil {
- t.Fatal(err)
- }
- err = barP.Rotate(storage)
- if err != nil {
- t.Fatal(err)
- }
- barP.MinDecryptionVersion = 2
- if err = barP.Persist(storage); err != nil {
- t.Fatal(err)
- }
-
- // Make sure signing still works fine
- sig = signRequest(req, false, "foo")
- verifyRequest(req, false, "foo", sig)
- // Now try the v1
- verifyRequest(req, true, "foo", v1sig)
- // Repeat with the other key
- sig = signRequest(req, false, "bar")
- verifyRequest(req, false, "bar", sig)
- verifyRequest(req, true, "bar", v1sig)
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go b/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go
deleted file mode 100644
index a1c781f..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package plugin
-
-import (
- "fmt"
- "net/rpc"
- "reflect"
- "sync"
-
- uuid "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- bplugin "github.com/hashicorp/vault/logical/plugin"
-)
-
-var (
- ErrMismatchType = fmt.Errorf("mismatch on mounted backend and plugin backend type")
- ErrMismatchPaths = fmt.Errorf("mismatch on mounted backend and plugin backend special paths")
-)
-
-// Factory returns a configured plugin logical.Backend.
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- _, ok := conf.Config["plugin_name"]
- if !ok {
- return nil, fmt.Errorf("plugin_name not provided")
- }
- b, err := Backend(conf)
- if err != nil {
- return nil, err
- }
-
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-// Backend returns an instance of the backend, either as a plugin if external
-// or as a concrete implementation if builtin, casted as logical.Backend.
-func Backend(conf *logical.BackendConfig) (logical.Backend, error) {
- var b backend
-
- name := conf.Config["plugin_name"]
- sys := conf.System
-
- // NewBackend with isMetadataMode set to true
- raw, err := bplugin.NewBackend(name, sys, conf.Logger, true)
- if err != nil {
- return nil, err
- }
- err = raw.Setup(conf)
- if err != nil {
- return nil, err
- }
- // Get SpecialPaths and BackendType
- paths := raw.SpecialPaths()
- btype := raw.Type()
-
- // Cleanup meta plugin backend
- raw.Cleanup()
-
- // Initialize b.Backend with dummy backend since plugin
- // backends will need to be lazy loaded.
- b.Backend = &framework.Backend{
- PathsSpecial: paths,
- BackendType: btype,
- }
-
- b.config = conf
-
- return &b, nil
-}
-
-// backend is a thin wrapper around plugin.BackendPluginClient
-type backend struct {
- logical.Backend
- sync.RWMutex
-
- config *logical.BackendConfig
-
- // Used to detect if we already reloaded
- canary string
-
- // Used to detect if plugin is set
- loaded bool
-}
-
-func (b *backend) reloadBackend() error {
- b.Logger().Trace("plugin: reloading plugin backend", "plugin", b.config.Config["plugin_name"])
- return b.startBackend()
-}
-
-// startBackend starts a plugin backend
-func (b *backend) startBackend() error {
- pluginName := b.config.Config["plugin_name"]
-
- // Ensure proper cleanup of the backend (i.e. call client.Kill())
- b.Backend.Cleanup()
-
- nb, err := bplugin.NewBackend(pluginName, b.config.System, b.config.Logger, false)
- if err != nil {
- return err
- }
- err = nb.Setup(b.config)
- if err != nil {
- return err
- }
-
- // If the backend has not been loaded (i.e. still in metadata mode),
- // check if type and special paths still matches
- if !b.loaded {
- if b.Backend.Type() != nb.Type() {
- nb.Cleanup()
- b.Logger().Warn("plugin: failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchType)
- return ErrMismatchType
- }
- if !reflect.DeepEqual(b.Backend.SpecialPaths(), nb.SpecialPaths()) {
- nb.Cleanup()
- b.Logger().Warn("plugin: failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchPaths)
- return ErrMismatchPaths
- }
- }
-
- b.Backend = nb
- b.loaded = true
-
- // Call initialize
- if err := b.Backend.Initialize(); err != nil {
- return err
- }
-
- return nil
-}
-
-// HandleRequest is a thin wrapper implementation of HandleRequest that includes automatic plugin reload.
-func (b *backend) HandleRequest(req *logical.Request) (*logical.Response, error) {
- b.RLock()
- canary := b.canary
-
- // Lazy-load backend
- if !b.loaded {
- // Upgrade lock
- b.RUnlock()
- b.Lock()
- // Check once more after lock swap
- if !b.loaded {
- err := b.startBackend()
- if err != nil {
- b.Unlock()
- return nil, err
- }
- }
- b.Unlock()
- b.RLock()
- }
- resp, err := b.Backend.HandleRequest(req)
- b.RUnlock()
- // Need to compare string value for case were err comes from plugin RPC
- // and is returned as plugin.BasicError type.
- if err != nil && err.Error() == rpc.ErrShutdown.Error() {
- // Reload plugin if it's an rpc.ErrShutdown
- b.Lock()
- if b.canary == canary {
- err := b.reloadBackend()
- if err != nil {
- b.Unlock()
- return nil, err
- }
- b.canary, err = uuid.GenerateUUID()
- if err != nil {
- b.Unlock()
- return nil, err
- }
- }
- b.Unlock()
-
- // Try request once more
- b.RLock()
- defer b.RUnlock()
- return b.Backend.HandleRequest(req)
- }
- return resp, err
-}
-
-// HandleExistenceCheck is a thin wrapper implementation of HandleRequest that includes automatic plugin reload.
-func (b *backend) HandleExistenceCheck(req *logical.Request) (bool, bool, error) {
- b.RLock()
- canary := b.canary
-
- // Lazy-load backend
- if !b.loaded {
- // Upgrade lock
- b.RUnlock()
- b.Lock()
- // Check once more after lock swap
- if !b.loaded {
- err := b.startBackend()
- if err != nil {
- b.Unlock()
- return false, false, err
- }
- }
- b.Unlock()
- b.RLock()
- }
-
- checkFound, exists, err := b.Backend.HandleExistenceCheck(req)
- b.RUnlock()
- if err != nil && err.Error() == rpc.ErrShutdown.Error() {
- // Reload plugin if it's an rpc.ErrShutdown
- b.Lock()
- if b.canary == canary {
- err := b.reloadBackend()
- if err != nil {
- b.Unlock()
- return false, false, err
- }
- b.canary, err = uuid.GenerateUUID()
- if err != nil {
- b.Unlock()
- return false, false, err
- }
- }
- b.Unlock()
-
- // Try request once more
- b.RLock()
- defer b.RUnlock()
- return b.Backend.HandleExistenceCheck(req)
- }
- return checkFound, exists, err
-}
diff --git a/vendor/github.com/hashicorp/vault/builtin/plugin/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/plugin/backend_test.go
deleted file mode 100644
index 5b07197..0000000
--- a/vendor/github.com/hashicorp/vault/builtin/plugin/backend_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package plugin
-
-import (
- "fmt"
- "os"
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/helper/pluginutil"
- vaulthttp "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/plugin"
- "github.com/hashicorp/vault/logical/plugin/mock"
- "github.com/hashicorp/vault/vault"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestBackend_impl(t *testing.T) {
- var _ logical.Backend = &backend{}
-}
-
-func TestBackend(t *testing.T) {
- config, cleanup := testConfig(t)
- defer cleanup()
-
- _, err := Backend(config)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBackend_Factory(t *testing.T) {
- config, cleanup := testConfig(t)
- defer cleanup()
-
- _, err := Factory(config)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBackend_PluginMain(t *testing.T) {
- args := []string{}
- if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadaModeEnv) != "true" {
- return
- }
-
- caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
- if caPEM == "" {
- t.Fatal("CA cert not passed in")
- }
-
- args = append(args, fmt.Sprintf("--ca-cert=%s", caPEM))
-
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(args)
- tlsConfig := apiClientMeta.GetTLSConfig()
- tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig)
-
- err := plugin.Serve(&plugin.ServeOpts{
- BackendFactoryFunc: mock.Factory,
- TLSProviderFunc: tlsProviderFunc,
- })
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func testConfig(t *testing.T) (*logical.BackendConfig, func()) {
- cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
- HandlerFunc: vaulthttp.Handler,
- })
- cluster.Start()
- cores := cluster.Cores
-
- core := cores[0]
-
- sys := vault.TestDynamicSystemView(core.Core)
-
- config := &logical.BackendConfig{
- Logger: logformat.NewVaultLogger(log.LevelTrace),
- System: sys,
- Config: map[string]string{
- "plugin_name": "mock-plugin",
- },
- }
-
- os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
-
- vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMain")
-
- return config, func() {
- cluster.Cleanup()
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/cli/commands.go b/vendor/github.com/hashicorp/vault/cli/commands.go
deleted file mode 100644
index 22c8640..0000000
--- a/vendor/github.com/hashicorp/vault/cli/commands.go
+++ /dev/null
@@ -1,389 +0,0 @@
-package cli
-
-import (
- "os"
-
- auditFile "github.com/hashicorp/vault/builtin/audit/file"
- auditSocket "github.com/hashicorp/vault/builtin/audit/socket"
- auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/version"
-
- credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin"
- credKube "github.com/hashicorp/vault-plugin-auth-kubernetes"
- credAppId "github.com/hashicorp/vault/builtin/credential/app-id"
- credAppRole "github.com/hashicorp/vault/builtin/credential/approle"
- credAws "github.com/hashicorp/vault/builtin/credential/aws"
- credCert "github.com/hashicorp/vault/builtin/credential/cert"
- credGitHub "github.com/hashicorp/vault/builtin/credential/github"
- credLdap "github.com/hashicorp/vault/builtin/credential/ldap"
- credOkta "github.com/hashicorp/vault/builtin/credential/okta"
- credRadius "github.com/hashicorp/vault/builtin/credential/radius"
- credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
-
- physAzure "github.com/hashicorp/vault/physical/azure"
- physCassandra "github.com/hashicorp/vault/physical/cassandra"
- physCockroachDB "github.com/hashicorp/vault/physical/cockroachdb"
- physConsul "github.com/hashicorp/vault/physical/consul"
- physCouchDB "github.com/hashicorp/vault/physical/couchdb"
- physDynamoDB "github.com/hashicorp/vault/physical/dynamodb"
- physEtcd "github.com/hashicorp/vault/physical/etcd"
- physFile "github.com/hashicorp/vault/physical/file"
- physGCS "github.com/hashicorp/vault/physical/gcs"
- physInmem "github.com/hashicorp/vault/physical/inmem"
- physMSSQL "github.com/hashicorp/vault/physical/mssql"
- physMySQL "github.com/hashicorp/vault/physical/mysql"
- physPostgreSQL "github.com/hashicorp/vault/physical/postgresql"
- physS3 "github.com/hashicorp/vault/physical/s3"
- physSwift "github.com/hashicorp/vault/physical/swift"
- physZooKeeper "github.com/hashicorp/vault/physical/zookeeper"
-
- "github.com/hashicorp/vault/builtin/logical/aws"
- "github.com/hashicorp/vault/builtin/logical/cassandra"
- "github.com/hashicorp/vault/builtin/logical/consul"
- "github.com/hashicorp/vault/builtin/logical/database"
- "github.com/hashicorp/vault/builtin/logical/mongodb"
- "github.com/hashicorp/vault/builtin/logical/mssql"
- "github.com/hashicorp/vault/builtin/logical/mysql"
- "github.com/hashicorp/vault/builtin/logical/pki"
- "github.com/hashicorp/vault/builtin/logical/postgresql"
- "github.com/hashicorp/vault/builtin/logical/rabbitmq"
- "github.com/hashicorp/vault/builtin/logical/ssh"
- "github.com/hashicorp/vault/builtin/logical/totp"
- "github.com/hashicorp/vault/builtin/logical/transit"
- "github.com/hashicorp/vault/builtin/plugin"
-
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/command"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/meta"
- "github.com/mitchellh/cli"
-)
-
-// Commands returns the mapping of CLI commands for Vault. The meta
-// parameter lets you set meta options for all commands.
-func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory {
- if metaPtr == nil {
- metaPtr = &meta.Meta{
- TokenHelper: command.DefaultTokenHelper,
- }
- }
-
- if metaPtr.Ui == nil {
- metaPtr.Ui = &cli.BasicUi{
- Writer: os.Stdout,
- ErrorWriter: os.Stderr,
- }
- }
-
- return map[string]cli.CommandFactory{
- "init": func() (cli.Command, error) {
- return &command.InitCommand{
- Meta: *metaPtr,
- }, nil
- },
- "server": func() (cli.Command, error) {
- c := &command.ServerCommand{
- Meta: *metaPtr,
- AuditBackends: map[string]audit.Factory{
- "file": auditFile.Factory,
- "syslog": auditSyslog.Factory,
- "socket": auditSocket.Factory,
- },
- CredentialBackends: map[string]logical.Factory{
- "approle": credAppRole.Factory,
- "cert": credCert.Factory,
- "aws": credAws.Factory,
- "app-id": credAppId.Factory,
- "gcp": credGcp.Factory,
- "github": credGitHub.Factory,
- "userpass": credUserpass.Factory,
- "ldap": credLdap.Factory,
- "okta": credOkta.Factory,
- "radius": credRadius.Factory,
- "kubernetes": credKube.Factory,
- "plugin": plugin.Factory,
- },
- LogicalBackends: map[string]logical.Factory{
- "aws": aws.Factory,
- "consul": consul.Factory,
- "postgresql": postgresql.Factory,
- "cassandra": cassandra.Factory,
- "pki": pki.Factory,
- "transit": transit.Factory,
- "mongodb": mongodb.Factory,
- "mssql": mssql.Factory,
- "mysql": mysql.Factory,
- "ssh": ssh.Factory,
- "rabbitmq": rabbitmq.Factory,
- "database": database.Factory,
- "totp": totp.Factory,
- "plugin": plugin.Factory,
- },
-
- ShutdownCh: command.MakeShutdownCh(),
- SighupCh: command.MakeSighupCh(),
- }
-
- c.PhysicalBackends = map[string]physical.Factory{
- "azure": physAzure.NewAzureBackend,
- "cassandra": physCassandra.NewCassandraBackend,
- "cockroachdb": physCockroachDB.NewCockroachDBBackend,
- "consul": physConsul.NewConsulBackend,
- "couchdb": physCouchDB.NewCouchDBBackend,
- "couchdb_transactional": physCouchDB.NewTransactionalCouchDBBackend,
- "dynamodb": physDynamoDB.NewDynamoDBBackend,
- "etcd": physEtcd.NewEtcdBackend,
- "file": physFile.NewFileBackend,
- "file_transactional": physFile.NewTransactionalFileBackend,
- "gcs": physGCS.NewGCSBackend,
- "inmem": physInmem.NewInmem,
- "inmem_ha": physInmem.NewInmemHA,
- "inmem_transactional": physInmem.NewTransactionalInmem,
- "inmem_transactional_ha": physInmem.NewTransactionalInmemHA,
- "mssql": physMSSQL.NewMSSQLBackend,
- "mysql": physMySQL.NewMySQLBackend,
- "postgresql": physPostgreSQL.NewPostgreSQLBackend,
- "s3": physS3.NewS3Backend,
- "swift": physSwift.NewSwiftBackend,
- "zookeeper": physZooKeeper.NewZooKeeperBackend,
- }
-
- return c, nil
- },
-
- "ssh": func() (cli.Command, error) {
- return &command.SSHCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "path-help": func() (cli.Command, error) {
- return &command.PathHelpCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "auth": func() (cli.Command, error) {
- return &command.AuthCommand{
- Meta: *metaPtr,
- Handlers: map[string]command.AuthHandler{
- "github": &credGitHub.CLIHandler{},
- "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"},
- "ldap": &credLdap.CLIHandler{},
- "okta": &credOkta.CLIHandler{},
- "cert": &credCert.CLIHandler{},
- "aws": &credAws.CLIHandler{},
- "radius": &credUserpass.CLIHandler{DefaultMount: "radius"},
- },
- }, nil
- },
-
- "auth-enable": func() (cli.Command, error) {
- return &command.AuthEnableCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "auth-disable": func() (cli.Command, error) {
- return &command.AuthDisableCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "audit-list": func() (cli.Command, error) {
- return &command.AuditListCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "audit-disable": func() (cli.Command, error) {
- return &command.AuditDisableCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "audit-enable": func() (cli.Command, error) {
- return &command.AuditEnableCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "key-status": func() (cli.Command, error) {
- return &command.KeyStatusCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "policies": func() (cli.Command, error) {
- return &command.PolicyListCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "policy-delete": func() (cli.Command, error) {
- return &command.PolicyDeleteCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "policy-write": func() (cli.Command, error) {
- return &command.PolicyWriteCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "read": func() (cli.Command, error) {
- return &command.ReadCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "unwrap": func() (cli.Command, error) {
- return &command.UnwrapCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "list": func() (cli.Command, error) {
- return &command.ListCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "write": func() (cli.Command, error) {
- return &command.WriteCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "delete": func() (cli.Command, error) {
- return &command.DeleteCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "rekey": func() (cli.Command, error) {
- return &command.RekeyCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "generate-root": func() (cli.Command, error) {
- return &command.GenerateRootCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "renew": func() (cli.Command, error) {
- return &command.RenewCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "revoke": func() (cli.Command, error) {
- return &command.RevokeCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "seal": func() (cli.Command, error) {
- return &command.SealCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "status": func() (cli.Command, error) {
- return &command.StatusCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "unseal": func() (cli.Command, error) {
- return &command.UnsealCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "step-down": func() (cli.Command, error) {
- return &command.StepDownCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "mount": func() (cli.Command, error) {
- return &command.MountCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "mounts": func() (cli.Command, error) {
- return &command.MountsCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "mount-tune": func() (cli.Command, error) {
- return &command.MountTuneCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "remount": func() (cli.Command, error) {
- return &command.RemountCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "rotate": func() (cli.Command, error) {
- return &command.RotateCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "unmount": func() (cli.Command, error) {
- return &command.UnmountCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "token-create": func() (cli.Command, error) {
- return &command.TokenCreateCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "token-lookup": func() (cli.Command, error) {
- return &command.TokenLookupCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "token-renew": func() (cli.Command, error) {
- return &command.TokenRenewCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "token-revoke": func() (cli.Command, error) {
- return &command.TokenRevokeCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "capabilities": func() (cli.Command, error) {
- return &command.CapabilitiesCommand{
- Meta: *metaPtr,
- }, nil
- },
-
- "version": func() (cli.Command, error) {
- versionInfo := version.GetVersion()
-
- return &command.VersionCommand{
- VersionInfo: versionInfo,
- Ui: metaPtr.Ui,
- }, nil
- },
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/cli/help.go b/vendor/github.com/hashicorp/vault/cli/help.go
deleted file mode 100644
index bd66e33..0000000
--- a/vendor/github.com/hashicorp/vault/cli/help.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package cli
-
-import (
- "bytes"
- "fmt"
- "sort"
- "strings"
-
- "github.com/mitchellh/cli"
-)
-
-// HelpFunc is a cli.HelpFunc that can is used to output the help for Vault.
-func HelpFunc(commands map[string]cli.CommandFactory) string {
- commonNames := map[string]struct{}{
- "delete": struct{}{},
- "path-help": struct{}{},
- "read": struct{}{},
- "renew": struct{}{},
- "revoke": struct{}{},
- "write": struct{}{},
- "server": struct{}{},
- "status": struct{}{},
- "unwrap": struct{}{},
- }
-
- // Determine the maximum key length, and classify based on type
- commonCommands := make(map[string]cli.CommandFactory)
- otherCommands := make(map[string]cli.CommandFactory)
- maxKeyLen := 0
- for key, f := range commands {
- if len(key) > maxKeyLen {
- maxKeyLen = len(key)
- }
-
- if _, ok := commonNames[key]; ok {
- commonCommands[key] = f
- } else {
- otherCommands[key] = f
- }
- }
-
- var buf bytes.Buffer
- buf.WriteString("usage: vault [-version] [-help] [args]\n\n")
- buf.WriteString("Common commands:\n")
- buf.WriteString(listCommands(commonCommands, maxKeyLen))
- buf.WriteString("\nAll other commands:\n")
- buf.WriteString(listCommands(otherCommands, maxKeyLen))
- return buf.String()
-}
-
-// listCommands just lists the commands in the map with the
-// given maximum key length.
-func listCommands(commands map[string]cli.CommandFactory, maxKeyLen int) string {
- var buf bytes.Buffer
-
- // Get the list of keys so we can sort them, and also get the maximum
- // key length so they can be aligned properly.
- keys := make([]string, 0, len(commands))
- for key, _ := range commands {
- keys = append(keys, key)
- }
- sort.Strings(keys)
-
- for _, key := range keys {
- commandFunc, ok := commands[key]
- if !ok {
- // This should never happen since we JUST built the list of
- // keys.
- panic("command not found: " + key)
- }
-
- command, err := commandFunc()
- if err != nil {
- panic(fmt.Sprintf("command '%s' failed to load: %s", key, err))
- }
-
- key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key)))
- buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis()))
- }
-
- return buf.String()
-}
diff --git a/vendor/github.com/hashicorp/vault/cli/main.go b/vendor/github.com/hashicorp/vault/cli/main.go
deleted file mode 100644
index 000e1e9..0000000
--- a/vendor/github.com/hashicorp/vault/cli/main.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package cli
-
-import (
- "fmt"
- "os"
-
- "github.com/mitchellh/cli"
-)
-
-func Run(args []string) int {
- return RunCustom(args, Commands(nil))
-}
-
-func RunCustom(args []string, commands map[string]cli.CommandFactory) int {
- // Get the command line args. We shortcut "--version" and "-v" to
- // just show the version.
- for _, arg := range args {
- if arg == "-v" || arg == "-version" || arg == "--version" {
- newArgs := make([]string, len(args)+1)
- newArgs[0] = "version"
- copy(newArgs[1:], args)
- args = newArgs
- break
- }
- }
-
- // Build the commands to include in the help now. This is pretty...
- // tedious, but we don't have a better way at the moment.
- commandsInclude := make([]string, 0, len(commands))
- for k, _ := range commands {
- switch k {
- case "token-disk":
- default:
- commandsInclude = append(commandsInclude, k)
- }
- }
-
- cli := &cli.CLI{
- Args: args,
- Commands: commands,
- Name: "vault",
- Autocomplete: true,
- HelpFunc: cli.FilteredHelpFunc(commandsInclude, HelpFunc),
- }
-
- exitCode, err := cli.Run()
- if err != nil {
- fmt.Fprintf(os.Stderr, "Error executing CLI: %s\n", err.Error())
- return 1
- }
-
- return exitCode
-}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_disable.go b/vendor/github.com/hashicorp/vault/command/audit_disable.go
deleted file mode 100644
index 31c4457..0000000
--- a/vendor/github.com/hashicorp/vault/command/audit_disable.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// AuditDisableCommand is a Command that mounts a new mount.
-type AuditDisableCommand struct {
- meta.Meta
-}
-
-func (c *AuditDisableCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("mount", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\naudit-disable expects one argument: the id to disable"))
- return 1
- }
-
- id := args[0]
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- if err := client.Sys().DisableAudit(id); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error disabling audit backend: %s", err))
- return 2
- }
-
- c.Ui.Output(fmt.Sprintf(
- "Successfully disabled audit backend '%s' if it was enabled", id))
-
- return 0
-}
-
-func (c *AuditDisableCommand) Synopsis() string {
- return "Disable an audit backend"
-}
-
-func (c *AuditDisableCommand) Help() string {
- helpText := `
-Usage: vault audit-disable [options] id
-
- Disable an audit backend.
-
- Once the audit backend is disabled no more audit logs will be sent to
- it. The data associated with the audit backend isn't affected.
-
- The "id" parameter should map to the "path" used in "audit-enable". If
- no path was provided to "audit-enable" you should use the backend
- type (e.g. "file").
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_disable_test.go b/vendor/github.com/hashicorp/vault/command/audit_disable_test.go
deleted file mode 100644
index 500ee9c..0000000
--- a/vendor/github.com/hashicorp/vault/command/audit_disable_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestAuditDisable(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &AuditDisableCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "noop",
- }
-
- // Run once to get the client
- c.Run(args)
-
- // Get the client
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %#v", err)
- }
- if err := client.Sys().EnableAudit("noop", "noop", "", nil); err != nil {
- t.Fatalf("err: %#v", err)
- }
-
- // Run again
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestAuditDisableWithOptions(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &AuditDisableCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "noop",
- }
-
- // Run once to get the client
- c.Run(args)
-
- // Get the client
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %#v", err)
- }
- if err := client.Sys().EnableAuditWithOptions("noop", &api.EnableAuditOptions{
- Type: "noop",
- Description: "noop",
- }); err != nil {
- t.Fatalf("err: %#v", err)
- }
-
- // Run again
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_enable.go b/vendor/github.com/hashicorp/vault/command/audit_enable.go
deleted file mode 100644
index 680a94e..0000000
--- a/vendor/github.com/hashicorp/vault/command/audit_enable.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package command
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/kv-builder"
- "github.com/hashicorp/vault/meta"
- "github.com/mitchellh/mapstructure"
- "github.com/posener/complete"
-)
-
-// AuditEnableCommand is a Command that mounts a new mount.
-type AuditEnableCommand struct {
- meta.Meta
-
- // A test stdin that can be used for tests
- testStdin io.Reader
-}
-
-func (c *AuditEnableCommand) Run(args []string) int {
- var desc, path string
- var local bool
- flags := c.Meta.FlagSet("audit-enable", meta.FlagSetDefault)
- flags.StringVar(&desc, "description", "", "")
- flags.StringVar(&path, "path", "", "")
- flags.BoolVar(&local, "local", false, "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) < 1 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\naudit-enable expects at least one argument: the type to enable"))
- return 1
- }
-
- auditType := args[0]
- if path == "" {
- path = auditType
- }
-
- // Build the options
- var stdin io.Reader = os.Stdin
- if c.testStdin != nil {
- stdin = c.testStdin
- }
- builder := &kvbuilder.Builder{Stdin: stdin}
- if err := builder.Add(args[1:]...); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error parsing options: %s", err))
- return 1
- }
-
- var opts map[string]string
- if err := mapstructure.WeakDecode(builder.Map(), &opts); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error parsing options: %s", err))
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 1
- }
-
- err = client.Sys().EnableAuditWithOptions(path, &api.EnableAuditOptions{
- Type: auditType,
- Description: desc,
- Options: opts,
- Local: local,
- })
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error enabling audit backend: %s", err))
- return 1
- }
-
- c.Ui.Output(fmt.Sprintf(
- "Successfully enabled audit backend '%s' with path '%s'!", auditType, path))
- return 0
-}
-
-func (c *AuditEnableCommand) Synopsis() string {
- return "Enable an audit backend"
-}
-
-func (c *AuditEnableCommand) Help() string {
- helpText := `
-Usage: vault audit-enable [options] type [config...]
-
- Enable an audit backend.
-
- This command enables an audit backend of type "type". Additional
- options for configuring the audit backend can be specified after the
- type in the same format as the "vault write" command in key/value pairs.
-
- For example, to configure the file audit backend to write audit logs at
- the path /var/log/audit.log:
-
- $ vault audit-enable file file_path=/var/log/audit.log
-
- For information on available configuration options, please see the
- documentation.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Audit Enable Options:
-
- -description= A human-friendly description for the backend. This
- shows up only when querying the enabled backends.
-
- -path= Specify a unique path for this audit backend. This
- is purely for referencing this audit backend. By
- default this will be the backend type.
-
- -local Mark the mount as a local mount. Local mounts
- are not replicated nor (if a secondary)
- removed by replication.
-`
- return strings.TrimSpace(helpText)
-}
-
-func (c *AuditEnableCommand) AutocompleteArgs() complete.Predictor {
- return complete.PredictSet(
- "file",
- "syslog",
- "socket",
- )
-}
-
-func (c *AuditEnableCommand) AutocompleteFlags() complete.Flags {
- return complete.Flags{
- "-description": complete.PredictNothing,
- "-path": complete.PredictNothing,
- "-local": complete.PredictNothing,
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_enable_test.go b/vendor/github.com/hashicorp/vault/command/audit_enable_test.go
deleted file mode 100644
index 118f103..0000000
--- a/vendor/github.com/hashicorp/vault/command/audit_enable_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package command
-
-import (
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestAuditEnable(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &AuditEnableCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "noop",
- "foo=bar",
- }
-
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Get the client
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %#v", err)
- }
-
- audits, err := client.Sys().ListAudit()
- if err != nil {
- t.Fatalf("err: %#v", err)
- }
-
- audit, ok := audits["noop/"]
- if !ok {
- t.Fatalf("err: %#v", audits)
- }
-
- expected := map[string]string{"foo": "bar"}
- if !reflect.DeepEqual(audit.Options, expected) {
- t.Fatalf("err: %#v", audit)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_list.go b/vendor/github.com/hashicorp/vault/command/audit_list.go
deleted file mode 100644
index b9914eb..0000000
--- a/vendor/github.com/hashicorp/vault/command/audit_list.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package command
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/hashicorp/vault/meta"
- "github.com/ryanuber/columnize"
-)
-
-// AuditListCommand is a Command that lists the enabled audits.
-type AuditListCommand struct {
- meta.Meta
-}
-
-func (c *AuditListCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("audit-list", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- audits, err := client.Sys().ListAudit()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error reading audits: %s", err))
- return 2
- }
-
- if len(audits) == 0 {
- c.Ui.Error(fmt.Sprintf(
- "No audit backends are enabled. Use `vault audit-enable` to\n" +
- "enable an audit backend."))
- return 1
- }
-
- paths := make([]string, 0, len(audits))
- for path, _ := range audits {
- paths = append(paths, path)
- }
- sort.Strings(paths)
-
- columns := []string{"Path | Type | Description | Replication Behavior | Options"}
- for _, path := range paths {
- audit := audits[path]
- opts := make([]string, 0, len(audit.Options))
- for k, v := range audit.Options {
- opts = append(opts, k+"="+v)
- }
- replicatedBehavior := "replicated"
- if audit.Local {
- replicatedBehavior = "local"
- }
- columns = append(columns, fmt.Sprintf(
- "%s | %s | %s | %s | %s", audit.Path, audit.Type, audit.Description, replicatedBehavior, strings.Join(opts, " ")))
- }
-
- c.Ui.Output(columnize.SimpleFormat(columns))
- return 0
-}
-
-func (c *AuditListCommand) Synopsis() string {
- return "Lists enabled audit backends in Vault"
-}
-
-func (c *AuditListCommand) Help() string {
- helpText := `
-Usage: vault audit-list [options]
-
- List the enabled audit backends.
-
- The output lists the enabled audit backends and the options for those
- backends. The options may contain sensitive information, and therefore
- only a root Vault user can view this.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/audit_list_test.go b/vendor/github.com/hashicorp/vault/command/audit_list_test.go
deleted file mode 100644
index 01d4f83..0000000
--- a/vendor/github.com/hashicorp/vault/command/audit_list_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestAuditList(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &AuditListCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
-
- // Run once to get the client
- c.Run(args)
-
- // Get the client
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %#v", err)
- }
- if err := client.Sys().EnableAuditWithOptions("foo", &api.EnableAuditOptions{
- Type: "noop",
- Description: "noop",
- Options: nil,
- }); err != nil {
- t.Fatalf("err: %#v", err)
- }
-
- // Run again
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/auth.go b/vendor/github.com/hashicorp/vault/command/auth.go
deleted file mode 100644
index 00b21ce..0000000
--- a/vendor/github.com/hashicorp/vault/command/auth.go
+++ /dev/null
@@ -1,557 +0,0 @@
-package command
-
-import (
- "bufio"
- "encoding/json"
- "fmt"
- "io"
- "os"
- "sort"
- "strconv"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/kv-builder"
- "github.com/hashicorp/vault/helper/password"
- "github.com/hashicorp/vault/meta"
- "github.com/mitchellh/mapstructure"
- "github.com/posener/complete"
- "github.com/ryanuber/columnize"
-)
-
-// AuthHandler is the interface that any auth handlers must implement
-// to enable auth via the CLI.
-type AuthHandler interface {
- Auth(*api.Client, map[string]string) (*api.Secret, error)
- Help() string
-}
-
-// AuthCommand is a Command that handles authentication.
-type AuthCommand struct {
- meta.Meta
-
- Handlers map[string]AuthHandler
-
- // The fields below can be overwritten for tests
- testStdin io.Reader
-}
-
-func (c *AuthCommand) Run(args []string) int {
- var method, authPath string
- var methods, methodHelp, noVerify, noStore, tokenOnly bool
- flags := c.Meta.FlagSet("auth", meta.FlagSetDefault)
- flags.BoolVar(&methods, "methods", false, "")
- flags.BoolVar(&methodHelp, "method-help", false, "")
- flags.BoolVar(&noVerify, "no-verify", false, "")
- flags.BoolVar(&noStore, "no-store", false, "")
- flags.BoolVar(&tokenOnly, "token-only", false, "")
- flags.StringVar(&method, "method", "", "method")
- flags.StringVar(&authPath, "path", "", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- if methods {
- return c.listMethods()
- }
-
- args = flags.Args()
-
- tokenHelper, err := c.TokenHelper()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing token helper: %s\n\n"+
- "Please verify that the token helper is available and properly\n"+
- "configured for your system. Please refer to the documentation\n"+
- "on token helpers for more information.",
- err))
- return 1
- }
-
- // token is where the final token will go
- handler := c.Handlers[method]
-
- // Read token from stdin if first arg is exactly "-"
- var stdin io.Reader = os.Stdin
- if c.testStdin != nil {
- stdin = c.testStdin
- }
-
- if len(args) > 0 && args[0] == "-" {
- stdinR := bufio.NewReader(stdin)
- args[0], err = stdinR.ReadString('\n')
- if err != nil && err != io.EOF {
- c.Ui.Error(fmt.Sprintf("Error reading from stdin: %s", err))
- return 1
- }
- args[0] = strings.TrimSpace(args[0])
- }
-
- if method == "" {
- token := ""
- if len(args) > 0 {
- token = args[0]
- }
-
- handler = &tokenAuthHandler{Token: token}
- args = nil
-
- switch authPath {
- case "", "auth/token":
- default:
- c.Ui.Error("Token authentication does not support custom paths")
- return 1
- }
- }
-
- if handler == nil {
- methods := make([]string, 0, len(c.Handlers))
- for k := range c.Handlers {
- methods = append(methods, k)
- }
- sort.Strings(methods)
-
- c.Ui.Error(fmt.Sprintf(
- "Unknown authentication method: %s\n\n"+
- "Please use a supported authentication method. The list of supported\n"+
- "authentication methods is shown below. Note that this list may not\n"+
- "be exhaustive: Vault may support other auth methods. For auth methods\n"+
- "unsupported by the CLI, please use the HTTP API.\n\n"+
- "%s",
- method,
- strings.Join(methods, ", ")))
- return 1
- }
-
- if methodHelp {
- c.Ui.Output(handler.Help())
- return 0
- }
-
- // Warn if the VAULT_TOKEN environment variable is set, as that will take
- // precedence. Don't output on token-only since we're likely piping output.
- if os.Getenv("VAULT_TOKEN") != "" && !tokenOnly {
- c.Ui.Output("==> WARNING: VAULT_TOKEN environment variable set!\n")
- c.Ui.Output(" The environment variable takes precedence over the value")
- c.Ui.Output(" set by the auth command. Either update the value of the")
- c.Ui.Output(" environment variable or unset it to use the new token.\n")
- }
-
- var vars map[string]string
- if len(args) > 0 {
- builder := kvbuilder.Builder{Stdin: os.Stdin}
- if err := builder.Add(args...); err != nil {
- c.Ui.Error(err.Error())
- return 1
- }
-
- if err := mapstructure.Decode(builder.Map(), &vars); err != nil {
- c.Ui.Error(fmt.Sprintf("Error parsing options: %s", err))
- return 1
- }
- } else {
- vars = make(map[string]string)
- }
-
- // Build the client so we can auth
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client to auth: %s", err))
- return 1
- }
-
- if authPath != "" {
- vars["mount"] = authPath
- }
-
- // Authenticate
- secret, err := handler.Auth(client, vars)
- if err != nil {
- c.Ui.Error(err.Error())
- return 1
- }
- if secret == nil {
- c.Ui.Error("Empty response from auth helper")
- return 1
- }
-
- // If we had requested a wrapped token, we want to unset that request
- // before performing further functions
- client.SetWrappingLookupFunc(func(string, string) string {
- return ""
- })
-
-CHECK_TOKEN:
- var token string
- switch {
- case secret == nil:
- c.Ui.Error("Empty response from auth helper")
- return 1
-
- case secret.Auth != nil:
- token = secret.Auth.ClientToken
-
- case secret.WrapInfo != nil:
- if secret.WrapInfo.WrappedAccessor == "" {
- c.Ui.Error("Got a wrapped response from Vault but wrapped reply does not seem to contain a token")
- return 1
- }
- if tokenOnly {
- c.Ui.Output(secret.WrapInfo.Token)
- return 0
- }
- if noStore {
- return OutputSecret(c.Ui, "table", secret)
- }
- client.SetToken(secret.WrapInfo.Token)
- secret, err = client.Logical().Unwrap("")
- goto CHECK_TOKEN
-
- default:
- c.Ui.Error("No auth or wrapping info in auth helper response")
- return 1
- }
-
- // Cache the previous token so that it can be restored if authentication fails
- var previousToken string
- if previousToken, err = tokenHelper.Get(); err != nil {
- c.Ui.Error(fmt.Sprintf("Error caching the previous token: %s\n\n", err))
- return 1
- }
-
- if tokenOnly {
- c.Ui.Output(token)
- return 0
- }
-
- // Store the token!
- if !noStore {
- if err := tokenHelper.Store(token); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error storing token: %s\n\n"+
- "Authentication was not successful and did not persist.\n"+
- "Please reauthenticate, or fix the issue above if possible.",
- err))
- return 1
- }
- }
-
- if noVerify {
- c.Ui.Output(fmt.Sprintf(
- "Authenticated - no token verification has been performed.",
- ))
-
- if noStore {
- if err := tokenHelper.Erase(); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error removing prior token: %s\n\n"+
- "Authentication was successful, but unable to remove the\n"+
- "previous token.",
- err))
- return 1
- }
- }
- return 0
- }
-
- // Build the client again so it can read the token we just wrote
- client, err = c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client to verify the token: %s", err))
- if !noStore {
- if err := tokenHelper.Store(previousToken); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error restoring the previous token: %s\n\n"+
- "Please reauthenticate with a valid token.",
- err))
- }
- }
- return 1
- }
- client.SetWrappingLookupFunc(func(string, string) string {
- return ""
- })
-
- // If in no-store mode it won't have read the token from a token-helper (or
- // will read an old one) so set it explicitly
- if noStore {
- client.SetToken(token)
- }
-
- // Verify the token
- secret, err = client.Auth().Token().LookupSelf()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error validating token: %s", err))
- if err := tokenHelper.Store(previousToken); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error restoring the previous token: %s\n\n"+
- "Please reauthenticate with a valid token.",
- err))
- }
- return 1
- }
- if secret == nil && !noStore {
- c.Ui.Error(fmt.Sprintf("Error: Invalid token"))
- if err := tokenHelper.Store(previousToken); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error restoring the previous token: %s\n\n"+
- "Please reauthenticate with a valid token.",
- err))
- }
- return 1
- }
-
- if noStore {
- if err := tokenHelper.Erase(); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error removing prior token: %s\n\n"+
- "Authentication was successful, but unable to remove the\n"+
- "previous token.",
- err))
- return 1
- }
- }
-
- // Get the policies we have
- policiesRaw, ok := secret.Data["policies"]
- if !ok || policiesRaw == nil {
- policiesRaw = []interface{}{"unknown"}
- }
- var policies []string
- for _, v := range policiesRaw.([]interface{}) {
- policies = append(policies, v.(string))
- }
-
- output := "Successfully authenticated! You are now logged in."
- if noStore {
- output += "\nThe token has not been stored to the configured token helper."
- }
- if method != "" {
- output += "\nThe token below is already saved in the session. You do not"
- output += "\nneed to \"vault auth\" again with the token."
- }
- output += fmt.Sprintf("\ntoken: %s", secret.Data["id"])
- output += fmt.Sprintf("\ntoken_duration: %s", secret.Data["ttl"].(json.Number).String())
- if len(policies) > 0 {
- output += fmt.Sprintf("\ntoken_policies: %v", policies)
- }
-
- c.Ui.Output(output)
-
- return 0
-
-}
-
-func (c *AuthCommand) getMethods() (map[string]*api.AuthMount, error) {
- client, err := c.Client()
- if err != nil {
- return nil, err
- }
- client.SetWrappingLookupFunc(func(string, string) string {
- return ""
- })
-
- auth, err := client.Sys().ListAuth()
- if err != nil {
- return nil, err
- }
-
- return auth, nil
-}
-
-func (c *AuthCommand) listMethods() int {
- auth, err := c.getMethods()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error reading auth table: %s", err))
- return 1
- }
-
- paths := make([]string, 0, len(auth))
- for path := range auth {
- paths = append(paths, path)
- }
- sort.Strings(paths)
-
- columns := []string{"Path | Type | Accessor | Default TTL | Max TTL | Replication Behavior | Description"}
- for _, path := range paths {
- auth := auth[path]
- defTTL := "system"
- if auth.Config.DefaultLeaseTTL != 0 {
- defTTL = strconv.Itoa(auth.Config.DefaultLeaseTTL)
- }
- maxTTL := "system"
- if auth.Config.MaxLeaseTTL != 0 {
- maxTTL = strconv.Itoa(auth.Config.MaxLeaseTTL)
- }
- replicatedBehavior := "replicated"
- if auth.Local {
- replicatedBehavior = "local"
- }
- columns = append(columns, fmt.Sprintf(
- "%s | %s | %s | %s | %s | %s | %s", path, auth.Type, auth.Accessor, defTTL, maxTTL, replicatedBehavior, auth.Description))
- }
-
- c.Ui.Output(columnize.SimpleFormat(columns))
- return 0
-}
-
-func (c *AuthCommand) Synopsis() string {
- return "Prints information about how to authenticate with Vault"
-}
-
-func (c *AuthCommand) Help() string {
- helpText := `
-Usage: vault auth [options] [auth-information]
-
- Authenticate with Vault using the given token or via any supported
- authentication backend.
-
- By default, the -method is assumed to be token. If not supplied via the
- command-line, a prompt for input will be shown. If the authentication
- information is "-", it will be read from stdin.
-
- The -method option allows alternative authentication methods to be used,
- such as userpass, GitHub, or TLS certificates. For these, additional
- values as "key=value" pairs may be required. For example, to authenticate
- to the userpass auth backend:
-
- $ vault auth -method=userpass username=my-username
-
- Use "-method-help" to get help for a specific method.
-
- If an auth backend is enabled at a different path, the "-method" flag
- should still point to the canonical name, and the "-path" flag should be
- used. If a GitHub auth backend was mounted as "github-private", one would
- authenticate to this backend via:
-
- $ vault auth -method=github -path=github-private
-
- The value of the "-path" flag is supplied to auth providers as the "mount"
- option in the payload to specify the mount point.
-
- If response wrapping is used (via -wrap-ttl), the returned token will be
- automatically unwrapped unless:
- * -token-only is used, in which case the wrapping token will be output
- * -no-store is used, in which case the details of the wrapping token
- will be printed
-
-General Options:
-
- ` + meta.GeneralOptionsUsage() + `
-
-Auth Options:
-
- -method=name Use the method given here, which is a type of backend, not
- the path. If this authentication method is not available,
- exit with code 1.
-
- -method-help If set, the help for the selected method will be shown.
-
- -methods List the available auth methods.
-
- -no-verify Do not verify the token after creation; avoids a use count
- decrement.
-
- -no-store Do not store the token after creation; it will only be
- displayed in the command output.
-
- -token-only Output only the token to stdout. This implies -no-verify
- and -no-store.
-
- -path The path at which the auth backend is enabled. If an auth
- backend is mounted at multiple paths, this option can be
- used to authenticate against specific paths.
-`
- return strings.TrimSpace(helpText)
-}
-
-// tokenAuthHandler handles retrieving the token from the command-line.
-type tokenAuthHandler struct {
- Token string
-}
-
-func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (*api.Secret, error) {
- token := h.Token
- if token == "" {
- var err error
-
- // No arguments given, read the token from user input
- fmt.Printf("Token (will be hidden): ")
- token, err = password.Read(os.Stdin)
- fmt.Printf("\n")
- if err != nil {
- return nil, fmt.Errorf(
- "Error attempting to ask for token. The raw error message\n"+
- "is shown below, but the most common reason for this error is\n"+
- "that you attempted to pipe a value into auth. If you want to\n"+
- "pipe the token, please pass '-' as the token argument.\n\n"+
- "Raw error: %s", err)
- }
- }
-
- if token == "" {
- return nil, fmt.Errorf(
- "A token must be passed to auth. Please view the help\n" +
- "for more information.")
- }
-
- return &api.Secret{
- Auth: &api.SecretAuth{
- ClientToken: token,
- },
- }, nil
-}
-
-func (h *tokenAuthHandler) Help() string {
- help := `
-No method selected with the "-method" flag, so the "auth" command assumes
-you'll be using raw token authentication. For this, specify the token to
-authenticate as the parameter to "vault auth". Example:
-
- vault auth 123456
-
-The token used to authenticate must come from some other source. A root
-token is created when Vault is first initialized. After that, subsequent
-tokens are created via the API or command line interface (with the
-"token"-prefixed commands).
-`
-
- return strings.TrimSpace(help)
-}
-
-func (c *AuthCommand) AutocompleteArgs() complete.Predictor {
- return complete.PredictNothing
-}
-
-func (c *AuthCommand) AutocompleteFlags() complete.Flags {
- var predictFunc complete.PredictFunc = func(a complete.Args) []string {
- auths, err := c.getMethods()
- if err != nil {
- return []string{}
- }
-
- methods := make([]string, 0, len(auths))
- for _, auth := range auths {
- if strings.HasPrefix(auth.Type, a.Last) {
- methods = append(methods, auth.Type)
- }
- }
-
- return methods
- }
-
- return complete.Flags{
- "-method": predictFunc,
- "-methods": complete.PredictNothing,
- "-method-help": complete.PredictNothing,
- "-no-verify": complete.PredictNothing,
- "-no-store": complete.PredictNothing,
- "-token-only": complete.PredictNothing,
- "-path": complete.PredictNothing,
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/auth_disable.go b/vendor/github.com/hashicorp/vault/command/auth_disable.go
deleted file mode 100644
index 621ce59..0000000
--- a/vendor/github.com/hashicorp/vault/command/auth_disable.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// AuthDisableCommand is a Command that enables a new endpoint.
-type AuthDisableCommand struct {
- meta.Meta
-}
-
-func (c *AuthDisableCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("auth-disable", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\nauth-disable expects one argument: the path to disable."))
- return 1
- }
-
- path := args[0]
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- if err := client.Sys().DisableAuth(path); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error: %s", err))
- return 2
- }
-
- c.Ui.Output(fmt.Sprintf(
- "Disabled auth provider at path '%s' if it was enabled", path))
-
- return 0
-}
-
-func (c *AuthDisableCommand) Synopsis() string {
- return "Disable an auth provider"
-}
-
-func (c *AuthDisableCommand) Help() string {
- helpText := `
-Usage: vault auth-disable [options] path
-
- Disable an already-enabled auth provider.
-
- Once the auth provider is disabled its path can no longer be used
- to authenticate. All access tokens generated via the disabled auth provider
- will be revoked. This command will block until all tokens are revoked.
- If the command is exited early the tokens will still be revoked.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/auth_disable_test.go b/vendor/github.com/hashicorp/vault/command/auth_disable_test.go
deleted file mode 100644
index fb2b91f..0000000
--- a/vendor/github.com/hashicorp/vault/command/auth_disable_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestAuthDisable(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &AuthDisableCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "noop",
- }
-
- // Run the command once to setup the client, it will fail
- c.Run(args)
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := client.Sys().EnableAuth("noop", "noop", ""); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- mounts, err := client.Sys().ListAuth()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if _, ok := mounts["noop"]; ok {
- t.Fatal("should not have noop mount")
- }
-}
-
-func TestAuthDisableWithOptions(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &AuthDisableCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "noop",
- }
-
- // Run the command once to setup the client, it will fail
- c.Run(args)
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := client.Sys().EnableAuthWithOptions("noop", &api.EnableAuthOptions{
- Type: "noop",
- Description: "",
- }); err != nil {
- t.Fatalf("err: %#v", err)
- }
-
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- mounts, err := client.Sys().ListAuth()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if _, ok := mounts["noop"]; ok {
- t.Fatal("should not have noop mount")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/auth_enable.go b/vendor/github.com/hashicorp/vault/command/auth_enable.go
deleted file mode 100644
index e6b7f20..0000000
--- a/vendor/github.com/hashicorp/vault/command/auth_enable.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/meta"
- "github.com/posener/complete"
-)
-
-// AuthEnableCommand is a Command that enables a new endpoint.
-type AuthEnableCommand struct {
- meta.Meta
-}
-
-func (c *AuthEnableCommand) Run(args []string) int {
- var description, path, pluginName string
- var local bool
- flags := c.Meta.FlagSet("auth-enable", meta.FlagSetDefault)
- flags.StringVar(&description, "description", "", "")
- flags.StringVar(&path, "path", "", "")
- flags.StringVar(&pluginName, "plugin-name", "", "")
- flags.BoolVar(&local, "local", false, "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\nauth-enable expects one argument: the type to enable."))
- return 1
- }
-
- authType := args[0]
-
- // If no path is specified, we default the path to the backend type
- // or use the plugin name if it's a plugin backend
- if path == "" {
- if authType == "plugin" {
- path = pluginName
- } else {
- path = authType
- }
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- if err := client.Sys().EnableAuthWithOptions(path, &api.EnableAuthOptions{
- Type: authType,
- Description: description,
- Config: api.AuthConfigInput{
- PluginName: pluginName,
- },
- Local: local,
- }); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error: %s", err))
- return 2
- }
-
- authTypeOutput := fmt.Sprintf("'%s'", authType)
- if authType == "plugin" {
- authTypeOutput = fmt.Sprintf("plugin '%s'", pluginName)
- }
-
- c.Ui.Output(fmt.Sprintf(
- "Successfully enabled %s at '%s'!",
- authTypeOutput, path))
-
- return 0
-}
-
-func (c *AuthEnableCommand) Synopsis() string {
- return "Enable a new auth provider"
-}
-
-func (c *AuthEnableCommand) Help() string {
- helpText := `
-Usage: vault auth-enable [options] type
-
- Enable a new auth provider.
-
- This command enables a new auth provider. An auth provider is responsible
- for authenticating a user and assigning them policies with which they can
- access Vault.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Auth Enable Options:
-
- -description= Human-friendly description of the purpose of the
- auth provider. This shows up in the auth -methods command.
-
- -path= Mount point for the auth provider. This defaults
- to the type of the mount. This will make the auth
- provider available at "/auth/"
-
- -plugin-name Name of the auth plugin to use based from the name
- in the plugin catalog.
-
- -local Mark the mount as a local mount. Local mounts
- are not replicated nor (if a secondary)
- removed by replication.
-`
- return strings.TrimSpace(helpText)
-}
-
-func (c *AuthEnableCommand) AutocompleteArgs() complete.Predictor {
- return complete.PredictSet(
- "approle",
- "cert",
- "aws",
- "app-id",
- "gcp",
- "github",
- "userpass",
- "ldap",
- "okta",
- "radius",
- "plugin",
- )
-
-}
-
-func (c *AuthEnableCommand) AutocompleteFlags() complete.Flags {
- return complete.Flags{
- "-description": complete.PredictNothing,
- "-path": complete.PredictNothing,
- "-plugin-name": complete.PredictNothing,
- "-local": complete.PredictNothing,
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/auth_enable_test.go b/vendor/github.com/hashicorp/vault/command/auth_enable_test.go
deleted file mode 100644
index 0f83487..0000000
--- a/vendor/github.com/hashicorp/vault/command/auth_enable_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestAuthEnable(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &AuthEnableCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "noop",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mounts, err := client.Sys().ListAuth()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mount, ok := mounts["noop/"]
- if !ok {
- t.Fatal("should have noop mount")
- }
- if mount.Type != "noop" {
- t.Fatal("should be noop type")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/auth_test.go b/vendor/github.com/hashicorp/vault/command/auth_test.go
deleted file mode 100644
index 8243129..0000000
--- a/vendor/github.com/hashicorp/vault/command/auth_test.go
+++ /dev/null
@@ -1,400 +0,0 @@
-package command
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "testing"
-
- credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
- "github.com/hashicorp/vault/logical"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestAuth_methods(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- testAuthInit(t)
-
- ui := new(cli.MockUi)
- c := &AuthCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- TokenHelper: DefaultTokenHelper,
- },
- }
-
- args := []string{
- "-address", addr,
- "-methods",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- output := ui.OutputWriter.String()
- if !strings.Contains(output, "token") {
- t.Fatalf("bad: %#v", output)
- }
-}
-
-func TestAuth_token(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- testAuthInit(t)
-
- ui := new(cli.MockUi)
- c := &AuthCommand{
- Meta: meta.Meta{
- Ui: ui,
- TokenHelper: DefaultTokenHelper,
- },
- }
-
- args := []string{
- "-address", addr,
- token,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- helper, err := c.TokenHelper()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- actual, err := helper.Get()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if actual != token {
- t.Fatalf("bad: %s", actual)
- }
-}
-
-func TestAuth_wrapping(t *testing.T) {
- baseConfig := &vault.CoreConfig{
- CredentialBackends: map[string]logical.Factory{
- "userpass": credUserpass.Factory,
- },
- }
- cluster := vault.NewTestCluster(t, baseConfig, &vault.TestClusterOptions{
- HandlerFunc: http.Handler,
- BaseListenAddress: "127.0.0.1:8200",
- })
- cluster.Start()
- defer cluster.Cleanup()
-
- testAuthInit(t)
-
- client := cluster.Cores[0].Client
- err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{
- Type: "userpass",
- })
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Logical().Write("auth/userpass/users/foo", map[string]interface{}{
- "password": "bar",
- "policies": "zip,zap",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- ui := new(cli.MockUi)
- c := &AuthCommand{
- Meta: meta.Meta{
- Ui: ui,
- TokenHelper: DefaultTokenHelper,
- },
- Handlers: map[string]AuthHandler{
- "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"},
- },
- }
-
- args := []string{
- "-address",
- "https://127.0.0.1:8200",
- "-tls-skip-verify",
- "-method",
- "userpass",
- "username=foo",
- "password=bar",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Test again with wrapping
- ui = new(cli.MockUi)
- c = &AuthCommand{
- Meta: meta.Meta{
- Ui: ui,
- TokenHelper: DefaultTokenHelper,
- },
- Handlers: map[string]AuthHandler{
- "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"},
- },
- }
-
- args = []string{
- "-address",
- "https://127.0.0.1:8200",
- "-tls-skip-verify",
- "-wrap-ttl",
- "5m",
- "-method",
- "userpass",
- "username=foo",
- "password=bar",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Test again with no-store
- ui = new(cli.MockUi)
- c = &AuthCommand{
- Meta: meta.Meta{
- Ui: ui,
- TokenHelper: DefaultTokenHelper,
- },
- Handlers: map[string]AuthHandler{
- "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"},
- },
- }
-
- args = []string{
- "-address",
- "https://127.0.0.1:8200",
- "-tls-skip-verify",
- "-wrap-ttl",
- "5m",
- "-no-store",
- "-method",
- "userpass",
- "username=foo",
- "password=bar",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Test again with wrapping and token-only
- ui = new(cli.MockUi)
- c = &AuthCommand{
- Meta: meta.Meta{
- Ui: ui,
- TokenHelper: DefaultTokenHelper,
- },
- Handlers: map[string]AuthHandler{
- "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"},
- },
- }
-
- args = []string{
- "-address",
- "https://127.0.0.1:8200",
- "-tls-skip-verify",
- "-wrap-ttl",
- "5m",
- "-token-only",
- "-method",
- "userpass",
- "username=foo",
- "password=bar",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
- token := strings.TrimSpace(ui.OutputWriter.String())
- if token == "" {
- t.Fatal("expected to find token in output")
- }
- secret, err := client.Logical().Unwrap(token)
- if err != nil {
- t.Fatal(err)
- }
- if secret.Auth.ClientToken == "" {
- t.Fatal("no client token found")
- }
-}
-
-func TestAuth_token_nostore(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- testAuthInit(t)
-
- ui := new(cli.MockUi)
- c := &AuthCommand{
- Meta: meta.Meta{
- Ui: ui,
- TokenHelper: DefaultTokenHelper,
- },
- }
-
- args := []string{
- "-address", addr,
- "-no-store",
- token,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- helper, err := c.TokenHelper()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- actual, err := helper.Get()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if actual != "" {
- t.Fatalf("bad: %s", actual)
- }
-}
-
-func TestAuth_stdin(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- testAuthInit(t)
-
- stdinR, stdinW := io.Pipe()
- ui := new(cli.MockUi)
- c := &AuthCommand{
- Meta: meta.Meta{
- Ui: ui,
- TokenHelper: DefaultTokenHelper,
- },
- testStdin: stdinR,
- }
-
- go func() {
- stdinW.Write([]byte(token))
- stdinW.Close()
- }()
-
- args := []string{
- "-address", addr,
- "-",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestAuth_badToken(t *testing.T) {
- core, _, _ := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- testAuthInit(t)
-
- ui := new(cli.MockUi)
- c := &AuthCommand{
- Meta: meta.Meta{
- Ui: ui,
- TokenHelper: DefaultTokenHelper,
- },
- }
-
- args := []string{
- "-address", addr,
- "not-a-valid-token",
- }
- if code := c.Run(args); code != 1 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestAuth_method(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- testAuthInit(t)
-
- ui := new(cli.MockUi)
- c := &AuthCommand{
- Handlers: map[string]AuthHandler{
- "test": &testAuthHandler{},
- },
- Meta: meta.Meta{
- Ui: ui,
- TokenHelper: DefaultTokenHelper,
- },
- }
-
- args := []string{
- "-address", addr,
- "-method=test",
- "foo=" + token,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- helper, err := c.TokenHelper()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- actual, err := helper.Get()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if actual != token {
- t.Fatalf("bad: %s", actual)
- }
-}
-
-func testAuthInit(t *testing.T) {
- td, err := ioutil.TempDir("", "vault")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Set the HOME env var so we get that right
- os.Setenv("HOME", td)
-
- // Write a .vault config to use our custom token helper
- config := fmt.Sprintf(
- "token_helper = \"\"\n")
- ioutil.WriteFile(filepath.Join(td, ".vault"), []byte(config), 0644)
-}
-
-type testAuthHandler struct{}
-
-func (h *testAuthHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
- return &api.Secret{
- Auth: &api.SecretAuth{
- ClientToken: m["foo"],
- },
- }, nil
-}
-
-func (h *testAuthHandler) Help() string { return "" }
diff --git a/vendor/github.com/hashicorp/vault/command/capabilities.go b/vendor/github.com/hashicorp/vault/command/capabilities.go
deleted file mode 100644
index bb60bd4..0000000
--- a/vendor/github.com/hashicorp/vault/command/capabilities.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// CapabilitiesCommand is a Command that enables a new endpoint.
-type CapabilitiesCommand struct {
- meta.Meta
-}
-
-func (c *CapabilitiesCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("capabilities", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) > 2 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\ncapabilities expects at most two arguments"))
- return 1
- }
-
- var token string
- var path string
- switch {
- case len(args) == 1:
- path = args[0]
- case len(args) == 2:
- token = args[0]
- path = args[1]
- default:
- flags.Usage()
- c.Ui.Error(fmt.Sprintf("\ncapabilities expects at least one argument"))
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- var capabilities []string
- if token == "" {
- capabilities, err = client.Sys().CapabilitiesSelf(path)
- } else {
- capabilities, err = client.Sys().Capabilities(token, path)
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error retrieving capabilities: %s", err))
- return 1
- }
-
- c.Ui.Output(fmt.Sprintf("Capabilities: %s", capabilities))
- return 0
-}
-
-func (c *CapabilitiesCommand) Synopsis() string {
- return "Fetch the capabilities of a token on a given path"
-}
-
-func (c *CapabilitiesCommand) Help() string {
- helpText := `
-Usage: vault capabilities [options] [token] path
-
- Fetch the capabilities of a token on a given path.
- If a token is provided as an argument, the '/sys/capabilities' endpoint will be invoked
- with the given token; otherwise the '/sys/capabilities-self' endpoint will be invoked
- with the client token.
-
- If a token does not have any capability on a given path, or if any of the policies
- belonging to the token explicitly have ["deny"] capability, or if the argument path
- is invalid, this command will respond with a ["deny"].
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/capabilities_test.go b/vendor/github.com/hashicorp/vault/command/capabilities_test.go
deleted file mode 100644
index 5d106a1..0000000
--- a/vendor/github.com/hashicorp/vault/command/capabilities_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestCapabilities_Basic(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
- ui := new(cli.MockUi)
- c := &CapabilitiesCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- var args []string
-
- args = []string{"-address", addr}
- if code := c.Run(args); code == 0 {
- t.Fatalf("expected failure due to no args")
- }
-
- args = []string{"-address", addr, "testpath"}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- args = []string{"-address", addr, token, "test"}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- args = []string{"-address", addr, "invalidtoken", "test"}
- if code := c.Run(args); code == 0 {
- t.Fatalf("expected failure due to invalid token")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/command_test.go b/vendor/github.com/hashicorp/vault/command/command_test.go
deleted file mode 100644
index 763587a..0000000
--- a/vendor/github.com/hashicorp/vault/command/command_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/api"
-)
-
-func testClient(t *testing.T, addr string, token string) *api.Client {
- config := api.DefaultConfig()
- config.Address = addr
- client, err := api.NewClient(config)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- client.SetToken(token)
-
- return client
-}
diff --git a/vendor/github.com/hashicorp/vault/command/config.go b/vendor/github.com/hashicorp/vault/command/config.go
deleted file mode 100644
index ee866fa..0000000
--- a/vendor/github.com/hashicorp/vault/command/config.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package command
-
-import (
- "fmt"
- "io/ioutil"
- "os"
-
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/hcl"
- "github.com/hashicorp/hcl/hcl/ast"
- "github.com/mitchellh/go-homedir"
-)
-
-const (
- // DefaultConfigPath is the default path to the configuration file
- DefaultConfigPath = "~/.vault"
-
- // ConfigPathEnv is the environment variable that can be used to
- // override where the Vault configuration is.
- ConfigPathEnv = "VAULT_CONFIG_PATH"
-)
-
-// Config is the CLI configuration for Vault that can be specified via
-// a `$HOME/.vault` file which is HCL-formatted (therefore HCL or JSON).
-type DefaultConfig struct {
- // TokenHelper is the executable/command that is executed for storing
- // and retrieving the authentication token for the Vault CLI. If this
- // is not specified, then vault's internal token store will be used, which
- // stores the token on disk unencrypted.
- TokenHelper string `hcl:"token_helper"`
-}
-
-// Config loads the configuration and returns it. If the configuration
-// is already loaded, it is returned.
-func Config() (*DefaultConfig, error) {
- var err error
- config, err := LoadConfig("")
- if err != nil {
- return nil, err
- }
-
- return config, nil
-}
-
-// LoadConfig reads the configuration from the given path. If path is
-// empty, then the default path will be used, or the environment variable
-// if set.
-func LoadConfig(path string) (*DefaultConfig, error) {
- if path == "" {
- path = DefaultConfigPath
- }
- if v := os.Getenv(ConfigPathEnv); v != "" {
- path = v
- }
-
- // NOTE: requires HOME env var to be set
- path, err := homedir.Expand(path)
- if err != nil {
- return nil, fmt.Errorf("Error expanding config path %s: %s", path, err)
- }
-
- contents, err := ioutil.ReadFile(path)
- if err != nil && !os.IsNotExist(err) {
- return nil, err
- }
-
- return ParseConfig(string(contents))
-}
-
-// ParseConfig parses the given configuration as a string.
-func ParseConfig(contents string) (*DefaultConfig, error) {
- root, err := hcl.Parse(contents)
- if err != nil {
- return nil, err
- }
-
- // Top-level item should be the object list
- list, ok := root.Node.(*ast.ObjectList)
- if !ok {
- return nil, fmt.Errorf("Failed to parse config: does not contain a root object")
- }
-
- valid := []string{
- "token_helper",
- }
- if err := checkHCLKeys(list, valid); err != nil {
- return nil, err
- }
-
- var c DefaultConfig
- if err := hcl.DecodeObject(&c, list); err != nil {
- return nil, err
- }
- return &c, nil
-}
-
-func checkHCLKeys(node ast.Node, valid []string) error {
- var list *ast.ObjectList
- switch n := node.(type) {
- case *ast.ObjectList:
- list = n
- case *ast.ObjectType:
- list = n.List
- default:
- return fmt.Errorf("cannot check HCL keys of type %T", n)
- }
-
- validMap := make(map[string]struct{}, len(valid))
- for _, v := range valid {
- validMap[v] = struct{}{}
- }
-
- var result error
- for _, item := range list.Items {
- key := item.Keys[0].Token.Value().(string)
- if _, ok := validMap[key]; !ok {
- result = multierror.Append(result, fmt.Errorf(
- "invalid key '%s' on line %d", key, item.Assign.Line))
- }
- }
-
- return result
-}
diff --git a/vendor/github.com/hashicorp/vault/command/config_test.go b/vendor/github.com/hashicorp/vault/command/config_test.go
deleted file mode 100644
index edbf557..0000000
--- a/vendor/github.com/hashicorp/vault/command/config_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package command
-
-import (
- "path/filepath"
- "reflect"
- "strings"
- "testing"
-)
-
-const FixturePath = "./test-fixtures"
-
-func TestLoadConfig(t *testing.T) {
- config, err := LoadConfig(filepath.Join(FixturePath, "config.hcl"))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := &DefaultConfig{
- TokenHelper: "foo",
- }
- if !reflect.DeepEqual(expected, config) {
- t.Fatalf("bad: %#v", config)
- }
-}
-
-func TestLoadConfig_noExist(t *testing.T) {
- config, err := LoadConfig("nope/not-once/.never")
- if err != nil {
- t.Fatal(err)
- }
-
- if config.TokenHelper != "" {
- t.Errorf("expected %q to be %q", config.TokenHelper, "")
- }
-}
-
-func TestParseConfig_badKeys(t *testing.T) {
- _, err := ParseConfig(`
-token_helper = "/token"
-nope = "true"
-`)
- if err == nil {
- t.Fatal("expected error")
- }
-
- if !strings.Contains(err.Error(), "invalid key 'nope' on line 3") {
- t.Errorf("bad error: %s", err.Error())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/delete.go b/vendor/github.com/hashicorp/vault/command/delete.go
deleted file mode 100644
index d9a8ee8..0000000
--- a/vendor/github.com/hashicorp/vault/command/delete.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// DeleteCommand is a Command that puts data into the Vault.
-type DeleteCommand struct {
- meta.Meta
-}
-
-func (c *DeleteCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("delete", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 {
- c.Ui.Error("delete expects one argument")
- flags.Usage()
- return 1
- }
-
- path := args[0]
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- if _, err := client.Logical().Delete(path); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error deleting '%s': %s", path, err))
- return 1
- }
-
- c.Ui.Output(fmt.Sprintf("Success! Deleted '%s' if it existed.", path))
- return 0
-}
-
-func (c *DeleteCommand) Synopsis() string {
- return "Delete operation on secrets in Vault"
-}
-
-func (c *DeleteCommand) Help() string {
- helpText := `
-Usage: vault delete [options] path
-
- Delete data (secrets or configuration) from Vault.
-
- Delete sends a delete operation request to the given path. The
- behavior of the delete is determined by the backend at the given
- path. For example, deleting "aws/policy/ops" will delete the "ops"
- policy for the AWS backend. Use "vault help" for more details on
- whether delete is supported for a path and what the behavior is.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/delete_test.go b/vendor/github.com/hashicorp/vault/command/delete_test.go
deleted file mode 100644
index c5efc41..0000000
--- a/vendor/github.com/hashicorp/vault/command/delete_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestDelete(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &DeleteCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "secret/foo",
- }
-
- // Run once so the client is setup, ignore errors
- c.Run(args)
-
- // Get the client so we can write data
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- data := map[string]interface{}{"value": "bar"}
- if _, err := client.Logical().Write("secret/foo", data); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Run the delete
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- resp, err := client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/format.go b/vendor/github.com/hashicorp/vault/command/format.go
deleted file mode 100644
index 38f24d4..0000000
--- a/vendor/github.com/hashicorp/vault/command/format.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package command
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/ghodss/yaml"
- "github.com/hashicorp/vault/api"
- "github.com/mitchellh/cli"
- "github.com/posener/complete"
- "github.com/ryanuber/columnize"
-)
-
-var predictFormat complete.Predictor = complete.PredictSet("json", "yaml")
-
-func OutputSecret(ui cli.Ui, format string, secret *api.Secret) int {
- return outputWithFormat(ui, format, secret, secret)
-}
-
-func OutputList(ui cli.Ui, format string, secret *api.Secret) int {
- return outputWithFormat(ui, format, secret, secret.Data["keys"])
-}
-
-func outputWithFormat(ui cli.Ui, format string, secret *api.Secret, data interface{}) int {
- formatter, ok := Formatters[strings.ToLower(format)]
- if !ok {
- ui.Error(fmt.Sprintf("Invalid output format: %s", format))
- return 1
- }
- if err := formatter.Output(ui, secret, data); err != nil {
- ui.Error(fmt.Sprintf("Could not output secret: %s", err.Error()))
- return 1
- }
- return 0
-}
-
-type Formatter interface {
- Output(ui cli.Ui, secret *api.Secret, data interface{}) error
-}
-
-var Formatters = map[string]Formatter{
- "json": JsonFormatter{},
- "table": TableFormatter{},
- "yaml": YamlFormatter{},
- "yml": YamlFormatter{},
-}
-
-// An output formatter for json output of an object
-type JsonFormatter struct {
-}
-
-func (j JsonFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error {
- b, err := json.Marshal(data)
- if err == nil {
- var out bytes.Buffer
- json.Indent(&out, b, "", "\t")
- ui.Output(out.String())
- }
- return err
-}
-
-// An output formatter for yaml output format of an object
-type YamlFormatter struct {
-}
-
-func (y YamlFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error {
- b, err := yaml.Marshal(data)
- if err == nil {
- ui.Output(strings.TrimSpace(string(b)))
- }
- return err
-}
-
-// An output formatter for table output of an object
-type TableFormatter struct {
-}
-
-func (t TableFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error {
- // TODO: this should really use reflection like the other formatters do
- if s, ok := data.(*api.Secret); ok {
- return t.OutputSecret(ui, secret, s)
- }
- if s, ok := data.([]interface{}); ok {
- return t.OutputList(ui, secret, s)
- }
- return errors.New("Cannot use the table formatter for this type")
-}
-
-func (t TableFormatter) OutputList(ui cli.Ui, secret *api.Secret, list []interface{}) error {
- config := columnize.DefaultConfig()
- config.Delim = "♨"
- config.Glue = "\t"
- config.Prefix = ""
-
- input := make([]string, 0, 5)
-
- if len(list) > 0 {
- input = append(input, "Keys")
- input = append(input, "----")
-
- keys := make([]string, 0, len(list))
- for _, k := range list {
- keys = append(keys, k.(string))
- }
- sort.Strings(keys)
-
- for _, k := range keys {
- input = append(input, fmt.Sprintf("%s", k))
- }
- }
-
- tableOutputStr := columnize.Format(input, config)
-
- // Print the warning separately because the length of first
- // column in the output will be increased by the length of
- // the longest warning string making the output look bad.
- warningsInput := make([]string, 0, 5)
- if len(secret.Warnings) != 0 {
- warningsInput = append(warningsInput, "")
- warningsInput = append(warningsInput, "The following warnings were returned from the Vault server:")
- for _, warning := range secret.Warnings {
- warningsInput = append(warningsInput, fmt.Sprintf("* %s", warning))
- }
- }
-
- warningsOutputStr := columnize.Format(warningsInput, config)
-
- ui.Output(fmt.Sprintf("%s\n%s", tableOutputStr, warningsOutputStr))
-
- return nil
-}
-
-func (t TableFormatter) OutputSecret(ui cli.Ui, secret, s *api.Secret) error {
- config := columnize.DefaultConfig()
- config.Delim = "♨"
- config.Glue = "\t"
- config.Prefix = ""
-
- input := make([]string, 0, 5)
-
- onceHeader := &sync.Once{}
- headerFunc := func() {
- input = append(input, fmt.Sprintf("Key %s Value", config.Delim))
- input = append(input, fmt.Sprintf("--- %s -----", config.Delim))
- }
-
- if s.LeaseDuration > 0 {
- onceHeader.Do(headerFunc)
- if s.LeaseID != "" {
- input = append(input, fmt.Sprintf("lease_id %s %s", config.Delim, s.LeaseID))
- input = append(input, fmt.Sprintf(
- "lease_duration %s %s", config.Delim, (time.Second*time.Duration(s.LeaseDuration)).String()))
- } else {
- input = append(input, fmt.Sprintf(
- "refresh_interval %s %s", config.Delim, (time.Second*time.Duration(s.LeaseDuration)).String()))
- }
- if s.LeaseID != "" {
- input = append(input, fmt.Sprintf(
- "lease_renewable %s %s", config.Delim, strconv.FormatBool(s.Renewable)))
- }
- }
-
- if s.Auth != nil {
- onceHeader.Do(headerFunc)
- input = append(input, fmt.Sprintf("token %s %s", config.Delim, s.Auth.ClientToken))
- input = append(input, fmt.Sprintf("token_accessor %s %s", config.Delim, s.Auth.Accessor))
- input = append(input, fmt.Sprintf("token_duration %s %s", config.Delim, (time.Second*time.Duration(s.Auth.LeaseDuration)).String()))
- input = append(input, fmt.Sprintf("token_renewable %s %v", config.Delim, s.Auth.Renewable))
- input = append(input, fmt.Sprintf("token_policies %s %v", config.Delim, s.Auth.Policies))
- for k, v := range s.Auth.Metadata {
- input = append(input, fmt.Sprintf("token_meta_%s %s %#v", k, config.Delim, v))
- }
- }
-
- if s.WrapInfo != nil {
- onceHeader.Do(headerFunc)
- input = append(input, fmt.Sprintf("wrapping_token: %s %s", config.Delim, s.WrapInfo.Token))
- input = append(input, fmt.Sprintf("wrapping_token_ttl: %s %s", config.Delim, (time.Second*time.Duration(s.WrapInfo.TTL)).String()))
- input = append(input, fmt.Sprintf("wrapping_token_creation_time: %s %s", config.Delim, s.WrapInfo.CreationTime.String()))
- input = append(input, fmt.Sprintf("wrapping_token_creation_path: %s %s", config.Delim, s.WrapInfo.CreationPath))
- if s.WrapInfo.WrappedAccessor != "" {
- input = append(input, fmt.Sprintf("wrapped_accessor: %s %s", config.Delim, s.WrapInfo.WrappedAccessor))
- }
- }
-
- if s.Data != nil && len(s.Data) > 0 {
- onceHeader.Do(headerFunc)
- keys := make([]string, 0, len(s.Data))
- for k := range s.Data {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- for _, k := range keys {
- input = append(input, fmt.Sprintf("%s %s %v", k, config.Delim, s.Data[k]))
- }
- }
-
- tableOutputStr := columnize.Format(input, config)
-
- // Print the warning separately because the length of first
- // column in the output will be increased by the length of
- // the longest warning string making the output look bad.
- warningsInput := make([]string, 0, 5)
- if len(s.Warnings) != 0 {
- warningsInput = append(warningsInput, "")
- warningsInput = append(warningsInput, "The following warnings were returned from the Vault server:")
- for _, warning := range s.Warnings {
- warningsInput = append(warningsInput, fmt.Sprintf("* %s", warning))
- }
- }
-
- warningsOutputStr := columnize.Format(warningsInput, config)
-
- ui.Output(fmt.Sprintf("%s\n%s", tableOutputStr, warningsOutputStr))
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/command/format_test.go b/vendor/github.com/hashicorp/vault/command/format_test.go
deleted file mode 100644
index 8e32d24..0000000
--- a/vendor/github.com/hashicorp/vault/command/format_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package command
-
-import (
- "strings"
- "testing"
-
- "github.com/ghodss/yaml"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/jsonutil"
-)
-
-var output string
-
-type mockUi struct {
- t *testing.T
- SampleData string
-}
-
-func (m mockUi) Ask(_ string) (string, error) {
- m.t.FailNow()
- return "", nil
-}
-func (m mockUi) AskSecret(_ string) (string, error) {
- m.t.FailNow()
- return "", nil
-}
-func (m mockUi) Output(s string) {
- output = s
-}
-func (m mockUi) Info(s string) {
- m.t.Log(s)
-}
-func (m mockUi) Error(s string) {
- m.t.Log(s)
-}
-func (m mockUi) Warn(s string) {
- m.t.Log(s)
-}
-
-func TestJsonFormatter(t *testing.T) {
- ui := mockUi{t: t, SampleData: "something"}
- if err := outputWithFormat(ui, "json", nil, ui); err != 0 {
- t.Fatal(err)
- }
- var newUi mockUi
- if err := jsonutil.DecodeJSON([]byte(output), &newUi); err != nil {
- t.Fatal(err)
- }
- if newUi.SampleData != ui.SampleData {
- t.Fatalf(`values not equal ("%s" != "%s")`,
- newUi.SampleData,
- ui.SampleData)
- }
-}
-
-func TestYamlFormatter(t *testing.T) {
- ui := mockUi{t: t, SampleData: "something"}
- if err := outputWithFormat(ui, "yaml", nil, ui); err != 0 {
- t.Fatal(err)
- }
- var newUi mockUi
- err := yaml.Unmarshal([]byte(output), &newUi)
- if err != nil {
- t.Fatal(err)
- }
- if newUi.SampleData != ui.SampleData {
- t.Fatalf(`values not equal ("%s" != "%s")`,
- newUi.SampleData,
- ui.SampleData)
- }
-}
-
-func TestTableFormatter(t *testing.T) {
- ui := mockUi{t: t}
- s := api.Secret{Data: map[string]interface{}{"k": "something"}}
- if err := outputWithFormat(ui, "table", &s, &s); err != 0 {
- t.Fatal(err)
- }
- if !strings.Contains(output, "something") {
- t.Fatal("did not find 'something'")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/generate-root.go b/vendor/github.com/hashicorp/vault/command/generate-root.go
deleted file mode 100644
index 2d9521b..0000000
--- a/vendor/github.com/hashicorp/vault/command/generate-root.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package command
-
-import (
- "crypto/rand"
- "encoding/base64"
- "fmt"
- "os"
- "strings"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/password"
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/helper/xor"
- "github.com/hashicorp/vault/meta"
- "github.com/posener/complete"
-)
-
-// GenerateRootCommand is a Command that generates a new root token.
-type GenerateRootCommand struct {
- meta.Meta
-
- // Key can be used to pre-seed the key. If it is set, it will not
- // be asked with the `password` helper.
- Key string
-
- // The nonce for the rekey request to send along
- Nonce string
-}
-
-func (c *GenerateRootCommand) Run(args []string) int {
- var init, cancel, status, genotp bool
- var nonce, decode, otp, pgpKey string
- var pgpKeyArr pgpkeys.PubKeyFilesFlag
- flags := c.Meta.FlagSet("generate-root", meta.FlagSetDefault)
- flags.BoolVar(&init, "init", false, "")
- flags.BoolVar(&cancel, "cancel", false, "")
- flags.BoolVar(&status, "status", false, "")
- flags.BoolVar(&genotp, "genotp", false, "")
- flags.StringVar(&decode, "decode", "", "")
- flags.StringVar(&otp, "otp", "", "")
- flags.StringVar(&nonce, "nonce", "", "")
- flags.Var(&pgpKeyArr, "pgp-key", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- if genotp {
- buf := make([]byte, 16)
- readLen, err := rand.Read(buf)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error reading random bytes: %s", err))
- return 1
- }
- if readLen != 16 {
- c.Ui.Error(fmt.Sprintf("Read %d bytes when we should have read 16", readLen))
- return 1
- }
- c.Ui.Output(fmt.Sprintf("OTP: %s", base64.StdEncoding.EncodeToString(buf)))
- return 0
- }
-
- if len(decode) > 0 {
- if len(otp) == 0 {
- c.Ui.Error("Both the value to decode and the OTP must be passed in")
- return 1
- }
- return c.decode(decode, otp)
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- // Check if the root generation is started
- rootGenerationStatus, err := client.Sys().GenerateRootStatus()
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error reading root generation status: %s", err))
- return 1
- }
-
- // If we are initing, or if we are not started but are not running a
- // special function, check otp and pgpkey
- checkOtpPgp := false
- switch {
- case init:
- checkOtpPgp = true
- case cancel:
- case status:
- case genotp:
- case len(decode) != 0:
- case rootGenerationStatus.Started:
- default:
- checkOtpPgp = true
- }
- if checkOtpPgp {
- switch {
- case len(otp) == 0 && (pgpKeyArr == nil || len(pgpKeyArr) == 0):
- c.Ui.Error(c.Help())
- return 1
- case len(otp) != 0 && pgpKeyArr != nil && len(pgpKeyArr) != 0:
- c.Ui.Error(c.Help())
- return 1
- case len(otp) != 0:
- err := c.verifyOTP(otp)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error verifying the provided OTP: %s", err))
- return 1
- }
- case pgpKeyArr != nil:
- if len(pgpKeyArr) != 1 {
- c.Ui.Error("Could not parse PGP key")
- return 1
- }
- if len(pgpKeyArr[0]) == 0 {
- c.Ui.Error("Got an empty PGP key")
- return 1
- }
- pgpKey = pgpKeyArr[0]
- default:
- panic("unreachable case")
- }
- }
-
- if nonce != "" {
- c.Nonce = nonce
- }
-
- // Check if we are running doing any restricted variants
- switch {
- case init:
- return c.initGenerateRoot(client, otp, pgpKey)
- case cancel:
- return c.cancelGenerateRoot(client)
- case status:
- return c.rootGenerationStatus(client)
- }
-
- // Start the root generation process if not started
- if !rootGenerationStatus.Started {
- rootGenerationStatus, err = client.Sys().GenerateRootInit(otp, pgpKey)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error initializing root generation: %s", err))
- return 1
- }
- c.Nonce = rootGenerationStatus.Nonce
- }
-
- serverNonce := rootGenerationStatus.Nonce
-
- // Get the unseal key
- args = flags.Args()
- key := c.Key
- if len(args) > 0 {
- key = args[0]
- }
- if key == "" {
- c.Nonce = serverNonce
- fmt.Printf("Root generation operation nonce: %s\n", serverNonce)
- fmt.Printf("Key (will be hidden): ")
- key, err = password.Read(os.Stdin)
- fmt.Printf("\n")
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error attempting to ask for password. The raw error message\n"+
- "is shown below, but the most common reason for this error is\n"+
- "that you attempted to pipe a value into unseal or you're\n"+
- "executing `vault generate-root` from outside of a terminal.\n\n"+
- "You should use `vault generate-root` from a terminal for maximum\n"+
- "security. If this isn't an option, the unseal key can be passed\n"+
- "in using the first parameter.\n\n"+
- "Raw error: %s", err))
- return 1
- }
- }
-
- // Provide the key, this may potentially complete the update
- statusResp, err := client.Sys().GenerateRootUpdate(strings.TrimSpace(key), c.Nonce)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error attempting generate-root update: %s", err))
- return 1
- }
-
- c.dumpStatus(statusResp)
-
- return 0
-}
-
-func (c *GenerateRootCommand) verifyOTP(otp string) error {
- if len(otp) == 0 {
- return fmt.Errorf("No OTP passed in")
- }
- otpBytes, err := base64.StdEncoding.DecodeString(otp)
- if err != nil {
- return fmt.Errorf("Error decoding base64 OTP value: %s", err)
- }
- if otpBytes == nil || len(otpBytes) != 16 {
- return fmt.Errorf("Decoded OTP value is invalid or wrong length")
- }
-
- return nil
-}
-
-func (c *GenerateRootCommand) decode(encodedVal, otp string) int {
- tokenBytes, err := xor.XORBase64(encodedVal, otp)
- if err != nil {
- c.Ui.Error(err.Error())
- return 1
- }
-
- token, err := uuid.FormatUUID(tokenBytes)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error formatting base64 token value: %v", err))
- return 1
- }
-
- c.Ui.Output(fmt.Sprintf("Root token: %s", token))
-
- return 0
-}
-
-// initGenerateRoot is used to start the generation process
-func (c *GenerateRootCommand) initGenerateRoot(client *api.Client, otp string, pgpKey string) int {
- // Start the rekey
- status, err := client.Sys().GenerateRootInit(otp, pgpKey)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error initializing root generation: %s", err))
- return 1
- }
-
- c.dumpStatus(status)
-
- return 0
-}
-
-// cancelGenerateRoot is used to abort the generation process
-func (c *GenerateRootCommand) cancelGenerateRoot(client *api.Client) int {
- err := client.Sys().GenerateRootCancel()
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Failed to cancel root generation: %s", err))
- return 1
- }
- c.Ui.Output("Root generation canceled.")
- return 0
-}
-
-// rootGenerationStatus is used just to fetch and dump the status
-func (c *GenerateRootCommand) rootGenerationStatus(client *api.Client) int {
- // Check the status
- status, err := client.Sys().GenerateRootStatus()
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error reading root generation status: %s", err))
- return 1
- }
-
- c.dumpStatus(status)
-
- return 0
-}
-
-// dumpStatus dumps the status to output
-func (c *GenerateRootCommand) dumpStatus(status *api.GenerateRootStatusResponse) {
- // Dump the status
- statString := fmt.Sprintf(
- "Nonce: %s\n"+
- "Started: %v\n"+
- "Generate Root Progress: %d\n"+
- "Required Keys: %d\n"+
- "Complete: %t",
- status.Nonce,
- status.Started,
- status.Progress,
- status.Required,
- status.Complete,
- )
- if len(status.PGPFingerprint) > 0 {
- statString = fmt.Sprintf("%s\nPGP Fingerprint: %s", statString, status.PGPFingerprint)
- }
- if len(status.EncodedRootToken) > 0 {
- statString = fmt.Sprintf("%s\n\nEncoded root token: %s", statString, status.EncodedRootToken)
- }
- c.Ui.Output(statString)
-}
-
-func (c *GenerateRootCommand) Synopsis() string {
- return "Generates a new root token"
-}
-
-func (c *GenerateRootCommand) Help() string {
- helpText := `
-Usage: vault generate-root [options] [key]
-
- 'generate-root' is used to create a new root token.
-
- Root generation can only be done when the vault is already unsealed. The
- operation is done online, but requires that a threshold of the current unseal
- keys be provided.
-
- One (and only one) of the following must be provided when initializing the
- root generation attempt:
-
- 1) A 16-byte, base64-encoded One Time Password (OTP) provided in the '-otp'
- flag; the token is XOR'd with this value before it is returned once the final
- unseal key has been provided. The '-decode' operation can be used with this
- value and the OTP to output the final token value. The '-genotp' flag can be
- used to generate a suitable value.
-
- or
-
- 2) A file containing a PGP key (binary or base64-encoded) or a Keybase.io
- username in the format of "keybase:" in the '-pgp-key' flag. The
- final token value will be encrypted with this public key and base64-encoded.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Generate Root Options:
-
- -init Initialize the root generation attempt. This can only
- be done if no generation is already initiated.
-
- -cancel Reset the root generation process by throwing away
- prior unseal keys and the configuration.
-
- -status Prints the status of the current attempt. This can be
- used to see the status without attempting to provide
- an unseal key.
-
- -decode=abcd Decodes and outputs the generated root token. The OTP
- used at '-init' time must be provided in the '-otp'
- parameter.
-
- -genotp Returns a high-quality OTP suitable for passing into
- the '-init' method.
-
- -otp=abcd The base64-encoded 16-byte OTP for use with the
- '-init' or '-decode' methods.
-
- -pgp-key A file on disk containing a binary- or base64-format
- public PGP key, or a Keybase username specified as
- "keybase:". The output root token will be
- encrypted and base64-encoded, in order, with the given
- public key.
-
- -nonce=abcd The nonce provided at initialization time. This same
- nonce value must be provided with each unseal key. If
- the unseal key is not being passed in via the command
- line the nonce parameter is not required, and will
- instead be displayed with the key prompt.
-`
- return strings.TrimSpace(helpText)
-}
-
-func (c *GenerateRootCommand) AutocompleteArgs() complete.Predictor {
- return complete.PredictNothing
-}
-
-func (c *GenerateRootCommand) AutocompleteFlags() complete.Flags {
- return complete.Flags{
- "-init": complete.PredictNothing,
- "-cancel": complete.PredictNothing,
- "-status": complete.PredictNothing,
- "-decode": complete.PredictNothing,
- "-genotp": complete.PredictNothing,
- "-otp": complete.PredictNothing,
- "-pgp-key": complete.PredictNothing,
- "-nonce": complete.PredictNothing,
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/generate-root_test.go b/vendor/github.com/hashicorp/vault/command/generate-root_test.go
deleted file mode 100644
index 31d956d..0000000
--- a/vendor/github.com/hashicorp/vault/command/generate-root_test.go
+++ /dev/null
@@ -1,294 +0,0 @@
-package command
-
-import (
- "encoding/base64"
- "encoding/hex"
- "os"
- "strings"
- "testing"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/helper/xor"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestGenerateRoot_Cancel(t *testing.T) {
- core, _, _ := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &GenerateRootCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- otpBytes, err := vault.GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
- otp := base64.StdEncoding.EncodeToString(otpBytes)
-
- args := []string{"-address", addr, "-init", "-otp", otp}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- args = []string{"-address", addr, "-cancel"}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- config, err := core.GenerateRootConfiguration()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if config != nil {
- t.Fatal("should not have a config for root generation")
- }
-}
-
-func TestGenerateRoot_status(t *testing.T) {
- core, _, _ := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &GenerateRootCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- otpBytes, err := vault.GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
- otp := base64.StdEncoding.EncodeToString(otpBytes)
-
- args := []string{"-address", addr, "-init", "-otp", otp}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- args = []string{"-address", addr, "-status"}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- if !strings.Contains(ui.OutputWriter.String(), "Started: true") {
- t.Fatalf("bad: %s", ui.OutputWriter.String())
- }
-}
-
-func TestGenerateRoot_OTP(t *testing.T) {
- core, ts, keys, _ := vault.TestCoreWithTokenStore(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &GenerateRootCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- // Generate an OTP
- otpBytes, err := vault.GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
- otp := base64.StdEncoding.EncodeToString(otpBytes)
-
- // Init the attempt
- args := []string{
- "-address", addr,
- "-init",
- "-otp", otp,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- config, err := core.GenerateRootConfiguration()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- for _, key := range keys {
- ui = new(cli.MockUi)
- c = &GenerateRootCommand{
- Key: hex.EncodeToString(key),
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- c.Nonce = config.Nonce
-
- // Provide the key
- args = []string{
- "-address", addr,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
- }
-
- beforeNAfter := strings.Split(ui.OutputWriter.String(), "Encoded root token: ")
- if len(beforeNAfter) != 2 {
- t.Fatalf("did not find encoded root token in %s", ui.OutputWriter.String())
- }
- encodedToken := strings.TrimSpace(beforeNAfter[1])
-
- decodedToken, err := xor.XORBase64(encodedToken, otp)
- if err != nil {
- t.Fatal(err)
- }
-
- token, err := uuid.FormatUUID(decodedToken)
- if err != nil {
- t.Fatal(err)
- }
-
- req := logical.TestRequest(t, logical.ReadOperation, "lookup-self")
- req.ClientToken = token
-
- resp, err := ts.HandleRequest(req)
- if err != nil {
- t.Fatalf("error running token lookup-self: %v", err)
- }
- if resp == nil {
- t.Fatalf("got nil resp with token lookup-self")
- }
- if resp.Data == nil {
- t.Fatalf("got nil resp.Data with token lookup-self")
- }
-
- if resp.Data["orphan"].(bool) != true ||
- resp.Data["ttl"].(int64) != 0 ||
- resp.Data["num_uses"].(int) != 0 ||
- resp.Data["meta"].(map[string]string) != nil ||
- len(resp.Data["policies"].([]string)) != 1 ||
- resp.Data["policies"].([]string)[0] != "root" {
- t.Fatalf("bad: %#v", resp.Data)
- }
-
- // Clear the output and run a decode to verify we get the same result
- ui.OutputWriter.Reset()
- args = []string{
- "-address", addr,
- "-decode", encodedToken,
- "-otp", otp,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
- beforeNAfter = strings.Split(ui.OutputWriter.String(), "Root token: ")
- if len(beforeNAfter) != 2 {
- t.Fatalf("did not find decoded root token in %s", ui.OutputWriter.String())
- }
-
- outToken := strings.TrimSpace(beforeNAfter[1])
- if outToken != token {
- t.Fatalf("tokens do not match:\n%s\n%s", token, outToken)
- }
-}
-
-func TestGenerateRoot_PGP(t *testing.T) {
- core, ts, keys, _ := vault.TestCoreWithTokenStore(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &GenerateRootCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- tempDir, pubFiles, err := getPubKeyFiles(t)
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tempDir)
-
- // Init the attempt
- args := []string{
- "-address", addr,
- "-init",
- "-pgp-key", pubFiles[0],
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- config, err := core.GenerateRootConfiguration()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- for _, key := range keys {
- c = &GenerateRootCommand{
- Key: hex.EncodeToString(key),
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- c.Nonce = config.Nonce
-
- // Provide the key
- args = []string{
- "-address", addr,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
- }
-
- beforeNAfter := strings.Split(ui.OutputWriter.String(), "Encoded root token: ")
- if len(beforeNAfter) != 2 {
- t.Fatalf("did not find encoded root token in %s", ui.OutputWriter.String())
- }
- encodedToken := strings.TrimSpace(beforeNAfter[1])
-
- ptBuf, err := pgpkeys.DecryptBytes(encodedToken, pgpkeys.TestPrivKey1)
- if err != nil {
- t.Fatal(err)
- }
- if ptBuf == nil {
- t.Fatal("returned plain text buffer is nil")
- }
-
- token := ptBuf.String()
-
- req := logical.TestRequest(t, logical.ReadOperation, "lookup-self")
- req.ClientToken = token
-
- resp, err := ts.HandleRequest(req)
- if err != nil {
- t.Fatalf("error running token lookup-self: %v", err)
- }
- if resp == nil {
- t.Fatalf("got nil resp with token lookup-self")
- }
- if resp.Data == nil {
- t.Fatalf("got nil resp.Data with token lookup-self")
- }
-
- if resp.Data["orphan"].(bool) != true ||
- resp.Data["ttl"].(int64) != 0 ||
- resp.Data["num_uses"].(int) != 0 ||
- resp.Data["meta"].(map[string]string) != nil ||
- len(resp.Data["policies"].([]string)) != 1 ||
- resp.Data["policies"].([]string)[0] != "root" {
- t.Fatalf("bad: %#v", resp.Data)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/init.go b/vendor/github.com/hashicorp/vault/command/init.go
deleted file mode 100644
index 470c325..0000000
--- a/vendor/github.com/hashicorp/vault/command/init.go
+++ /dev/null
@@ -1,406 +0,0 @@
-package command
-
-import (
- "fmt"
- "net/url"
- "os"
- "runtime"
- "strings"
-
- consulapi "github.com/hashicorp/consul/api"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/physical/consul"
- "github.com/posener/complete"
-)
-
-// InitCommand is a Command that initializes a new Vault server.
-type InitCommand struct {
- meta.Meta
-}
-
-func (c *InitCommand) Run(args []string) int {
- var threshold, shares, storedShares, recoveryThreshold, recoveryShares int
- var pgpKeys, recoveryPgpKeys, rootTokenPgpKey pgpkeys.PubKeyFilesFlag
- var auto, check bool
- var consulServiceName string
- flags := c.Meta.FlagSet("init", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- flags.IntVar(&shares, "key-shares", 5, "")
- flags.IntVar(&threshold, "key-threshold", 3, "")
- flags.IntVar(&storedShares, "stored-shares", 0, "")
- flags.Var(&pgpKeys, "pgp-keys", "")
- flags.Var(&rootTokenPgpKey, "root-token-pgp-key", "")
- flags.IntVar(&recoveryShares, "recovery-shares", 5, "")
- flags.IntVar(&recoveryThreshold, "recovery-threshold", 3, "")
- flags.Var(&recoveryPgpKeys, "recovery-pgp-keys", "")
- flags.BoolVar(&check, "check", false, "")
- flags.BoolVar(&auto, "auto", false, "")
- flags.StringVar(&consulServiceName, "consul-service", consul.DefaultServiceName, "")
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- initRequest := &api.InitRequest{
- SecretShares: shares,
- SecretThreshold: threshold,
- StoredShares: storedShares,
- PGPKeys: pgpKeys,
- RecoveryShares: recoveryShares,
- RecoveryThreshold: recoveryThreshold,
- RecoveryPGPKeys: recoveryPgpKeys,
- }
-
- switch len(rootTokenPgpKey) {
- case 0:
- case 1:
- initRequest.RootTokenPGPKey = rootTokenPgpKey[0]
- default:
- c.Ui.Error("Only one PGP key can be specified for encrypting the root token")
- return 1
- }
-
- // If running in 'auto' mode, run service discovery based on environment
- // variables of Consul.
- if auto {
-
- // Create configuration for Consul
- consulConfig := consulapi.DefaultConfig()
-
- // Create a client to communicate with Consul
- consulClient, err := consulapi.NewClient(consulConfig)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Failed to create Consul client:%v", err))
- return 1
- }
-
- // Fetch Vault's protocol scheme from the client
- vaultclient, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Failed to fetch Vault client: %v", err))
- return 1
- }
-
- if vaultclient.Address() == "" {
- c.Ui.Error("Failed to fetch Vault client address")
- return 1
- }
-
- clientURL, err := url.Parse(vaultclient.Address())
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Failed to parse Vault address: %v", err))
- return 1
- }
-
- if clientURL == nil {
- c.Ui.Error("Failed to parse Vault client address")
- return 1
- }
-
- var uninitializedVaults []string
- var initializedVault string
-
- // Query the nodes belonging to the cluster
- if services, _, err := consulClient.Catalog().Service(consulServiceName, "", &consulapi.QueryOptions{AllowStale: true}); err == nil {
- Loop:
- for _, service := range services {
- vaultAddress := &url.URL{
- Scheme: clientURL.Scheme,
- Host: fmt.Sprintf("%s:%d", service.ServiceAddress, service.ServicePort),
- }
-
- // Set VAULT_ADDR to the discovered node
- os.Setenv(api.EnvVaultAddress, vaultAddress.String())
-
- // Create a client to communicate with the discovered node
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
- return 1
- }
-
- // Check the initialization status of the discovered node
- inited, err := client.Sys().InitStatus()
- switch {
- case err != nil:
- c.Ui.Error(fmt.Sprintf("Error checking initialization status of discovered node: %+q. Err: %v", vaultAddress.String(), err))
- return 1
- case inited:
- // One of the nodes in the cluster is initialized. Break out.
- initializedVault = vaultAddress.String()
- break Loop
- default:
- // Vault is uninitialized.
- uninitializedVaults = append(uninitializedVaults, vaultAddress.String())
- }
- }
- }
-
- export := "export"
- quote := "'"
- if runtime.GOOS == "windows" {
- export = "set"
- quote = ""
- }
-
- if initializedVault != "" {
- vaultURL, err := url.Parse(initializedVault)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Failed to parse Vault address: %+q. Err: %v", initializedVault, err))
- }
- c.Ui.Output(fmt.Sprintf("Discovered an initialized Vault node at %+q, using Consul service name %+q", vaultURL.String(), consulServiceName))
- c.Ui.Output("\nSet the following environment variable to operate on the discovered Vault:\n")
- c.Ui.Output(fmt.Sprintf("\t%s VAULT_ADDR=%s%s%s", export, quote, vaultURL.String(), quote))
- return 0
- }
-
- switch len(uninitializedVaults) {
- case 0:
- c.Ui.Error(fmt.Sprintf("Failed to discover Vault nodes using Consul service name %+q", consulServiceName))
- return 1
- case 1:
- // There was only one node found in the Vault cluster and it
- // was uninitialized.
-
- vaultURL, err := url.Parse(uninitializedVaults[0])
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Failed to parse Vault address: %+q. Err: %v", uninitializedVaults[0], err))
- }
-
- // Set the VAULT_ADDR to the discovered node. This will ensure
- // that the client created will operate on the discovered node.
- os.Setenv(api.EnvVaultAddress, vaultURL.String())
-
- // Let the client know that initialization is perfomed on the
- // discovered node.
- c.Ui.Output(fmt.Sprintf("Discovered Vault at %+q using Consul service name %+q\n", vaultURL.String(), consulServiceName))
-
- // Attempt initializing it
- ret := c.runInit(check, initRequest)
-
- // Regardless of success or failure, instruct client to update VAULT_ADDR
- c.Ui.Output("\nSet the following environment variable to operate on the discovered Vault:\n")
- c.Ui.Output(fmt.Sprintf("\t%s VAULT_ADDR=%s%s%s", export, quote, vaultURL.String(), quote))
-
- return ret
- default:
- // If more than one Vault node were discovered, print out all of them,
- // requiring the client to update VAULT_ADDR and to run init again.
- c.Ui.Output(fmt.Sprintf("Discovered more than one uninitialized Vaults using Consul service name %+q\n", consulServiceName))
- c.Ui.Output("To initialize these Vaults, set any *one* of the following environment variables and run 'vault init':")
-
- // Print valid commands to make setting the variables easier
- for _, vaultNode := range uninitializedVaults {
- vaultURL, err := url.Parse(vaultNode)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Failed to parse Vault address: %+q. Err: %v", vaultNode, err))
- }
- c.Ui.Output(fmt.Sprintf("\t%s VAULT_ADDR=%s%s%s", export, quote, vaultURL.String(), quote))
-
- }
- return 0
- }
- }
-
- return c.runInit(check, initRequest)
-}
-
-func (c *InitCommand) runInit(check bool, initRequest *api.InitRequest) int {
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 1
- }
-
- if check {
- return c.checkStatus(client)
- }
-
- resp, err := client.Sys().Init(initRequest)
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing Vault: %s", err))
- return 1
- }
-
- for i, key := range resp.Keys {
- if resp.KeysB64 != nil && len(resp.KeysB64) == len(resp.Keys) {
- c.Ui.Output(fmt.Sprintf("Unseal Key %d: %s", i+1, resp.KeysB64[i]))
- } else {
- c.Ui.Output(fmt.Sprintf("Unseal Key %d: %s", i+1, key))
- }
- }
- for i, key := range resp.RecoveryKeys {
- if resp.RecoveryKeysB64 != nil && len(resp.RecoveryKeysB64) == len(resp.RecoveryKeys) {
- c.Ui.Output(fmt.Sprintf("Recovery Key %d: %s", i+1, resp.RecoveryKeysB64[i]))
- } else {
- c.Ui.Output(fmt.Sprintf("Recovery Key %d: %s", i+1, key))
- }
- }
-
- c.Ui.Output(fmt.Sprintf("Initial Root Token: %s", resp.RootToken))
-
- if initRequest.StoredShares < 1 {
- c.Ui.Output(fmt.Sprintf(
- "\n"+
- "Vault initialized with %d keys and a key threshold of %d. Please\n"+
- "securely distribute the above keys. When the vault is re-sealed,\n"+
- "restarted, or stopped, you must provide at least %d of these keys\n"+
- "to unseal it again.\n\n"+
- "Vault does not store the master key. Without at least %d keys,\n"+
- "your vault will remain permanently sealed.",
- initRequest.SecretShares,
- initRequest.SecretThreshold,
- initRequest.SecretThreshold,
- initRequest.SecretThreshold,
- ))
- } else {
- c.Ui.Output(
- "\n" +
- "Vault initialized successfully.",
- )
- }
- if len(resp.RecoveryKeys) > 0 {
- c.Ui.Output(fmt.Sprintf(
- "\n"+
- "Recovery key initialized with %d keys and a key threshold of %d. Please\n"+
- "securely distribute the above keys.",
- initRequest.RecoveryShares,
- initRequest.RecoveryThreshold,
- ))
- }
-
- return 0
-}
-
-func (c *InitCommand) checkStatus(client *api.Client) int {
- inited, err := client.Sys().InitStatus()
- switch {
- case err != nil:
- c.Ui.Error(fmt.Sprintf(
- "Error checking initialization status: %s", err))
- return 1
- case inited:
- c.Ui.Output("Vault has been initialized")
- return 0
- default:
- c.Ui.Output("Vault is not initialized")
- return 2
- }
-}
-
-func (c *InitCommand) Synopsis() string {
- return "Initialize a new Vault server"
-}
-
-func (c *InitCommand) Help() string {
- helpText := `
-Usage: vault init [options]
-
- Initialize a new Vault server.
-
- This command connects to a Vault server and initializes it for the
- first time. This sets up the initial set of master keys and the
- backend data store structure.
-
- This command can't be called on an already-initialized Vault server.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Init Options:
-
- -check Don't actually initialize, just check if Vault is
- already initialized. A return code of 0 means Vault
- is initialized; a return code of 2 means Vault is not
- initialized; a return code of 1 means an error was
- encountered.
-
- -key-shares=5 The number of key shares to split the master key
- into.
-
- -key-threshold=3 The number of key shares required to reconstruct
- the master key.
-
- -stored-shares=0 The number of unseal keys to store. Only used with
- Vault HSM. Must currently be equivalent to the
- number of shares.
-
- -pgp-keys If provided, must be a comma-separated list of
- files on disk containing binary- or base64-format
- public PGP keys, or Keybase usernames specified as
- "keybase:". The output unseal keys will
- be encrypted and base64-encoded, in order, with the
- given public keys. If you want to use them with the
- 'vault unseal' command, you will need to base64-
- decode and decrypt; this will be the plaintext
- unseal key. When 'stored-shares' are not used, the
- number of entries in this field must match 'key-shares'.
- When 'stored-shares' are used, the number of entries
- should match the difference between 'key-shares'
- and 'stored-shares'.
-
- -root-token-pgp-key If provided, a file on disk with a binary- or
- base64-format public PGP key, or a Keybase username
- specified as "keybase:". The output root
- token will be encrypted and base64-encoded, in
- order, with the given public key. You will need
- to base64-decode and decrypt the result.
-
- -recovery-shares=5 The number of key shares to split the recovery key
- into. Only used with Vault HSM.
-
- -recovery-threshold=3 The number of key shares required to reconstruct
- the recovery key. Only used with Vault HSM.
-
- -recovery-pgp-keys If provided, behaves like "pgp-keys" but for the
- recovery key shares. Only used with Vault HSM.
-
- -auto If set, performs service discovery using Consul.
- When all the nodes of a Vault cluster are
- registered with Consul, setting this flag will
- trigger service discovery using the service name
- with which Vault nodes are registered. This option
- works well when each Vault cluster is registered
- under a unique service name. Note that, when Consul
- is serving as Vault's HA backend, Vault nodes are
- registered with Consul by default. The service name
- can be changed using 'consul-service' flag. Ensure
- that environment variables required to communicate
- with Consul, like (CONSUL_HTTP_ADDR,
- CONSUL_HTTP_TOKEN, CONSUL_HTTP_SSL, et al) are
- properly set. When only one Vault node is
- discovered, it will be initialized and when more
- than one Vault node is discovered, they will be
- output for easy selection.
-
- -consul-service Service name under which all the nodes of a Vault
- cluster are registered with Consul. Note that, when
- Vault uses Consul as its HA backend, by default,
- Vault will register itself as a service with Consul
- with the service name "vault". This name can be
- modified in Vault's configuration file, using the
- "service" option for the Consul backend.
-`
- return strings.TrimSpace(helpText)
-}
-
-func (c *InitCommand) AutocompleteArgs() complete.Predictor {
- return complete.PredictNothing
-}
-
-func (c *InitCommand) AutocompleteFlags() complete.Flags {
- return complete.Flags{
- "-check": complete.PredictNothing,
- "-key-shares": complete.PredictNothing,
- "-key-threshold": complete.PredictNothing,
- "-pgp-keys": complete.PredictNothing,
- "-root-token-pgp-key": complete.PredictNothing,
- "-recovery-shares": complete.PredictNothing,
- "-recovery-threshold": complete.PredictNothing,
- "-recovery-pgp-keys": complete.PredictNothing,
- "-auto": complete.PredictNothing,
- "-consul-service": complete.PredictNothing,
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/init_test.go b/vendor/github.com/hashicorp/vault/command/init_test.go
deleted file mode 100644
index e09ba80..0000000
--- a/vendor/github.com/hashicorp/vault/command/init_test.go
+++ /dev/null
@@ -1,343 +0,0 @@
-package command
-
-import (
- "bytes"
- "encoding/base64"
- "os"
- "reflect"
- "regexp"
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/keybase/go-crypto/openpgp"
- "github.com/keybase/go-crypto/openpgp/packet"
- "github.com/mitchellh/cli"
-)
-
-func TestInit(t *testing.T) {
- ui := new(cli.MockUi)
- c := &InitCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- core := vault.TestCore(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- init, err := core.Initialized()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if init {
- t.Fatal("should not be initialized")
- }
-
- args := []string{"-address", addr}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- init, err = core.Initialized()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if !init {
- t.Fatal("should be initialized")
- }
-
- sealConf, err := core.SealAccess().BarrierConfig()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- expected := &vault.SealConfig{
- Type: "shamir",
- SecretShares: 5,
- SecretThreshold: 3,
- }
- if !reflect.DeepEqual(expected, sealConf) {
- t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, sealConf)
- }
-}
-
-func TestInit_Check(t *testing.T) {
- ui := new(cli.MockUi)
- c := &InitCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- core := vault.TestCore(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- // Should return 2, not initialized
- args := []string{"-address", addr, "-check"}
- if code := c.Run(args); code != 2 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Now initialize it
- args = []string{"-address", addr}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Should return 0, initialized
- args = []string{"-address", addr, "-check"}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- init, err := core.Initialized()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if !init {
- t.Fatal("should be initialized")
- }
-}
-
-func TestInit_custom(t *testing.T) {
- ui := new(cli.MockUi)
- c := &InitCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- core := vault.TestCore(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- init, err := core.Initialized()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if init {
- t.Fatal("should not be initialized")
- }
-
- args := []string{
- "-address", addr,
- "-key-shares", "7",
- "-key-threshold", "3",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- init, err = core.Initialized()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if !init {
- t.Fatal("should be initialized")
- }
-
- sealConf, err := core.SealAccess().BarrierConfig()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- expected := &vault.SealConfig{
- Type: "shamir",
- SecretShares: 7,
- SecretThreshold: 3,
- }
- if !reflect.DeepEqual(expected, sealConf) {
- t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, sealConf)
- }
-
- re, err := regexp.Compile("\\s+Initial Root Token:\\s+(.*)")
- if err != nil {
- t.Fatalf("Error compiling regex: %s", err)
- }
- matches := re.FindAllStringSubmatch(ui.OutputWriter.String(), -1)
- if len(matches) != 1 {
- t.Fatalf("Unexpected number of tokens found, got %d", len(matches))
- }
-
- rootToken := matches[0][1]
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("Error fetching client: %v", err)
- }
-
- client.SetToken(rootToken)
-
- re, err = regexp.Compile("\\s*Unseal Key \\d+: (.*)")
- if err != nil {
- t.Fatalf("Error compiling regex: %s", err)
- }
- matches = re.FindAllStringSubmatch(ui.OutputWriter.String(), -1)
- if len(matches) != 7 {
- t.Fatalf("Unexpected number of keys returned, got %d, matches was \n\n%#v\n\n, input was \n\n%s\n\n", len(matches), matches, ui.OutputWriter.String())
- }
-
- var unsealed bool
- for i := 0; i < 3; i++ {
- decodedKey, err := base64.StdEncoding.DecodeString(strings.TrimSpace(matches[i][1]))
- if err != nil {
- t.Fatalf("err decoding key %v: %v", matches[i][1], err)
- }
- unsealed, err = core.Unseal(decodedKey)
- if err != nil {
- t.Fatalf("err during unseal: %v; key was %v", err, matches[i][1])
- }
- }
- if !unsealed {
- t.Fatal("expected to be unsealed")
- }
-
- tokenInfo, err := client.Auth().Token().LookupSelf()
- if err != nil {
- t.Fatalf("Error looking up root token info: %v", err)
- }
-
- if tokenInfo.Data["policies"].([]interface{})[0].(string) != "root" {
- t.Fatalf("expected root policy")
- }
-}
-
-func TestInit_PGP(t *testing.T) {
- ui := new(cli.MockUi)
- c := &InitCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- core := vault.TestCore(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- init, err := core.Initialized()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if init {
- t.Fatal("should not be initialized")
- }
-
- tempDir, pubFiles, err := getPubKeyFiles(t)
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tempDir)
-
- args := []string{
- "-address", addr,
- "-key-shares", "2",
- "-pgp-keys", pubFiles[0] + ",@" + pubFiles[1] + "," + pubFiles[2],
- "-key-threshold", "2",
- "-root-token-pgp-key", pubFiles[0],
- }
-
- // This should fail, as key-shares does not match pgp-keys size
- if code := c.Run(args); code == 0 {
- t.Fatalf("bad (command should have failed): %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- args = []string{
- "-address", addr,
- "-key-shares", "4",
- "-pgp-keys", pubFiles[0] + ",@" + pubFiles[1] + "," + pubFiles[2] + "," + pubFiles[3],
- "-key-threshold", "2",
- "-root-token-pgp-key", pubFiles[0],
- }
-
- ui.OutputWriter.Reset()
-
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- init, err = core.Initialized()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if !init {
- t.Fatal("should be initialized")
- }
-
- sealConf, err := core.SealAccess().BarrierConfig()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- pgpKeys := []string{}
- for _, pubFile := range pubFiles {
- pub, err := pgpkeys.ReadPGPFile(pubFile)
- if err != nil {
- t.Fatalf("bad: %v", err)
- }
- pgpKeys = append(pgpKeys, pub)
- }
-
- expected := &vault.SealConfig{
- Type: "shamir",
- SecretShares: 4,
- SecretThreshold: 2,
- PGPKeys: pgpKeys,
- }
- if !reflect.DeepEqual(expected, sealConf) {
- t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, sealConf)
- }
-
- re, err := regexp.Compile("\\s+Initial Root Token:\\s+(.*)")
- if err != nil {
- t.Fatalf("Error compiling regex: %s", err)
- }
- matches := re.FindAllStringSubmatch(ui.OutputWriter.String(), -1)
- if len(matches) != 1 {
- t.Fatalf("Unexpected number of tokens found, got %d", len(matches))
- }
-
- encRootToken := matches[0][1]
- privKeyBytes, err := base64.StdEncoding.DecodeString(pgpkeys.TestPrivKey1)
- if err != nil {
- t.Fatalf("error decoding private key: %v", err)
- }
- ptBuf := bytes.NewBuffer(nil)
- entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes)))
- if err != nil {
- t.Fatalf("Error parsing private key: %s", err)
- }
- var rootBytes []byte
- rootBytes, err = base64.StdEncoding.DecodeString(encRootToken)
- if err != nil {
- t.Fatalf("Error decoding root token: %s", err)
- }
- entityList := &openpgp.EntityList{entity}
- md, err := openpgp.ReadMessage(bytes.NewBuffer(rootBytes), entityList, nil, nil)
- if err != nil {
- t.Fatalf("Error decrypting root token: %s", err)
- }
- ptBuf.ReadFrom(md.UnverifiedBody)
- rootToken := ptBuf.String()
-
- parseDecryptAndTestUnsealKeys(t, ui.OutputWriter.String(), rootToken, false, nil, nil, core)
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("Error fetching client: %v", err)
- }
-
- client.SetToken(rootToken)
-
- tokenInfo, err := client.Auth().Token().LookupSelf()
- if err != nil {
- t.Fatalf("Error looking up root token info: %v", err)
- }
-
- if tokenInfo.Data["policies"].([]interface{})[0].(string) != "root" {
- t.Fatalf("expected root policy")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/key_status.go b/vendor/github.com/hashicorp/vault/command/key_status.go
deleted file mode 100644
index ff1b086..0000000
--- a/vendor/github.com/hashicorp/vault/command/key_status.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// KeyStatusCommand is a Command that provides information about the key status
-type KeyStatusCommand struct {
- meta.Meta
-}
-
-func (c *KeyStatusCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("key-status", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- status, err := client.Sys().KeyStatus()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error reading audits: %s", err))
- return 2
- }
-
- c.Ui.Output(fmt.Sprintf("Key Term: %d", status.Term))
- c.Ui.Output(fmt.Sprintf("Installation Time: %v", status.InstallTime))
- return 0
-}
-
-func (c *KeyStatusCommand) Synopsis() string {
- return "Provides information about the active encryption key"
-}
-
-func (c *KeyStatusCommand) Help() string {
- helpText := `
-Usage: vault key-status [options]
-
- Provides information about the active encryption key. Specifically,
- the current key term and the key installation time.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/key_status_test.go b/vendor/github.com/hashicorp/vault/command/key_status_test.go
deleted file mode 100644
index 0adcefa..0000000
--- a/vendor/github.com/hashicorp/vault/command/key_status_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestKeyStatus(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &KeyStatusCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/list.go b/vendor/github.com/hashicorp/vault/command/list.go
deleted file mode 100644
index 71bf388..0000000
--- a/vendor/github.com/hashicorp/vault/command/list.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package command
-
-import (
- "flag"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/meta"
-)
-
-// ListCommand is a Command that lists data from the Vault.
-type ListCommand struct {
- meta.Meta
-}
-
-func (c *ListCommand) Run(args []string) int {
- var format string
- var err error
- var secret *api.Secret
- var flags *flag.FlagSet
- flags = c.Meta.FlagSet("list", meta.FlagSetDefault)
- flags.StringVar(&format, "format", "table", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 || len(args[0]) == 0 {
- c.Ui.Error("list expects one argument")
- flags.Usage()
- return 1
- }
-
- path := args[0]
- if path[0] == '/' {
- path = path[1:]
- }
-
- if !strings.HasSuffix(path, "/") {
- path = path + "/"
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- secret, err = client.Logical().List(path)
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error reading %s: %s", path, err))
- return 1
- }
- if secret == nil {
- c.Ui.Error(fmt.Sprintf(
- "No value found at %s", path))
- return 1
- }
- if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 {
- return OutputSecret(c.Ui, format, secret)
- }
-
- if secret.Data["keys"] == nil {
- c.Ui.Error("No entries found")
- return 0
- }
-
- return OutputList(c.Ui, format, secret)
-}
-
-func (c *ListCommand) Synopsis() string {
- return "List data or secrets in Vault"
-}
-
-func (c *ListCommand) Help() string {
- helpText :=
- `
-Usage: vault list [options] path
-
- List data from Vault.
-
- Retrieve a listing of available data. The data returned, if any, is backend-
- and endpoint-specific.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Read Options:
-
- -format=table The format for output. By default it is a whitespace-
- delimited table. This can also be json or yaml.
-`
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/list_test.go b/vendor/github.com/hashicorp/vault/command/list_test.go
deleted file mode 100644
index 1f75c0b..0000000
--- a/vendor/github.com/hashicorp/vault/command/list_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package command
-
-import (
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestList(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &ReadCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "-format", "json",
- "secret",
- }
-
- // Run once so the client is setup, ignore errors
- c.Run(args)
-
- // Get the client so we can write data
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- data := map[string]interface{}{"value": "bar"}
- if _, err := client.Logical().Write("secret/foo", data); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- data = map[string]interface{}{"value": "bar"}
- if _, err := client.Logical().Write("secret/foo/bar", data); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- secret, err := client.Logical().List("secret/")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if secret == nil {
- t.Fatalf("err: No value found at secret/")
- }
-
- if secret.Data == nil {
- t.Fatalf("err: Data not found")
- }
-
- exp := map[string]interface{}{
- "keys": []interface{}{"foo", "foo/"},
- }
-
- if !reflect.DeepEqual(secret.Data, exp) {
- t.Fatalf("err: expected %#v, got %#v", exp, secret.Data)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/mount.go b/vendor/github.com/hashicorp/vault/command/mount.go
deleted file mode 100644
index 895e7b8..0000000
--- a/vendor/github.com/hashicorp/vault/command/mount.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/meta"
- "github.com/posener/complete"
-)
-
-// MountCommand is a Command that mounts a new mount.
-type MountCommand struct {
- meta.Meta
-}
-
-func (c *MountCommand) Run(args []string) int {
- var description, path, defaultLeaseTTL, maxLeaseTTL, pluginName string
- var local, forceNoCache bool
- flags := c.Meta.FlagSet("mount", meta.FlagSetDefault)
- flags.StringVar(&description, "description", "", "")
- flags.StringVar(&path, "path", "", "")
- flags.StringVar(&defaultLeaseTTL, "default-lease-ttl", "", "")
- flags.StringVar(&maxLeaseTTL, "max-lease-ttl", "", "")
- flags.StringVar(&pluginName, "plugin-name", "", "")
- flags.BoolVar(&forceNoCache, "force-no-cache", false, "")
- flags.BoolVar(&local, "local", false, "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\nmount expects one argument: the type to mount."))
- return 1
- }
-
- mountType := args[0]
-
- // If no path is specified, we default the path to the backend type
- // or use the plugin name if it's a plugin backend
- if path == "" {
- if mountType == "plugin" {
- path = pluginName
- } else {
- path = mountType
- }
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- mountInfo := &api.MountInput{
- Type: mountType,
- Description: description,
- Config: api.MountConfigInput{
- DefaultLeaseTTL: defaultLeaseTTL,
- MaxLeaseTTL: maxLeaseTTL,
- ForceNoCache: forceNoCache,
- PluginName: pluginName,
- },
- Local: local,
- }
-
- if err := client.Sys().Mount(path, mountInfo); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Mount error: %s", err))
- return 2
- }
-
- mountTypeOutput := fmt.Sprintf("'%s'", mountType)
- if mountType == "plugin" {
- mountTypeOutput = fmt.Sprintf("plugin '%s'", pluginName)
- }
-
- c.Ui.Output(fmt.Sprintf(
- "Successfully mounted %s at '%s'!",
- mountTypeOutput, path))
-
- return 0
-}
-
-func (c *MountCommand) Synopsis() string {
- return "Mount a logical backend"
-}
-
-func (c *MountCommand) Help() string {
- helpText := `
-Usage: vault mount [options] type
-
- Mount a logical backend.
-
- This command mounts a logical backend for storing and/or generating
- secrets.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Mount Options:
-
- -description= Human-friendly description of the purpose for
- the mount. This shows up in the mounts command.
-
- -path= Mount point for the logical backend. This
- defaults to the type of the mount.
-
- -default-lease-ttl= Default lease time-to-live for this backend.
- If not specified, uses the global default, or
- the previously set value. Set to '0' to
- explicitly set it to use the global default.
-
- -max-lease-ttl= Max lease time-to-live for this backend.
- If not specified, uses the global default, or
- the previously set value. Set to '0' to
- explicitly set it to use the global default.
-
- -force-no-cache Forces the backend to disable caching. If not
- specified, uses the global default. This does
- not affect caching of the underlying encrypted
- data storage.
-
- -plugin-name Name of the plugin to mount based from the name
- in the plugin catalog.
-
- -local Mark the mount as a local mount. Local mounts
- are not replicated nor (if a secondary)
- removed by replication.
-`
- return strings.TrimSpace(helpText)
-}
-
-func (c *MountCommand) AutocompleteArgs() complete.Predictor {
- // This list does not contain deprecated backends
- return complete.PredictSet(
- "aws",
- "consul",
- "pki",
- "transit",
- "ssh",
- "rabbitmq",
- "database",
- "totp",
- "plugin",
- )
-
-}
-
-func (c *MountCommand) AutocompleteFlags() complete.Flags {
- return complete.Flags{
- "-description": complete.PredictNothing,
- "-path": complete.PredictNothing,
- "-default-lease-ttl": complete.PredictNothing,
- "-max-lease-ttl": complete.PredictNothing,
- "-force-no-cache": complete.PredictNothing,
- "-plugin-name": complete.PredictNothing,
- "-local": complete.PredictNothing,
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/mount_test.go b/vendor/github.com/hashicorp/vault/command/mount_test.go
deleted file mode 100644
index ea9108c..0000000
--- a/vendor/github.com/hashicorp/vault/command/mount_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestMount(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &MountCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "kv",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mounts, err := client.Sys().ListMounts()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mount, ok := mounts["kv/"]
- if !ok {
- t.Fatal("should have kv mount")
- }
- if mount.Type != "kv" {
- t.Fatal("should be kv type")
- }
-}
-
-func TestMount_Generic(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &MountCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "generic",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mounts, err := client.Sys().ListMounts()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mount, ok := mounts["generic/"]
- if !ok {
- t.Fatal("should have generic mount path")
- }
- if mount.Type != "generic" {
- t.Fatal("should be generic type")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/mount_tune.go b/vendor/github.com/hashicorp/vault/command/mount_tune.go
deleted file mode 100644
index e1efdd2..0000000
--- a/vendor/github.com/hashicorp/vault/command/mount_tune.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/meta"
-)
-
-// MountTuneCommand is a Command that remounts a mounted secret backend
-// to a new endpoint.
-type MountTuneCommand struct {
- meta.Meta
-}
-
-func (c *MountTuneCommand) Run(args []string) int {
- var defaultLeaseTTL, maxLeaseTTL string
- flags := c.Meta.FlagSet("mount-tune", meta.FlagSetDefault)
- flags.StringVar(&defaultLeaseTTL, "default-lease-ttl", "", "")
- flags.StringVar(&maxLeaseTTL, "max-lease-ttl", "", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\nmount-tune expects one arguments: the mount path"))
- return 1
- }
-
- path := args[0]
-
- mountConfig := api.MountConfigInput{
- DefaultLeaseTTL: defaultLeaseTTL,
- MaxLeaseTTL: maxLeaseTTL,
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- if err := client.Sys().TuneMount(path, mountConfig); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Mount tune error: %s", err))
- return 2
- }
-
- c.Ui.Output(fmt.Sprintf(
- "Successfully tuned mount '%s'!", path))
-
- return 0
-}
-
-func (c *MountTuneCommand) Synopsis() string {
- return "Tune mount configuration parameters"
-}
-
-func (c *MountTuneCommand) Help() string {
- helpText := `
- Usage: vault mount-tune [options] path
-
- Tune configuration options for a mounted secret backend.
-
- Example: vault mount-tune -default-lease-ttl="24h" secret
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Mount Options:
-
- -default-lease-ttl= Default lease time-to-live for this backend.
- If not specified, uses the system default, or
- the previously set value. Set to 'system' to
- explicitly set it to use the system default.
-
- -max-lease-ttl= Max lease time-to-live for this backend.
- If not specified, uses the system default, or
- the previously set value. Set to 'system' to
- explicitly set it to use the system default.
-
-`
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/mounts.go b/vendor/github.com/hashicorp/vault/command/mounts.go
deleted file mode 100644
index 2615776..0000000
--- a/vendor/github.com/hashicorp/vault/command/mounts.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package command
-
-import (
- "fmt"
- "sort"
- "strconv"
- "strings"
-
- "github.com/hashicorp/vault/meta"
- "github.com/ryanuber/columnize"
-)
-
-// MountsCommand is a Command that lists the mounts.
-type MountsCommand struct {
- meta.Meta
-}
-
-func (c *MountsCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("mounts", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- mounts, err := client.Sys().ListMounts()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error reading mounts: %s", err))
- return 2
- }
-
- paths := make([]string, 0, len(mounts))
- for path := range mounts {
- paths = append(paths, path)
- }
- sort.Strings(paths)
-
- columns := []string{"Path | Type | Accessor | Plugin | Default TTL | Max TTL | Force No Cache | Replication Behavior | Description"}
- for _, path := range paths {
- mount := mounts[path]
- pluginName := "n/a"
- if mount.Config.PluginName != "" {
- pluginName = mount.Config.PluginName
- }
- defTTL := "system"
- switch {
- case mount.Type == "system":
- defTTL = "n/a"
- case mount.Type == "cubbyhole":
- defTTL = "n/a"
- case mount.Config.DefaultLeaseTTL != 0:
- defTTL = strconv.Itoa(mount.Config.DefaultLeaseTTL)
- }
- maxTTL := "system"
- switch {
- case mount.Type == "system":
- maxTTL = "n/a"
- case mount.Type == "cubbyhole":
- maxTTL = "n/a"
- case mount.Config.MaxLeaseTTL != 0:
- maxTTL = strconv.Itoa(mount.Config.MaxLeaseTTL)
- }
- replicatedBehavior := "replicated"
- if mount.Local {
- replicatedBehavior = "local"
- }
- columns = append(columns, fmt.Sprintf(
- "%s | %s | %s | %s | %s | %s | %v | %s | %s", path, mount.Type, mount.Accessor, pluginName, defTTL, maxTTL,
- mount.Config.ForceNoCache, replicatedBehavior, mount.Description))
- }
-
- c.Ui.Output(columnize.SimpleFormat(columns))
- return 0
-}
-
-func (c *MountsCommand) Synopsis() string {
- return "Lists mounted backends in Vault"
-}
-
-func (c *MountsCommand) Help() string {
- helpText := `
-Usage: vault mounts [options]
-
- Outputs information about the mounted backends.
-
- This command lists the mounted backends, their mount points, the
- configured TTLs, and a human-friendly description of the mount point.
- A TTL of 'system' indicates that the system default is being used.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/mounts_test.go b/vendor/github.com/hashicorp/vault/command/mounts_test.go
deleted file mode 100644
index 55e5f67..0000000
--- a/vendor/github.com/hashicorp/vault/command/mounts_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestMounts(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &MountsCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/path_help.go b/vendor/github.com/hashicorp/vault/command/path_help.go
deleted file mode 100644
index 6eed960..0000000
--- a/vendor/github.com/hashicorp/vault/command/path_help.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// PathHelpCommand is a Command that lists the mounts.
-type PathHelpCommand struct {
- meta.Meta
-}
-
-func (c *PathHelpCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("help", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 {
- flags.Usage()
- c.Ui.Error("\nhelp expects a single argument")
- return 1
- }
-
- path := args[0]
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- help, err := client.Help(path)
- if err != nil {
- if strings.Contains(err.Error(), "Vault is sealed") {
- c.Ui.Error(`Error: Vault is sealed.
-
-The path-help command requires the vault to be unsealed so that
-mount points of secret backends are known.`)
- } else {
- c.Ui.Error(fmt.Sprintf(
- "Error reading help: %s", err))
- }
- return 1
- }
-
- c.Ui.Output(help.Help)
- return 0
-}
-
-func (c *PathHelpCommand) Synopsis() string {
- return "Look up the help for a path"
-}
-
-func (c *PathHelpCommand) Help() string {
- helpText := `
-Usage: vault path-help [options] path
-
- Look up the help for a path.
-
- All endpoints in Vault from system paths, secret paths, and credential
- providers provide built-in help. This command looks up and outputs that
- help.
-
- The command requires that the vault be unsealed, because otherwise
- the mount points of the backends are unknown.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/path_help_test.go b/vendor/github.com/hashicorp/vault/command/path_help_test.go
deleted file mode 100644
index 46219ba..0000000
--- a/vendor/github.com/hashicorp/vault/command/path_help_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestHelp(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &PathHelpCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "sys/mounts",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/pgp_test.go b/vendor/github.com/hashicorp/vault/command/pgp_test.go
deleted file mode 100644
index c368e31..0000000
--- a/vendor/github.com/hashicorp/vault/command/pgp_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package command
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/hex"
- "io/ioutil"
- "reflect"
- "regexp"
- "sort"
- "testing"
-
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/vault"
-
- "github.com/keybase/go-crypto/openpgp"
- "github.com/keybase/go-crypto/openpgp/packet"
-)
-
-func getPubKeyFiles(t *testing.T) (string, []string, error) {
- tempDir, err := ioutil.TempDir("", "vault-test")
- if err != nil {
- t.Fatalf("Error creating temporary directory: %s", err)
- }
-
- pubFiles := []string{
- tempDir + "/pubkey1",
- tempDir + "/pubkey2",
- tempDir + "/pubkey3",
- tempDir + "/aapubkey1",
- }
- decoder := base64.StdEncoding
- pub1Bytes, err := decoder.DecodeString(pgpkeys.TestPubKey1)
- if err != nil {
- t.Fatalf("Error decoding bytes for public key 1: %s", err)
- }
- err = ioutil.WriteFile(pubFiles[0], pub1Bytes, 0755)
- if err != nil {
- t.Fatalf("Error writing pub key 1 to temp file: %s", err)
- }
- pub2Bytes, err := decoder.DecodeString(pgpkeys.TestPubKey2)
- if err != nil {
- t.Fatalf("Error decoding bytes for public key 2: %s", err)
- }
- err = ioutil.WriteFile(pubFiles[1], pub2Bytes, 0755)
- if err != nil {
- t.Fatalf("Error writing pub key 2 to temp file: %s", err)
- }
- pub3Bytes, err := decoder.DecodeString(pgpkeys.TestPubKey3)
- if err != nil {
- t.Fatalf("Error decoding bytes for public key 3: %s", err)
- }
- err = ioutil.WriteFile(pubFiles[2], pub3Bytes, 0755)
- if err != nil {
- t.Fatalf("Error writing pub key 3 to temp file: %s", err)
- }
- err = ioutil.WriteFile(pubFiles[3], []byte(pgpkeys.TestAAPubKey1), 0755)
- if err != nil {
- t.Fatalf("Error writing aa pub key 1 to temp file: %s", err)
- }
-
- return tempDir, pubFiles, nil
-}
-
-func parseDecryptAndTestUnsealKeys(t *testing.T,
- input, rootToken string,
- fingerprints bool,
- backupKeys map[string][]string,
- backupKeysB64 map[string][]string,
- core *vault.Core) {
-
- decoder := base64.StdEncoding
- priv1Bytes, err := decoder.DecodeString(pgpkeys.TestPrivKey1)
- if err != nil {
- t.Fatalf("Error decoding bytes for private key 1: %s", err)
- }
- priv2Bytes, err := decoder.DecodeString(pgpkeys.TestPrivKey2)
- if err != nil {
- t.Fatalf("Error decoding bytes for private key 2: %s", err)
- }
- priv3Bytes, err := decoder.DecodeString(pgpkeys.TestPrivKey3)
- if err != nil {
- t.Fatalf("Error decoding bytes for private key 3: %s", err)
- }
-
- privBytes := [][]byte{
- priv1Bytes,
- priv2Bytes,
- priv3Bytes,
- }
-
- testFunc := func(bkeys map[string][]string) {
- var re *regexp.Regexp
- if fingerprints {
- re, err = regexp.Compile("\\s*Key\\s+\\d+\\s+fingerprint:\\s+([0-9a-fA-F]+);\\s+value:\\s+(.*)")
- } else {
- re, err = regexp.Compile("\\s*Key\\s+\\d+:\\s+(.*)")
- }
- if err != nil {
- t.Fatalf("Error compiling regex: %s", err)
- }
- matches := re.FindAllStringSubmatch(input, -1)
- if len(matches) != 4 {
- t.Fatalf("Unexpected number of keys returned, got %d, matches was \n\n%#v\n\n, input was \n\n%s\n\n", len(matches), matches, input)
- }
-
- encodedKeys := []string{}
- matchedFingerprints := []string{}
- for _, tuple := range matches {
- if fingerprints {
- if len(tuple) != 3 {
- t.Fatalf("Key not found: %#v", tuple)
- }
- matchedFingerprints = append(matchedFingerprints, tuple[1])
- encodedKeys = append(encodedKeys, tuple[2])
- } else {
- if len(tuple) != 2 {
- t.Fatalf("Key not found: %#v", tuple)
- }
- encodedKeys = append(encodedKeys, tuple[1])
- }
- }
-
- if bkeys != nil && len(matchedFingerprints) != 0 {
- testMap := map[string][]string{}
- for i, v := range matchedFingerprints {
- testMap[v] = append(testMap[v], encodedKeys[i])
- sort.Strings(testMap[v])
- }
- if !reflect.DeepEqual(testMap, bkeys) {
- t.Fatalf("test map and backup map do not match, test map is\n%#v\nbackup map is\n%#v", testMap, bkeys)
- }
- }
-
- unsealKeys := []string{}
- ptBuf := bytes.NewBuffer(nil)
- for i, privKeyBytes := range privBytes {
- if i > 2 {
- break
- }
- ptBuf.Reset()
- entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes)))
- if err != nil {
- t.Fatalf("Error parsing private key %d: %s", i, err)
- }
- var keyBytes []byte
- keyBytes, err = base64.StdEncoding.DecodeString(encodedKeys[i])
- if err != nil {
- t.Fatalf("Error decoding key %d: %s", i, err)
- }
- entityList := &openpgp.EntityList{entity}
- md, err := openpgp.ReadMessage(bytes.NewBuffer(keyBytes), entityList, nil, nil)
- if err != nil {
- t.Fatalf("Error decrypting with key %d (%s): %s", i, encodedKeys[i], err)
- }
- ptBuf.ReadFrom(md.UnverifiedBody)
- unsealKeys = append(unsealKeys, ptBuf.String())
- }
-
- err = core.Seal(rootToken)
- if err != nil {
- t.Fatalf("Error sealing vault with provided root token: %s", err)
- }
-
- for i, unsealKey := range unsealKeys {
- unsealBytes, err := hex.DecodeString(unsealKey)
- if err != nil {
- t.Fatalf("Error hex decoding unseal key %s: %s", unsealKey, err)
- }
- unsealed, err := core.Unseal(unsealBytes)
- if err != nil {
- t.Fatalf("Error using unseal key %s: %s", unsealKey, err)
- }
- if i >= 2 && !unsealed {
- t.Fatalf("Error: Provided two unseal keys but core is not unsealed")
- }
- }
- }
-
- testFunc(backupKeysB64)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/policy_delete.go b/vendor/github.com/hashicorp/vault/command/policy_delete.go
deleted file mode 100644
index ff8342a..0000000
--- a/vendor/github.com/hashicorp/vault/command/policy_delete.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// PolicyDeleteCommand is a Command that enables a new endpoint.
-type PolicyDeleteCommand struct {
- meta.Meta
-}
-
-func (c *PolicyDeleteCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("policy-delete", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\npolicy-delete expects exactly one argument"))
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- name := args[0]
- if err := client.Sys().DeletePolicy(name); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error: %s", err))
- return 1
- }
-
- c.Ui.Output(fmt.Sprintf("Policy '%s' deleted.", name))
- return 0
-}
-
-func (c *PolicyDeleteCommand) Synopsis() string {
- return "Delete a policy from the server"
-}
-
-func (c *PolicyDeleteCommand) Help() string {
- helpText := `
-Usage: vault policy-delete [options] name
-
- Delete a policy with the given name.
-
- Once the policy is deleted, all users associated with the policy will
- be affected immediately. When a user is associated with a policy that
- doesn't exist, it is identical to not being associated with that policy.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/policy_delete_test.go b/vendor/github.com/hashicorp/vault/command/policy_delete_test.go
deleted file mode 100644
index 4f62a10..0000000
--- a/vendor/github.com/hashicorp/vault/command/policy_delete_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestPolicyDelete(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &PolicyDeleteCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "foo",
- }
-
- // Run once so the client is setup, ignore errors
- c.Run(args)
-
- // Get the client so we can write data
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if err := client.Sys().PutPolicy("foo", testPolicyDeleteRules); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Test that the delete works
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Test the policy is gone
- rules, err := client.Sys().GetPolicy("foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if rules != "" {
- t.Fatalf("bad: %#v", rules)
- }
-}
-
-const testPolicyDeleteRules = `
-path "sys" {
- policy = "deny"
-}
-`
diff --git a/vendor/github.com/hashicorp/vault/command/policy_list.go b/vendor/github.com/hashicorp/vault/command/policy_list.go
deleted file mode 100644
index 73cb9c5..0000000
--- a/vendor/github.com/hashicorp/vault/command/policy_list.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// PolicyListCommand is a Command that enables a new endpoint.
-type PolicyListCommand struct {
- meta.Meta
-}
-
-func (c *PolicyListCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("policy-list", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) == 1 {
- return c.read(args[0])
- } else if len(args) == 0 {
- return c.list()
- } else {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\npolicies expects zero or one arguments"))
- return 1
- }
-}
-
-func (c *PolicyListCommand) list() int {
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- policies, err := client.Sys().ListPolicies()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error: %s", err))
- return 1
- }
-
- for _, p := range policies {
- c.Ui.Output(p)
- }
-
- return 0
-}
-
-func (c *PolicyListCommand) read(n string) int {
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- rules, err := client.Sys().GetPolicy(n)
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error: %s", err))
- return 1
- }
-
- c.Ui.Output(rules)
- return 0
-}
-
-func (c *PolicyListCommand) Synopsis() string {
- return "List the policies on the server"
-}
-
-func (c *PolicyListCommand) Help() string {
- helpText := `
-Usage: vault policies [options] [name]
-
- List the policies that are available or read a single policy.
-
- This command lists the policies that are written to the Vault server.
- If a name of a policy is specified, that policy is outputted.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/policy_list_test.go b/vendor/github.com/hashicorp/vault/command/policy_list_test.go
deleted file mode 100644
index b2afe29..0000000
--- a/vendor/github.com/hashicorp/vault/command/policy_list_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestPolicyList(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &PolicyListCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestPolicyRead(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &PolicyListCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "root",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/policy_write.go b/vendor/github.com/hashicorp/vault/command/policy_write.go
deleted file mode 100644
index 59b26fb..0000000
--- a/vendor/github.com/hashicorp/vault/command/policy_write.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package command
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// PolicyWriteCommand is a Command that enables a new endpoint.
-type PolicyWriteCommand struct {
- meta.Meta
-}
-
-func (c *PolicyWriteCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("policy-write", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 2 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\npolicy-write expects exactly two arguments"))
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- // Policies are normalized to lowercase
- name := strings.ToLower(args[0])
- path := args[1]
-
- // Read the policy
- var f io.Reader = os.Stdin
- if path != "-" {
- file, err := os.Open(path)
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error opening file: %s", err))
- return 1
- }
- defer file.Close()
- f = file
- }
- var buf bytes.Buffer
- if _, err := io.Copy(&buf, f); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error reading file: %s", err))
- return 1
- }
- rules := buf.String()
-
- if err := client.Sys().PutPolicy(name, rules); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error: %s", err))
- return 1
- }
-
- c.Ui.Output(fmt.Sprintf("Policy '%s' written.", name))
- return 0
-}
-
-func (c *PolicyWriteCommand) Synopsis() string {
- return "Write a policy to the server"
-}
-
-func (c *PolicyWriteCommand) Help() string {
- helpText := `
-Usage: vault policy-write [options] name path
-
- Write a policy with the given name from the contents of a file or stdin.
-
- If the path is "-", the policy is read from stdin. Otherwise, it is
- loaded from the file at the given path.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/policy_write_test.go b/vendor/github.com/hashicorp/vault/command/policy_write_test.go
deleted file mode 100644
index d0deeaa..0000000
--- a/vendor/github.com/hashicorp/vault/command/policy_write_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestPolicyWrite(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &PolicyWriteCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "foo",
- "./test-fixtures/policy.hcl",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/read.go b/vendor/github.com/hashicorp/vault/command/read.go
deleted file mode 100644
index d989178..0000000
--- a/vendor/github.com/hashicorp/vault/command/read.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package command
-
-import (
- "flag"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/meta"
- "github.com/posener/complete"
-)
-
-// ReadCommand is a Command that reads data from the Vault.
-type ReadCommand struct {
- meta.Meta
-}
-
-func (c *ReadCommand) Run(args []string) int {
- var format string
- var field string
- var err error
- var secret *api.Secret
- var flags *flag.FlagSet
- flags = c.Meta.FlagSet("read", meta.FlagSetDefault)
- flags.StringVar(&format, "format", "table", "")
- flags.StringVar(&field, "field", "", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 || len(args[0]) == 0 {
- c.Ui.Error("read expects one argument")
- flags.Usage()
- return 1
- }
-
- path := args[0]
- if path[0] == '/' {
- path = path[1:]
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- secret, err = client.Logical().Read(path)
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error reading %s: %s", path, err))
- return 1
- }
- if secret == nil {
- c.Ui.Error(fmt.Sprintf(
- "No value found at %s", path))
- return 1
- }
-
- // Handle single field output
- if field != "" {
- return PrintRawField(c.Ui, secret, field)
- }
-
- return OutputSecret(c.Ui, format, secret)
-}
-
-func (c *ReadCommand) Synopsis() string {
- return "Read data or secrets from Vault"
-}
-
-func (c *ReadCommand) Help() string {
- helpText := `
-Usage: vault read [options] path
-
- Read data from Vault.
-
- Reads data at the given path from Vault. This can be used to read
- secrets and configuration as well as generate dynamic values from
- materialized backends. Please reference the documentation for the
- backends in use to determine key structure.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Read Options:
-
- -format=table The format for output. By default it is a whitespace-
- delimited table. This can also be json or yaml.
-
- -field=field If included, the raw value of the specified field
- will be output raw to stdout.
-
-`
- return strings.TrimSpace(helpText)
-}
-
-func (c *ReadCommand) AutocompleteArgs() complete.Predictor {
- return complete.PredictNothing
-}
-
-func (c *ReadCommand) AutocompleteFlags() complete.Flags {
- return complete.Flags{
- "-format": predictFormat,
- "-field": complete.PredictNothing,
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/read_test.go b/vendor/github.com/hashicorp/vault/command/read_test.go
deleted file mode 100644
index 5cf0f08..0000000
--- a/vendor/github.com/hashicorp/vault/command/read_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestRead(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &ReadCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "sys/mounts",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestRead_notFound(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &ReadCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "secret/nope",
- }
- if code := c.Run(args); code != 1 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestRead_field(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &ReadCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "-field", "value",
- "secret/foo",
- }
-
- // Run once so the client is setup, ignore errors
- c.Run(args)
-
- // Get the client so we can write data
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- data := map[string]interface{}{"value": "bar"}
- if _, err := client.Logical().Write("secret/foo", data); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Run the read
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- output := ui.OutputWriter.String()
- if output != "bar\n" {
- t.Fatalf("unexpectd output:\n%s", output)
- }
-}
-
-func TestRead_field_notFound(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &ReadCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "-field", "nope",
- "secret/foo",
- }
-
- // Run once so the client is setup, ignore errors
- c.Run(args)
-
- // Get the client so we can write data
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- data := map[string]interface{}{"value": "bar"}
- if _, err := client.Logical().Write("secret/foo", data); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Run the read
- if code := c.Run(args); code != 1 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/rekey.go b/vendor/github.com/hashicorp/vault/command/rekey.go
deleted file mode 100644
index bf47c2c..0000000
--- a/vendor/github.com/hashicorp/vault/command/rekey.go
+++ /dev/null
@@ -1,441 +0,0 @@
-package command
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/password"
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/meta"
- "github.com/posener/complete"
-)
-
-// RekeyCommand is a Command that rekeys the vault.
-type RekeyCommand struct {
- meta.Meta
-
- // Key can be used to pre-seed the key. If it is set, it will not
- // be asked with the `password` helper.
- Key string
-
- // The nonce for the rekey request to send along
- Nonce string
-
- // Whether to use the recovery key instead of barrier key, if available
- RecoveryKey bool
-}
-
-func (c *RekeyCommand) Run(args []string) int {
- var init, cancel, status, delete, retrieve, backup, recoveryKey bool
- var shares, threshold int
- var nonce string
- var pgpKeys pgpkeys.PubKeyFilesFlag
- flags := c.Meta.FlagSet("rekey", meta.FlagSetDefault)
- flags.BoolVar(&init, "init", false, "")
- flags.BoolVar(&cancel, "cancel", false, "")
- flags.BoolVar(&status, "status", false, "")
- flags.BoolVar(&delete, "delete", false, "")
- flags.BoolVar(&retrieve, "retrieve", false, "")
- flags.BoolVar(&backup, "backup", false, "")
- flags.BoolVar(&recoveryKey, "recovery-key", c.RecoveryKey, "")
- flags.IntVar(&shares, "key-shares", 5, "")
- flags.IntVar(&threshold, "key-threshold", 3, "")
- flags.StringVar(&nonce, "nonce", "", "")
- flags.Var(&pgpKeys, "pgp-keys", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- if nonce != "" {
- c.Nonce = nonce
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- // Check if we are running doing any restricted variants
- switch {
- case init:
- return c.initRekey(client, shares, threshold, pgpKeys, backup, recoveryKey)
- case cancel:
- return c.cancelRekey(client, recoveryKey)
- case status:
- return c.rekeyStatus(client, recoveryKey)
- case retrieve:
- return c.rekeyRetrieveStored(client, recoveryKey)
- case delete:
- return c.rekeyDeleteStored(client, recoveryKey)
- }
-
- // Check if the rekey is started
- var rekeyStatus *api.RekeyStatusResponse
- if recoveryKey {
- rekeyStatus, err = client.Sys().RekeyRecoveryKeyStatus()
- } else {
- rekeyStatus, err = client.Sys().RekeyStatus()
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error reading rekey status: %s", err))
- return 1
- }
-
- // Start the rekey process if not started
- if !rekeyStatus.Started {
- if recoveryKey {
- rekeyStatus, err = client.Sys().RekeyRecoveryKeyInit(&api.RekeyInitRequest{
- SecretShares: shares,
- SecretThreshold: threshold,
- PGPKeys: pgpKeys,
- Backup: backup,
- })
- } else {
- rekeyStatus, err = client.Sys().RekeyInit(&api.RekeyInitRequest{
- SecretShares: shares,
- SecretThreshold: threshold,
- PGPKeys: pgpKeys,
- Backup: backup,
- })
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error initializing rekey: %s", err))
- return 1
- }
- c.Nonce = rekeyStatus.Nonce
- }
-
- shares = rekeyStatus.N
- threshold = rekeyStatus.T
- serverNonce := rekeyStatus.Nonce
-
- // Get the unseal key
- args = flags.Args()
- key := c.Key
- if len(args) > 0 {
- key = args[0]
- }
- if key == "" {
- c.Nonce = serverNonce
- fmt.Printf("Rekey operation nonce: %s\n", serverNonce)
- fmt.Printf("Key (will be hidden): ")
- key, err = password.Read(os.Stdin)
- fmt.Printf("\n")
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error attempting to ask for password. The raw error message\n"+
- "is shown below, but the most common reason for this error is\n"+
- "that you attempted to pipe a value into unseal or you're\n"+
- "executing `vault rekey` from outside of a terminal.\n\n"+
- "You should use `vault rekey` from a terminal for maximum\n"+
- "security. If this isn't an option, the unseal key can be passed\n"+
- "in using the first parameter.\n\n"+
- "Raw error: %s", err))
- return 1
- }
- }
-
- // Provide the key, this may potentially complete the update
- var result *api.RekeyUpdateResponse
- if recoveryKey {
- result, err = client.Sys().RekeyRecoveryKeyUpdate(strings.TrimSpace(key), c.Nonce)
- } else {
- result, err = client.Sys().RekeyUpdate(strings.TrimSpace(key), c.Nonce)
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error attempting rekey update: %s", err))
- return 1
- }
-
- // If we are not complete, then dump the status
- if !result.Complete {
- return c.rekeyStatus(client, recoveryKey)
- }
-
- // Space between the key prompt, if any, and the output
- c.Ui.Output("\n")
- // Provide the keys
- var haveB64 bool
- if result.KeysB64 != nil && len(result.KeysB64) == len(result.Keys) {
- haveB64 = true
- }
- for i, key := range result.Keys {
- if len(result.PGPFingerprints) > 0 {
- if haveB64 {
- c.Ui.Output(fmt.Sprintf("Key %d fingerprint: %s; value: %s", i+1, result.PGPFingerprints[i], result.KeysB64[i]))
- } else {
- c.Ui.Output(fmt.Sprintf("Key %d fingerprint: %s; value: %s", i+1, result.PGPFingerprints[i], key))
- }
- } else {
- if haveB64 {
- c.Ui.Output(fmt.Sprintf("Key %d: %s", i+1, result.KeysB64[i]))
- } else {
- c.Ui.Output(fmt.Sprintf("Key %d: %s", i+1, key))
- }
- }
- }
-
- c.Ui.Output(fmt.Sprintf("\nOperation nonce: %s", result.Nonce))
-
- if len(result.PGPFingerprints) > 0 && result.Backup {
- c.Ui.Output(fmt.Sprintf(
- "\n" +
- "The encrypted unseal keys have been backed up to \"core/unseal-keys-backup\"\n" +
- "in your physical backend. It is your responsibility to remove these if and\n" +
- "when desired.",
- ))
- }
-
- c.Ui.Output(fmt.Sprintf(
- "\n"+
- "Vault rekeyed with %d keys and a key threshold of %d. Please\n"+
- "securely distribute the above keys. When the vault is re-sealed,\n"+
- "restarted, or stopped, you must provide at least %d of these keys\n"+
- "to unseal it again.\n\n"+
- "Vault does not store the master key. Without at least %d keys,\n"+
- "your vault will remain permanently sealed.",
- shares,
- threshold,
- threshold,
- threshold,
- ))
-
- return 0
-}
-
-// initRekey is used to start the rekey process
-func (c *RekeyCommand) initRekey(client *api.Client,
- shares, threshold int,
- pgpKeys pgpkeys.PubKeyFilesFlag,
- backup, recoveryKey bool) int {
- // Start the rekey
- request := &api.RekeyInitRequest{
- SecretShares: shares,
- SecretThreshold: threshold,
- PGPKeys: pgpKeys,
- Backup: backup,
- }
- var status *api.RekeyStatusResponse
- var err error
- if recoveryKey {
- status, err = client.Sys().RekeyRecoveryKeyInit(request)
- } else {
- status, err = client.Sys().RekeyInit(request)
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error initializing rekey: %s", err))
- return 1
- }
-
- if pgpKeys == nil || len(pgpKeys) == 0 {
- c.Ui.Output(`
-WARNING: If you lose the keys after they are returned to you, there is no
-recovery. Consider using the '-pgp-keys' option to protect the returned unseal
-keys along with '-backup=true' to allow recovery of the encrypted keys in case
-of emergency. They can easily be deleted at a later time with
-'vault rekey -delete'.
-`)
- }
-
- if pgpKeys != nil && len(pgpKeys) > 0 && !backup {
- c.Ui.Output(`
-WARNING: You are using PGP keys for encryption, but have not set the option to
-back up the new unseal keys to physical storage. If you lose the keys after
-they are returned to you, there is no recovery. Consider setting '-backup=true'
-to allow recovery of the encrypted keys in case of emergency. They can easily
-be deleted at a later time with 'vault rekey -delete'.
-`)
- }
-
- // Provide the current status
- return c.dumpRekeyStatus(status)
-}
-
-// cancelRekey is used to abort the rekey process
-func (c *RekeyCommand) cancelRekey(client *api.Client, recovery bool) int {
- var err error
- if recovery {
- err = client.Sys().RekeyRecoveryKeyCancel()
- } else {
- err = client.Sys().RekeyCancel()
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Failed to cancel rekey: %s", err))
- return 1
- }
- c.Ui.Output("Rekey canceled.")
- return 0
-}
-
-// rekeyStatus is used just to fetch and dump the status
-func (c *RekeyCommand) rekeyStatus(client *api.Client, recovery bool) int {
- // Check the status
- var status *api.RekeyStatusResponse
- var err error
- if recovery {
- status, err = client.Sys().RekeyRecoveryKeyStatus()
- } else {
- status, err = client.Sys().RekeyStatus()
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error reading rekey status: %s", err))
- return 1
- }
-
- return c.dumpRekeyStatus(status)
-}
-
-func (c *RekeyCommand) dumpRekeyStatus(status *api.RekeyStatusResponse) int {
- // Dump the status
- statString := fmt.Sprintf(
- "Nonce: %s\n"+
- "Started: %t\n"+
- "Key Shares: %d\n"+
- "Key Threshold: %d\n"+
- "Rekey Progress: %d\n"+
- "Required Keys: %d",
- status.Nonce,
- status.Started,
- status.N,
- status.T,
- status.Progress,
- status.Required,
- )
- if len(status.PGPFingerprints) != 0 {
- statString = fmt.Sprintf("%s\nPGP Key Fingerprints: %s", statString, status.PGPFingerprints)
- statString = fmt.Sprintf("%s\nBackup Storage: %t", statString, status.Backup)
- }
- c.Ui.Output(statString)
- return 0
-}
-
-func (c *RekeyCommand) rekeyRetrieveStored(client *api.Client, recovery bool) int {
- var storedKeys *api.RekeyRetrieveResponse
- var err error
- if recovery {
- storedKeys, err = client.Sys().RekeyRetrieveRecoveryBackup()
- } else {
- storedKeys, err = client.Sys().RekeyRetrieveBackup()
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error retrieving stored keys: %s", err))
- return 1
- }
-
- secret := &api.Secret{
- Data: structs.New(storedKeys).Map(),
- }
-
- return OutputSecret(c.Ui, "table", secret)
-}
-
-func (c *RekeyCommand) rekeyDeleteStored(client *api.Client, recovery bool) int {
- var err error
- if recovery {
- err = client.Sys().RekeyDeleteRecoveryBackup()
- } else {
- err = client.Sys().RekeyDeleteBackup()
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Failed to delete stored keys: %s", err))
- return 1
- }
- c.Ui.Output("Stored keys deleted.")
- return 0
-}
-
-func (c *RekeyCommand) Synopsis() string {
- return "Rekeys Vault to generate new unseal keys"
-}
-
-func (c *RekeyCommand) Help() string {
- helpText := `
-Usage: vault rekey [options] [key]
-
- Rekey is used to change the unseal keys. This can be done to generate
- a new set of unseal keys or to change the number of shares and the
- required threshold.
-
- Rekey can only be done when the vault is already unsealed. The operation
- is done online, but requires that a threshold of the current unseal
- keys be provided.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Rekey Options:
-
- -init Initialize the rekey operation by setting the desired
- number of shares and the key threshold. This can only be
- done if no rekey is already initiated.
-
- -cancel Reset the rekey process by throwing away
- prior keys and the rekey configuration.
-
- -status Prints the status of the current rekey operation.
- This can be used to see the status without attempting
- to provide an unseal key.
-
- -retrieve Retrieve backed-up keys. Only available if the PGP keys
- were provided and the backup has not been deleted.
-
- -delete Delete any backed-up keys.
-
- -key-shares=5 The number of key shares to split the master key
- into.
-
- -key-threshold=3 The number of key shares required to reconstruct
- the master key.
-
- -nonce=abcd The nonce provided at rekey initialization time. This
- same nonce value must be provided with each unseal
- key. If the unseal key is not being passed in via the
- the command line the nonce parameter is not required,
- and will instead be displayed with the key prompt.
-
- -pgp-keys If provided, must be a comma-separated list of
- files on disk containing binary- or base64-format
- public PGP keys, or Keybase usernames specified as
- "keybase:". The number of given entries
- must match 'key-shares'. The output unseal keys will
- be encrypted and base64-encoded, in order, with the
- given public keys. If you want to use them with the
- 'vault unseal' command, you will need to base64-decode
- and decrypt; this will be the plaintext unseal key.
-
- -backup=false If true, and if the key shares are PGP-encrypted, a
- plaintext backup of the PGP-encrypted keys will be
- stored at "core/unseal-keys-backup" in your physical
- storage. You can retrieve or delete them via the
- 'sys/rekey/backup' endpoint.
-
- -recovery-key=false Whether to rekey the recovery key instead of the
- barrier key. Only used with Vault HSM.
-`
- return strings.TrimSpace(helpText)
-}
-
-func (c *RekeyCommand) AutocompleteArgs() complete.Predictor {
- return complete.PredictNothing
-}
-
-func (c *RekeyCommand) AutocompleteFlags() complete.Flags {
- return complete.Flags{
- "-init": complete.PredictNothing,
- "-cancel": complete.PredictNothing,
- "-status": complete.PredictNothing,
- "-retrieve": complete.PredictNothing,
- "-delete": complete.PredictNothing,
- "-key-shares": complete.PredictNothing,
- "-key-threshold": complete.PredictNothing,
- "-nonce": complete.PredictNothing,
- "-pgp-keys": complete.PredictNothing,
- "-backup": complete.PredictNothing,
- "-recovery-key": complete.PredictNothing,
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/rekey_test.go b/vendor/github.com/hashicorp/vault/command/rekey_test.go
deleted file mode 100644
index 6f12d78..0000000
--- a/vendor/github.com/hashicorp/vault/command/rekey_test.go
+++ /dev/null
@@ -1,312 +0,0 @@
-package command
-
-import (
- "encoding/hex"
- "os"
- "sort"
- "strings"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestRekey(t *testing.T) {
- core, keys, _ := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
-
- for i, key := range keys {
- c := &RekeyCommand{
- Key: hex.EncodeToString(key),
- RecoveryKey: false,
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- if i > 0 {
- conf, err := core.RekeyConfig(false)
- if err != nil {
- t.Fatal(err)
- }
- c.Nonce = conf.Nonce
- }
-
- args := []string{"-address", addr}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
- }
-
- config, err := core.SealAccess().BarrierConfig()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if config.SecretShares != 5 {
- t.Fatal("should rekey")
- }
-}
-
-func TestRekey_arg(t *testing.T) {
- core, keys, _ := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
-
- for i, key := range keys {
- c := &RekeyCommand{
- RecoveryKey: false,
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- if i > 0 {
- conf, err := core.RekeyConfig(false)
- if err != nil {
- t.Fatal(err)
- }
- c.Nonce = conf.Nonce
- }
-
- args := []string{"-address", addr, hex.EncodeToString(key)}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
- }
-
- config, err := core.SealAccess().BarrierConfig()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if config.SecretShares != 5 {
- t.Fatal("should rekey")
- }
-}
-
-func TestRekey_init(t *testing.T) {
- core, _, _ := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
-
- c := &RekeyCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "-init",
- "-key-threshold", "10",
- "-key-shares", "10",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- config, err := core.RekeyConfig(false)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if config.SecretShares != 10 {
- t.Fatal("should rekey")
- }
- if config.SecretThreshold != 10 {
- t.Fatal("should rekey")
- }
-}
-
-func TestRekey_cancel(t *testing.T) {
- core, keys, _ := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &RekeyCommand{
- Key: hex.EncodeToString(keys[0]),
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- args := []string{"-address", addr, "-init"}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- args = []string{"-address", addr, "-cancel"}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- config, err := core.RekeyConfig(false)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if config != nil {
- t.Fatal("should not rekey")
- }
-}
-
-func TestRekey_status(t *testing.T) {
- core, keys, _ := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &RekeyCommand{
- Key: hex.EncodeToString(keys[0]),
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- args := []string{"-address", addr, "-init"}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- args = []string{"-address", addr, "-status"}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- if !strings.Contains(ui.OutputWriter.String(), "Started: true") {
- t.Fatalf("bad: %s", ui.OutputWriter.String())
- }
-}
-
-func TestRekey_init_pgp(t *testing.T) {
- core, keys, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- bc := &logical.BackendConfig{
- Logger: nil,
- System: logical.StaticSystemView{
- DefaultLeaseTTLVal: time.Hour * 24,
- MaxLeaseTTLVal: time.Hour * 24 * 32,
- },
- }
- sysBackend := vault.NewSystemBackend(core)
- err := sysBackend.Backend.Setup(bc)
- if err != nil {
- t.Fatal(err)
- }
-
- ui := new(cli.MockUi)
- c := &RekeyCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- tempDir, pubFiles, err := getPubKeyFiles(t)
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tempDir)
-
- args := []string{
- "-address", addr,
- "-init",
- "-key-shares", "4",
- "-pgp-keys", pubFiles[0] + ",@" + pubFiles[1] + "," + pubFiles[2] + "," + pubFiles[3],
- "-key-threshold", "2",
- "-backup", "true",
- }
-
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- config, err := core.RekeyConfig(false)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if config.SecretShares != 4 {
- t.Fatal("should rekey")
- }
- if config.SecretThreshold != 2 {
- t.Fatal("should rekey")
- }
-
- for _, key := range keys {
- c = &RekeyCommand{
- Key: hex.EncodeToString(key),
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- c.Nonce = config.Nonce
-
- args = []string{
- "-address", addr,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
- }
-
- type backupStruct struct {
- Keys map[string][]string
- KeysB64 map[string][]string
- }
- backupVals := &backupStruct{}
-
- req := logical.TestRequest(t, logical.ReadOperation, "rekey/backup")
- resp, err := sysBackend.HandleRequest(req)
- if err != nil {
- t.Fatalf("error running backed-up unseal key fetch: %v", err)
- }
- if resp == nil {
- t.Fatalf("got nil resp with unseal key fetch")
- }
- if resp.Data["keys"] == nil {
- t.Fatalf("could not retrieve unseal keys from token")
- }
- if resp.Data["nonce"] != config.Nonce {
- t.Fatalf("nonce mismatch between rekey and backed-up keys")
- }
-
- backupVals.Keys = resp.Data["keys"].(map[string][]string)
- backupVals.KeysB64 = resp.Data["keys_base64"].(map[string][]string)
-
- // Now delete and try again; the values should be inaccessible
- req = logical.TestRequest(t, logical.DeleteOperation, "rekey/backup")
- resp, err = sysBackend.HandleRequest(req)
- if err != nil {
- t.Fatalf("error running backed-up unseal key delete: %v", err)
- }
- req = logical.TestRequest(t, logical.ReadOperation, "rekey/backup")
- resp, err = sysBackend.HandleRequest(req)
- if err != nil {
- t.Fatalf("error running backed-up unseal key fetch: %v", err)
- }
- if resp == nil {
- t.Fatalf("got nil resp with unseal key fetch")
- }
- if resp.Data["keys"] != nil {
- t.Fatalf("keys found when they should have been deleted")
- }
-
- // Sort, because it'll be tested with DeepEqual later
- for k, _ := range backupVals.Keys {
- sort.Strings(backupVals.Keys[k])
- sort.Strings(backupVals.KeysB64[k])
- }
-
- parseDecryptAndTestUnsealKeys(t, ui.OutputWriter.String(), token, true, backupVals.Keys, backupVals.KeysB64, core)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/remount.go b/vendor/github.com/hashicorp/vault/command/remount.go
deleted file mode 100644
index a36f141..0000000
--- a/vendor/github.com/hashicorp/vault/command/remount.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// RemountCommand is a Command that remounts a mounted secret backend
-// to a new endpoint.
-type RemountCommand struct {
- meta.Meta
-}
-
-func (c *RemountCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("remount", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 2 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\nremount expects two arguments: the from and to path"))
- return 1
- }
-
- from := args[0]
- to := args[1]
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- if err := client.Sys().Remount(from, to); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Unmount error: %s", err))
- return 2
- }
-
- c.Ui.Output(fmt.Sprintf(
- "Successfully remounted from '%s' to '%s'!", from, to))
-
- return 0
-}
-
-func (c *RemountCommand) Synopsis() string {
- return "Remount a secret backend to a new path"
-}
-
-func (c *RemountCommand) Help() string {
- helpText := `
-Usage: vault remount [options] from to
-
- Remount a mounted secret backend to a new path.
-
- This command remounts a secret backend that is already mounted to
- a new path. All the secrets from the old path will be revoked, but
- the data associated with the backend (such as configuration), will
- be preserved.
-
- Example: vault remount secret/ kv/
-
-General Options:
-` + meta.GeneralOptionsUsage()
-
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/remount_test.go b/vendor/github.com/hashicorp/vault/command/remount_test.go
deleted file mode 100644
index 7ec1321..0000000
--- a/vendor/github.com/hashicorp/vault/command/remount_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestRemount(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &RemountCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "secret/", "kv",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mounts, err := client.Sys().ListMounts()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- _, ok := mounts["secret/"]
- if ok {
- t.Fatal("should not have mount")
- }
-
- _, ok = mounts["kv/"]
- if !ok {
- t.Fatal("should have kv")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/renew.go b/vendor/github.com/hashicorp/vault/command/renew.go
deleted file mode 100644
index 6a3eafe..0000000
--- a/vendor/github.com/hashicorp/vault/command/renew.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package command
-
-import (
- "fmt"
- "strconv"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// RenewCommand is a Command that mounts a new mount.
-type RenewCommand struct {
- meta.Meta
-}
-
-func (c *RenewCommand) Run(args []string) int {
- var format string
- flags := c.Meta.FlagSet("renew", meta.FlagSetDefault)
- flags.StringVar(&format, "format", "table", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) < 1 || len(args) >= 3 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\nrenew expects at least one argument: the lease ID to renew"))
- return 1
- }
-
- var increment int
- leaseId := args[0]
- if len(args) > 1 {
- parsed, err := strconv.ParseInt(args[1], 10, 0)
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Invalid increment, must be an int: %s", err))
- return 1
- }
-
- increment = int(parsed)
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- secret, err := client.Sys().Renew(leaseId, increment)
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Renew error: %s", err))
- return 1
- }
-
- return OutputSecret(c.Ui, format, secret)
-}
-
-func (c *RenewCommand) Synopsis() string {
- return "Renew the lease of a secret"
-}
-
-func (c *RenewCommand) Help() string {
- helpText := `
-Usage: vault renew [options] id [increment]
-
- Renew the lease on a secret, extending the time that it can be used
- before it is revoked by Vault.
-
- Every secret in Vault has a lease associated with it. If the user of
- the secret wants to use it longer than the lease, then it must be
- renewed. Renewing the lease will not change the contents of the secret.
-
- To renew a secret, run this command with the lease ID returned when it
- was read. Optionally, request a specific increment in seconds. Vault
- is not required to honor this request.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Renew Options:
-
- -format=table The format for output. By default it is a whitespace-
- delimited table. This can also be json or yaml.
-`
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/renew_test.go b/vendor/github.com/hashicorp/vault/command/renew_test.go
deleted file mode 100644
index 2191662..0000000
--- a/vendor/github.com/hashicorp/vault/command/renew_test.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestRenew(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &RenewCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- // write a secret with a lease
- client := testClient(t, addr, token)
- _, err := client.Logical().Write("secret/foo", map[string]interface{}{
- "key": "value",
- "lease": "1m",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // read the secret to get its lease ID
- secret, err := client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- args := []string{
- "-address", addr,
- secret.LeaseID,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestRenewBothWays(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- // write a secret with a lease
- client := testClient(t, addr, token)
- _, err := client.Logical().Write("secret/foo", map[string]interface{}{
- "key": "value",
- "ttl": "1m",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // read the secret to get its lease ID
- secret, err := client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Test one renew path
- r := client.NewRequest("PUT", "/v1/sys/renew")
- body := map[string]interface{}{
- "lease_id": secret.LeaseID,
- }
- if err := r.SetJSONBody(body); err != nil {
- t.Fatal(err)
- }
- resp, err := client.RawRequest(r)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- secret, err = api.ParseSecret(resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- if secret.LeaseDuration != 60 {
- t.Fatal("bad lease duration")
- }
-
- // Test another
- r = client.NewRequest("PUT", "/v1/sys/leases/renew")
- body = map[string]interface{}{
- "lease_id": secret.LeaseID,
- }
- if err := r.SetJSONBody(body); err != nil {
- t.Fatal(err)
- }
- resp, err = client.RawRequest(r)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- secret, err = api.ParseSecret(resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- if secret.LeaseDuration != 60 {
- t.Fatal("bad lease duration")
- }
-
- // Test the other
- r = client.NewRequest("PUT", "/v1/sys/renew/"+secret.LeaseID)
- resp, err = client.RawRequest(r)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- secret, err = api.ParseSecret(resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- if secret.LeaseDuration != 60 {
- t.Fatalf("bad lease duration; secret is %#v\n", *secret)
- }
-
- // Test another
- r = client.NewRequest("PUT", "/v1/sys/leases/renew/"+secret.LeaseID)
- resp, err = client.RawRequest(r)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- secret, err = api.ParseSecret(resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- if secret.LeaseDuration != 60 {
- t.Fatalf("bad lease duration; secret is %#v\n", *secret)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/revoke.go b/vendor/github.com/hashicorp/vault/command/revoke.go
deleted file mode 100644
index 50933ad..0000000
--- a/vendor/github.com/hashicorp/vault/command/revoke.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// RevokeCommand is a Command that mounts a new mount.
-type RevokeCommand struct {
- meta.Meta
-}
-
-func (c *RevokeCommand) Run(args []string) int {
- var prefix, force bool
- flags := c.Meta.FlagSet("revoke", meta.FlagSetDefault)
- flags.BoolVar(&prefix, "prefix", false, "")
- flags.BoolVar(&force, "force", false, "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\nrevoke expects one argument: the ID to revoke"))
- return 1
- }
- leaseId := args[0]
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- switch {
- case force && !prefix:
- c.Ui.Error(fmt.Sprintf(
- "-force requires -prefix"))
- return 1
- case force && prefix:
- err = client.Sys().RevokeForce(leaseId)
- case prefix:
- err = client.Sys().RevokePrefix(leaseId)
- default:
- err = client.Sys().Revoke(leaseId)
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Revoke error: %s", err))
- return 1
- }
-
- c.Ui.Output(fmt.Sprintf("Success! Revoked the secret with ID '%s', if it existed.", leaseId))
- return 0
-}
-
-func (c *RevokeCommand) Synopsis() string {
- return "Revoke a secret."
-}
-
-func (c *RevokeCommand) Help() string {
- helpText := `
-Usage: vault revoke [options] id
-
- Revoke a secret by its lease ID.
-
- This command revokes a secret by its lease ID that was returned with it. Once
- the key is revoked, it is no longer valid.
-
- With the -prefix flag, the revoke is done by prefix: any secret prefixed with
- the given partial ID is revoked. Lease IDs are structured in such a way to
- make revocation of prefixes useful.
-
- With the -force flag, the lease is removed from Vault even if the revocation
- fails. This is meant for certain recovery scenarios and should not be used
- lightly. This option requires -prefix.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Revoke Options:
-
- -prefix=true Revoke all secrets with the matching prefix. This
- defaults to false: an exact revocation.
-
- -force=true Delete the lease even if the actual revocation
- operation fails.
-`
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/revoke_test.go b/vendor/github.com/hashicorp/vault/command/revoke_test.go
deleted file mode 100644
index cb9febf..0000000
--- a/vendor/github.com/hashicorp/vault/command/revoke_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestRevoke(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &RevokeCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- client := testClient(t, addr, token)
- _, err := client.Logical().Write("secret/foo", map[string]interface{}{
- "key": "value",
- "lease": "1m",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- secret, err := client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- args := []string{
- "-address", addr,
- secret.LeaseID,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/rotate.go b/vendor/github.com/hashicorp/vault/command/rotate.go
deleted file mode 100644
index 9da3873..0000000
--- a/vendor/github.com/hashicorp/vault/command/rotate.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// RotateCommand is a Command that rotates the encryption key being used
-type RotateCommand struct {
- meta.Meta
-}
-
-func (c *RotateCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("rotate", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- // Rotate the key
- err = client.Sys().Rotate()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error with key rotation: %s", err))
- return 2
- }
-
- // Print the key status
- status, err := client.Sys().KeyStatus()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error reading audits: %s", err))
- return 2
- }
-
- c.Ui.Output(fmt.Sprintf("Key Term: %d", status.Term))
- c.Ui.Output(fmt.Sprintf("Installation Time: %v", status.InstallTime))
- return 0
-}
-
-func (c *RotateCommand) Synopsis() string {
- return "Rotates the backend encryption key used to persist data"
-}
-
-func (c *RotateCommand) Help() string {
- helpText := `
-Usage: vault rotate [options]
-
- Rotates the backend encryption key which is used to secure data
- written to the storage backend. This is done by installing a new key
- which encrypts new data, while old keys are still used to decrypt
- secrets written previously. This is an online operation and is not
- disruptive.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/rotate_test.go b/vendor/github.com/hashicorp/vault/command/rotate_test.go
deleted file mode 100644
index 257f280..0000000
--- a/vendor/github.com/hashicorp/vault/command/rotate_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestRotate(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &RotateCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/seal.go b/vendor/github.com/hashicorp/vault/command/seal.go
deleted file mode 100644
index 033c164..0000000
--- a/vendor/github.com/hashicorp/vault/command/seal.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// SealCommand is a Command that seals the vault.
-type SealCommand struct {
- meta.Meta
-}
-
-func (c *SealCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("seal", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- if err := client.Sys().Seal(); err != nil {
- c.Ui.Error(fmt.Sprintf("Error sealing: %s", err))
- return 1
- }
-
- c.Ui.Output("Vault is now sealed.")
- return 0
-}
-
-func (c *SealCommand) Synopsis() string {
- return "Seals the Vault server"
-}
-
-func (c *SealCommand) Help() string {
- helpText := `
-Usage: vault seal [options]
-
- Seal the vault.
-
- Sealing a vault tells the Vault server to stop responding to any
- access operations until it is unsealed again. A sealed vault throws away
- its master key to unlock the data, so it is physically blocked from
- responding to operations again until the vault is unsealed with
- the "unseal" command or via the API.
-
- This command is idempotent, if the vault is already sealed it does nothing.
-
- If an unseal has started, sealing the vault will reset the unsealing
- process. You'll have to re-enter every portion of the master key again.
- This is the same as running "vault unseal -reset".
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/seal_test.go b/vendor/github.com/hashicorp/vault/command/seal_test.go
deleted file mode 100644
index c224aee..0000000
--- a/vendor/github.com/hashicorp/vault/command/seal_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func Test_Seal(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &SealCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{"-address", addr}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- sealed, err := core.Sealed()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if !sealed {
- t.Fatal("should be sealed")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server.go b/vendor/github.com/hashicorp/vault/command/server.go
deleted file mode 100644
index e089ef2..0000000
--- a/vendor/github.com/hashicorp/vault/command/server.go
+++ /dev/null
@@ -1,1333 +0,0 @@
-package command
-
-import (
- "encoding/base64"
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "os"
- "os/signal"
- "path/filepath"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "syscall"
- "time"
-
- "golang.org/x/net/http2"
-
- colorable "github.com/mattn/go-colorable"
- log "github.com/mgutz/logxi/v1"
- testing "github.com/mitchellh/go-testing-interface"
- "github.com/posener/complete"
-
- "google.golang.org/grpc/grpclog"
-
- "github.com/armon/go-metrics"
- "github.com/armon/go-metrics/circonus"
- "github.com/armon/go-metrics/datadog"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/command/server"
- "github.com/hashicorp/vault/helper/flag-slice"
- "github.com/hashicorp/vault/helper/gated-writer"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/helper/mlock"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/reload"
- vaulthttp "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/vault"
- "github.com/hashicorp/vault/version"
-)
-
-// ServerCommand is a Command that starts the Vault server.
-type ServerCommand struct {
- AuditBackends map[string]audit.Factory
- CredentialBackends map[string]logical.Factory
- LogicalBackends map[string]logical.Factory
- PhysicalBackends map[string]physical.Factory
-
- ShutdownCh chan struct{}
- SighupCh chan struct{}
-
- WaitGroup *sync.WaitGroup
-
- meta.Meta
-
- logGate *gatedwriter.Writer
- logger log.Logger
-
- cleanupGuard sync.Once
-
- reloadFuncsLock *sync.RWMutex
- reloadFuncs *map[string][]reload.ReloadFunc
-}
-
-func (c *ServerCommand) Run(args []string) int {
- var dev, verifyOnly, devHA, devTransactional, devLeasedKV, devThreeNode bool
- var configPath []string
- var logLevel, devRootTokenID, devListenAddress, devPluginDir string
- var devLatency, devLatencyJitter int
- flags := c.Meta.FlagSet("server", meta.FlagSetDefault)
- flags.BoolVar(&dev, "dev", false, "")
- flags.StringVar(&devRootTokenID, "dev-root-token-id", "", "")
- flags.StringVar(&devListenAddress, "dev-listen-address", "", "")
- flags.StringVar(&devPluginDir, "dev-plugin-dir", "", "")
- flags.StringVar(&logLevel, "log-level", "info", "")
- flags.IntVar(&devLatency, "dev-latency", 0, "")
- flags.IntVar(&devLatencyJitter, "dev-latency-jitter", 20, "")
- flags.BoolVar(&verifyOnly, "verify-only", false, "")
- flags.BoolVar(&devHA, "dev-ha", false, "")
- flags.BoolVar(&devTransactional, "dev-transactional", false, "")
- flags.BoolVar(&devLeasedKV, "dev-leased-kv", false, "")
- flags.BoolVar(&devThreeNode, "dev-three-node", false, "")
- flags.Usage = func() { c.Ui.Output(c.Help()) }
- flags.Var((*sliceflag.StringFlag)(&configPath), "config", "config")
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- // Create a logger. We wrap it in a gated writer so that it doesn't
- // start logging too early.
- c.logGate = &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)}
- var level int
- logLevel = strings.ToLower(strings.TrimSpace(logLevel))
- switch logLevel {
- case "trace":
- level = log.LevelTrace
- case "debug":
- level = log.LevelDebug
- case "info":
- level = log.LevelInfo
- case "notice":
- level = log.LevelNotice
- case "warn":
- level = log.LevelWarn
- case "err":
- level = log.LevelError
- default:
- c.Ui.Output(fmt.Sprintf("Unknown log level %s", logLevel))
- return 1
- }
-
- logFormat := os.Getenv("VAULT_LOG_FORMAT")
- if logFormat == "" {
- logFormat = os.Getenv("LOGXI_FORMAT")
- }
- switch strings.ToLower(logFormat) {
- case "vault", "vault_json", "vault-json", "vaultjson", "json", "":
- c.logger = logformat.NewVaultLoggerWithWriter(c.logGate, level)
- default:
- c.logger = log.NewLogger(c.logGate, "vault")
- c.logger.SetLevel(level)
- }
- grpclog.SetLogger(&grpclogFaker{
- logger: c.logger,
- })
-
- if os.Getenv("VAULT_DEV_ROOT_TOKEN_ID") != "" && devRootTokenID == "" {
- devRootTokenID = os.Getenv("VAULT_DEV_ROOT_TOKEN_ID")
- }
-
- if os.Getenv("VAULT_DEV_LISTEN_ADDRESS") != "" && devListenAddress == "" {
- devListenAddress = os.Getenv("VAULT_DEV_LISTEN_ADDRESS")
- }
-
- if devHA || devTransactional || devLeasedKV || devThreeNode {
- dev = true
- }
-
- // Validation
- if !dev {
- switch {
- case len(configPath) == 0:
- c.Ui.Output("At least one config path must be specified with -config")
- flags.Usage()
- return 1
- case devRootTokenID != "":
- c.Ui.Output("Root token ID can only be specified with -dev")
- flags.Usage()
- return 1
- }
- }
-
- // Load the configuration
- var config *server.Config
- if dev {
- config = server.DevConfig(devHA, devTransactional)
- if devListenAddress != "" {
- config.Listeners[0].Config["address"] = devListenAddress
- }
- }
- for _, path := range configPath {
- current, err := server.LoadConfig(path, c.logger)
- if err != nil {
- c.Ui.Output(fmt.Sprintf(
- "Error loading configuration from %s: %s", path, err))
- return 1
- }
-
- if config == nil {
- config = current
- } else {
- config = config.Merge(current)
- }
- }
-
- // Ensure at least one config was found.
- if config == nil {
- c.Ui.Output("No configuration files found.")
- return 1
- }
-
- // Ensure that a backend is provided
- if config.Storage == nil {
- c.Ui.Output("A storage backend must be specified")
- return 1
- }
-
- // If mlockall(2) isn't supported, show a warning. We disable this
- // in dev because it is quite scary to see when first using Vault.
- if !dev && !mlock.Supported() {
- c.Ui.Output("==> WARNING: mlock not supported on this system!\n")
- c.Ui.Output(" An `mlockall(2)`-like syscall to prevent memory from being")
- c.Ui.Output(" swapped to disk is not supported on this system. Running")
- c.Ui.Output(" Vault on an mlockall(2) enabled system is much more secure.\n")
- }
-
- if err := c.setupTelemetry(config); err != nil {
- c.Ui.Output(fmt.Sprintf("Error initializing telemetry: %s", err))
- return 1
- }
-
- // Initialize the backend
- factory, exists := c.PhysicalBackends[config.Storage.Type]
- if !exists {
- c.Ui.Output(fmt.Sprintf(
- "Unknown storage type %s",
- config.Storage.Type))
- return 1
- }
- backend, err := factory(config.Storage.Config, c.logger)
- if err != nil {
- c.Ui.Output(fmt.Sprintf(
- "Error initializing storage of type %s: %s",
- config.Storage.Type, err))
- return 1
- }
-
- infoKeys := make([]string, 0, 10)
- info := make(map[string]string)
-
- var seal vault.Seal = &vault.DefaultSeal{}
-
- // Ensure that the seal finalizer is called, even if using verify-only
- defer func() {
- if seal != nil {
- err = seal.Finalize()
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error finalizing seals: %v", err))
- }
- }
- }()
-
- if seal == nil {
- c.Ui.Error(fmt.Sprintf("Could not create seal"))
- return 1
- }
-
- coreConfig := &vault.CoreConfig{
- Physical: backend,
- RedirectAddr: config.Storage.RedirectAddr,
- HAPhysical: nil,
- Seal: seal,
- AuditBackends: c.AuditBackends,
- CredentialBackends: c.CredentialBackends,
- LogicalBackends: c.LogicalBackends,
- Logger: c.logger,
- DisableCache: config.DisableCache,
- DisableMlock: config.DisableMlock,
- MaxLeaseTTL: config.MaxLeaseTTL,
- DefaultLeaseTTL: config.DefaultLeaseTTL,
- ClusterName: config.ClusterName,
- CacheSize: config.CacheSize,
- PluginDirectory: config.PluginDirectory,
- EnableRaw: config.EnableRawEndpoint,
- }
- if dev {
- coreConfig.DevToken = devRootTokenID
- if devLeasedKV {
- coreConfig.LogicalBackends["kv"] = vault.LeasedPassthroughBackendFactory
- }
- if devPluginDir != "" {
- coreConfig.PluginDirectory = devPluginDir
- }
- if devLatency > 0 {
- injectLatency := time.Duration(devLatency) * time.Millisecond
- if _, txnOK := backend.(physical.Transactional); txnOK {
- coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, devLatencyJitter, c.logger)
- } else {
- coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, devLatencyJitter, c.logger)
- }
- }
- }
-
- if devThreeNode {
- return c.enableThreeNodeDevCluster(coreConfig, info, infoKeys, devListenAddress)
- }
-
- var disableClustering bool
-
- // Initialize the separate HA storage backend, if it exists
- var ok bool
- if config.HAStorage != nil {
- factory, exists := c.PhysicalBackends[config.HAStorage.Type]
- if !exists {
- c.Ui.Output(fmt.Sprintf(
- "Unknown HA storage type %s",
- config.HAStorage.Type))
- return 1
- }
- habackend, err := factory(config.HAStorage.Config, c.logger)
- if err != nil {
- c.Ui.Output(fmt.Sprintf(
- "Error initializing HA storage of type %s: %s",
- config.HAStorage.Type, err))
- return 1
- }
-
- if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
- c.Ui.Output("Specified HA storage does not support HA")
- return 1
- }
-
- if !coreConfig.HAPhysical.HAEnabled() {
- c.Ui.Output("Specified HA storage has HA support disabled; please consult documentation")
- return 1
- }
-
- coreConfig.RedirectAddr = config.HAStorage.RedirectAddr
- disableClustering = config.HAStorage.DisableClustering
- if !disableClustering {
- coreConfig.ClusterAddr = config.HAStorage.ClusterAddr
- }
- } else {
- if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
- coreConfig.RedirectAddr = config.Storage.RedirectAddr
- disableClustering = config.Storage.DisableClustering
- if !disableClustering {
- coreConfig.ClusterAddr = config.Storage.ClusterAddr
- }
- }
- }
-
- if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" {
- coreConfig.RedirectAddr = envRA
- } else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" {
- coreConfig.RedirectAddr = envAA
- }
-
- // Attempt to detect the redirect address, if possible
- var detect physical.RedirectDetect
- if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
- detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect)
- } else {
- detect, ok = coreConfig.Physical.(physical.RedirectDetect)
- }
- if ok && coreConfig.RedirectAddr == "" {
- redirect, err := c.detectRedirect(detect, config)
- if err != nil {
- c.Ui.Output(fmt.Sprintf("Error detecting redirect address: %s", err))
- } else if redirect == "" {
- c.Ui.Output("Failed to detect redirect address.")
- } else {
- coreConfig.RedirectAddr = redirect
- }
- }
- if coreConfig.RedirectAddr == "" && dev {
- coreConfig.RedirectAddr = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"])
- }
-
- // After the redirect bits are sorted out, if no cluster address was
- // explicitly given, derive one from the redirect addr
- if disableClustering {
- coreConfig.ClusterAddr = ""
- } else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" {
- coreConfig.ClusterAddr = envCA
- } else {
- var addrToUse string
- switch {
- case coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "":
- addrToUse = coreConfig.RedirectAddr
- case dev:
- addrToUse = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"])
- default:
- goto CLUSTER_SYNTHESIS_COMPLETE
- }
- u, err := url.ParseRequestURI(addrToUse)
- if err != nil {
- c.Ui.Output(fmt.Sprintf("Error parsing synthesized cluster address %s: %v", addrToUse, err))
- return 1
- }
- host, port, err := net.SplitHostPort(u.Host)
- if err != nil {
- // This sucks, as it's a const in the function but not exported in the package
- if strings.Contains(err.Error(), "missing port in address") {
- host = u.Host
- port = "443"
- } else {
- c.Ui.Output(fmt.Sprintf("Error parsing redirect address: %v", err))
- return 1
- }
- }
- nPort, err := strconv.Atoi(port)
- if err != nil {
- c.Ui.Output(fmt.Sprintf("Error parsing synthesized address; failed to convert %q to a numeric: %v", port, err))
- return 1
- }
- u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1))
- // Will always be TLS-secured
- u.Scheme = "https"
- coreConfig.ClusterAddr = u.String()
- }
-
-CLUSTER_SYNTHESIS_COMPLETE:
-
- if coreConfig.ClusterAddr != "" {
- // Force https as we'll always be TLS-secured
- u, err := url.ParseRequestURI(coreConfig.ClusterAddr)
- if err != nil {
- c.Ui.Output(fmt.Sprintf("Error parsing cluster address %s: %v", coreConfig.RedirectAddr, err))
- return 1
- }
- u.Scheme = "https"
- coreConfig.ClusterAddr = u.String()
- }
-
- // Initialize the core
- core, newCoreError := vault.NewCore(coreConfig)
- if newCoreError != nil {
- if !errwrap.ContainsType(newCoreError, new(vault.NonFatalError)) {
- c.Ui.Output(fmt.Sprintf("Error initializing core: %s", newCoreError))
- return 1
- }
- }
-
- // Copy the reload funcs pointers back
- c.reloadFuncs = coreConfig.ReloadFuncs
- c.reloadFuncsLock = coreConfig.ReloadFuncsLock
-
- // Compile server information for output later
- info["storage"] = config.Storage.Type
- info["log level"] = logLevel
- info["mlock"] = fmt.Sprintf(
- "supported: %v, enabled: %v",
- mlock.Supported(), !config.DisableMlock && mlock.Supported())
- infoKeys = append(infoKeys, "log level", "mlock", "storage")
-
- if coreConfig.ClusterAddr != "" {
- info["cluster address"] = coreConfig.ClusterAddr
- infoKeys = append(infoKeys, "cluster address")
- }
- if coreConfig.RedirectAddr != "" {
- info["redirect address"] = coreConfig.RedirectAddr
- infoKeys = append(infoKeys, "redirect address")
- }
-
- if config.HAStorage != nil {
- info["HA storage"] = config.HAStorage.Type
- infoKeys = append(infoKeys, "HA storage")
- } else {
- // If the storage supports HA, then note it
- if coreConfig.HAPhysical != nil {
- if coreConfig.HAPhysical.HAEnabled() {
- info["storage"] += " (HA available)"
- } else {
- info["storage"] += " (HA disabled)"
- }
- }
- }
-
- clusterAddrs := []*net.TCPAddr{}
-
- // Initialize the listeners
- c.reloadFuncsLock.Lock()
- lns := make([]net.Listener, 0, len(config.Listeners))
- for i, lnConfig := range config.Listeners {
- ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, c.logGate)
- if err != nil {
- c.Ui.Output(fmt.Sprintf(
- "Error initializing listener of type %s: %s",
- lnConfig.Type, err))
- return 1
- }
-
- lns = append(lns, ln)
-
- if reloadFunc != nil {
- relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type]
- relSlice = append(relSlice, reloadFunc)
- (*c.reloadFuncs)["listener|"+lnConfig.Type] = relSlice
- }
-
- if !disableClustering && lnConfig.Type == "tcp" {
- var addrRaw interface{}
- var addr string
- var ok bool
- if addrRaw, ok = lnConfig.Config["cluster_address"]; ok {
- addr = addrRaw.(string)
- tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
- if err != nil {
- c.Ui.Output(fmt.Sprintf(
- "Error resolving cluster_address: %s",
- err))
- return 1
- }
- clusterAddrs = append(clusterAddrs, tcpAddr)
- } else {
- tcpAddr, ok := ln.Addr().(*net.TCPAddr)
- if !ok {
- c.Ui.Output("Failed to parse tcp listener")
- return 1
- }
- clusterAddr := &net.TCPAddr{
- IP: tcpAddr.IP,
- Port: tcpAddr.Port + 1,
- }
- clusterAddrs = append(clusterAddrs, clusterAddr)
- addr = clusterAddr.String()
- }
- props["cluster address"] = addr
- }
-
- // Store the listener props for output later
- key := fmt.Sprintf("listener %d", i+1)
- propsList := make([]string, 0, len(props))
- for k, v := range props {
- propsList = append(propsList, fmt.Sprintf(
- "%s: %q", k, v))
- }
- sort.Strings(propsList)
- infoKeys = append(infoKeys, key)
- info[key] = fmt.Sprintf(
- "%s (%s)", lnConfig.Type, strings.Join(propsList, ", "))
-
- }
- c.reloadFuncsLock.Unlock()
- if !disableClustering {
- if c.logger.IsTrace() {
- c.logger.Trace("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs)
- }
- }
-
- // Make sure we close all listeners from this point on
- listenerCloseFunc := func() {
- for _, ln := range lns {
- ln.Close()
- }
- }
-
- defer c.cleanupGuard.Do(listenerCloseFunc)
-
- infoKeys = append(infoKeys, "version")
- verInfo := version.GetVersion()
- info["version"] = verInfo.FullVersionNumber(false)
- if verInfo.Revision != "" {
- info["version sha"] = strings.Trim(verInfo.Revision, "'")
- infoKeys = append(infoKeys, "version sha")
- }
- infoKeys = append(infoKeys, "cgo")
- info["cgo"] = "disabled"
- if version.CgoEnabled {
- info["cgo"] = "enabled"
- }
-
- // Server configuration output
- padding := 24
- sort.Strings(infoKeys)
- c.Ui.Output("==> Vault server configuration:\n")
- for _, k := range infoKeys {
- c.Ui.Output(fmt.Sprintf(
- "%s%s: %s",
- strings.Repeat(" ", padding-len(k)),
- strings.Title(k),
- info[k]))
- }
- c.Ui.Output("")
-
- if verifyOnly {
- return 0
- }
-
- // Perform service discovery registrations and initialization of
- // HTTP server after the verifyOnly check.
-
- // Instantiate the wait group
- c.WaitGroup = &sync.WaitGroup{}
-
- // If the backend supports service discovery, run service discovery
- if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
- sd, ok := coreConfig.HAPhysical.(physical.ServiceDiscovery)
- if ok {
- activeFunc := func() bool {
- if isLeader, _, _, err := core.Leader(); err == nil {
- return isLeader
- }
- return false
- }
-
- sealedFunc := func() bool {
- if sealed, err := core.Sealed(); err == nil {
- return sealed
- }
- return true
- }
-
- if err := sd.RunServiceDiscovery(c.WaitGroup, c.ShutdownCh, coreConfig.RedirectAddr, activeFunc, sealedFunc); err != nil {
- c.Ui.Output(fmt.Sprintf("Error initializing service discovery: %v", err))
- return 1
- }
- }
- }
-
- handler := vaulthttp.Handler(core)
-
- // This needs to happen before we first unseal, so before we trigger dev
- // mode if it's set
- core.SetClusterListenerAddrs(clusterAddrs)
- core.SetClusterHandler(handler)
-
- // If we're in Dev mode, then initialize the core
- if dev {
- init, err := c.enableDev(core, coreConfig)
- if err != nil {
- c.Ui.Output(fmt.Sprintf(
- "Error initializing Dev mode: %s", err))
- return 1
- }
-
- export := "export"
- quote := "'"
- if runtime.GOOS == "windows" {
- export = "set"
- quote = ""
- }
-
- c.Ui.Output(fmt.Sprintf(
- "==> WARNING: Dev mode is enabled!\n\n"+
- "In this mode, Vault is completely in-memory and unsealed.\n"+
- "Vault is configured to only have a single unseal key. The root\n"+
- "token has already been authenticated with the CLI, so you can\n"+
- "immediately begin using the Vault CLI.\n\n"+
- "The only step you need to take is to set the following\n"+
- "environment variables:\n\n"+
- " "+export+" VAULT_ADDR="+quote+"http://"+config.Listeners[0].Config["address"].(string)+quote+"\n\n"+
- "The unseal key and root token are reproduced below in case you\n"+
- "want to seal/unseal the Vault or play with authentication.\n\n"+
- "Unseal Key: %s\nRoot Token: %s\n",
- base64.StdEncoding.EncodeToString(init.SecretShares[0]),
- init.RootToken,
- ))
- }
-
- // Initialize the HTTP server
- server := &http.Server{}
- if err := http2.ConfigureServer(server, nil); err != nil {
- c.Ui.Output(fmt.Sprintf("Error configuring server for HTTP/2: %s", err))
- return 1
- }
- server.Handler = handler
- for _, ln := range lns {
- go server.Serve(ln)
- }
-
- if newCoreError != nil {
- c.Ui.Output("==> Warning:\n\nNon-fatal error during initialization; check the logs for more information.")
- c.Ui.Output("")
- }
-
- // Output the header that the server has started
- c.Ui.Output("==> Vault server started! Log data will stream in below:\n")
-
- // Release the log gate.
- c.logGate.Flush()
-
- // Write out the PID to the file now that server has successfully started
- if err := c.storePidFile(config.PidFile); err != nil {
- c.Ui.Output(fmt.Sprintf("Error storing PID: %v", err))
- return 1
- }
-
- defer func() {
- if err := c.removePidFile(config.PidFile); err != nil {
- c.Ui.Output(fmt.Sprintf("Error deleting the PID file: %v", err))
- }
- }()
-
- // Wait for shutdown
- shutdownTriggered := false
-
- for !shutdownTriggered {
- select {
- case <-c.ShutdownCh:
- c.Ui.Output("==> Vault shutdown triggered")
-
- // Stop the listners so that we don't process further client requests.
- c.cleanupGuard.Do(listenerCloseFunc)
-
- // Shutdown will wait until after Vault is sealed, which means the
- // request forwarding listeners will also be closed (and also
- // waited for).
- if err := core.Shutdown(); err != nil {
- c.Ui.Output(fmt.Sprintf("Error with core shutdown: %s", err))
- }
-
- shutdownTriggered = true
-
- case <-c.SighupCh:
- c.Ui.Output("==> Vault reload triggered")
- if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, configPath); err != nil {
- c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
- }
- }
- }
-
- // Wait for dependent goroutines to complete
- c.WaitGroup.Wait()
- return 0
-}
-
-func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig) (*vault.InitResult, error) {
- // Initialize it with a basic single key
- init, err := core.Initialize(&vault.InitParams{
- BarrierConfig: &vault.SealConfig{
- SecretShares: 1,
- SecretThreshold: 1,
- },
- RecoveryConfig: nil,
- })
- if err != nil {
- return nil, err
- }
-
- // Copy the key so that it can be zeroed
- key := make([]byte, len(init.SecretShares[0]))
- copy(key, init.SecretShares[0])
-
- // Unseal the core
- unsealed, err := core.Unseal(key)
- if err != nil {
- return nil, err
- }
- if !unsealed {
- return nil, fmt.Errorf("failed to unseal Vault for dev mode")
- }
-
- isLeader, _, _, err := core.Leader()
- if err != nil && err != vault.ErrHANotEnabled {
- return nil, fmt.Errorf("failed to check active status: %v", err)
- }
- if err == nil {
- leaderCount := 5
- for !isLeader {
- if leaderCount == 0 {
- buf := make([]byte, 1<<16)
- runtime.Stack(buf, true)
- return nil, fmt.Errorf("failed to get active status after five seconds; call stack is\n%s\n", buf)
- }
- time.Sleep(1 * time.Second)
- isLeader, _, _, err = core.Leader()
- if err != nil {
- return nil, fmt.Errorf("failed to check active status: %v", err)
- }
- leaderCount--
- }
- }
-
- if coreConfig.DevToken != "" {
- req := &logical.Request{
- ID: "dev-gen-root",
- Operation: logical.UpdateOperation,
- ClientToken: init.RootToken,
- Path: "auth/token/create",
- Data: map[string]interface{}{
- "id": coreConfig.DevToken,
- "policies": []string{"root"},
- "no_parent": true,
- "no_default_policy": true,
- },
- }
- resp, err := core.HandleRequest(req)
- if err != nil {
- return nil, fmt.Errorf("failed to create root token with ID %s: %s", coreConfig.DevToken, err)
- }
- if resp == nil {
- return nil, fmt.Errorf("nil response when creating root token with ID %s", coreConfig.DevToken)
- }
- if resp.Auth == nil {
- return nil, fmt.Errorf("nil auth when creating root token with ID %s", coreConfig.DevToken)
- }
-
- init.RootToken = resp.Auth.ClientToken
-
- req.ID = "dev-revoke-init-root"
- req.Path = "auth/token/revoke-self"
- req.Data = nil
- resp, err = core.HandleRequest(req)
- if err != nil {
- return nil, fmt.Errorf("failed to revoke initial root token: %s", err)
- }
- }
-
- // Set the token
- tokenHelper, err := c.TokenHelper()
- if err != nil {
- return nil, err
- }
- if err := tokenHelper.Store(init.RootToken); err != nil {
- return nil, err
- }
-
- return init, nil
-}
-
-func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress string) int {
- testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{
- HandlerFunc: vaulthttp.Handler,
- BaseListenAddress: devListenAddress,
- })
- defer c.cleanupGuard.Do(testCluster.Cleanup)
-
- info["cluster parameters path"] = testCluster.TempDir
- info["log level"] = "trace"
- infoKeys = append(infoKeys, "cluster parameters path", "log level")
-
- for i, core := range testCluster.Cores {
- info[fmt.Sprintf("node %d redirect address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String())
- infoKeys = append(infoKeys, fmt.Sprintf("node %d redirect address", i))
- }
-
- infoKeys = append(infoKeys, "version")
- verInfo := version.GetVersion()
- info["version"] = verInfo.FullVersionNumber(false)
- if verInfo.Revision != "" {
- info["version sha"] = strings.Trim(verInfo.Revision, "'")
- infoKeys = append(infoKeys, "version sha")
- }
- infoKeys = append(infoKeys, "cgo")
- info["cgo"] = "disabled"
- if version.CgoEnabled {
- info["cgo"] = "enabled"
- }
-
- // Server configuration output
- padding := 24
- sort.Strings(infoKeys)
- c.Ui.Output("==> Vault server configuration:\n")
- for _, k := range infoKeys {
- c.Ui.Output(fmt.Sprintf(
- "%s%s: %s",
- strings.Repeat(" ", padding-len(k)),
- strings.Title(k),
- info[k]))
- }
- c.Ui.Output("")
-
- for _, core := range testCluster.Cores {
- core.Server.Handler = vaulthttp.Handler(core.Core)
- core.SetClusterHandler(core.Server.Handler)
- }
-
- testCluster.Start()
-
- if base.DevToken != "" {
- req := &logical.Request{
- ID: "dev-gen-root",
- Operation: logical.UpdateOperation,
- ClientToken: testCluster.RootToken,
- Path: "auth/token/create",
- Data: map[string]interface{}{
- "id": base.DevToken,
- "policies": []string{"root"},
- "no_parent": true,
- "no_default_policy": true,
- },
- }
- resp, err := testCluster.Cores[0].HandleRequest(req)
- if err != nil {
- c.Ui.Output(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err))
- return 1
- }
- if resp == nil {
- c.Ui.Output(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken))
- return 1
- }
- if resp.Auth == nil {
- c.Ui.Output(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken))
- return 1
- }
-
- testCluster.RootToken = resp.Auth.ClientToken
-
- req.ID = "dev-revoke-init-root"
- req.Path = "auth/token/revoke-self"
- req.Data = nil
- resp, err = testCluster.Cores[0].HandleRequest(req)
- if err != nil {
- c.Ui.Output(fmt.Sprintf("failed to revoke initial root token: %s", err))
- return 1
- }
- }
-
- // Set the token
- tokenHelper, err := c.TokenHelper()
- if err != nil {
- c.Ui.Output(fmt.Sprintf("%v", err))
- return 1
- }
- if err := tokenHelper.Store(testCluster.RootToken); err != nil {
- c.Ui.Output(fmt.Sprintf("%v", err))
- return 1
- }
-
- if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0755); err != nil {
- c.Ui.Output(fmt.Sprintf("%v", err))
- return 1
- }
-
- c.Ui.Output(fmt.Sprintf(
- "==> Three node dev mode is enabled\n\n" +
- "The unseal key and root token are reproduced below in case you\n" +
- "want to seal/unseal the Vault or play with authentication.\n",
- ))
-
- for i, key := range testCluster.BarrierKeys {
- c.Ui.Output(fmt.Sprintf(
- "Unseal Key %d: %s",
- i+1, base64.StdEncoding.EncodeToString(key),
- ))
- }
-
- c.Ui.Output(fmt.Sprintf(
- "\nRoot Token: %s\n", testCluster.RootToken,
- ))
-
- c.Ui.Output(fmt.Sprintf(
- "\nUseful env vars:\n"+
- "VAULT_TOKEN=%s\n"+
- "VAULT_ADDR=%s\n"+
- "VAULT_CACERT=%s/ca_cert.pem\n",
- testCluster.RootToken,
- testCluster.Cores[0].Client.Address(),
- testCluster.TempDir,
- ))
-
- // Output the header that the server has started
- c.Ui.Output("==> Vault server started! Log data will stream in below:\n")
-
- // Release the log gate.
- c.logGate.Flush()
-
- // Wait for shutdown
- shutdownTriggered := false
-
- for !shutdownTriggered {
- select {
- case <-c.ShutdownCh:
- c.Ui.Output("==> Vault shutdown triggered")
-
- // Stop the listners so that we don't process further client requests.
- c.cleanupGuard.Do(testCluster.Cleanup)
-
- // Shutdown will wait until after Vault is sealed, which means the
- // request forwarding listeners will also be closed (and also
- // waited for).
- for _, core := range testCluster.Cores {
- if err := core.Shutdown(); err != nil {
- c.Ui.Output(fmt.Sprintf("Error with core shutdown: %s", err))
- }
- }
-
- shutdownTriggered = true
-
- case <-c.SighupCh:
- c.Ui.Output("==> Vault reload triggered")
- for _, core := range testCluster.Cores {
- if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil); err != nil {
- c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
- }
- }
- }
- }
-
- return 0
-}
-
-// detectRedirect is used to attempt redirect address detection
-func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect,
- config *server.Config) (string, error) {
- // Get the hostname
- host, err := detect.DetectHostAddr()
- if err != nil {
- return "", err
- }
-
- // set [] for ipv6 addresses
- if strings.Contains(host, ":") && !strings.Contains(host, "]") {
- host = "[" + host + "]"
- }
-
- // Default the port and scheme
- scheme := "https"
- port := 8200
-
- // Attempt to detect overrides
- for _, list := range config.Listeners {
- // Only attempt TCP
- if list.Type != "tcp" {
- continue
- }
-
- // Check if TLS is disabled
- if val, ok := list.Config["tls_disable"]; ok {
- disable, err := parseutil.ParseBool(val)
- if err != nil {
- return "", fmt.Errorf("tls_disable: %s", err)
- }
-
- if disable {
- scheme = "http"
- }
- }
-
- // Check for address override
- var addr string
- addrRaw, ok := list.Config["address"]
- if !ok {
- addr = "127.0.0.1:8200"
- } else {
- addr = addrRaw.(string)
- }
-
- // Check for localhost
- hostStr, portStr, err := net.SplitHostPort(addr)
- if err != nil {
- continue
- }
- if hostStr == "127.0.0.1" {
- host = hostStr
- }
-
- // Check for custom port
- listPort, err := strconv.Atoi(portStr)
- if err != nil {
- continue
- }
- port = listPort
- }
-
- // Build a URL
- url := &url.URL{
- Scheme: scheme,
- Host: fmt.Sprintf("%s:%d", host, port),
- }
-
- // Return the URL string
- return url.String(), nil
-}
-
-// setupTelemetry is used to setup the telemetry sub-systems
-func (c *ServerCommand) setupTelemetry(config *server.Config) error {
- /* Setup telemetry
- Aggregate on 10 second intervals for 1 minute. Expose the
- metrics over stderr when there is a SIGUSR1 received.
- */
- inm := metrics.NewInmemSink(10*time.Second, time.Minute)
- metrics.DefaultInmemSignal(inm)
-
- var telConfig *server.Telemetry
- if config.Telemetry == nil {
- telConfig = &server.Telemetry{}
- } else {
- telConfig = config.Telemetry
- }
-
- metricsConf := metrics.DefaultConfig("vault")
- metricsConf.EnableHostname = !telConfig.DisableHostname
-
- // Configure the statsite sink
- var fanout metrics.FanoutSink
- if telConfig.StatsiteAddr != "" {
- sink, err := metrics.NewStatsiteSink(telConfig.StatsiteAddr)
- if err != nil {
- return err
- }
- fanout = append(fanout, sink)
- }
-
- // Configure the statsd sink
- if telConfig.StatsdAddr != "" {
- sink, err := metrics.NewStatsdSink(telConfig.StatsdAddr)
- if err != nil {
- return err
- }
- fanout = append(fanout, sink)
- }
-
- // Configure the Circonus sink
- if telConfig.CirconusAPIToken != "" || telConfig.CirconusCheckSubmissionURL != "" {
- cfg := &circonus.Config{}
- cfg.Interval = telConfig.CirconusSubmissionInterval
- cfg.CheckManager.API.TokenKey = telConfig.CirconusAPIToken
- cfg.CheckManager.API.TokenApp = telConfig.CirconusAPIApp
- cfg.CheckManager.API.URL = telConfig.CirconusAPIURL
- cfg.CheckManager.Check.SubmissionURL = telConfig.CirconusCheckSubmissionURL
- cfg.CheckManager.Check.ID = telConfig.CirconusCheckID
- cfg.CheckManager.Check.ForceMetricActivation = telConfig.CirconusCheckForceMetricActivation
- cfg.CheckManager.Check.InstanceID = telConfig.CirconusCheckInstanceID
- cfg.CheckManager.Check.SearchTag = telConfig.CirconusCheckSearchTag
- cfg.CheckManager.Check.DisplayName = telConfig.CirconusCheckDisplayName
- cfg.CheckManager.Check.Tags = telConfig.CirconusCheckTags
- cfg.CheckManager.Broker.ID = telConfig.CirconusBrokerID
- cfg.CheckManager.Broker.SelectTag = telConfig.CirconusBrokerSelectTag
-
- if cfg.CheckManager.API.TokenApp == "" {
- cfg.CheckManager.API.TokenApp = "vault"
- }
-
- if cfg.CheckManager.Check.DisplayName == "" {
- cfg.CheckManager.Check.DisplayName = "Vault"
- }
-
- if cfg.CheckManager.Check.SearchTag == "" {
- cfg.CheckManager.Check.SearchTag = "service:vault"
- }
-
- sink, err := circonus.NewCirconusSink(cfg)
- if err != nil {
- return err
- }
- sink.Start()
- fanout = append(fanout, sink)
- }
-
- if telConfig.DogStatsDAddr != "" {
- var tags []string
-
- if telConfig.DogStatsDTags != nil {
- tags = telConfig.DogStatsDTags
- }
-
- sink, err := datadog.NewDogStatsdSink(telConfig.DogStatsDAddr, metricsConf.HostName)
- if err != nil {
- return fmt.Errorf("failed to start DogStatsD sink. Got: %s", err)
- }
- sink.SetTags(tags)
- fanout = append(fanout, sink)
- }
-
- // Initialize the global sink
- if len(fanout) > 0 {
- fanout = append(fanout, inm)
- metrics.NewGlobal(metricsConf, fanout)
- } else {
- metricsConf.EnableHostname = false
- metrics.NewGlobal(metricsConf, inm)
- }
- return nil
-}
-
-func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reload.ReloadFunc, configPath []string) error {
- lock.RLock()
- defer lock.RUnlock()
-
- var reloadErrors *multierror.Error
-
- for k, relFuncs := range *reloadFuncs {
- switch {
- case strings.HasPrefix(k, "listener|"):
- for _, relFunc := range relFuncs {
- if relFunc != nil {
- if err := relFunc(nil); err != nil {
- reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading listener: %v", err))
- }
- }
- }
-
- case strings.HasPrefix(k, "audit_file|"):
- for _, relFunc := range relFuncs {
- if relFunc != nil {
- if err := relFunc(nil); err != nil {
- reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading file audit backend at path %s: %v", strings.TrimPrefix(k, "audit_file|"), err))
- }
- }
- }
- }
- }
-
- return reloadErrors.ErrorOrNil()
-}
-
-func (c *ServerCommand) Synopsis() string {
- return "Start a Vault server"
-}
-
-func (c *ServerCommand) Help() string {
- helpText := `
-Usage: vault server [options]
-
- Start a Vault server.
-
- This command starts a Vault server that responds to API requests.
- Vault will start in a "sealed" state. The Vault must be unsealed
- with "vault unseal" or the API before this server can respond to requests.
- This must be done for every server.
-
- If the server is being started against a storage backend that is
- brand new (no existing Vault data in it), it must be initialized with
- "vault init" or the API first.
-
-
-General Options:
-
- -config= Path to the configuration file or directory. This can
- be specified multiple times. If it is a directory,
- all files with a ".hcl" or ".json" suffix will be
- loaded.
-
- -dev Enables Dev mode. In this mode, Vault is completely
- in-memory and unsealed. Do not run the Dev server in
- production!
-
- -dev-root-token-id="" If set, the root token returned in Dev mode will have
- the given ID. This *only* has an effect when running
- in Dev mode. Can also be specified with the
- VAULT_DEV_ROOT_TOKEN_ID environment variable.
-
- -dev-listen-address="" If set, this overrides the normal Dev mode listen
- address of "127.0.0.1:8200". Can also be specified
- with the VAULT_DEV_LISTEN_ADDRESS environment
- variable.
-
- -log-level=info Log verbosity. Defaults to "info", will be output to
- stderr. Supported values: "trace", "debug", "info",
- "warn", "err"
-`
- return strings.TrimSpace(helpText)
-}
-
-func (c *ServerCommand) AutocompleteArgs() complete.Predictor {
- return complete.PredictNothing
-}
-
-func (c *ServerCommand) AutocompleteFlags() complete.Flags {
- return complete.Flags{
- "-config": complete.PredictOr(complete.PredictFiles("*.hcl"), complete.PredictFiles("*.json")),
- "-dev": complete.PredictNothing,
- "-dev-root-token-id": complete.PredictNothing,
- "-dev-listen-address": complete.PredictNothing,
- "-log-level": complete.PredictSet("trace", "debug", "info", "warn", "err"),
- }
-}
-
-// storePidFile is used to write out our PID to a file if necessary
-func (c *ServerCommand) storePidFile(pidPath string) error {
- // Quit fast if no pidfile
- if pidPath == "" {
- return nil
- }
-
- // Open the PID file
- pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- return fmt.Errorf("could not open pid file: %v", err)
- }
- defer pidFile.Close()
-
- // Write out the PID
- pid := os.Getpid()
- _, err = pidFile.WriteString(fmt.Sprintf("%d", pid))
- if err != nil {
- return fmt.Errorf("could not write to pid file: %v", err)
- }
- return nil
-}
-
-// removePidFile is used to cleanup the PID file if necessary
-func (c *ServerCommand) removePidFile(pidPath string) error {
- if pidPath == "" {
- return nil
- }
- return os.Remove(pidPath)
-}
-
-// MakeShutdownCh returns a channel that can be used for shutdown
-// notifications for commands. This channel will send a message for every
-// SIGINT or SIGTERM received.
-func MakeShutdownCh() chan struct{} {
- resultCh := make(chan struct{})
-
- shutdownCh := make(chan os.Signal, 4)
- signal.Notify(shutdownCh, os.Interrupt, syscall.SIGTERM)
- go func() {
- <-shutdownCh
- close(resultCh)
- }()
- return resultCh
-}
-
-// MakeSighupCh returns a channel that can be used for SIGHUP
-// reloading. This channel will send a message for every
-// SIGHUP received.
-func MakeSighupCh() chan struct{} {
- resultCh := make(chan struct{})
-
- signalCh := make(chan os.Signal, 4)
- signal.Notify(signalCh, syscall.SIGHUP)
- go func() {
- for {
- <-signalCh
- resultCh <- struct{}{}
- }
- }()
- return resultCh
-}
-
-type grpclogFaker struct {
- logger log.Logger
-}
-
-func (g *grpclogFaker) Fatal(args ...interface{}) {
- g.logger.Error(fmt.Sprint(args...))
- os.Exit(1)
-}
-
-func (g *grpclogFaker) Fatalf(format string, args ...interface{}) {
- g.logger.Error(fmt.Sprintf(format, args...))
- os.Exit(1)
-}
-
-func (g *grpclogFaker) Fatalln(args ...interface{}) {
- g.logger.Error(fmt.Sprintln(args...))
- os.Exit(1)
-}
-
-func (g *grpclogFaker) Print(args ...interface{}) {
- g.logger.Warn(fmt.Sprint(args...))
-}
-
-func (g *grpclogFaker) Printf(format string, args ...interface{}) {
- g.logger.Warn(fmt.Sprintf(format, args...))
-}
-
-func (g *grpclogFaker) Println(args ...interface{}) {
- g.logger.Warn(fmt.Sprintln(args...))
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server/config.go b/vendor/github.com/hashicorp/vault/command/server/config.go
deleted file mode 100644
index 8f78ac0..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/config.go
+++ /dev/null
@@ -1,808 +0,0 @@
-package server
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/hcl"
- "github.com/hashicorp/hcl/hcl/ast"
- "github.com/hashicorp/vault/helper/parseutil"
-)
-
-// Config is the configuration for the vault server.
-type Config struct {
- Listeners []*Listener `hcl:"-"`
- Storage *Storage `hcl:"-"`
- HAStorage *Storage `hcl:"-"`
-
- HSM *HSM `hcl:"-"`
-
- CacheSize int `hcl:"cache_size"`
- DisableCache bool `hcl:"-"`
- DisableCacheRaw interface{} `hcl:"disable_cache"`
- DisableMlock bool `hcl:"-"`
- DisableMlockRaw interface{} `hcl:"disable_mlock"`
-
- EnableUI bool `hcl:"-"`
- EnableUIRaw interface{} `hcl:"ui"`
-
- Telemetry *Telemetry `hcl:"telemetry"`
-
- MaxLeaseTTL time.Duration `hcl:"-"`
- MaxLeaseTTLRaw interface{} `hcl:"max_lease_ttl"`
- DefaultLeaseTTL time.Duration `hcl:"-"`
- DefaultLeaseTTLRaw interface{} `hcl:"default_lease_ttl"`
-
- ClusterName string `hcl:"cluster_name"`
- ClusterCipherSuites string `hcl:"cluster_cipher_suites"`
-
- PluginDirectory string `hcl:"plugin_directory"`
-
- PidFile string `hcl:"pid_file"`
- EnableRawEndpoint bool `hcl:"-"`
- EnableRawEndpointRaw interface{} `hcl:"raw_storage_endpoint"`
-}
-
-// DevConfig is a Config that is used for dev mode of Vault.
-func DevConfig(ha, transactional bool) *Config {
- ret := &Config{
- DisableCache: false,
- DisableMlock: true,
- EnableRawEndpoint: true,
-
- Storage: &Storage{
- Type: "inmem",
- },
-
- Listeners: []*Listener{
- &Listener{
- Type: "tcp",
- Config: map[string]interface{}{
- "address": "127.0.0.1:8200",
- "tls_disable": true,
- "proxy_protocol_behavior": "allow_authorized",
- "proxy_protocol_authorized_addrs": "127.0.0.1:8200",
- },
- },
- },
-
- EnableUI: true,
-
- Telemetry: &Telemetry{},
- }
-
- switch {
- case ha && transactional:
- ret.Storage.Type = "inmem_transactional_ha"
- case !ha && transactional:
- ret.Storage.Type = "inmem_transactional"
- case ha && !transactional:
- ret.Storage.Type = "inmem_ha"
- }
-
- return ret
-}
-
-// Listener is the listener configuration for the server.
-type Listener struct {
- Type string
- Config map[string]interface{}
-}
-
-func (l *Listener) GoString() string {
- return fmt.Sprintf("*%#v", *l)
-}
-
-// Storage is the underlying storage configuration for the server.
-type Storage struct {
- Type string
- RedirectAddr string
- ClusterAddr string
- DisableClustering bool
- Config map[string]string
-}
-
-func (b *Storage) GoString() string {
- return fmt.Sprintf("*%#v", *b)
-}
-
-// HSM contains HSM configuration for the server
-type HSM struct {
- Type string
- Config map[string]string
-}
-
-func (h *HSM) GoString() string {
- return fmt.Sprintf("*%#v", *h)
-}
-
-// Telemetry is the telemetry configuration for the server
-type Telemetry struct {
- StatsiteAddr string `hcl:"statsite_address"`
- StatsdAddr string `hcl:"statsd_address"`
-
- DisableHostname bool `hcl:"disable_hostname"`
-
- // Circonus: see https://github.com/circonus-labs/circonus-gometrics
- // for more details on the various configuration options.
- // Valid configuration combinations:
- // - CirconusAPIToken
- // metric management enabled (search for existing check or create a new one)
- // - CirconusSubmissionUrl
- // metric management disabled (use check with specified submission_url,
- // broker must be using a public SSL certificate)
- // - CirconusAPIToken + CirconusCheckSubmissionURL
- // metric management enabled (use check with specified submission_url)
- // - CirconusAPIToken + CirconusCheckID
- // metric management enabled (use check with specified id)
-
- // CirconusAPIToken is a valid API Token used to create/manage check. If provided,
- // metric management is enabled.
- // Default: none
- CirconusAPIToken string `hcl:"circonus_api_token"`
- // CirconusAPIApp is an app name associated with API token.
- // Default: "consul"
- CirconusAPIApp string `hcl:"circonus_api_app"`
- // CirconusAPIURL is the base URL to use for contacting the Circonus API.
- // Default: "https://api.circonus.com/v2"
- CirconusAPIURL string `hcl:"circonus_api_url"`
- // CirconusSubmissionInterval is the interval at which metrics are submitted to Circonus.
- // Default: 10s
- CirconusSubmissionInterval string `hcl:"circonus_submission_interval"`
- // CirconusCheckSubmissionURL is the check.config.submission_url field from a
- // previously created HTTPTRAP check.
- // Default: none
- CirconusCheckSubmissionURL string `hcl:"circonus_submission_url"`
- // CirconusCheckID is the check id (not check bundle id) from a previously created
- // HTTPTRAP check. The numeric portion of the check._cid field.
- // Default: none
- CirconusCheckID string `hcl:"circonus_check_id"`
- // CirconusCheckForceMetricActivation will force enabling metrics, as they are encountered,
- // if the metric already exists and is NOT active. If check management is enabled, the default
- // behavior is to add new metrics as they are encoutered. If the metric already exists in the
- // check, it will *NOT* be activated. This setting overrides that behavior.
- // Default: "false"
- CirconusCheckForceMetricActivation string `hcl:"circonus_check_force_metric_activation"`
- // CirconusCheckInstanceID serves to uniquely identify the metrics comming from this "instance".
- // It can be used to maintain metric continuity with transient or ephemeral instances as
- // they move around within an infrastructure.
- // Default: hostname:app
- CirconusCheckInstanceID string `hcl:"circonus_check_instance_id"`
- // CirconusCheckSearchTag is a special tag which, when coupled with the instance id, helps to
- // narrow down the search results when neither a Submission URL or Check ID is provided.
- // Default: service:app (e.g. service:consul)
- CirconusCheckSearchTag string `hcl:"circonus_check_search_tag"`
- // CirconusCheckTags is a comma separated list of tags to apply to the check. Note that
- // the value of CirconusCheckSearchTag will always be added to the check.
- // Default: none
- CirconusCheckTags string `mapstructure:"circonus_check_tags"`
- // CirconusCheckDisplayName is the name for the check which will be displayed in the Circonus UI.
- // Default: value of CirconusCheckInstanceID
- CirconusCheckDisplayName string `mapstructure:"circonus_check_display_name"`
- // CirconusBrokerID is an explicit broker to use when creating a new check. The numeric portion
- // of broker._cid. If metric management is enabled and neither a Submission URL nor Check ID
- // is provided, an attempt will be made to search for an existing check using Instance ID and
- // Search Tag. If one is not found, a new HTTPTRAP check will be created.
- // Default: use Select Tag if provided, otherwise, a random Enterprise Broker associated
- // with the specified API token or the default Circonus Broker.
- // Default: none
- CirconusBrokerID string `hcl:"circonus_broker_id"`
- // CirconusBrokerSelectTag is a special tag which will be used to select a broker when
- // a Broker ID is not provided. The best use of this is to as a hint for which broker
- // should be used based on *where* this particular instance is running.
- // (e.g. a specific geo location or datacenter, dc:sfo)
- // Default: none
- CirconusBrokerSelectTag string `hcl:"circonus_broker_select_tag"`
-
- // Dogstats:
- // DogStatsdAddr is the address of a dogstatsd instance. If provided,
- // metrics will be sent to that instance
- DogStatsDAddr string `hcl:"dogstatsd_addr"`
-
- // DogStatsdTags are the global tags that should be sent with each packet to dogstatsd
- // It is a list of strings, where each string looks like "my_tag_name:my_tag_value"
- DogStatsDTags []string `hcl:"dogstatsd_tags"`
-}
-
-func (s *Telemetry) GoString() string {
- return fmt.Sprintf("*%#v", *s)
-}
-
-// Merge merges two configurations.
-func (c *Config) Merge(c2 *Config) *Config {
- if c2 == nil {
- return c
- }
-
- result := new(Config)
- for _, l := range c.Listeners {
- result.Listeners = append(result.Listeners, l)
- }
- for _, l := range c2.Listeners {
- result.Listeners = append(result.Listeners, l)
- }
-
- result.Storage = c.Storage
- if c2.Storage != nil {
- result.Storage = c2.Storage
- }
-
- result.HAStorage = c.HAStorage
- if c2.HAStorage != nil {
- result.HAStorage = c2.HAStorage
- }
-
- result.HSM = c.HSM
- if c2.HSM != nil {
- result.HSM = c2.HSM
- }
-
- result.Telemetry = c.Telemetry
- if c2.Telemetry != nil {
- result.Telemetry = c2.Telemetry
- }
-
- result.CacheSize = c.CacheSize
- if c2.CacheSize != 0 {
- result.CacheSize = c2.CacheSize
- }
-
- // merging these booleans via an OR operation
- result.DisableCache = c.DisableCache
- if c2.DisableCache {
- result.DisableCache = c2.DisableCache
- }
-
- result.DisableMlock = c.DisableMlock
- if c2.DisableMlock {
- result.DisableMlock = c2.DisableMlock
- }
-
- // merge these integers via a MAX operation
- result.MaxLeaseTTL = c.MaxLeaseTTL
- if c2.MaxLeaseTTL > result.MaxLeaseTTL {
- result.MaxLeaseTTL = c2.MaxLeaseTTL
- }
-
- result.DefaultLeaseTTL = c.DefaultLeaseTTL
- if c2.DefaultLeaseTTL > result.DefaultLeaseTTL {
- result.DefaultLeaseTTL = c2.DefaultLeaseTTL
- }
-
- result.ClusterName = c.ClusterName
- if c2.ClusterName != "" {
- result.ClusterName = c2.ClusterName
- }
-
- result.ClusterCipherSuites = c.ClusterCipherSuites
- if c2.ClusterCipherSuites != "" {
- result.ClusterCipherSuites = c2.ClusterCipherSuites
- }
-
- result.EnableUI = c.EnableUI
- if c2.EnableUI {
- result.EnableUI = c2.EnableUI
- }
-
- result.EnableRawEndpoint = c.EnableRawEndpoint
- if c2.EnableRawEndpoint {
- result.EnableRawEndpoint = c2.EnableRawEndpoint
- }
-
- result.PluginDirectory = c.PluginDirectory
- if c2.PluginDirectory != "" {
- result.PluginDirectory = c2.PluginDirectory
- }
-
- result.PidFile = c.PidFile
- if c2.PidFile != "" {
- result.PidFile = c2.PidFile
- }
-
- return result
-}
-
-// LoadConfig loads the configuration at the given path, regardless if
-// its a file or directory.
-func LoadConfig(path string, logger log.Logger) (*Config, error) {
- fi, err := os.Stat(path)
- if err != nil {
- return nil, err
- }
-
- if fi.IsDir() {
- return LoadConfigDir(path, logger)
- }
- return LoadConfigFile(path, logger)
-}
-
-// LoadConfigFile loads the configuration from the given file.
-func LoadConfigFile(path string, logger log.Logger) (*Config, error) {
- // Read the file
- d, err := ioutil.ReadFile(path)
- if err != nil {
- return nil, err
- }
- return ParseConfig(string(d), logger)
-}
-
-func ParseConfig(d string, logger log.Logger) (*Config, error) {
- // Parse!
- obj, err := hcl.Parse(d)
- if err != nil {
- return nil, err
- }
-
- // Start building the result
- var result Config
- if err := hcl.DecodeObject(&result, obj); err != nil {
- return nil, err
- }
-
- if result.MaxLeaseTTLRaw != nil {
- if result.MaxLeaseTTL, err = parseutil.ParseDurationSecond(result.MaxLeaseTTLRaw); err != nil {
- return nil, err
- }
- }
- if result.DefaultLeaseTTLRaw != nil {
- if result.DefaultLeaseTTL, err = parseutil.ParseDurationSecond(result.DefaultLeaseTTLRaw); err != nil {
- return nil, err
- }
- }
-
- if result.EnableUIRaw != nil {
- if result.EnableUI, err = parseutil.ParseBool(result.EnableUIRaw); err != nil {
- return nil, err
- }
- }
-
- if result.DisableCacheRaw != nil {
- if result.DisableCache, err = parseutil.ParseBool(result.DisableCacheRaw); err != nil {
- return nil, err
- }
- }
-
- if result.DisableMlockRaw != nil {
- if result.DisableMlock, err = parseutil.ParseBool(result.DisableMlockRaw); err != nil {
- return nil, err
- }
- }
-
- if result.EnableRawEndpointRaw != nil {
- if result.EnableRawEndpoint, err = parseutil.ParseBool(result.EnableRawEndpointRaw); err != nil {
- return nil, err
- }
- }
-
- list, ok := obj.Node.(*ast.ObjectList)
- if !ok {
- return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
- }
-
- valid := []string{
- "storage",
- "ha_storage",
- "backend",
- "ha_backend",
- "hsm",
- "listener",
- "cache_size",
- "disable_cache",
- "disable_mlock",
- "ui",
- "telemetry",
- "default_lease_ttl",
- "max_lease_ttl",
- "cluster_name",
- "cluster_cipher_suites",
- "plugin_directory",
- "pid_file",
- "raw_storage_endpoint",
- }
- if err := checkHCLKeys(list, valid); err != nil {
- return nil, err
- }
-
- // Look for storage but still support old backend
- if o := list.Filter("storage"); len(o.Items) > 0 {
- if err := parseStorage(&result, o, "storage"); err != nil {
- return nil, fmt.Errorf("error parsing 'storage': %s", err)
- }
- } else {
- if o := list.Filter("backend"); len(o.Items) > 0 {
- if err := parseStorage(&result, o, "backend"); err != nil {
- return nil, fmt.Errorf("error parsing 'backend': %s", err)
- }
- }
- }
-
- if o := list.Filter("ha_storage"); len(o.Items) > 0 {
- if err := parseHAStorage(&result, o, "ha_storage"); err != nil {
- return nil, fmt.Errorf("error parsing 'ha_storage': %s", err)
- }
- } else {
- if o := list.Filter("ha_backend"); len(o.Items) > 0 {
- if err := parseHAStorage(&result, o, "ha_backend"); err != nil {
- return nil, fmt.Errorf("error parsing 'ha_backend': %s", err)
- }
- }
- }
-
- if o := list.Filter("hsm"); len(o.Items) > 0 {
- if err := parseHSMs(&result, o); err != nil {
- return nil, fmt.Errorf("error parsing 'hsm': %s", err)
- }
- }
-
- if o := list.Filter("listener"); len(o.Items) > 0 {
- if err := parseListeners(&result, o); err != nil {
- return nil, fmt.Errorf("error parsing 'listener': %s", err)
- }
- }
-
- if o := list.Filter("telemetry"); len(o.Items) > 0 {
- if err := parseTelemetry(&result, o); err != nil {
- return nil, fmt.Errorf("error parsing 'telemetry': %s", err)
- }
- }
-
- return &result, nil
-}
-
-// LoadConfigDir loads all the configurations in the given directory
-// in alphabetical order.
-func LoadConfigDir(dir string, logger log.Logger) (*Config, error) {
- f, err := os.Open(dir)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- fi, err := f.Stat()
- if err != nil {
- return nil, err
- }
- if !fi.IsDir() {
- return nil, fmt.Errorf(
- "configuration path must be a directory: %s",
- dir)
- }
-
- var files []string
- err = nil
- for err != io.EOF {
- var fis []os.FileInfo
- fis, err = f.Readdir(128)
- if err != nil && err != io.EOF {
- return nil, err
- }
-
- for _, fi := range fis {
- // Ignore directories
- if fi.IsDir() {
- continue
- }
-
- // Only care about files that are valid to load.
- name := fi.Name()
- skip := true
- if strings.HasSuffix(name, ".hcl") {
- skip = false
- } else if strings.HasSuffix(name, ".json") {
- skip = false
- }
- if skip || isTemporaryFile(name) {
- continue
- }
-
- path := filepath.Join(dir, name)
- files = append(files, path)
- }
- }
-
- var result *Config
- for _, f := range files {
- config, err := LoadConfigFile(f, logger)
- if err != nil {
- return nil, fmt.Errorf("Error loading %s: %s", f, err)
- }
-
- if result == nil {
- result = config
- } else {
- result = result.Merge(config)
- }
- }
-
- return result, nil
-}
-
-// isTemporaryFile returns true or false depending on whether the
-// provided file name is a temporary file for the following editors:
-// emacs or vim.
-func isTemporaryFile(name string) bool {
- return strings.HasSuffix(name, "~") || // vim
- strings.HasPrefix(name, ".#") || // emacs
- (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs
-}
-
-func parseStorage(result *Config, list *ast.ObjectList, name string) error {
- if len(list.Items) > 1 {
- return fmt.Errorf("only one %q block is permitted", name)
- }
-
- // Get our item
- item := list.Items[0]
-
- key := name
- if len(item.Keys) > 0 {
- key = item.Keys[0].Token.Value().(string)
- }
-
- var m map[string]string
- if err := hcl.DecodeObject(&m, item.Val); err != nil {
- return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
- }
-
- // Pull out the redirect address since it's common to all backends
- var redirectAddr string
- if v, ok := m["redirect_addr"]; ok {
- redirectAddr = v
- delete(m, "redirect_addr")
- } else if v, ok := m["advertise_addr"]; ok {
- redirectAddr = v
- delete(m, "advertise_addr")
- }
-
- // Pull out the cluster address since it's common to all backends
- var clusterAddr string
- if v, ok := m["cluster_addr"]; ok {
- clusterAddr = v
- delete(m, "cluster_addr")
- }
-
- var disableClustering bool
- var err error
- if v, ok := m["disable_clustering"]; ok {
- disableClustering, err = strconv.ParseBool(v)
- if err != nil {
- return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
- }
- delete(m, "disable_clustering")
- }
-
- result.Storage = &Storage{
- RedirectAddr: redirectAddr,
- ClusterAddr: clusterAddr,
- DisableClustering: disableClustering,
- Type: strings.ToLower(key),
- Config: m,
- }
- return nil
-}
-
-func parseHAStorage(result *Config, list *ast.ObjectList, name string) error {
- if len(list.Items) > 1 {
- return fmt.Errorf("only one %q block is permitted", name)
- }
-
- // Get our item
- item := list.Items[0]
-
- key := name
- if len(item.Keys) > 0 {
- key = item.Keys[0].Token.Value().(string)
- }
-
- var m map[string]string
- if err := hcl.DecodeObject(&m, item.Val); err != nil {
- return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
- }
-
- // Pull out the redirect address since it's common to all backends
- var redirectAddr string
- if v, ok := m["redirect_addr"]; ok {
- redirectAddr = v
- delete(m, "redirect_addr")
- } else if v, ok := m["advertise_addr"]; ok {
- redirectAddr = v
- delete(m, "advertise_addr")
- }
-
- // Pull out the cluster address since it's common to all backends
- var clusterAddr string
- if v, ok := m["cluster_addr"]; ok {
- clusterAddr = v
- delete(m, "cluster_addr")
- }
-
- var disableClustering bool
- var err error
- if v, ok := m["disable_clustering"]; ok {
- disableClustering, err = strconv.ParseBool(v)
- if err != nil {
- return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
- }
- delete(m, "disable_clustering")
- }
-
- result.HAStorage = &Storage{
- RedirectAddr: redirectAddr,
- ClusterAddr: clusterAddr,
- DisableClustering: disableClustering,
- Type: strings.ToLower(key),
- Config: m,
- }
- return nil
-}
-
-func parseHSMs(result *Config, list *ast.ObjectList) error {
- if len(list.Items) > 1 {
- return fmt.Errorf("only one 'hsm' block is permitted")
- }
-
- // Get our item
- item := list.Items[0]
-
- key := "hsm"
- if len(item.Keys) > 0 {
- key = item.Keys[0].Token.Value().(string)
- }
-
- valid := []string{
- "lib",
- "slot",
- "pin",
- "mechanism",
- "key_label",
- "generate_key",
- "regenerate_key",
- }
- if err := checkHCLKeys(item.Val, valid); err != nil {
- return multierror.Prefix(err, fmt.Sprintf("hsm.%s:", key))
- }
-
- var m map[string]string
- if err := hcl.DecodeObject(&m, item.Val); err != nil {
- return multierror.Prefix(err, fmt.Sprintf("hsm.%s:", key))
- }
-
- result.HSM = &HSM{
- Type: strings.ToLower(key),
- Config: m,
- }
-
- return nil
-}
-
-func parseListeners(result *Config, list *ast.ObjectList) error {
- listeners := make([]*Listener, 0, len(list.Items))
- for _, item := range list.Items {
- key := "listener"
- if len(item.Keys) > 0 {
- key = item.Keys[0].Token.Value().(string)
- }
-
- valid := []string{
- "address",
- "cluster_address",
- "endpoint",
- "infrastructure",
- "node_id",
- "proxy_protocol_behavior",
- "proxy_protocol_authorized_addrs",
- "tls_disable",
- "tls_cert_file",
- "tls_key_file",
- "tls_min_version",
- "tls_cipher_suites",
- "tls_prefer_server_cipher_suites",
- "tls_require_and_verify_client_cert",
- "tls_client_ca_file",
- "token",
- }
- if err := checkHCLKeys(item.Val, valid); err != nil {
- return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key))
- }
-
- var m map[string]interface{}
- if err := hcl.DecodeObject(&m, item.Val); err != nil {
- return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key))
- }
-
- lnType := strings.ToLower(key)
-
- listeners = append(listeners, &Listener{
- Type: lnType,
- Config: m,
- })
- }
-
- result.Listeners = listeners
- return nil
-}
-
-func parseTelemetry(result *Config, list *ast.ObjectList) error {
- if len(list.Items) > 1 {
- return fmt.Errorf("only one 'telemetry' block is permitted")
- }
-
- // Get our one item
- item := list.Items[0]
-
- // Check for invalid keys
- valid := []string{
- "circonus_api_token",
- "circonus_api_app",
- "circonus_api_url",
- "circonus_submission_interval",
- "circonus_submission_url",
- "circonus_check_id",
- "circonus_check_force_metric_activation",
- "circonus_check_instance_id",
- "circonus_check_search_tag",
- "circonus_check_display_name",
- "circonus_check_tags",
- "circonus_broker_id",
- "circonus_broker_select_tag",
- "disable_hostname",
- "dogstatsd_addr",
- "dogstatsd_tags",
- "statsd_address",
- "statsite_address",
- }
- if err := checkHCLKeys(item.Val, valid); err != nil {
- return multierror.Prefix(err, "telemetry:")
- }
-
- var t Telemetry
- if err := hcl.DecodeObject(&t, item.Val); err != nil {
- return multierror.Prefix(err, "telemetry:")
- }
-
- if result.Telemetry == nil {
- result.Telemetry = &Telemetry{}
- }
-
- if err := hcl.DecodeObject(&result.Telemetry, item.Val); err != nil {
- return multierror.Prefix(err, "telemetry:")
- }
- return nil
-}
-
-func checkHCLKeys(node ast.Node, valid []string) error {
- var list *ast.ObjectList
- switch n := node.(type) {
- case *ast.ObjectList:
- list = n
- case *ast.ObjectType:
- list = n.List
- default:
- return fmt.Errorf("cannot check HCL keys of type %T", n)
- }
-
- validMap := make(map[string]struct{}, len(valid))
- for _, v := range valid {
- validMap[v] = struct{}{}
- }
-
- var result error
- for _, item := range list.Items {
- key := item.Keys[0].Token.Value().(string)
- if _, ok := validMap[key]; !ok {
- result = multierror.Append(result, fmt.Errorf(
- "invalid key '%s' on line %d", key, item.Assign.Line))
- }
- }
-
- return result
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server/config_test.go b/vendor/github.com/hashicorp/vault/command/server/config_test.go
deleted file mode 100644
index bdc9128..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/config_test.go
+++ /dev/null
@@ -1,380 +0,0 @@
-package server
-
-import (
- "reflect"
- "strings"
- "testing"
- "time"
-
- "github.com/hashicorp/hcl"
- "github.com/hashicorp/hcl/hcl/ast"
- "github.com/hashicorp/vault/helper/logformat"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestLoadConfigFile(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- config, err := LoadConfigFile("./test-fixtures/config.hcl", logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := &Config{
- Listeners: []*Listener{
- &Listener{
- Type: "tcp",
- Config: map[string]interface{}{
- "address": "127.0.0.1:443",
- },
- },
- },
-
- Storage: &Storage{
- Type: "consul",
- RedirectAddr: "foo",
- Config: map[string]string{
- "foo": "bar",
- },
- },
-
- HAStorage: &Storage{
- Type: "consul",
- RedirectAddr: "snafu",
- Config: map[string]string{
- "bar": "baz",
- },
- DisableClustering: true,
- },
-
- Telemetry: &Telemetry{
- StatsdAddr: "bar",
- StatsiteAddr: "foo",
- DisableHostname: false,
- DogStatsDAddr: "127.0.0.1:7254",
- DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
- },
-
- DisableCache: true,
- DisableCacheRaw: true,
- DisableMlock: true,
- DisableMlockRaw: true,
- EnableUI: true,
- EnableUIRaw: true,
-
- EnableRawEndpoint: true,
- EnableRawEndpointRaw: true,
-
- MaxLeaseTTL: 10 * time.Hour,
- MaxLeaseTTLRaw: "10h",
- DefaultLeaseTTL: 10 * time.Hour,
- DefaultLeaseTTLRaw: "10h",
- ClusterName: "testcluster",
-
- PidFile: "./pidfile",
- }
- if !reflect.DeepEqual(config, expected) {
- t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
- }
-}
-
-func TestLoadConfigFile_json(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- config, err := LoadConfigFile("./test-fixtures/config.hcl.json", logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := &Config{
- Listeners: []*Listener{
- &Listener{
- Type: "tcp",
- Config: map[string]interface{}{
- "address": "127.0.0.1:443",
- },
- },
- },
-
- Storage: &Storage{
- Type: "consul",
- Config: map[string]string{
- "foo": "bar",
- },
- DisableClustering: true,
- },
-
- ClusterCipherSuites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
-
- Telemetry: &Telemetry{
- StatsiteAddr: "baz",
- StatsdAddr: "",
- DisableHostname: false,
- CirconusAPIToken: "",
- CirconusAPIApp: "",
- CirconusAPIURL: "",
- CirconusSubmissionInterval: "",
- CirconusCheckSubmissionURL: "",
- CirconusCheckID: "",
- CirconusCheckForceMetricActivation: "",
- CirconusCheckInstanceID: "",
- CirconusCheckSearchTag: "",
- CirconusCheckDisplayName: "",
- CirconusCheckTags: "",
- CirconusBrokerID: "",
- CirconusBrokerSelectTag: "",
- },
-
- MaxLeaseTTL: 10 * time.Hour,
- MaxLeaseTTLRaw: "10h",
- DefaultLeaseTTL: 10 * time.Hour,
- DefaultLeaseTTLRaw: "10h",
- ClusterName: "testcluster",
- DisableCacheRaw: interface{}(nil),
- DisableMlockRaw: interface{}(nil),
- EnableUI: true,
- EnableUIRaw: true,
- PidFile: "./pidfile",
- EnableRawEndpoint: true,
- EnableRawEndpointRaw: true,
- }
- if !reflect.DeepEqual(config, expected) {
- t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
- }
-}
-
-func TestLoadConfigFile_json2(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- config, err := LoadConfigFile("./test-fixtures/config2.hcl.json", logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := &Config{
- Listeners: []*Listener{
- &Listener{
- Type: "tcp",
- Config: map[string]interface{}{
- "address": "127.0.0.1:443",
- },
- },
- &Listener{
- Type: "tcp",
- Config: map[string]interface{}{
- "address": "127.0.0.1:444",
- },
- },
- },
-
- Storage: &Storage{
- Type: "consul",
- Config: map[string]string{
- "foo": "bar",
- },
- DisableClustering: true,
- },
-
- HAStorage: &Storage{
- Type: "consul",
- Config: map[string]string{
- "bar": "baz",
- },
- },
-
- CacheSize: 45678,
-
- EnableUI: true,
-
- EnableRawEndpoint: true,
-
- Telemetry: &Telemetry{
- StatsiteAddr: "foo",
- StatsdAddr: "bar",
- DisableHostname: true,
- CirconusAPIToken: "0",
- CirconusAPIApp: "vault",
- CirconusAPIURL: "http://api.circonus.com/v2",
- CirconusSubmissionInterval: "10s",
- CirconusCheckSubmissionURL: "https://someplace.com/metrics",
- CirconusCheckID: "0",
- CirconusCheckForceMetricActivation: "true",
- CirconusCheckInstanceID: "node1:vault",
- CirconusCheckSearchTag: "service:vault",
- CirconusCheckDisplayName: "node1:vault",
- CirconusCheckTags: "cat1:tag1,cat2:tag2",
- CirconusBrokerID: "0",
- CirconusBrokerSelectTag: "dc:sfo",
- },
- }
- if !reflect.DeepEqual(config, expected) {
- }
-}
-
-func TestLoadConfigDir(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- config, err := LoadConfigDir("./test-fixtures/config-dir", logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := &Config{
- DisableCache: true,
- DisableMlock: true,
-
- Listeners: []*Listener{
- &Listener{
- Type: "tcp",
- Config: map[string]interface{}{
- "address": "127.0.0.1:443",
- },
- },
- },
-
- Storage: &Storage{
- Type: "consul",
- Config: map[string]string{
- "foo": "bar",
- },
- DisableClustering: true,
- },
-
- EnableUI: true,
-
- EnableRawEndpoint: true,
-
- Telemetry: &Telemetry{
- StatsiteAddr: "qux",
- StatsdAddr: "baz",
- DisableHostname: true,
- },
-
- MaxLeaseTTL: 10 * time.Hour,
- DefaultLeaseTTL: 10 * time.Hour,
- ClusterName: "testcluster",
- }
- if !reflect.DeepEqual(config, expected) {
- t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
- }
-}
-
-func TestParseListeners(t *testing.T) {
- obj, _ := hcl.Parse(strings.TrimSpace(`
-listener "tcp" {
- address = "127.0.0.1:443"
- cluster_address = "127.0.0.1:8201"
- tls_disable = false
- tls_cert_file = "./certs/server.crt"
- tls_key_file = "./certs/server.key"
- tls_client_ca_file = "./certs/rootca.crt"
- tls_min_version = "tls12"
- tls_require_and_verify_client_cert = true
-}`))
-
- var config Config
- list, _ := obj.Node.(*ast.ObjectList)
- objList := list.Filter("listener")
- parseListeners(&config, objList)
- listeners := config.Listeners
- if len(listeners) == 0 {
- t.Fatalf("expected at least one listener in the config")
- }
- listener := listeners[0]
- if listener.Type != "tcp" {
- t.Fatalf("expected tcp listener in the config")
- }
-
- expected := &Config{
- Listeners: []*Listener{
- &Listener{
- Type: "tcp",
- Config: map[string]interface{}{
- "address": "127.0.0.1:443",
- "cluster_address": "127.0.0.1:8201",
- "tls_disable": false,
- "tls_cert_file": "./certs/server.crt",
- "tls_key_file": "./certs/server.key",
- "tls_client_ca_file": "./certs/rootca.crt",
- "tls_min_version": "tls12",
- "tls_require_and_verify_client_cert": true,
- },
- },
- },
- }
-
- if !reflect.DeepEqual(config, *expected) {
- t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, *expected)
- }
-
-}
-
-func TestParseConfig_badTopLevel(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- _, err := ParseConfig(strings.TrimSpace(`
-backend {}
-bad = "one"
-nope = "yes"
-`), logger)
-
- if err == nil {
- t.Fatal("expected error")
- }
-
- if !strings.Contains(err.Error(), "invalid key 'bad' on line 2") {
- t.Errorf("bad error: %q", err)
- }
-
- if !strings.Contains(err.Error(), "invalid key 'nope' on line 3") {
- t.Errorf("bad error: %q", err)
- }
-}
-
-func TestParseConfig_badListener(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- _, err := ParseConfig(strings.TrimSpace(`
-listener "tcp" {
- address = "1.2.3.3"
- bad = "one"
- nope = "yes"
-}
-`), logger)
-
- if err == nil {
- t.Fatal("expected error")
- }
-
- if !strings.Contains(err.Error(), "listeners.tcp: invalid key 'bad' on line 3") {
- t.Errorf("bad error: %q", err)
- }
-
- if !strings.Contains(err.Error(), "listeners.tcp: invalid key 'nope' on line 4") {
- t.Errorf("bad error: %q", err)
- }
-}
-
-func TestParseConfig_badTelemetry(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- _, err := ParseConfig(strings.TrimSpace(`
-telemetry {
- statsd_address = "1.2.3.3"
- bad = "one"
- nope = "yes"
-}
-`), logger)
-
- if err == nil {
- t.Fatal("expected error")
- }
-
- if !strings.Contains(err.Error(), "telemetry: invalid key 'bad' on line 3") {
- t.Errorf("bad error: %q", err)
- }
-
- if !strings.Contains(err.Error(), "telemetry: invalid key 'nope' on line 4") {
- t.Errorf("bad error: %q", err)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener.go b/vendor/github.com/hashicorp/vault/command/server/listener.go
deleted file mode 100644
index 4f9aedf..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/listener.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package server
-
-import (
- // We must import sha512 so that it registers with the runtime so that
- // certificates that use it can be parsed.
- _ "crypto/sha512"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io"
- "io/ioutil"
- "net"
-
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/proxyutil"
- "github.com/hashicorp/vault/helper/reload"
- "github.com/hashicorp/vault/helper/tlsutil"
-)
-
-// ListenerFactory is the factory function to create a listener.
-type ListenerFactory func(map[string]interface{}, io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error)
-
-// BuiltinListeners is the list of built-in listener types.
-var BuiltinListeners = map[string]ListenerFactory{
- "tcp": tcpListenerFactory,
-}
-
-// NewListener creates a new listener of the given type with the given
-// configuration. The type is looked up in the BuiltinListeners map.
-func NewListener(t string, config map[string]interface{}, logger io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error) {
- f, ok := BuiltinListeners[t]
- if !ok {
- return nil, nil, nil, fmt.Errorf("unknown listener type: %s", t)
- }
-
- return f(config, logger)
-}
-
-func listenerWrapProxy(ln net.Listener, config map[string]interface{}) (net.Listener, error) {
- behaviorRaw, ok := config["proxy_protocol_behavior"]
- if !ok {
- return ln, nil
- }
-
- behavior, ok := behaviorRaw.(string)
- if !ok {
- return nil, fmt.Errorf("failed parsing proxy_protocol_behavior value: not a string")
- }
-
- authorizedAddrsRaw, ok := config["proxy_protocol_authorized_addrs"]
- if !ok {
- return nil, fmt.Errorf("proxy_protocol_behavior set but no proxy_protocol_authorized_addrs value")
- }
-
- proxyProtoConfig := &proxyutil.ProxyProtoConfig{
- Behavior: behavior,
- }
- if err := proxyProtoConfig.SetAuthorizedAddrs(authorizedAddrsRaw); err != nil {
- return nil, fmt.Errorf("failed parsing proxy_protocol_authorized_addrs: %v", err)
- }
-
- newLn, err := proxyutil.WrapInProxyProto(ln, proxyProtoConfig)
- if err != nil {
- return nil, fmt.Errorf("failed configuring PROXY protocol wrapper: %s", err)
- }
-
- return newLn, nil
-}
-
-func listenerWrapTLS(
- ln net.Listener,
- props map[string]string,
- config map[string]interface{}) (net.Listener, map[string]string, reload.ReloadFunc, error) {
- props["tls"] = "disabled"
-
- if v, ok := config["tls_disable"]; ok {
- disabled, err := parseutil.ParseBool(v)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("invalid value for 'tls_disable': %v", err)
- }
- if disabled {
- return ln, props, nil, nil
- }
- }
-
- _, ok := config["tls_cert_file"]
- if !ok {
- return nil, nil, nil, fmt.Errorf("'tls_cert_file' must be set")
- }
-
- _, ok = config["tls_key_file"]
- if !ok {
- return nil, nil, nil, fmt.Errorf("'tls_key_file' must be set")
- }
-
- cg := reload.NewCertificateGetter(config["tls_cert_file"].(string), config["tls_key_file"].(string))
-
- if err := cg.Reload(config); err != nil {
- return nil, nil, nil, fmt.Errorf("error loading TLS cert: %s", err)
- }
-
- var tlsvers string
- tlsversRaw, ok := config["tls_min_version"]
- if !ok {
- tlsvers = "tls12"
- } else {
- tlsvers = tlsversRaw.(string)
- }
-
- tlsConf := &tls.Config{}
- tlsConf.GetCertificate = cg.GetCertificate
- tlsConf.NextProtos = []string{"h2", "http/1.1"}
- tlsConf.MinVersion, ok = tlsutil.TLSLookup[tlsvers]
- if !ok {
- return nil, nil, nil, fmt.Errorf("'tls_min_version' value %s not supported, please specify one of [tls10,tls11,tls12]", tlsvers)
- }
- tlsConf.ClientAuth = tls.RequestClientCert
-
- if v, ok := config["tls_cipher_suites"]; ok {
- ciphers, err := tlsutil.ParseCiphers(v.(string))
- if err != nil {
- return nil, nil, nil, fmt.Errorf("invalid value for 'tls_cipher_suites': %v", err)
- }
- tlsConf.CipherSuites = ciphers
- }
- if v, ok := config["tls_prefer_server_cipher_suites"]; ok {
- preferServer, err := parseutil.ParseBool(v)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("invalid value for 'tls_prefer_server_cipher_suites': %v", err)
- }
- tlsConf.PreferServerCipherSuites = preferServer
- }
- if v, ok := config["tls_require_and_verify_client_cert"]; ok {
- requireClient, err := parseutil.ParseBool(v)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("invalid value for 'tls_require_and_verify_client_cert': %v", err)
- }
- if requireClient {
- tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
- }
- if tlsClientCaFile, ok := config["tls_client_ca_file"]; ok {
- caPool := x509.NewCertPool()
- data, err := ioutil.ReadFile(tlsClientCaFile.(string))
- if err != nil {
- return nil, nil, nil, fmt.Errorf("failed to read tls_client_ca_file: %v", err)
- }
-
- if !caPool.AppendCertsFromPEM(data) {
- return nil, nil, nil, fmt.Errorf("failed to parse CA certificate in tls_client_ca_file")
- }
- tlsConf.ClientCAs = caPool
- }
- }
-
- ln = tls.NewListener(ln, tlsConf)
- props["tls"] = "enabled"
- return ln, props, cg.Reload, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go b/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go
deleted file mode 100644
index b0ab687..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package server
-
-import (
- "io"
- "net"
- "strings"
- "time"
-
- "github.com/hashicorp/vault/helper/reload"
-)
-
-func tcpListenerFactory(config map[string]interface{}, _ io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error) {
- bind_proto := "tcp"
- var addr string
- addrRaw, ok := config["address"]
- if !ok {
- addr = "127.0.0.1:8200"
- } else {
- addr = addrRaw.(string)
- }
-
- // If they've passed 0.0.0.0, we only want to bind on IPv4
- // rather than golang's dual stack default
- if strings.HasPrefix(addr, "0.0.0.0:") {
- bind_proto = "tcp4"
- }
-
- ln, err := net.Listen(bind_proto, addr)
- if err != nil {
- return nil, nil, nil, err
- }
-
- ln = tcpKeepAliveListener{ln.(*net.TCPListener)}
-
- ln, err = listenerWrapProxy(ln, config)
- if err != nil {
- return nil, nil, nil, err
- }
-
- props := map[string]string{"addr": addr}
- return listenerWrapTLS(ln, props, config)
-}
-
-// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
-// connections. It's used by ListenAndServe and ListenAndServeTLS so
-// dead TCP connections (e.g. closing laptop mid-download) eventually
-// go away.
-//
-// This is copied directly from the Go source code.
-type tcpKeepAliveListener struct {
- *net.TCPListener
-}
-
-func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
- tc, err := ln.AcceptTCP()
- if err != nil {
- return
- }
- tc.SetKeepAlive(true)
- tc.SetKeepAlivePeriod(3 * time.Minute)
- return tc, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go b/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go
deleted file mode 100644
index 4da12b3..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package server
-
-import (
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "math/rand"
- "net"
- "os"
- "testing"
- "time"
-)
-
-func TestTCPListener(t *testing.T) {
- ln, _, _, err := tcpListenerFactory(map[string]interface{}{
- "address": "127.0.0.1:0",
- "tls_disable": "1",
- }, nil)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- connFn := func(lnReal net.Listener) (net.Conn, error) {
- return net.Dial("tcp", ln.Addr().String())
- }
-
- testListenerImpl(t, ln, connFn, "")
-}
-
-// TestTCPListener_tls tests both TLS generally and also the reload capability
-// of core, system backend, and the listener logic
-func TestTCPListener_tls(t *testing.T) {
- wd, _ := os.Getwd()
- wd += "/test-fixtures/reload/"
-
- td, err := ioutil.TempDir("", fmt.Sprintf("vault-test-%d", rand.New(rand.NewSource(time.Now().Unix())).Int63))
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(td)
-
- // Setup initial certs
- inBytes, _ := ioutil.ReadFile(wd + "reload_ca.pem")
- certPool := x509.NewCertPool()
- ok := certPool.AppendCertsFromPEM(inBytes)
- if !ok {
- t.Fatal("not ok when appending CA cert")
- }
-
- ln, _, _, err := tcpListenerFactory(map[string]interface{}{
- "address": "127.0.0.1:0",
- "tls_cert_file": wd + "reload_foo.pem",
- "tls_key_file": wd + "reload_foo.key",
- "tls_require_and_verify_client_cert": "true",
- "tls_client_ca_file": wd + "reload_ca.pem",
- }, nil)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- cwd, _ := os.Getwd()
-
- clientCert, _ := tls.LoadX509KeyPair(
- cwd+"/test-fixtures/reload/reload_foo.pem",
- cwd+"/test-fixtures/reload/reload_foo.key")
-
- connFn := func(lnReal net.Listener) (net.Conn, error) {
- conn, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{
- RootCAs: certPool,
- Certificates: []tls.Certificate{clientCert},
- })
-
- if err != nil {
- return nil, err
- }
- if err = conn.Handshake(); err != nil {
- return nil, err
- }
- return conn, nil
- }
-
- testListenerImpl(t, ln, connFn, "foo.example.com")
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_test.go b/vendor/github.com/hashicorp/vault/command/server/listener_test.go
deleted file mode 100644
index e7fb4d3..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/listener_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package server
-
-import (
- "bytes"
- "crypto/tls"
- "io"
- "net"
- "testing"
-)
-
-type testListenerConnFn func(net.Listener) (net.Conn, error)
-
-func testListenerImpl(t *testing.T, ln net.Listener, connFn testListenerConnFn, certName string) {
- serverCh := make(chan net.Conn, 1)
- go func() {
- server, err := ln.Accept()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if certName != "" {
- tlsConn := server.(*tls.Conn)
- tlsConn.Handshake()
- }
- serverCh <- server
- }()
-
- client, err := connFn(ln)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if certName != "" {
- tlsConn := client.(*tls.Conn)
- if len(tlsConn.ConnectionState().PeerCertificates) != 1 {
- t.Fatalf("err: number of certs too long")
- }
- peerName := tlsConn.ConnectionState().PeerCertificates[0].Subject.CommonName
- if peerName != certName {
- t.Fatalf("err: bad cert name %s, expected %s", peerName, certName)
- }
- }
-
- server := <-serverCh
- defer client.Close()
- defer server.Close()
-
- var buf bytes.Buffer
- copyCh := make(chan struct{})
- go func() {
- io.Copy(&buf, server)
- close(copyCh)
- }()
-
- if _, err := client.Write([]byte("foo")); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- client.Close()
-
- <-copyCh
- if buf.String() != "foo" {
- t.Fatalf("bad: %v", buf.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config-dir/bar.json b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config-dir/bar.json
deleted file mode 100644
index 16055e8..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config-dir/bar.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "ui":false,
-
- "listener": {
- "tcp": {
- "address": "127.0.0.1:443"
- }
- },
-
- "max_lease_ttl": "10h"
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json
deleted file mode 100644
index 918af56..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "listener": [{
- "tcp": {
- "address": "127.0.0.1:443"
- }
- }],
- "cluster_cipher_suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
- "storage": {
- "consul": {
- "foo": "bar",
- "disable_clustering": "true"
- }
- },
- "telemetry": {
- "statsite_address": "baz"
- },
- "max_lease_ttl": "10h",
- "default_lease_ttl": "10h",
- "cluster_name":"testcluster",
- "ui":true,
- "pid_file":"./pidfile",
- "raw_storage_endpoint":true
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json
deleted file mode 100644
index e1eb73e..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json
+++ /dev/null
@@ -1,46 +0,0 @@
-{
- "ui":true,
- "raw_storage_endpoint":true,
- "listener":[
- {
- "tcp":{
- "address":"127.0.0.1:443"
- }
- },
- {
- "tcp":{
- "address":"127.0.0.1:444"
- }
- }
- ],
- "storage":{
- "consul":{
- "foo":"bar"
- }
- },
- "ha_storage":{
- "consul":{
- "bar":"baz",
- "disable_clustering": "true"
- }
- },
- "cache_size": 45678,
- "telemetry":{
- "statsd_address":"bar",
- "statsite_address":"foo",
- "disable_hostname":true,
- "circonus_api_token": "0",
- "circonus_api_app": "vault",
- "circonus_api_url": "http://api.circonus.com/v2",
- "circonus_submission_interval": "10s",
- "circonus_submission_url": "https://someplace.com/metrics",
- "circonus_check_id": "0",
- "circonus_check_force_metric_activation": "true",
- "circonus_check_instance_id": "node1:vault",
- "circonus_check_search_tag": "service:vault",
- "circonus_check_display_name": "node1:vault",
- "circonus_check_tags": "cat1:tag1,cat2:tag2",
- "circonus_broker_id": "0",
- "circonus_broker_select_tag": "dc:sfo"
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.key b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.key
deleted file mode 100644
index 10849fb..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAwF7sRAyUiLcd6es6VeaTRUBOusFFGkmKJ5lU351waCJqXFju
-Z6i/SQYNAAnnRgotXSTE1fIPjE2kZNH1hvqE5IpTGgAwy50xpjJrrBBI6e9lyKqj
-7T8gLVNBvtC0cpQi+pGrszEI0ckDQCSZHqi/PAzcpmLUgh2KMrgagT+YlN35KHtl
-/bQ/Fsn+kqykVqNw69n/CDKNKdDHn1qPwiX9q/fTMj3EG6g+3ntKrUOh8V/gHKPz
-q8QGP/wIud2K+tTSorVXr/4zx7xgzlbJkCakzcQQiP6K+paPnDRlE8fK+1gRRyR7
-XCzyp0irUl8G1NjYAR/tVWxiUhlk/jZutb8PpwIDAQABAoIBAEOzJELuindyujxQ
-ZD9G3h1I/GwNCFyv9Mbq10u7BIwhUH0fbwdcA7WXQ4v38ERd4IkfH4aLoZ0m1ewF
-V/sgvxQO+h/0YTfHImny5KGxOXfaoF92bipYROKuojydBmQsbgLwsRRm9UufCl3Q
-g3KewG5JuH112oPQEYq379v8nZ4FxC3Ano1OFBTm9UhHIAX1Dn22kcHOIIw8jCsQ
-zp7TZOW+nwtkS41cBwhvV4VIeL6yse2UgbOfRVRwI7B0OtswS5VgW3wysO2mTDKt
-V/WCmeht1il/6ZogEHgi/mvDCKpj20wQ1EzGnPdFLdiFJFylf0oufQD/7N/uezbC
-is0qJEECgYEA3AE7SeLpe3SZApj2RmE2lcD9/Saj1Y30PznxB7M7hK0sZ1yXEbtS
-Qf894iDDD/Cn3ufA4xk/K52CXgAcqvH/h2geG4pWLYsT1mdWhGftprtOMCIvJvzU
-8uWJzKdOGVMG7R59wNgEpPDZDpBISjexwQsFo3aw1L/H1/Sa8cdY3a0CgYEA39hB
-1oLmGRyE32Q4GF/srG4FqKL1EsbISGDUEYTnaYg2XiM43gu3tC/ikfclk27Jwc2L
-m7cA5FxxaEyfoOgfAizfU/uWTAbx9GoXgWsO0hWSN9+YNq61gc5WKoHyrJ/rfrti
-y5d7k0OCeBxckLqGDuJqICQ0myiz0El6FU8h5SMCgYEAuhigmiNC9JbwRu40g9v/
-XDVfox9oPmBRVpogdC78DYKeqN/9OZaGQiUxp3GnDni2xyqqUm8srCwT9oeJuF/z
-kgpUTV96/hNCuH25BU8UC5Es1jJUSFpdlwjqwx5SRcGhfjnojZMseojwUg1h2MW7
-qls0bc0cTxnaZaYW2qWRWhECgYBrT0cwyQv6GdvxJCBoPwQ9HXmFAKowWC+H0zOX
-Onmd8/jsZEJM4J0uuo4Jn8vZxBDg4eL9wVuiHlcXwzP7dYv4BP8DSechh2rS21Ft
-b59pQ4IXWw+jl1nYYsyYEDgAXaIN3VNder95N7ICVsZhc6n01MI/qlu1zmt1fOQT
-9x2utQKBgHI9SbsfWfbGiu6oLS3+9V1t4dORhj8D8b7z3trvECrD6tPhxoZqtfrH
-4apKr3OKRSXk3K+1K6pkMHJHunspucnA1ChXLhzfNF08BSRJkQDGYuaRLS6VGgab
-JZTl54bGvO1GkszEBE/9QFcqNVtWGMWXnUPwNNv8t//yJT5rvQil
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.pem b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.pem
deleted file mode 100644
index a8217be..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_bar.pem
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDQzCCAiugAwIBAgIULLCz3mZKmg2xy3rWCud0f1zcmBwwDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjQ0WhcNMzYw
-MzA1MDEzNzE0WjAaMRgwFgYDVQQDEw9iYXIuZXhhbXBsZS5jb20wggEiMA0GCSqG
-SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAXuxEDJSItx3p6zpV5pNFQE66wUUaSYon
-mVTfnXBoImpcWO5nqL9JBg0ACedGCi1dJMTV8g+MTaRk0fWG+oTkilMaADDLnTGm
-MmusEEjp72XIqqPtPyAtU0G+0LRylCL6kauzMQjRyQNAJJkeqL88DNymYtSCHYoy
-uBqBP5iU3fkoe2X9tD8Wyf6SrKRWo3Dr2f8IMo0p0MefWo/CJf2r99MyPcQbqD7e
-e0qtQ6HxX+Aco/OrxAY//Ai53Yr61NKitVev/jPHvGDOVsmQJqTNxBCI/or6lo+c
-NGUTx8r7WBFHJHtcLPKnSKtSXwbU2NgBH+1VbGJSGWT+Nm61vw+nAgMBAAGjgYQw
-gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSVoF8F
-7qbzSryIFrldurAG78LvSjAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl
-vzAgBgNVHREEGTAXgg9iYXIuZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL
-BQADggEBAGmz2N282iT2IaEZvOmzIE4znHGkvoxZmrr/2byq5PskBg9ysyCHfUvw
-SFA8U7jWjezKTnGRUu5blB+yZdjrMtB4AePWyEqtkJwVsZ2SPeP+9V2gNYK4iktP
-UF3aIgBbAbw8rNuGIIB0T4D+6Zyo9Y3MCygs6/N4bRPZgLhewWn1ilklfnl3eqaC
-a+JY1NBuTgCMa28NuC+Hy3mCveqhI8tFNiOthlLdgAEbuQaOuNutAG73utZ2aq6Q
-W4pajFm3lEf5zt7Lo6ZCFtY/Q8jjURJ9e4O7VjXcqIhBM5bSMI6+fgQyOH0SLboj
-RNanJ2bcyF1iPVyPBGzV3dF0ngYzxEY=
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_ca.pem b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_ca.pem
deleted file mode 100644
index 72a7444..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_ca.pem
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDNTCCAh2gAwIBAgIUBeVo+Ce2BrdRT1cogKvJLtdOky8wDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNTM4WhcNMzYw
-MzA1MDIzNjA4WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAPTQGWPRIOECGeJB6tR/ftvvtioC9f84fY2QdJ5k
-JBupXjPAGYKgS4MGzyT5bz9yY400tCtmh6h7p9tZwHl/TElTugtLQ/8ilMbJTiOM
-SiyaMDPHiMJJYKTjm9bu6bKeU1qPZ0Cryes4rygbqs7w2XPgA2RxNmDh7JdX7/h+
-VB5onBmv8g4WFSayowGyDcJWWCbu5yv6ZdH1bqQjgRzQ5xp17WXNmvlzdp2vate/
-9UqPdA8sdJzW/91Gvmros0o/FnG7c2pULhk22wFqO8t2HRjKb3nuxALEJvqoPvad
-KjpDTaq1L1ZzxcB7wvWyhy/lNLZL7jiNWy0mN1YB0UpSWdECAwEAAaN7MHkwDgYD
-VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHMM2+oX9Orb
-U6BazXcHljJ1mOW/MB8GA1UdIwQYMBaAFHMM2+oX9OrbU6BazXcHljJ1mOW/MBYG
-A1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQAp17XsOaT9
-hculRqrFptn3+zkH3HrIckHm+28R5xYT8ASFXFcLFugGizJAXVL5lvsRVRIwCoOX
-Nhi8XSNEFP640VbHcEl81I84bbRIIDS+Yheu6JDZGemTaDYLv1J3D5SHwgoM+nyf
-oTRgotUCIXcwJHmTpWEUkZFKuqBxsoTGzk0jO8wOP6xoJkzxVVG5PvNxs924rxY8
-Y8iaLdDfMeT7Pi0XIliBa/aSp/iqSW8XKyJl5R5vXg9+DOgZUrVzIxObaF5RBl/a
-mJOeklJBdNVzQm5+iMpO42lu0TA9eWtpP+YiUEXU17XDvFeQWOocFbQ1Peo0W895
-XRz2GCwCNyvW
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.key b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.key
deleted file mode 100644
index 86e6cce..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpgIBAAKCAQEAzNyVieSti9XBb5/celB5u8YKRJv3mQS9A4/X0mqY1ePznt1i
-ilG7OmG0yM2VAk0ceIAQac3Bsn74jxn2cDlrrVniPXcNgYtMtW0kRqNEo4doo4EX
-xZguS9vNBu29useHhif1TGX/pA3dgvaVycUCjzTEVk6qI8UEehMK6gEGZb7nOr0A
-A9nipSqoeHpDLe3a4KVqj1vtlJKUvD2i1MuBuQ130cB1K9rufLCShGu7mEgzEosc
-gr+K3Bf03IejbeVRyIfLtgj1zuvV1katec75UqRA/bsvt5G9JfJqiZ9mwFN0vp3g
-Cr7pdQBSBQ2q4yf9s8CuY5c5w9fl3F8f5QFQoQIDAQABAoIBAQCbCb1qNFRa5ZSV
-I8i6ELlwMDqJHfhOJ9XcIjpVljLAfNlcu3Ld92jYkCU/asaAjVckotbJG9yhd5Io
-yp9E40/oS4P6vGTOS1vsWgMAKoPBtrKsOwCAm+E9q8UIn1fdSS/5ibgM74x+3bds
-a62Em8KKGocUQkhk9a+jq1GxMsFisbHRxEHvClLmDMgGnW3FyGmWwT6yZLPSC0ey
-szmmjt3ouP8cLAOmSjzcQBMmEZpQMCgR6Qckg6nrLQAGzZyTdCd875wbGA57DpWX
-Lssn95+A5EFvr/6b7DkXeIFCrYBFFa+UQN3PWGEQ6Zjmiw4VgV2vO8yX2kCLlUhU
-02bL393ZAoGBAPXPD/0yWINbKUPcRlx/WfWQxfz0bu50ytwIXzVK+pRoAMuNqehK
-BJ6kNzTTBq40u+IZ4f5jbLDulymR+4zSkirLE7CyWFJOLNI/8K4Pf5DJUgNdrZjJ
-LCtP9XRdxiPatQF0NGfdgHlSJh+/CiRJP4AgB17AnB/4z9/M0ZlJGVrzAoGBANVa
-69P3Rp/WPBQv0wx6f0tWppJolWekAHKcDIdQ5HdOZE5CPAYSlTrTUW3uJuqMwU2L
-M0Er2gIPKWIR5X+9r7Fvu9hQW6l2v3xLlcrGPiapp3STJvuMxzhRAmXmu3bZfVn1
-Vn7Vf1jPULHtTFSlNFEvYG5UJmygK9BeyyVO5KMbAoGBAMCyAibLQPg4jrDUDZSV
-gUAwrgUO2ae1hxHWvkxY6vdMUNNByuB+pgB3W4/dnm8Sh/dHsxJpftt1Lqs39ar/
-p/ZEHLt4FCTxg9GOrm7FV4t5RwG8fko36phJpnIC0UFqQltRbYO+8OgqrhhU+u5X
-PaCDe0OcWsf1lYAsYGN6GpZhAoGBAMJ5Ksa9+YEODRs1cIFKUyd/5ztC2xRqOAI/
-3WemQ2nAacuvsfizDZVeMzYpww0+maAuBt0btI719PmwaGmkpDXvK+EDdlmkpOwO
-FY6MXvBs6fdnfjwCWUErDi2GQFAX9Jt/9oSL5JU1+08DhvUM1QA/V/2Y9KFE6kr3
-bOIn5F4LAoGBAKQzH/AThDGhT3hwr4ktmReF3qKxBgxzjVa8veXtkY5VWwyN09iT
-jnTTt6N1CchZoK5WCETjdzNYP7cuBTcV4d3bPNRiJmxXaNVvx3Tlrk98OiffT8Qa
-5DO/Wfb43rNHYXBjU6l0n2zWcQ4PUSSbu0P0bM2JTQPRCqSthXvSHw2P
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.pem b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.pem
deleted file mode 100644
index c8b868b..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/reload/reload_foo.pem
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDQzCCAiugAwIBAgIUFVW6i/M+yJUsDrXWgRKO/Dnb+L4wDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjA1WhcNMzYw
-MzA1MDEzNjM1WjAaMRgwFgYDVQQDEw9mb28uZXhhbXBsZS5jb20wggEiMA0GCSqG
-SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDM3JWJ5K2L1cFvn9x6UHm7xgpEm/eZBL0D
-j9fSapjV4/Oe3WKKUbs6YbTIzZUCTRx4gBBpzcGyfviPGfZwOWutWeI9dw2Bi0y1
-bSRGo0Sjh2ijgRfFmC5L280G7b26x4eGJ/VMZf+kDd2C9pXJxQKPNMRWTqojxQR6
-EwrqAQZlvuc6vQAD2eKlKqh4ekMt7drgpWqPW+2UkpS8PaLUy4G5DXfRwHUr2u58
-sJKEa7uYSDMSixyCv4rcF/Tch6Nt5VHIh8u2CPXO69XWRq15zvlSpED9uy+3kb0l
-8mqJn2bAU3S+neAKvul1AFIFDarjJ/2zwK5jlznD1+XcXx/lAVChAgMBAAGjgYQw
-gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRNJoOJ
-dnazDiuqLhV6truQ4cRe9jAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl
-vzAgBgNVHREEGTAXgg9mb28uZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL
-BQADggEBAHzv67mtbxMWcuMsxCFBN1PJNAyUDZVCB+1gWhk59EySbVg81hWJDCBy
-fl3TKjz3i7wBGAv+C2iTxmwsSJbda22v8JQbuscXIfLFbNALsPzF+J0vxAgJs5Gc
-sDbfJ7EQOIIOVKQhHLYnQoLnigSSPc1kd0JjYyHEBjgIaSuXgRRTBAeqLiBMx0yh
-RKL1lQ+WoBU/9SXUZZkwokqWt5G7khi5qZkNxVXZCm8VGPg0iywf6gGyhI1SU5S2
-oR219S6kA4JY/stw1qne85/EmHmoImHGt08xex3GoU72jKAjsIpqRWopcD/+uene
-Tc9nn3fTQW/Z9fsoJ5iF5OdJnDEswqE=
------END CERTIFICATE-----
diff --git a/vendor/github.com/hashicorp/vault/command/server_ha_test.go b/vendor/github.com/hashicorp/vault/command/server_ha_test.go
deleted file mode 100644
index a9b1188..0000000
--- a/vendor/github.com/hashicorp/vault/command/server_ha_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// +build !race
-
-package command
-
-import (
- "io/ioutil"
- "os"
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/physical"
- "github.com/mitchellh/cli"
-
- physConsul "github.com/hashicorp/vault/physical/consul"
-)
-
-// The following tests have a go-metrics/exp manager race condition
-func TestServer_CommonHA(t *testing.T) {
- ui := new(cli.MockUi)
- c := &ServerCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- PhysicalBackends: map[string]physical.Factory{
- "consul": physConsul.NewConsulBackend,
- },
- }
-
- tmpfile, err := ioutil.TempFile("", "")
- if err != nil {
- t.Fatalf("error creating temp dir: %v", err)
- }
-
- tmpfile.WriteString(basehcl + consulhcl)
- tmpfile.Close()
- defer os.Remove(tmpfile.Name())
-
- args := []string{"-config", tmpfile.Name(), "-verify-only", "true"}
-
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s\n\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String())
- }
-
- if !strings.Contains(ui.OutputWriter.String(), "(HA available)") {
- t.Fatalf("did not find HA available: %s", ui.OutputWriter.String())
- }
-}
-
-func TestServer_GoodSeparateHA(t *testing.T) {
- ui := new(cli.MockUi)
- c := &ServerCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- PhysicalBackends: map[string]physical.Factory{
- "consul": physConsul.NewConsulBackend,
- },
- }
-
- tmpfile, err := ioutil.TempFile("", "")
- if err != nil {
- t.Fatalf("error creating temp dir: %v", err)
- }
-
- tmpfile.WriteString(basehcl + consulhcl + haconsulhcl)
- tmpfile.Close()
- defer os.Remove(tmpfile.Name())
-
- args := []string{"-config", tmpfile.Name(), "-verify-only", "true"}
-
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s\n\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String())
- }
-
- if !strings.Contains(ui.OutputWriter.String(), "HA Storage:") {
- t.Fatalf("did not find HA Storage: %s", ui.OutputWriter.String())
- }
-}
-
-func TestServer_BadSeparateHA(t *testing.T) {
- ui := new(cli.MockUi)
- c := &ServerCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- PhysicalBackends: map[string]physical.Factory{
- "consul": physConsul.NewConsulBackend,
- },
- }
-
- tmpfile, err := ioutil.TempFile("", "")
- if err != nil {
- t.Fatalf("error creating temp dir: %v", err)
- }
-
- tmpfile.WriteString(basehcl + consulhcl + badhaconsulhcl)
- tmpfile.Close()
- defer os.Remove(tmpfile.Name())
-
- args := []string{"-config", tmpfile.Name()}
-
- if code := c.Run(args); code == 0 {
- t.Fatalf("bad: should have gotten an error on a bad HA config")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server_test.go b/vendor/github.com/hashicorp/vault/command/server_test.go
deleted file mode 100644
index 9a90239..0000000
--- a/vendor/github.com/hashicorp/vault/command/server_test.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// +build !race
-
-package command
-
-import (
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "math/rand"
- "os"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/physical"
- "github.com/mitchellh/cli"
-
- physFile "github.com/hashicorp/vault/physical/file"
-)
-
-var (
- basehcl = `
-disable_mlock = true
-
-listener "tcp" {
- address = "127.0.0.1:8200"
- tls_disable = "true"
-}
-`
-
- consulhcl = `
-backend "consul" {
- prefix = "foo/"
- advertise_addr = "http://127.0.0.1:8200"
- disable_registration = "true"
-}
-`
- haconsulhcl = `
-ha_backend "consul" {
- prefix = "bar/"
- redirect_addr = "http://127.0.0.1:8200"
- disable_registration = "true"
-}
-`
-
- badhaconsulhcl = `
-ha_backend "file" {
- path = "/dev/null"
-}
-`
-
- reloadhcl = `
-backend "file" {
- path = "/dev/null"
-}
-
-disable_mlock = true
-
-listener "tcp" {
- address = "127.0.0.1:8203"
- tls_cert_file = "TMPDIR/reload_cert.pem"
- tls_key_file = "TMPDIR/reload_key.pem"
-}
-`
-)
-
-// The following tests have a go-metrics/exp manager race condition
-func TestServer_ReloadListener(t *testing.T) {
- wd, _ := os.Getwd()
- wd += "/server/test-fixtures/reload/"
-
- td, err := ioutil.TempDir("", fmt.Sprintf("vault-test-%d", rand.New(rand.NewSource(time.Now().Unix())).Int63))
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(td)
-
- wg := &sync.WaitGroup{}
-
- // Setup initial certs
- inBytes, _ := ioutil.ReadFile(wd + "reload_foo.pem")
- ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0777)
- inBytes, _ = ioutil.ReadFile(wd + "reload_foo.key")
- ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0777)
-
- relhcl := strings.Replace(reloadhcl, "TMPDIR", td, -1)
- ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777)
-
- inBytes, _ = ioutil.ReadFile(wd + "reload_ca.pem")
- certPool := x509.NewCertPool()
- ok := certPool.AppendCertsFromPEM(inBytes)
- if !ok {
- t.Fatal("not ok when appending CA cert")
- }
-
- ui := new(cli.MockUi)
- c := &ServerCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- ShutdownCh: MakeShutdownCh(),
- SighupCh: MakeSighupCh(),
- PhysicalBackends: map[string]physical.Factory{
- "file": physFile.NewFileBackend,
- },
- }
-
- finished := false
- finishedMutex := sync.Mutex{}
-
- wg.Add(1)
- args := []string{"-config", td + "/reload.hcl"}
- go func() {
- if code := c.Run(args); code != 0 {
- t.Error("got a non-zero exit status")
- }
- finishedMutex.Lock()
- finished = true
- finishedMutex.Unlock()
- wg.Done()
- }()
-
- checkFinished := func() {
- finishedMutex.Lock()
- if finished {
- t.Fatalf(fmt.Sprintf("finished early; relhcl was\n%s\nstdout was\n%s\nstderr was\n%s\n", relhcl, ui.OutputWriter.String(), ui.ErrorWriter.String()))
- }
- finishedMutex.Unlock()
- }
-
- testCertificateName := func(cn string) error {
- conn, err := tls.Dial("tcp", "127.0.0.1:8203", &tls.Config{
- RootCAs: certPool,
- })
- if err != nil {
- return err
- }
- defer conn.Close()
- if err = conn.Handshake(); err != nil {
- return err
- }
- servName := conn.ConnectionState().PeerCertificates[0].Subject.CommonName
- if servName != cn {
- return fmt.Errorf("expected %s, got %s", cn, servName)
- }
- return nil
- }
-
- checkFinished()
- time.Sleep(5 * time.Second)
- checkFinished()
-
- if err := testCertificateName("foo.example.com"); err != nil {
- t.Fatalf("certificate name didn't check out: %s", err)
- }
-
- relhcl = strings.Replace(reloadhcl, "TMPDIR", td, -1)
- inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem")
- ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0777)
- inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key")
- ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0777)
- ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777)
-
- c.SighupCh <- struct{}{}
- checkFinished()
- time.Sleep(2 * time.Second)
- checkFinished()
-
- if err := testCertificateName("bar.example.com"); err != nil {
- t.Fatalf("certificate name didn't check out: %s", err)
- }
-
- c.ShutdownCh <- struct{}{}
-
- wg.Wait()
-}
diff --git a/vendor/github.com/hashicorp/vault/command/ssh.go b/vendor/github.com/hashicorp/vault/command/ssh.go
deleted file mode 100644
index 03e1933..0000000
--- a/vendor/github.com/hashicorp/vault/command/ssh.go
+++ /dev/null
@@ -1,681 +0,0 @@
-package command
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net"
- "os"
- "os/exec"
- "os/user"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/ssh"
- "github.com/hashicorp/vault/meta"
- homedir "github.com/mitchellh/go-homedir"
- "github.com/mitchellh/mapstructure"
- "github.com/pkg/errors"
-)
-
-// SSHCommand is a Command that establishes a SSH connection with target by
-// generating a dynamic key
-type SSHCommand struct {
- meta.Meta
-
- // API
- client *api.Client
- sshClient *api.SSH
-
- // Common options
- mode string
- noExec bool
- format string
- mountPoint string
- role string
- username string
- ip string
- sshArgs []string
-
- // Key options
- strictHostKeyChecking string
- userKnownHostsFile string
-
- // SSH CA backend specific options
- publicKeyPath string
- privateKeyPath string
- hostKeyMountPoint string
- hostKeyHostnames string
-}
-
-// Structure to hold the fields returned when asked for a credential from SSHh backend.
-type SSHCredentialResp struct {
- KeyType string `mapstructure:"key_type"`
- Key string `mapstructure:"key"`
- Username string `mapstructure:"username"`
- IP string `mapstructure:"ip"`
- Port string `mapstructure:"port"`
-}
-
-func (c *SSHCommand) Run(args []string) int {
-
- flags := c.Meta.FlagSet("ssh", meta.FlagSetDefault)
-
- envOrDefault := func(key string, def string) string {
- if k := os.Getenv(key); k != "" {
- return k
- }
- return def
- }
-
- expandPath := func(p string) string {
- e, err := homedir.Expand(p)
- if err != nil {
- return p
- }
- return e
- }
-
- // Common options
- flags.StringVar(&c.mode, "mode", "", "")
- flags.BoolVar(&c.noExec, "no-exec", false, "")
- flags.StringVar(&c.format, "format", "table", "")
- flags.StringVar(&c.mountPoint, "mount-point", "ssh", "")
- flags.StringVar(&c.role, "role", "", "")
-
- // Key options
- flags.StringVar(&c.strictHostKeyChecking, "strict-host-key-checking",
- envOrDefault("VAULT_SSH_STRICT_HOST_KEY_CHECKING", "ask"), "")
- flags.StringVar(&c.userKnownHostsFile, "user-known-hosts-file",
- envOrDefault("VAULT_SSH_USER_KNOWN_HOSTS_FILE", expandPath("~/.ssh/known_hosts")), "")
-
- // CA-specific options
- flags.StringVar(&c.publicKeyPath, "public-key-path",
- expandPath("~/.ssh/id_rsa.pub"), "")
- flags.StringVar(&c.privateKeyPath, "private-key-path",
- expandPath("~/.ssh/id_rsa"), "")
- flags.StringVar(&c.hostKeyMountPoint, "host-key-mount-point", "", "")
- flags.StringVar(&c.hostKeyHostnames, "host-key-hostnames", "*", "")
-
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) < 1 {
- c.Ui.Error("ssh expects at least one argument")
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
- return 1
- }
- c.client = client
- c.sshClient = client.SSHWithMountPoint(c.mountPoint)
-
- // Extract the username and IP.
- c.username, c.ip, err = c.userAndIP(args[0])
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error parsing user and IP: %s", err))
- return 1
- }
-
- // The rest of the args are ssh args
- if len(args) > 1 {
- c.sshArgs = args[1:]
- }
-
- // Credentials are generated only against a registered role. If user
- // does not specify a role with the SSH command, then lookup API is used
- // to fetch all the roles with which this IP is associated. If there is
- // only one role associated with it, use it to establish the connection.
- //
- // TODO: remove in 0.9.0, convert to validation error
- if c.role == "" {
- c.Ui.Warn("" +
- "WARNING: No -role specified. Use -role to tell Vault which ssh role\n" +
- "to use for authentication. In the future, you will need to tell Vault\n" +
- "which role to use. For now, Vault will attempt to guess based on a\n" +
- "the API response.")
-
- role, err := c.defaultRole(c.mountPoint, c.ip)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error choosing role: %v", err))
- return 1
- }
- // Print the default role chosen so that user knows the role name
- // if something doesn't work. If the role chosen is not allowed to
- // be used by the user (ACL enforcement), then user should see an
- // error message accordingly.
- c.Ui.Output(fmt.Sprintf("Vault SSH: Role: %q", role))
- c.role = role
- }
-
- // If no mode was given, perform the old-school lookup. Keep this now for
- // backwards-compatability, but print a warning.
- //
- // TODO: remove in 0.9.0, convert to validation error
- if c.mode == "" {
- c.Ui.Warn("" +
- "WARNING: No -mode specified. Use -mode to tell Vault which ssh\n" +
- "authentication mode to use. In the future, you will need to tell\n" +
- "Vault which mode to use. For now, Vault will attempt to guess based\n" +
- "on the API response. This guess involves creating a temporary\n" +
- "credential, reading its type, and then revoking it. To reduce the\n" +
- "number of API calls and surface area, specify -mode directly.")
- secret, cred, err := c.generateCredential()
- if err != nil {
- // This is _very_ hacky, but is the only sane backwards-compatible way
- // to do this. If the error is "key type unknown", we just assume the
- // type is "ca". In the future, mode will be required as an option.
- if strings.Contains(err.Error(), "key type unknown") {
- c.mode = ssh.KeyTypeCA
- } else {
- c.Ui.Error(fmt.Sprintf("Error getting credential: %s", err))
- return 1
- }
- } else {
- c.mode = cred.KeyType
- }
-
- // Revoke the secret, since the child functions will generate their own
- // credential. Users wishing to avoid this should specify -mode.
- if secret != nil {
- if err := c.client.Sys().Revoke(secret.LeaseID); err != nil {
- c.Ui.Warn(fmt.Sprintf("Failed to revoke temporary key: %s", err))
- }
- }
- }
-
- switch strings.ToLower(c.mode) {
- case ssh.KeyTypeCA:
- if err := c.handleTypeCA(); err != nil {
- c.Ui.Error(err.Error())
- return 1
- }
- case ssh.KeyTypeOTP:
- if err := c.handleTypeOTP(); err != nil {
- c.Ui.Error(err.Error())
- return 1
- }
- case ssh.KeyTypeDynamic:
- if err := c.handleTypeDynamic(); err != nil {
- c.Ui.Error(err.Error())
- return 1
- }
- default:
- c.Ui.Error(fmt.Sprintf("Unknown SSH mode: %s", c.mode))
- return 1
- }
-
- return 0
-}
-
-// handleTypeCA is used to handle SSH logins using the "CA" key type.
-func (c *SSHCommand) handleTypeCA() error {
- // Read the key from disk
- publicKey, err := ioutil.ReadFile(c.publicKeyPath)
- if err != nil {
- return errors.Wrap(err, "failed to read public key")
- }
-
- // Attempt to sign the public key
- secret, err := c.sshClient.SignKey(c.role, map[string]interface{}{
- // WARNING: publicKey is []byte, which is b64 encoded on JSON upload. We
- // have to convert it to a string. SV lost many hours to this...
- "public_key": string(publicKey),
- "valid_principals": c.username,
- "cert_type": "user",
-
- // TODO: let the user configure these. In the interim, if users want to
- // customize these values, they can produce the key themselves.
- "extensions": map[string]string{
- "permit-X11-forwarding": "",
- "permit-agent-forwarding": "",
- "permit-port-forwarding": "",
- "permit-pty": "",
- "permit-user-rc": "",
- },
- })
- if err != nil {
- return errors.Wrap(err, "failed to sign public key")
- }
- if secret == nil || secret.Data == nil {
- return fmt.Errorf("client signing returned empty credentials")
- }
-
- // Handle no-exec
- if c.noExec {
- // This is hacky, but OutputSecret returns an int, not an error :(
- if i := OutputSecret(c.Ui, c.format, secret); i != 0 {
- return fmt.Errorf("an error occurred outputting the secret")
- }
- return nil
- }
-
- // Extract public key
- key, ok := secret.Data["signed_key"].(string)
- if !ok {
- return fmt.Errorf("missing signed key")
- }
-
- // Capture the current value - this could be overwritten later if the user
- // enabled host key signing verification.
- userKnownHostsFile := c.userKnownHostsFile
- strictHostKeyChecking := c.strictHostKeyChecking
-
- // Handle host key signing verification. If the user specified a mount point,
- // download the public key, trust it with the given domains, and use that
- // instead of the user's regular known_hosts file.
- if c.hostKeyMountPoint != "" {
- secret, err := c.client.Logical().Read(c.hostKeyMountPoint + "/config/ca")
- if err != nil {
- return errors.Wrap(err, "failed to get host signing key")
- }
- if secret == nil || secret.Data == nil {
- return fmt.Errorf("missing host signing key")
- }
- publicKey, ok := secret.Data["public_key"].(string)
- if !ok {
- return fmt.Errorf("host signing key is empty")
- }
-
- // Write the known_hosts file
- name := fmt.Sprintf("vault_ssh_ca_known_hosts_%s_%s", c.username, c.ip)
- data := fmt.Sprintf("@cert-authority %s %s", c.hostKeyHostnames, publicKey)
- knownHosts, err, closer := c.writeTemporaryFile(name, []byte(data), 0644)
- defer closer()
- if err != nil {
- return errors.Wrap(err, "failed to write host public key")
- }
-
- // Update the variables
- userKnownHostsFile = knownHosts
- strictHostKeyChecking = "yes"
- }
-
- // Write the signed public key to disk
- name := fmt.Sprintf("vault_ssh_ca_%s_%s", c.username, c.ip)
- signedPublicKeyPath, err, closer := c.writeTemporaryKey(name, []byte(key))
- defer closer()
- if err != nil {
- return errors.Wrap(err, "failed to write signed public key")
- }
-
- args := append([]string{
- "-i", c.privateKeyPath,
- "-i", signedPublicKeyPath,
- "-o UserKnownHostsFile=" + userKnownHostsFile,
- "-o StrictHostKeyChecking=" + strictHostKeyChecking,
- c.username + "@" + c.ip,
- }, c.sshArgs...)
-
- cmd := exec.Command("ssh", args...)
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- err = cmd.Run()
- if err != nil {
- return errors.Wrap(err, "failed to run ssh command")
- }
-
- // There is no secret to revoke, since it's a certificate signing
-
- return nil
-}
-
-// handleTypeOTP is used to handle SSH logins using the "otp" key type.
-func (c *SSHCommand) handleTypeOTP() error {
- secret, cred, err := c.generateCredential()
- if err != nil {
- return errors.Wrap(err, "failed to generate credential")
- }
-
- // Handle no-exec
- if c.noExec {
- // This is hacky, but OutputSecret returns an int, not an error :(
- if i := OutputSecret(c.Ui, c.format, secret); i != 0 {
- return fmt.Errorf("an error occurred outputting the secret")
- }
- return nil
- }
-
- var cmd *exec.Cmd
-
- // Check if the application 'sshpass' is installed in the client machine.
- // If it is then, use it to automate typing in OTP to the prompt. Unfortunately,
- // it was not possible to automate it without a third-party application, with
- // only the Go libraries.
- // Feel free to try and remove this dependency.
- sshpassPath, err := exec.LookPath("sshpass")
- if err != nil {
- c.Ui.Warn("" +
- "Vault could not locate sshpass. The OTP code for the session will be\n" +
- "displayed below. Enter this code in the SSH password prompt. If you\n" +
- "install sshpass, Vault can automatically perform this step for you.")
- c.Ui.Output("OTP for the session is " + cred.Key)
-
- args := append([]string{
- "-o UserKnownHostsFile=" + c.userKnownHostsFile,
- "-o StrictHostKeyChecking=" + c.strictHostKeyChecking,
- "-p", cred.Port,
- c.username + "@" + c.ip,
- }, c.sshArgs...)
- cmd = exec.Command("ssh", args...)
- } else {
- args := append([]string{
- "-e", // Read password for SSHPASS environment variable
- "ssh",
- "-o UserKnownHostsFile=" + c.userKnownHostsFile,
- "-o StrictHostKeyChecking=" + c.strictHostKeyChecking,
- "-p", cred.Port,
- c.username + "@" + c.ip,
- }, c.sshArgs...)
- cmd = exec.Command(sshpassPath, args...)
- env := os.Environ()
- env = append(env, fmt.Sprintf("SSHPASS=%s", string(cred.Key)))
- cmd.Env = env
- }
-
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- err = cmd.Run()
- if err != nil {
- return errors.Wrap(err, "failed to run ssh command")
- }
-
- // Revoke the key if it's longer than expected
- if err := c.client.Sys().Revoke(secret.LeaseID); err != nil {
- return errors.Wrap(err, "failed to revoke key")
- }
-
- return nil
-}
-
-// handleTypeDynamic is used to handle SSH logins using the "dyanmic" key type.
-func (c *SSHCommand) handleTypeDynamic() error {
- // Generate the credential
- secret, cred, err := c.generateCredential()
- if err != nil {
- return errors.Wrap(err, "failed to generate credential")
- }
-
- // Handle no-exec
- if c.noExec {
- // This is hacky, but OutputSecret returns an int, not an error :(
- if i := OutputSecret(c.Ui, c.format, secret); i != 0 {
- return fmt.Errorf("an error occurred outputting the secret")
- }
- return nil
- }
-
- // Write the dynamic key to disk
- name := fmt.Sprintf("vault_ssh_dynamic_%s_%s", c.username, c.ip)
- keyPath, err, closer := c.writeTemporaryKey(name, []byte(cred.Key))
- defer closer()
- if err != nil {
- return errors.Wrap(err, "failed to save dyanmic key")
- }
-
- args := append([]string{
- "-i", keyPath,
- "-o UserKnownHostsFile=" + c.userKnownHostsFile,
- "-o StrictHostKeyChecking=" + c.strictHostKeyChecking,
- "-p", cred.Port,
- c.username + "@" + c.ip,
- }, c.sshArgs...)
-
- cmd := exec.Command("ssh", args...)
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- err = cmd.Run()
- if err != nil {
- return errors.Wrap(err, "failed to run ssh command")
- }
-
- // Revoke the key if it's longer than expected
- if err := c.client.Sys().Revoke(secret.LeaseID); err != nil {
- return errors.Wrap(err, "failed to revoke key")
- }
-
- return nil
-}
-
-// generateCredential generates a credential for the given role and returns the
-// decoded secret data.
-func (c *SSHCommand) generateCredential() (*api.Secret, *SSHCredentialResp, error) {
- // Attempt to generate the credential.
- secret, err := c.sshClient.Credential(c.role, map[string]interface{}{
- "username": c.username,
- "ip": c.ip,
- })
- if err != nil {
- return nil, nil, errors.Wrap(err, "failed to get credentials")
- }
- if secret == nil || secret.Data == nil {
- return nil, nil, fmt.Errorf("vault returned empty credentials")
- }
-
- // Port comes back as a json.Number which mapstructure doesn't like, so
- // convert it
- if d, ok := secret.Data["port"].(json.Number); ok {
- secret.Data["port"] = d.String()
- }
-
- // Use mapstructure to decode the response
- var resp SSHCredentialResp
- if err := mapstructure.Decode(secret.Data, &resp); err != nil {
- return nil, nil, errors.Wrap(err, "failed to decode credential")
- }
-
- // Check for an empty key response
- if len(resp.Key) == 0 {
- return nil, nil, fmt.Errorf("vault returned an invalid key")
- }
-
- return secret, &resp, nil
-}
-
-// writeTemporaryFile writes a file to a temp location with the given data and
-// file permissions.
-func (c *SSHCommand) writeTemporaryFile(name string, data []byte, perms os.FileMode) (string, error, func() error) {
- // default closer to prevent panic
- closer := func() error { return nil }
-
- f, err := ioutil.TempFile("", name)
- if err != nil {
- return "", errors.Wrap(err, "creating temporary file"), closer
- }
-
- closer = func() error { return os.Remove(f.Name()) }
-
- if err := ioutil.WriteFile(f.Name(), data, perms); err != nil {
- return "", errors.Wrap(err, "writing temporary key"), closer
- }
-
- return f.Name(), nil, closer
-}
-
-// writeTemporaryKey writes the key to a temporary file and returns the path.
-// The caller should defer the closer to cleanup the key.
-func (c *SSHCommand) writeTemporaryKey(name string, data []byte) (string, error, func() error) {
- return c.writeTemporaryFile(name, data, 0600)
-}
-
-// If user did not provide the role with which SSH connection has
-// to be established and if there is only one role associated with
-// the IP, it is used by default.
-func (c *SSHCommand) defaultRole(mountPoint, ip string) (string, error) {
- data := map[string]interface{}{
- "ip": ip,
- }
- client, err := c.Client()
- if err != nil {
- return "", err
- }
- secret, err := client.Logical().Write(mountPoint+"/lookup", data)
- if err != nil {
- return "", fmt.Errorf("Error finding roles for IP %q: %q", ip, err)
-
- }
- if secret == nil || secret.Data == nil {
- return "", fmt.Errorf("Error finding roles for IP %q: %q", ip, err)
- }
-
- if secret.Data["roles"] == nil {
- return "", fmt.Errorf("No matching roles found for IP %q", ip)
- }
-
- if len(secret.Data["roles"].([]interface{})) == 1 {
- return secret.Data["roles"].([]interface{})[0].(string), nil
- } else {
- var roleNames string
- for _, item := range secret.Data["roles"].([]interface{}) {
- roleNames += item.(string) + ", "
- }
- roleNames = strings.TrimRight(roleNames, ", ")
- return "", fmt.Errorf("Roles:%q. "+`
- Multiple roles are registered for this IP.
- Select a role using '-role' option.
- Note that all roles may not be permitted, based on ACLs.`, roleNames)
- }
-}
-
-// userAndIP takes an argument in the format foo@1.2.3.4 and separates the IP
-// and user parts, returning any errors.
-func (c *SSHCommand) userAndIP(s string) (string, string, error) {
- // split the parameter username@ip
- input := strings.Split(s, "@")
- var username, address string
-
- // If only IP is mentioned and username is skipped, assume username to
- // be the current username. Vault SSH role's default username could have
- // been used, but in order to retain the consistency with SSH command,
- // current username is employed.
- switch len(input) {
- case 1:
- u, err := user.Current()
- if err != nil {
- return "", "", errors.Wrap(err, "failed to fetch current user")
- }
- username, address = u.Username, input[0]
- case 2:
- username, address = input[0], input[1]
- default:
- return "", "", fmt.Errorf("invalid arguments: %q", s)
- }
-
- // Resolving domain names to IP address on the client side.
- // Vault only deals with IP addresses.
- ipAddr, err := net.ResolveIPAddr("ip", address)
- if err != nil {
- return "", "", errors.Wrap(err, "failed to resolve IP address")
- }
- ip := ipAddr.String()
-
- return username, ip, nil
-}
-
-func (c *SSHCommand) Synopsis() string {
- return "Initiate an SSH session"
-}
-
-func (c *SSHCommand) Help() string {
- helpText := `
-Usage: vault ssh [options] username@ip [ssh options]
-
- Establishes an SSH connection with the target machine.
-
- This command uses one of the SSH authentication backends to authenticate and
- automatically establish an SSH connection to a host. This operation requires
- that the SSH backend is mounted and configured.
-
- SSH using the OTP mode (requires sshpass for full automation):
-
- $ vault ssh -mode=otp -role=my-role user@1.2.3.4
-
- SSH using the CA mode:
-
- $ vault ssh -mode=ca -role=my-role user@1.2.3.4
-
- SSH using CA mode with host key verification:
-
- $ vault ssh \
- -mode=ca \
- -role=my-role \
- -host-key-mount-point=host-signer \
- -host-key-hostnames=example.com \
- user@example.com
-
- For the full list of options and arguments, please see the documentation.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-SSH Options:
-
- -role Role to be used to create the key. Each IP is associated with
- a role. To see the associated roles with IP, use "lookup"
- endpoint. If you are certain that there is only one role
- associated with the IP, you can skip mentioning the role. It
- will be chosen by default. If there are no roles associated
- with the IP, register the CIDR block of that IP using the
- "roles/" endpoint.
-
- -no-exec Shows the credentials but does not establish connection.
-
- -mount-point Mount point of SSH backend. If the backend is mounted at
- "ssh" (default), this parameter can be skipped.
-
- -format If the "no-exec" option is enabled, the credentials will be
- printed out and SSH connection will not be established. The
- format of the output can be "json" or "table" (default).
-
- -strict-host-key-checking This option corresponds to "StrictHostKeyChecking"
- of SSH configuration. If "sshpass" is employed to enable
- automated login, then if host key is not "known" to the
- client, "vault ssh" command will fail. Set this option to
- "no" to bypass the host key checking. Defaults to "ask".
- Can also be specified with the
- "VAULT_SSH_STRICT_HOST_KEY_CHECKING" environment variable.
-
- -user-known-hosts-file This option corresponds to "UserKnownHostsFile" of
- SSH configuration. Assigns the file to use for storing the
- host keys. If this option is set to "/dev/null" along with
- "-strict-host-key-checking=no", both warnings and host key
- checking can be avoided while establishing the connection.
- Defaults to "~/.ssh/known_hosts". Can also be specified with
- "VAULT_SSH_USER_KNOWN_HOSTS_FILE" environment variable.
-
-CA Mode Options:
-
- - public-key-path=
- The path to the public key to send to Vault for signing. The default value
- is ~/.ssh/id_rsa.pub.
-
- - private-key-path=
- The path to the private key to use for authentication. This must be the
- corresponding private key to -public-key-path. The default value is
- ~/.ssh/id_rsa.
-
- - host-key-mount-point=
- The mount point to the SSH backend where host keys are signed. When given
- a value, Vault will generate a custom known_hosts file with delegation to
- the CA at the provided mount point and verify the SSH connection's host
- keys against the provided CA. By default, this command uses the users's
- existing known_hosts file. When this flag is set, this command will force
- strict host key checking and will override any values provided for a
- custom -user-known-hosts-file.
-
- - host-key-hostnames=
- The list of hostnames to delegate for this certificate authority. By
- default, this is "*", which allows all domains and IPs. To restrict
- validation to a series of hostnames, specify them as comma-separated
- values here.
-`
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/ssh_test.go b/vendor/github.com/hashicorp/vault/command/ssh_test.go
deleted file mode 100644
index 70a58f5..0000000
--- a/vendor/github.com/hashicorp/vault/command/ssh_test.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package command
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "strings"
- "testing"
-
- logicalssh "github.com/hashicorp/vault/builtin/logical/ssh"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-const (
- testCidr = "127.0.0.1/32"
- testRoleName = "testRoleName"
- testKey = "testKey"
- testSharedPrivateKey = `
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAvYvoRcWRxqOim5VZnuM6wHCbLUeiND0yaM1tvOl+Fsrz55DG
-A0OZp4RGAu1Fgr46E1mzxFz1+zY4UbcEExg+u21fpa8YH8sytSWW1FyuD8ICib0A
-/l8slmDMw4BkkGOtSlEqgscpkpv/TWZD1NxJWkPcULk8z6c7TOETn2/H9mL+v2RE
-mbE6NDEwJKfD3MvlpIqCP7idR+86rNBAODjGOGgyUbtFLT+K01XmDRALkV3V/nh+
-GltyjL4c6RU4zG2iRyV5RHlJtkml+UzUMkzr4IQnkCC32CC/wmtoo/IsAprpcHVe
-nkBn3eFQ7uND70p5n6GhN/KOh2j519JFHJyokwIDAQABAoIBAHX7VOvBC3kCN9/x
-+aPdup84OE7Z7MvpX6w+WlUhXVugnmsAAVDczhKoUc/WktLLx2huCGhsmKvyVuH+
-MioUiE+vx75gm3qGx5xbtmOfALVMRLopjCnJYf6EaFA0ZeQ+NwowNW7Lu0PHmAU8
-Z3JiX8IwxTz14DU82buDyewO7v+cEr97AnERe3PUcSTDoUXNaoNxjNpEJkKREY6h
-4hAY676RT/GsRcQ8tqe/rnCqPHNd7JGqL+207FK4tJw7daoBjQyijWuB7K5chSal
-oPInylM6b13ASXuOAOT/2uSUBWmFVCZPDCmnZxy2SdnJGbsJAMl7Ma3MUlaGvVI+
-Tfh1aQkCgYEA4JlNOabTb3z42wz6mz+Nz3JRwbawD+PJXOk5JsSnV7DtPtfgkK9y
-6FTQdhnozGWShAvJvc+C4QAihs9AlHXoaBY5bEU7R/8UK/pSqwzam+MmxmhVDV7G
-IMQPV0FteoXTaJSikhZ88mETTegI2mik+zleBpVxvfdhE5TR+lq8Br0CgYEA2AwJ
-CUD5CYUSj09PluR0HHqamWOrJkKPFPwa+5eiTTCzfBBxImYZh7nXnWuoviXC0sg2
-AuvCW+uZ48ygv/D8gcz3j1JfbErKZJuV+TotK9rRtNIF5Ub7qysP7UjyI7zCssVM
-kuDd9LfRXaB/qGAHNkcDA8NxmHW3gpln4CFdSY8CgYANs4xwfercHEWaJ1qKagAe
-rZyrMpffAEhicJ/Z65lB0jtG4CiE6w8ZeUMWUVJQVcnwYD+4YpZbX4S7sJ0B8Ydy
-AhkSr86D/92dKTIt2STk6aCN7gNyQ1vW198PtaAWH1/cO2UHgHOy3ZUt5X/Uwxl9
-cex4flln+1Viumts2GgsCQKBgCJH7psgSyPekK5auFdKEr5+Gc/jB8I/Z3K9+g4X
-5nH3G1PBTCJYLw7hRzw8W/8oALzvddqKzEFHphiGXK94Lqjt/A4q1OdbCrhiE68D
-My21P/dAKB1UYRSs9Y8CNyHCjuZM9jSMJ8vv6vG/SOJPsnVDWVAckAbQDvlTHC9t
-O98zAoGAcbW6uFDkrv0XMCpB9Su3KaNXOR0wzag+WIFQRXCcoTvxVi9iYfUReQPi
-oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F
-+B6f4RoPdSXj24JHPg/ioRxjaj094UXJxua2yfkcecGNEuBQHSs=
------END RSA PRIVATE KEY-----
-`
-)
-
-var testIP string
-var testPort string
-var testUserName string
-var testAdminUser string
-
-// Starts the server and initializes the servers IP address,
-// port and usernames to be used by the test cases.
-func initTest() {
- addr, err := vault.StartSSHHostTestServer()
- if err != nil {
- panic(fmt.Sprintf("Error starting mock server:%s", err))
- }
- input := strings.Split(addr, ":")
- testIP = input[0]
- testPort = input[1]
-
- testUserName := os.Getenv("VAULT_SSHTEST_USER")
- if len(testUserName) == 0 {
- panic("VAULT_SSHTEST_USER must be set to the desired user")
- }
- testAdminUser = testUserName
-}
-
-// This test is broken. Hence temporarily disabling it.
-func testSSH(t *testing.T) {
- initTest()
- // Add the SSH backend to the unsealed test core.
- // This should be done before the unsealed core is created.
- err := vault.AddTestLogicalBackend("ssh", logicalssh.Factory)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- mountCmd := &MountCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{"-address", addr, "ssh"}
-
- // Mount the SSH backend
- if code := mountCmd.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- client, err := mountCmd.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mounts, err := client.Sys().ListMounts()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Check if SSH backend is mounted or not
- mount, ok := mounts["ssh/"]
- if !ok {
- t.Fatal("should have ssh mount")
- }
- if mount.Type != "ssh" {
- t.Fatal("should have ssh type")
- }
-
- writeCmd := &WriteCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- // Create a 'named' key in vault
- args = []string{
- "-address", addr,
- "ssh/keys/" + testKey,
- "key=" + testSharedPrivateKey,
- }
- if code := writeCmd.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Create a role using the named key along with cidr, username and port
- args = []string{
- "-address", addr,
- "ssh/roles/" + testRoleName,
- "key=" + testKey,
- "admin_user=" + testUserName,
- "cidr=" + testCidr,
- "port=" + testPort,
- }
- if code := writeCmd.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- sshCmd := &SSHCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- // Get the dynamic key and establish an SSH connection with target.
- // Inline command when supplied, runs on target and terminates the
- // connection. Use whoami as the inline command in target and get
- // the result. Compare the result with the username used to connect
- // to target. Test succeeds if they match.
- args = []string{
- "-address", addr,
- "-role=" + testRoleName,
- testUserName + "@" + testIP,
- "/usr/bin/whoami",
- }
-
- // Creating pipe to get the result of the inline command run in target machine.
- stdout := os.Stdout
- r, w, err := os.Pipe()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- os.Stdout = w
- if code := sshCmd.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
- bufChan := make(chan string)
- go func() {
- var buf bytes.Buffer
- io.Copy(&buf, r)
- bufChan <- buf.String()
- }()
- w.Close()
- os.Stdout = stdout
- userName := <-bufChan
- userName = strings.TrimSpace(userName)
-
- // Comparing the username used to connect to target and
- // the username on the target, thereby verifying successful
- // execution
- if userName != testUserName {
- t.Fatalf("err: username mismatch")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/status.go b/vendor/github.com/hashicorp/vault/command/status.go
deleted file mode 100644
index 7b6cce3..0000000
--- a/vendor/github.com/hashicorp/vault/command/status.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/meta"
-)
-
-// StatusCommand is a Command that outputs the status of whether
-// Vault is sealed or not as well as HA information.
-type StatusCommand struct {
- meta.Meta
-}
-
-func (c *StatusCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("status", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 1
- }
-
- sealStatus, err := client.Sys().SealStatus()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error checking seal status: %s", err))
- return 1
- }
-
- outStr := fmt.Sprintf(
- "Sealed: %v\n"+
- "Key Shares: %d\n"+
- "Key Threshold: %d\n"+
- "Unseal Progress: %d\n"+
- "Unseal Nonce: %v\n"+
- "Version: %s",
- sealStatus.Sealed,
- sealStatus.N,
- sealStatus.T,
- sealStatus.Progress,
- sealStatus.Nonce,
- sealStatus.Version)
-
- if sealStatus.ClusterName != "" && sealStatus.ClusterID != "" {
- outStr = fmt.Sprintf("%s\nCluster Name: %s\nCluster ID: %s", outStr, sealStatus.ClusterName, sealStatus.ClusterID)
- }
-
- c.Ui.Output(outStr)
-
- // Mask the 'Vault is sealed' error, since this means HA is enabled,
- // but that we cannot query for the leader since we are sealed.
- leaderStatus, err := client.Sys().Leader()
- if err != nil && strings.Contains(err.Error(), "Vault is sealed") {
- leaderStatus = &api.LeaderResponse{HAEnabled: true}
- err = nil
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error checking leader status: %s", err))
- return 1
- }
-
- // Output if HA is enabled
- c.Ui.Output("")
- c.Ui.Output(fmt.Sprintf("High-Availability Enabled: %v", leaderStatus.HAEnabled))
- if leaderStatus.HAEnabled {
- if sealStatus.Sealed {
- c.Ui.Output("\tMode: sealed")
- } else {
- mode := "standby"
- if leaderStatus.IsSelf {
- mode = "active"
- }
- c.Ui.Output(fmt.Sprintf("\tMode: %s", mode))
-
- if leaderStatus.LeaderAddress == "" {
- leaderStatus.LeaderAddress = ""
- }
- if leaderStatus.LeaderClusterAddress == "" {
- leaderStatus.LeaderClusterAddress = ""
- }
- c.Ui.Output(fmt.Sprintf("\tLeader Cluster Address: %s", leaderStatus.LeaderClusterAddress))
- }
- }
-
- if sealStatus.Sealed {
- return 2
- } else {
- return 0
- }
-}
-
-func (c *StatusCommand) Synopsis() string {
- return "Outputs status of whether Vault is sealed and if HA mode is enabled"
-}
-
-func (c *StatusCommand) Help() string {
- helpText := `
-Usage: vault status [options]
-
- Outputs the state of the Vault, sealed or unsealed and if HA is enabled.
-
- This command outputs whether or not the Vault is sealed. The exit
- code also reflects the seal status (0 unsealed, 2 sealed, 1 error).
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/status_test.go b/vendor/github.com/hashicorp/vault/command/status_test.go
deleted file mode 100644
index 92e7f74..0000000
--- a/vendor/github.com/hashicorp/vault/command/status_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestStatus(t *testing.T) {
- ui := new(cli.MockUi)
- c := &StatusCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- core := vault.TestCore(t)
- keys, _ := vault.TestCoreInit(t, core)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- args := []string{"-address", addr}
- if code := c.Run(args); code != 2 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- for _, key := range keys {
- if _, err := core.Unseal(key); err != nil {
- t.Fatalf("err: %s", err)
- }
- }
-
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/step-down.go b/vendor/github.com/hashicorp/vault/command/step-down.go
deleted file mode 100644
index be445a8..0000000
--- a/vendor/github.com/hashicorp/vault/command/step-down.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// StepDownCommand is a Command that seals the vault.
-type StepDownCommand struct {
- meta.Meta
-}
-
-func (c *StepDownCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("step-down", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- if err := client.Sys().StepDown(); err != nil {
- c.Ui.Error(fmt.Sprintf("Error stepping down: %s", err))
- return 1
- }
-
- return 0
-}
-
-func (c *StepDownCommand) Synopsis() string {
- return "Force the Vault node to give up active duty"
-}
-
-func (c *StepDownCommand) Help() string {
- helpText := `
-Usage: vault step-down [options]
-
- Force the Vault node to step down from active duty.
-
- This causes the indicated node to give up active status. Note that while the
- affected node will have a short delay before attempting to grab the lock
- again, if no other node grabs the lock beforehand, it is possible for the
- same node to re-grab the lock and become active again.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token/helper.go b/vendor/github.com/hashicorp/vault/command/token/helper.go
deleted file mode 100644
index db068be..0000000
--- a/vendor/github.com/hashicorp/vault/command/token/helper.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package token
-
-// TokenHelper is an interface that contains basic operations that must be
-// implemented by a token helper
-type TokenHelper interface {
- // Path displays a backend-specific path; for the internal helper this
- // is the location of the token stored on disk; for the external helper
- // this is the location of the binary being invoked
- Path() string
-
- Erase() error
- Get() (string, error)
- Store(string) error
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token/helper_external.go b/vendor/github.com/hashicorp/vault/command/token/helper_external.go
deleted file mode 100644
index 40de9bf..0000000
--- a/vendor/github.com/hashicorp/vault/command/token/helper_external.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package token
-
-import (
- "bytes"
- "fmt"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "strings"
-)
-
-// ExternalTokenHelperPath takes the configured path to a helper and expands it to
-// a full absolute path that can be executed. As of 0.5, the default token
-// helper is internal, to avoid problems running in dev mode (see GH-850 and
-// GH-783), so special assumptions of prepending "vault token-" no longer
-// apply.
-//
-// As an additional result, only absolute paths are now allowed. Looking in the
-// path or a current directory for an arbitrary executable could allow someone
-// to switch the expected binary for one further up the path (or in the current
-// directory), potentially opening up execution of an arbitrary binary.
-func ExternalTokenHelperPath(path string) (string, error) {
- if !filepath.IsAbs(path) {
- var err error
- path, err = filepath.Abs(path)
- if err != nil {
- return "", err
- }
- }
-
- if _, err := os.Stat(path); err != nil {
- return "", fmt.Errorf("unknown error getting the external helper path")
- }
-
- return path, nil
-}
-
-// ExternalTokenHelper is the struct that has all the logic for storing and retrieving
-// tokens from the token helper. The API for the helpers is simple: the
-// BinaryPath is executed within a shell with environment Env. The last argument
-// appended will be the operation, which is:
-//
-// * "get" - Read the value of the token and write it to stdout.
-// * "store" - Store the value of the token which is on stdin. Output
-// nothing.
-// * "erase" - Erase the contents stored. Output nothing.
-//
-// Any errors can be written on stdout. If the helper exits with a non-zero
-// exit code then the stderr will be made part of the error value.
-type ExternalTokenHelper struct {
- BinaryPath string
- Env []string
-}
-
-// Erase deletes the contents from the helper.
-func (h *ExternalTokenHelper) Erase() error {
- cmd, err := h.cmd("erase")
- if err != nil {
- return fmt.Errorf("Error: %s", err)
- }
- if output, err := cmd.CombinedOutput(); err != nil {
- return fmt.Errorf(
- "Error: %s\n\n%s", err, string(output))
- }
- return nil
-}
-
-// Get gets the token value from the helper.
-func (h *ExternalTokenHelper) Get() (string, error) {
- var buf, stderr bytes.Buffer
- cmd, err := h.cmd("get")
- if err != nil {
- return "", fmt.Errorf("Error: %s", err)
- }
- cmd.Stdout = &buf
- cmd.Stderr = &stderr
- if err := cmd.Run(); err != nil {
- return "", fmt.Errorf(
- "Error: %s\n\n%s", err, stderr.String())
- }
-
- return buf.String(), nil
-}
-
-// Store stores the token value into the helper.
-func (h *ExternalTokenHelper) Store(v string) error {
- buf := bytes.NewBufferString(v)
- cmd, err := h.cmd("store")
- if err != nil {
- return fmt.Errorf("Error: %s", err)
- }
- cmd.Stdin = buf
- if output, err := cmd.CombinedOutput(); err != nil {
- return fmt.Errorf(
- "Error: %s\n\n%s", err, string(output))
- }
-
- return nil
-}
-
-func (h *ExternalTokenHelper) Path() string {
- return h.BinaryPath
-}
-
-func (h *ExternalTokenHelper) cmd(op string) (*exec.Cmd, error) {
- script := strings.Replace(h.BinaryPath, "\\", "\\\\", -1) + " " + op
- cmd, err := ExecScript(script)
- if err != nil {
- return nil, err
- }
- cmd.Env = h.Env
- return cmd, nil
-}
-
-// ExecScript returns a command to execute a script
-func ExecScript(script string) (*exec.Cmd, error) {
- var shell, flag string
- if runtime.GOOS == "windows" {
- shell = "cmd"
- flag = "/C"
- } else {
- shell = "/bin/sh"
- flag = "-c"
- }
- if other := os.Getenv("SHELL"); other != "" {
- shell = other
- }
- cmd := exec.Command(shell, flag, script)
- return cmd, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token/helper_external_test.go b/vendor/github.com/hashicorp/vault/command/token/helper_external_test.go
deleted file mode 100644
index b49dd93..0000000
--- a/vendor/github.com/hashicorp/vault/command/token/helper_external_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package token
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "runtime"
- "strings"
- "testing"
-)
-
-func TestExternalTokenHelperPath(t *testing.T) {
- cases := map[string]string{}
-
- unixCases := map[string]string{
- "/foo": "/foo",
- }
- windowsCases := map[string]string{
- "C:/foo": "C:/foo",
- `C:\Program Files`: `C:\Program Files`,
- }
-
- var runtimeCases map[string]string
- if runtime.GOOS == "windows" {
- runtimeCases = windowsCases
- } else {
- runtimeCases = unixCases
- }
-
- for k, v := range runtimeCases {
- cases[k] = v
- }
-
- // We don't expect those to actually exist, so we expect an error. For now,
- // I'm commenting out the rest of this code as we don't have real external
- // helpers to test with and the os.Stat will fail with our fake test cases.
- /*
- for k, v := range cases {
- actual, err := ExternalTokenHelperPath(k)
- if err != nil {
- t.Fatalf("error getting external helper path: %v", err)
- }
- if actual != v {
- t.Fatalf(
- "input: %s, expected: %s, got: %s",
- k, v, actual)
- }
- }
- */
-}
-
-func TestExternalTokenHelper(t *testing.T) {
- Test(t, testExternalTokenHelper(t))
-}
-
-func testExternalTokenHelper(t *testing.T) *ExternalTokenHelper {
- return &ExternalTokenHelper{BinaryPath: helperPath("helper"), Env: helperEnv()}
-}
-
-func helperPath(s ...string) string {
- cs := []string{"-test.run=TestExternalTokenHelperProcess", "--"}
- cs = append(cs, s...)
- return fmt.Sprintf(
- "%s %s",
- os.Args[0],
- strings.Join(cs, " "))
-}
-
-func helperEnv() []string {
- var env []string
-
- tf, err := ioutil.TempFile("", "vault")
- if err != nil {
- panic(err)
- }
- tf.Close()
-
- env = append(env, "GO_HELPER_PATH="+tf.Name(), "GO_WANT_HELPER_PROCESS=1")
- return env
-}
-
-// This is not a real test. This is just a helper process kicked off by tests.
-func TestExternalTokenHelperProcess(*testing.T) {
- if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
- return
- }
-
- defer os.Exit(0)
-
- args := os.Args
- for len(args) > 0 {
- if args[0] == "--" {
- args = args[1:]
- break
- }
-
- args = args[1:]
- }
-
- if len(args) == 0 {
- fmt.Fprintf(os.Stderr, "No command\n")
- os.Exit(2)
- }
-
- cmd, args := args[0], args[1:]
- switch cmd {
- case "helper":
- path := os.Getenv("GO_HELPER_PATH")
-
- switch args[0] {
- case "erase":
- os.Remove(path)
- case "get":
- f, err := os.Open(path)
- if os.IsNotExist(err) {
- return
- }
- if err != nil {
- fmt.Fprintf(os.Stderr, "Err: %s\n", err)
- os.Exit(1)
- }
- defer f.Close()
- io.Copy(os.Stdout, f)
- case "store":
- f, err := os.Create(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Err: %s\n", err)
- os.Exit(1)
- }
- defer f.Close()
- io.Copy(f, os.Stdin)
- }
- default:
- fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd)
- os.Exit(2)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token/helper_internal.go b/vendor/github.com/hashicorp/vault/command/token/helper_internal.go
deleted file mode 100644
index 89793cb..0000000
--- a/vendor/github.com/hashicorp/vault/command/token/helper_internal.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package token
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "strings"
-
- "github.com/mitchellh/go-homedir"
-)
-
-// InternalTokenHelper fulfills the TokenHelper interface when no external
-// token-helper is configured, and avoids shelling out
-type InternalTokenHelper struct {
- tokenPath string
-}
-
-// populateTokenPath figures out the token path using homedir to get the user's
-// home directory
-func (i *InternalTokenHelper) populateTokenPath() {
- homePath, err := homedir.Dir()
- if err != nil {
- panic(fmt.Errorf("error getting user's home directory: %v", err))
- }
- i.tokenPath = homePath + "/.vault-token"
-}
-
-func (i *InternalTokenHelper) Path() string {
- return i.tokenPath
-}
-
-// Get gets the value of the stored token, if any
-func (i *InternalTokenHelper) Get() (string, error) {
- i.populateTokenPath()
- f, err := os.Open(i.tokenPath)
- if os.IsNotExist(err) {
- return "", nil
- }
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- buf := bytes.NewBuffer(nil)
- if _, err := io.Copy(buf, f); err != nil {
- return "", err
- }
-
- return strings.TrimSpace(buf.String()), nil
-}
-
-// Store stores the value of the token to the file
-func (i *InternalTokenHelper) Store(input string) error {
- i.populateTokenPath()
- f, err := os.OpenFile(i.tokenPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
- if err != nil {
- return err
- }
- defer f.Close()
-
- buf := bytes.NewBufferString(input)
- if _, err := io.Copy(f, buf); err != nil {
- return err
- }
-
- return nil
-}
-
-// Erase erases the value of the token
-func (i *InternalTokenHelper) Erase() error {
- i.populateTokenPath()
- if err := os.Remove(i.tokenPath); err != nil && !os.IsNotExist(err) {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token/helper_internal_test.go b/vendor/github.com/hashicorp/vault/command/token/helper_internal_test.go
deleted file mode 100644
index 4ac527c..0000000
--- a/vendor/github.com/hashicorp/vault/command/token/helper_internal_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package token
-
-import (
- "testing"
-)
-
-// TestCommand re-uses the existing Test function to ensure proper behavior of
-// the internal token helper
-func TestCommand(t *testing.T) {
- Test(t, &InternalTokenHelper{})
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token/testing.go b/vendor/github.com/hashicorp/vault/command/token/testing.go
deleted file mode 100644
index 725f127..0000000
--- a/vendor/github.com/hashicorp/vault/command/token/testing.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package token
-
-import (
- "fmt"
- "os"
- "strings"
- "testing"
-
- "github.com/mitchellh/cli"
-)
-
-// Test is a public function that can be used in other tests to
-// test that a helper is functioning properly.
-func Test(t *testing.T, h TokenHelper) {
- if err := h.Store("foo"); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- v, err := h.Get()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if v != "foo" {
- t.Fatalf("bad: %#v", v)
- }
-
- if err := h.Erase(); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- v, err = h.Get()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if v != "" {
- t.Fatalf("bad: %#v", v)
- }
-}
-
-// TestProcess is used to re-execute this test in order to use it as the
-// helper process. For this to work, the TestExternalTokenHelperProcess function must
-// exist.
-func TestProcess(t *testing.T, s ...string) {
- h := &ExternalTokenHelper{BinaryPath: TestProcessPath(t, s...)}
- Test(t, h)
-}
-
-// TestProcessPath returns the path to the test process.
-func TestProcessPath(t *testing.T, s ...string) string {
- cs := []string{"-test.run=TestExternalTokenHelperProcess", "--", "GO_WANT_HELPER_PROCESS"}
- cs = append(cs, s...)
- return fmt.Sprintf(
- "%s %s",
- os.Args[0],
- strings.Join(cs, " "))
-}
-
-// TestExternalTokenHelperProcessCLI can be called to implement TestExternalTokenHelperProcess
-// for TestProcess that just executes a CLI command.
-func TestExternalTokenHelperProcessCLI(t *testing.T, cmd cli.Command) {
- args := os.Args
- for len(args) > 0 {
- if args[0] == "--" {
- args = args[1:]
- break
- }
-
- args = args[1:]
- }
- if len(args) == 0 || args[0] != "GO_WANT_HELPER_PROCESS" {
- return
- }
- args = args[1:]
-
- os.Exit(cmd.Run(args))
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token_create.go b/vendor/github.com/hashicorp/vault/command/token_create.go
deleted file mode 100644
index f8d8c59..0000000
--- a/vendor/github.com/hashicorp/vault/command/token_create.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/flag-kv"
- "github.com/hashicorp/vault/helper/flag-slice"
- "github.com/hashicorp/vault/meta"
-)
-
-// TokenCreateCommand is a Command that mounts a new mount.
-type TokenCreateCommand struct {
- meta.Meta
-}
-
-func (c *TokenCreateCommand) Run(args []string) int {
- var format string
- var id, displayName, lease, ttl, explicitMaxTTL, period, role string
- var orphan, noDefaultPolicy, renewable bool
- var metadata map[string]string
- var numUses int
- var policies []string
- flags := c.Meta.FlagSet("mount", meta.FlagSetDefault)
- flags.StringVar(&format, "format", "table", "")
- flags.StringVar(&displayName, "display-name", "", "")
- flags.StringVar(&id, "id", "", "")
- flags.StringVar(&lease, "lease", "", "")
- flags.StringVar(&ttl, "ttl", "", "")
- flags.StringVar(&explicitMaxTTL, "explicit-max-ttl", "", "")
- flags.StringVar(&period, "period", "", "")
- flags.StringVar(&role, "role", "", "")
- flags.BoolVar(&orphan, "orphan", false, "")
- flags.BoolVar(&renewable, "renewable", true, "")
- flags.BoolVar(&noDefaultPolicy, "no-default-policy", false, "")
- flags.IntVar(&numUses, "use-limit", 0, "")
- flags.Var((*kvFlag.Flag)(&metadata), "metadata", "")
- flags.Var((*sliceflag.StringFlag)(&policies), "policy", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 0 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\ntoken-create expects no arguments"))
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- if ttl == "" {
- ttl = lease
- }
-
- tcr := &api.TokenCreateRequest{
- ID: id,
- Policies: policies,
- Metadata: metadata,
- TTL: ttl,
- NoParent: orphan,
- NoDefaultPolicy: noDefaultPolicy,
- DisplayName: displayName,
- NumUses: numUses,
- Renewable: new(bool),
- ExplicitMaxTTL: explicitMaxTTL,
- Period: period,
- }
- *tcr.Renewable = renewable
-
- var secret *api.Secret
- if role != "" {
- secret, err = client.Auth().Token().CreateWithRole(tcr, role)
- } else {
- secret, err = client.Auth().Token().Create(tcr)
- }
-
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error creating token: %s", err))
- return 2
- }
-
- return OutputSecret(c.Ui, format, secret)
-}
-
-func (c *TokenCreateCommand) Synopsis() string {
- return "Create a new auth token"
-}
-
-func (c *TokenCreateCommand) Help() string {
- helpText := `
-Usage: vault token-create [options]
-
- Create a new auth token.
-
- This command creates a new token that can be used for authentication.
- This token will be created as a child of your token. The created token
- will inherit your policies, or can be assigned a subset of your policies.
-
- A lease can also be associated with the token. If a lease is not associated
- with the token, then it cannot be renewed. If a lease is associated with
- the token, it will expire after that amount of time unless it is renewed.
-
- Metadata associated with the token (specified with "-metadata") is
- written to the audit log when the token is used.
-
- If a role is specified, the role may override parameters specified here.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Token Options:
-
- -id="7699125c-d8...." The token value that clients will use to authenticate
- with Vault. If not provided this defaults to a 36
- character UUID. A root token is required to specify
- the ID of a token.
-
- -display-name="name" A display name to associate with this token. This
- is a non-security sensitive value used to help
- identify created secrets, i.e. prefixes.
-
- -ttl="1h" Initial TTL to associate with the token; renewals can
- extend this value.
-
- -explicit-max-ttl="1h" An explicit maximum lifetime for the token. Unlike
- normal token TTLs, which can be renewed up until the
- maximum TTL set on the auth/token mount or the system
- configuration file, this lifetime is a hard limit set
- on the token itself and cannot be exceeded.
-
- -period="1h" If specified, the token will be periodic; it will
- have no maximum TTL (unless an "explicit-max-ttl" is
- also set) but every renewal will use the given
- period. Requires a root/sudo token to use.
-
- -renewable=true Whether or not the token is renewable to extend its
- TTL up to Vault's configured maximum TTL for tokens.
- This defaults to true; set to false to disable
- renewal of this token.
-
- -metadata="key=value" Metadata to associate with the token. This shows
- up in the audit log. This can be specified multiple
- times.
-
- -orphan If specified, the token will have no parent. This
- prevents the new token from being revoked with
- your token. Requires a root/sudo token to use.
-
- -no-default-policy If specified, the token will not have the "default"
- policy included in its policy set.
-
- -policy="name" Policy to associate with this token. This can be
- specified multiple times.
-
- -use-limit=5 The number of times this token can be used until
- it is automatically revoked.
-
- -format=table The format for output. By default it is a whitespace-
- delimited table. This can also be json or yaml.
-
- -role=name If set, the token will be created against the named
- role. The role may override other parameters. This
- requires the client to have permissions on the
- appropriate endpoint (auth/token/create/).
-`
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token_create_test.go b/vendor/github.com/hashicorp/vault/command/token_create_test.go
deleted file mode 100644
index 9db2a26..0000000
--- a/vendor/github.com/hashicorp/vault/command/token_create_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package command
-
-import (
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestTokenCreate(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenCreateCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Ensure we get lease info
- output := ui.OutputWriter.String()
- if !strings.Contains(output, "token_duration") {
- t.Fatalf("bad: %#v", output)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token_lookup.go b/vendor/github.com/hashicorp/vault/command/token_lookup.go
deleted file mode 100644
index c1c62ef..0000000
--- a/vendor/github.com/hashicorp/vault/command/token_lookup.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/meta"
-)
-
-// TokenLookupCommand is a Command that outputs details about the
-// provided.
-type TokenLookupCommand struct {
- meta.Meta
-}
-
-func (c *TokenLookupCommand) Run(args []string) int {
- var format string
- var accessor bool
- flags := c.Meta.FlagSet("token-lookup", meta.FlagSetDefault)
- flags.BoolVar(&accessor, "accessor", false, "")
- flags.StringVar(&format, "format", "table", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) > 1 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\ntoken-lookup expects at most one argument"))
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "error initializing client: %s", err))
- return 2
- }
-
- var secret *api.Secret
- switch {
- case !accessor && len(args) == 0:
- secret, err = client.Auth().Token().LookupSelf()
- case !accessor && len(args) == 1:
- secret, err = client.Auth().Token().Lookup(args[0])
- case accessor && len(args) == 1:
- secret, err = client.Auth().Token().LookupAccessor(args[0])
- default:
- // This happens only when accessor is set and no argument is passed
- c.Ui.Error(fmt.Sprintf("token-lookup expects an argument when accessor flag is set"))
- return 1
- }
-
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "error looking up token: %s", err))
- return 1
- }
- return OutputSecret(c.Ui, format, secret)
-}
-
-func doTokenLookup(args []string, client *api.Client) (*api.Secret, error) {
- if len(args) == 0 {
- return client.Auth().Token().LookupSelf()
- }
-
- token := args[0]
- return client.Auth().Token().Lookup(token)
-}
-
-func (c *TokenLookupCommand) Synopsis() string {
- return "Display information about the specified token"
-}
-
-func (c *TokenLookupCommand) Help() string {
- helpText := `
-Usage: vault token-lookup [options] [token|accessor]
-
- Displays information about the specified token. If no token is specified, the
- operation is performed on the currently authenticated token i.e. lookup-self.
- Information about the token can be retrieved using the token accessor via the
- '-accessor' flag.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Token Lookup Options:
- -accessor A boolean flag, if set, treats the argument as an accessor of the token.
- Note that the response of the command when this is set, will not contain
- the token ID. Accessor is only meant for looking up the token properties
- (and for revocation via '/auth/token/revoke-accessor/' endpoint).
-
- -format=table The format for output. By default it is a whitespace-
- delimited table. This can also be json or yaml.
-
-`
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token_lookup_test.go b/vendor/github.com/hashicorp/vault/command/token_lookup_test.go
deleted file mode 100644
index 143b944..0000000
--- a/vendor/github.com/hashicorp/vault/command/token_lookup_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestTokenLookupAccessor(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenLookupCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
- args := []string{
- "-address", addr,
- }
- c.Run(args)
-
- // Create a new token for us to use
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Lease: "1h",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Enable the accessor flag
- args = append(args, "-accessor")
-
- // Expect failure if no argument is passed when accessor flag is set
- code := c.Run(args)
- if code == 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Add token accessor as arg
- args = append(args, resp.Auth.Accessor)
- code = c.Run(args)
- if code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestTokenLookupSelf(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenLookupCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
-
- // Run it against itself
- code := c.Run(args)
-
- // Verify it worked
- if code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestTokenLookup(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenLookupCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
- // Run it once for client
- c.Run(args)
-
- // Create a new token for us to use
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Lease: "1h",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Add token as arg for real test and run it
- args = append(args, resp.Auth.ClientToken)
- code := c.Run(args)
-
- // Verify it worked
- if code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token_renew.go b/vendor/github.com/hashicorp/vault/command/token_renew.go
deleted file mode 100644
index 8ec1a55..0000000
--- a/vendor/github.com/hashicorp/vault/command/token_renew.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
- "time"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/meta"
-)
-
-// TokenRenewCommand is a Command that mounts a new mount.
-type TokenRenewCommand struct {
- meta.Meta
-}
-
-func (c *TokenRenewCommand) Run(args []string) int {
- var format, increment string
- flags := c.Meta.FlagSet("token-renew", meta.FlagSetDefault)
- flags.StringVar(&format, "format", "table", "")
- flags.StringVar(&increment, "increment", "", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) > 2 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\ntoken-renew expects at most two arguments"))
- return 1
- }
-
- var token string
- if len(args) > 0 {
- token = args[0]
- }
-
- var inc int
- // If both are specified prefer the argument
- if len(args) == 2 {
- increment = args[1]
- }
- if increment != "" {
- dur, err := parseutil.ParseDurationSecond(increment)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Invalid increment: %s", err))
- return 1
- }
-
- inc = int(dur / time.Second)
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- // If the given token is the same as the client's, use renew-self instead
- // as this is far more likely to be allowed via policy
- var secret *api.Secret
- if token == "" {
- secret, err = client.Auth().Token().RenewSelf(inc)
- } else {
- secret, err = client.Auth().Token().Renew(token, inc)
- }
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error renewing token: %s", err))
- return 1
- }
-
- return OutputSecret(c.Ui, format, secret)
-}
-
-func (c *TokenRenewCommand) Synopsis() string {
- return "Renew an auth token if there is an associated lease"
-}
-
-func (c *TokenRenewCommand) Help() string {
- helpText := `
-Usage: vault token-renew [options] [token] [increment]
-
- Renew an auth token, extending the amount of time it can be used. If a token
- is given to the command, '/auth/token/renew' will be called with the given
- token; otherwise, '/auth/token/renew-self' will be called with the client
- token.
-
- This command is similar to "renew", but "renew" is only for leases; this
- command is only for tokens.
-
- An optional increment can be given to request a certain number of seconds to
- increment the lease. This request is advisory; Vault may not adhere to it at
- all. If a token is being passed in on the command line, the increment can as
- well; otherwise it must be passed in via the '-increment' flag.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Token Renew Options:
-
- -increment=3600 The desired increment. If not supplied, Vault will
- use the default TTL. If supplied, it may still be
- ignored. This can be submitted as an integer number
- of seconds or a string duration (e.g. "72h").
-
- -format=table The format for output. By default it is a whitespace-
- delimited table. This can also be json or yaml.
-
-`
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token_renew_test.go b/vendor/github.com/hashicorp/vault/command/token_renew_test.go
deleted file mode 100644
index 270ee7e..0000000
--- a/vendor/github.com/hashicorp/vault/command/token_renew_test.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestTokenRenew(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenRenewCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
-
- // Run it once for client
- c.Run(args)
-
- // Create a token
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Lease: "1h",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Renew, passing in the token
- args = append(args, resp.Auth.ClientToken)
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestTokenRenewWithIncrement(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenRenewCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
-
- // Run it once for client
- c.Run(args)
-
- // Create a token
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Lease: "1h",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Renew, passing in the token
- args = append(args, resp.Auth.ClientToken)
- args = append(args, "72h")
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestTokenRenewSelf(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenRenewCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
-
- // Run it once for client
- c.Run(args)
-
- // Create a token
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Lease: "1h",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if resp.Auth.ClientToken == "" {
- t.Fatal("returned client token is empty")
- }
-
- c.Meta.ClientToken = resp.Auth.ClientToken
-
- // Renew using the self endpoint
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestTokenRenewSelfWithIncrement(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenRenewCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
-
- // Run it once for client
- c.Run(args)
-
- // Create a token
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Lease: "1h",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if resp.Auth.ClientToken == "" {
- t.Fatal("returned client token is empty")
- }
-
- c.Meta.ClientToken = resp.Auth.ClientToken
-
- args = append(args, "-increment=72h")
- // Renew using the self endpoint
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token_revoke.go b/vendor/github.com/hashicorp/vault/command/token_revoke.go
deleted file mode 100644
index 6e4105d..0000000
--- a/vendor/github.com/hashicorp/vault/command/token_revoke.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// TokenRevokeCommand is a Command that mounts a new mount.
-type TokenRevokeCommand struct {
- meta.Meta
-}
-
-func (c *TokenRevokeCommand) Run(args []string) int {
- var mode string
- var accessor bool
- var self bool
- var token string
- flags := c.Meta.FlagSet("token-revoke", meta.FlagSetDefault)
- flags.BoolVar(&accessor, "accessor", false, "")
- flags.BoolVar(&self, "self", false, "")
- flags.StringVar(&mode, "mode", "", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- switch {
- case len(args) == 1 && !self:
- token = args[0]
- case len(args) != 0 && self:
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\ntoken-revoke expects no arguments when revoking self"))
- return 1
- case len(args) != 1 && !self:
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\ntoken-revoke expects one argument or the 'self' flag"))
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- var fn func(string) error
- // Handle all 6 possible combinations
- switch {
- case !accessor && self && mode == "":
- fn = client.Auth().Token().RevokeSelf
- case !accessor && !self && mode == "":
- fn = client.Auth().Token().RevokeTree
- case !accessor && !self && mode == "orphan":
- fn = client.Auth().Token().RevokeOrphan
- case !accessor && !self && mode == "path":
- fn = client.Sys().RevokePrefix
- case accessor && !self && mode == "":
- fn = client.Auth().Token().RevokeAccessor
- case accessor && self:
- c.Ui.Error("token-revoke cannot be run on self when 'accessor' flag is set")
- return 1
- case self && mode != "":
- c.Ui.Error("token-revoke cannot be run on self when 'mode' flag is set")
- return 1
- case accessor && mode == "orphan":
- c.Ui.Error("token-revoke cannot be run for 'orphan' mode when 'accessor' flag is set")
- return 1
- case accessor && mode == "path":
- c.Ui.Error("token-revoke cannot be run for 'path' mode when 'accessor' flag is set")
- return 1
- }
-
- if err := fn(token); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error revoking token: %s", err))
- return 2
- }
-
- c.Ui.Output("Success! Token revoked if it existed.")
- return 0
-}
-
-func (c *TokenRevokeCommand) Synopsis() string {
- return "Revoke one or more auth tokens"
-}
-
-func (c *TokenRevokeCommand) Help() string {
- helpText := `
-Usage: vault token-revoke [options] [token|accessor]
-
- Revoke one or more auth tokens.
-
- This command revokes auth tokens. Use the "revoke" command for
- revoking secrets.
-
- Depending on the flags used, auth tokens can be revoked in multiple ways
- depending on the "-mode" flag:
-
- * Without any value, the token specified and all of its children
- will be revoked.
-
- * With the "orphan" value, only the specific token will be revoked.
- All of its children will be orphaned.
-
- * With the "path" value, tokens created from the given auth path
- prefix will be deleted, along with all their children. In this case
- the "token" arg above is actually a "path". This mode does *not*
- work with token values or parts of token values.
-
- Token can be revoked using the token accessor. This can be done by
- setting the '-accessor' flag. Note that when '-accessor' flag is set,
- '-mode' should not be set for 'orphan' or 'path'. This is because,
- a token accessor always revokes the token along with its child tokens.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Token Options:
-
- -accessor A boolean flag, if set, treats the argument as an accessor of the token.
- Note that accessor can also be used for looking up the token properties
- via '/auth/token/lookup-accessor/' endpoint.
- Accessor is used when there is no access to token ID.
-
- -self A boolean flag, if set, the operation is performed on the currently
- authenticated token i.e. lookup-self.
-
- -mode=value The type of revocation to do. See the documentation
- above for more information.
-
-`
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/token_revoke_test.go b/vendor/github.com/hashicorp/vault/command/token_revoke_test.go
deleted file mode 100644
index 7265a10..0000000
--- a/vendor/github.com/hashicorp/vault/command/token_revoke_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestTokenRevokeAccessor(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenRevokeCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
-
- // Run it once for client
- c.Run(args)
-
- // Create a token
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- resp, err := client.Auth().Token().Create(nil)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Treat the argument as accessor
- args = append(args, "-accessor")
- if code := c.Run(args); code == 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Verify it worked with proper accessor
- args1 := append(args, resp.Auth.Accessor)
- if code := c.Run(args1); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Fail if mode is set to 'orphan' when accessor is set
- args2 := append(args, "-mode=\"orphan\"")
- if code := c.Run(args2); code == 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- // Fail if mode is set to 'path' when accessor is set
- args3 := append(args, "-mode=\"path\"")
- if code := c.Run(args3); code == 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
-
-func TestTokenRevoke(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenRevokeCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
-
- // Run it once for client
- c.Run(args)
-
- // Create a token
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- resp, err := client.Auth().Token().Create(nil)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Verify it worked
- args = append(args, resp.Auth.ClientToken)
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/unmount.go b/vendor/github.com/hashicorp/vault/command/unmount.go
deleted file mode 100644
index b04e532..0000000
--- a/vendor/github.com/hashicorp/vault/command/unmount.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/meta"
-)
-
-// UnmountCommand is a Command that mounts a new mount.
-type UnmountCommand struct {
- meta.Meta
-}
-
-func (c *UnmountCommand) Run(args []string) int {
- flags := c.Meta.FlagSet("mount", meta.FlagSetDefault)
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) != 1 {
- flags.Usage()
- c.Ui.Error(fmt.Sprintf(
- "\nunmount expects one argument: the path to unmount"))
- return 1
- }
-
- path := args[0]
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- if err := client.Sys().Unmount(path); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Unmount error: %s", err))
- return 2
- }
-
- c.Ui.Output(fmt.Sprintf(
- "Successfully unmounted '%s' if it was mounted", path))
-
- return 0
-}
-
-func (c *UnmountCommand) Synopsis() string {
- return "Unmount a secret backend"
-}
-
-func (c *UnmountCommand) Help() string {
- helpText := `
-Usage: vault unmount [options] path
-
- Unmount a secret backend.
-
- This command unmounts a secret backend. All the secrets created
- by this backend will be revoked and its Vault data will be deleted.
-
-General Options:
-` + meta.GeneralOptionsUsage()
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/unmount_test.go b/vendor/github.com/hashicorp/vault/command/unmount_test.go
deleted file mode 100644
index 1af5ef8..0000000
--- a/vendor/github.com/hashicorp/vault/command/unmount_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestUnmount(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &UnmountCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "secret",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mounts, err := client.Sys().ListMounts()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- _, ok := mounts["secret/"]
- if ok {
- t.Fatal("should not have mount")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/unseal.go b/vendor/github.com/hashicorp/vault/command/unseal.go
deleted file mode 100644
index 2dfb947..0000000
--- a/vendor/github.com/hashicorp/vault/command/unseal.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package command
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/hashicorp/vault/helper/password"
- "github.com/hashicorp/vault/meta"
-)
-
-// UnsealCommand is a Command that unseals the vault.
-type UnsealCommand struct {
- meta.Meta
-
- // Key can be used to pre-seed the key. If it is set, it will not
- // be asked with the `password` helper.
- Key string
-}
-
-func (c *UnsealCommand) Run(args []string) int {
- var reset bool
- flags := c.Meta.FlagSet("unseal", meta.FlagSetDefault)
- flags.BoolVar(&reset, "reset", false, "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- sealStatus, err := client.Sys().SealStatus()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error checking seal status: %s", err))
- return 2
- }
-
- if !sealStatus.Sealed {
- c.Ui.Output("Vault is already unsealed.")
- return 0
- }
-
- args = flags.Args()
- if reset {
- sealStatus, err = client.Sys().ResetUnsealProcess()
- } else {
- value := c.Key
- if len(args) > 0 {
- value = args[0]
- }
- if value == "" {
- fmt.Printf("Key (will be hidden): ")
- value, err = password.Read(os.Stdin)
- fmt.Printf("\n")
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error attempting to ask for password. The raw error message\n"+
- "is shown below, but the most common reason for this error is\n"+
- "that you attempted to pipe a value into unseal or you're\n"+
- "executing `vault unseal` from outside of a terminal.\n\n"+
- "You should use `vault unseal` from a terminal for maximum\n"+
- "security. If this isn't an option, the unseal key can be passed\n"+
- "in using the first parameter.\n\n"+
- "Raw error: %s", err))
- return 1
- }
- }
- sealStatus, err = client.Sys().Unseal(strings.TrimSpace(value))
- }
-
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error: %s", err))
- return 1
- }
-
- c.Ui.Output(fmt.Sprintf(
- "Sealed: %v\n"+
- "Key Shares: %d\n"+
- "Key Threshold: %d\n"+
- "Unseal Progress: %d\n"+
- "Unseal Nonce: %v",
- sealStatus.Sealed,
- sealStatus.N,
- sealStatus.T,
- sealStatus.Progress,
- sealStatus.Nonce,
- ))
-
- return 0
-}
-
-func (c *UnsealCommand) Synopsis() string {
- return "Unseals the Vault server"
-}
-
-func (c *UnsealCommand) Help() string {
- helpText := `
-Usage: vault unseal [options] [key]
-
- Unseal the vault by entering a portion of the master key. Once all
- portions are entered, the vault will be unsealed.
-
- Every Vault server initially starts as sealed. It cannot perform any
- operation except unsealing until it is sealed. Secrets cannot be accessed
- in any way until the vault is unsealed. This command allows you to enter
- a portion of the master key to unseal the vault.
-
- The unseal key can be specified via the command line, but this is
- not recommended. The key may then live in your terminal history. This
- only exists to assist in scripting.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Unseal Options:
-
- -reset Reset the unsealing process by throwing away
- prior keys in process to unseal the vault.
-
-`
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/unseal_test.go b/vendor/github.com/hashicorp/vault/command/unseal_test.go
deleted file mode 100644
index 699fdd8..0000000
--- a/vendor/github.com/hashicorp/vault/command/unseal_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package command
-
-import (
- "encoding/hex"
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestUnseal(t *testing.T) {
- core := vault.TestCore(t)
- keys, _ := vault.TestCoreInit(t, core)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
-
- for _, key := range keys {
- c := &UnsealCommand{
- Key: hex.EncodeToString(key),
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- args := []string{"-address", addr}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
- }
-
- sealed, err := core.Sealed()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
-}
-
-func TestUnseal_arg(t *testing.T) {
- core := vault.TestCore(t)
- keys, _ := vault.TestCoreInit(t, core)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
-
- for _, key := range keys {
- c := &UnsealCommand{
- Meta: meta.Meta{
- Ui: ui,
- },
- }
-
- args := []string{"-address", addr, hex.EncodeToString(key)}
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
- }
-
- sealed, err := core.Sealed()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/unwrap.go b/vendor/github.com/hashicorp/vault/command/unwrap.go
deleted file mode 100644
index 5a21920..0000000
--- a/vendor/github.com/hashicorp/vault/command/unwrap.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package command
-
-import (
- "flag"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/meta"
-)
-
-// UnwrapCommand is a Command that behaves like ReadCommand but specifically
-// for unwrapping cubbyhole-wrapped secrets
-type UnwrapCommand struct {
- meta.Meta
-}
-
-func (c *UnwrapCommand) Run(args []string) int {
- var format string
- var field string
- var err error
- var secret *api.Secret
- var flags *flag.FlagSet
- flags = c.Meta.FlagSet("unwrap", meta.FlagSetDefault)
- flags.StringVar(&format, "format", "table", "")
- flags.StringVar(&field, "field", "", "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- var tokenID string
-
- args = flags.Args()
- switch len(args) {
- case 0:
- case 1:
- tokenID = args[0]
- default:
- c.Ui.Error("unwrap expects zero or one argument (the ID of the wrapping token)")
- flags.Usage()
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- secret, err = client.Logical().Unwrap(tokenID)
- if err != nil {
- c.Ui.Error(err.Error())
- return 1
- }
- if secret == nil {
- c.Ui.Error("Server gave empty response or secret returned was empty")
- return 1
- }
-
- // Handle single field output
- if field != "" {
- return PrintRawField(c.Ui, secret, field)
- }
-
- // Check if the original was a list response and format as a list if so
- if secret.Data != nil &&
- len(secret.Data) == 1 &&
- secret.Data["keys"] != nil {
- _, ok := secret.Data["keys"].([]interface{})
- if ok {
- return OutputList(c.Ui, format, secret)
- }
- }
- return OutputSecret(c.Ui, format, secret)
-}
-
-func (c *UnwrapCommand) Synopsis() string {
- return "Unwrap a wrapped secret"
-}
-
-func (c *UnwrapCommand) Help() string {
- helpText := `
-Usage: vault unwrap [options]
-
- Unwrap a wrapped secret.
-
- Unwraps the data wrapped by the given token ID. The returned result is the
- same as a 'read' operation on a non-wrapped secret.
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Read Options:
-
- -format=table The format for output. By default it is a whitespace-
- delimited table. This can also be json or yaml.
-
- -field=field If included, the raw value of the specified field
- will be output raw to stdout.
-
-`
- return strings.TrimSpace(helpText)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/unwrap_test.go b/vendor/github.com/hashicorp/vault/command/unwrap_test.go
deleted file mode 100644
index e5dc0bf..0000000
--- a/vendor/github.com/hashicorp/vault/command/unwrap_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package command
-
-import (
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestUnwrap(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &UnwrapCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "-field", "zip",
- }
-
- // Run once so the client is setup, ignore errors
- c.Run(args)
-
- // Get the client so we can write data
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- wrapLookupFunc := func(method, path string) string {
- if method == "GET" && path == "secret/foo" {
- return "60s"
- }
- if method == "LIST" && path == "secret" {
- return "60s"
- }
- return ""
- }
- client.SetWrappingLookupFunc(wrapLookupFunc)
-
- data := map[string]interface{}{"zip": "zap"}
- if _, err := client.Logical().Write("secret/foo", data); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- outer, err := client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if outer == nil {
- t.Fatal("outer response was nil")
- }
- if outer.WrapInfo == nil {
- t.Fatalf("outer wrapinfo was nil, response was %#v", *outer)
- }
-
- args = append(args, outer.WrapInfo.Token)
-
- // Run the read
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- output := ui.OutputWriter.String()
- if output != "zap\n" {
- t.Fatalf("unexpectd output:\n%s", output)
- }
-
- // Now test with list handling, specifically that it will be called with
- // the list output formatter
- ui.OutputWriter.Reset()
-
- outer, err = client.Logical().List("secret")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if outer == nil {
- t.Fatal("outer response was nil")
- }
- if outer.WrapInfo == nil {
- t.Fatalf("outer wrapinfo was nil, response was %#v", *outer)
- }
-
- args = []string{
- "-address", addr,
- outer.WrapInfo.Token,
- }
- // Run the read
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- output = ui.OutputWriter.String()
- if strings.TrimSpace(output) != "Keys\n----\nfoo" {
- t.Fatalf("unexpected output:\n%s", output)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/util.go b/vendor/github.com/hashicorp/vault/command/util.go
deleted file mode 100644
index 1eefc92..0000000
--- a/vendor/github.com/hashicorp/vault/command/util.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package command
-
-import (
- "fmt"
- "os"
- "reflect"
- "time"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/command/token"
- "github.com/mitchellh/cli"
-)
-
-// DefaultTokenHelper returns the token helper that is configured for Vault.
-func DefaultTokenHelper() (token.TokenHelper, error) {
- config, err := LoadConfig("")
- if err != nil {
- return nil, err
- }
-
- path := config.TokenHelper
- if path == "" {
- return &token.InternalTokenHelper{}, nil
- }
-
- path, err = token.ExternalTokenHelperPath(path)
- if err != nil {
- return nil, err
- }
- return &token.ExternalTokenHelper{BinaryPath: path}, nil
-}
-
-func PrintRawField(ui cli.Ui, secret *api.Secret, field string) int {
- var val interface{}
- switch {
- case secret.Auth != nil:
- switch field {
- case "token":
- val = secret.Auth.ClientToken
- case "token_accessor":
- val = secret.Auth.Accessor
- case "token_duration":
- val = secret.Auth.LeaseDuration
- case "token_renewable":
- val = secret.Auth.Renewable
- case "token_policies":
- val = secret.Auth.Policies
- default:
- val = secret.Data[field]
- }
-
- case secret.WrapInfo != nil:
- switch field {
- case "wrapping_token":
- val = secret.WrapInfo.Token
- case "wrapping_token_ttl":
- val = secret.WrapInfo.TTL
- case "wrapping_token_creation_time":
- val = secret.WrapInfo.CreationTime.Format(time.RFC3339Nano)
- case "wrapping_token_creation_path":
- val = secret.WrapInfo.CreationPath
- case "wrapped_accessor":
- val = secret.WrapInfo.WrappedAccessor
- default:
- val = secret.Data[field]
- }
-
- default:
- switch field {
- case "refresh_interval":
- val = secret.LeaseDuration
- default:
- val = secret.Data[field]
- }
- }
-
- if val != nil {
- // c.Ui.Output() prints a CR character which in this case is
- // not desired. Since Vault CLI currently only uses BasicUi,
- // which writes to standard output, os.Stdout is used here to
- // directly print the message. If mitchellh/cli exposes method
- // to print without CR, this check needs to be removed.
- if reflect.TypeOf(ui).String() == "*cli.BasicUi" {
- fmt.Fprintf(os.Stdout, "%v", val)
- } else {
- ui.Output(fmt.Sprintf("%v", val))
- }
- return 0
- } else {
- ui.Error(fmt.Sprintf(
- "Field %s not present in secret", field))
- return 1
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/version.go b/vendor/github.com/hashicorp/vault/command/version.go
deleted file mode 100644
index 4665436..0000000
--- a/vendor/github.com/hashicorp/vault/command/version.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package command
-
-import (
- "github.com/hashicorp/vault/version"
- "github.com/mitchellh/cli"
-)
-
-// VersionCommand is a Command implementation prints the version.
-type VersionCommand struct {
- VersionInfo *version.VersionInfo
- Ui cli.Ui
-}
-
-func (c *VersionCommand) Help() string {
- return ""
-}
-
-func (c *VersionCommand) Run(_ []string) int {
- out := c.VersionInfo.FullVersionNumber(true)
- if version.CgoEnabled {
- out += " (cgo)"
- }
- c.Ui.Output(out)
- return 0
-}
-
-func (c *VersionCommand) Synopsis() string {
- return "Prints the Vault version"
-}
diff --git a/vendor/github.com/hashicorp/vault/command/version_test.go b/vendor/github.com/hashicorp/vault/command/version_test.go
deleted file mode 100644
index 2a64569..0000000
--- a/vendor/github.com/hashicorp/vault/command/version_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package command
-
-import (
- "testing"
-
- "github.com/mitchellh/cli"
-)
-
-func TestVersionCommand_implements(t *testing.T) {
- var _ cli.Command = &VersionCommand{}
-}
diff --git a/vendor/github.com/hashicorp/vault/command/wrapping_test.go b/vendor/github.com/hashicorp/vault/command/wrapping_test.go
deleted file mode 100644
index a380cfc..0000000
--- a/vendor/github.com/hashicorp/vault/command/wrapping_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package command
-
-import (
- "os"
- "testing"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestWrapping_Env(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenLookupCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- }
- // Run it once for client
- c.Run(args)
-
- // Create a new token for us to use
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Lease: "1h",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- prevWrapTTLEnv := os.Getenv(api.EnvVaultWrapTTL)
- os.Setenv(api.EnvVaultWrapTTL, "5s")
- defer func() {
- os.Setenv(api.EnvVaultWrapTTL, prevWrapTTLEnv)
- }()
-
- // Now when we do a lookup-self the response should be wrapped
- args = append(args, resp.Auth.ClientToken)
-
- resp, err = client.Auth().Token().LookupSelf()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if resp == nil {
- t.Fatal("nil response")
- }
- if resp.WrapInfo == nil {
- t.Fatal("nil wrap info")
- }
- if resp.WrapInfo.Token == "" || resp.WrapInfo.TTL != 5 {
- t.Fatal("did not get token or ttl wrong")
- }
-}
-
-func TestWrapping_Flag(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &TokenLookupCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "-wrap-ttl", "5s",
- }
- // Run it once for client
- c.Run(args)
-
- // Create a new token for us to use
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- resp, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Lease: "1h",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if resp == nil {
- t.Fatal("nil response")
- }
- if resp.WrapInfo == nil {
- t.Fatal("nil wrap info")
- }
- if resp.WrapInfo.Token == "" || resp.WrapInfo.TTL != 5 {
- t.Fatal("did not get token or ttl wrong")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/write.go b/vendor/github.com/hashicorp/vault/command/write.go
deleted file mode 100644
index 6f7b495..0000000
--- a/vendor/github.com/hashicorp/vault/command/write.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package command
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
-
- "github.com/hashicorp/vault/helper/kv-builder"
- "github.com/hashicorp/vault/meta"
- "github.com/posener/complete"
-)
-
-// WriteCommand is a Command that puts data into the Vault.
-type WriteCommand struct {
- meta.Meta
-
- // The fields below can be overwritten for tests
- testStdin io.Reader
-}
-
-func (c *WriteCommand) Run(args []string) int {
- var field, format string
- var force bool
- flags := c.Meta.FlagSet("write", meta.FlagSetDefault)
- flags.StringVar(&format, "format", "table", "")
- flags.StringVar(&field, "field", "", "")
- flags.BoolVar(&force, "force", false, "")
- flags.BoolVar(&force, "f", false, "")
- flags.Usage = func() { c.Ui.Error(c.Help()) }
- if err := flags.Parse(args); err != nil {
- return 1
- }
-
- args = flags.Args()
- if len(args) < 1 {
- c.Ui.Error("write requires a path")
- flags.Usage()
- return 1
- }
-
- if len(args) < 2 && !force {
- c.Ui.Error("write expects at least two arguments; use -f to perform the write anyways")
- flags.Usage()
- return 1
- }
-
- path := args[0]
- if path[0] == '/' {
- path = path[1:]
- }
-
- data, err := c.parseData(args[1:])
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error loading data: %s", err))
- return 1
- }
-
- client, err := c.Client()
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 2
- }
-
- secret, err := client.Logical().Write(path, data)
- if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error writing data to %s: %s", path, err))
- return 1
- }
-
- if secret == nil {
- // Don't output anything if people aren't using the "human" output
- if format == "table" {
- c.Ui.Output(fmt.Sprintf("Success! Data written to: %s", path))
- }
- return 0
- }
-
- // Handle single field output
- if field != "" {
- return PrintRawField(c.Ui, secret, field)
- }
-
- return OutputSecret(c.Ui, format, secret)
-}
-
-func (c *WriteCommand) parseData(args []string) (map[string]interface{}, error) {
- var stdin io.Reader = os.Stdin
- if c.testStdin != nil {
- stdin = c.testStdin
- }
-
- builder := &kvbuilder.Builder{Stdin: stdin}
- if err := builder.Add(args...); err != nil {
- return nil, err
- }
-
- return builder.Map(), nil
-}
-
-func (c *WriteCommand) Synopsis() string {
- return "Write secrets or configuration into Vault"
-}
-
-func (c *WriteCommand) Help() string {
- helpText := `
-Usage: vault write [options] path [data]
-
- Write data (secrets or configuration) into Vault.
-
- Write sends data into Vault at the given path. The behavior of the write is
- determined by the backend at the given path. For example, writing to
- "aws/policy/ops" will create an "ops" IAM policy for the AWS backend
- (configuration), but writing to "consul/foo" will write a value directly into
- Consul at that key. Check the documentation of the logical backend you're
- using for more information on key structure.
-
- Data is sent via additional arguments in "key=value" pairs. If value begins
- with an "@", then it is loaded from a file. Write expects data in the file to
- be in JSON format. If you want to start the value with a literal "@", then
- prefix the "@" with a slash: "\@".
-
-General Options:
-` + meta.GeneralOptionsUsage() + `
-Write Options:
-
- -f | -force Force the write to continue without any data values
- specified. This allows writing to keys that do not
- need or expect any fields to be specified.
-
- -format=table The format for output. By default it is a whitespace-
- delimited table. This can also be json or yaml.
-
- -field=field If included, the raw value of the specified field
- will be output raw to stdout.
-
-`
- return strings.TrimSpace(helpText)
-}
-
-func (c *WriteCommand) AutocompleteArgs() complete.Predictor {
- return complete.PredictNothing
-}
-
-func (c *WriteCommand) AutocompleteFlags() complete.Flags {
- return complete.Flags{
- "-force": complete.PredictNothing,
- "-format": predictFormat,
- "-field": complete.PredictNothing,
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/command/write_test.go b/vendor/github.com/hashicorp/vault/command/write_test.go
deleted file mode 100644
index 5aa3c1e..0000000
--- a/vendor/github.com/hashicorp/vault/command/write_test.go
+++ /dev/null
@@ -1,272 +0,0 @@
-package command
-
-import (
- "io"
- "io/ioutil"
- "os"
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/vault"
- "github.com/mitchellh/cli"
-)
-
-func TestWrite(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &WriteCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "secret/foo",
- "value=bar",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- resp, err := client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if resp.Data["value"] != "bar" {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestWrite_arbitrary(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- stdinR, stdinW := io.Pipe()
- ui := new(cli.MockUi)
- c := &WriteCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
-
- testStdin: stdinR,
- }
-
- go func() {
- stdinW.Write([]byte(`{"foo":"bar"}`))
- stdinW.Close()
- }()
-
- args := []string{
- "-address", addr,
- "secret/foo",
- "-",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- resp, err := client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if resp.Data["foo"] != "bar" {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestWrite_escaped(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &WriteCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "secret/foo",
- "value=\\@bar",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- resp, err := client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if resp.Data["value"] != "@bar" {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestWrite_file(t *testing.T) {
- tf, err := ioutil.TempFile("", "vault")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- tf.Write([]byte(`{"foo":"bar"}`))
- tf.Close()
- defer os.Remove(tf.Name())
-
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &WriteCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "secret/foo",
- "@" + tf.Name(),
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- resp, err := client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if resp.Data["foo"] != "bar" {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestWrite_fileValue(t *testing.T) {
- tf, err := ioutil.TempFile("", "vault")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- tf.Write([]byte("foo"))
- tf.Close()
- defer os.Remove(tf.Name())
-
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &WriteCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "secret/foo",
- "value=@" + tf.Name(),
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-
- client, err := c.Client()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- resp, err := client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if resp.Data["value"] != "foo" {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestWrite_Output(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &WriteCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "auth/token/create",
- "display_name=foo",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
- if !strings.Contains(ui.OutputWriter.String(), "Key") {
- t.Fatalf("bad: %s", ui.OutputWriter.String())
- }
-}
-
-func TestWrite_force(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := http.TestServer(t, core)
- defer ln.Close()
-
- ui := new(cli.MockUi)
- c := &WriteCommand{
- Meta: meta.Meta{
- ClientToken: token,
- Ui: ui,
- },
- }
-
- args := []string{
- "-address", addr,
- "-force",
- "sys/rotate",
- }
- if code := c.Run(args); code != 0 {
- t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go b/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go
deleted file mode 100644
index 6b18968..0000000
--- a/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package awsutil
-
-import (
- "fmt"
- "net/http"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/defaults"
-)
-
-type CredentialsConfig struct {
- // The access key if static credentials are being used
- AccessKey string
-
- // The secret key if static credentials are being used
- SecretKey string
-
- // The session token if it is being used
- SessionToken string
-
- // If specified, the region will be provided to the config of the
- // EC2RoleProvider's client. This may be useful if you want to e.g. reuse
- // the client elsewhere.
- Region string
-
- // The filename for the shared credentials provider, if being used
- Filename string
-
- // The profile for the shared credentials provider, if being used
- Profile string
-
- // The http.Client to use, or nil for the client to use its default
- HTTPClient *http.Client
-}
-
-func (c *CredentialsConfig) GenerateCredentialChain() (*credentials.Credentials, error) {
- var providers []credentials.Provider
-
- switch {
- case c.AccessKey != "" && c.SecretKey != "":
- // Add the static credential provider
- providers = append(providers, &credentials.StaticProvider{
- Value: credentials.Value{
- AccessKeyID: c.AccessKey,
- SecretAccessKey: c.SecretKey,
- SessionToken: c.SessionToken,
- }})
- case c.AccessKey == "" && c.SecretKey == "":
- // Attempt to get credentials from the IAM instance role below
-
- default: // Have one or the other but not both and not neither
- return nil, fmt.Errorf(
- "static AWS client credentials haven't been properly configured (the access key or secret key were provided but not both)")
- }
-
- // Add the environment credential provider
- providers = append(providers, &credentials.EnvProvider{})
-
- // Add the shared credentials provider
- providers = append(providers, &credentials.SharedCredentialsProvider{
- Filename: c.Filename,
- Profile: c.Profile,
- })
-
- // Add the remote provider
- def := defaults.Get()
- if c.Region != "" {
- def.Config.Region = aws.String(c.Region)
- }
- if c.HTTPClient != nil {
- def.Config.HTTPClient = c.HTTPClient
- }
-
- providers = append(providers, defaults.RemoteCredProvider(*def.Config, def.Handlers))
-
- // Create the credentials required to access the API.
- creds := credentials.NewChainCredentials(providers)
- if creds == nil {
- return nil, fmt.Errorf("could not compile valid credential providers from static config, environemnt, shared, or instance metadata")
- }
-
- return creds, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/builtinplugins/builtin.go b/vendor/github.com/hashicorp/vault/helper/builtinplugins/builtin.go
deleted file mode 100644
index df424ce..0000000
--- a/vendor/github.com/hashicorp/vault/helper/builtinplugins/builtin.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package builtinplugins
-
-import (
- "github.com/hashicorp/vault/plugins/database/cassandra"
- "github.com/hashicorp/vault/plugins/database/hana"
- "github.com/hashicorp/vault/plugins/database/mongodb"
- "github.com/hashicorp/vault/plugins/database/mssql"
- "github.com/hashicorp/vault/plugins/database/mysql"
- "github.com/hashicorp/vault/plugins/database/postgresql"
- "github.com/hashicorp/vault/plugins/helper/database/credsutil"
-)
-
-// BuiltinFactory is the func signature that should be returned by
-// the plugin's New() func.
-type BuiltinFactory func() (interface{}, error)
-
-var plugins = map[string]BuiltinFactory{
- // These four plugins all use the same mysql implementation but with
- // different username settings passed by the constructor.
- "mysql-database-plugin": mysql.New(mysql.MetadataLen, mysql.MetadataLen, mysql.UsernameLen),
- "mysql-aurora-database-plugin": mysql.New(credsutil.NoneLength, mysql.LegacyMetadataLen, mysql.LegacyUsernameLen),
- "mysql-rds-database-plugin": mysql.New(credsutil.NoneLength, mysql.LegacyMetadataLen, mysql.LegacyUsernameLen),
- "mysql-legacy-database-plugin": mysql.New(credsutil.NoneLength, mysql.LegacyMetadataLen, mysql.LegacyUsernameLen),
-
- "postgresql-database-plugin": postgresql.New,
- "mssql-database-plugin": mssql.New,
- "cassandra-database-plugin": cassandra.New,
- "mongodb-database-plugin": mongodb.New,
- "hana-database-plugin": hana.New,
-}
-
-// Get returns the BuiltinFactory func for a particular backend plugin
-// from the plugins map.
-func Get(name string) (BuiltinFactory, bool) {
- f, ok := plugins[name]
- return f, ok
-}
-
-// Keys returns the list of plugin names that are considered builtin plugins.
-func Keys() []string {
- keys := make([]string, len(plugins))
-
- i := 0
- for k := range plugins {
- keys[i] = k
- i++
- }
-
- return keys
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/certutil/certutil_test.go b/vendor/github.com/hashicorp/vault/helper/certutil/certutil_test.go
deleted file mode 100644
index 06769ff..0000000
--- a/vendor/github.com/hashicorp/vault/helper/certutil/certutil_test.go
+++ /dev/null
@@ -1,666 +0,0 @@
-package certutil
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "reflect"
- "testing"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/api"
-)
-
-// Tests converting back and forth between a CertBundle and a ParsedCertBundle.
-//
-// Also tests the GetSubjKeyID, GetHexFormatted, and
-// ParsedCertBundle.getSigner functions.
-func TestCertBundleConversion(t *testing.T) {
- cbuts := []*CertBundle{
- refreshRSACertBundle(),
- refreshRSACertBundleWithChain(),
- refreshRSA8CertBundle(),
- refreshRSA8CertBundleWithChain(),
- refreshECCertBundle(),
- refreshECCertBundleWithChain(),
- refreshEC8CertBundle(),
- refreshEC8CertBundleWithChain(),
- }
-
- for i, cbut := range cbuts {
- pcbut, err := cbut.ToParsedCertBundle()
- if err != nil {
- t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
- t.Errorf("Error converting to parsed cert bundle: %s", err)
- continue
- }
-
- err = compareCertBundleToParsedCertBundle(cbut, pcbut)
- if err != nil {
- t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
- t.Errorf(err.Error())
- }
-
- cbut, err := pcbut.ToCertBundle()
- if err != nil {
- t.Fatalf("Error converting to cert bundle: %s", err)
- }
-
- err = compareCertBundleToParsedCertBundle(cbut, pcbut)
- if err != nil {
- t.Fatalf(err.Error())
- }
- }
-}
-
-func BenchmarkCertBundleParsing(b *testing.B) {
- for i := 0; i < b.N; i++ {
- cbuts := []*CertBundle{
- refreshRSACertBundle(),
- refreshRSACertBundleWithChain(),
- refreshRSA8CertBundle(),
- refreshRSA8CertBundleWithChain(),
- refreshECCertBundle(),
- refreshECCertBundleWithChain(),
- refreshEC8CertBundle(),
- refreshEC8CertBundleWithChain(),
- }
-
- for i, cbut := range cbuts {
- pcbut, err := cbut.ToParsedCertBundle()
- if err != nil {
- b.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
- b.Errorf("Error converting to parsed cert bundle: %s", err)
- continue
- }
-
- cbut, err = pcbut.ToCertBundle()
- if err != nil {
- b.Fatalf("Error converting to cert bundle: %s", err)
- }
- }
- }
-}
-
-func TestCertBundleParsing(t *testing.T) {
- cbuts := []*CertBundle{
- refreshRSACertBundle(),
- refreshRSACertBundleWithChain(),
- refreshRSA8CertBundle(),
- refreshRSA8CertBundleWithChain(),
- refreshECCertBundle(),
- refreshECCertBundleWithChain(),
- refreshEC8CertBundle(),
- refreshEC8CertBundleWithChain(),
- }
-
- for i, cbut := range cbuts {
- jsonString, err := json.Marshal(cbut)
- if err != nil {
- t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
- t.Fatalf("Error marshaling testing certbundle to JSON: %s", err)
- }
- pcbut, err := ParsePKIJSON(jsonString)
- if err != nil {
- t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
- t.Fatalf("Error during JSON bundle handling: %s", err)
- }
- err = compareCertBundleToParsedCertBundle(cbut, pcbut)
- if err != nil {
- t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
- t.Fatalf(err.Error())
- }
-
- secret := &api.Secret{
- Data: structs.New(cbut).Map(),
- }
- pcbut, err = ParsePKIMap(secret.Data)
- if err != nil {
- t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
- t.Fatalf("Error during JSON bundle handling: %s", err)
- }
- err = compareCertBundleToParsedCertBundle(cbut, pcbut)
- if err != nil {
- t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
- t.Fatalf(err.Error())
- }
-
- pcbut, err = ParsePEMBundle(cbut.ToPEMBundle())
- if err != nil {
- t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
- t.Fatalf("Error during JSON bundle handling: %s", err)
- }
- err = compareCertBundleToParsedCertBundle(cbut, pcbut)
- if err != nil {
- t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
- t.Fatalf(err.Error())
- }
- }
-}
-
-func compareCertBundleToParsedCertBundle(cbut *CertBundle, pcbut *ParsedCertBundle) error {
- if cbut == nil {
- return fmt.Errorf("Got nil bundle")
- }
- if pcbut == nil {
- return fmt.Errorf("Got nil parsed bundle")
- }
-
- switch {
- case pcbut.Certificate == nil:
- return fmt.Errorf("Parsed bundle has nil certificate")
- case pcbut.PrivateKey == nil:
- return fmt.Errorf("Parsed bundle has nil private key")
- }
-
- switch cbut.PrivateKey {
- case privRSAKeyPem:
- if pcbut.PrivateKeyType != RSAPrivateKey {
- return fmt.Errorf("Parsed bundle has wrong private key type: %v, should be 'rsa' (%v)", pcbut.PrivateKeyType, RSAPrivateKey)
- }
- case privRSA8KeyPem:
- if pcbut.PrivateKeyType != RSAPrivateKey {
- return fmt.Errorf("Parsed bundle has wrong pkcs8 private key type: %v, should be 'rsa' (%v)", pcbut.PrivateKeyType, RSAPrivateKey)
- }
- case privECKeyPem:
- if pcbut.PrivateKeyType != ECPrivateKey {
- return fmt.Errorf("Parsed bundle has wrong private key type: %v, should be 'ec' (%v)", pcbut.PrivateKeyType, ECPrivateKey)
- }
- case privEC8KeyPem:
- if pcbut.PrivateKeyType != ECPrivateKey {
- return fmt.Errorf("Parsed bundle has wrong pkcs8 private key type: %v, should be 'ec' (%v)", pcbut.PrivateKeyType, ECPrivateKey)
- }
- default:
- return fmt.Errorf("Parsed bundle has unknown private key type")
- }
-
- subjKeyID, err := GetSubjKeyID(pcbut.PrivateKey)
- if err != nil {
- return fmt.Errorf("Error when getting subject key id: %s", err)
- }
- if bytes.Compare(subjKeyID, pcbut.Certificate.SubjectKeyId) != 0 {
- return fmt.Errorf("Parsed bundle private key does not match subject key id")
- }
-
- switch {
- case len(pcbut.CAChain) > 0 && len(cbut.CAChain) == 0:
- return fmt.Errorf("Parsed bundle ca chain has certs when cert bundle does not")
- case len(pcbut.CAChain) == 0 && len(cbut.CAChain) > 0:
- return fmt.Errorf("Cert bundle ca chain has certs when parsed cert bundle does not")
- }
-
- cb, err := pcbut.ToCertBundle()
- if err != nil {
- return fmt.Errorf("Thrown error during parsed bundle conversion: %s\n\nInput was: %#v", err, *pcbut)
- }
-
- switch {
- case len(cb.Certificate) == 0:
- return fmt.Errorf("Bundle has nil certificate")
- case len(cb.PrivateKey) == 0:
- return fmt.Errorf("Bundle has nil private key")
- case len(cb.CAChain[0]) == 0:
- return fmt.Errorf("Bundle has nil issuing CA")
- }
-
- switch pcbut.PrivateKeyType {
- case RSAPrivateKey:
- if cb.PrivateKey != privRSAKeyPem && cb.PrivateKey != privRSA8KeyPem {
- return fmt.Errorf("Bundle private key does not match")
- }
- case ECPrivateKey:
- if cb.PrivateKey != privECKeyPem && cb.PrivateKey != privEC8KeyPem {
- return fmt.Errorf("Bundle private key does not match")
- }
- default:
- return fmt.Errorf("CertBundle has unknown private key type")
- }
-
- if cb.SerialNumber != GetHexFormatted(pcbut.Certificate.SerialNumber.Bytes(), ":") {
- return fmt.Errorf("Bundle serial number does not match")
- }
-
- switch {
- case len(pcbut.CAChain) > 0 && len(cb.CAChain) == 0:
- return fmt.Errorf("Parsed bundle ca chain has certs when cert bundle does not")
- case len(pcbut.CAChain) == 0 && len(cb.CAChain) > 0:
- return fmt.Errorf("Cert bundle ca chain has certs when parsed cert bundle does not")
- case !reflect.DeepEqual(cbut.CAChain, cb.CAChain):
- return fmt.Errorf("Cert bundle ca chain does not match: %#v\n\n%#v", cbut.CAChain, cb.CAChain)
- }
-
- return nil
-}
-
-func TestCSRBundleConversion(t *testing.T) {
- csrbuts := []*CSRBundle{
- refreshRSACSRBundle(),
- refreshECCSRBundle(),
- }
-
- for _, csrbut := range csrbuts {
- pcsrbut, err := csrbut.ToParsedCSRBundle()
- if err != nil {
- t.Fatalf("Error converting to parsed CSR bundle: %v", err)
- }
-
- err = compareCSRBundleToParsedCSRBundle(csrbut, pcsrbut)
- if err != nil {
- t.Fatalf(err.Error())
- }
-
- csrbut, err = pcsrbut.ToCSRBundle()
- if err != nil {
- t.Fatalf("Error converting to CSR bundle: %v", err)
- }
-
- err = compareCSRBundleToParsedCSRBundle(csrbut, pcsrbut)
- if err != nil {
- t.Fatalf(err.Error())
- }
- }
-}
-
-func compareCSRBundleToParsedCSRBundle(csrbut *CSRBundle, pcsrbut *ParsedCSRBundle) error {
- if csrbut == nil {
- return fmt.Errorf("Got nil bundle")
- }
- if pcsrbut == nil {
- return fmt.Errorf("Got nil parsed bundle")
- }
-
- switch {
- case pcsrbut.CSR == nil:
- return fmt.Errorf("Parsed bundle has nil csr")
- case pcsrbut.PrivateKey == nil:
- return fmt.Errorf("Parsed bundle has nil private key")
- }
-
- switch csrbut.PrivateKey {
- case privRSAKeyPem:
- if pcsrbut.PrivateKeyType != RSAPrivateKey {
- return fmt.Errorf("Parsed bundle has wrong private key type")
- }
- case privECKeyPem:
- if pcsrbut.PrivateKeyType != ECPrivateKey {
- return fmt.Errorf("Parsed bundle has wrong private key type")
- }
- default:
- return fmt.Errorf("Parsed bundle has unknown private key type")
- }
-
- csrb, err := pcsrbut.ToCSRBundle()
- if err != nil {
- return fmt.Errorf("Thrown error during parsed bundle conversion: %s\n\nInput was: %#v", err, *pcsrbut)
- }
-
- switch {
- case len(csrb.CSR) == 0:
- return fmt.Errorf("Bundle has nil certificate")
- case len(csrb.PrivateKey) == 0:
- return fmt.Errorf("Bundle has nil private key")
- }
-
- switch csrb.PrivateKeyType {
- case "rsa":
- if pcsrbut.PrivateKeyType != RSAPrivateKey {
- return fmt.Errorf("Bundle has wrong private key type")
- }
- if csrb.PrivateKey != privRSAKeyPem {
- return fmt.Errorf("Bundle private key does not match")
- }
- case "ec":
- if pcsrbut.PrivateKeyType != ECPrivateKey {
- return fmt.Errorf("Bundle has wrong private key type")
- }
- if csrb.PrivateKey != privECKeyPem {
- return fmt.Errorf("Bundle private key does not match")
- }
- default:
- return fmt.Errorf("Bundle has unknown private key type")
- }
-
- return nil
-}
-
-func TestTLSConfig(t *testing.T) {
- cbut := refreshRSACertBundle()
-
- pcbut, err := cbut.ToParsedCertBundle()
- if err != nil {
- t.Fatalf("Error getting parsed cert bundle: %s", err)
- }
-
- usages := []TLSUsage{
- TLSUnknown,
- TLSClient,
- TLSServer,
- TLSClient | TLSServer,
- }
-
- for _, usage := range usages {
- tlsConfig, err := pcbut.GetTLSConfig(usage)
- if err != nil {
- t.Fatalf("Error getting tls config: %s", err)
- }
- if tlsConfig == nil {
- t.Fatalf("Got nil tls.Config")
- }
-
- if len(tlsConfig.Certificates) != 1 {
- t.Fatalf("Unexpected length in config.Certificates")
- }
-
- // Length should be 2, since we passed in a CA
- if len(tlsConfig.Certificates[0].Certificate) != 2 {
- t.Fatalf("Did not find both certificates in config.Certificates.Certificate")
- }
-
- if tlsConfig.Certificates[0].Leaf != pcbut.Certificate {
- t.Fatalf("Leaf certificate does not match parsed bundle's certificate")
- }
-
- if tlsConfig.Certificates[0].PrivateKey != pcbut.PrivateKey {
- t.Fatalf("Config's private key does not match parsed bundle's private key")
- }
-
- switch usage {
- case TLSServer | TLSClient:
- if len(tlsConfig.ClientCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.ClientCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
- t.Fatalf("CA certificate not in client cert pool as expected")
- }
- if len(tlsConfig.RootCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.RootCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
- t.Fatalf("CA certificate not in root cert pool as expected")
- }
- case TLSServer:
- if len(tlsConfig.ClientCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.ClientCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
- t.Fatalf("CA certificate not in client cert pool as expected")
- }
- if tlsConfig.RootCAs != nil {
- t.Fatalf("Found root pools in config object when not expected")
- }
- case TLSClient:
- if len(tlsConfig.RootCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.RootCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
- t.Fatalf("CA certificate not in root cert pool as expected")
- }
- if tlsConfig.ClientCAs != nil {
- t.Fatalf("Found root pools in config object when not expected")
- }
- default:
- if tlsConfig.RootCAs != nil || tlsConfig.ClientCAs != nil {
- t.Fatalf("Found root pools in config object when not expected")
- }
- }
- }
-}
-
-func refreshRSA8CertBundle() *CertBundle {
- return &CertBundle{
- Certificate: certRSAPem,
- PrivateKey: privRSA8KeyPem,
- CAChain: []string{issuingCaChainPem[0]},
- }
-}
-
-func refreshRSA8CertBundleWithChain() *CertBundle {
- ret := refreshRSA8CertBundle()
- ret.CAChain = issuingCaChainPem
- return ret
-}
-
-func refreshRSACertBundle() *CertBundle {
- return &CertBundle{
- Certificate: certRSAPem,
- CAChain: []string{issuingCaChainPem[0]},
- PrivateKey: privRSAKeyPem,
- }
-}
-
-func refreshRSACertBundleWithChain() *CertBundle {
- ret := refreshRSACertBundle()
- ret.CAChain = issuingCaChainPem
- return ret
-}
-
-func refreshECCertBundle() *CertBundle {
- return &CertBundle{
- Certificate: certECPem,
- CAChain: []string{issuingCaChainPem[0]},
- PrivateKey: privECKeyPem,
- }
-}
-
-func refreshECCertBundleWithChain() *CertBundle {
- ret := refreshECCertBundle()
- ret.CAChain = issuingCaChainPem
- return ret
-}
-
-func refreshRSACSRBundle() *CSRBundle {
- return &CSRBundle{
- CSR: csrRSAPem,
- PrivateKey: privRSAKeyPem,
- }
-}
-
-func refreshECCSRBundle() *CSRBundle {
- return &CSRBundle{
- CSR: csrECPem,
- PrivateKey: privECKeyPem,
- }
-}
-
-func refreshEC8CertBundle() *CertBundle {
- return &CertBundle{
- Certificate: certECPem,
- PrivateKey: privEC8KeyPem,
- CAChain: []string{issuingCaChainPem[0]},
- }
-}
-
-func refreshEC8CertBundleWithChain() *CertBundle {
- ret := refreshEC8CertBundle()
- ret.CAChain = issuingCaChainPem
- return ret
-}
-
-var (
- privRSA8KeyPem = `-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC92mr7+D/tGkW5
-nvDH/fYkOLywbxsU9wU7lKVPCdj+zNzQYHiixTZtmZPYVTBj27lZgaUDUXuiw9Ru
-BWHTuAb/Cpn1I+71qJbh8FgWot+MRDFKuV0PLkgHz5eRVC4JmKy9hbcgo1q0FfGf
-qxL+VQmI0GcQ4IYK/ppVMrKbn4ndIg70uR46vPiU11GqRIz5wkiPeLklrhoWa5qE
-IHINnR83eHUbijaCuqPEcz0QTz0iLM8dfubVaJ+Gn/DseUtku+qBdcYcUK2hQyCc
-NRKtu953gr5hhEX0N9x5JBb10WaI1UL5HGa2wa6ndZ7yVb42B2WTHzNQRR5Cr4N4
-ve31//gRAgMBAAECggEAfEvyvTLDz5zix2tK4vTfYMmQp8amKWysjVx9eijNW8yO
-SRLQCGkrgEgLJphnjQk+6V3axjhjxKWHf9ygNrgGRJYRRBCZk1YkKpprYa6Sw0em
-KfD//z9iw1JjPi+p0HiXp6FSytiIOt0fC1U6oy7ThjJDOCZ3O92C94KwsviZjx9r
-DZbTLDm7Ya2LF4jGCq0dQ+AVqZ65QJ3yjdxm87PSE6q2eiV9wdMUx9RDOmFy+Meq
-Mm3L9TW1QzyFtFMXeIF5QYGpmxWP/iii5V0CP573apXMIqQ+wTNpwK3WU5iURypZ
-kJ1Iaxbzjfok6wpwLj7SJytF+fOVcygUxud7GPH8UQKBgQDPhQhB3+o+y+bwkUTx
-Qdj/YNKcA/bjo/b9KMq+3rufwN9u/DK5z7vVfVklidbh5DVqhlLREsdSuZvb/IHc
-OdCYwNeDxk1rLr+1W/iPYSBJod4eWDteIH1U9lts+/mH+u+iSsWVuikbeA8/MUJ3
-nnAYu4FR1nz8I/CrvGbQL/KCdQKBgQDqNNI562Ch+4dJ407F3MC4gNPwPgksfLXn
-ZRcPVVwGagil9oIIte0BIT0rAG/jVACfghGxfrj719uwjcFFxnUaSHGQcATseSf6
-SgoruIVF15lI4e8lEcWrOypsW8Id2/amwUiIWYCgwlYG2Q7dggpXfgjmKfjSlvJ8
-+yKR/Y6zrQKBgQCkx2aqICm5mWEUbtWGmJm9Ft3FQqSdV4n8tZJgAy6KiLUiRKHm
-x1vIBtNtqkj1b6c2odhK6ZVaS8XF5XgcLdBEKwQ2P5Uj4agaUyBIgYAI174u7DKf
-6D58423vWRln70qu3J6N6JdRl4DL1cqIf0dVbDYgjKcL82HcjCo7b4cqLQKBgFGU
-TJX4MxS5NIq8LrglCMw7s5c/RJrGZeZQBBRHO2LQlGqazviRxhhap5/O6ypYHE9z
-Uw5sgarXqaJ5/hR76FZbXZNeMZjdKtu35osMHwAQ9Ue5yz8yTZQza7eKzrbv4556
-PPWhl3hnuOdxvAfUQB3xvM/PVuijw5tdLtGDbK2RAoGBAKB7OsTgF7wVEkzccJTE
-hrbVKD+KBZz8WKnEgNoyyTIT0Kaugk15MCXkGrXIY8bW0IzYAw69qhTOgaWkcu4E
-JbTK5UerP8V+X7XPBiw72StPVM4bxaXx2/B+78IuMOI/GR0tHQCF8S6DwTHeBXnl
-ke8GFExnXHTPqG6Bku0r/G47
------END PRIVATE KEY-----`
-
- privRSAKeyPem = `-----BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAvdpq+/g/7RpFuZ7wx/32JDi8sG8bFPcFO5SlTwnY/szc0GB4
-osU2bZmT2FUwY9u5WYGlA1F7osPUbgVh07gG/wqZ9SPu9aiW4fBYFqLfjEQxSrld
-Dy5IB8+XkVQuCZisvYW3IKNatBXxn6sS/lUJiNBnEOCGCv6aVTKym5+J3SIO9Lke
-Orz4lNdRqkSM+cJIj3i5Ja4aFmuahCByDZ0fN3h1G4o2grqjxHM9EE89IizPHX7m
-1Wifhp/w7HlLZLvqgXXGHFCtoUMgnDUSrbved4K+YYRF9DfceSQW9dFmiNVC+Rxm
-tsGup3We8lW+Ngdlkx8zUEUeQq+DeL3t9f/4EQIDAQABAoIBAHxL8r0yw8+c4sdr
-SuL032DJkKfGpilsrI1cfXoozVvMjkkS0AhpK4BICyaYZ40JPuld2sY4Y8Slh3/c
-oDa4BkSWEUQQmZNWJCqaa2GuksNHpinw//8/YsNSYz4vqdB4l6ehUsrYiDrdHwtV
-OqMu04YyQzgmdzvdgveCsLL4mY8faw2W0yw5u2GtixeIxgqtHUPgFameuUCd8o3c
-ZvOz0hOqtnolfcHTFMfUQzphcvjHqjJty/U1tUM8hbRTF3iBeUGBqZsVj/4oouVd
-Aj+e92qVzCKkPsEzacCt1lOYlEcqWZCdSGsW8436JOsKcC4+0icrRfnzlXMoFMbn
-exjx/FECgYEAz4UIQd/qPsvm8JFE8UHY/2DSnAP246P2/SjKvt67n8Dfbvwyuc+7
-1X1ZJYnW4eQ1aoZS0RLHUrmb2/yB3DnQmMDXg8ZNay6/tVv4j2EgSaHeHlg7XiB9
-VPZbbPv5h/rvokrFlbopG3gPPzFCd55wGLuBUdZ8/CPwq7xm0C/ygnUCgYEA6jTS
-OetgofuHSeNOxdzAuIDT8D4JLHy152UXD1VcBmoIpfaCCLXtASE9KwBv41QAn4IR
-sX64+9fbsI3BRcZ1GkhxkHAE7Hkn+koKK7iFRdeZSOHvJRHFqzsqbFvCHdv2psFI
-iFmAoMJWBtkO3YIKV34I5in40pbyfPsikf2Os60CgYEApMdmqiApuZlhFG7VhpiZ
-vRbdxUKknVeJ/LWSYAMuioi1IkSh5sdbyAbTbapI9W+nNqHYSumVWkvFxeV4HC3Q
-RCsENj+VI+GoGlMgSIGACNe+Luwyn+g+fONt71kZZ+9KrtyejeiXUZeAy9XKiH9H
-VWw2IIynC/Nh3IwqO2+HKi0CgYBRlEyV+DMUuTSKvC64JQjMO7OXP0SaxmXmUAQU
-Rzti0JRqms74kcYYWqefzusqWBxPc1MObIGq16mief4Ue+hWW12TXjGY3Srbt+aL
-DB8AEPVHucs/Mk2UM2u3is627+Oeejz1oZd4Z7jncbwH1EAd8bzPz1boo8ObXS7R
-g2ytkQKBgQCgezrE4Be8FRJM3HCUxIa21Sg/igWc/FipxIDaMskyE9CmroJNeTAl
-5Bq1yGPG1tCM2AMOvaoUzoGlpHLuBCW0yuVHqz/Ffl+1zwYsO9krT1TOG8Wl8dvw
-fu/CLjDiPxkdLR0AhfEug8Ex3gV55ZHvBhRMZ1x0z6hugZLtK/xuOw==
------END RSA PRIVATE KEY-----`
-
- csrRSAPem = `-----BEGIN CERTIFICATE REQUEST-----
-MIICijCCAXICAQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx
-ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBALd2SVGs7QkYlYPOz9MvE+0DDRUIutQQkiCnqcV1
-1nO84uSSUjH0ALLVyRKgWIkobEqWJurKIk+oV9O+Le8EABxkt/ru6jaIFqZkwFE1
-JzSDPkAsR9jXP5M2MWlmo7qVc4Bry3oqlN3eLSFJP2ZH7b1ia8q9oWXPMxDdwuKR
-kv9hfkUPszr/gaQCNQXW0BoRe5vdr5+ikv4lrKpsBxvaAoYL1ngR41lCPKhmrgXP
-oreEuUcXzfCSSAV1CGbW2qhWc4I7/JFA8qqEBr5GP+AntStGxSbDO8JjD7/uULHC
-AReCloDdJE0jDz1355/0CAH4WmEJE/TI8Bq+vgd0jgzRgk0CAwEAAaAAMA0GCSqG
-SIb3DQEBCwUAA4IBAQAR8U1vZMJf7YFvGU69QvoWPTDe/o8SwYy1j+++AAO9Y7H2
-C7nb+9tnEMtXm+3pkY0aJIecAnq8H4QWimOrJa/ZsoZLzz9LKW2nzARdWo63j4nB
-jKld/EDBzQ/nQSTyoX7s9JiDiSC9yqTXBrPHSXruPbh7sE0yXROar+6atjNdCpDp
-uLw86gwewDJrMaB1aFAmDvwaRQQDONwRy0zG1UdMxLQxsxpKOHaGM/ZvV3FPir2B
-7mKupki/dvap5UW0lTMJBlKf3qhoeHKMHFo9i5vGCIkWUIv+XgTF0NjbYv9i7bfq
-WdW905v4wiuWRlddNwqFtLx9Pf1/fRJVT5mBbjIx
------END CERTIFICATE REQUEST-----`
-
- certRSAPem = `-----BEGIN CERTIFICATE-----
-MIIDfDCCAmSgAwIBAgIUad4Q9EhVvqc06H7fCfKaLGcyDw0wDQYJKoZIhvcNAQEL
-BQAwNzE1MDMGA1UEAxMsVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIFN1
-YiBBdXRob3JpdHkwHhcNMTYwODA0MTkyMjAyWhcNMTYwODA0MjAyMjMyWjAhMR8w
-HQYDVQQDExZWYXVsdCBUZXN0IENlcnRpZmljYXRlMIIBIjANBgkqhkiG9w0BAQEF
-AAOCAQ8AMIIBCgKCAQEAvdpq+/g/7RpFuZ7wx/32JDi8sG8bFPcFO5SlTwnY/szc
-0GB4osU2bZmT2FUwY9u5WYGlA1F7osPUbgVh07gG/wqZ9SPu9aiW4fBYFqLfjEQx
-SrldDy5IB8+XkVQuCZisvYW3IKNatBXxn6sS/lUJiNBnEOCGCv6aVTKym5+J3SIO
-9LkeOrz4lNdRqkSM+cJIj3i5Ja4aFmuahCByDZ0fN3h1G4o2grqjxHM9EE89IizP
-HX7m1Wifhp/w7HlLZLvqgXXGHFCtoUMgnDUSrbved4K+YYRF9DfceSQW9dFmiNVC
-+RxmtsGup3We8lW+Ngdlkx8zUEUeQq+DeL3t9f/4EQIDAQABo4GVMIGSMA4GA1Ud
-DwEB/wQEAwIDqDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0O
-BBYEFMKLVTrdDyyF0kTxkxcAMcGNcGWlMB8GA1UdIwQYMBaAFNmGqFL215GlYyD0
-mWVIoMB71s+NMCEGA1UdEQQaMBiCFlZhdWx0IFRlc3QgQ2VydGlmaWNhdGUwDQYJ
-KoZIhvcNAQELBQADggEBAJJP9OWG3W5uUluKdeFYCzKMIY+rsCUb86QrKRqQ5xYR
-w4pKC3yuryEfreBs3iQA4NNw2mMWxuI8t/i+km2H7NzQytTRn6L0sxTa8ThNZ3e7
-xCdWaZZzd1O6Xwq/pDbE1MZ/4z5nvsKaKJVVIvVFL5algi4A8njiFMVSww035c1e
-waLww4AOHydlLky/RJBJPOkQNoDBToC9ojDqPtNJVWWaQL2TsUCu+Q+L5QL5djgj
-LxPwqGOiM4SLSUrXSXMpHNLX1rhBH1/sNb3Kn1FDBaZ+M9kZglCDwuQyQuH8xKwB
-qukeKfgFUp7rH0yoQTZa0eaXAYTFoRLjnTQ+fS7e19s=
------END CERTIFICATE-----`
-
- privECKeyPem = `-----BEGIN EC PRIVATE KEY-----
-MHcCAQEEICC2XihYLxEYEseFesZEXjV1va6rMAdtkpkaxT4hGu5boAoGCCqGSM49
-AwEHoUQDQgAEti0uWkq7MAkQevNNrBpYY0FLni8OAZroHXkij2x6Vo0xIvClftbC
-L33BU/520t23TcewtQYsNqv86Bvhx9PeAw==
------END EC PRIVATE KEY-----`
-
- csrECPem = `-----BEGIN CERTIFICATE REQUEST-----
-MIHsMIGcAgEAMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
-HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwTjAQBgcqhkjOPQIBBgUr
-gQQAIQM6AATBZ3VXwBE9oeSREpM5b25PW6WiuLb4EXWpKZyjj552QYKYe7QBuGe9
-wvvgOeCBovN3tSuGKzTiUKAAMAoGCCqGSM49BAMCAz8AMDwCHFap/5XDuqtXCG1g
-ljbYH5OWGBqGYCfL2k2+/6cCHAuk1bmOkGx7JAq/fSPd09i0DQIqUu7WHQHms48=
------END CERTIFICATE REQUEST-----`
-
- privEC8KeyPem = `-----BEGIN PRIVATE KEY-----
-MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgILZeKFgvERgSx4V6
-xkReNXW9rqswB22SmRrFPiEa7luhRANCAAS2LS5aSrswCRB6802sGlhjQUueLw4B
-mugdeSKPbHpWjTEi8KV+1sIvfcFT/nbS3bdNx7C1Biw2q/zoG+HH094D
------END PRIVATE KEY-----`
-
- certECPem = `-----BEGIN CERTIFICATE-----
-MIICtzCCAZ+gAwIBAgIUNDYMWd9SOGVMs4I1hezvRnGDMyUwDQYJKoZIhvcNAQEL
-BQAwNzE1MDMGA1UEAxMsVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIFN1
-YiBBdXRob3JpdHkwHhcNMTYwODA0MTkyOTM0WhcNMTYwODA0MjAzMDA0WjAkMSIw
-IAYDVQQDExlWYXVsdCBUZXN0IEVDIENlcnRpZmljYXRlMFkwEwYHKoZIzj0CAQYI
-KoZIzj0DAQcDQgAEti0uWkq7MAkQevNNrBpYY0FLni8OAZroHXkij2x6Vo0xIvCl
-ftbCL33BU/520t23TcewtQYsNqv86Bvhx9PeA6OBmDCBlTAOBgNVHQ8BAf8EBAMC
-A6gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBStnbW/
-ga2/dz4FyRNafwhTzM1UbzAfBgNVHSMEGDAWgBTZhqhS9teRpWMg9JllSKDAe9bP
-jTAkBgNVHREEHTAbghlWYXVsdCBUZXN0IEVDIENlcnRpZmljYXRlMA0GCSqGSIb3
-DQEBCwUAA4IBAQBsPhwRB51de3sGBMnjDiOMViYpRH7kKhUWAY1W2W/1hqk5HgZw
-4c3r0LmdIQ94gShaXng8ojYRDW/5D7LeXJdbtLy9U29xfeCb+vqKDc2oN7Ps3/HB
-4YLnseqDiZFKPEAdOE4rtwyFWJI7JR9sOSG1B5El6duN0i9FWOLSklQ4EbV5R45r
-cy/fJq0DOYje7MXsFuNl5iQ92gfDjPD2P98DK9lCIquSzB3WkpjE41UtKJ0IKPeD
-wYoyl0J33Alxq2eC2subR7xISR3MzZFcdkzNNrBddeaSviYlR4SgTUiqOldAcdR4
-QZxtxazcUqQDZ+wZFOpBOnp94bzVeXT9BF+L
------END CERTIFICATE-----`
-
- issuingCaChainPem = []string{`-----BEGIN CERTIFICATE-----
-MIIDljCCAn6gAwIBAgIUHjciEzUzeNVqI9mwFJeduNtXWzMwDQYJKoZIhvcNAQEL
-BQAwMzExMC8GA1UEAxMoVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIEF1
-dGhvcml0eTAeFw0xNjA4MDQxOTEyNTdaFw0xNjA4MDUyMDEzMjdaMDcxNTAzBgNV
-BAMTLFZhdWx0IFRlc3RpbmcgSW50ZXJtZWRpYXRlIFN1YiBTdWIgQXV0aG9yaXR5
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy2pAH1U8KzhjO+MLRPTb
-ic7Iyk57d0TFnj6CAJWqZaKNGXoTkwD8wRCirY8mQv8YrfBy3hwGqSLYj6oxwA0R
-8FxsiWdf4gFTX2cJpxThFnIllGbzqIXnEZLvCIMydp44Ls9eYxoXfZQ9X24u/Wmf
-kWEQFGUzrpyklkIOx2Yo5g7OHbFLl3OfPz89/TDM8VeymlGzCTJZ+Y+iNGDBPT0L
-X9aE65lL76dUx/bcKnfQEgAcH4nkE4K/Kgjnj5umZKQUH4+6wKFwDCQT2RwaBkve
-WyAiz0LY9a1WFXt7RYCPs+QWLJAhv7wJL8l4gnxYA1k+ovLXDjUqYweU+WHV6/lR
-7wIDAQABo4GdMIGaMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
-A1UdDgQWBBTZhqhS9teRpWMg9JllSKDAe9bPjTAfBgNVHSMEGDAWgBRTY6430DXg
-cIDAnEnA+fostjcbqDA3BgNVHREEMDAugixWYXVsdCBUZXN0aW5nIEludGVybWVk
-aWF0ZSBTdWIgU3ViIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAQEAZp3VwrUw
-jw6TzjZJEnXkfRxLWZNmqMPAXYOtBl/+5FjAfehifTTzIdIxR4mfdgH5YZnSQpzY
-m/w17BXElao8uOX6CUaX+sLTVzwsl2csswpcGlNwHooVREoMq9X187qxSr1HS7zF
-O550XgDVIf5e7sXrVuV1rd1XUo3xZLaSLUhU70y/343mcN2TRUslXO4QrIE5lo2v
-awyQl0NW0hSO0F9VZYzOvPPVwu7mf1ijTzbkPtUbAXDnmlvOCrlx2JZd/BqXb75e
-UgYDq7hIyQ109FBOjv0weAM5tZCdesyvro4/43Krd8pa74zHdZMjfQAsTr66WOi4
-yedj8LnWl66JOA==
------END CERTIFICATE-----`,
- `-----BEGIN CERTIFICATE-----
-MIIDijCCAnKgAwIBAgIUBNDYCUsOT2Wth8Fz3layfjEVbcIwDQYJKoZIhvcNAQEL
-BQAwLzEtMCsGA1UEAxMkVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgQXV0aG9y
-aXR5MB4XDTE2MDgwNDE5MTI1NloXDTE2MDgwNjIxMTMyNlowMzExMC8GA1UEAxMo
-VmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3ViIEF1dGhvcml0eTCCASIwDQYJ
-KoZIhvcNAQEBBQADggEPADCCAQoCggEBALHoD7g5YYu2akO8hkFlUCF45Bxjckq4
-WTyDIcDwv/wr7vhZnngCClnP+7Rc30XTmkq2RnH6N7iuqowGM5RNcBV/C9R1weVx
-9esXtWr/AUMyuNb3HSjwDwQGuiAVEgk67fXYy08Ii78+ap3uY3CKC1AFDkHdgDZt
-e946rJ3Nps00TcH0KwyP5voitLgt6dMBR9ttuUdSoQ4uLQDdDf0HRw/IAQswO4Av
-lgUgQObBecnLGhh7e3PM5VVz5f0IqG2ZYnDs3ncl2UYOrj0/JqOMDIMvSQMc2pzS
-Hjty0d1wKWWPC9waguL/24oQR4VG5b7TL62elc2kcEg7r8u5L/sCi/8CAwEAAaOB
-mTCBljAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
-U2OuN9A14HCAwJxJwPn6LLY3G6gwHwYDVR0jBBgwFoAUgAz80p6Pkzk6Cb7lYmTI
-T1jc7iYwMwYDVR0RBCwwKoIoVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgU3Vi
-IEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAQEACXjzGVqRZi6suHwBqvXwVxlS
-nf5YwudfBDJ4LfNd5nTCypsvHkfXnaki6LZMCS1rwPvxzssZ5Wp/7zO5fu6lpSTx
-yjuiH5fBUGHy+f1Ygu6tlAZtUnxAi6pU4eoCDNZpqunJMM4IdaahHeICdjPhx/bH
-AlmwaN0FsNvOlgUuPTjQ3z6jMZn3p2lXI3HiRlcz+nR7gQizPb2L7u8mQ+5EZFmC
-AmXMj40g3bTJVmKoGeAR7cb0pYG/GUELmERjEjCfP7W15eYfuu1j7EYTUAVuPAlJ
-34HDxCuM8cPJwCGMDKfb3Q39AYRmLT6sE3/sq2CZ5xlj8wfwDpVfpXikRDpI0A==
------END CERTIFICATE-----`,
- `-----BEGIN CERTIFICATE-----
-MIIDejCCAmKgAwIBAgIUEtjlbdzIth3U71TELA0PVW7HvaEwDQYJKoZIhvcNAQEL
-BQAwJzElMCMGA1UEAxMcVmF1bHQgVGVzdGluZyBSb290IEF1dGhvcml0eTAeFw0x
-NjA4MDQxOTEyNTVaFw0xNjA4MDgyMzEzMjVaMC8xLTArBgNVBAMTJFZhdWx0IFRl
-c3RpbmcgSW50ZXJtZWRpYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBAMYAQAHCm9V9062NF/UuAa6z6aYqsS5g2YGkd9DvgYxfU5JI
-yIdSz7rkp9QprlQYl2abptZocq+1C9yRVmRJWKjZYDckSwXdmQam/sOfNuiw6Gbd
-3OJGdQ82jhx3v3mIQp+3u9E43wXX0StaJ44+9DgkgwG8iybiv4fh0LzuHPSeKsXe
-/IvJZ0YAInWuzFNegYxU32UT2CEvLtZdru8+sLr4NFWRu/nYIMPJDeZ2JEQVi9IF
-lcB3dP63c6vMBrn4Wn2xBo12JPsQp+ezf5Z5zmtAe68PwRmIXZVAUa2q+CfEuJ36
-66756Ypa0Z3brhPWfX2ahhxSg8DjqFGmZZ5Gfl8CAwEAAaOBlTCBkjAOBgNVHQ8B
-Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUgAz80p6Pkzk6Cb7l
-YmTIT1jc7iYwHwYDVR0jBBgwFoAU6dC1U32HZp7iq97KSu2i+g8+rf4wLwYDVR0R
-BCgwJoIkVmF1bHQgVGVzdGluZyBJbnRlcm1lZGlhdGUgQXV0aG9yaXR5MA0GCSqG
-SIb3DQEBCwUAA4IBAQA6xVMyuZgpJhIQnG2LwwD5Zcbmm4+rHkGVNSpwkUH8ga8X
-b4Owog+MvBw8R7ADVwAh/aOh1/qsRfHv8KkMWW+SAQ84ICVXJiPBzEUJaMWujpyr
-SDkbaD05avRtfvSrPCagaUGVRt+wK24g8hpJqQ+trkufzjq9ySU018+NNX9yGRyA
-VjwZAqALlNEAkdcvd4adEBpZqum2x1Fl9EXnjp6NEWQ7nuGkp3X2DP4gDtQPxgmn
-omOo4GHhO0U57exEIl0d4kiy9WU0qcIISOr6I+gzesMooX6aI43CaqJoZKsHXYY6
-1uxFLss+/wDtvIcyXdTdjPrgD38YIgk1/iKNIgKO
------END CERTIFICATE-----`}
-)
diff --git a/vendor/github.com/hashicorp/vault/helper/certutil/helpers.go b/vendor/github.com/hashicorp/vault/helper/certutil/helpers.go
index 4256edb..3c072ce 100644
--- a/vendor/github.com/hashicorp/vault/helper/certutil/helpers.go
+++ b/vendor/github.com/hashicorp/vault/helper/certutil/helpers.go
@@ -33,6 +33,7 @@ func GetHexFormatted(buf []byte, sep string) string {
return ret.String()
}
+// ParseHexFormatted returns the raw bytes from a formatted hex string
func ParseHexFormatted(in, sep string) []byte {
var ret bytes.Buffer
var err error
@@ -41,9 +42,8 @@ func ParseHexFormatted(in, sep string) []byte {
for _, inByte := range inBytes {
if inBits, err = strconv.ParseInt(inByte, 16, 8); err != nil {
return nil
- } else {
- ret.WriteByte(byte(inBits))
}
+ ret.WriteByte(byte(inBits))
}
return ret.Bytes()
}
@@ -52,12 +52,12 @@ func ParseHexFormatted(in, sep string) []byte {
// of the marshaled public key
func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) {
if privateKey == nil {
- return nil, errutil.InternalError{"passed-in private key is nil"}
+ return nil, errutil.InternalError{Err: "passed-in private key is nil"}
}
marshaledKey, err := x509.MarshalPKIXPublicKey(privateKey.Public())
if err != nil {
- return nil, errutil.InternalError{fmt.Sprintf("error marshalling public key: %s", err)}
+ return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)}
}
subjKeyID := sha1.Sum(marshaledKey)
@@ -71,7 +71,7 @@ func ParsePKIMap(data map[string]interface{}) (*ParsedCertBundle, error) {
result := &CertBundle{}
err := mapstructure.Decode(data, result)
if err != nil {
- return nil, errutil.UserError{err.Error()}
+ return nil, errutil.UserError{Err: err.Error()}
}
return result.ToParsedCertBundle()
@@ -97,20 +97,18 @@ func ParsePKIJSON(input []byte) (*ParsedCertBundle, error) {
return ParsePKIMap(secret.Data)
}
- return nil, errutil.UserError{"unable to parse out of either secret data or a secret object"}
+ return nil, errutil.UserError{Err: "unable to parse out of either secret data or a secret object"}
}
// ParsePEMBundle takes a string of concatenated PEM-format certificate
// and private key values and decodes/parses them, checking validity along
-// the way. There must be at max two certificates (a certificate and its
-// issuing certificate) and one private key.
+// the way. The first certificate must be the subject certificate and issuing
+// certificates may follow. There must be at most one private key.
func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) {
if len(pemBundle) == 0 {
- return nil, errutil.UserError{"empty pem bundle"}
+ return nil, errutil.UserError{Err: "empty pem bundle"}
}
- pemBundle = strings.TrimSpace(pemBundle)
-
pemBytes := []byte(pemBundle)
var pemBlock *pem.Block
parsedBundle := &ParsedCertBundle{}
@@ -119,12 +117,12 @@ func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) {
for len(pemBytes) > 0 {
pemBlock, pemBytes = pem.Decode(pemBytes)
if pemBlock == nil {
- return nil, errutil.UserError{"no data found"}
+ return nil, errutil.UserError{Err: "no data found in PEM block"}
}
if signer, err := x509.ParseECPrivateKey(pemBlock.Bytes); err == nil {
if parsedBundle.PrivateKeyType != UnknownPrivateKey {
- return nil, errutil.UserError{"more than one private key given; provide only one private key in the bundle"}
+ return nil, errutil.UserError{Err: "more than one private key given; provide only one private key in the bundle"}
}
parsedBundle.PrivateKeyFormat = ECBlock
parsedBundle.PrivateKeyType = ECPrivateKey
@@ -133,7 +131,7 @@ func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) {
} else if signer, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err == nil {
if parsedBundle.PrivateKeyType != UnknownPrivateKey {
- return nil, errutil.UserError{"more than one private key given; provide only one private key in the bundle"}
+ return nil, errutil.UserError{Err: "more than one private key given; provide only one private key in the bundle"}
}
parsedBundle.PrivateKeyType = RSAPrivateKey
parsedBundle.PrivateKeyFormat = PKCS1Block
@@ -143,7 +141,7 @@ func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) {
parsedBundle.PrivateKeyFormat = PKCS8Block
if parsedBundle.PrivateKeyType != UnknownPrivateKey {
- return nil, errutil.UserError{"More than one private key given; provide only one private key in the bundle"}
+ return nil, errutil.UserError{Err: "More than one private key given; provide only one private key in the bundle"}
}
switch signer := signer.(type) {
case *rsa.PrivateKey:
diff --git a/vendor/github.com/hashicorp/vault/helper/certutil/types.go b/vendor/github.com/hashicorp/vault/helper/certutil/types.go
index c955222..6a5251c 100644
--- a/vendor/github.com/hashicorp/vault/helper/certutil/types.go
+++ b/vendor/github.com/hashicorp/vault/helper/certutil/types.go
@@ -20,6 +20,7 @@ import (
"math/big"
"strings"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/errutil"
)
@@ -147,7 +148,7 @@ func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) {
if len(c.PrivateKey) > 0 {
pemBlock, _ = pem.Decode([]byte(c.PrivateKey))
if pemBlock == nil {
- return nil, errutil.UserError{"Error decoding private key from cert bundle"}
+ return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"}
}
result.PrivateKeyBytes = pemBlock.Bytes
@@ -161,7 +162,7 @@ func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) {
case PKCS8Block:
t, err := getPKCS8Type(pemBlock.Bytes)
if err != nil {
- return nil, errutil.UserError{fmt.Sprintf("Error getting key type from pkcs#8: %v", err)}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Error getting key type from pkcs#8: %v", err)}
}
result.PrivateKeyType = t
switch t {
@@ -171,24 +172,24 @@ func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) {
c.PrivateKeyType = RSAPrivateKey
}
default:
- return nil, errutil.UserError{fmt.Sprintf("Unsupported key block type: %s", pemBlock.Type)}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Unsupported key block type: %s", pemBlock.Type)}
}
result.PrivateKey, err = result.getSigner()
if err != nil {
- return nil, errutil.UserError{fmt.Sprintf("Error getting signer: %s", err)}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)}
}
}
if len(c.Certificate) > 0 {
pemBlock, _ = pem.Decode([]byte(c.Certificate))
if pemBlock == nil {
- return nil, errutil.UserError{"Error decoding certificate from cert bundle"}
+ return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"}
}
result.CertificateBytes = pemBlock.Bytes
result.Certificate, err = x509.ParseCertificate(result.CertificateBytes)
if err != nil {
- return nil, errutil.UserError{"Error encountered parsing certificate bytes from raw bundle"}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle: %v", err)}
}
}
switch {
@@ -196,12 +197,12 @@ func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) {
for _, cert := range c.CAChain {
pemBlock, _ := pem.Decode([]byte(cert))
if pemBlock == nil {
- return nil, errutil.UserError{"Error decoding certificate from cert bundle"}
+ return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"}
}
parsedCert, err := x509.ParseCertificate(pemBlock.Bytes)
if err != nil {
- return nil, errutil.UserError{"Error encountered parsing certificate bytes from raw bundle"}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CA chain: %v", err)}
}
certBlock := &CertBlock{
@@ -211,16 +212,16 @@ func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) {
result.CAChain = append(result.CAChain, certBlock)
}
- // For backwards compabitibility
+ // For backwards compatibility
case len(c.IssuingCA) > 0:
pemBlock, _ = pem.Decode([]byte(c.IssuingCA))
if pemBlock == nil {
- return nil, errutil.UserError{"Error decoding ca certificate from cert bundle"}
+ return nil, errutil.UserError{Err: "Error decoding ca certificate from cert bundle"}
}
parsedCert, err := x509.ParseCertificate(pemBlock.Bytes)
if err != nil {
- return nil, errutil.UserError{"Error encountered parsing certificate bytes from raw bundle3"}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via issuing CA: %v", err)}
}
result.SerialNumber = result.Certificate.SerialNumber
@@ -293,10 +294,10 @@ func (p *ParsedCertBundle) Verify() error {
if p.PrivateKey != nil && p.Certificate != nil {
equal, err := ComparePublicKeys(p.Certificate.PublicKey, p.PrivateKey.Public())
if err != nil {
- return fmt.Errorf("could not compare public and private keys: %s", err)
+ return errwrap.Wrapf("could not compare public and private keys: {{err}}", err)
}
if !equal {
- return fmt.Errorf("Public key of certificate does not match private key")
+ return fmt.Errorf("public key of certificate does not match private key")
}
}
@@ -307,7 +308,7 @@ func (p *ParsedCertBundle) Verify() error {
return fmt.Errorf("certificate %d of certificate chain is not a certificate authority", i+1)
}
if !bytes.Equal(certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId) {
- return fmt.Errorf("certificate %d of certificate chain ca trust path is incorrect (%s/%s)",
+ return fmt.Errorf("certificate %d of certificate chain ca trust path is incorrect (%q/%q)",
i+1, certPath[i].Certificate.Subject.CommonName, caCert.Certificate.Subject.CommonName)
}
}
@@ -316,6 +317,8 @@ func (p *ParsedCertBundle) Verify() error {
return nil
}
+// GetCertificatePath returns a slice of certificates making up a path, pulled
+// from the parsed cert bundle
func (p *ParsedCertBundle) GetCertificatePath() []*CertBlock {
var certPath []*CertBlock
@@ -343,20 +346,20 @@ func (p *ParsedCertBundle) getSigner() (crypto.Signer, error) {
var err error
if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 {
- return nil, errutil.UserError{"Given parsed cert bundle does not have private key information"}
+ return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"}
}
switch p.PrivateKeyFormat {
case ECBlock:
signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes)
if err != nil {
- return nil, errutil.UserError{fmt.Sprintf("Unable to parse CA's private EC key: %s", err)}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)}
}
case PKCS1Block:
signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes)
if err != nil {
- return nil, errutil.UserError{fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)}
}
case PKCS8Block:
@@ -365,12 +368,12 @@ func (p *ParsedCertBundle) getSigner() (crypto.Signer, error) {
case *rsa.PrivateKey, *ecdsa.PrivateKey:
return k.(crypto.Signer), nil
default:
- return nil, errutil.UserError{"Found unknown private key type in pkcs#8 wrapping"}
+ return nil, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"}
}
}
- return nil, errutil.UserError{fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)}
default:
- return nil, errutil.UserError{"Unable to determine type of private key; only RSA and EC are supported"}
+ return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA and EC are supported"}
}
return signer, nil
}
@@ -385,7 +388,7 @@ func (p *ParsedCertBundle) SetParsedPrivateKey(privateKey crypto.Signer, private
func getPKCS8Type(bs []byte) (PrivateKeyType, error) {
k, err := x509.ParsePKCS8PrivateKey(bs)
if err != nil {
- return UnknownPrivateKey, errutil.UserError{fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)}
+ return UnknownPrivateKey, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)}
}
switch k.(type) {
@@ -394,7 +397,7 @@ func getPKCS8Type(bs []byte) (PrivateKeyType, error) {
case *rsa.PrivateKey:
return RSAPrivateKey, nil
default:
- return UnknownPrivateKey, errutil.UserError{"Found unknown private key type in pkcs#8 wrapping"}
+ return UnknownPrivateKey, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"}
}
}
@@ -408,7 +411,7 @@ func (c *CSRBundle) ToParsedCSRBundle() (*ParsedCSRBundle, error) {
if len(c.PrivateKey) > 0 {
pemBlock, _ = pem.Decode([]byte(c.PrivateKey))
if pemBlock == nil {
- return nil, errutil.UserError{"Error decoding private key from cert bundle"}
+ return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"}
}
result.PrivateKeyBytes = pemBlock.Bytes
@@ -426,25 +429,25 @@ func (c *CSRBundle) ToParsedCSRBundle() (*ParsedCSRBundle, error) {
result.PrivateKeyType = RSAPrivateKey
c.PrivateKeyType = "rsa"
} else {
- return nil, errutil.UserError{fmt.Sprintf("Unknown private key type in bundle: %s", c.PrivateKeyType)}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Unknown private key type in bundle: %s", c.PrivateKeyType)}
}
}
result.PrivateKey, err = result.getSigner()
if err != nil {
- return nil, errutil.UserError{fmt.Sprintf("Error getting signer: %s", err)}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)}
}
}
if len(c.CSR) > 0 {
pemBlock, _ = pem.Decode([]byte(c.CSR))
if pemBlock == nil {
- return nil, errutil.UserError{"Error decoding certificate from cert bundle"}
+ return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"}
}
result.CSRBytes = pemBlock.Bytes
result.CSR, err = x509.ParseCertificateRequest(result.CSRBytes)
if err != nil {
- return nil, errutil.UserError{fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle: %v", err)}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CSR: %v", err)}
}
}
@@ -474,7 +477,7 @@ func (p *ParsedCSRBundle) ToCSRBundle() (*CSRBundle, error) {
result.PrivateKeyType = "ec"
block.Type = "EC PRIVATE KEY"
default:
- return nil, errutil.InternalError{"Could not determine private key type when creating block"}
+ return nil, errutil.InternalError{Err: "Could not determine private key type when creating block"}
}
result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block)))
}
@@ -491,24 +494,24 @@ func (p *ParsedCSRBundle) getSigner() (crypto.Signer, error) {
var err error
if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 {
- return nil, errutil.UserError{"Given parsed cert bundle does not have private key information"}
+ return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"}
}
switch p.PrivateKeyType {
case ECPrivateKey:
signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes)
if err != nil {
- return nil, errutil.UserError{fmt.Sprintf("Unable to parse CA's private EC key: %s", err)}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)}
}
case RSAPrivateKey:
signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes)
if err != nil {
- return nil, errutil.UserError{fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)}
+ return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)}
}
default:
- return nil, errutil.UserError{"Unable to determine type of private key; only RSA and EC are supported"}
+ return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA and EC are supported"}
}
return signer, nil
}
@@ -521,7 +524,7 @@ func (p *ParsedCSRBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateK
}
// GetTLSConfig returns a TLS config generally suitable for client
-// authentiation. The returned TLS config can be modified slightly
+// authentication. The returned TLS config can be modified slightly
// to be made suitable for a server requiring client authentication;
// specifically, you should set the value of ClientAuth in the returned
// config to match your needs.
@@ -554,13 +557,13 @@ func (p *ParsedCertBundle) GetTLSConfig(usage TLSUsage) (*tls.Config, error) {
// Technically we only need one cert, but this doesn't duplicate code
certBundle, err := p.ToCertBundle()
if err != nil {
- return nil, fmt.Errorf("Error converting parsed bundle to string bundle when getting TLS config: %s", err)
+ return nil, errwrap.Wrapf("error converting parsed bundle to string bundle when getting TLS config: {{err}}", err)
}
caPool := x509.NewCertPool()
ok := caPool.AppendCertsFromPEM([]byte(certBundle.CAChain[0]))
if !ok {
- return nil, fmt.Errorf("Could not append CA certificate")
+ return nil, fmt.Errorf("could not append CA certificate")
}
if usage&TLSServer > 0 {
diff --git a/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr.go b/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr.go
deleted file mode 100644
index 8031bb8..0000000
--- a/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package cidrutil
-
-import (
- "fmt"
- "net"
- "strings"
-
- "github.com/hashicorp/vault/helper/strutil"
-)
-
-// IPBelongsToCIDR checks if the given IP is encompassed by the given CIDR block
-func IPBelongsToCIDR(ipAddr string, cidr string) (bool, error) {
- if ipAddr == "" {
- return false, fmt.Errorf("missing IP address")
- }
-
- ip := net.ParseIP(ipAddr)
- if ip == nil {
- return false, fmt.Errorf("invalid IP address")
- }
-
- _, ipnet, err := net.ParseCIDR(cidr)
- if err != nil {
- return false, err
- }
-
- if !ipnet.Contains(ip) {
- return false, nil
- }
-
- return true, nil
-}
-
-// IPBelongsToCIDRBlocksString checks if the given IP is encompassed by any of
-// the given CIDR blocks, when the input is a string composed by joining all
-// the CIDR blocks using a separator. The input is separated based on the given
-// separator and the IP is checked to be belonged by any CIDR block.
-func IPBelongsToCIDRBlocksString(ipAddr string, cidrList, separator string) (bool, error) {
- if ipAddr == "" {
- return false, fmt.Errorf("missing IP address")
- }
-
- if cidrList == "" {
- return false, fmt.Errorf("missing CIDR list")
- }
-
- if separator == "" {
- return false, fmt.Errorf("missing separator")
- }
-
- if ip := net.ParseIP(ipAddr); ip == nil {
- return false, fmt.Errorf("invalid IP address")
- }
-
- return IPBelongsToCIDRBlocksSlice(ipAddr, strutil.ParseDedupLowercaseAndSortStrings(cidrList, separator))
-}
-
-// IPBelongsToCIDRBlocksSlice checks if the given IP is encompassed by any of the given
-// CIDR blocks
-func IPBelongsToCIDRBlocksSlice(ipAddr string, cidrs []string) (bool, error) {
- if ipAddr == "" {
- return false, fmt.Errorf("missing IP address")
- }
-
- if len(cidrs) == 0 {
- return false, fmt.Errorf("missing CIDR blocks to be checked against")
- }
-
- if ip := net.ParseIP(ipAddr); ip == nil {
- return false, fmt.Errorf("invalid IP address")
- }
-
- for _, cidr := range cidrs {
- belongs, err := IPBelongsToCIDR(ipAddr, cidr)
- if err != nil {
- return false, err
- }
- if belongs {
- return true, nil
- }
- }
-
- return false, nil
-}
-
-// ValidateCIDRListString checks if the list of CIDR blocks are valid, given
-// that the input is a string composed by joining all the CIDR blocks using a
-// separator. The input is separated based on the given separator and validity
-// of each is checked.
-func ValidateCIDRListString(cidrList string, separator string) (bool, error) {
- if cidrList == "" {
- return false, fmt.Errorf("missing CIDR list that needs validation")
- }
- if separator == "" {
- return false, fmt.Errorf("missing separator")
- }
-
- return ValidateCIDRListSlice(strutil.ParseDedupLowercaseAndSortStrings(cidrList, separator))
-}
-
-// ValidateCIDRListSlice checks if the given list of CIDR blocks are valid
-func ValidateCIDRListSlice(cidrBlocks []string) (bool, error) {
- if len(cidrBlocks) == 0 {
- return false, fmt.Errorf("missing CIDR blocks that needs validation")
- }
-
- for _, block := range cidrBlocks {
- if _, _, err := net.ParseCIDR(strings.TrimSpace(block)); err != nil {
- return false, err
- }
- }
-
- return true, nil
-}
-
-// Subset checks if the IPs belonging to a given CIDR block is a subset of IPs
-// belonging to another CIDR block.
-func Subset(cidr1, cidr2 string) (bool, error) {
- if cidr1 == "" {
- return false, fmt.Errorf("missing CIDR to be checked against")
- }
-
- if cidr2 == "" {
- return false, fmt.Errorf("missing CIDR that needs to be checked")
- }
-
- ip1, net1, err := net.ParseCIDR(cidr1)
- if err != nil {
- return false, fmt.Errorf("failed to parse the CIDR to be checked against: %q", err)
- }
-
- zeroAddr := false
- if ip := ip1.To4(); ip != nil && ip.Equal(net.IPv4zero) {
- zeroAddr = true
- }
- if ip := ip1.To16(); ip != nil && ip.Equal(net.IPv6zero) {
- zeroAddr = true
- }
-
- maskLen1, _ := net1.Mask.Size()
- if !zeroAddr && maskLen1 == 0 {
- return false, fmt.Errorf("CIDR to be checked against is not in its canonical form")
- }
-
- ip2, net2, err := net.ParseCIDR(cidr2)
- if err != nil {
- return false, fmt.Errorf("failed to parse the CIDR that needs to be checked: %q", err)
- }
-
- zeroAddr = false
- if ip := ip2.To4(); ip != nil && ip.Equal(net.IPv4zero) {
- zeroAddr = true
- }
- if ip := ip2.To16(); ip != nil && ip.Equal(net.IPv6zero) {
- zeroAddr = true
- }
-
- maskLen2, _ := net2.Mask.Size()
- if !zeroAddr && maskLen2 == 0 {
- return false, fmt.Errorf("CIDR that needs to be checked is not in its canonical form")
- }
-
- // If the mask length of the CIDR that needs to be checked is smaller
- // then the mask length of the CIDR to be checked against, then the
- // former will encompass more IPs than the latter, and hence can't be a
- // subset of the latter.
- if maskLen2 < maskLen1 {
- return false, nil
- }
-
- belongs, err := IPBelongsToCIDR(net2.IP.String(), cidr1)
- if err != nil {
- return false, err
- }
-
- return belongs, nil
-}
-
-// SubsetBlocks checks if each CIDR block of a given set of CIDR blocks, is a
-// subset of at least one CIDR block belonging to another set of CIDR blocks.
-// First parameter is the set of CIDR blocks to check against and the second
-// parameter is the set of CIDR blocks that needs to be checked.
-func SubsetBlocks(cidrBlocks1, cidrBlocks2 []string) (bool, error) {
- if len(cidrBlocks1) == 0 {
- return false, fmt.Errorf("missing CIDR blocks to be checked against")
- }
-
- if len(cidrBlocks2) == 0 {
- return false, fmt.Errorf("missing CIDR blocks that needs to be checked")
- }
-
- // Check if all the elements of cidrBlocks2 is a subset of at least one
- // element of cidrBlocks1
- for _, cidrBlock2 := range cidrBlocks2 {
- isSubset := false
- for _, cidrBlock1 := range cidrBlocks1 {
- subset, err := Subset(cidrBlock1, cidrBlock2)
- if err != nil {
- return false, err
- }
- // If CIDR is a subset of any of the CIDR block, its
- // good enough. Break out.
- if subset {
- isSubset = true
- break
- }
- }
- // CIDR block was not a subset of any of the CIDR blocks in the
- // set of blocks to check against
- if !isSubset {
- return false, nil
- }
- }
-
- return true, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr_test.go b/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr_test.go
deleted file mode 100644
index f6d5849..0000000
--- a/vendor/github.com/hashicorp/vault/helper/cidrutil/cidr_test.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package cidrutil
-
-import "testing"
-
-func TestCIDRUtil_IPBelongsToCIDR(t *testing.T) {
- ip := "192.168.25.30"
- cidr := "192.168.26.30/16"
-
- belongs, err := IPBelongsToCIDR(ip, cidr)
- if err != nil {
- t.Fatal(err)
- }
- if !belongs {
- t.Fatalf("expected IP %q to belong to CIDR %q", ip, cidr)
- }
-
- ip = "10.197.192.6"
- cidr = "10.197.192.0/18"
- belongs, err = IPBelongsToCIDR(ip, cidr)
- if err != nil {
- t.Fatal(err)
- }
- if !belongs {
- t.Fatalf("expected IP %q to belong to CIDR %q", ip, cidr)
- }
-
- ip = "192.168.25.30"
- cidr = "192.168.26.30/24"
- belongs, err = IPBelongsToCIDR(ip, cidr)
- if err != nil {
- t.Fatal(err)
- }
- if belongs {
- t.Fatalf("expected IP %q to not belong to CIDR %q", ip, cidr)
- }
-
- ip = "192.168.25.30.100"
- cidr = "192.168.26.30/24"
- belongs, err = IPBelongsToCIDR(ip, cidr)
- if err == nil {
- t.Fatalf("expected an error")
- }
-}
-
-func TestCIDRUtil_IPBelongsToCIDRBlocksString(t *testing.T) {
- ip := "192.168.27.29"
- cidrList := "172.169.100.200/18,192.168.0.0/16,10.10.20.20/24"
-
- belongs, err := IPBelongsToCIDRBlocksString(ip, cidrList, ",")
- if err != nil {
- t.Fatal(err)
- }
- if !belongs {
- t.Fatalf("expected IP %q to belong to one of the CIDRs in %q", ip, cidrList)
- }
-
- ip = "10.197.192.6"
- cidrList = "1.2.3.0/8,10.197.192.0/18,10.197.193.0/24"
-
- belongs, err = IPBelongsToCIDRBlocksString(ip, cidrList, ",")
- if err != nil {
- t.Fatal(err)
- }
- if !belongs {
- t.Fatalf("expected IP %q to belong to one of the CIDRs in %q", ip, cidrList)
- }
-
- ip = "192.168.27.29"
- cidrList = "172.169.100.200/18,192.168.0.0.0/16,10.10.20.20/24"
-
- belongs, err = IPBelongsToCIDRBlocksString(ip, cidrList, ",")
- if err == nil {
- t.Fatalf("expected an error")
- }
-
- ip = "30.40.50.60"
- cidrList = "172.169.100.200/18,192.168.0.0/16,10.10.20.20/24"
-
- belongs, err = IPBelongsToCIDRBlocksString(ip, cidrList, ",")
- if err != nil {
- t.Fatal(err)
- }
- if belongs {
- t.Fatalf("expected IP %q to not belong to one of the CIDRs in %q", ip, cidrList)
- }
-
-}
-
-func TestCIDRUtil_IPBelongsToCIDRBlocksSlice(t *testing.T) {
- ip := "192.168.27.29"
- cidrList := []string{"172.169.100.200/18", "192.168.0.0/16", "10.10.20.20/24"}
-
- belongs, err := IPBelongsToCIDRBlocksSlice(ip, cidrList)
- if err != nil {
- t.Fatal(err)
- }
- if !belongs {
- t.Fatalf("expected IP %q to belong to one of the CIDRs in %q", ip, cidrList)
- }
-
- ip = "192.168.27.29"
- cidrList = []string{"172.169.100.200/18", "192.168.0.0.0/16", "10.10.20.20/24"}
-
- belongs, err = IPBelongsToCIDRBlocksSlice(ip, cidrList)
- if err == nil {
- t.Fatalf("expected an error")
- }
-
- ip = "30.40.50.60"
- cidrList = []string{"172.169.100.200/18", "192.168.0.0/16", "10.10.20.20/24"}
-
- belongs, err = IPBelongsToCIDRBlocksSlice(ip, cidrList)
- if err != nil {
- t.Fatal(err)
- }
- if belongs {
- t.Fatalf("expected IP %q to not belong to one of the CIDRs in %q", ip, cidrList)
- }
-}
-
-func TestCIDRUtil_ValidateCIDRListString(t *testing.T) {
- cidrList := "172.169.100.200/18,192.168.0.0/16,10.10.20.20/24"
-
- valid, err := ValidateCIDRListString(cidrList, ",")
- if err != nil {
- t.Fatal(err)
- }
- if !valid {
- t.Fatalf("expected CIDR list %q to be valid", cidrList)
- }
-
- cidrList = "172.169.100.200,192.168.0.0/16,10.10.20.20/24"
- valid, err = ValidateCIDRListString(cidrList, ",")
- if err == nil {
- t.Fatal("expected an error")
- }
-
- cidrList = "172.169.100.200/18,192.168.0.0.0/16,10.10.20.20/24"
- valid, err = ValidateCIDRListString(cidrList, ",")
- if err == nil {
- t.Fatal("expected an error")
- }
-}
-
-func TestCIDRUtil_ValidateCIDRListSlice(t *testing.T) {
- cidrList := []string{"172.169.100.200/18", "192.168.0.0/16", "10.10.20.20/24"}
-
- valid, err := ValidateCIDRListSlice(cidrList)
- if err != nil {
- t.Fatal(err)
- }
- if !valid {
- t.Fatalf("expected CIDR list %q to be valid", cidrList)
- }
-
- cidrList = []string{"172.169.100.200", "192.168.0.0/16", "10.10.20.20/24"}
- valid, err = ValidateCIDRListSlice(cidrList)
- if err == nil {
- t.Fatal("expected an error")
- }
-
- cidrList = []string{"172.169.100.200/18", "192.168.0.0.0/16", "10.10.20.20/24"}
- valid, err = ValidateCIDRListSlice(cidrList)
- if err == nil {
- t.Fatal("expected an error")
- }
-}
-
-func TestCIDRUtil_Subset(t *testing.T) {
- cidr1 := "192.168.27.29/24"
- cidr2 := "192.168.27.29/24"
- subset, err := Subset(cidr1, cidr2)
- if err != nil {
- t.Fatal(err)
- }
- if !subset {
- t.Fatalf("expected CIDR %q to be a subset of CIDR %q", cidr2, cidr1)
- }
-
- cidr1 = "192.168.27.29/16"
- cidr2 = "192.168.27.29/24"
- subset, err = Subset(cidr1, cidr2)
- if err != nil {
- t.Fatal(err)
- }
- if !subset {
- t.Fatalf("expected CIDR %q to be a subset of CIDR %q", cidr2, cidr1)
- }
-
- cidr1 = "192.168.27.29/24"
- cidr2 = "192.168.27.29/16"
- subset, err = Subset(cidr1, cidr2)
- if err != nil {
- t.Fatal(err)
- }
- if subset {
- t.Fatalf("expected CIDR %q to not be a subset of CIDR %q", cidr2, cidr1)
- }
-
- cidr1 = "192.168.0.128/25"
- cidr2 = "192.168.0.0/24"
- subset, err = Subset(cidr1, cidr2)
- if err != nil {
- t.Fatal(err)
- }
- if subset {
- t.Fatalf("expected CIDR %q to not be a subset of CIDR %q", cidr2, cidr1)
- }
- subset, err = Subset(cidr2, cidr1)
- if err != nil {
- t.Fatal(err)
- }
- if !subset {
- t.Fatalf("expected CIDR %q to be a subset of CIDR %q", cidr1, cidr2)
- }
-}
-
-func TestCIDRUtil_SubsetBlocks(t *testing.T) {
- cidrBlocks1 := []string{"192.168.27.29/16", "172.245.30.40/24", "10.20.30.40/30"}
- cidrBlocks2 := []string{"192.168.27.29/20", "172.245.30.40/25", "10.20.30.40/32"}
-
- subset, err := SubsetBlocks(cidrBlocks1, cidrBlocks2)
- if err != nil {
- t.Fatal(err)
- }
- if !subset {
- t.Fatalf("expected CIDR blocks %q to be a subset of CIDR blocks %q", cidrBlocks2, cidrBlocks1)
- }
-
- cidrBlocks1 = []string{"192.168.27.29/16", "172.245.30.40/25", "10.20.30.40/30"}
- cidrBlocks2 = []string{"192.168.27.29/20", "172.245.30.40/24", "10.20.30.40/32"}
-
- subset, err = SubsetBlocks(cidrBlocks1, cidrBlocks2)
- if err != nil {
- t.Fatal(err)
- }
- if subset {
- t.Fatalf("expected CIDR blocks %q to not be a subset of CIDR blocks %q", cidrBlocks2, cidrBlocks1)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
index 31a2dcd..a7fb87b 100644
--- a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
+++ b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
@@ -8,6 +8,7 @@ import (
"io"
"github.com/golang/snappy"
+ "github.com/hashicorp/errwrap"
)
const (
@@ -33,7 +34,7 @@ const (
)
// SnappyReadCloser embeds the snappy reader which implements the io.Reader
-// interface. The decompress procedure in this utility expectes an
+// interface. The decompress procedure in this utility expects an
// io.ReadCloser. This type implements the io.Closer interface to retain the
// generic way of decompression.
type SnappyReadCloser struct {
@@ -107,7 +108,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
}
if err != nil {
- return nil, fmt.Errorf("failed to create a compression writer; err: %v", err)
+ return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err)
}
if writer == nil {
@@ -117,7 +118,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
// Compress the input and place it in the same buffer containing the
// canary byte.
if _, err = writer.Write(data); err != nil {
- return nil, fmt.Errorf("failed to compress input data; err: %v", err)
+ return nil, errwrap.Wrapf("failed to compress input data: err: {{err}}", err)
}
// Close the io.WriteCloser
@@ -172,7 +173,7 @@ func Decompress(data []byte) ([]byte, bool, error) {
return nil, true, nil
}
if err != nil {
- return nil, false, fmt.Errorf("failed to create a compression reader; err: %v", err)
+ return nil, false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err)
}
if reader == nil {
return nil, false, fmt.Errorf("failed to create a compression reader")
diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go
deleted file mode 100644
index 5eeeea8..0000000
--- a/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package compressutil
-
-import (
- "bytes"
- "compress/gzip"
- "encoding/json"
- "testing"
-)
-
-func TestCompressUtil_CompressSnappy(t *testing.T) {
- input := map[string]interface{}{
- "sample": "data",
- "verification": "process",
- }
-
- // Encode input into JSON
- var buf bytes.Buffer
- enc := json.NewEncoder(&buf)
- if err := enc.Encode(input); err != nil {
- t.Fatal(err)
- }
- inputJSONBytes := buf.Bytes()
-
- // Set Snappy compression in the configuration
- compressionConfig := &CompressionConfig{
- Type: CompressionTypeSnappy,
- }
-
- // Compress the input
- compressedJSONBytes, err := Compress(inputJSONBytes, compressionConfig)
- if err != nil {
- t.Fatal(err)
- }
-
- decompressedJSONBytes, wasNotCompressed, err := Decompress(compressedJSONBytes)
- if err != nil {
- t.Fatal(err)
- }
-
- // Check if the input for decompress was not compressed in the first place
- if wasNotCompressed {
- t.Fatalf("bad: expected compressed bytes")
- }
-
- // Compare the value after decompression
- if string(inputJSONBytes) != string(decompressedJSONBytes) {
- t.Fatalf("bad: decompressed value;\nexpected: %q\nactual: %q", string(inputJSONBytes), string(decompressedJSONBytes))
- }
-}
-
-func TestCompressUtil_CompressDecompress(t *testing.T) {
- input := map[string]interface{}{
- "sample": "data",
- "verification": "process",
- }
-
- // Encode input into JSON
- var buf bytes.Buffer
- enc := json.NewEncoder(&buf)
- if err := enc.Encode(input); err != nil {
- t.Fatal(err)
- }
-
- inputJSONBytes := buf.Bytes()
- // Test nil configuration
- if _, err := Compress(inputJSONBytes, nil); err == nil {
- t.Fatal("expected an error")
- }
-
- // Test invalid configuration
- if _, err := Compress(inputJSONBytes, &CompressionConfig{}); err == nil {
- t.Fatal("expected an error")
- }
-
- // Compress input using lzw format
- compressedJSONBytes, err := Compress(inputJSONBytes, &CompressionConfig{
- Type: CompressionTypeLzw,
- })
- if err != nil {
- t.Fatal("expected an error")
- }
- if len(compressedJSONBytes) == 0 {
- t.Fatal("failed to compress data in lzw format")
- }
- // Check the presense of the canary
- if compressedJSONBytes[0] != CompressionCanaryLzw {
- t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryLzw, compressedJSONBytes[0])
- }
-
- // Decompress the input and check the output
- decompressedJSONBytes, uncompressed, err := Decompress(compressedJSONBytes)
- if err != nil {
- t.Fatal(err)
- }
- if uncompressed {
- t.Fatal("failed to recognize compressed data")
- }
- if len(decompressedJSONBytes) == 0 {
- t.Fatal("failed to decompress lzw formatted data")
- }
-
- if string(inputJSONBytes) != string(decompressedJSONBytes) {
- t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
- }
-
- // Compress input using Gzip format, assume DefaultCompression
- compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
- Type: CompressionTypeGzip,
- })
- if err != nil {
- t.Fatal("expected an error")
- }
- if len(compressedJSONBytes) == 0 {
- t.Fatal("failed to compress data in lzw format")
- }
- // Check the presense of the canary
- if compressedJSONBytes[0] != CompressionCanaryGzip {
- t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryGzip, compressedJSONBytes[0])
- }
-
- // Decompress the input and check the output
- decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
- if err != nil {
- t.Fatal(err)
- }
- if uncompressed {
- t.Fatal("failed to recognize compressed data")
- }
- if len(decompressedJSONBytes) == 0 {
- t.Fatal("failed to decompress lzw formatted data")
- }
-
- if string(inputJSONBytes) != string(decompressedJSONBytes) {
- t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
- }
-
- // Compress input using Gzip format: DefaultCompression
- compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
- Type: CompressionTypeGzip,
- GzipCompressionLevel: gzip.DefaultCompression,
- })
- if err != nil {
- t.Fatal("expected an error")
- }
- if len(compressedJSONBytes) == 0 {
- t.Fatal("failed to compress data in lzw format")
- }
- // Check the presense of the canary
- if compressedJSONBytes[0] != CompressionCanaryGzip {
- t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryGzip, compressedJSONBytes[0])
- }
-
- // Decompress the input and check the output
- decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
- if err != nil {
- t.Fatal(err)
- }
- if uncompressed {
- t.Fatal("failed to recognize compressed data")
- }
- if len(decompressedJSONBytes) == 0 {
- t.Fatal("failed to decompress lzw formatted data")
- }
-
- if string(inputJSONBytes) != string(decompressedJSONBytes) {
- t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
- }
-
- // Compress input using Gzip format, BestCompression
- compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
- Type: CompressionTypeGzip,
- GzipCompressionLevel: gzip.BestCompression,
- })
- if err != nil {
- t.Fatal("expected an error")
- }
- if len(compressedJSONBytes) == 0 {
- t.Fatal("failed to compress data in lzw format")
- }
- // Check the presense of the canary
- if compressedJSONBytes[0] != CompressionCanaryGzip {
- t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryGzip, compressedJSONBytes[0])
- }
-
- // Decompress the input and check the output
- decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
- if err != nil {
- t.Fatal(err)
- }
- if uncompressed {
- t.Fatal("failed to recognize compressed data")
- }
- if len(decompressedJSONBytes) == 0 {
- t.Fatal("failed to decompress lzw formatted data")
- }
-
- if string(inputJSONBytes) != string(decompressedJSONBytes) {
- t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
- }
-
- // Compress input using Gzip format, BestSpeed
- compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
- Type: CompressionTypeGzip,
- GzipCompressionLevel: gzip.BestSpeed,
- })
- if err != nil {
- t.Fatal("expected an error")
- }
- if len(compressedJSONBytes) == 0 {
- t.Fatal("failed to compress data in lzw format")
- }
- // Check the presense of the canary
- if compressedJSONBytes[0] != CompressionCanaryGzip {
- t.Fatalf("bad: compression canary: expected: %d actual: %d",
- CompressionCanaryGzip, compressedJSONBytes[0])
- }
-
- // Decompress the input and check the output
- decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
- if err != nil {
- t.Fatal(err)
- }
- if uncompressed {
- t.Fatal("failed to recognize compressed data")
- }
- if len(decompressedJSONBytes) == 0 {
- t.Fatal("failed to decompress lzw formatted data")
- }
-
- if string(inputJSONBytes) != string(decompressedJSONBytes) {
- t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/consts/consts.go b/vendor/github.com/hashicorp/vault/helper/consts/consts.go
deleted file mode 100644
index 2ec952b..0000000
--- a/vendor/github.com/hashicorp/vault/helper/consts/consts.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package consts
-
-const (
- // ExpirationRestoreWorkerCount specifies the numer of workers to use while
- // restoring leases into the expiration manager
- ExpirationRestoreWorkerCount = 64
-)
diff --git a/vendor/github.com/hashicorp/vault/helper/consts/error.go b/vendor/github.com/hashicorp/vault/helper/consts/error.go
deleted file mode 100644
index 06977d5..0000000
--- a/vendor/github.com/hashicorp/vault/helper/consts/error.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package consts
-
-import "errors"
-
-var (
- // ErrSealed is returned if an operation is performed on a sealed barrier.
- // No operation is expected to succeed before unsealing
- ErrSealed = errors.New("Vault is sealed")
-
- // ErrStandby is returned if an operation is performed on a standby Vault.
- // No operation is expected to succeed until active.
- ErrStandby = errors.New("Vault is in standby mode")
-
- // Used when .. is used in a path
- ErrPathContainsParentReferences = errors.New("path cannot contain parent references")
-)
diff --git a/vendor/github.com/hashicorp/vault/helper/consts/replication.go b/vendor/github.com/hashicorp/vault/helper/consts/replication.go
deleted file mode 100644
index 7fbeb88..0000000
--- a/vendor/github.com/hashicorp/vault/helper/consts/replication.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package consts
-
-type ReplicationState uint32
-
-const (
- _ ReplicationState = iota
- OldReplicationPrimary
- OldReplicationSecondary
- OldReplicationBootstrapping
-
- ReplicationDisabled ReplicationState = 0
- ReplicationPerformancePrimary ReplicationState = 1 << iota
- ReplicationPerformanceSecondary
- ReplicationBootstrapping
- ReplicationDRPrimary
- ReplicationDRSecondary
-)
-
-func (r ReplicationState) String() string {
- switch r {
- case ReplicationPerformanceSecondary:
- return "perf-secondary"
- case ReplicationPerformancePrimary:
- return "perf-primary"
- case ReplicationBootstrapping:
- return "bootstrapping"
- case ReplicationDRPrimary:
- return "dr-primary"
- case ReplicationDRSecondary:
- return "dr-secondary"
- }
-
- return "disabled"
-}
-
-func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 }
-func (r *ReplicationState) AddState(flag ReplicationState) { *r |= flag }
-func (r *ReplicationState) ClearState(flag ReplicationState) { *r &= ^flag }
-func (r *ReplicationState) ToggleState(flag ReplicationState) { *r ^= flag }
diff --git a/vendor/github.com/hashicorp/vault/helper/flag-kv/flag.go b/vendor/github.com/hashicorp/vault/helper/flag-kv/flag.go
deleted file mode 100644
index 3e8a8f7..0000000
--- a/vendor/github.com/hashicorp/vault/helper/flag-kv/flag.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package kvFlag
-
-import (
- "fmt"
- "strings"
-)
-
-// Flag is a flag.Value implementation for parsing user variables
-// from the command-line in the format of '-var key=value'.
-type Flag map[string]string
-
-func (v *Flag) String() string {
- return ""
-}
-
-func (v *Flag) Set(raw string) error {
- idx := strings.Index(raw, "=")
- if idx == -1 {
- return fmt.Errorf("No '=' value in arg: %s", raw)
- }
-
- if *v == nil {
- *v = make(map[string]string)
- }
-
- key, value := raw[0:idx], raw[idx+1:]
- (*v)[key] = value
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/flag-kv/flag_test.go b/vendor/github.com/hashicorp/vault/helper/flag-kv/flag_test.go
deleted file mode 100644
index 2fc88aa..0000000
--- a/vendor/github.com/hashicorp/vault/helper/flag-kv/flag_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package kvFlag
-
-import (
- "flag"
- "reflect"
- "testing"
-)
-
-func TestFlag_impl(t *testing.T) {
- var _ flag.Value = new(Flag)
-}
-
-func TestFlag(t *testing.T) {
- cases := []struct {
- Input string
- Output map[string]string
- Error bool
- }{
- {
- "key=value",
- map[string]string{"key": "value"},
- false,
- },
-
- {
- "key=",
- map[string]string{"key": ""},
- false,
- },
-
- {
- "key=foo=bar",
- map[string]string{"key": "foo=bar"},
- false,
- },
-
- {
- "key",
- nil,
- true,
- },
- }
-
- for _, tc := range cases {
- f := new(Flag)
- err := f.Set(tc.Input)
- if (err != nil) != tc.Error {
- t.Fatalf("bad error. Input: %#v", tc.Input)
- }
-
- actual := map[string]string(*f)
- if !reflect.DeepEqual(actual, tc.Output) {
- t.Fatalf("bad: %#v", actual)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/flag-slice/flag.go b/vendor/github.com/hashicorp/vault/helper/flag-slice/flag.go
deleted file mode 100644
index da75149..0000000
--- a/vendor/github.com/hashicorp/vault/helper/flag-slice/flag.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package sliceflag
-
-import "strings"
-
-// StringFlag implements the flag.Value interface and allows multiple
-// calls to the same variable to append a list.
-type StringFlag []string
-
-func (s *StringFlag) String() string {
- return strings.Join(*s, ",")
-}
-
-func (s *StringFlag) Set(value string) error {
- *s = append(*s, value)
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/flag-slice/flag_test.go b/vendor/github.com/hashicorp/vault/helper/flag-slice/flag_test.go
deleted file mode 100644
index f72e1d9..0000000
--- a/vendor/github.com/hashicorp/vault/helper/flag-slice/flag_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package sliceflag
-
-import (
- "flag"
- "reflect"
- "testing"
-)
-
-func TestStringFlag_implements(t *testing.T) {
- var raw interface{}
- raw = new(StringFlag)
- if _, ok := raw.(flag.Value); !ok {
- t.Fatalf("StringFlag should be a Value")
- }
-}
-
-func TestStringFlagSet(t *testing.T) {
- sv := new(StringFlag)
- err := sv.Set("foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- err = sv.Set("bar")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := []string{"foo", "bar"}
- if !reflect.DeepEqual([]string(*sv), expected) {
- t.Fatalf("Bad: %#v", sv)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go b/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go
deleted file mode 100644
index d146a37..0000000
--- a/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go
+++ /dev/null
@@ -1,261 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: types.proto
-
-/*
-Package forwarding is a generated protocol buffer package.
-
-It is generated from these files:
- types.proto
-
-It has these top-level messages:
- Request
- URL
- HeaderEntry
- Response
-*/
-package forwarding
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Request struct {
- // Not used right now but reserving in case it turns out that streaming
- // makes things more economical on the gRPC side
- // uint64 id = 1;
- Method string `protobuf:"bytes,2,opt,name=method" json:"method,omitempty"`
- Url *URL `protobuf:"bytes,3,opt,name=url" json:"url,omitempty"`
- HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- Body []byte `protobuf:"bytes,5,opt,name=body,proto3" json:"body,omitempty"`
- Host string `protobuf:"bytes,6,opt,name=host" json:"host,omitempty"`
- RemoteAddr string `protobuf:"bytes,7,opt,name=remote_addr,json=remoteAddr" json:"remote_addr,omitempty"`
- PeerCertificates [][]byte `protobuf:"bytes,8,rep,name=peer_certificates,json=peerCertificates,proto3" json:"peer_certificates,omitempty"`
-}
-
-func (m *Request) Reset() { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage() {}
-func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-func (m *Request) GetMethod() string {
- if m != nil {
- return m.Method
- }
- return ""
-}
-
-func (m *Request) GetUrl() *URL {
- if m != nil {
- return m.Url
- }
- return nil
-}
-
-func (m *Request) GetHeaderEntries() map[string]*HeaderEntry {
- if m != nil {
- return m.HeaderEntries
- }
- return nil
-}
-
-func (m *Request) GetBody() []byte {
- if m != nil {
- return m.Body
- }
- return nil
-}
-
-func (m *Request) GetHost() string {
- if m != nil {
- return m.Host
- }
- return ""
-}
-
-func (m *Request) GetRemoteAddr() string {
- if m != nil {
- return m.RemoteAddr
- }
- return ""
-}
-
-func (m *Request) GetPeerCertificates() [][]byte {
- if m != nil {
- return m.PeerCertificates
- }
- return nil
-}
-
-type URL struct {
- Scheme string `protobuf:"bytes,1,opt,name=scheme" json:"scheme,omitempty"`
- Opaque string `protobuf:"bytes,2,opt,name=opaque" json:"opaque,omitempty"`
- // This isn't needed now but might be in the future, so we'll skip the
- // number to keep the ordering in net/url
- // UserInfo user = 3;
- Host string `protobuf:"bytes,4,opt,name=host" json:"host,omitempty"`
- Path string `protobuf:"bytes,5,opt,name=path" json:"path,omitempty"`
- RawPath string `protobuf:"bytes,6,opt,name=raw_path,json=rawPath" json:"raw_path,omitempty"`
- // This also isn't needed right now, but we'll reserve the number
- // bool force_query = 7;
- RawQuery string `protobuf:"bytes,8,opt,name=raw_query,json=rawQuery" json:"raw_query,omitempty"`
- Fragment string `protobuf:"bytes,9,opt,name=fragment" json:"fragment,omitempty"`
-}
-
-func (m *URL) Reset() { *m = URL{} }
-func (m *URL) String() string { return proto.CompactTextString(m) }
-func (*URL) ProtoMessage() {}
-func (*URL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-
-func (m *URL) GetScheme() string {
- if m != nil {
- return m.Scheme
- }
- return ""
-}
-
-func (m *URL) GetOpaque() string {
- if m != nil {
- return m.Opaque
- }
- return ""
-}
-
-func (m *URL) GetHost() string {
- if m != nil {
- return m.Host
- }
- return ""
-}
-
-func (m *URL) GetPath() string {
- if m != nil {
- return m.Path
- }
- return ""
-}
-
-func (m *URL) GetRawPath() string {
- if m != nil {
- return m.RawPath
- }
- return ""
-}
-
-func (m *URL) GetRawQuery() string {
- if m != nil {
- return m.RawQuery
- }
- return ""
-}
-
-func (m *URL) GetFragment() string {
- if m != nil {
- return m.Fragment
- }
- return ""
-}
-
-type HeaderEntry struct {
- Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
-}
-
-func (m *HeaderEntry) Reset() { *m = HeaderEntry{} }
-func (m *HeaderEntry) String() string { return proto.CompactTextString(m) }
-func (*HeaderEntry) ProtoMessage() {}
-func (*HeaderEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-func (m *HeaderEntry) GetValues() []string {
- if m != nil {
- return m.Values
- }
- return nil
-}
-
-type Response struct {
- // Not used right now but reserving in case it turns out that streaming
- // makes things more economical on the gRPC side
- // uint64 id = 1;
- StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode" json:"status_code,omitempty"`
- Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
- // Added in 0.6.2 to ensure that the content-type is set appropriately, as
- // well as any other information
- HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *Response) Reset() { *m = Response{} }
-func (m *Response) String() string { return proto.CompactTextString(m) }
-func (*Response) ProtoMessage() {}
-func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
-
-func (m *Response) GetStatusCode() uint32 {
- if m != nil {
- return m.StatusCode
- }
- return 0
-}
-
-func (m *Response) GetBody() []byte {
- if m != nil {
- return m.Body
- }
- return nil
-}
-
-func (m *Response) GetHeaderEntries() map[string]*HeaderEntry {
- if m != nil {
- return m.HeaderEntries
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Request)(nil), "forwarding.Request")
- proto.RegisterType((*URL)(nil), "forwarding.URL")
- proto.RegisterType((*HeaderEntry)(nil), "forwarding.HeaderEntry")
- proto.RegisterType((*Response)(nil), "forwarding.Response")
-}
-
-func init() { proto.RegisterFile("types.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 437 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0xc1, 0x6e, 0xd4, 0x30,
- 0x10, 0x95, 0xd7, 0xdb, 0xdd, 0x64, 0xd2, 0x42, 0xf1, 0x01, 0x4c, 0x39, 0x10, 0x56, 0x02, 0x22,
- 0x21, 0xf6, 0xb0, 0x5c, 0x10, 0x37, 0x54, 0x21, 0x71, 0x28, 0x08, 0x2c, 0xf5, 0xc0, 0x29, 0x72,
- 0xd7, 0xb3, 0xcd, 0x8a, 0x26, 0xce, 0xda, 0x0e, 0xab, 0x7c, 0x16, 0xff, 0xc4, 0x89, 0xaf, 0x40,
- 0xb6, 0x43, 0x1b, 0x84, 0x10, 0xa7, 0x9e, 0x76, 0xde, 0x7b, 0xb3, 0xe3, 0x79, 0x33, 0x13, 0xc8,
- 0x5c, 0xdf, 0xa2, 0x5d, 0xb6, 0x46, 0x3b, 0xcd, 0x60, 0xa3, 0xcd, 0x5e, 0x1a, 0xb5, 0x6d, 0x2e,
- 0x17, 0x3f, 0x26, 0x30, 0x17, 0xb8, 0xeb, 0xd0, 0x3a, 0x76, 0x1f, 0x66, 0x35, 0xba, 0x4a, 0x2b,
- 0x3e, 0xc9, 0x49, 0x91, 0x8a, 0x01, 0xb1, 0x27, 0x40, 0x3b, 0x73, 0xc5, 0x69, 0x4e, 0x8a, 0x6c,
- 0x75, 0x77, 0x79, 0xf3, 0xef, 0xe5, 0xb9, 0x38, 0x13, 0x5e, 0x63, 0x1f, 0xe0, 0x4e, 0x85, 0x52,
- 0xa1, 0x29, 0xb1, 0x71, 0x66, 0x8b, 0x96, 0x4f, 0x73, 0x5a, 0x64, 0xab, 0x67, 0xe3, 0xec, 0xe1,
- 0x9d, 0xe5, 0xfb, 0x90, 0xf9, 0x2e, 0x26, 0xfa, 0x9f, 0x5e, 0x1c, 0x55, 0x63, 0x8e, 0x31, 0x98,
- 0x5e, 0x68, 0xd5, 0xf3, 0x83, 0x9c, 0x14, 0x87, 0x22, 0xc4, 0x9e, 0xab, 0xb4, 0x75, 0x7c, 0x16,
- 0x7a, 0x0b, 0x31, 0x7b, 0x0c, 0x99, 0xc1, 0x5a, 0x3b, 0x2c, 0xa5, 0x52, 0x86, 0xcf, 0x83, 0x04,
- 0x91, 0x7a, 0xab, 0x94, 0x61, 0x2f, 0xe0, 0x5e, 0x8b, 0x68, 0xca, 0x35, 0x1a, 0xb7, 0xdd, 0x6c,
- 0xd7, 0xd2, 0xa1, 0xe5, 0x49, 0x4e, 0x8b, 0x43, 0x71, 0xec, 0x85, 0xd3, 0x11, 0x7f, 0xf2, 0x05,
- 0xd8, 0xdf, 0xad, 0xb1, 0x63, 0xa0, 0x5f, 0xb1, 0xe7, 0x24, 0xd4, 0xf6, 0x21, 0x7b, 0x09, 0x07,
- 0xdf, 0xe4, 0x55, 0x87, 0x61, 0x4c, 0xd9, 0xea, 0xc1, 0xd8, 0xe3, 0x4d, 0x81, 0x5e, 0xc4, 0xac,
- 0x37, 0x93, 0xd7, 0x64, 0xf1, 0x9d, 0x00, 0x3d, 0x17, 0x67, 0x7e, 0xc4, 0x76, 0x5d, 0x61, 0x8d,
- 0x43, 0xbd, 0x01, 0x79, 0x5e, 0xb7, 0x72, 0x37, 0xd4, 0x4c, 0xc5, 0x80, 0xae, 0x4d, 0x4f, 0x47,
- 0xa6, 0x19, 0x4c, 0x5b, 0xe9, 0xaa, 0x30, 0x9c, 0x54, 0x84, 0x98, 0x3d, 0x84, 0xc4, 0xc8, 0x7d,
- 0x19, 0xf8, 0x38, 0xa0, 0xb9, 0x91, 0xfb, 0x4f, 0x5e, 0x7a, 0x04, 0xa9, 0x97, 0x76, 0x1d, 0x9a,
- 0x9e, 0x27, 0x41, 0xf3, 0xb9, 0x9f, 0x3d, 0x66, 0x27, 0x90, 0x6c, 0x8c, 0xbc, 0xac, 0xb1, 0x71,
- 0x3c, 0x8d, 0xda, 0x6f, 0xbc, 0x78, 0x0a, 0xd9, 0xc8, 0x8d, 0x6f, 0x31, 0xf8, 0xb1, 0x9c, 0xe4,
- 0xd4, 0xb7, 0x18, 0xd1, 0xe2, 0x27, 0x81, 0x44, 0xa0, 0x6d, 0x75, 0x63, 0xd1, 0x2f, 0xc4, 0x3a,
- 0xe9, 0x3a, 0x5b, 0xae, 0xb5, 0x8a, 0x66, 0x8e, 0x04, 0x44, 0xea, 0x54, 0x2b, 0xbc, 0xde, 0x2c,
- 0x1d, 0x6d, 0xf6, 0xe3, 0x3f, 0x8e, 0xe7, 0xf9, 0x9f, 0xc7, 0x13, 0x9f, 0xf8, 0xff, 0xf5, 0xdc,
- 0xe2, 0x1e, 0x2f, 0x66, 0xe1, 0x0b, 0x7a, 0xf5, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x57, 0x73, 0xdf,
- 0x6b, 0x50, 0x03, 0x00, 0x00,
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/types.proto b/vendor/github.com/hashicorp/vault/helper/forwarding/types.proto
deleted file mode 100644
index 02c3518..0000000
--- a/vendor/github.com/hashicorp/vault/helper/forwarding/types.proto
+++ /dev/null
@@ -1,46 +0,0 @@
-syntax = "proto3";
-
-package forwarding;
-
-message Request {
- // Not used right now but reserving in case it turns out that streaming
- // makes things more economical on the gRPC side
- //uint64 id = 1;
- string method = 2;
- URL url = 3;
- map header_entries = 4;
- bytes body = 5;
- string host = 6;
- string remote_addr = 7;
- repeated bytes peer_certificates = 8;
-}
-
-message URL {
- string scheme = 1;
- string opaque = 2;
- // This isn't needed now but might be in the future, so we'll skip the
- // number to keep the ordering in net/url
- //UserInfo user = 3;
- string host = 4;
- string path = 5;
- string raw_path = 6;
- // This also isn't needed right now, but we'll reserve the number
- //bool force_query = 7;
- string raw_query = 8;
- string fragment = 9;
-}
-
-message HeaderEntry {
- repeated string values = 1;
-}
-
-message Response {
- // Not used right now but reserving in case it turns out that streaming
- // makes things more economical on the gRPC side
- //uint64 id = 1;
- uint32 status_code = 2;
- bytes body = 3;
- // Added in 0.6.2 to ensure that the content-type is set appropriately, as
- // well as any other information
- map header_entries = 4;
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/util.go b/vendor/github.com/hashicorp/vault/helper/forwarding/util.go
deleted file mode 100644
index 92e6cb1..0000000
--- a/vendor/github.com/hashicorp/vault/helper/forwarding/util.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package forwarding
-
-import (
- "bytes"
- "crypto/tls"
- "crypto/x509"
- "net/http"
- "net/url"
- "os"
-
- "github.com/golang/protobuf/proto"
- "github.com/hashicorp/vault/helper/compressutil"
- "github.com/hashicorp/vault/helper/jsonutil"
-)
-
-type bufCloser struct {
- *bytes.Buffer
-}
-
-func (b bufCloser) Close() error {
- b.Reset()
- return nil
-}
-
-// GenerateForwardedRequest generates a new http.Request that contains the
-// original requests's information in the new request's body.
-func GenerateForwardedHTTPRequest(req *http.Request, addr string) (*http.Request, error) {
- fq, err := GenerateForwardedRequest(req)
- if err != nil {
- return nil, err
- }
-
- var newBody []byte
- switch os.Getenv("VAULT_MESSAGE_TYPE") {
- case "json":
- newBody, err = jsonutil.EncodeJSON(fq)
- case "json_compress":
- newBody, err = jsonutil.EncodeJSONAndCompress(fq, &compressutil.CompressionConfig{
- Type: compressutil.CompressionTypeLzw,
- })
- case "proto3":
- fallthrough
- default:
- newBody, err = proto.Marshal(fq)
- }
- if err != nil {
- return nil, err
- }
-
- ret, err := http.NewRequest("POST", addr, bytes.NewBuffer(newBody))
- if err != nil {
- return nil, err
- }
-
- return ret, nil
-}
-
-func GenerateForwardedRequest(req *http.Request) (*Request, error) {
- fq := Request{
- Method: req.Method,
- HeaderEntries: make(map[string]*HeaderEntry, len(req.Header)),
- Host: req.Host,
- RemoteAddr: req.RemoteAddr,
- }
-
- reqURL := req.URL
- fq.Url = &URL{
- Scheme: reqURL.Scheme,
- Opaque: reqURL.Opaque,
- Host: reqURL.Host,
- Path: reqURL.Path,
- RawPath: reqURL.RawPath,
- RawQuery: reqURL.RawQuery,
- Fragment: reqURL.Fragment,
- }
-
- for k, v := range req.Header {
- fq.HeaderEntries[k] = &HeaderEntry{
- Values: v,
- }
- }
-
- buf := bytes.NewBuffer(nil)
- _, err := buf.ReadFrom(req.Body)
- if err != nil {
- return nil, err
- }
- fq.Body = buf.Bytes()
-
- if req.TLS != nil && req.TLS.PeerCertificates != nil && len(req.TLS.PeerCertificates) > 0 {
- fq.PeerCertificates = make([][]byte, len(req.TLS.PeerCertificates))
- for i, cert := range req.TLS.PeerCertificates {
- fq.PeerCertificates[i] = cert.Raw
- }
- }
-
- return &fq, nil
-}
-
-// ParseForwardedRequest generates a new http.Request that is comprised of the
-// values in the given request's body, assuming it correctly parses into a
-// ForwardedRequest.
-func ParseForwardedHTTPRequest(req *http.Request) (*http.Request, error) {
- buf := bytes.NewBuffer(nil)
- _, err := buf.ReadFrom(req.Body)
- if err != nil {
- return nil, err
- }
-
- fq := new(Request)
- switch os.Getenv("VAULT_MESSAGE_TYPE") {
- case "json", "json_compress":
- err = jsonutil.DecodeJSON(buf.Bytes(), fq)
- default:
- err = proto.Unmarshal(buf.Bytes(), fq)
- }
- if err != nil {
- return nil, err
- }
-
- return ParseForwardedRequest(fq)
-}
-
-func ParseForwardedRequest(fq *Request) (*http.Request, error) {
- buf := bufCloser{
- Buffer: bytes.NewBuffer(fq.Body),
- }
-
- ret := &http.Request{
- Method: fq.Method,
- Header: make(map[string][]string, len(fq.HeaderEntries)),
- Body: buf,
- Host: fq.Host,
- RemoteAddr: fq.RemoteAddr,
- }
-
- ret.URL = &url.URL{
- Scheme: fq.Url.Scheme,
- Opaque: fq.Url.Opaque,
- Host: fq.Url.Host,
- Path: fq.Url.Path,
- RawPath: fq.Url.RawPath,
- RawQuery: fq.Url.RawQuery,
- Fragment: fq.Url.Fragment,
- }
-
- for k, v := range fq.HeaderEntries {
- ret.Header[k] = v.Values
- }
-
- if fq.PeerCertificates != nil && len(fq.PeerCertificates) > 0 {
- ret.TLS = &tls.ConnectionState{
- PeerCertificates: make([]*x509.Certificate, len(fq.PeerCertificates)),
- }
- for i, certBytes := range fq.PeerCertificates {
- cert, err := x509.ParseCertificate(certBytes)
- if err != nil {
- return nil, err
- }
- ret.TLS.PeerCertificates[i] = cert
- }
- }
-
- return ret, nil
-}
-
-type RPCResponseWriter struct {
- statusCode int
- header http.Header
- body *bytes.Buffer
-}
-
-// NewRPCResponseWriter returns an initialized RPCResponseWriter
-func NewRPCResponseWriter() *RPCResponseWriter {
- w := &RPCResponseWriter{
- header: make(http.Header),
- body: new(bytes.Buffer),
- statusCode: 200,
- }
- //w.header.Set("Content-Type", "application/octet-stream")
- return w
-}
-
-func (w *RPCResponseWriter) Header() http.Header {
- return w.header
-}
-
-func (w *RPCResponseWriter) Write(buf []byte) (int, error) {
- w.body.Write(buf)
- return len(buf), nil
-}
-
-func (w *RPCResponseWriter) WriteHeader(code int) {
- w.statusCode = code
-}
-
-func (w *RPCResponseWriter) StatusCode() int {
- return w.statusCode
-}
-
-func (w *RPCResponseWriter) Body() *bytes.Buffer {
- return w.body
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/util_test.go b/vendor/github.com/hashicorp/vault/helper/forwarding/util_test.go
deleted file mode 100644
index 0af2b89..0000000
--- a/vendor/github.com/hashicorp/vault/helper/forwarding/util_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package forwarding
-
-import (
- "bufio"
- "bytes"
- "net/http"
- "os"
- "reflect"
- "testing"
-)
-
-func Test_ForwardedRequest_GenerateParse(t *testing.T) {
- testForwardedRequestGenerateParse(t)
-}
-
-func Benchmark_ForwardedRequest_GenerateParse_JSON(b *testing.B) {
- os.Setenv("VAULT_MESSAGE_TYPE", "json")
- var totalSize int64
- var numRuns int64
- for i := 0; i < b.N; i++ {
- totalSize += testForwardedRequestGenerateParse(b)
- numRuns++
- }
- b.Logf("message size per op: %d", totalSize/numRuns)
-}
-
-func Benchmark_ForwardedRequest_GenerateParse_JSON_Compressed(b *testing.B) {
- os.Setenv("VAULT_MESSAGE_TYPE", "json_compress")
- var totalSize int64
- var numRuns int64
- for i := 0; i < b.N; i++ {
- totalSize += testForwardedRequestGenerateParse(b)
- numRuns++
- }
- b.Logf("message size per op: %d", totalSize/numRuns)
-}
-
-func Benchmark_ForwardedRequest_GenerateParse_Proto3(b *testing.B) {
- os.Setenv("VAULT_MESSAGE_TYPE", "proto3")
- var totalSize int64
- var numRuns int64
- for i := 0; i < b.N; i++ {
- totalSize += testForwardedRequestGenerateParse(b)
- numRuns++
- }
- b.Logf("message size per op: %d", totalSize/numRuns)
-}
-
-func testForwardedRequestGenerateParse(t testing.TB) int64 {
- bodBuf := bytes.NewReader([]byte(`{ "foo": "bar", "zip": { "argle": "bargle", neet: 0 } }`))
- req, err := http.NewRequest("FOOBAR", "https://pushit.real.good:9281/snicketysnack?furbleburble=bloopetybloop", bodBuf)
- if err != nil {
- t.Fatal(err)
- }
-
- // We want to get the fields we would expect from an incoming request, so
- // we write it out and then read it again
- buf1 := bytes.NewBuffer(nil)
- err = req.Write(buf1)
- if err != nil {
- t.Fatal(err)
- }
-
- // Read it back in, parsing like a server
- bufr1 := bufio.NewReader(buf1)
- initialReq, err := http.ReadRequest(bufr1)
- if err != nil {
- t.Fatal(err)
- }
-
- // Generate the request with the forwarded request in the body
- req, err = GenerateForwardedHTTPRequest(initialReq, "https://bloopety.bloop:8201")
- if err != nil {
- t.Fatal(err)
- }
-
- // Perform another "round trip"
- buf2 := bytes.NewBuffer(nil)
- err = req.Write(buf2)
- if err != nil {
- t.Fatal(err)
- }
- size := int64(buf2.Len())
- bufr2 := bufio.NewReader(buf2)
- intreq, err := http.ReadRequest(bufr2)
- if err != nil {
- t.Fatal(err)
- }
-
- // Now extract the forwarded request to generate a final request for processing
- finalReq, err := ParseForwardedHTTPRequest(intreq)
- if err != nil {
- t.Fatal(err)
- }
-
- switch {
- case initialReq.Method != finalReq.Method:
- t.Fatalf("bad method:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
- case initialReq.RemoteAddr != finalReq.RemoteAddr:
- t.Fatalf("bad remoteaddr:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
- case initialReq.Host != finalReq.Host:
- t.Fatalf("bad host:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
- case !reflect.DeepEqual(initialReq.URL, finalReq.URL):
- t.Fatalf("bad url:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq.URL, *finalReq.URL)
- case !reflect.DeepEqual(initialReq.Header, finalReq.Header):
- t.Fatalf("bad header:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq)
- default:
- // Compare bodies
- bodBuf.Seek(0, 0)
- initBuf := bytes.NewBuffer(nil)
- _, err = initBuf.ReadFrom(bodBuf)
- if err != nil {
- t.Fatal(err)
- }
- finBuf := bytes.NewBuffer(nil)
- _, err = finBuf.ReadFrom(finalReq.Body)
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(initBuf.Bytes(), finBuf.Bytes()) {
- t.Fatalf("badbody :\ninitialReq:\n%#v\nfinalReq:\n%#v\n", initBuf.Bytes(), finBuf.Bytes())
- }
- }
-
- return size
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/gated-writer/writer.go b/vendor/github.com/hashicorp/vault/helper/gated-writer/writer.go
deleted file mode 100644
index 9c5aeba..0000000
--- a/vendor/github.com/hashicorp/vault/helper/gated-writer/writer.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package gatedwriter
-
-import (
- "io"
- "sync"
-)
-
-// Writer is an io.Writer implementation that buffers all of its
-// data into an internal buffer until it is told to let data through.
-type Writer struct {
- Writer io.Writer
-
- buf [][]byte
- flush bool
- lock sync.RWMutex
-}
-
-// Flush tells the Writer to flush any buffered data and to stop
-// buffering.
-func (w *Writer) Flush() {
- w.lock.Lock()
- w.flush = true
- w.lock.Unlock()
-
- for _, p := range w.buf {
- w.Write(p)
- }
- w.buf = nil
-}
-
-func (w *Writer) Write(p []byte) (n int, err error) {
- w.lock.RLock()
- defer w.lock.RUnlock()
-
- if w.flush {
- return w.Writer.Write(p)
- }
-
- p2 := make([]byte, len(p))
- copy(p2, p)
- w.buf = append(w.buf, p2)
- return len(p), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/gated-writer/writer_test.go b/vendor/github.com/hashicorp/vault/helper/gated-writer/writer_test.go
deleted file mode 100644
index b007ef1..0000000
--- a/vendor/github.com/hashicorp/vault/helper/gated-writer/writer_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package gatedwriter
-
-import (
- "bytes"
- "io"
- "testing"
-)
-
-func TestWriter_impl(t *testing.T) {
- var _ io.Writer = new(Writer)
-}
-
-func TestWriter(t *testing.T) {
- buf := new(bytes.Buffer)
- w := &Writer{Writer: buf}
- w.Write([]byte("foo\n"))
- w.Write([]byte("bar\n"))
-
- if buf.String() != "" {
- t.Fatalf("bad: %s", buf.String())
- }
-
- w.Flush()
-
- if buf.String() != "foo\nbar\n" {
- t.Fatalf("bad: %s", buf.String())
- }
-
- w.Write([]byte("baz\n"))
-
- if buf.String() != "foo\nbar\nbaz\n" {
- t.Fatalf("bad: %s", buf.String())
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go b/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go
index a96745b..d03ddef 100644
--- a/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go
+++ b/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/compressutil"
)
@@ -64,7 +65,7 @@ func DecodeJSON(data []byte, out interface{}) error {
// Decompress the data if it was compressed in the first place
decompressedBytes, uncompressed, err := compressutil.Decompress(data)
if err != nil {
- return fmt.Errorf("failed to decompress JSON: err: %v", err)
+ return errwrap.Wrapf("failed to decompress JSON: {{err}}", err)
}
if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) {
return fmt.Errorf("decompressed data being decoded is invalid")
@@ -91,7 +92,7 @@ func DecodeJSONFromReader(r io.Reader, out interface{}) error {
dec := json.NewDecoder(r)
- // While decoding JSON values, intepret the integer values as `json.Number`s instead of `float64`.
+ // While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`.
dec.UseNumber()
// Since 'out' is an interface representing a pointer, pass it to the decoder without an '&'
diff --git a/vendor/github.com/hashicorp/vault/helper/jsonutil/json_test.go b/vendor/github.com/hashicorp/vault/helper/jsonutil/json_test.go
deleted file mode 100644
index 53d4adf..0000000
--- a/vendor/github.com/hashicorp/vault/helper/jsonutil/json_test.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package jsonutil
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "reflect"
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/helper/compressutil"
-)
-
-func TestJSONUtil_CompressDecompressJSON(t *testing.T) {
- expected := map[string]interface{}{
- "test": "data",
- "validation": "process",
- }
-
- // Compress an object
- compressedBytes, err := EncodeJSONAndCompress(expected, nil)
- if err != nil {
- t.Fatal(err)
- }
- if len(compressedBytes) == 0 {
- t.Fatal("expected compressed data")
- }
-
- // Check if canary is present in the compressed data
- if compressedBytes[0] != compressutil.CompressionCanaryGzip {
- t.Fatalf("canary missing in compressed data")
- }
-
- // Decompress and decode the compressed information and verify the functional
- // behavior
- var actual map[string]interface{}
- if err = DecodeJSON(compressedBytes, &actual); err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
- }
- for key, _ := range actual {
- delete(actual, key)
- }
-
- // Test invalid data
- if err = DecodeJSON([]byte{}, &actual); err == nil {
- t.Fatalf("expected a failure")
- }
-
- // Test invalid data after the canary byte
- var buf bytes.Buffer
- buf.Write([]byte{compressutil.CompressionCanaryGzip})
- if err = DecodeJSON(buf.Bytes(), &actual); err == nil {
- t.Fatalf("expected a failure")
- }
-
- // Compress an object
- compressedBytes, err = EncodeJSONAndCompress(expected, &compressutil.CompressionConfig{
- Type: compressutil.CompressionTypeGzip,
- GzipCompressionLevel: gzip.BestSpeed,
- })
- if err != nil {
- t.Fatal(err)
- }
- if len(compressedBytes) == 0 {
- t.Fatal("expected compressed data")
- }
-
- // Check if canary is present in the compressed data
- if compressedBytes[0] != compressutil.CompressionCanaryGzip {
- t.Fatalf("canary missing in compressed data")
- }
-
- // Decompress and decode the compressed information and verify the functional
- // behavior
- if err = DecodeJSON(compressedBytes, &actual); err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
- }
-}
-
-func TestJSONUtil_EncodeJSON(t *testing.T) {
- input := map[string]interface{}{
- "test": "data",
- "validation": "process",
- }
-
- actualBytes, err := EncodeJSON(input)
- if err != nil {
- t.Fatalf("failed to encode JSON: %v", err)
- }
-
- actual := strings.TrimSpace(string(actualBytes))
- expected := `{"test":"data","validation":"process"}`
-
- if actual != expected {
- t.Fatalf("bad: encoded JSON: expected:%s\nactual:%s\n", expected, string(actualBytes))
- }
-}
-
-func TestJSONUtil_DecodeJSON(t *testing.T) {
- input := `{"test":"data","validation":"process"}`
-
- var actual map[string]interface{}
-
- err := DecodeJSON([]byte(input), &actual)
- if err != nil {
- fmt.Printf("decoding err: %v\n", err)
- }
-
- expected := map[string]interface{}{
- "test": "data",
- "validation": "process",
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
- }
-}
-
-func TestJSONUtil_DecodeJSONFromReader(t *testing.T) {
- input := `{"test":"data","validation":"process"}`
-
- var actual map[string]interface{}
-
- err := DecodeJSONFromReader(bytes.NewReader([]byte(input)), &actual)
- if err != nil {
- fmt.Printf("decoding err: %v\n", err)
- }
-
- expected := map[string]interface{}{
- "test": "data",
- "validation": "process",
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/kdf/kdf.go b/vendor/github.com/hashicorp/vault/helper/kdf/kdf.go
deleted file mode 100644
index 5009074..0000000
--- a/vendor/github.com/hashicorp/vault/helper/kdf/kdf.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// This package is used to implement Key Derivation Functions (KDF)
-// based on the recommendations of NIST SP 800-108. These are useful
-// for generating unique-per-transaction keys, or situations in which
-// a key hierarchy may be useful.
-package kdf
-
-import (
- "crypto/hmac"
- "crypto/sha256"
- "encoding/binary"
- "fmt"
-)
-
-// PRF is a pseudo-random function that takes a key or seed,
-// as well as additional binary data and generates output that is
-// indistinguishable from random. Examples are cryptographic hash
-// functions or block ciphers.
-type PRF func([]byte, []byte) ([]byte, error)
-
-// CounterMode implements the counter mode KDF that uses a psuedo-random-function (PRF)
-// along with a counter to generate derived keys. The KDF takes a base key
-// a derivation context, and the required number of output bits.
-func CounterMode(prf PRF, prfLen uint32, key []byte, context []byte, bits uint32) ([]byte, error) {
- // Ensure the PRF is byte aligned
- if prfLen%8 != 0 {
- return nil, fmt.Errorf("PRF must be byte aligned")
- }
-
- // Ensure the bits required are byte aligned
- if bits%8 != 0 {
- return nil, fmt.Errorf("bits required must be byte aligned")
- }
-
- // Determine the number of rounds required
- rounds := bits / prfLen
- if bits%prfLen != 0 {
- rounds++
- }
-
- // Allocate and setup the input
- input := make([]byte, 4+len(context)+4)
- copy(input[4:], context)
- binary.BigEndian.PutUint32(input[4+len(context):], bits)
-
- // Iteratively generate more key material
- var out []byte
- var i uint32
- for i = 0; i < rounds; i++ {
- // Update the counter in the input string
- binary.BigEndian.PutUint32(input[:4], i)
-
- // Compute more key material
- part, err := prf(key, input)
- if err != nil {
- return nil, err
- }
- if uint32(len(part)*8) != prfLen {
- return nil, fmt.Errorf("PRF length mis-match (%d vs %d)", len(part)*8, prfLen)
- }
- out = append(out, part...)
- }
-
- // Return the desired number of output bytes
- return out[:bits/8], nil
-}
-
-const (
- // HMACSHA256PRFLen is the length of output from HMACSHA256PRF
- HMACSHA256PRFLen uint32 = 256
-)
-
-// HMACSHA256PRF is a pseudo-random-function (PRF) that uses an HMAC-SHA256
-func HMACSHA256PRF(key []byte, data []byte) ([]byte, error) {
- hash := hmac.New(sha256.New, key)
- hash.Write(data)
- return hash.Sum(nil), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/kdf/kdf_test.go b/vendor/github.com/hashicorp/vault/helper/kdf/kdf_test.go
deleted file mode 100644
index 120c903..0000000
--- a/vendor/github.com/hashicorp/vault/helper/kdf/kdf_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package kdf
-
-import (
- "bytes"
- "testing"
-)
-
-func TestCounterMode(t *testing.T) {
- key := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
- context := []byte("the quick brown fox")
- prf := HMACSHA256PRF
- prfLen := HMACSHA256PRFLen
-
- // Expect256 was generated in python with
- // import hashlib, hmac
- // hash = hashlib.sha256
- // context = "the quick brown fox"
- // key = "".join([chr(x) for x in range(1, 17)])
- // inp = "\x00\x00\x00\x00"+context+"\x00\x00\x01\x00"
- // digest = hmac.HMAC(key, inp, hash).digest()
- // print [ord(x) for x in digest]
- expect256 := []byte{219, 25, 238, 6, 185, 236, 180, 64, 248, 152, 251,
- 153, 79, 5, 141, 222, 66, 200, 66, 143, 40, 3, 101, 221, 206, 163, 102,
- 80, 88, 234, 87, 157}
-
- for _, l := range []uint32{128, 256, 384, 1024} {
- out, err := CounterMode(prf, prfLen, key, context, l)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if uint32(len(out)*8) != l {
- t.Fatalf("bad length: %#v", out)
- }
-
- if bytes.Contains(out, key) {
- t.Fatalf("output contains key")
- }
-
- if l == 256 && !bytes.Equal(out, expect256) {
- t.Fatalf("mis-match")
- }
- }
-
-}
-
-func TestHMACSHA256PRF(t *testing.T) {
- key := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
- data := []byte("foobarbaz")
- out, err := HMACSHA256PRF(key, data)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if uint32(len(out)*8) != HMACSHA256PRFLen {
- t.Fatalf("Bad len")
- }
-
- // Expect was generated in python with:
- // import hashlib, hmac
- // hash = hashlib.sha256
- // msg = "foobarbaz"
- // key = "".join([chr(x) for x in range(1, 17)])
- // hm = hmac.HMAC(key, msg, hash)
- // print [ord(x) for x in hm.digest()]
- expect := []byte{9, 50, 146, 8, 188, 130, 150, 107, 205, 147, 82, 170,
- 253, 183, 26, 38, 167, 194, 220, 111, 56, 118, 219, 209, 31, 52, 137,
- 90, 246, 133, 191, 124}
- if !bytes.Equal(expect, out) {
- t.Fatalf("mis-matched output")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go b/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go
deleted file mode 100644
index 7588199..0000000
--- a/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go
+++ /dev/null
@@ -1,400 +0,0 @@
-package keysutil
-
-import (
- "errors"
- "fmt"
- "sync"
-
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- shared = false
- exclusive = true
-)
-
-var (
- errNeedExclusiveLock = errors.New("an exclusive lock is needed for this operation")
-)
-
-// PolicyRequest holds values used when requesting a policy. Most values are
-// only used during an upsert.
-type PolicyRequest struct {
- // The storage to use
- Storage logical.Storage
-
- // The name of the policy
- Name string
-
- // The key type
- KeyType KeyType
-
- // Whether it should be derived
- Derived bool
-
- // Whether to enable convergent encryption
- Convergent bool
-
- // Whether to allow export
- Exportable bool
-
- // Whether to upsert
- Upsert bool
-}
-
-type LockManager struct {
- // A lock for each named key
- locks map[string]*sync.RWMutex
-
- // A mutex for the map itself
- locksMutex sync.RWMutex
-
- // If caching is enabled, the map of name to in-memory policy cache
- cache map[string]*Policy
-
- // Used for global locking, and as the cache map mutex
- cacheMutex sync.RWMutex
-}
-
-func NewLockManager(cacheDisabled bool) *LockManager {
- lm := &LockManager{
- locks: map[string]*sync.RWMutex{},
- }
- if !cacheDisabled {
- lm.cache = map[string]*Policy{}
- }
- return lm
-}
-
-func (lm *LockManager) CacheActive() bool {
- return lm.cache != nil
-}
-
-func (lm *LockManager) InvalidatePolicy(name string) {
- // Check if it's in our cache. If so, return right away.
- if lm.CacheActive() {
- lm.cacheMutex.Lock()
- defer lm.cacheMutex.Unlock()
- delete(lm.cache, name)
- }
-}
-
-func (lm *LockManager) policyLock(name string, lockType bool) *sync.RWMutex {
- lm.locksMutex.RLock()
- lock := lm.locks[name]
- if lock != nil {
- // We want to give this up before locking the lock, but it's safe --
- // the only time we ever write to a value in this map is the first time
- // we access the value, so it won't be changing out from under us
- lm.locksMutex.RUnlock()
- if lockType == exclusive {
- lock.Lock()
- } else {
- lock.RLock()
- }
- return lock
- }
-
- lm.locksMutex.RUnlock()
- lm.locksMutex.Lock()
-
- // Don't defer the unlock call because if we get a valid lock below we want
- // to release the lock mutex right away to avoid the possibility of
- // deadlock by trying to grab the second lock
-
- // Check to make sure it hasn't been created since
- lock = lm.locks[name]
- if lock != nil {
- lm.locksMutex.Unlock()
- if lockType == exclusive {
- lock.Lock()
- } else {
- lock.RLock()
- }
- return lock
- }
-
- lock = &sync.RWMutex{}
- lm.locks[name] = lock
- lm.locksMutex.Unlock()
- if lockType == exclusive {
- lock.Lock()
- } else {
- lock.RLock()
- }
-
- return lock
-}
-
-func (lm *LockManager) UnlockPolicy(lock *sync.RWMutex, lockType bool) {
- if lockType == exclusive {
- lock.Unlock()
- } else {
- lock.RUnlock()
- }
-}
-
-// Get the policy with a read lock. If we get an error saying an exclusive lock
-// is needed (for instance, for an upgrade/migration), give up the read lock,
-// call again with an exclusive lock, then swap back out for a read lock.
-func (lm *LockManager) GetPolicyShared(storage logical.Storage, name string) (*Policy, *sync.RWMutex, error) {
- p, lock, _, err := lm.getPolicyCommon(PolicyRequest{
- Storage: storage,
- Name: name,
- }, shared)
- if err == nil ||
- (err != nil && err != errNeedExclusiveLock) {
- return p, lock, err
- }
-
- // Try again while asking for an exlusive lock
- p, lock, _, err = lm.getPolicyCommon(PolicyRequest{
- Storage: storage,
- Name: name,
- }, exclusive)
- if err != nil || p == nil || lock == nil {
- return p, lock, err
- }
-
- lock.Unlock()
-
- p, lock, _, err = lm.getPolicyCommon(PolicyRequest{
- Storage: storage,
- Name: name,
- }, shared)
- return p, lock, err
-}
-
-// Get the policy with an exclusive lock
-func (lm *LockManager) GetPolicyExclusive(storage logical.Storage, name string) (*Policy, *sync.RWMutex, error) {
- p, lock, _, err := lm.getPolicyCommon(PolicyRequest{
- Storage: storage,
- Name: name,
- }, exclusive)
- return p, lock, err
-}
-
-// Get the policy with a read lock; if it returns that an exclusive lock is
-// needed, retry. If successful, call one more time to get a read lock and
-// return the value.
-func (lm *LockManager) GetPolicyUpsert(req PolicyRequest) (*Policy, *sync.RWMutex, bool, error) {
- req.Upsert = true
-
- p, lock, _, err := lm.getPolicyCommon(req, shared)
- if err == nil ||
- (err != nil && err != errNeedExclusiveLock) {
- return p, lock, false, err
- }
-
- // Try again while asking for an exlusive lock
- p, lock, upserted, err := lm.getPolicyCommon(req, exclusive)
- if err != nil || p == nil || lock == nil {
- return p, lock, upserted, err
- }
- lock.Unlock()
-
- req.Upsert = false
- // Now get a shared lock for the return, but preserve the value of upserted
- p, lock, _, err = lm.getPolicyCommon(req, shared)
-
- return p, lock, upserted, err
-}
-
-// When the function returns, a lock will be held on the policy if err == nil.
-// It is the caller's responsibility to unlock.
-func (lm *LockManager) getPolicyCommon(req PolicyRequest, lockType bool) (*Policy, *sync.RWMutex, bool, error) {
- lock := lm.policyLock(req.Name, lockType)
-
- var p *Policy
- var err error
-
- // Check if it's in our cache. If so, return right away.
- if lm.CacheActive() {
- lm.cacheMutex.RLock()
- p = lm.cache[req.Name]
- if p != nil {
- lm.cacheMutex.RUnlock()
- return p, lock, false, nil
- }
- lm.cacheMutex.RUnlock()
- }
-
- // Load it from storage
- p, err = lm.getStoredPolicy(req.Storage, req.Name)
- if err != nil {
- lm.UnlockPolicy(lock, lockType)
- return nil, nil, false, err
- }
-
- if p == nil {
- // This is the only place we upsert a new policy, so if upsert is not
- // specified, or the lock type is wrong, unlock before returning
- if !req.Upsert {
- lm.UnlockPolicy(lock, lockType)
- return nil, nil, false, nil
- }
-
- if lockType != exclusive {
- lm.UnlockPolicy(lock, lockType)
- return nil, nil, false, errNeedExclusiveLock
- }
-
- switch req.KeyType {
- case KeyType_AES256_GCM96:
- if req.Convergent && !req.Derived {
- lm.UnlockPolicy(lock, lockType)
- return nil, nil, false, fmt.Errorf("convergent encryption requires derivation to be enabled")
- }
-
- case KeyType_ECDSA_P256:
- if req.Derived || req.Convergent {
- lm.UnlockPolicy(lock, lockType)
- return nil, nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType)
- }
-
- case KeyType_ED25519:
- if req.Convergent {
- lm.UnlockPolicy(lock, lockType)
- return nil, nil, false, fmt.Errorf("convergent encryption not not supported for keys of type %v", req.KeyType)
- }
-
- default:
- lm.UnlockPolicy(lock, lockType)
- return nil, nil, false, fmt.Errorf("unsupported key type %v", req.KeyType)
- }
-
- p = &Policy{
- Name: req.Name,
- Type: req.KeyType,
- Derived: req.Derived,
- Exportable: req.Exportable,
- }
- if req.Derived {
- p.KDF = Kdf_hkdf_sha256
- p.ConvergentEncryption = req.Convergent
- p.ConvergentVersion = 2
- }
-
- err = p.Rotate(req.Storage)
- if err != nil {
- lm.UnlockPolicy(lock, lockType)
- return nil, nil, false, err
- }
-
- if lm.CacheActive() {
- // Since we didn't have the policy in the cache, if there was no
- // error, write the value in.
- lm.cacheMutex.Lock()
- defer lm.cacheMutex.Unlock()
- // Make sure a policy didn't appear. If so, it will only be set if
- // there was no error, so assume it's good and return that
- exp := lm.cache[req.Name]
- if exp != nil {
- return exp, lock, false, nil
- }
- if err == nil {
- lm.cache[req.Name] = p
- }
- }
-
- // We don't need to worry about upgrading since it will be a new policy
- return p, lock, true, nil
- }
-
- if p.NeedsUpgrade() {
- if lockType == shared {
- lm.UnlockPolicy(lock, lockType)
- return nil, nil, false, errNeedExclusiveLock
- }
-
- err = p.Upgrade(req.Storage)
- if err != nil {
- lm.UnlockPolicy(lock, lockType)
- return nil, nil, false, err
- }
- }
-
- if lm.CacheActive() {
- // Since we didn't have the policy in the cache, if there was no
- // error, write the value in.
- lm.cacheMutex.Lock()
- defer lm.cacheMutex.Unlock()
- // Make sure a policy didn't appear. If so, it will only be set if
- // there was no error, so assume it's good and return that
- exp := lm.cache[req.Name]
- if exp != nil {
- return exp, lock, false, nil
- }
- if err == nil {
- lm.cache[req.Name] = p
- }
- }
-
- return p, lock, false, nil
-}
-
-func (lm *LockManager) DeletePolicy(storage logical.Storage, name string) error {
- lm.cacheMutex.Lock()
- lock := lm.policyLock(name, exclusive)
- defer lock.Unlock()
- defer lm.cacheMutex.Unlock()
-
- var p *Policy
- var err error
-
- if lm.CacheActive() {
- p = lm.cache[name]
- }
- if p == nil {
- p, err = lm.getStoredPolicy(storage, name)
- if err != nil {
- return err
- }
- if p == nil {
- return fmt.Errorf("could not delete policy; not found")
- }
- }
-
- if !p.DeletionAllowed {
- return fmt.Errorf("deletion is not allowed for this policy")
- }
-
- err = storage.Delete("policy/" + name)
- if err != nil {
- return fmt.Errorf("error deleting policy %s: %s", name, err)
- }
-
- err = storage.Delete("archive/" + name)
- if err != nil {
- return fmt.Errorf("error deleting archive %s: %s", name, err)
- }
-
- if lm.CacheActive() {
- delete(lm.cache, name)
- }
-
- return nil
-}
-
-func (lm *LockManager) getStoredPolicy(storage logical.Storage, name string) (*Policy, error) {
- // Check if the policy already exists
- raw, err := storage.Get("policy/" + name)
- if err != nil {
- return nil, err
- }
- if raw == nil {
- return nil, nil
- }
-
- // Decode the policy
- policy := &Policy{
- Keys: keyEntryMap{},
- }
- err = jsonutil.DecodeJSON(raw.Value, policy)
- if err != nil {
- return nil, err
- }
-
- return policy, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go b/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go
deleted file mode 100644
index 7c4a691..0000000
--- a/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go
+++ /dev/null
@@ -1,950 +0,0 @@
-package keysutil
-
-import (
- "bytes"
- "crypto"
- "crypto/aes"
- "crypto/cipher"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/hmac"
- "crypto/rand"
- "crypto/sha256"
- "crypto/x509"
- "encoding/asn1"
- "encoding/base64"
- "encoding/json"
- "encoding/pem"
- "fmt"
- "io"
- "math/big"
- "strconv"
- "strings"
- "time"
-
- "golang.org/x/crypto/ed25519"
- "golang.org/x/crypto/hkdf"
-
- uuid "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/kdf"
- "github.com/hashicorp/vault/logical"
-)
-
-// Careful with iota; don't put anything before it in this const block because
-// we need the default of zero to be the old-style KDF
-const (
- Kdf_hmac_sha256_counter = iota // built-in helper
- Kdf_hkdf_sha256 // golang.org/x/crypto/hkdf
-)
-
-// Or this one...we need the default of zero to be the original AES256-GCM96
-const (
- KeyType_AES256_GCM96 = iota
- KeyType_ECDSA_P256
- KeyType_ED25519
-)
-
-const ErrTooOld = "ciphertext or signature version is disallowed by policy (too old)"
-
-type SigningResult struct {
- Signature string
- PublicKey []byte
-}
-
-type ecdsaSignature struct {
- R, S *big.Int
-}
-
-type KeyType int
-
-func (kt KeyType) EncryptionSupported() bool {
- switch kt {
- case KeyType_AES256_GCM96:
- return true
- }
- return false
-}
-
-func (kt KeyType) DecryptionSupported() bool {
- switch kt {
- case KeyType_AES256_GCM96:
- return true
- }
- return false
-}
-
-func (kt KeyType) SigningSupported() bool {
- switch kt {
- case KeyType_ECDSA_P256, KeyType_ED25519:
- return true
- }
- return false
-}
-
-func (kt KeyType) HashSignatureInput() bool {
- switch kt {
- case KeyType_ECDSA_P256:
- return true
- }
- return false
-}
-
-func (kt KeyType) DerivationSupported() bool {
- switch kt {
- case KeyType_AES256_GCM96, KeyType_ED25519:
- return true
- }
- return false
-}
-
-func (kt KeyType) String() string {
- switch kt {
- case KeyType_AES256_GCM96:
- return "aes256-gcm96"
- case KeyType_ECDSA_P256:
- return "ecdsa-p256"
- case KeyType_ED25519:
- return "ed25519"
- }
-
- return "[unknown]"
-}
-
-// KeyEntry stores the key and metadata
-type KeyEntry struct {
- // AES or some other kind that is a pure byte slice like ED25519
- Key []byte `json:"key"`
-
- // Key used for HMAC functions
- HMACKey []byte `json:"hmac_key"`
-
- // Time of creation
- CreationTime time.Time `json:"time"`
-
- EC_X *big.Int `json:"ec_x"`
- EC_Y *big.Int `json:"ec_y"`
- EC_D *big.Int `json:"ec_d"`
-
- // The public key in an appropriate format for the type of key
- FormattedPublicKey string `json:"public_key"`
-
- // This is deprecated (but still filled) in favor of the value above which
- // is more precise
- DeprecatedCreationTime int64 `json:"creation_time"`
-}
-
-// keyEntryMap is used to allow JSON marshal/unmarshal
-type keyEntryMap map[int]KeyEntry
-
-// MarshalJSON implements JSON marshaling
-func (kem keyEntryMap) MarshalJSON() ([]byte, error) {
- intermediate := map[string]KeyEntry{}
- for k, v := range kem {
- intermediate[strconv.Itoa(k)] = v
- }
- return json.Marshal(&intermediate)
-}
-
-// MarshalJSON implements JSON unmarshaling
-func (kem keyEntryMap) UnmarshalJSON(data []byte) error {
- intermediate := map[string]KeyEntry{}
- if err := jsonutil.DecodeJSON(data, &intermediate); err != nil {
- return err
- }
- for k, v := range intermediate {
- keyval, err := strconv.Atoi(k)
- if err != nil {
- return err
- }
- kem[keyval] = v
- }
-
- return nil
-}
-
-// Policy is the struct used to store metadata
-type Policy struct {
- Name string `json:"name"`
- Key []byte `json:"key,omitempty"` //DEPRECATED
- Keys keyEntryMap `json:"keys"`
-
- // Derived keys MUST provide a context and the master underlying key is
- // never used. If convergent encryption is true, the context will be used
- // as the nonce as well.
- Derived bool `json:"derived"`
- KDF int `json:"kdf"`
- ConvergentEncryption bool `json:"convergent_encryption"`
-
- // Whether the key is exportable
- Exportable bool `json:"exportable"`
-
- // The minimum version of the key allowed to be used for decryption
- MinDecryptionVersion int `json:"min_decryption_version"`
-
- // The minimum version of the key allowed to be used for encryption
- MinEncryptionVersion int `json:"min_encryption_version"`
-
- // The latest key version in this policy
- LatestVersion int `json:"latest_version"`
-
- // The latest key version in the archive. We never delete these, so this is
- // a max.
- ArchiveVersion int `json:"archive_version"`
-
- // Whether the key is allowed to be deleted
- DeletionAllowed bool `json:"deletion_allowed"`
-
- // The version of the convergent nonce to use
- ConvergentVersion int `json:"convergent_version"`
-
- // The type of key
- Type KeyType `json:"type"`
-}
-
-// ArchivedKeys stores old keys. This is used to keep the key loading time sane
-// when there are huge numbers of rotations.
-type archivedKeys struct {
- Keys []KeyEntry `json:"keys"`
-}
-
-func (p *Policy) LoadArchive(storage logical.Storage) (*archivedKeys, error) {
- archive := &archivedKeys{}
-
- raw, err := storage.Get("archive/" + p.Name)
- if err != nil {
- return nil, err
- }
- if raw == nil {
- archive.Keys = make([]KeyEntry, 0)
- return archive, nil
- }
-
- if err := jsonutil.DecodeJSON(raw.Value, archive); err != nil {
- return nil, err
- }
-
- return archive, nil
-}
-
-func (p *Policy) storeArchive(archive *archivedKeys, storage logical.Storage) error {
- // Encode the policy
- buf, err := json.Marshal(archive)
- if err != nil {
- return err
- }
-
- // Write the policy into storage
- err = storage.Put(&logical.StorageEntry{
- Key: "archive/" + p.Name,
- Value: buf,
- })
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// handleArchiving manages the movement of keys to and from the policy archive.
-// This should *ONLY* be called from Persist() since it assumes that the policy
-// will be persisted afterwards.
-func (p *Policy) handleArchiving(storage logical.Storage) error {
- // We need to move keys that are no longer accessible to archivedKeys, and keys
- // that now need to be accessible back here.
- //
- // For safety, because there isn't really a good reason to, we never delete
- // keys from the archive even when we move them back.
-
- // Check if we have the latest minimum version in the current set of keys
- _, keysContainsMinimum := p.Keys[p.MinDecryptionVersion]
-
- // Sanity checks
- switch {
- case p.MinDecryptionVersion < 1:
- return fmt.Errorf("minimum decryption version of %d is less than 1", p.MinDecryptionVersion)
- case p.LatestVersion < 1:
- return fmt.Errorf("latest version of %d is less than 1", p.LatestVersion)
- case !keysContainsMinimum && p.ArchiveVersion != p.LatestVersion:
- return fmt.Errorf("need to move keys from archive but archive version not up-to-date")
- case p.ArchiveVersion > p.LatestVersion:
- return fmt.Errorf("archive version of %d is greater than the latest version %d",
- p.ArchiveVersion, p.LatestVersion)
- case p.MinEncryptionVersion > 0 && p.MinEncryptionVersion < p.MinDecryptionVersion:
- return fmt.Errorf("minimum decryption version of %d is greater than minimum encryption version %d",
- p.MinDecryptionVersion, p.MinEncryptionVersion)
- case p.MinDecryptionVersion > p.LatestVersion:
- return fmt.Errorf("minimum decryption version of %d is greater than the latest version %d",
- p.MinDecryptionVersion, p.LatestVersion)
- }
-
- archive, err := p.LoadArchive(storage)
- if err != nil {
- return err
- }
-
- if !keysContainsMinimum {
- // Need to move keys *from* archive
-
- for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ {
- p.Keys[i] = archive.Keys[i]
- }
-
- return nil
- }
-
- // Need to move keys *to* archive
-
- // We need a size that is equivalent to the latest version (number of keys)
- // but adding one since slice numbering starts at 0 and we're indexing by
- // key version
- if len(archive.Keys) < p.LatestVersion+1 {
- // Increase the size of the archive slice
- newKeys := make([]KeyEntry, p.LatestVersion+1)
- copy(newKeys, archive.Keys)
- archive.Keys = newKeys
- }
-
- // We are storing all keys in the archive, so we ensure that it is up to
- // date up to p.LatestVersion
- for i := p.ArchiveVersion + 1; i <= p.LatestVersion; i++ {
- archive.Keys[i] = p.Keys[i]
- p.ArchiveVersion = i
- }
-
- err = p.storeArchive(archive, storage)
- if err != nil {
- return err
- }
-
- // Perform deletion afterwards so that if there is an error saving we
- // haven't messed with the current policy
- for i := p.LatestVersion - len(p.Keys) + 1; i < p.MinDecryptionVersion; i++ {
- delete(p.Keys, i)
- }
-
- return nil
-}
-
-func (p *Policy) Persist(storage logical.Storage) error {
- err := p.handleArchiving(storage)
- if err != nil {
- return err
- }
-
- // Encode the policy
- buf, err := p.Serialize()
- if err != nil {
- return err
- }
-
- // Write the policy into storage
- err = storage.Put(&logical.StorageEntry{
- Key: "policy/" + p.Name,
- Value: buf,
- })
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (p *Policy) Serialize() ([]byte, error) {
- return json.Marshal(p)
-}
-
-func (p *Policy) NeedsUpgrade() bool {
- // Ensure we've moved from Key -> Keys
- if p.Key != nil && len(p.Key) > 0 {
- return true
- }
-
- // With archiving, past assumptions about the length of the keys map are no
- // longer valid
- if p.LatestVersion == 0 && len(p.Keys) != 0 {
- return true
- }
-
- // We disallow setting the version to 0, since they start at 1 since moving
- // to rotate-able keys, so update if it's set to 0
- if p.MinDecryptionVersion == 0 {
- return true
- }
-
- // On first load after an upgrade, copy keys to the archive
- if p.ArchiveVersion == 0 {
- return true
- }
-
- // Need to write the version
- if p.ConvergentEncryption && p.ConvergentVersion == 0 {
- return true
- }
-
- if p.Keys[p.LatestVersion].HMACKey == nil || len(p.Keys[p.LatestVersion].HMACKey) == 0 {
- return true
- }
-
- return false
-}
-
-func (p *Policy) Upgrade(storage logical.Storage) error {
- persistNeeded := false
- // Ensure we've moved from Key -> Keys
- if p.Key != nil && len(p.Key) > 0 {
- p.MigrateKeyToKeysMap()
- persistNeeded = true
- }
-
- // With archiving, past assumptions about the length of the keys map are no
- // longer valid
- if p.LatestVersion == 0 && len(p.Keys) != 0 {
- p.LatestVersion = len(p.Keys)
- persistNeeded = true
- }
-
- // We disallow setting the version to 0, since they start at 1 since moving
- // to rotate-able keys, so update if it's set to 0
- if p.MinDecryptionVersion == 0 {
- p.MinDecryptionVersion = 1
- persistNeeded = true
- }
-
- // On first load after an upgrade, copy keys to the archive
- if p.ArchiveVersion == 0 {
- persistNeeded = true
- }
-
- if p.ConvergentEncryption && p.ConvergentVersion == 0 {
- p.ConvergentVersion = 1
- persistNeeded = true
- }
-
- if p.Keys[p.LatestVersion].HMACKey == nil || len(p.Keys[p.LatestVersion].HMACKey) == 0 {
- entry := p.Keys[p.LatestVersion]
- hmacKey, err := uuid.GenerateRandomBytes(32)
- if err != nil {
- return err
- }
- entry.HMACKey = hmacKey
- p.Keys[p.LatestVersion] = entry
- persistNeeded = true
- }
-
- if persistNeeded {
- err := p.Persist(storage)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// DeriveKey is used to derive the encryption key that should be used depending
-// on the policy. If derivation is disabled the raw key is used and no context
-// is required, otherwise the KDF mode is used with the context to derive the
-// proper key.
-func (p *Policy) DeriveKey(context []byte, ver int) ([]byte, error) {
- if !p.Type.DerivationSupported() {
- return nil, errutil.UserError{Err: fmt.Sprintf("derivation not supported for key type %v", p.Type)}
- }
-
- if p.Keys == nil || p.LatestVersion == 0 {
- return nil, errutil.InternalError{Err: "unable to access the key; no key versions found"}
- }
-
- if ver <= 0 || ver > p.LatestVersion {
- return nil, errutil.UserError{Err: "invalid key version"}
- }
-
- // Fast-path non-derived keys
- if !p.Derived {
- return p.Keys[ver].Key, nil
- }
-
- // Ensure a context is provided
- if len(context) == 0 {
- return nil, errutil.UserError{Err: "missing 'context' for key derivation; the key was created using a derived key, which means additional, per-request information must be included in order to perform operations with the key"}
- }
-
- switch p.KDF {
- case Kdf_hmac_sha256_counter:
- prf := kdf.HMACSHA256PRF
- prfLen := kdf.HMACSHA256PRFLen
- return kdf.CounterMode(prf, prfLen, p.Keys[ver].Key, context, 256)
-
- case Kdf_hkdf_sha256:
- reader := hkdf.New(sha256.New, p.Keys[ver].Key, nil, context)
- derBytes := bytes.NewBuffer(nil)
- derBytes.Grow(32)
- limReader := &io.LimitedReader{
- R: reader,
- N: 32,
- }
-
- switch p.Type {
- case KeyType_AES256_GCM96:
- n, err := derBytes.ReadFrom(limReader)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("error reading returned derived bytes: %v", err)}
- }
- if n != 32 {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to read enough derived bytes, needed 32, got %d", n)}
- }
- return derBytes.Bytes(), nil
-
- case KeyType_ED25519:
- // We use the limited reader containing the derived bytes as the
- // "random" input to the generation function
- _, pri, err := ed25519.GenerateKey(limReader)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("error generating derived key: %v", err)}
- }
- return pri, nil
-
- default:
- return nil, errutil.InternalError{Err: "unsupported key type for derivation"}
- }
-
- default:
- return nil, errutil.InternalError{Err: "unsupported key derivation mode"}
- }
-}
-
-func (p *Policy) Encrypt(ver int, context, nonce []byte, value string) (string, error) {
- if !p.Type.EncryptionSupported() {
- return "", errutil.UserError{Err: fmt.Sprintf("message encryption not supported for key type %v", p.Type)}
- }
-
- // Guard against a potentially invalid key type
- switch p.Type {
- case KeyType_AES256_GCM96:
- default:
- return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)}
- }
-
- // Decode the plaintext value
- plaintext, err := base64.StdEncoding.DecodeString(value)
- if err != nil {
- return "", errutil.UserError{Err: "failed to base64-decode plaintext"}
- }
-
- switch {
- case ver == 0:
- ver = p.LatestVersion
- case ver < 0:
- return "", errutil.UserError{Err: "requested version for encryption is negative"}
- case ver > p.LatestVersion:
- return "", errutil.UserError{Err: "requested version for encryption is higher than the latest key version"}
- case ver < p.MinEncryptionVersion:
- return "", errutil.UserError{Err: "requested version for encryption is less than the minimum encryption key version"}
- }
-
- // Derive the key that should be used
- key, err := p.DeriveKey(context, ver)
- if err != nil {
- return "", err
- }
-
- // Guard against a potentially invalid key type
- switch p.Type {
- case KeyType_AES256_GCM96:
- default:
- return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)}
- }
-
- // Setup the cipher
- aesCipher, err := aes.NewCipher(key)
- if err != nil {
- return "", errutil.InternalError{Err: err.Error()}
- }
-
- // Setup the GCM AEAD
- gcm, err := cipher.NewGCM(aesCipher)
- if err != nil {
- return "", errutil.InternalError{Err: err.Error()}
- }
-
- if p.ConvergentEncryption {
- switch p.ConvergentVersion {
- case 1:
- if len(nonce) != gcm.NonceSize() {
- return "", errutil.UserError{Err: fmt.Sprintf("base64-decoded nonce must be %d bytes long when using convergent encryption with this key", gcm.NonceSize())}
- }
- default:
- nonceHmac := hmac.New(sha256.New, context)
- nonceHmac.Write(plaintext)
- nonceSum := nonceHmac.Sum(nil)
- nonce = nonceSum[:gcm.NonceSize()]
- }
- } else {
- // Compute random nonce
- nonce, err = uuid.GenerateRandomBytes(gcm.NonceSize())
- if err != nil {
- return "", errutil.InternalError{Err: err.Error()}
- }
- }
-
- // Encrypt and tag with GCM
- out := gcm.Seal(nil, nonce, plaintext, nil)
-
- // Place the encrypted data after the nonce
- full := out
- if !p.ConvergentEncryption || p.ConvergentVersion > 1 {
- full = append(nonce, out...)
- }
-
- // Convert to base64
- encoded := base64.StdEncoding.EncodeToString(full)
-
- // Prepend some information
- encoded = "vault:v" + strconv.Itoa(ver) + ":" + encoded
-
- return encoded, nil
-}
-
-func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) {
- if !p.Type.DecryptionSupported() {
- return "", errutil.UserError{Err: fmt.Sprintf("message decryption not supported for key type %v", p.Type)}
- }
-
- // Verify the prefix
- if !strings.HasPrefix(value, "vault:v") {
- return "", errutil.UserError{Err: "invalid ciphertext: no prefix"}
- }
-
- if p.ConvergentEncryption && p.ConvergentVersion == 1 && (nonce == nil || len(nonce) == 0) {
- return "", errutil.UserError{Err: "invalid convergent nonce supplied"}
- }
-
- splitVerCiphertext := strings.SplitN(strings.TrimPrefix(value, "vault:v"), ":", 2)
- if len(splitVerCiphertext) != 2 {
- return "", errutil.UserError{Err: "invalid ciphertext: wrong number of fields"}
- }
-
- ver, err := strconv.Atoi(splitVerCiphertext[0])
- if err != nil {
- return "", errutil.UserError{Err: "invalid ciphertext: version number could not be decoded"}
- }
-
- if ver == 0 {
- // Compatibility mode with initial implementation, where keys start at
- // zero
- ver = 1
- }
-
- if ver > p.LatestVersion {
- return "", errutil.UserError{Err: "invalid ciphertext: version is too new"}
- }
-
- if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion {
- return "", errutil.UserError{Err: ErrTooOld}
- }
-
- // Derive the key that should be used
- key, err := p.DeriveKey(context, ver)
- if err != nil {
- return "", err
- }
-
- // Guard against a potentially invalid key type
- switch p.Type {
- case KeyType_AES256_GCM96:
- default:
- return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)}
- }
-
- // Decode the base64
- decoded, err := base64.StdEncoding.DecodeString(splitVerCiphertext[1])
- if err != nil {
- return "", errutil.UserError{Err: "invalid ciphertext: could not decode base64"}
- }
-
- // Setup the cipher
- aesCipher, err := aes.NewCipher(key)
- if err != nil {
- return "", errutil.InternalError{Err: err.Error()}
- }
-
- // Setup the GCM AEAD
- gcm, err := cipher.NewGCM(aesCipher)
- if err != nil {
- return "", errutil.InternalError{Err: err.Error()}
- }
-
- // Extract the nonce and ciphertext
- var ciphertext []byte
- if p.ConvergentEncryption && p.ConvergentVersion < 2 {
- ciphertext = decoded
- } else {
- nonce = decoded[:gcm.NonceSize()]
- ciphertext = decoded[gcm.NonceSize():]
- }
-
- // Verify and Decrypt
- plain, err := gcm.Open(nil, nonce, ciphertext, nil)
- if err != nil {
- return "", errutil.UserError{Err: "invalid ciphertext: unable to decrypt"}
- }
-
- return base64.StdEncoding.EncodeToString(plain), nil
-}
-
-func (p *Policy) HMACKey(version int) ([]byte, error) {
- switch {
- case version < 0:
- return nil, fmt.Errorf("key version does not exist (cannot be negative)")
- case version > p.LatestVersion:
- return nil, fmt.Errorf("key version does not exist; latest key version is %d", p.LatestVersion)
- }
-
- if p.Keys[version].HMACKey == nil {
- return nil, fmt.Errorf("no HMAC key exists for that key version")
- }
-
- return p.Keys[version].HMACKey, nil
-}
-
-func (p *Policy) Sign(ver int, context, input []byte) (*SigningResult, error) {
- if !p.Type.SigningSupported() {
- return nil, fmt.Errorf("message signing not supported for key type %v", p.Type)
- }
-
- switch {
- case ver == 0:
- ver = p.LatestVersion
- case ver < 0:
- return nil, errutil.UserError{Err: "requested version for signing is negative"}
- case ver > p.LatestVersion:
- return nil, errutil.UserError{Err: "requested version for signing is higher than the latest key version"}
- case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion:
- return nil, errutil.UserError{Err: "requested version for signing is less than the minimum encryption key version"}
- }
-
- var sig []byte
- var pubKey []byte
- var err error
- switch p.Type {
- case KeyType_ECDSA_P256:
- keyParams := p.Keys[ver]
- key := &ecdsa.PrivateKey{
- PublicKey: ecdsa.PublicKey{
- Curve: elliptic.P256(),
- X: keyParams.EC_X,
- Y: keyParams.EC_Y,
- },
- D: keyParams.EC_D,
- }
- r, s, err := ecdsa.Sign(rand.Reader, key, input)
- if err != nil {
- return nil, err
- }
- marshaledSig, err := asn1.Marshal(ecdsaSignature{
- R: r,
- S: s,
- })
- if err != nil {
- return nil, err
- }
- sig = marshaledSig
-
- case KeyType_ED25519:
- var key ed25519.PrivateKey
-
- if p.Derived {
- // Derive the key that should be used
- var err error
- key, err = p.DeriveKey(context, ver)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)}
- }
- pubKey = key.Public().(ed25519.PublicKey)
- } else {
- key = ed25519.PrivateKey(p.Keys[ver].Key)
- }
-
- // Per docs, do not pre-hash ed25519; it does two passes and performs
- // its own hashing
- sig, err = key.Sign(rand.Reader, input, crypto.Hash(0))
- if err != nil {
- return nil, err
- }
-
- default:
- return nil, fmt.Errorf("unsupported key type %v", p.Type)
- }
-
- // Convert to base64
- encoded := base64.StdEncoding.EncodeToString(sig)
-
- res := &SigningResult{
- Signature: "vault:v" + strconv.Itoa(ver) + ":" + encoded,
- PublicKey: pubKey,
- }
-
- return res, nil
-}
-
-func (p *Policy) VerifySignature(context, input []byte, sig string) (bool, error) {
- if !p.Type.SigningSupported() {
- return false, errutil.UserError{Err: fmt.Sprintf("message verification not supported for key type %v", p.Type)}
- }
-
- // Verify the prefix
- if !strings.HasPrefix(sig, "vault:v") {
- return false, errutil.UserError{Err: "invalid signature: no prefix"}
- }
-
- splitVerSig := strings.SplitN(strings.TrimPrefix(sig, "vault:v"), ":", 2)
- if len(splitVerSig) != 2 {
- return false, errutil.UserError{Err: "invalid signature: wrong number of fields"}
- }
-
- ver, err := strconv.Atoi(splitVerSig[0])
- if err != nil {
- return false, errutil.UserError{Err: "invalid signature: version number could not be decoded"}
- }
-
- if ver > p.LatestVersion {
- return false, errutil.UserError{Err: "invalid signature: version is too new"}
- }
-
- if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion {
- return false, errutil.UserError{Err: ErrTooOld}
- }
-
- sigBytes, err := base64.StdEncoding.DecodeString(splitVerSig[1])
- if err != nil {
- return false, errutil.UserError{Err: "invalid base64 signature value"}
- }
-
- switch p.Type {
- case KeyType_ECDSA_P256:
- var ecdsaSig ecdsaSignature
- rest, err := asn1.Unmarshal(sigBytes, &ecdsaSig)
- if err != nil {
- return false, errutil.UserError{Err: "supplied signature is invalid"}
- }
- if rest != nil && len(rest) != 0 {
- return false, errutil.UserError{Err: "supplied signature contains extra data"}
- }
-
- keyParams := p.Keys[ver]
- key := &ecdsa.PublicKey{
- Curve: elliptic.P256(),
- X: keyParams.EC_X,
- Y: keyParams.EC_Y,
- }
-
- return ecdsa.Verify(key, input, ecdsaSig.R, ecdsaSig.S), nil
-
- case KeyType_ED25519:
- var key ed25519.PrivateKey
-
- if p.Derived {
- // Derive the key that should be used
- var err error
- key, err = p.DeriveKey(context, ver)
- if err != nil {
- return false, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)}
- }
- } else {
- key = ed25519.PrivateKey(p.Keys[ver].Key)
- }
-
- return ed25519.Verify(key.Public().(ed25519.PublicKey), input, sigBytes), nil
-
- default:
- return false, errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)}
- }
-
- return false, errutil.InternalError{Err: "no valid key type found"}
-}
-
-func (p *Policy) Rotate(storage logical.Storage) error {
- if p.Keys == nil {
- // This is an initial key rotation when generating a new policy. We
- // don't need to call migrate here because if we've called getPolicy to
- // get the policy in the first place it will have been run.
- p.Keys = keyEntryMap{}
- }
-
- p.LatestVersion += 1
- now := time.Now()
- entry := KeyEntry{
- CreationTime: now,
- DeprecatedCreationTime: now.Unix(),
- }
-
- hmacKey, err := uuid.GenerateRandomBytes(32)
- if err != nil {
- return err
- }
- entry.HMACKey = hmacKey
-
- switch p.Type {
- case KeyType_AES256_GCM96:
- // Generate a 256bit key
- newKey, err := uuid.GenerateRandomBytes(32)
- if err != nil {
- return err
- }
- entry.Key = newKey
-
- case KeyType_ECDSA_P256:
- privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- if err != nil {
- return err
- }
- entry.EC_D = privKey.D
- entry.EC_X = privKey.X
- entry.EC_Y = privKey.Y
- derBytes, err := x509.MarshalPKIXPublicKey(privKey.Public())
- if err != nil {
- return fmt.Errorf("error marshaling public key: %s", err)
- }
- pemBlock := &pem.Block{
- Type: "PUBLIC KEY",
- Bytes: derBytes,
- }
- pemBytes := pem.EncodeToMemory(pemBlock)
- if pemBytes == nil || len(pemBytes) == 0 {
- return fmt.Errorf("error PEM-encoding public key")
- }
- entry.FormattedPublicKey = string(pemBytes)
-
- case KeyType_ED25519:
- pub, pri, err := ed25519.GenerateKey(rand.Reader)
- if err != nil {
- return err
- }
- entry.Key = pri
- entry.FormattedPublicKey = base64.StdEncoding.EncodeToString(pub)
- }
-
- p.Keys[p.LatestVersion] = entry
-
- // This ensures that with new key creations min decryption version is set
- // to 1 rather than the int default of 0, since keys start at 1 (either
- // fresh or after migration to the key map)
- if p.MinDecryptionVersion == 0 {
- p.MinDecryptionVersion = 1
- }
-
- return p.Persist(storage)
-}
-
-func (p *Policy) MigrateKeyToKeysMap() {
- now := time.Now()
- p.Keys = keyEntryMap{
- 1: KeyEntry{
- Key: p.Key,
- CreationTime: now,
- DeprecatedCreationTime: now.Unix(),
- },
- }
- p.Key = nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go b/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go
deleted file mode 100644
index 7969cf9..0000000
--- a/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go
+++ /dev/null
@@ -1,351 +0,0 @@
-package keysutil
-
-import (
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-var (
- keysArchive []KeyEntry
-)
-
-func resetKeysArchive() {
- keysArchive = []KeyEntry{KeyEntry{}}
-}
-
-func Test_KeyUpgrade(t *testing.T) {
- testKeyUpgradeCommon(t, NewLockManager(false))
- testKeyUpgradeCommon(t, NewLockManager(true))
-}
-
-func testKeyUpgradeCommon(t *testing.T, lm *LockManager) {
- storage := &logical.InmemStorage{}
- p, lock, upserted, err := lm.GetPolicyUpsert(PolicyRequest{
- Storage: storage,
- KeyType: KeyType_AES256_GCM96,
- Name: "test",
- })
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- t.Fatal(err)
- }
- if p == nil {
- t.Fatal("nil policy")
- }
- if !upserted {
- t.Fatal("expected an upsert")
- }
-
- testBytes := make([]byte, len(p.Keys[1].Key))
- copy(testBytes, p.Keys[1].Key)
-
- p.Key = p.Keys[1].Key
- p.Keys = nil
- p.MigrateKeyToKeysMap()
- if p.Key != nil {
- t.Fatal("policy.Key is not nil")
- }
- if len(p.Keys) != 1 {
- t.Fatal("policy.Keys is the wrong size")
- }
- if !reflect.DeepEqual(testBytes, p.Keys[1].Key) {
- t.Fatal("key mismatch")
- }
-}
-
-func Test_ArchivingUpgrade(t *testing.T) {
- testArchivingUpgradeCommon(t, NewLockManager(false))
- testArchivingUpgradeCommon(t, NewLockManager(true))
-}
-
-func testArchivingUpgradeCommon(t *testing.T, lm *LockManager) {
- resetKeysArchive()
-
- // First, we generate a policy and rotate it a number of times. Each time
- // we'll ensure that we have the expected number of keys in the archive and
- // the main keys object, which without changing the min version should be
- // zero and latest, respectively
-
- storage := &logical.InmemStorage{}
- p, lock, _, err := lm.GetPolicyUpsert(PolicyRequest{
- Storage: storage,
- KeyType: KeyType_AES256_GCM96,
- Name: "test",
- })
- if err != nil {
- t.Fatal(err)
- }
- if p == nil || lock == nil {
- t.Fatal("nil policy or lock")
- }
- lock.RUnlock()
-
- // Store the initial key in the archive
- keysArchive = append(keysArchive, p.Keys[1])
- checkKeys(t, p, storage, "initial", 1, 1, 1)
-
- for i := 2; i <= 10; i++ {
- err = p.Rotate(storage)
- if err != nil {
- t.Fatal(err)
- }
- keysArchive = append(keysArchive, p.Keys[i])
- checkKeys(t, p, storage, "rotate", i, i, i)
- }
-
- // Now, wipe the archive and set the archive version to zero
- err = storage.Delete("archive/test")
- if err != nil {
- t.Fatal(err)
- }
- p.ArchiveVersion = 0
-
- // Store it, but without calling persist, so we don't trigger
- // handleArchiving()
- buf, err := p.Serialize()
- if err != nil {
- t.Fatal(err)
- }
-
- // Write the policy into storage
- err = storage.Put(&logical.StorageEntry{
- Key: "policy/" + p.Name,
- Value: buf,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // If we're caching, expire from the cache since we modified it
- // under-the-hood
- if lm.CacheActive() {
- delete(lm.cache, "test")
- }
-
- // Now get the policy again; the upgrade should happen automatically
- p, lock, err = lm.GetPolicyShared(storage, "test")
- if err != nil {
- t.Fatal(err)
- }
- if p == nil || lock == nil {
- t.Fatal("nil policy or lock")
- }
- lock.RUnlock()
-
- checkKeys(t, p, storage, "upgrade", 10, 10, 10)
-
- // Let's check some deletion logic while we're at it
-
- // The policy should be in there
- if lm.CacheActive() && lm.cache["test"] == nil {
- t.Fatal("nil policy in cache")
- }
-
- // First we'll do this wrong, by not setting the deletion flag
- err = lm.DeletePolicy(storage, "test")
- if err == nil {
- t.Fatal("got nil error, but should not have been able to delete since we didn't set the deletion flag on the policy")
- }
-
- // The policy should still be in there
- if lm.CacheActive() && lm.cache["test"] == nil {
- t.Fatal("nil policy in cache")
- }
-
- p, lock, err = lm.GetPolicyShared(storage, "test")
- if err != nil {
- t.Fatal(err)
- }
- if p == nil || lock == nil {
- t.Fatal("policy or lock nil after bad delete")
- }
- lock.RUnlock()
-
- // Now do it properly
- p.DeletionAllowed = true
- err = p.Persist(storage)
- if err != nil {
- t.Fatal(err)
- }
- err = lm.DeletePolicy(storage, "test")
- if err != nil {
- t.Fatal(err)
- }
-
- // The policy should *not* be in there
- if lm.CacheActive() && lm.cache["test"] != nil {
- t.Fatal("non-nil policy in cache")
- }
-
- p, lock, err = lm.GetPolicyShared(storage, "test")
- if err != nil {
- t.Fatal(err)
- }
- if p != nil || lock != nil {
- t.Fatal("policy or lock not nil after delete")
- }
-}
-
-func Test_Archiving(t *testing.T) {
- testArchivingCommon(t, NewLockManager(false))
- testArchivingCommon(t, NewLockManager(true))
-}
-
-func testArchivingCommon(t *testing.T, lm *LockManager) {
- resetKeysArchive()
-
- // First, we generate a policy and rotate it a number of times. Each time
- // we'll ensure that we have the expected number of keys in the archive and
- // the main keys object, which without changing the min version should be
- // zero and latest, respectively
-
- storage := &logical.InmemStorage{}
- p, lock, _, err := lm.GetPolicyUpsert(PolicyRequest{
- Storage: storage,
- KeyType: KeyType_AES256_GCM96,
- Name: "test",
- })
- if lock != nil {
- defer lock.RUnlock()
- }
- if err != nil {
- t.Fatal(err)
- }
- if p == nil {
- t.Fatal("nil policy")
- }
-
- // Store the initial key in the archive
- keysArchive = append(keysArchive, p.Keys[1])
- checkKeys(t, p, storage, "initial", 1, 1, 1)
-
- for i := 2; i <= 10; i++ {
- err = p.Rotate(storage)
- if err != nil {
- t.Fatal(err)
- }
- keysArchive = append(keysArchive, p.Keys[i])
- checkKeys(t, p, storage, "rotate", i, i, i)
- }
-
- // Move the min decryption version up
- for i := 1; i <= 10; i++ {
- p.MinDecryptionVersion = i
-
- err = p.Persist(storage)
- if err != nil {
- t.Fatal(err)
- }
- // We expect to find:
- // * The keys in archive are the same as the latest version
- // * The latest version is constant
- // * The number of keys in the policy itself is from the min
- // decryption version up to the latest version, so for e.g. 7 and
- // 10, you'd need 7, 8, 9, and 10 -- IOW, latest version - min
- // decryption version plus 1 (the min decryption version key
- // itself)
- checkKeys(t, p, storage, "minadd", 10, 10, p.LatestVersion-p.MinDecryptionVersion+1)
- }
-
- // Move the min decryption version down
- for i := 10; i >= 1; i-- {
- p.MinDecryptionVersion = i
-
- err = p.Persist(storage)
- if err != nil {
- t.Fatal(err)
- }
- // We expect to find:
- // * The keys in archive are never removed so same as the latest version
- // * The latest version is constant
- // * The number of keys in the policy itself is from the min
- // decryption version up to the latest version, so for e.g. 7 and
- // 10, you'd need 7, 8, 9, and 10 -- IOW, latest version - min
- // decryption version plus 1 (the min decryption version key
- // itself)
- checkKeys(t, p, storage, "minsub", 10, 10, p.LatestVersion-p.MinDecryptionVersion+1)
- }
-}
-
-func checkKeys(t *testing.T,
- p *Policy,
- storage logical.Storage,
- action string,
- archiveVer, latestVer, keysSize int) {
-
- // Sanity check
- if len(keysArchive) != latestVer+1 {
- t.Fatalf("latest expected key version is %d, expected test keys archive size is %d, "+
- "but keys archive is of size %d", latestVer, latestVer+1, len(keysArchive))
- }
-
- archive, err := p.LoadArchive(storage)
- if err != nil {
- t.Fatal(err)
- }
-
- badArchiveVer := false
- if archiveVer == 0 {
- if len(archive.Keys) != 0 || p.ArchiveVersion != 0 {
- badArchiveVer = true
- }
- } else {
- // We need to subtract one because we have the indexes match key
- // versions, which start at 1. So for an archive version of 1, we
- // actually have two entries -- a blank 0 entry, and the key at spot 1
- if archiveVer != len(archive.Keys)-1 || archiveVer != p.ArchiveVersion {
- badArchiveVer = true
- }
- }
- if badArchiveVer {
- t.Fatalf(
- "expected archive version %d, found length of archive keys %d and policy archive version %d",
- archiveVer, len(archive.Keys), p.ArchiveVersion,
- )
- }
-
- if latestVer != p.LatestVersion {
- t.Fatalf(
- "expected latest version %d, found %d",
- latestVer, p.LatestVersion,
- )
- }
-
- if keysSize != len(p.Keys) {
- t.Fatalf(
- "expected keys size %d, found %d, action is %s, policy is \n%#v\n",
- keysSize, len(p.Keys), action, p,
- )
- }
-
- for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ {
- if _, ok := p.Keys[i]; !ok {
- t.Fatalf(
- "expected key %d, did not find it in policy keys", i,
- )
- }
- }
-
- for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ {
- // Travis has weird time zone issues and gets super unhappy
- if !p.Keys[i].CreationTime.Equal(keysArchive[i].CreationTime) {
- t.Fatalf("key %d not equivalent between policy keys and test keys archive; policy keys:\n%#v\ntest keys archive:\n%#v\n", i, p.Keys[i], keysArchive[i])
- }
- polKey := p.Keys[i]
- polKey.CreationTime = keysArchive[i].CreationTime
- p.Keys[i] = polKey
- if !reflect.DeepEqual(p.Keys[i], keysArchive[i]) {
- t.Fatalf("key %d not equivalent between policy keys and test keys archive; policy keys:\n%#v\ntest keys archive:\n%#v\n", i, p.Keys[i], keysArchive[i])
- }
- }
-
- for i := 1; i < len(archive.Keys); i++ {
- if !reflect.DeepEqual(archive.Keys[i].Key, keysArchive[i].Key) {
- t.Fatalf("key %d not equivalent between policy archive and test keys archive; policy archive:\n%#v\ntest keys archive:\n%#v\n", i, archive.Keys[i].Key, keysArchive[i].Key)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go
deleted file mode 100644
index 685624a..0000000
--- a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package kvbuilder
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "strings"
-
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/mitchellh/mapstructure"
-)
-
-// Builder is a struct to build a key/value mapping based on a list
-// of "k=v" pairs, where the value might come from stdin, a file, etc.
-type Builder struct {
- Stdin io.Reader
-
- result map[string]interface{}
- stdin bool
-}
-
-// Map returns the built map.
-func (b *Builder) Map() map[string]interface{} {
- return b.result
-}
-
-// Add adds to the mapping with the given args.
-func (b *Builder) Add(args ...string) error {
- for _, a := range args {
- if err := b.add(a); err != nil {
- return fmt.Errorf("Invalid key/value pair '%s': %s", a, err)
- }
- }
-
- return nil
-}
-
-func (b *Builder) add(raw string) error {
- // Regardless of validity, make sure we make our result
- if b.result == nil {
- b.result = make(map[string]interface{})
- }
-
- // Empty strings are fine, just ignored
- if raw == "" {
- return nil
- }
-
- // Split into key/value
- parts := strings.SplitN(raw, "=", 2)
-
- // If the arg is exactly "-", then we need to read from stdin
- // and merge the results into the resulting structure.
- if len(parts) == 1 {
- if raw == "-" {
- if b.Stdin == nil {
- return fmt.Errorf("stdin is not supported")
- }
- if b.stdin {
- return fmt.Errorf("stdin already consumed")
- }
-
- b.stdin = true
- return b.addReader(b.Stdin)
- }
-
- // If the arg begins with "@" then we need to read a file directly
- if raw[0] == '@' {
- f, err := os.Open(raw[1:])
- if err != nil {
- return err
- }
- defer f.Close()
-
- return b.addReader(f)
- }
- }
-
- if len(parts) != 2 {
- return fmt.Errorf("format must be key=value")
- }
- key, value := parts[0], parts[1]
-
- if len(value) > 0 {
- if value[0] == '@' {
- contents, err := ioutil.ReadFile(value[1:])
- if err != nil {
- return fmt.Errorf("error reading file: %s", err)
- }
-
- value = string(contents)
- } else if value[0] == '\\' && value[1] == '@' {
- value = value[1:]
- } else if value == "-" {
- if b.Stdin == nil {
- return fmt.Errorf("stdin is not supported")
- }
- if b.stdin {
- return fmt.Errorf("stdin already consumed")
- }
- b.stdin = true
-
- var buf bytes.Buffer
- if _, err := io.Copy(&buf, b.Stdin); err != nil {
- return err
- }
-
- value = buf.String()
- }
- }
-
- // Repeated keys will be converted into a slice
- if existingValue, ok := b.result[key]; ok {
- var sliceValue []interface{}
- if err := mapstructure.WeakDecode(existingValue, &sliceValue); err != nil {
- return err
- }
- sliceValue = append(sliceValue, value)
- b.result[key] = sliceValue
- return nil
- }
-
- b.result[key] = value
- return nil
-}
-
-func (b *Builder) addReader(r io.Reader) error {
- return jsonutil.DecodeJSONFromReader(r, &b.result)
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go
deleted file mode 100644
index aa31784..0000000
--- a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package kvbuilder
-
-import (
- "bytes"
- "reflect"
- "testing"
-)
-
-func TestBuilder_basic(t *testing.T) {
- var b Builder
- err := b.Add("foo=bar", "bar=baz", "baz=")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := map[string]interface{}{
- "foo": "bar",
- "bar": "baz",
- "baz": "",
- }
- actual := b.Map()
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
-
-func TestBuilder_escapedAt(t *testing.T) {
- var b Builder
- err := b.Add("foo=bar", "bar=\\@baz")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := map[string]interface{}{
- "foo": "bar",
- "bar": "@baz",
- }
- actual := b.Map()
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
-
-func TestBuilder_stdin(t *testing.T) {
- var b Builder
- b.Stdin = bytes.NewBufferString("baz")
- err := b.Add("foo=bar", "bar=-")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := map[string]interface{}{
- "foo": "bar",
- "bar": "baz",
- }
- actual := b.Map()
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
-
-func TestBuilder_stdinMap(t *testing.T) {
- var b Builder
- b.Stdin = bytes.NewBufferString(`{"foo": "bar"}`)
- err := b.Add("-", "bar=baz")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := map[string]interface{}{
- "foo": "bar",
- "bar": "baz",
- }
- actual := b.Map()
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
-
-func TestBuilder_stdinTwice(t *testing.T) {
- var b Builder
- b.Stdin = bytes.NewBufferString(`{"foo": "bar"}`)
- err := b.Add("-", "-")
- if err == nil {
- t.Fatal("should error")
- }
-}
-
-func TestBuilder_sameKeyTwice(t *testing.T) {
- var b Builder
- err := b.Add("foo=bar", "foo=baz")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := map[string]interface{}{
- "foo": []interface{}{"bar", "baz"},
- }
- actual := b.Map()
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
-
-func TestBuilder_sameKeyMultipleTimes(t *testing.T) {
- var b Builder
- err := b.Add("foo=bar", "foo=baz", "foo=bay", "foo=bax", "bar=baz")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := map[string]interface{}{
- "foo": []interface{}{"bar", "baz", "bay", "bax"},
- "bar": "baz",
- }
- actual := b.Map()
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
-
-func TestBuilder_specialCharacteresInKey(t *testing.T) {
- var b Builder
- b.Stdin = bytes.NewBufferString("{\"foo\": \"bay\"}")
- err := b.Add("@foo=bar", "-foo=baz", "-")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- expected := map[string]interface{}{
- "@foo": "bar",
- "-foo": "baz",
- "foo": "bay",
- }
- actual := b.Map()
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/locksutil/locks.go b/vendor/github.com/hashicorp/vault/helper/locksutil/locks.go
deleted file mode 100644
index dcf1b4b..0000000
--- a/vendor/github.com/hashicorp/vault/helper/locksutil/locks.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package locksutil
-
-import (
- "crypto/md5"
- "sync"
-)
-
-const (
- LockCount = 256
-)
-
-type LockEntry struct {
- sync.RWMutex
-}
-
-func CreateLocks() []*LockEntry {
- ret := make([]*LockEntry, LockCount)
- for i := range ret {
- ret[i] = new(LockEntry)
- }
- return ret
-}
-
-func LockIndexForKey(key string) uint8 {
- hf := md5.New()
- hf.Write([]byte(key))
- return uint8(hf.Sum(nil)[0])
-}
-
-func LockForKey(locks []*LockEntry, key string) *LockEntry {
- return locks[LockIndexForKey(key)]
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/locksutil/locks_test.go b/vendor/github.com/hashicorp/vault/helper/locksutil/locks_test.go
deleted file mode 100644
index 9916644..0000000
--- a/vendor/github.com/hashicorp/vault/helper/locksutil/locks_test.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package locksutil
-
-import "testing"
-
-func Test_CreateLocks(t *testing.T) {
- locks := CreateLocks()
- if len(locks) != 256 {
- t.Fatalf("bad: len(locks): expected:256 actual:%d", len(locks))
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/logformat/vault.go b/vendor/github.com/hashicorp/vault/helper/logformat/vault.go
deleted file mode 100644
index fa53a19..0000000
--- a/vendor/github.com/hashicorp/vault/helper/logformat/vault.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package logformat
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "os"
- "strings"
- "sync"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-)
-
-const (
- styledefault = iota
- stylejson
-)
-
-// NewVaultLogger creates a new logger with the specified level and a Vault
-// formatter
-func NewVaultLogger(level int) log.Logger {
- logger := log.New("vault")
- return setLevelFormatter(logger, level, createVaultFormatter())
-}
-
-// NewVaultLoggerWithWriter creates a new logger with the specified level and
-// writer and a Vault formatter
-func NewVaultLoggerWithWriter(w io.Writer, level int) log.Logger {
- logger := log.NewLogger(w, "vault")
- return setLevelFormatter(logger, level, createVaultFormatter())
-}
-
-// Sets the level and formatter on the log, which must be a DefaultLogger
-func setLevelFormatter(logger log.Logger, level int, formatter log.Formatter) log.Logger {
- logger.(*log.DefaultLogger).SetLevel(level)
- logger.(*log.DefaultLogger).SetFormatter(formatter)
- return logger
-}
-
-// Creates a formatter, checking env vars for the style
-func createVaultFormatter() log.Formatter {
- ret := &vaultFormatter{
- Mutex: &sync.Mutex{},
- }
- logFormat := os.Getenv("VAULT_LOG_FORMAT")
- if logFormat == "" {
- logFormat = os.Getenv("LOGXI_FORMAT")
- }
- switch strings.ToLower(logFormat) {
- case "json", "vault_json", "vault-json", "vaultjson":
- ret.style = stylejson
- default:
- ret.style = styledefault
- }
- return ret
-}
-
-// Thread safe formatter
-type vaultFormatter struct {
- *sync.Mutex
- style int
- module string
-}
-
-func (v *vaultFormatter) Format(writer io.Writer, level int, msg string, args []interface{}) {
- currTime := time.Now()
- v.Lock()
- defer v.Unlock()
- switch v.style {
- case stylejson:
- v.formatJSON(writer, currTime, level, msg, args)
- default:
- v.formatDefault(writer, currTime, level, msg, args)
- }
-}
-
-func (v *vaultFormatter) formatDefault(writer io.Writer, currTime time.Time, level int, msg string, args []interface{}) {
- // Write a trailing newline
- defer writer.Write([]byte("\n"))
-
- writer.Write([]byte(currTime.Local().Format("2006/01/02 15:04:05.000000")))
-
- switch level {
- case log.LevelCritical:
- writer.Write([]byte(" [CRIT ] "))
- case log.LevelError:
- writer.Write([]byte(" [ERROR] "))
- case log.LevelWarn:
- writer.Write([]byte(" [WARN ] "))
- case log.LevelInfo:
- writer.Write([]byte(" [INFO ] "))
- case log.LevelDebug:
- writer.Write([]byte(" [DEBUG] "))
- case log.LevelTrace:
- writer.Write([]byte(" [TRACE] "))
- default:
- writer.Write([]byte(" [ALL ] "))
- }
-
- if v.module != "" {
- writer.Write([]byte(fmt.Sprintf("(%s) ", v.module)))
- }
-
- writer.Write([]byte(msg))
-
- if args != nil && len(args) > 0 {
- if len(args)%2 != 0 {
- args = append(args, "[unknown!]")
- }
-
- writer.Write([]byte(":"))
-
- for i := 0; i < len(args); i = i + 2 {
- var quote string
- switch args[i+1].(type) {
- case string:
- if strings.ContainsRune(args[i+1].(string), ' ') {
- quote = `"`
- }
- }
- writer.Write([]byte(fmt.Sprintf(" %s=%s%v%s", args[i], quote, args[i+1], quote)))
- }
- }
-}
-
-func (v *vaultFormatter) formatJSON(writer io.Writer, currTime time.Time, level int, msg string, args []interface{}) {
- vals := map[string]interface{}{
- "@message": msg,
- "@timestamp": currTime.Format("2006-01-02T15:04:05.000000Z07:00"),
- }
-
- var levelStr string
- switch level {
- case log.LevelCritical:
- levelStr = "critical"
- case log.LevelError:
- levelStr = "error"
- case log.LevelWarn:
- levelStr = "warn"
- case log.LevelInfo:
- levelStr = "info"
- case log.LevelDebug:
- levelStr = "debug"
- case log.LevelTrace:
- levelStr = "trace"
- default:
- levelStr = "all"
- }
-
- vals["@level"] = levelStr
-
- if v.module != "" {
- vals["@module"] = v.module
- }
-
- if args != nil && len(args) > 0 {
-
- if len(args)%2 != 0 {
- args = append(args, "[unknown!]")
- }
-
- for i := 0; i < len(args); i = i + 2 {
- if _, ok := args[i].(string); !ok {
- // As this is the logging function not much we can do here
- // without injecting into logs...
- continue
- }
- vals[args[i].(string)] = args[i+1]
- }
- }
-
- enc := json.NewEncoder(writer)
- enc.Encode(vals)
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo.go b/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo.go
deleted file mode 100644
index db97074..0000000
--- a/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Package duo provides a Duo MFA handler to authenticate users
-// with Duo. This handler is registered as the "duo" type in
-// mfa_config.
-package duo
-
-import (
- "fmt"
- "net/url"
-
- "github.com/duosecurity/duo_api_golang/authapi"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// DuoPaths returns path functions to configure Duo.
-func DuoPaths() []*framework.Path {
- return []*framework.Path{
- pathDuoConfig(),
- pathDuoAccess(),
- }
-}
-
-// DuoRootPaths returns the paths that are used to configure Duo.
-func DuoRootPaths() []string {
- return []string{
- "duo/access",
- "duo/config",
- }
-}
-
-// DuoHandler interacts with the Duo Auth API to authenticate a user
-// login request. If successful, the original response from the login
-// backend is returned.
-func DuoHandler(req *logical.Request, d *framework.FieldData, resp *logical.Response) (
- *logical.Response, error) {
- duoConfig, err := GetDuoConfig(req)
- if err != nil || duoConfig == nil {
- return logical.ErrorResponse("Could not load Duo configuration"), nil
- }
-
- duoAuthClient, err := GetDuoAuthClient(req, duoConfig)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- username, ok := resp.Auth.Metadata["username"]
- if !ok {
- return logical.ErrorResponse("Could not read username for MFA"), nil
- }
-
- var request *duoAuthRequest = &duoAuthRequest{}
- request.successResp = resp
- request.username = username
- request.method = d.Get("method").(string)
- request.passcode = d.Get("passcode").(string)
- request.ipAddr = req.Connection.RemoteAddr
-
- return duoHandler(duoConfig, duoAuthClient, request)
-}
-
-type duoAuthRequest struct {
- successResp *logical.Response
- username string
- method string
- passcode string
- ipAddr string
-}
-
-func duoHandler(duoConfig *DuoConfig, duoAuthClient AuthClient, request *duoAuthRequest) (
- *logical.Response, error) {
-
- duoUser := fmt.Sprintf(duoConfig.UsernameFormat, request.username)
-
- preauth, err := duoAuthClient.Preauth(
- authapi.PreauthUsername(duoUser),
- authapi.PreauthIpAddr(request.ipAddr),
- )
-
- if err != nil || preauth == nil {
- return logical.ErrorResponse("Could not call Duo preauth"), nil
- }
-
- if preauth.StatResult.Stat != "OK" {
- errorMsg := "Could not look up Duo user information"
- if preauth.StatResult.Message != nil {
- errorMsg = errorMsg + ": " + *preauth.StatResult.Message
- }
- if preauth.StatResult.Message_Detail != nil {
- errorMsg = errorMsg + " (" + *preauth.StatResult.Message_Detail + ")"
- }
- return logical.ErrorResponse(errorMsg), nil
- }
-
- switch preauth.Response.Result {
- case "allow":
- return request.successResp, err
- case "deny":
- return logical.ErrorResponse(preauth.Response.Status_Msg), nil
- case "enroll":
- return logical.ErrorResponse(fmt.Sprintf("%s (%s)",
- preauth.Response.Status_Msg,
- preauth.Response.Enroll_Portal_Url)), nil
- case "auth":
- break
- default:
- return logical.ErrorResponse(fmt.Sprintf("Invalid Duo preauth response: %s",
- preauth.Response.Result)), nil
- }
-
- options := []func(*url.Values){authapi.AuthUsername(duoUser)}
- if request.method == "" {
- request.method = "auto"
- }
- if request.method == "auto" || request.method == "push" {
- if duoConfig.PushInfo != "" {
- options = append(options, authapi.AuthPushinfo(duoConfig.PushInfo))
- }
- }
- if request.passcode != "" {
- request.method = "passcode"
- options = append(options, authapi.AuthPasscode(request.passcode))
- } else {
- options = append(options, authapi.AuthDevice("auto"))
- }
-
- result, err := duoAuthClient.Auth(request.method, options...)
-
- if err != nil || result == nil {
- return logical.ErrorResponse("Could not call Duo auth"), nil
- }
-
- if result.StatResult.Stat != "OK" {
- errorMsg := "Could not authenticate Duo user"
- if result.StatResult.Message != nil {
- errorMsg = errorMsg + ": " + *result.StatResult.Message
- }
- if result.StatResult.Message_Detail != nil {
- errorMsg = errorMsg + " (" + *result.StatResult.Message_Detail + ")"
- }
- return logical.ErrorResponse(errorMsg), nil
- }
-
- if result.Response.Result != "allow" {
- return logical.ErrorResponse(result.Response.Status_Msg), nil
- }
-
- return request.successResp, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo_test.go b/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo_test.go
deleted file mode 100644
index fd31128..0000000
--- a/vendor/github.com/hashicorp/vault/helper/mfa/duo/duo_test.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package duo
-
-import (
- "net/url"
- "strings"
- "testing"
-
- "github.com/duosecurity/duo_api_golang/authapi"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
-)
-
-type MockClientData struct {
- PreauthData *authapi.PreauthResult
- PreauthError error
- AuthData *authapi.AuthResult
- AuthError error
-}
-
-type MockAuthClient struct {
- MockData *MockClientData
-}
-
-func (c *MockAuthClient) Preauth(options ...func(*url.Values)) (*authapi.PreauthResult, error) {
- return c.MockData.PreauthData, c.MockData.PreauthError
-}
-
-func (c *MockAuthClient) Auth(factor string, options ...func(*url.Values)) (*authapi.AuthResult, error) {
- return c.MockData.AuthData, c.MockData.AuthError
-}
-
-func MockGetDuoAuthClient(data *MockClientData) func(*logical.Request, *DuoConfig) (AuthClient, error) {
- return func(*logical.Request, *DuoConfig) (AuthClient, error) {
- return getDuoAuthClient(data), nil
- }
-}
-
-func getDuoAuthClient(data *MockClientData) AuthClient {
- var c MockAuthClient
- // set default response to be successful
- preauthSuccessJSON := `
- {
- "Stat": "OK",
- "Response": {
- "Result": "auth",
- "Status_Msg": "Needs authentication",
- "Devices": []
- }
- }`
- if data.PreauthData == nil {
- data.PreauthData = &authapi.PreauthResult{}
- jsonutil.DecodeJSON([]byte(preauthSuccessJSON), data.PreauthData)
- }
-
- authSuccessJSON := `
- {
- "Stat": "OK",
- "Response": {
- "Result": "allow"
- }
- }`
- if data.AuthData == nil {
- data.AuthData = &authapi.AuthResult{}
- jsonutil.DecodeJSON([]byte(authSuccessJSON), data.AuthData)
- }
-
- c.MockData = data
- return &c
-}
-
-func TestDuoHandlerSuccess(t *testing.T) {
- successResp := &logical.Response{
- Auth: &logical.Auth{},
- }
- duoConfig := &DuoConfig{
- UsernameFormat: "%s",
- }
- duoAuthClient := getDuoAuthClient(&MockClientData{})
- resp, err := duoHandler(duoConfig, duoAuthClient, &duoAuthRequest{
- successResp: successResp,
- username: "",
- })
- if err != nil {
- t.Fatalf(err.Error())
- }
- if resp != successResp {
- t.Fatalf("Testing Duo authentication gave incorrect response (expected success, got: %v)", resp)
- }
-}
-
-func TestDuoHandlerReject(t *testing.T) {
- AuthData := &authapi.AuthResult{}
- authRejectJSON := `
- {
- "Stat": "OK",
- "Response": {
- "Result": "deny",
- "Status_Msg": "Invalid auth"
- }
- }`
- jsonutil.DecodeJSON([]byte(authRejectJSON), AuthData)
- successResp := &logical.Response{
- Auth: &logical.Auth{},
- }
- expectedError := AuthData.Response.Status_Msg
- duoConfig := &DuoConfig{
- UsernameFormat: "%s",
- }
- duoAuthClient := getDuoAuthClient(&MockClientData{
- AuthData: AuthData,
- })
- resp, err := duoHandler(duoConfig, duoAuthClient, &duoAuthRequest{
- successResp: successResp,
- username: "user",
- })
- if err != nil {
- t.Fatalf(err.Error())
- }
- error, ok := resp.Data["error"].(string)
- if !ok || !strings.Contains(error, expectedError) {
- t.Fatalf("Testing Duo authentication gave incorrect response (expected deny, got: %v)", error)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_access.go b/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_access.go
deleted file mode 100644
index d087b0f..0000000
--- a/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_access.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package duo
-
-import (
- "fmt"
- "net/url"
-
- "github.com/duosecurity/duo_api_golang"
- "github.com/duosecurity/duo_api_golang/authapi"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-type AuthClient interface {
- Preauth(options ...func(*url.Values)) (*authapi.PreauthResult, error)
- Auth(factor string, options ...func(*url.Values)) (*authapi.AuthResult, error)
-}
-
-func pathDuoAccess() *framework.Path {
- return &framework.Path{
- Pattern: `duo/access`,
- Fields: map[string]*framework.FieldSchema{
- "skey": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Duo secret key",
- },
- "ikey": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Duo integration key",
- },
- "host": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Duo api host",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: pathDuoAccessWrite,
- },
-
- HelpSynopsis: pathDuoAccessHelpSyn,
- HelpDescription: pathDuoAccessHelpDesc,
- }
-}
-
-func GetDuoAuthClient(req *logical.Request, config *DuoConfig) (AuthClient, error) {
- entry, err := req.Storage.Get("duo/access")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, fmt.Errorf(
- "Duo access credentials haven't been configured. Please configure\n" +
- "them at the 'duo/access' endpoint")
- }
- var access DuoAccess
- if err := entry.DecodeJSON(&access); err != nil {
- return nil, err
- }
-
- duoClient := duoapi.NewDuoApi(
- access.IKey,
- access.SKey,
- access.Host,
- config.UserAgent,
- )
- duoAuthClient := authapi.NewAuthApi(*duoClient)
- check, err := duoAuthClient.Check()
- if err != nil {
- return nil, err
- }
- if check.StatResult.Stat != "OK" {
- return nil, fmt.Errorf("Could not connect to Duo: %s (%s)", *check.StatResult.Message, *check.StatResult.Message_Detail)
- }
- return duoAuthClient, nil
-}
-
-func pathDuoAccessWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entry, err := logical.StorageEntryJSON("duo/access", DuoAccess{
- SKey: d.Get("skey").(string),
- IKey: d.Get("ikey").(string),
- Host: d.Get("host").(string),
- })
- if err != nil {
- return nil, err
- }
-
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-type DuoAccess struct {
- SKey string `json:"skey"`
- IKey string `json:"ikey"`
- Host string `json:"host"`
-}
-
-const pathDuoAccessHelpSyn = `
-Configure the access keys and host for Duo API connections.
-`
-
-const pathDuoAccessHelpDesc = `
-To authenticate users with Duo, the backend needs to know what host to connect to
-and must authenticate with an integration key and secret key. This endpoint is used
-to configure that information.
-`
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_config.go b/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_config.go
deleted file mode 100644
index 88c5647..0000000
--- a/vendor/github.com/hashicorp/vault/helper/mfa/duo/path_duo_config.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package duo
-
-import (
- "errors"
- "strings"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathDuoConfig() *framework.Path {
- return &framework.Path{
- Pattern: `duo/config`,
- Fields: map[string]*framework.FieldSchema{
- "user_agent": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "User agent to connect to Duo (default \"\")",
- },
- "username_format": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Format string given auth backend username as argument to create Duo username (default '%s')",
- },
- "push_info": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "A string of URL-encoded key/value pairs that provides additional context about the authentication attemmpt in the Duo Mobile app",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: pathDuoConfigWrite,
- logical.ReadOperation: pathDuoConfigRead,
- },
-
- HelpSynopsis: pathDuoConfigHelpSyn,
- HelpDescription: pathDuoConfigHelpDesc,
- }
-}
-
-func GetDuoConfig(req *logical.Request) (*DuoConfig, error) {
- var result DuoConfig
- // all config parameters are optional, so path need not exist
- entry, err := req.Storage.Get("duo/config")
- if err == nil && entry != nil {
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
- }
- if result.UsernameFormat == "" {
- result.UsernameFormat = "%s"
- }
- return &result, nil
-}
-
-func pathDuoConfigWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- username_format := d.Get("username_format").(string)
- if username_format == "" {
- username_format = "%s"
- }
- if !strings.Contains(username_format, "%s") {
- return nil, errors.New("username_format must include username ('%s')")
- }
- entry, err := logical.StorageEntryJSON("duo/config", DuoConfig{
- UsernameFormat: username_format,
- UserAgent: d.Get("user_agent").(string),
- PushInfo: d.Get("push_info").(string),
- })
- if err != nil {
- return nil, err
- }
-
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func pathDuoConfigRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- config, err := GetDuoConfig(req)
- if err != nil {
- return nil, err
- }
- if config == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "username_format": config.UsernameFormat,
- "user_agent": config.UserAgent,
- "push_info": config.PushInfo,
- },
- }, nil
-}
-
-type DuoConfig struct {
- UsernameFormat string `json:"username_format"`
- UserAgent string `json:"user_agent"`
- PushInfo string `json:"push_info"`
-}
-
-const pathDuoConfigHelpSyn = `
-Configure Duo second factor behavior.
-`
-
-const pathDuoConfigHelpDesc = `
-This endpoint allows you to configure how the original auth backend username maps to
-the Duo username by providing a template format string.
-`
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/mfa.go b/vendor/github.com/hashicorp/vault/helper/mfa/mfa.go
deleted file mode 100644
index 9939a5f..0000000
--- a/vendor/github.com/hashicorp/vault/helper/mfa/mfa.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Package mfa provides wrappers to add multi-factor authentication
-// to any auth backend.
-//
-// To add MFA to a backend, replace its login path with the
-// paths returned by MFAPaths and add the additional root
-// paths returned by MFARootPaths. The backend provides
-// the username to the MFA wrapper in Auth.Metadata['username'].
-//
-// To add an additional MFA type, create a subpackage that
-// implements [Type]Paths, [Type]RootPaths, and [Type]Handler
-// functions and add them to MFAPaths, MFARootPaths, and
-// handlers respectively.
-package mfa
-
-import (
- "github.com/hashicorp/vault/helper/mfa/duo"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// MFAPaths returns paths to wrap the original login path and configure MFA.
-// When adding MFA to a backend, these paths should be included instead of
-// the login path in Backend.Paths.
-func MFAPaths(originalBackend *framework.Backend, loginPath *framework.Path) []*framework.Path {
- var b backend
- b.Backend = originalBackend
- return append(duo.DuoPaths(), pathMFAConfig(&b), wrapLoginPath(&b, loginPath))
-}
-
-// MFARootPaths returns path strings used to configure MFA. When adding MFA
-// to a backend, these paths should be included in
-// Backend.PathsSpecial.Root.
-func MFARootPaths() []string {
- return append(duo.DuoRootPaths(), "mfa_config")
-}
-
-// HandlerFunc is the callback called to handle MFA for a login request.
-type HandlerFunc func(*logical.Request, *framework.FieldData, *logical.Response) (*logical.Response, error)
-
-// handlers maps each supported MFA type to its handler.
-var handlers = map[string]HandlerFunc{
- "duo": duo.DuoHandler,
-}
-
-type backend struct {
- *framework.Backend
-}
-
-func wrapLoginPath(b *backend, loginPath *framework.Path) *framework.Path {
- loginPath.Fields["passcode"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "One time passcode (optional)",
- }
- loginPath.Fields["method"] = &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Multi-factor auth method to use (optional)",
- }
- // wrap write callback to do MFA after auth
- loginHandler := loginPath.Callbacks[logical.UpdateOperation]
- loginPath.Callbacks[logical.UpdateOperation] = b.wrapLoginHandler(loginHandler)
- return loginPath
-}
-
-func (b *backend) wrapLoginHandler(loginHandler framework.OperationFunc) framework.OperationFunc {
- return func(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- // login with original login function first
- resp, err := loginHandler(req, d)
- if err != nil || resp.Auth == nil {
- return resp, err
- }
-
- // check if multi-factor enabled
- mfa_config, err := b.MFAConfig(req)
- if err != nil || mfa_config == nil {
- return resp, nil
- }
-
- // perform multi-factor authentication if type supported
- handler, ok := handlers[mfa_config.Type]
- if ok {
- return handler(req, d, resp)
- } else {
- return resp, err
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/mfa_test.go b/vendor/github.com/hashicorp/vault/helper/mfa/mfa_test.go
deleted file mode 100644
index f618971..0000000
--- a/vendor/github.com/hashicorp/vault/helper/mfa/mfa_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package mfa
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- logicaltest "github.com/hashicorp/vault/logical/testing"
-)
-
-// MakeTestBackend creates a simple MFA enabled backend.
-// Login (before MFA) always succeeds with policy "foo".
-// An MFA "test" type is added to mfa.handlers that succeeds
-// if MFA method is "accept", otherwise it rejects.
-func MakeTestBackend() *framework.Backend {
- handlers["test"] = testMFAHandler
- b := &framework.Backend{
- Help: "",
-
- PathsSpecial: &logical.Paths{
- Root: MFARootPaths(),
- Unauthenticated: []string{
- "login",
- },
- },
- Paths: MFAPaths(nil, testPathLogin()),
- }
- return b
-}
-
-func testPathLogin() *framework.Path {
- return &framework.Path{
- Pattern: `login`,
- Fields: map[string]*framework.FieldSchema{
- "username": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: testPathLoginHandler,
- },
- }
-}
-
-func testPathLoginHandler(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- username := d.Get("username").(string)
-
- return &logical.Response{
- Auth: &logical.Auth{
- Policies: []string{"foo"},
- Metadata: map[string]string{
- "username": username,
- },
- },
- }, nil
-}
-
-func testMFAHandler(req *logical.Request, d *framework.FieldData, resp *logical.Response) (
- *logical.Response, error) {
- if d.Get("method").(string) != "accept" {
- return logical.ErrorResponse("Deny access"), nil
- } else {
- return resp, nil
- }
-}
-
-func TestMFALogin(t *testing.T) {
- b := MakeTestBackend()
-
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepEnableMFA(t),
- testAccStepLogin(t, "user"),
- },
- })
-}
-
-func TestMFALoginDenied(t *testing.T) {
- b := MakeTestBackend()
-
- logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- Backend: b,
- Steps: []logicaltest.TestStep{
- testAccStepEnableMFA(t),
- testAccStepLoginDenied(t, "user"),
- },
- })
-}
-
-func testAccStepEnableMFA(t *testing.T) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "mfa_config",
- Data: map[string]interface{}{
- "type": "test",
- },
- }
-}
-
-func testAccStepLogin(t *testing.T, username string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login",
- Data: map[string]interface{}{
- "method": "accept",
- "username": username,
- },
- Unauthenticated: true,
- Check: logicaltest.TestCheckAuth([]string{"foo"}),
- }
-}
-
-func testAccStepLoginDenied(t *testing.T, username string) logicaltest.TestStep {
- return logicaltest.TestStep{
- Operation: logical.UpdateOperation,
- Path: "login",
- Data: map[string]interface{}{
- "method": "deny",
- "username": username,
- },
- Unauthenticated: true,
- Check: logicaltest.TestCheckError(),
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/mfa/path_mfa_config.go b/vendor/github.com/hashicorp/vault/helper/mfa/path_mfa_config.go
deleted file mode 100644
index 8f96e25..0000000
--- a/vendor/github.com/hashicorp/vault/helper/mfa/path_mfa_config.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package mfa
-
-import (
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func pathMFAConfig(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: `mfa_config`,
- Fields: map[string]*framework.FieldSchema{
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Enables MFA with given backend (available: duo)",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathMFAConfigWrite,
- logical.ReadOperation: b.pathMFAConfigRead,
- },
-
- HelpSynopsis: pathMFAConfigHelpSyn,
- HelpDescription: pathMFAConfigHelpDesc,
- }
-}
-
-func (b *backend) MFAConfig(req *logical.Request) (*MFAConfig, error) {
- entry, err := req.Storage.Get("mfa_config")
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
- var result MFAConfig
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
- return &result, nil
-}
-
-func (b *backend) pathMFAConfigWrite(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entry, err := logical.StorageEntryJSON("mfa_config", MFAConfig{
- Type: d.Get("type").(string),
- })
- if err != nil {
- return nil, err
- }
-
- if err := req.Storage.Put(entry); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathMFAConfigRead(
- req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- config, err := b.MFAConfig(req)
- if err != nil {
- return nil, err
- }
- if config == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "type": config.Type,
- },
- }, nil
-}
-
-type MFAConfig struct {
- Type string `json:"type"`
-}
-
-const pathMFAConfigHelpSyn = `
-Configure multi factor backend.
-`
-
-const pathMFAConfigHelpDesc = `
-This endpoint allows you to turn on multi-factor authentication with a given backend.
-Currently only Duo is supported.
-`
diff --git a/vendor/github.com/hashicorp/vault/helper/mlock/mlock.go b/vendor/github.com/hashicorp/vault/helper/mlock/mlock.go
deleted file mode 100644
index 1675633..0000000
--- a/vendor/github.com/hashicorp/vault/helper/mlock/mlock.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package mlock
-
-// This should be set by the OS-specific packages to tell whether LockMemory
-// is supported or not.
-var supported bool
-
-// Supported returns true if LockMemory is functional on this system.
-func Supported() bool {
- return supported
-}
-
-// LockMemory prevents any memory from being swapped to disk.
-func LockMemory() error {
- return lockMemory()
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unavail.go b/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unavail.go
deleted file mode 100644
index 8084963..0000000
--- a/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unavail.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build android darwin nacl netbsd plan9 windows
-
-package mlock
-
-func init() {
- supported = false
-}
-
-func lockMemory() error {
- // XXX: No good way to do this on Windows. There is the VirtualLock
- // method, but it requires a specific address and offset.
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unix.go b/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unix.go
deleted file mode 100644
index af0a69d..0000000
--- a/vendor/github.com/hashicorp/vault/helper/mlock/mlock_unix.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build dragonfly freebsd linux openbsd solaris
-
-package mlock
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-func init() {
- supported = true
-}
-
-func lockMemory() error {
- // Mlockall prevents all current and future pages from being swapped out.
- return unix.Mlockall(syscall.MCL_CURRENT | syscall.MCL_FUTURE)
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
index 957d533..ae8c58b 100644
--- a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
+++ b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
@@ -3,10 +3,14 @@ package parseutil
import (
"encoding/json"
"errors"
+ "fmt"
"strconv"
"strings"
"time"
+ "github.com/hashicorp/errwrap"
+ sockaddr "github.com/hashicorp/go-sockaddr"
+ "github.com/hashicorp/vault/helper/strutil"
"github.com/mitchellh/mapstructure"
)
@@ -56,6 +60,43 @@ func ParseDurationSecond(in interface{}) (time.Duration, error) {
return dur, nil
}
+func ParseInt(in interface{}) (int64, error) {
+ var ret int64
+ jsonIn, ok := in.(json.Number)
+ if ok {
+ in = jsonIn.String()
+ }
+ switch in.(type) {
+ case string:
+ inp := in.(string)
+ if inp == "" {
+ return 0, nil
+ }
+ var err error
+ left, err := strconv.ParseInt(inp, 10, 64)
+ if err != nil {
+ return ret, err
+ }
+ ret = left
+ case int:
+ ret = int64(in.(int))
+ case int32:
+ ret = int64(in.(int32))
+ case int64:
+ ret = in.(int64)
+ case uint:
+ ret = int64(in.(uint))
+ case uint32:
+ ret = int64(in.(uint32))
+ case uint64:
+ ret = int64(in.(uint64))
+ default:
+ return 0, errors.New("could not parse value from input")
+ }
+
+ return ret, nil
+}
+
func ParseBool(in interface{}) (bool, error) {
var result bool
if err := mapstructure.WeakDecode(in, &result); err != nil {
@@ -63,3 +104,60 @@ func ParseBool(in interface{}) (bool, error) {
}
return result, nil
}
+
+func ParseCommaStringSlice(in interface{}) ([]string, error) {
+ var result []string
+ config := &mapstructure.DecoderConfig{
+ Result: &result,
+ WeaklyTypedInput: true,
+ DecodeHook: mapstructure.StringToSliceHookFunc(","),
+ }
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return nil, err
+ }
+ if err := decoder.Decode(in); err != nil {
+ return nil, err
+ }
+ return strutil.TrimStrings(result), nil
+}
+
+func ParseAddrs(addrs interface{}) ([]*sockaddr.SockAddrMarshaler, error) {
+ out := make([]*sockaddr.SockAddrMarshaler, 0)
+ stringAddrs := make([]string, 0)
+
+ switch addrs.(type) {
+ case string:
+ stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",")
+ if len(stringAddrs) == 0 {
+ return nil, fmt.Errorf("unable to parse addresses from %v", addrs)
+ }
+
+ case []string:
+ stringAddrs = addrs.([]string)
+
+ case []interface{}:
+ for _, v := range addrs.([]interface{}) {
+ stringAddr, ok := v.(string)
+ if !ok {
+ return nil, fmt.Errorf("error parsing %v as string", v)
+ }
+ stringAddrs = append(stringAddrs, stringAddr)
+ }
+
+ default:
+ return nil, fmt.Errorf("unknown address input type %T", addrs)
+ }
+
+ for _, addr := range stringAddrs {
+ sa, err := sockaddr.NewSockAddr(addr)
+ if err != nil {
+ return nil, errwrap.Wrapf(fmt.Sprintf("error parsing address %q: {{err}}", addr), err)
+ }
+ out = append(out, &sockaddr.SockAddrMarshaler{
+ SockAddr: sa,
+ })
+ }
+
+ return out, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil_test.go b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil_test.go
deleted file mode 100644
index 7168a45..0000000
--- a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package parseutil
-
-import (
- "encoding/json"
- "testing"
- "time"
-)
-
-func Test_ParseDurationSecond(t *testing.T) {
- outp, err := ParseDurationSecond("9876s")
- if err != nil {
- t.Fatal(err)
- }
- if outp != time.Duration(9876)*time.Second {
- t.Fatal("not equivalent")
- }
- outp, err = ParseDurationSecond("9876")
- if err != nil {
- t.Fatal(err)
- }
- if outp != time.Duration(9876)*time.Second {
- t.Fatal("not equivalent")
- }
- outp, err = ParseDurationSecond(json.Number("4352"))
- if err != nil {
- t.Fatal(err)
- }
- if outp != time.Duration(4352)*time.Second {
- t.Fatal("not equivalent")
- }
-}
-
-func Test_ParseBool(t *testing.T) {
- outp, err := ParseBool("true")
- if err != nil {
- t.Fatal(err)
- }
- if !outp {
- t.Fatal("wrong output")
- }
- outp, err = ParseBool(1)
- if err != nil {
- t.Fatal(err)
- }
- if !outp {
- t.Fatal("wrong output")
- }
- outp, err = ParseBool(true)
- if err != nil {
- t.Fatal(err)
- }
- if !outp {
- t.Fatal("wrong output")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/password/password.go b/vendor/github.com/hashicorp/vault/helper/password/password.go
deleted file mode 100644
index 102fbe8..0000000
--- a/vendor/github.com/hashicorp/vault/helper/password/password.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// password is a package for reading a password securely from a terminal.
-// The code in this package disables echo in the terminal so that the
-// password is not echoed back in plaintext to the user.
-package password
-
-import (
- "errors"
- "io"
- "os"
- "os/signal"
-)
-
-var ErrInterrupted = errors.New("interrupted")
-
-// Read reads the password from the given os.File. The password
-// will not be echoed back to the user. Ctrl-C will automatically return
-// from this function with a blank string and an ErrInterrupted.
-func Read(f *os.File) (string, error) {
- ch := make(chan os.Signal, 1)
- signal.Notify(ch, os.Interrupt)
- defer signal.Stop(ch)
-
- // Run the actual read in a go-routine so that we can still detect signals
- var result string
- var resultErr error
- doneCh := make(chan struct{})
- go func() {
- defer close(doneCh)
- result, resultErr = read(f)
- }()
-
- // Wait on either the read to finish or the signal to come through
- select {
- case <-ch:
- return "", ErrInterrupted
- case <-doneCh:
- return result, resultErr
- }
-}
-
-func readline(f *os.File) (string, error) {
- var buf [1]byte
- resultBuf := make([]byte, 0, 64)
- for {
- n, err := f.Read(buf[:])
- if err != nil && err != io.EOF {
- return "", err
- }
- if n == 0 || buf[0] == '\n' || buf[0] == '\r' {
- break
- }
-
- // ASCII code 3 is what is sent for a Ctrl-C while reading raw.
- // If we see that, then get the interrupt. We have to do this here
- // because terminals in raw mode won't catch it at the shell level.
- if buf[0] == 3 {
- return "", ErrInterrupted
- }
-
- resultBuf = append(resultBuf, buf[0])
- }
-
- return string(resultBuf), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/password/password_solaris.go b/vendor/github.com/hashicorp/vault/helper/password/password_solaris.go
deleted file mode 100644
index 43ad722..0000000
--- a/vendor/github.com/hashicorp/vault/helper/password/password_solaris.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// +build solaris
-
-package password
-
-import (
- "fmt"
- "os"
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-func read(f *os.File) (string, error) {
- fd := int(f.Fd())
- if !isTerminal(fd) {
- return "", fmt.Errorf("File descriptor %d is not a terminal", fd)
- }
-
- oldState, err := makeRaw(fd)
- if err != nil {
- return "", err
- }
- defer unix.IoctlSetTermios(fd, unix.TCSETS, oldState)
-
- return readline(f)
-}
-
-// isTerminal returns true if there is a terminal attached to the given
-// file descriptor.
-// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
-func isTerminal(fd int) bool {
- var termio unix.Termio
- err := unix.IoctlSetTermio(fd, unix.TCGETA, &termio)
- return err == nil
-}
-
-// makeRaw puts the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c
-func makeRaw(fd int) (*unix.Termios, error) {
- oldTermiosPtr, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
- if err != nil {
- return nil, err
- }
- oldTermios := *oldTermiosPtr
-
- newTermios := oldTermios
- newTermios.Lflag &^= syscall.ECHO | syscall.ECHOE | syscall.ECHOK | syscall.ECHONL
- if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil {
- return nil, err
- }
-
- return oldTermiosPtr, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/password/password_unix.go b/vendor/github.com/hashicorp/vault/helper/password/password_unix.go
deleted file mode 100644
index 5ce7501..0000000
--- a/vendor/github.com/hashicorp/vault/helper/password/password_unix.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build linux darwin freebsd netbsd openbsd
-
-package password
-
-import (
- "fmt"
- "os"
-
- "golang.org/x/crypto/ssh/terminal"
-)
-
-func read(f *os.File) (string, error) {
- fd := int(f.Fd())
- if !terminal.IsTerminal(fd) {
- return "", fmt.Errorf("File descriptor %d is not a terminal", fd)
- }
-
- oldState, err := terminal.MakeRaw(fd)
- if err != nil {
- return "", err
- }
- defer terminal.Restore(fd, oldState)
-
- return readline(f)
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/password/password_windows.go b/vendor/github.com/hashicorp/vault/helper/password/password_windows.go
deleted file mode 100644
index 1cd7dc7..0000000
--- a/vendor/github.com/hashicorp/vault/helper/password/password_windows.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// +build windows
-
-package password
-
-import (
- "os"
- "syscall"
-)
-
-var (
- kernel32 = syscall.MustLoadDLL("kernel32.dll")
- setConsoleModeProc = kernel32.MustFindProc("SetConsoleMode")
-)
-
-// Magic constant from MSDN to control whether charactesr read are
-// repeated back on the console.
-//
-// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
-const ENABLE_ECHO_INPUT = 0x0004
-
-func read(f *os.File) (string, error) {
- handle := syscall.Handle(f.Fd())
-
- // Grab the old console mode so we can reset it. We defer the reset
- // right away because it doesn't matter (it is idempotent).
- var oldMode uint32
- if err := syscall.GetConsoleMode(handle, &oldMode); err != nil {
- return "", err
- }
- defer setConsoleMode(handle, oldMode)
-
- // The new mode is the old mode WITHOUT the echo input flag set.
- var newMode uint32 = uint32(int(oldMode) & ^ENABLE_ECHO_INPUT)
- if err := setConsoleMode(handle, newMode); err != nil {
- return "", err
- }
-
- return readline(f)
-}
-
-func setConsoleMode(console syscall.Handle, mode uint32) error {
- r, _, err := setConsoleModeProc.Call(uintptr(console), uintptr(mode))
- if r == 0 {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go
deleted file mode 100644
index d8b7f60..0000000
--- a/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package pgpkeys
-
-import (
- "bytes"
- "encoding/base64"
- "fmt"
-
- "github.com/keybase/go-crypto/openpgp"
- "github.com/keybase/go-crypto/openpgp/packet"
-)
-
-// EncryptShares takes an ordered set of byte slices to encrypt and the
-// corresponding base64-encoded public keys to encrypt them with, encrypts each
-// byte slice with the corresponding public key.
-//
-// Note: There is no corresponding test function; this functionality is
-// thoroughly tested in the init and rekey command unit tests
-func EncryptShares(input [][]byte, pgpKeys []string) ([]string, [][]byte, error) {
- if len(input) != len(pgpKeys) {
- return nil, nil, fmt.Errorf("Mismatch between number items to encrypt and number of PGP keys")
- }
- encryptedShares := make([][]byte, 0, len(pgpKeys))
- entities, err := GetEntities(pgpKeys)
- if err != nil {
- return nil, nil, err
- }
- for i, entity := range entities {
- ctBuf := bytes.NewBuffer(nil)
- pt, err := openpgp.Encrypt(ctBuf, []*openpgp.Entity{entity}, nil, nil, nil)
- if err != nil {
- return nil, nil, fmt.Errorf("Error setting up encryption for PGP message: %s", err)
- }
- _, err = pt.Write(input[i])
- if err != nil {
- return nil, nil, fmt.Errorf("Error encrypting PGP message: %s", err)
- }
- pt.Close()
- encryptedShares = append(encryptedShares, ctBuf.Bytes())
- }
-
- fingerprints, err := GetFingerprints(nil, entities)
- if err != nil {
- return nil, nil, err
- }
-
- return fingerprints, encryptedShares, nil
-}
-
-// GetFingerprints takes in a list of openpgp Entities and returns the
-// fingerprints. If entities is nil, it will instead parse both entities and
-// fingerprints from the pgpKeys string slice.
-func GetFingerprints(pgpKeys []string, entities []*openpgp.Entity) ([]string, error) {
- if entities == nil {
- var err error
- entities, err = GetEntities(pgpKeys)
-
- if err != nil {
- return nil, err
- }
- }
- ret := make([]string, 0, len(entities))
- for _, entity := range entities {
- ret = append(ret, fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))
- }
- return ret, nil
-}
-
-// GetEntities takes in a string array of base64-encoded PGP keys and returns
-// the openpgp Entities
-func GetEntities(pgpKeys []string) ([]*openpgp.Entity, error) {
- ret := make([]*openpgp.Entity, 0, len(pgpKeys))
- for _, keystring := range pgpKeys {
- data, err := base64.StdEncoding.DecodeString(keystring)
- if err != nil {
- return nil, fmt.Errorf("Error decoding given PGP key: %s", err)
- }
- entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data)))
- if err != nil {
- return nil, fmt.Errorf("Error parsing given PGP key: %s", err)
- }
- ret = append(ret, entity)
- }
- return ret, nil
-}
-
-// DecryptBytes takes in base64-encoded encrypted bytes and the base64-encoded
-// private key and decrypts it. A bytes.Buffer is returned to allow the caller
-// to do useful thing with it (get it as a []byte, get it as a string, use it
-// as an io.Reader, etc), and also because this function doesn't know if what
-// comes out is binary data or a string, so let the caller decide.
-func DecryptBytes(encodedCrypt, privKey string) (*bytes.Buffer, error) {
- privKeyBytes, err := base64.StdEncoding.DecodeString(privKey)
- if err != nil {
- return nil, fmt.Errorf("Error decoding base64 private key: %s", err)
- }
-
- cryptBytes, err := base64.StdEncoding.DecodeString(encodedCrypt)
- if err != nil {
- return nil, fmt.Errorf("Error decoding base64 crypted bytes: %s", err)
- }
-
- entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes)))
- if err != nil {
- return nil, fmt.Errorf("Error parsing private key: %s", err)
- }
-
- entityList := &openpgp.EntityList{entity}
- md, err := openpgp.ReadMessage(bytes.NewBuffer(cryptBytes), entityList, nil, nil)
- if err != nil {
- return nil, fmt.Errorf("Error decrypting the messages: %s", err)
- }
-
- ptBuf := bytes.NewBuffer(nil)
- ptBuf.ReadFrom(md.UnverifiedBody)
-
- return ptBuf, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go
deleted file mode 100644
index ccfc64b..0000000
--- a/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package pgpkeys
-
-import (
- "bytes"
- "encoding/base64"
- "errors"
- "fmt"
- "os"
- "strings"
-
- "github.com/keybase/go-crypto/openpgp"
-)
-
-// PGPPubKeyFiles implements the flag.Value interface and allows
-// parsing and reading a list of pgp public key files
-type PubKeyFilesFlag []string
-
-func (p *PubKeyFilesFlag) String() string {
- return fmt.Sprint(*p)
-}
-
-func (p *PubKeyFilesFlag) Set(value string) error {
- if len(*p) > 0 {
- return errors.New("pgp-keys can only be specified once")
- }
-
- splitValues := strings.Split(value, ",")
-
- keybaseMap, err := FetchKeybasePubkeys(splitValues)
- if err != nil {
- return err
- }
-
- // Now go through the actual flag, and substitute in resolved keybase
- // entries where appropriate
- for _, keyfile := range splitValues {
- if strings.HasPrefix(keyfile, kbPrefix) {
- key := keybaseMap[keyfile]
- if key == "" {
- return fmt.Errorf("key for keybase user %s was not found in the map", strings.TrimPrefix(keyfile, kbPrefix))
- }
- *p = append(*p, key)
- continue
- }
-
- pgpStr, err := ReadPGPFile(keyfile)
- if err != nil {
- return err
- }
-
- *p = append(*p, pgpStr)
- }
- return nil
-}
-
-func ReadPGPFile(path string) (string, error) {
- if path[0] == '@' {
- path = path[1:]
- }
- f, err := os.Open(path)
- if err != nil {
- return "", err
- }
- defer f.Close()
- buf := bytes.NewBuffer(nil)
- _, err = buf.ReadFrom(f)
- if err != nil {
- return "", err
- }
-
- // First parse as an armored keyring file, if that doesn't work, treat it as a straight binary/b64 string
- keyReader := bytes.NewReader(buf.Bytes())
- entityList, err := openpgp.ReadArmoredKeyRing(keyReader)
- if err == nil {
- if len(entityList) != 1 {
- return "", fmt.Errorf("more than one key found in file %s", path)
- }
- if entityList[0] == nil {
- return "", fmt.Errorf("primary key was nil for file %s", path)
- }
-
- serializedEntity := bytes.NewBuffer(nil)
- err = entityList[0].Serialize(serializedEntity)
- if err != nil {
- return "", fmt.Errorf("error serializing entity for file %s: %s", path, err)
- }
-
- return base64.StdEncoding.EncodeToString(serializedEntity.Bytes()), nil
- }
-
- _, err = base64.StdEncoding.DecodeString(buf.String())
- if err == nil {
- return buf.String(), nil
- }
- return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
-
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag_test.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag_test.go
deleted file mode 100644
index 6fa6718..0000000
--- a/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag_test.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package pgpkeys
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/hex"
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "reflect"
- "strings"
- "testing"
-
- "github.com/keybase/go-crypto/openpgp"
- "github.com/keybase/go-crypto/openpgp/packet"
-)
-
-func TestPubKeyFilesFlag_implements(t *testing.T) {
- var raw interface{}
- raw = new(PubKeyFilesFlag)
- if _, ok := raw.(flag.Value); !ok {
- t.Fatalf("PubKeysFilesFlag should be a Value")
- }
-}
-
-func TestPubKeyFilesFlagSetBinary(t *testing.T) {
- tempDir, err := ioutil.TempDir("", "vault-test")
- if err != nil {
- t.Fatalf("Error creating temporary directory: %s", err)
- }
- defer os.RemoveAll(tempDir)
-
- decoder := base64.StdEncoding
- pub1Bytes, err := decoder.DecodeString(pubKey1)
- if err != nil {
- t.Fatalf("Error decoding bytes for public key 1: %s", err)
- }
- err = ioutil.WriteFile(tempDir+"/pubkey1", pub1Bytes, 0755)
- if err != nil {
- t.Fatalf("Error writing pub key 1 to temp file: %s", err)
- }
- pub2Bytes, err := decoder.DecodeString(pubKey2)
- if err != nil {
- t.Fatalf("Error decoding bytes for public key 2: %s", err)
- }
- err = ioutil.WriteFile(tempDir+"/pubkey2", pub2Bytes, 0755)
- if err != nil {
- t.Fatalf("Error writing pub key 2 to temp file: %s", err)
- }
- pub3Bytes, err := decoder.DecodeString(pubKey3)
- if err != nil {
- t.Fatalf("Error decoding bytes for public key 3: %s", err)
- }
- err = ioutil.WriteFile(tempDir+"/pubkey3", pub3Bytes, 0755)
- if err != nil {
- t.Fatalf("Error writing pub key 3 to temp file: %s", err)
- }
-
- pkf := new(PubKeyFilesFlag)
- err = pkf.Set(tempDir + "/pubkey1,@" + tempDir + "/pubkey2")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- err = pkf.Set(tempDir + "/pubkey3")
- if err == nil {
- t.Fatalf("err: should not have been able to set a second value")
- }
-
- expected := []string{strings.Replace(pubKey1, "\n", "", -1), strings.Replace(pubKey2, "\n", "", -1)}
- if !reflect.DeepEqual(pkf.String(), fmt.Sprint(expected)) {
- t.Fatalf("Bad: %#v", pkf)
- }
-}
-
-func TestPubKeyFilesFlagSetB64(t *testing.T) {
- tempDir, err := ioutil.TempDir("", "vault-test")
- if err != nil {
- t.Fatalf("Error creating temporary directory: %s", err)
- }
- defer os.RemoveAll(tempDir)
-
- err = ioutil.WriteFile(tempDir+"/pubkey1", []byte(pubKey1), 0755)
- if err != nil {
- t.Fatalf("Error writing pub key 1 to temp file: %s", err)
- }
- err = ioutil.WriteFile(tempDir+"/pubkey2", []byte(pubKey2), 0755)
- if err != nil {
- t.Fatalf("Error writing pub key 2 to temp file: %s", err)
- }
- err = ioutil.WriteFile(tempDir+"/pubkey3", []byte(pubKey3), 0755)
- if err != nil {
- t.Fatalf("Error writing pub key 3 to temp file: %s", err)
- }
-
- pkf := new(PubKeyFilesFlag)
- err = pkf.Set(tempDir + "/pubkey1,@" + tempDir + "/pubkey2")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- err = pkf.Set(tempDir + "/pubkey3")
- if err == nil {
- t.Fatalf("err: should not have been able to set a second value")
- }
-
- expected := []string{pubKey1, pubKey2}
- if !reflect.DeepEqual(pkf.String(), fmt.Sprint(expected)) {
- t.Fatalf("bad: got %s, expected %s", pkf.String(), fmt.Sprint(expected))
- }
-}
-
-func TestPubKeyFilesFlagSetKeybase(t *testing.T) {
- tempDir, err := ioutil.TempDir("", "vault-test")
- if err != nil {
- t.Fatalf("Error creating temporary directory: %s", err)
- }
- defer os.RemoveAll(tempDir)
-
- err = ioutil.WriteFile(tempDir+"/pubkey2", []byte(pubKey2), 0755)
- if err != nil {
- t.Fatalf("Error writing pub key 2 to temp file: %s", err)
- }
-
- pkf := new(PubKeyFilesFlag)
- err = pkf.Set("keybase:jefferai,@" + tempDir + "/pubkey2" + ",keybase:hashicorp")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- fingerprints := []string{}
- for _, pubkey := range []string(*pkf) {
- keyBytes, err := base64.StdEncoding.DecodeString(pubkey)
- if err != nil {
- t.Fatalf("bad: %v", err)
- }
- pubKeyBuf := bytes.NewBuffer(keyBytes)
- reader := packet.NewReader(pubKeyBuf)
- entity, err := openpgp.ReadEntity(reader)
- if err != nil {
- t.Fatalf("bad: %v", err)
- }
- if entity == nil {
- t.Fatalf("nil entity encountered")
- }
- fingerprints = append(fingerprints, hex.EncodeToString(entity.PrimaryKey.Fingerprint[:]))
- }
-
- exp := []string{
- "0f801f518ec853daff611e836528efcac6caa3db",
- "cf3d4694c9f57b28cb4092c2eb832c67eb5e8957",
- "91a6e7f85d05c65630bef18951852d87348ffc4c",
- }
-
- if !reflect.DeepEqual(fingerprints, exp) {
- t.Fatalf("bad: got \n%#v\nexpected\n%#v\n", fingerprints, exp)
- }
-}
-
-const pubKey1 = `mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da
-rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/
-063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f
-sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg
-8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3Qg
-S2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOfLr44B
-HbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRTJfjECi+AuTGeDwBy84TD
-cRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3CEe8cMwIPqPT2kajJVdOyrvkyuFOdPFOE
-A7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlB
-C0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXa
-QKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7oEDCn
-aY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I1Ktm698UAZS9Jt8y
-jak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb
-6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5N
-ZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu
-9p315E87DOleYwxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ
-AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYu
-lEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHN
-C1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0
-YwKoz3h9+QEcZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJi
-oPn2jVMnXCm4EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH
-/AtY+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcI
-PXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O
-9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx
-8iDV+dNtDVKfPRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKd
-OIu60YPNE4+h7u2CfYyFPu3AlUaGNMBlvy6PEpU=`
-const pubKey2 = `mQENBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG
-Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4
-0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e
-Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk
-Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAG0EFZhdWx0IFRlc3Qg
-S2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOuDLGfr
-XolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHipZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO
-2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABqhb5ojexdnAYRswaHV201ZCclj9rnJN1P
-Ag0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmGkdrg8K8ARmRILjmwuBAgJM0eXBZHNGWX
-elk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0vDttB+ZXqF88W9jAYlvdgbTtajNF5IDY
-DjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlxk4e5AQ0EVduQkQEIAOjZV5tbpfIh5Qef
-pIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg
-+YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/tGF5xE3e5CoZRsHV/c92h3t1LdJNOnC5m
-UKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBHyt0tdHtIWuQv6joTJzujqViRhlCwQYzQ
-SKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1rENO8JOuPu6tMS+znFu67skq2gFFZwCQW
-IjdHm+2ukE+PE580WAWudyMAEQEAAYkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZ
-AQIABgUCVduQkQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDD
-hnV3bXQsCvn/6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQ
-e3l4CqJvkn6jybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4K
-BIrp/bhG6PdnigKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eY
-ENtyOmEMWOFCLLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H
-/1trYUtJjXQKHmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7
-PkUZTfpaP/L6DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0U
-PEnjvtZTp5yOhTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQd
-w/2epIewH0L/FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4
-MFOMVRn1dc3qdXlg3mimA+iK7tABQfG0RJ9YzWs=`
-const pubKey3 = `mQENBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj
-6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4
-Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH
-CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy
-resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAG0EFZhdWx0IFRlc3Qg
-S2V5IDOJATgEEwECACIFAlXbkiMCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEPR5S1b8
-LcbdWjEH/2mhqC9a0Vk1IzOgxEoVxYVqVdvaxI0nTZOTfmcFYn4HQlQ+SLEoyNWe5jtkhx4k5uHi
-pxwKHzOv02YM14NWC6bvKw2CQETLDPG4Cv8YMUmpho5tnMDdttIzp8HjyJRtHazU1uTes2/yuqh6
-LHCejVJI0uST3RibquwdG3QjPP8Umxu+YC9+FOW2Kit/AQ8JluFDJdq3/wSX8VfYZrGdgmreE7KY
-MolhCkzGSPj7oFygw8LqKoJvt9tCuBKhZMBuMv1sB5CoJIWdPoqOZc4U7L1XdqfKvFZR/RhuXgN1
-lkI9MqrnLDpikL3Lk+ctLxWOjUCW8roqKoHZYBF7XPqdAfm5AQ0EVduSIwEIAOPcjd4QgbLlqIk3
-s6BPRRyVzglTgUdf+I0rUDybaDJfJobZd8U6e4hkPvRoQ8tJefnz/qnD/63watAbJYcVTme40I3V
-KDOmVGcyaDxiKP1disKqcEJd7XQiI72oAiXmEH0y+5UwnOMks/lwaAGDMGVRjHEXI6fiRPFsfTr8
-7qvMJ3pW1OiOXVSezuBNTlmyJC7srQ1/nwxL337ev6D1zQZd3JuhcxLkHrUELLNwzhvcZ70vg645
-jAmz8EdmvvoqEPPoHqKgP5AeHACOsTm953KHhgx3NYuGPU/RoIvugKt4Iq5nw7TWFTjPHGVF3GTQ
-ry5CZ/AzXiL57hVEhDvmuT8AEQEAAYkCPgQYAQIACQUCVduSIwIbLgEpCRD0eUtW/C3G3cBdIAQZ
-AQIABgUCVduSIwAKCRAFI/9Nx3K5IPOFCACsZ/Z4s2LcEoA51TW+T5w+YevlIuq+332JtqNIpuGI
-WpGxUxyDyPT0YQWr0SObBORYNr7RP8d/I2rbaFKyaDaKvRofYr+TwXy92phBo7pdEUamBpfrm/sr
-+2BgAB2x3HWXp+IMdeVVhqQe8t4cnFm3c1fIdxADyiJuV5ge2Ml5gK5yNwqCQPh7U2RqC+lmVlMJ
-GvWInIRn2mf6A7phDYNZfOz6dkar4yyh5r9rRgrZw88r/yIlrq/c6KRUIgnPMrFYEauggOceZ827
-+jKkbKWFEuHtiCxW7kRAN25UfnGsPaF+NSGM2q1vCG4HiFydx6lMoXM0Shf8+ZwyrV/5BzAqpWwI
-AJ37tEwC58Fboynly6OGOzgPS0xKnzkXMOtquTo0qEH/1bEUsBknn795BmZOTf4oBC5blN6qRv7c
-GSP00i+sxql1NhTjJcjJtfOPUzgfW+Af/+HR648z4c7c6MCjDFKnk8ZkoGLRU7ISjenkNFzvu2bj
-lxJkil0uJDlLPbbX80ojzV1GS9g+ZxVPR+68N1QLl2FU6zsfg34upmLLHG8VG4vExzgyNkOwfTYv
-dgyRNTjnuPue6H12fZZ9uCNeG52v7lR3eoQcCxBOniwgipB8UJ52RWXblwxzCtGtDi/EWB3zLTUn
-puKcgucA0LotbihSMxhDylaARfVO1QV6csabM/g=`
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go
deleted file mode 100644
index 5c14cbc..0000000
--- a/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package pgpkeys
-
-import (
- "bytes"
- "encoding/base64"
- "fmt"
- "strings"
-
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/keybase/go-crypto/openpgp"
-)
-
-const (
- kbPrefix = "keybase:"
-)
-
-// FetchKeybasePubkeys fetches public keys from Keybase given a set of
-// usernames, which are derived from correctly formatted input entries. It
-// doesn't use their client code due to both the API and the fact that it is
-// considered alpha and probably best not to rely on it. The keys are returned
-// as base64-encoded strings.
-func FetchKeybasePubkeys(input []string) (map[string]string, error) {
- client := cleanhttp.DefaultClient()
- if client == nil {
- return nil, fmt.Errorf("unable to create an http client")
- }
-
- if len(input) == 0 {
- return nil, nil
- }
-
- usernames := make([]string, 0, len(input))
- for _, v := range input {
- if strings.HasPrefix(v, kbPrefix) {
- usernames = append(usernames, strings.TrimPrefix(v, kbPrefix))
- }
- }
-
- if len(usernames) == 0 {
- return nil, nil
- }
-
- ret := make(map[string]string, len(usernames))
- url := fmt.Sprintf("https://keybase.io/_/api/1.0/user/lookup.json?usernames=%s&fields=public_keys", strings.Join(usernames, ","))
- resp, err := client.Get(url)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- type publicKeys struct {
- Primary struct {
- Bundle string
- }
- }
-
- type them struct {
- publicKeys `json:"public_keys"`
- }
-
- type kbResp struct {
- Status struct {
- Name string
- }
- Them []them
- }
-
- out := &kbResp{
- Them: []them{},
- }
-
- if err := jsonutil.DecodeJSONFromReader(resp.Body, out); err != nil {
- return nil, err
- }
-
- if out.Status.Name != "OK" {
- return nil, fmt.Errorf("got non-OK response: %s", out.Status.Name)
- }
-
- missingNames := make([]string, 0, len(usernames))
- var keyReader *bytes.Reader
- serializedEntity := bytes.NewBuffer(nil)
- for i, themVal := range out.Them {
- if themVal.Primary.Bundle == "" {
- missingNames = append(missingNames, usernames[i])
- continue
- }
- keyReader = bytes.NewReader([]byte(themVal.Primary.Bundle))
- entityList, err := openpgp.ReadArmoredKeyRing(keyReader)
- if err != nil {
- return nil, err
- }
- if len(entityList) != 1 {
- return nil, fmt.Errorf("primary key could not be parsed for user %s", usernames[i])
- }
- if entityList[0] == nil {
- return nil, fmt.Errorf("primary key was nil for user %s", usernames[i])
- }
-
- serializedEntity.Reset()
- err = entityList[0].Serialize(serializedEntity)
- if err != nil {
- return nil, fmt.Errorf("error serializing entity for user %s: %s", usernames[i], err)
- }
-
- // The API returns values in the same ordering requested, so this should properly match
- ret[kbPrefix+usernames[i]] = base64.StdEncoding.EncodeToString(serializedEntity.Bytes())
- }
-
- if len(missingNames) > 0 {
- return nil, fmt.Errorf("unable to fetch keys for user(s) %s from keybase", strings.Join(missingNames, ","))
- }
-
- return ret, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase_test.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase_test.go
deleted file mode 100644
index ded5af5..0000000
--- a/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package pgpkeys
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/hex"
- "reflect"
- "testing"
-
- "github.com/keybase/go-crypto/openpgp"
- "github.com/keybase/go-crypto/openpgp/packet"
-)
-
-func TestFetchKeybasePubkeys(t *testing.T) {
- testset := []string{"keybase:jefferai", "keybase:hashicorp"}
- ret, err := FetchKeybasePubkeys(testset)
- if err != nil {
- t.Fatalf("bad: %v", err)
- }
-
- fingerprints := []string{}
- for _, user := range testset {
- data, err := base64.StdEncoding.DecodeString(ret[user])
- if err != nil {
- t.Fatalf("error decoding key for user %s: %v", user, err)
- }
- entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data)))
- if err != nil {
- t.Fatalf("error parsing key for user %s: %v", user, err)
- }
- fingerprints = append(fingerprints, hex.EncodeToString(entity.PrimaryKey.Fingerprint[:]))
- }
-
- exp := []string{
- "0f801f518ec853daff611e836528efcac6caa3db",
- "91a6e7f85d05c65630bef18951852d87348ffc4c",
- }
-
- if !reflect.DeepEqual(fingerprints, exp) {
- t.Fatalf("fingerprints do not match; expected \n%#v\ngot\n%#v\n", exp, fingerprints)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go
deleted file mode 100644
index c10a905..0000000
--- a/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go
+++ /dev/null
@@ -1,271 +0,0 @@
-package pgpkeys
-
-const (
- TestPrivKey1 = `lQOYBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da
-rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/
-063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f
-sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg
-8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAEAB/wL+KX0mdeISEpX
-oDgt766Key1Kthe8nbEs5dOXIsP7OR7ZPcnE2hy6gftgVFnBGEZnWVN70vmJd6Z5y9d1mI+GecXj
-UL0EpI0EmohyYDJsHUnght/5ecRNFA+VeNmGPYNQGCeHJyZOiFunGGENpHU7BbubAht8delz37Mx
-JQgvMyR6AKvg8HKBoQeqV1uMWNJE/vKwV/z1dh1sjK/GFxu05Qaq0GTfAjVLuFOyJTS95yq6gblD
-jUdbHLp7tBeqIKo9voWCJF5mGOlq3973vVoWETy9b0YYPCE/M7fXmK9dJITHqkROLMW6TgcFeIw4
-yL5KOBCHk+QGPSvyQN7R7Fd5BADwuT1HZmvg7Y9GjarKXDjxdNemUiHtba2rUzfH6uNmKNQvwQek
-nma5palNUJ4/dz1aPB21FUBXJF5yWwXEdApl+lIDU0J5m4UD26rqEVRq9Kx3GsX+yfcwObkrSzW6
-kmnQSB5KI0fIuegMTM+Jxo3pB/mIRwDTMmk+vfzIGyW+7QQA8aFwFLMdKdfLgSGbl5Z6etmOAVQ2
-Oe2ebegU9z/ewi/Rdt2s9yQiAdGVM8+q15Saz8a+kyS/l1CjNPzr3VpYx1OdZ3gb7i2xoy9GdMYR
-ZpTq3TuST95kx/9DqA97JrP23G47U0vwF/cg8ixCYF8Fz5dG4DEsxgMwKqhGdW58wMMD/iytkfMk
-Vk6Z958Rpy7lhlC6L3zpO38767bSeZ8gRRi/NMFVOSGYepKFarnfxcTiNa+EoSVA6hUo1N64nALE
-sJBpyOoTfKIpz7WwTF1+WogkiYrfM6lHon1+3qlziAcRW0IohM3g2C1i3GWdON4Cl8/PDO3R0E52
-N6iG/ctNNeMiPe60EFZhdWx0IFRlc3QgS2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUI
-AgkKCwQWAgMBAh4BAheAAAoJEOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d
-4hIHsG7kmJRTJfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C
-Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF3
-9jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poe
-o+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeUR
-BRWdA5gEVduM9QEIAL53hJ5bZJ7oEDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkf
-Rqnv981fFwGnh2+I1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a
-9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu
-9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/z
-bfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOleYwxk+FoTqXEAEQEAAQAH+wVyQXaNwnjQ
-xfW+M8SJNo0C7e+0d7HsuBTA/d/eP4bj6+X8RaRFVwiMvSAoxsqBNCLJP00qzzKfRQWJseD1H35z
-UjM7rNVUEL2k1yppyp61S0qj0TdhVUfJDYZqRYonVgRMvzfDTB1ryKrefKenQYL/jGd9VYMnKmWZ
-6GVk4WWXXx61iOt2HNcmSXKetMM1Mg67woPZkA3fJaXZ+zW0zMu4lTSB7yl3+vLGIFYILkCFnREr
-drQ+pmIMwozUAt+pBq8dylnkHh6g/FtRfWmLIMDqM1NlyuHRp3dyLDFdTA93osLG0QJblfX54W34
-byX7a4HASelGi3nPjjOAsTFDkuEEANV2viaWk1CV4ryDrXGmy4Xo32Md+laGPRcVfbJ0mjZjhQsO
-gWC1tjMs1qZMPhcrKIBCjjdAcAIrGV9h3CXc0uGuez4XxLO+TPBKaS0B8rKhnKph1YZuf+HrOhzS
-astDnOjNIT+qucCL/qSbdYpj9of3yY61S59WphPOBjoVM3BFBADka6ZCk81gx8jA2E1e9UqQDmdM
-FZaVA1E7++kqVSFRDJGnq+5GrBTwCJ+sevi+Rvf8Nx4AXvpCdtMBPX9RogsUFcR0pMrKBrgRo/Vg
-EpuodY2Ef1VtqXR24OxtRf1UwvHKydIsU05rzMAy5uGgQvTzRTXxZFLGUY31wjWqmo9VPQP+PnwA
-K83EV2kk2bsXwZ9MXg05iXqGQYR4bEc/12v04BtaNaDS53hBDO4JIa3Bnz+5oUoYhb8FgezUKA9I
-n6RdKTTP1BLAu8titeozpNF07V++dPiSE2wrIVsaNHL1pUwW0ql50titVwe+EglWiCKPtJBcCPUA
-3oepSPchiDjPqrNCYIkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZAQIABgUCVduM
-9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYulEimOPzLUX/Z
-XZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr
-9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEc
-ZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4
-EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY+XsKVYRf
-NIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcIPXFv3m3WfUln
-G/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O9uK3lQozbw2g
-H9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKf
-PRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h
-7u2CfYyFPu3AlUaGNMBlvy6PEpU=`
-
- TestPrivKey2 = `lQOYBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG
-Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4
-0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e
-Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk
-Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAEAB/oCBqTIsxlUgLtz
-HRpWW5MJ+93xvmVV0JHhRK/ygKghq+zpC6S+cn7dwrEj1JTPh+17lyemYQK+RMeiBEduoWNKuHUd
-WX353w2411rrc/VuGTglzhd8Ir2BdJlPesCzw4JQnrWqcBqN52W+iwhnE7PWVhnvItWnx6APK5Se
-q7dzFWy8Z8tNIHm0pBQbeyo6x2rHHSWkr2fs7V02qFQhii1ayFRMcgdOWSNX6CaZJuYhk/DyjApN
-9pVhi3P1pNMpFeV0Pt8Gl1f/9o6/HpAYYEt/6vtVRhFUGgtNi95oc0oyzIJxliRvd6+Z236osigQ
-QEBwj1ImRK8TKyWPlykiJWc5BADfldgOCA55o3Qz/z/oVE1mm+a3FmPPTQlHBXotNEsrWV2wmJHe
-lNQPI6ZwMtLrBSg8PUpG2Rvao6XJ4ZBl/VcDwfcLgCnALPCcL0L0Z3vH3Sc9Ta/bQWJODG7uSaI1
-iVJ7ArKNtVzTqRQWK967mol9CCqh4A0jRrH0aVEFbrqQ/QQA58iEJaFhzFZjufjC9N8Isn3Ky7xu
-h+dk001RNCb1GnNZcx4Ld2IB+uXyYjtg7dNaUhGgGuCBo9nax89bMsBzzUukx3SHq1pxopMg6Dm8
-ImBoIAicuQWgEkaP2T0rlwCozUalJZaG1gyrzkPhkeY7CglpJycHLHfY2MIb46c8+58D/iJ83Q5j
-Y4x+sqW2QeUYFwqCcOW8Urg64UxEkgXZXiNMwTAJCaxp/Pz7cgeUDwgv+6CXEdnT1910+byzK9ha
-V1Q/65+/JYuCeyHxcoAb4Wtpdl7GALGd/1G0UAmq47yrefEr/b00uS35i1qUUhOzo1NmEZch/bvF
-kmJ+WtAHunZcOCu0EFZhdWx0IFRlc3QgS2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUI
-AgkKCwQWAgMBAh4BAheAAAoJEOuDLGfrXolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHip
-ZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABq
-hb5ojexdnAYRswaHV201ZCclj9rnJN1PAg0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmG
-kdrg8K8ARmRILjmwuBAgJM0eXBZHNGWXelk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0
-vDttB+ZXqF88W9jAYlvdgbTtajNF5IDYDjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlx
-k4edA5gEVduQkQEIAOjZV5tbpfIh5QefpIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe
-4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg+YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/t
-GF5xE3e5CoZRsHV/c92h3t1LdJNOnC5mUKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBH
-yt0tdHtIWuQv6joTJzujqViRhlCwQYzQSKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1r
-ENO8JOuPu6tMS+znFu67skq2gFFZwCQWIjdHm+2ukE+PE580WAWudyMAEQEAAQAH/i7ndRPI+t0T
-AdEu0dTIdyrrg3g7gd471kQtIVZZwTYSy2yhNY/Ciu72s3ab8QNCxY8dNL5bRk8FKjHslAoNSFdO
-8iZSLiDgIHOZOcjYe6pqdgQaeTHodm1Otrn2SbB+K/3oX6W/y1xe18aSojGba/nHMj5PeJbIN9Pi
-jmh0WMLD/0rmkTTxR7qQ5+kMV4O29xY4qjdYRD5O0adeZX0mNncmlmQ+rX9yxrtSgFROu1jwVtfP
-hcNetifTTshJnTwND8hux5ECEadlIVBHypW28Hth9TRBXmddTmv7L7mdtUO6DybgkpWpw4k4LPsk
-uZ6aY4wcGRp7EVfWGr9NHbq/n+0EAOlhDXIGdylkQsndjBMyhPsXZa5fFBmOyHjXj733195Jgr1v
-ZjaIomrA9cvYrmN75oKrG1jJsMEl6HfC/ZPzEj6E51/p1PRdHP7CdUUA+DG8x4M3jn+e43psVuAR
-a1XbN+8/bOa0ubt7ljVPjAEvWRSvU9dRaQz93w3fduAuM07dBAD/ayK3e0d6JMJMrU50lNOXQBgL
-rFbg4rWzPO9BJQdhjOhmOZQiUa1Q+EV+s95yIUg1OAfaMP9KRIljr5RCdGNS6WoMNBAQOSrZpelf
-jW4NpzphNfWDGVkUoPoskVtJz/nu9d860dGd3Al0kSmtUpMu5QKlo+sSxXUPbWLUn8V9/wP/ScCW
-H+0gtL4R7SFazPeTIP+Cu5oR7A/DlFVLJKa3vo+atkhSvwxHGbg04vb/W4mKhGGVtMBtlhRmaWOe
-PhUulU5FdaYsdlpN/Yd+hhgU6NHlyImPGVEHWD8c6CG8qoZfpR33j2sqshs4i/MtJZeBvl62vxPn
-9bDN7KAjFNll9axAjIkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZAQIABgUCVduQ
-kQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDDhnV3bXQsCvn/
-6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQe3l4CqJvkn6j
-ybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4KBIrp/bhG6Pdn
-igKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eYENtyOmEMWOFC
-LLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H/1trYUtJjXQK
-HmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7PkUZTfpaP/L6
-DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0UPEnjvtZTp5yO
-hTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQdw/2epIewH0L/
-FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4MFOMVRn1dc3q
-dXlg3mimA+iK7tABQfG0RJ9YzWs=`
-
- TestPrivKey3 = `lQOXBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj
-6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4
-Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH
-CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy
-resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAEAB/dQbElFIa0VklZa
-39ZLhtbBxACSWH3ql3EtRZaB2Mh4zSALbFyJDQfScOy8AZHmv66Ozxit9X9WsYr9OzcHujgl/2da
-A3lybF6iLw1YDNaL11G6kuyn5sFP6lYGMRGOIWSik9oSVF6slo8m8ujRLdBsdMXVcElHKzCJiWmt
-JZHEnUkl9X96fIPajMBfWjHHwcaeMOc77nvjwqy5wC4EY8TSVYzxeZHL7DADQ0EHBcThlmfizpCq
-26LMVb6ju8STH7uDDFyKmhr/hC2vOkt+PKsvBCmW8/ESanO1zKPD9cvSsOWr2rZWNnkDRftqzOU5
-OCrI+3o9E74+toNb07bPntEEAMEStOzSvqZ6NKdh7EZYPA4mkkFC+EiHYIoinP1sd9V8O2Hq+dzx
-yFHtWu0LmP6uWXk45vsP9y1UMJcEa33ew5JJa7zgucI772/BNvd/Oys/PqwIAl6uNIY8uYLgmn4L
-1IPatp7vDiXzZSivPZd4yN4S4zCypZp9cnpO3qv8q7CtBADW87IA0TabdoxiN+m4XL7sYDRIfglr
-MRPAlfrkAUaGDBx/t1xb6IaKk7giFdwHpTI6+g9XNkqKqogMe4Fp+nsd1xtfsNUBn6iKZavm5kXe
-Lp9QgE+K6mvIreOTe2PKQqXqgPRG6+SRGatoKeY76fIpd8AxOJyWERxcq2lUHLn45QP/UXDTcYB7
-gzJtZrfpXN0GqQ0lYXMzbQfLnkUsu3mYzArfNy0otzEmKTkwmKclNY1/EJSzSdHfgmeA260a0nLK
-64C0wPgSmOqw90qwi5odAYSjSFBapDbyGF86JpHrLxyEEpGoXanRPwWfbiWp19Nwg6nknA87AtaM
-3+AHjbWzwCpHL7QQVmF1bHQgVGVzdCBLZXkgM4kBOAQTAQIAIgUCVduSIwIbLwYLCQgHAwIGFQgC
-CQoLBBYCAwECHgECF4AACgkQ9HlLVvwtxt1aMQf/aaGoL1rRWTUjM6DEShXFhWpV29rEjSdNk5N+
-ZwVifgdCVD5IsSjI1Z7mO2SHHiTm4eKnHAofM6/TZgzXg1YLpu8rDYJARMsM8bgK/xgxSamGjm2c
-wN220jOnwePIlG0drNTW5N6zb/K6qHoscJ6NUkjS5JPdGJuq7B0bdCM8/xSbG75gL34U5bYqK38B
-DwmW4UMl2rf/BJfxV9hmsZ2Cat4TspgyiWEKTMZI+PugXKDDwuoqgm+320K4EqFkwG4y/WwHkKgk
-hZ0+io5lzhTsvVd2p8q8VlH9GG5eA3WWQj0yqucsOmKQvcuT5y0vFY6NQJbyuioqgdlgEXtc+p0B
-+Z0DmARV25IjAQgA49yN3hCBsuWoiTezoE9FHJXOCVOBR1/4jStQPJtoMl8mhtl3xTp7iGQ+9GhD
-y0l5+fP+qcP/rfBq0BslhxVOZ7jQjdUoM6ZUZzJoPGIo/V2KwqpwQl3tdCIjvagCJeYQfTL7lTCc
-4ySz+XBoAYMwZVGMcRcjp+JE8Wx9Ovzuq8wnelbU6I5dVJ7O4E1OWbIkLuytDX+fDEvfft6/oPXN
-Bl3cm6FzEuQetQQss3DOG9xnvS+DrjmMCbPwR2a++ioQ8+geoqA/kB4cAI6xOb3ncoeGDHc1i4Y9
-T9Ggi+6Aq3girmfDtNYVOM8cZUXcZNCvLkJn8DNeIvnuFUSEO+a5PwARAQABAAf/TPd98CmRNdV/
-VUI8aYT9Kkervdi4DVzsfvrHcoFn88PSJrCkVTmI6qw526Kwa6VZD0YMmll7LszLt5nD1lorDrwN
-rir3FmMzlVwge20IvXRwX4rkunYxtA2oFvL+LsEEhtXGx0ERbWRDapk+eGxQ15hxIO4Y/Cdg9E+a
-CWfQUrTSnC6qMVfVYMGfnM1yNX3OWattEFfmxQas5XqQk/0FgjCZALixdanjN/r1tjp5/2MiSD8N
-Wkemzsr6yPicnc3+BOZc5YOOnH8FqBvVHcDlSJI6pCOCEiO3Pq2QEk/1evONulbF116mLnQoGrpp
-W77l+5O42VUpZfjROCPd5DYyMQQA492CFXZpDIJ2emB9/nK8X6IzdVRK3oof8btbSNnme5afIzhs
-wR1ruX30O7ThfB+5ezpbgK1C988CWkr9SNSTy43omarafGig6/Y1RzdiITILuIGfbChoSpc70jXx
-U0nzJ/1i9yZ/vDgP3EC2miRhlDcp5w0Bu0oMBlgG/1uhj0cEAP/+7aFGP0fo2MZPhyl5feHKWj4k
-85XoAIpMBnzF6HTGU3ljAE56a+4sVw3bWB755DPhvpZvDkX60I9iIJxio8TK5ITdfjlLhxuskXyt
-ycwWI/4J+soeq4meoxK9jxZJuDl/qvoGfyzNg1oy2OBehX8+6erW46kr6Z/MQutS3zJJBACmJHrK
-VR40qD7a8KbvfuM3ruwlm5JqT/Ykq1gfKKxHjWDIUIeyBX/axGQvAGNYeuuQCzZ0+QsEWur3C4kN
-U+Pb5K1WGyOKkhJzivSI56AG3d8TA/Q0JhqST6maY0fvUoahWSCcpd7MULa3n1zx5Wsvi8mkVtup
-Js/IDi/kqneqM0XviQI+BBgBAgAJBQJV25IjAhsuASkJEPR5S1b8LcbdwF0gBBkBAgAGBQJV25Ij
-AAoJEAUj/03Hcrkg84UIAKxn9nizYtwSgDnVNb5PnD5h6+Ui6r7ffYm2o0im4YhakbFTHIPI9PRh
-BavRI5sE5Fg2vtE/x38jattoUrJoNoq9Gh9iv5PBfL3amEGjul0RRqYGl+ub+yv7YGAAHbHcdZen
-4gx15VWGpB7y3hycWbdzV8h3EAPKIm5XmB7YyXmArnI3CoJA+HtTZGoL6WZWUwka9YichGfaZ/oD
-umENg1l87Pp2RqvjLKHmv2tGCtnDzyv/IiWur9zopFQiCc8ysVgRq6CA5x5nzbv6MqRspYUS4e2I
-LFbuREA3blR+caw9oX41IYzarW8IbgeIXJ3HqUyhczRKF/z5nDKtX/kHMCqlbAgAnfu0TALnwVuj
-KeXLo4Y7OA9LTEqfORcw62q5OjSoQf/VsRSwGSefv3kGZk5N/igELluU3qpG/twZI/TSL6zGqXU2
-FOMlyMm1849TOB9b4B//4dHrjzPhztzowKMMUqeTxmSgYtFTshKN6eQ0XO+7ZuOXEmSKXS4kOUs9
-ttfzSiPNXUZL2D5nFU9H7rw3VAuXYVTrOx+Dfi6mYsscbxUbi8THODI2Q7B9Ni92DJE1OOe4+57o
-fXZ9ln24I14bna/uVHd6hBwLEE6eLCCKkHxQnnZFZduXDHMK0a0OL8RYHfMtNSem4pyC5wDQui1u
-KFIzGEPKVoBF9U7VBXpyxpsz+A==`
-
- TestPubKey1 = `mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da
-rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/
-063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f
-sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg
-8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3Qg
-S2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOfLr44B
-HbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRTJfjECi+AuTGeDwBy84TD
-cRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3CEe8cMwIPqPT2kajJVdOyrvkyuFOdPFOE
-A7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlB
-C0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXa
-QKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7oEDCn
-aY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I1Ktm698UAZS9Jt8y
-jak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb
-6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5N
-ZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu
-9p315E87DOleYwxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ
-AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYu
-lEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHN
-C1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0
-YwKoz3h9+QEcZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJi
-oPn2jVMnXCm4EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH
-/AtY+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcI
-PXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O
-9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx
-8iDV+dNtDVKfPRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKd
-OIu60YPNE4+h7u2CfYyFPu3AlUaGNMBlvy6PEpU=`
-
- TestPubKey2 = `mQENBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG
-Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4
-0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e
-Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk
-Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAG0EFZhdWx0IFRlc3Qg
-S2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOuDLGfr
-XolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHipZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO
-2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABqhb5ojexdnAYRswaHV201ZCclj9rnJN1P
-Ag0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmGkdrg8K8ARmRILjmwuBAgJM0eXBZHNGWX
-elk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0vDttB+ZXqF88W9jAYlvdgbTtajNF5IDY
-DjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlxk4e5AQ0EVduQkQEIAOjZV5tbpfIh5Qef
-pIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg
-+YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/tGF5xE3e5CoZRsHV/c92h3t1LdJNOnC5m
-UKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBHyt0tdHtIWuQv6joTJzujqViRhlCwQYzQ
-SKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1rENO8JOuPu6tMS+znFu67skq2gFFZwCQW
-IjdHm+2ukE+PE580WAWudyMAEQEAAYkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZ
-AQIABgUCVduQkQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDD
-hnV3bXQsCvn/6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQ
-e3l4CqJvkn6jybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4K
-BIrp/bhG6PdnigKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eY
-ENtyOmEMWOFCLLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H
-/1trYUtJjXQKHmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7
-PkUZTfpaP/L6DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0U
-PEnjvtZTp5yOhTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQd
-w/2epIewH0L/FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4
-MFOMVRn1dc3qdXlg3mimA+iK7tABQfG0RJ9YzWs=`
-
- TestPubKey3 = `mQENBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj
-6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4
-Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH
-CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy
-resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAG0EFZhdWx0IFRlc3Qg
-S2V5IDOJATgEEwECACIFAlXbkiMCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEPR5S1b8
-LcbdWjEH/2mhqC9a0Vk1IzOgxEoVxYVqVdvaxI0nTZOTfmcFYn4HQlQ+SLEoyNWe5jtkhx4k5uHi
-pxwKHzOv02YM14NWC6bvKw2CQETLDPG4Cv8YMUmpho5tnMDdttIzp8HjyJRtHazU1uTes2/yuqh6
-LHCejVJI0uST3RibquwdG3QjPP8Umxu+YC9+FOW2Kit/AQ8JluFDJdq3/wSX8VfYZrGdgmreE7KY
-MolhCkzGSPj7oFygw8LqKoJvt9tCuBKhZMBuMv1sB5CoJIWdPoqOZc4U7L1XdqfKvFZR/RhuXgN1
-lkI9MqrnLDpikL3Lk+ctLxWOjUCW8roqKoHZYBF7XPqdAfm5AQ0EVduSIwEIAOPcjd4QgbLlqIk3
-s6BPRRyVzglTgUdf+I0rUDybaDJfJobZd8U6e4hkPvRoQ8tJefnz/qnD/63watAbJYcVTme40I3V
-KDOmVGcyaDxiKP1disKqcEJd7XQiI72oAiXmEH0y+5UwnOMks/lwaAGDMGVRjHEXI6fiRPFsfTr8
-7qvMJ3pW1OiOXVSezuBNTlmyJC7srQ1/nwxL337ev6D1zQZd3JuhcxLkHrUELLNwzhvcZ70vg645
-jAmz8EdmvvoqEPPoHqKgP5AeHACOsTm953KHhgx3NYuGPU/RoIvugKt4Iq5nw7TWFTjPHGVF3GTQ
-ry5CZ/AzXiL57hVEhDvmuT8AEQEAAYkCPgQYAQIACQUCVduSIwIbLgEpCRD0eUtW/C3G3cBdIAQZ
-AQIABgUCVduSIwAKCRAFI/9Nx3K5IPOFCACsZ/Z4s2LcEoA51TW+T5w+YevlIuq+332JtqNIpuGI
-WpGxUxyDyPT0YQWr0SObBORYNr7RP8d/I2rbaFKyaDaKvRofYr+TwXy92phBo7pdEUamBpfrm/sr
-+2BgAB2x3HWXp+IMdeVVhqQe8t4cnFm3c1fIdxADyiJuV5ge2Ml5gK5yNwqCQPh7U2RqC+lmVlMJ
-GvWInIRn2mf6A7phDYNZfOz6dkar4yyh5r9rRgrZw88r/yIlrq/c6KRUIgnPMrFYEauggOceZ827
-+jKkbKWFEuHtiCxW7kRAN25UfnGsPaF+NSGM2q1vCG4HiFydx6lMoXM0Shf8+ZwyrV/5BzAqpWwI
-AJ37tEwC58Fboynly6OGOzgPS0xKnzkXMOtquTo0qEH/1bEUsBknn795BmZOTf4oBC5blN6qRv7c
-GSP00i+sxql1NhTjJcjJtfOPUzgfW+Af/+HR648z4c7c6MCjDFKnk8ZkoGLRU7ISjenkNFzvu2bj
-lxJkil0uJDlLPbbX80ojzV1GS9g+ZxVPR+68N1QLl2FU6zsfg34upmLLHG8VG4vExzgyNkOwfTYv
-dgyRNTjnuPue6H12fZZ9uCNeG52v7lR3eoQcCxBOniwgipB8UJ52RWXblwxzCtGtDi/EWB3zLTUn
-puKcgucA0LotbihSMxhDylaARfVO1QV6csabM/g=`
-
- TestAAPubKey1 = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzz
-wiMwBS5cD0darGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7
-H+/mhfFvKmgr0Y5kDCF1j0T/063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX
-1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0fsF5St9jhO7mbZU9EFkv9O3t3EaUR
-fHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg8hQssKeVGpuskTdz
-5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3QgS2V5
-IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJ
-EOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRT
-JfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C
-Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1Z
-mumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4z
-J2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+
-7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7o
-EDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I
-1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okj
-h5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTj
-OleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2o
-P/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOle
-Ywxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ
-AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVh
-EGipBmpDGRYulEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHk
-GRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRd
-tPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEcZHvsjSZjgydKvfLY
-cm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4EKc7
-fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY
-+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7
-moViAAcIPXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWko
-jHqyob3cyLgy6z9Q557O9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJ
-iEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKfPRENiLOOc19MmS+phmUy
-rbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h7u2CfYyF
-Pu3AlUaGNMBlvy6PEpU=
-=NUTS
------END PGP PUBLIC KEY BLOCK-----`
-)
diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/logger.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/logger.go
deleted file mode 100644
index fff8ff1..0000000
--- a/vendor/github.com/hashicorp/vault/helper/pluginutil/logger.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package pluginutil
-
-import (
- "bytes"
- "fmt"
- stdlog "log"
- "strings"
-
- hclog "github.com/hashicorp/go-hclog"
- log "github.com/mgutz/logxi/v1"
-)
-
-// pluginLogFaker is a wrapper on logxi.Logger that
-// implements hclog.Logger
-type hclogFaker struct {
- logger log.Logger
-
- name string
- implied []interface{}
-}
-
-func (f *hclogFaker) buildLog(msg string, args ...interface{}) (string, []interface{}) {
- if f.name != "" {
- msg = fmt.Sprintf("%s: %s", f.name, msg)
- }
- args = append(f.implied, args...)
-
- return msg, args
-}
-
-func (f *hclogFaker) Trace(msg string, args ...interface{}) {
- msg, args = f.buildLog(msg, args...)
- f.logger.Trace(msg, args...)
-}
-
-func (f *hclogFaker) Debug(msg string, args ...interface{}) {
- msg, args = f.buildLog(msg, args...)
- f.logger.Debug(msg, args...)
-}
-
-func (f *hclogFaker) Info(msg string, args ...interface{}) {
- msg, args = f.buildLog(msg, args...)
- f.logger.Info(msg, args...)
-}
-
-func (f *hclogFaker) Warn(msg string, args ...interface{}) {
- msg, args = f.buildLog(msg, args...)
- f.logger.Warn(msg, args...)
-}
-
-func (f *hclogFaker) Error(msg string, args ...interface{}) {
- msg, args = f.buildLog(msg, args...)
- f.logger.Error(msg, args...)
-}
-
-func (f *hclogFaker) IsTrace() bool {
- return f.logger.IsTrace()
-}
-
-func (f *hclogFaker) IsDebug() bool {
- return f.logger.IsDebug()
-}
-
-func (f *hclogFaker) IsInfo() bool {
- return f.logger.IsInfo()
-}
-
-func (f *hclogFaker) IsWarn() bool {
- return f.logger.IsWarn()
-}
-
-func (f *hclogFaker) IsError() bool {
- return !f.logger.IsTrace() && !f.logger.IsDebug() && !f.logger.IsInfo() && !f.IsWarn()
-}
-
-func (f *hclogFaker) With(args ...interface{}) hclog.Logger {
- var nf = *f
- nf.implied = append(nf.implied, args...)
- return f
-}
-
-func (f *hclogFaker) Named(name string) hclog.Logger {
- var nf = *f
- if nf.name != "" {
- nf.name = nf.name + "." + name
- }
- return &nf
-}
-
-func (f *hclogFaker) ResetNamed(name string) hclog.Logger {
- var nf = *f
- nf.name = name
- return &nf
-}
-
-func (f *hclogFaker) StandardLogger(opts *hclog.StandardLoggerOptions) *stdlog.Logger {
- if opts == nil {
- opts = &hclog.StandardLoggerOptions{}
- }
-
- return stdlog.New(&stdlogAdapter{f, opts.InferLevels}, "", 0)
-}
-
-// Provides a io.Writer to shim the data out of *log.Logger
-// and back into our Logger. This is basically the only way to
-// build upon *log.Logger.
-type stdlogAdapter struct {
- hl hclog.Logger
- inferLevels bool
-}
-
-// Take the data, infer the levels if configured, and send it through
-// a regular Logger
-func (s *stdlogAdapter) Write(data []byte) (int, error) {
- str := string(bytes.TrimRight(data, " \t\n"))
-
- if s.inferLevels {
- level, str := s.pickLevel(str)
- switch level {
- case hclog.Trace:
- s.hl.Trace(str)
- case hclog.Debug:
- s.hl.Debug(str)
- case hclog.Info:
- s.hl.Info(str)
- case hclog.Warn:
- s.hl.Warn(str)
- case hclog.Error:
- s.hl.Error(str)
- default:
- s.hl.Info(str)
- }
- } else {
- s.hl.Info(str)
- }
-
- return len(data), nil
-}
-
-// Detect, based on conventions, what log level this is
-func (s *stdlogAdapter) pickLevel(str string) (hclog.Level, string) {
- switch {
- case strings.HasPrefix(str, "[DEBUG]"):
- return hclog.Debug, strings.TrimSpace(str[7:])
- case strings.HasPrefix(str, "[TRACE]"):
- return hclog.Trace, strings.TrimSpace(str[7:])
- case strings.HasPrefix(str, "[INFO]"):
- return hclog.Info, strings.TrimSpace(str[6:])
- case strings.HasPrefix(str, "[WARN]"):
- return hclog.Warn, strings.TrimSpace(str[7:])
- case strings.HasPrefix(str, "[ERROR]"):
- return hclog.Error, strings.TrimSpace(str[7:])
- case strings.HasPrefix(str, "[ERR]"):
- return hclog.Error, strings.TrimSpace(str[5:])
- default:
- return hclog.Info, str
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/mlock.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/mlock.go
deleted file mode 100644
index 1660ca8..0000000
--- a/vendor/github.com/hashicorp/vault/helper/pluginutil/mlock.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package pluginutil
-
-import (
- "os"
-
- "github.com/hashicorp/vault/helper/mlock"
-)
-
-var (
- // PluginMlockEnabled is the ENV name used to pass the configuration for
- // enabling mlock
- PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED"
-)
-
-// OptionallyEnableMlock determines if mlock should be called, and if so enables
-// mlock.
-func OptionallyEnableMlock() error {
- if os.Getenv(PluginMlockEnabled) == "true" {
- return mlock.LockMemory()
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/runner.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/runner.go
deleted file mode 100644
index 2047651..0000000
--- a/vendor/github.com/hashicorp/vault/helper/pluginutil/runner.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package pluginutil
-
-import (
- "crypto/sha256"
- "crypto/tls"
- "flag"
- "fmt"
- "os/exec"
- "time"
-
- plugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/wrapping"
- log "github.com/mgutz/logxi/v1"
-)
-
-// Looker defines the plugin Lookup function that looks into the plugin catalog
-// for availible plugins and returns a PluginRunner
-type Looker interface {
- LookupPlugin(string) (*PluginRunner, error)
-}
-
-// Wrapper interface defines the functions needed by the runner to wrap the
-// metadata needed to run a plugin process. This includes looking up Mlock
-// configuration and wrapping data in a respose wrapped token.
-// logical.SystemView implementataions satisfy this interface.
-type RunnerUtil interface {
- ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error)
- MlockEnabled() bool
-}
-
-// LookWrapper defines the functions for both Looker and Wrapper
-type LookRunnerUtil interface {
- Looker
- RunnerUtil
-}
-
-// PluginRunner defines the metadata needed to run a plugin securely with
-// go-plugin.
-type PluginRunner struct {
- Name string `json:"name" structs:"name"`
- Command string `json:"command" structs:"command"`
- Args []string `json:"args" structs:"args"`
- Sha256 []byte `json:"sha256" structs:"sha256"`
- Builtin bool `json:"builtin" structs:"builtin"`
- BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"`
-}
-
-// Run takes a wrapper RunnerUtil instance along with the go-plugin paramaters and
-// returns a configured plugin.Client with TLS Configured and a wrapping token set
-// on PluginUnwrapTokenEnv for plugin process consumption.
-func (r *PluginRunner) Run(wrapper RunnerUtil, pluginMap map[string]plugin.Plugin, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) {
- return r.runCommon(wrapper, pluginMap, hs, env, logger, false)
-}
-
-// RunMetadataMode returns a configured plugin.Client that will dispense a plugin
-// in metadata mode. The PluginMetadaModeEnv is passed in as part of the Cmd to
-// plugin.Client, and consumed by the plugin process on pluginutil.VaultPluginTLSProvider.
-func (r *PluginRunner) RunMetadataMode(wrapper RunnerUtil, pluginMap map[string]plugin.Plugin, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) {
- return r.runCommon(wrapper, pluginMap, hs, env, logger, true)
-
-}
-
-func (r *PluginRunner) runCommon(wrapper RunnerUtil, pluginMap map[string]plugin.Plugin, hs plugin.HandshakeConfig, env []string, logger log.Logger, isMetadataMode bool) (*plugin.Client, error) {
- cmd := exec.Command(r.Command, r.Args...)
- cmd.Env = append(cmd.Env, env...)
-
- // Add the mlock setting to the ENV of the plugin
- if wrapper.MlockEnabled() {
- cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true"))
- }
-
- // Create logger for the plugin client
- clogger := &hclogFaker{
- logger: logger,
- }
- namedLogger := clogger.ResetNamed("plugin")
-
- var clientTLSConfig *tls.Config
- if !isMetadataMode {
- // Add the metadata mode ENV and set it to false
- cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMetadaModeEnv, "false"))
-
- // Get a CA TLS Certificate
- certBytes, key, err := generateCert()
- if err != nil {
- return nil, err
- }
-
- // Use CA to sign a client cert and return a configured TLS config
- clientTLSConfig, err = createClientTLSConfig(certBytes, key)
- if err != nil {
- return nil, err
- }
-
- // Use CA to sign a server cert and wrap the values in a response wrapped
- // token.
- wrapToken, err := wrapServerConfig(wrapper, certBytes, key)
- if err != nil {
- return nil, err
- }
-
- // Add the response wrap token to the ENV of the plugin
- cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken))
- } else {
- namedLogger = clogger.ResetNamed("plugin.metadata")
- cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMetadaModeEnv, "true"))
- }
-
- secureConfig := &plugin.SecureConfig{
- Checksum: r.Sha256,
- Hash: sha256.New(),
- }
-
- clientConfig := &plugin.ClientConfig{
- HandshakeConfig: hs,
- Plugins: pluginMap,
- Cmd: cmd,
- SecureConfig: secureConfig,
- TLSConfig: clientTLSConfig,
- Logger: namedLogger,
- }
-
- client := plugin.NewClient(clientConfig)
-
- return client, nil
-}
-
-type APIClientMeta struct {
- // These are set by the command line flags.
- flagCACert string
- flagCAPath string
- flagClientCert string
- flagClientKey string
- flagInsecure bool
-}
-
-func (f *APIClientMeta) FlagSet() *flag.FlagSet {
- fs := flag.NewFlagSet("vault plugin settings", flag.ContinueOnError)
-
- fs.StringVar(&f.flagCACert, "ca-cert", "", "")
- fs.StringVar(&f.flagCAPath, "ca-path", "", "")
- fs.StringVar(&f.flagClientCert, "client-cert", "", "")
- fs.StringVar(&f.flagClientKey, "client-key", "", "")
- fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "")
-
- return fs
-}
-
-func (f *APIClientMeta) GetTLSConfig() *api.TLSConfig {
- // If we need custom TLS configuration, then set it
- if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure {
- t := &api.TLSConfig{
- CACert: f.flagCACert,
- CAPath: f.flagCAPath,
- ClientCert: f.flagClientCert,
- ClientKey: f.flagClientKey,
- TLSServerName: "",
- Insecure: f.flagInsecure,
- }
-
- return t
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/tls.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/tls.go
deleted file mode 100644
index 112d33c..0000000
--- a/vendor/github.com/hashicorp/vault/helper/pluginutil/tls.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package pluginutil
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/tls"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/base64"
- "errors"
- "fmt"
- "net/url"
- "os"
- "time"
-
- "github.com/SermoDigital/jose/jws"
- "github.com/hashicorp/errwrap"
- uuid "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/certutil"
-)
-
-var (
- // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the
- // plugin.
- PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN"
-
- // PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded
- // string. Used for testing.
- PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM"
-
- // PluginMetadaModeEnv is an ENV name used to disable TLS communication
- // to bootstrap mounting plugins.
- PluginMetadaModeEnv = "VAULT_PLUGIN_METADATA_MODE"
-)
-
-// generateCert is used internally to create certificates for the plugin
-// client and server.
-func generateCert() ([]byte, *ecdsa.PrivateKey, error) {
- key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
- if err != nil {
- return nil, nil, err
- }
-
- host, err := uuid.GenerateUUID()
- if err != nil {
- return nil, nil, err
- }
-
- sn, err := certutil.GenerateSerialNumber()
- if err != nil {
- return nil, nil, err
- }
-
- template := &x509.Certificate{
- Subject: pkix.Name{
- CommonName: host,
- },
- DNSNames: []string{host},
- ExtKeyUsage: []x509.ExtKeyUsage{
- x509.ExtKeyUsageClientAuth,
- x509.ExtKeyUsageServerAuth,
- },
- KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
- SerialNumber: sn,
- NotBefore: time.Now().Add(-30 * time.Second),
- NotAfter: time.Now().Add(262980 * time.Hour),
- IsCA: true,
- }
-
- certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
- if err != nil {
- return nil, nil, errwrap.Wrapf("unable to generate client certificate: {{err}}", err)
- }
-
- return certBytes, key, nil
-}
-
-// createClientTLSConfig creates a signed certificate and returns a configured
-// TLS config.
-func createClientTLSConfig(certBytes []byte, key *ecdsa.PrivateKey) (*tls.Config, error) {
- clientCert, err := x509.ParseCertificate(certBytes)
- if err != nil {
- return nil, fmt.Errorf("error parsing generated plugin certificate: %v", err)
- }
-
- cert := tls.Certificate{
- Certificate: [][]byte{certBytes},
- PrivateKey: key,
- Leaf: clientCert,
- }
-
- clientCertPool := x509.NewCertPool()
- clientCertPool.AddCert(clientCert)
-
- tlsConfig := &tls.Config{
- Certificates: []tls.Certificate{cert},
- RootCAs: clientCertPool,
- ServerName: clientCert.Subject.CommonName,
- MinVersion: tls.VersionTLS12,
- }
-
- tlsConfig.BuildNameToCertificate()
-
- return tlsConfig, nil
-}
-
-// wrapServerConfig is used to create a server certificate and private key, then
-// wrap them in an unwrap token for later retrieval by the plugin.
-func wrapServerConfig(sys RunnerUtil, certBytes []byte, key *ecdsa.PrivateKey) (string, error) {
- rawKey, err := x509.MarshalECPrivateKey(key)
- if err != nil {
- return "", err
- }
-
- wrapInfo, err := sys.ResponseWrapData(map[string]interface{}{
- "ServerCert": certBytes,
- "ServerKey": rawKey,
- }, time.Second*60, true)
- if err != nil {
- return "", err
- }
-
- return wrapInfo.Token, nil
-}
-
-// VaultPluginTLSProvider is run inside a plugin and retrives the response
-// wrapped TLS certificate from vault. It returns a configured TLS Config.
-func VaultPluginTLSProvider(apiTLSConfig *api.TLSConfig) func() (*tls.Config, error) {
- if os.Getenv(PluginMetadaModeEnv) == "true" {
- return nil
- }
-
- return func() (*tls.Config, error) {
- unwrapToken := os.Getenv(PluginUnwrapTokenEnv)
-
- // Parse the JWT and retrieve the vault address
- wt, err := jws.ParseJWT([]byte(unwrapToken))
- if err != nil {
- return nil, fmt.Errorf("error decoding token: %s", err)
- }
- if wt == nil {
- return nil, errors.New("nil decoded token")
- }
-
- addrRaw := wt.Claims().Get("addr")
- if addrRaw == nil {
- return nil, errors.New("decoded token does not contain primary cluster address")
- }
- vaultAddr, ok := addrRaw.(string)
- if !ok {
- return nil, errors.New("decoded token's address not valid")
- }
- if vaultAddr == "" {
- return nil, errors.New(`no address for the vault found`)
- }
-
- // Sanity check the value
- if _, err := url.Parse(vaultAddr); err != nil {
- return nil, fmt.Errorf("error parsing the vault address: %s", err)
- }
-
- // Unwrap the token
- clientConf := api.DefaultConfig()
- clientConf.Address = vaultAddr
- if apiTLSConfig != nil {
- err := clientConf.ConfigureTLS(apiTLSConfig)
- if err != nil {
- return nil, errwrap.Wrapf("error configuring api client {{err}}", err)
- }
- }
- client, err := api.NewClient(clientConf)
- if err != nil {
- return nil, errwrap.Wrapf("error during api client creation: {{err}}", err)
- }
-
- secret, err := client.Logical().Unwrap(unwrapToken)
- if err != nil {
- return nil, errwrap.Wrapf("error during token unwrap request: {{err}}", err)
- }
- if secret == nil {
- return nil, errors.New("error during token unwrap request: secret is nil")
- }
-
- // Retrieve and parse the server's certificate
- serverCertBytesRaw, ok := secret.Data["ServerCert"].(string)
- if !ok {
- return nil, errors.New("error unmarshalling certificate")
- }
-
- serverCertBytes, err := base64.StdEncoding.DecodeString(serverCertBytesRaw)
- if err != nil {
- return nil, fmt.Errorf("error parsing certificate: %v", err)
- }
-
- serverCert, err := x509.ParseCertificate(serverCertBytes)
- if err != nil {
- return nil, fmt.Errorf("error parsing certificate: %v", err)
- }
-
- // Retrieve and parse the server's private key
- serverKeyB64, ok := secret.Data["ServerKey"].(string)
- if !ok {
- return nil, errors.New("error unmarshalling certificate")
- }
-
- serverKeyRaw, err := base64.StdEncoding.DecodeString(serverKeyB64)
- if err != nil {
- return nil, fmt.Errorf("error parsing certificate: %v", err)
- }
-
- serverKey, err := x509.ParseECPrivateKey(serverKeyRaw)
- if err != nil {
- return nil, fmt.Errorf("error parsing certificate: %v", err)
- }
-
- // Add CA cert to the cert pool
- caCertPool := x509.NewCertPool()
- caCertPool.AddCert(serverCert)
-
- // Build a certificate object out of the server's cert and private key.
- cert := tls.Certificate{
- Certificate: [][]byte{serverCertBytes},
- PrivateKey: serverKey,
- Leaf: serverCert,
- }
-
- // Setup TLS config
- tlsConfig := &tls.Config{
- ClientCAs: caCertPool,
- RootCAs: caCertPool,
- ClientAuth: tls.RequireAndVerifyClientCert,
- // TLS 1.2 minimum
- MinVersion: tls.VersionTLS12,
- Certificates: []tls.Certificate{cert},
- }
- tlsConfig.BuildNameToCertificate()
-
- return tlsConfig, nil
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/policies/policies.go b/vendor/github.com/hashicorp/vault/helper/policies/policies.go
deleted file mode 100644
index 1e25522..0000000
--- a/vendor/github.com/hashicorp/vault/helper/policies/policies.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package policies
-
-import "sort"
-
-// ComparePolicies checks whether the given policy sets are equivalent, as in,
-// they contain the same values. The benefit of this method is that it leaves
-// the "default" policy out of its comparisons as it may be added later by core
-// after a set of policies has been saved by a backend.
-func EquivalentPolicies(a, b []string) bool {
- if a == nil && b == nil {
- return true
- }
-
- if a == nil || b == nil {
- return false
- }
-
- // First we'll build maps to ensure unique values and filter default
- mapA := map[string]bool{}
- mapB := map[string]bool{}
- for _, keyA := range a {
- if keyA == "default" {
- continue
- }
- mapA[keyA] = true
- }
- for _, keyB := range b {
- if keyB == "default" {
- continue
- }
- mapB[keyB] = true
- }
-
- // Now we'll build our checking slices
- var sortedA, sortedB []string
- for keyA, _ := range mapA {
- sortedA = append(sortedA, keyA)
- }
- for keyB, _ := range mapB {
- sortedB = append(sortedB, keyB)
- }
- sort.Strings(sortedA)
- sort.Strings(sortedB)
-
- // Finally, compare
- if len(sortedA) != len(sortedB) {
- return false
- }
-
- for i := range sortedA {
- if sortedA[i] != sortedB[i] {
- return false
- }
- }
-
- return true
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/policies/policies_test.go b/vendor/github.com/hashicorp/vault/helper/policies/policies_test.go
deleted file mode 100644
index ba9b0a8..0000000
--- a/vendor/github.com/hashicorp/vault/helper/policies/policies_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package policies
-
-import "testing"
-
-func TestEquivalentPolicies(t *testing.T) {
- a := []string{"foo", "bar"}
- var b []string
- if EquivalentPolicies(a, b) {
- t.Fatal("bad")
- }
-
- b = []string{"foo"}
- if EquivalentPolicies(a, b) {
- t.Fatal("bad")
- }
-
- b = []string{"bar", "foo"}
- if !EquivalentPolicies(a, b) {
- t.Fatal("bad")
- }
-
- b = []string{"foo", "default", "bar"}
- if !EquivalentPolicies(a, b) {
- t.Fatal("bad")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go b/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go
deleted file mode 100644
index f6d9f66..0000000
--- a/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package policyutil
-
-import (
- "sort"
- "strings"
-
- "github.com/hashicorp/vault/helper/strutil"
-)
-
-const (
- AddDefaultPolicy = true
- DoNotAddDefaultPolicy = false
-)
-
-// ParsePolicies parses a comma-delimited list of policies.
-// The resulting collection will have no duplicate elements.
-// If 'root' policy was present in the list of policies, then
-// all other policies will be ignored, the result will contain
-// just the 'root'. In cases where 'root' is not present, if
-// 'default' policy is not already present, it will be added.
-func ParsePolicies(policiesRaw interface{}) []string {
- if policiesRaw == nil {
- return []string{"default"}
- }
-
- var policies []string
- switch policiesRaw.(type) {
- case string:
- if policiesRaw.(string) == "" {
- return []string{}
- }
- policies = strings.Split(policiesRaw.(string), ",")
- case []string:
- policies = policiesRaw.([]string)
- }
-
- return SanitizePolicies(policies, false)
-}
-
-// SanitizePolicies performs the common input validation tasks
-// which are performed on the list of policies across Vault.
-// The resulting collection will have no duplicate elements.
-// If 'root' policy was present in the list of policies, then
-// all other policies will be ignored, the result will contain
-// just the 'root'. In cases where 'root' is not present, if
-// 'default' policy is not already present, it will be added
-// if addDefault is set to true.
-func SanitizePolicies(policies []string, addDefault bool) []string {
- defaultFound := false
- for i, p := range policies {
- policies[i] = strings.ToLower(strings.TrimSpace(p))
- // Eliminate unnamed policies.
- if policies[i] == "" {
- continue
- }
-
- // If 'root' policy is present, ignore all other policies.
- if policies[i] == "root" {
- policies = []string{"root"}
- defaultFound = true
- break
- }
- if policies[i] == "default" {
- defaultFound = true
- }
- }
-
- // Always add 'default' except only if the policies contain 'root'.
- if addDefault && (len(policies) == 0 || !defaultFound) {
- policies = append(policies, "default")
- }
-
- return strutil.RemoveDuplicates(policies, true)
-}
-
-// EquivalentPolicies checks whether the given policy sets are equivalent, as in,
-// they contain the same values. The benefit of this method is that it leaves
-// the "default" policy out of its comparisons as it may be added later by core
-// after a set of policies has been saved by a backend.
-func EquivalentPolicies(a, b []string) bool {
- if a == nil && b == nil {
- return true
- }
-
- if a == nil || b == nil {
- return false
- }
-
- // First we'll build maps to ensure unique values and filter default
- mapA := map[string]bool{}
- mapB := map[string]bool{}
- for _, keyA := range a {
- if keyA == "default" {
- continue
- }
- mapA[keyA] = true
- }
- for _, keyB := range b {
- if keyB == "default" {
- continue
- }
- mapB[keyB] = true
- }
-
- // Now we'll build our checking slices
- var sortedA, sortedB []string
- for keyA, _ := range mapA {
- sortedA = append(sortedA, keyA)
- }
- for keyB, _ := range mapB {
- sortedB = append(sortedB, keyB)
- }
- sort.Strings(sortedA)
- sort.Strings(sortedB)
-
- // Finally, compare
- if len(sortedA) != len(sortedB) {
- return false
- }
-
- for i := range sortedA {
- if sortedA[i] != sortedB[i] {
- return false
- }
- }
-
- return true
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil_test.go b/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil_test.go
deleted file mode 100644
index 4b26483..0000000
--- a/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package policyutil
-
-import "testing"
-
-func TestSanitizePolicies(t *testing.T) {
- expected := []string{"foo", "bar"}
- actual := SanitizePolicies([]string{"foo", "bar"}, false)
- if !EquivalentPolicies(expected, actual) {
- t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
- }
-
- // If 'default' is already added, do not remove it.
- expected = []string{"foo", "bar", "default"}
- actual = SanitizePolicies([]string{"foo", "bar", "default"}, false)
- if !EquivalentPolicies(expected, actual) {
- t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
- }
-}
-
-func TestParsePolicies(t *testing.T) {
- expected := []string{"foo", "bar", "default"}
- actual := ParsePolicies("foo,bar")
- // add default if not present.
- if !EquivalentPolicies(expected, actual) {
- t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
- }
-
- // do not add default more than once.
- actual = ParsePolicies("foo,bar,default")
- if !EquivalentPolicies(expected, actual) {
- t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
- }
-
- // handle spaces and tabs.
- actual = ParsePolicies(" foo , bar , default")
- if !EquivalentPolicies(expected, actual) {
- t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
- }
-
- // ignore all others if root is present.
- expected = []string{"root"}
- actual = ParsePolicies("foo,bar,root")
- if !EquivalentPolicies(expected, actual) {
- t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
- }
-
- // with spaces and tabs.
- expected = []string{"root"}
- actual = ParsePolicies("foo ,bar, root ")
- if !EquivalentPolicies(expected, actual) {
- t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual)
- }
-}
-
-func TestEquivalentPolicies(t *testing.T) {
- a := []string{"foo", "bar"}
- var b []string
- if EquivalentPolicies(a, b) {
- t.Fatal("bad")
- }
-
- b = []string{"foo"}
- if EquivalentPolicies(a, b) {
- t.Fatal("bad")
- }
-
- b = []string{"bar", "foo"}
- if !EquivalentPolicies(a, b) {
- t.Fatal("bad")
- }
-
- b = []string{"foo", "default", "bar"}
- if !EquivalentPolicies(a, b) {
- t.Fatal("bad")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go b/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go
deleted file mode 100644
index 5ff59b1..0000000
--- a/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package proxyutil
-
-import (
- "fmt"
- "net"
- "sync"
-
- proxyproto "github.com/armon/go-proxyproto"
- "github.com/hashicorp/errwrap"
- sockaddr "github.com/hashicorp/go-sockaddr"
- "github.com/hashicorp/vault/helper/strutil"
-)
-
-// ProxyProtoConfig contains configuration for the PROXY protocol
-type ProxyProtoConfig struct {
- sync.RWMutex
- Behavior string
- AuthorizedAddrs []*sockaddr.SockAddrMarshaler `json:"authorized_addrs"`
-}
-
-func (p *ProxyProtoConfig) SetAuthorizedAddrs(addrs interface{}) error {
- p.AuthorizedAddrs = make([]*sockaddr.SockAddrMarshaler, 0)
- stringAddrs := make([]string, 0)
-
- switch addrs.(type) {
- case string:
- stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",")
- if len(stringAddrs) == 0 {
- return fmt.Errorf("unable to parse addresses from %v", addrs)
- }
-
- case []string:
- stringAddrs = addrs.([]string)
-
- case []interface{}:
- for _, v := range addrs.([]interface{}) {
- stringAddr, ok := v.(string)
- if !ok {
- return fmt.Errorf("error parsing %q as string")
- }
- stringAddrs = append(stringAddrs, stringAddr)
- }
-
- default:
- return fmt.Errorf("unknown address input type %T", addrs)
- }
-
- for _, addr := range stringAddrs {
- sa, err := sockaddr.NewSockAddr(addr)
- if err != nil {
- return errwrap.Wrapf("error parsing authorized address: {{err}}", err)
- }
- p.AuthorizedAddrs = append(p.AuthorizedAddrs, &sockaddr.SockAddrMarshaler{
- SockAddr: sa,
- })
- }
-
- return nil
-}
-
-// WrapInProxyProto wraps the given listener in the PROXY protocol. If behavior
-// is "use_if_authorized" or "deny_if_unauthorized" it also configures a
-// SourceCheck based on the given ProxyProtoConfig. In an error case it returns
-// the original listener and the error.
-func WrapInProxyProto(listener net.Listener, config *ProxyProtoConfig) (net.Listener, error) {
- config.Lock()
- defer config.Unlock()
-
- var newLn *proxyproto.Listener
-
- switch config.Behavior {
- case "use_always":
- newLn = &proxyproto.Listener{
- Listener: listener,
- }
-
- case "allow_authorized", "deny_unauthorized":
- newLn = &proxyproto.Listener{
- Listener: listener,
- SourceCheck: func(addr net.Addr) (bool, error) {
- config.RLock()
- defer config.RUnlock()
-
- sa, err := sockaddr.NewSockAddr(addr.String())
- if err != nil {
- return false, errwrap.Wrapf("error parsing remote address: {{err}}", err)
- }
-
- for _, authorizedAddr := range config.AuthorizedAddrs {
- if authorizedAddr.Contains(sa) {
- return true, nil
- }
- }
-
- if config.Behavior == "allow_authorized" {
- return false, nil
- }
-
- return false, proxyproto.ErrInvalidUpstream
- },
- }
- default:
- return listener, fmt.Errorf("unknown behavior type for proxy proto config")
- }
-
- return newLn, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/reload/reload.go b/vendor/github.com/hashicorp/vault/helper/reload/reload.go
deleted file mode 100644
index cc450b9..0000000
--- a/vendor/github.com/hashicorp/vault/helper/reload/reload.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package reload
-
-import (
- "crypto/tls"
- "fmt"
- "sync"
-)
-
-// ReloadFunc are functions that are called when a reload is requested
-type ReloadFunc func(map[string]interface{}) error
-
-// CertificateGetter satisfies ReloadFunc and its GetCertificate method
-// satisfies the tls.GetCertificate function signature. Currently it does not
-// allow changing paths after the fact.
-type CertificateGetter struct {
- sync.RWMutex
-
- cert *tls.Certificate
-
- certFile string
- keyFile string
-}
-
-func NewCertificateGetter(certFile, keyFile string) *CertificateGetter {
- return &CertificateGetter{
- certFile: certFile,
- keyFile: keyFile,
- }
-}
-
-func (cg *CertificateGetter) Reload(_ map[string]interface{}) error {
- cert, err := tls.LoadX509KeyPair(cg.certFile, cg.keyFile)
- if err != nil {
- return err
- }
-
- cg.Lock()
- defer cg.Unlock()
-
- cg.cert = &cert
-
- return nil
-}
-
-func (cg *CertificateGetter) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
- cg.RLock()
- defer cg.RUnlock()
-
- if cg.cert == nil {
- return nil, fmt.Errorf("nil certificate")
- }
-
- return cg.cert, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/salt/salt.go b/vendor/github.com/hashicorp/vault/helper/salt/salt.go
deleted file mode 100644
index 3dba9eb..0000000
--- a/vendor/github.com/hashicorp/vault/helper/salt/salt.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package salt
-
-import (
- "crypto/hmac"
- "crypto/sha1"
- "crypto/sha256"
- "encoding/hex"
- "fmt"
- "hash"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- // DefaultLocation is the path in the view we store our key salt
- // if no other path is provided.
- DefaultLocation = "salt"
-)
-
-// Salt is used to manage a persistent salt key which is used to
-// hash values. This allows keys to be generated and recovered
-// using the global salt. Primarily, this allows paths in the storage
-// backend to be obfuscated if they may contain sensitive information.
-type Salt struct {
- config *Config
- salt string
- generated bool
-}
-
-type HashFunc func([]byte) []byte
-
-// Config is used to parameterize the Salt
-type Config struct {
- // Location is the path in the storage backend for the
- // salt. Uses DefaultLocation if not specified.
- Location string
-
- // HashFunc is the hashing function to use for salting.
- // Defaults to SHA1 if not provided.
- HashFunc HashFunc
-
- // HMAC allows specification of a hash function to use for
- // the HMAC helpers
- HMAC func() hash.Hash
-
- // String prepended to HMAC strings for identification.
- // Required if using HMAC
- HMACType string
-}
-
-// NewSalt creates a new salt based on the configuration
-func NewSalt(view logical.Storage, config *Config) (*Salt, error) {
- // Setup the configuration
- if config == nil {
- config = &Config{}
- }
- if config.Location == "" {
- config.Location = DefaultLocation
- }
- if config.HashFunc == nil {
- config.HashFunc = SHA256Hash
- }
- if config.HMAC == nil {
- config.HMAC = sha256.New
- config.HMACType = "hmac-sha256"
- }
-
- // Create the salt
- s := &Salt{
- config: config,
- }
-
- // Look for the salt
- var raw *logical.StorageEntry
- var err error
- if view != nil {
- raw, err = view.Get(config.Location)
- if err != nil {
- return nil, fmt.Errorf("failed to read salt: %v", err)
- }
- }
-
- // Restore the salt if it exists
- if raw != nil {
- s.salt = string(raw.Value)
- }
-
- // Generate a new salt if necessary
- if s.salt == "" {
- s.salt, err = uuid.GenerateUUID()
- if err != nil {
- return nil, fmt.Errorf("failed to generate uuid: %v", err)
- }
- s.generated = true
- if view != nil {
- raw := &logical.StorageEntry{
- Key: config.Location,
- Value: []byte(s.salt),
- }
- if err := view.Put(raw); err != nil {
- return nil, fmt.Errorf("failed to persist salt: %v", err)
- }
- }
- }
-
- if config.HMAC != nil {
- if len(config.HMACType) == 0 {
- return nil, fmt.Errorf("HMACType must be defined")
- }
- }
-
- return s, nil
-}
-
-// SaltID is used to apply a salt and hash function to an ID to make sure
-// it is not reversible
-func (s *Salt) SaltID(id string) string {
- return SaltID(s.salt, id, s.config.HashFunc)
-}
-
-// GetHMAC is used to apply a salt and hash function to data to make sure it is
-// not reversible, with an additional HMAC
-func (s *Salt) GetHMAC(data string) string {
- hm := hmac.New(s.config.HMAC, []byte(s.salt))
- hm.Write([]byte(data))
- return hex.EncodeToString(hm.Sum(nil))
-}
-
-// GetIdentifiedHMAC is used to apply a salt and hash function to data to make
-// sure it is not reversible, with an additional HMAC, and ID prepended
-func (s *Salt) GetIdentifiedHMAC(data string) string {
- return s.config.HMACType + ":" + s.GetHMAC(data)
-}
-
-// DidGenerate returns if the underlying salt value was generated
-// on initialization or if an existing salt value was loaded
-func (s *Salt) DidGenerate() bool {
- return s.generated
-}
-
-// SaltID is used to apply a salt and hash function to an ID to make sure
-// it is not reversible
-func SaltID(salt, id string, hash HashFunc) string {
- comb := salt + id
- hashVal := hash([]byte(comb))
- return hex.EncodeToString(hashVal)
-}
-
-func HMACValue(salt, val string, hashFunc func() hash.Hash) string {
- hm := hmac.New(hashFunc, []byte(salt))
- hm.Write([]byte(val))
- return hex.EncodeToString(hm.Sum(nil))
-}
-
-func HMACIdentifiedValue(salt, val, hmacType string, hashFunc func() hash.Hash) string {
- return hmacType + ":" + HMACValue(salt, val, hashFunc)
-}
-
-// SHA1Hash returns the SHA1 of the input
-func SHA1Hash(inp []byte) []byte {
- hashed := sha1.Sum(inp)
- return hashed[:]
-}
-
-// SHA256Hash returns the SHA256 of the input
-func SHA256Hash(inp []byte) []byte {
- hashed := sha256.Sum256(inp)
- return hashed[:]
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/salt/salt_test.go b/vendor/github.com/hashicorp/vault/helper/salt/salt_test.go
deleted file mode 100644
index 124e66c..0000000
--- a/vendor/github.com/hashicorp/vault/helper/salt/salt_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package salt
-
-import (
- "crypto/sha1"
- "crypto/sha256"
- "testing"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/logical"
-)
-
-func TestSalt(t *testing.T) {
- inm := &logical.InmemStorage{}
- conf := &Config{}
-
- salt, err := NewSalt(inm, conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if !salt.DidGenerate() {
- t.Fatalf("expected generation")
- }
-
- // Verify the salt exists
- out, err := inm.Get(DefaultLocation)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("missing salt")
- }
-
- // Create a new salt, should restore
- salt2, err := NewSalt(inm, conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if salt2.DidGenerate() {
- t.Fatalf("unexpected generation")
- }
-
- // Check for a match
- if salt.salt != salt2.salt {
- t.Fatalf("salt mismatch: %s %s", salt.salt, salt2.salt)
- }
-
- // Verify a match
- id := "foobarbaz"
- sid1 := salt.SaltID(id)
- sid2 := salt2.SaltID(id)
-
- if sid1 != sid2 {
- t.Fatalf("mismatch")
- }
-}
-
-func TestSaltID(t *testing.T) {
- salt, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- id := "foobarbaz"
-
- sid1 := SaltID(salt, id, SHA1Hash)
- sid2 := SaltID(salt, id, SHA1Hash)
-
- if len(sid1) != sha1.Size*2 {
- t.Fatalf("Bad len: %d %s", len(sid1), sid1)
- }
-
- if sid1 != sid2 {
- t.Fatalf("mismatch")
- }
-
- sid1 = SaltID(salt, id, SHA256Hash)
- sid2 = SaltID(salt, id, SHA256Hash)
-
- if len(sid1) != sha256.Size*2 {
- t.Fatalf("Bad len: %d", len(sid1))
- }
-
- if sid1 != sid2 {
- t.Fatalf("mismatch")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go
index b5e69c4..a77e60d 100644
--- a/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go
+++ b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go
@@ -6,8 +6,22 @@ import (
"fmt"
"sort"
"strings"
+
+ "github.com/hashicorp/errwrap"
+ glob "github.com/ryanuber/go-glob"
)
+// StrListContainsGlob looks for a string in a list of strings and allows
+// globs.
+func StrListContainsGlob(haystack []string, needle string) bool {
+ for _, item := range haystack {
+ if glob.Glob(item, needle) {
+ return true
+ }
+ }
+ return false
+}
+
// StrListContains looks for a string in a list of strings.
func StrListContains(haystack []string, needle string) bool {
for _, item := range haystack {
@@ -76,7 +90,7 @@ func ParseKeyValues(input string, out map[string]string, sep string) error {
key := strings.TrimSpace(shards[0])
value := strings.TrimSpace(shards[1])
if key == "" || value == "" {
- return fmt.Errorf("invalid pair: key:'%s' value:'%s'", key, value)
+ return fmt.Errorf("invalid pair: key: %q value: %q", key, value)
}
out[key] = value
}
@@ -91,7 +105,7 @@ func ParseKeyValues(input string, out map[string]string, sep string) error {
// * Base64 encoded string containing comma separated list of
// `=` pairs
//
-// Input will be parsed into the output paramater, which should
+// Input will be parsed into the output parameter, which should
// be a non-nil map[string]string.
func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error {
input = strings.TrimSpace(input)
@@ -116,14 +130,14 @@ func ParseArbitraryKeyValues(input string, out map[string]string, sep string) er
// If JSON unmarshalling fails, consider that the input was
// supplied as a comma separated string of 'key=value' pairs.
if err = ParseKeyValues(input, out, sep); err != nil {
- return fmt.Errorf("failed to parse the input: %v", err)
+ return errwrap.Wrapf("failed to parse the input: {{err}}", err)
}
}
// Validate the parsed input
for key, value := range out {
if key != "" && value == "" {
- return fmt.Errorf("invalid value for key '%s'", key)
+ return fmt.Errorf("invalid value for key %q", key)
}
}
@@ -154,7 +168,7 @@ func ParseStringSlice(input string, sep string) []string {
// * JSON string
// * Base64 encoded JSON string
// * `sep` separated list of values
-// * Base64-encoded string containting a `sep` separated list of values
+// * Base64-encoded string containing a `sep` separated list of values
//
// Note that the separator is ignored if the input is found to already be in a
// structured format (e.g., JSON)
@@ -269,7 +283,7 @@ func EquivalentSlices(a, b []string) bool {
return true
}
-// StrListDelete removes the first occurance of the given item from the slice
+// StrListDelete removes the first occurrence of the given item from the slice
// of strings if the item exists.
func StrListDelete(s []string, d string) []string {
if s == nil {
diff --git a/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go b/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go
deleted file mode 100644
index ce02719..0000000
--- a/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go
+++ /dev/null
@@ -1,369 +0,0 @@
-package strutil
-
-import (
- "encoding/base64"
- "encoding/json"
- "reflect"
- "testing"
-)
-
-func TestStrUtil_StrListDelete(t *testing.T) {
- output := StrListDelete([]string{"item1", "item2", "item3"}, "item1")
- if StrListContains(output, "item1") {
- t.Fatal("bad: 'item1' should not have been present")
- }
-
- output = StrListDelete([]string{"item1", "item2", "item3"}, "item2")
- if StrListContains(output, "item2") {
- t.Fatal("bad: 'item2' should not have been present")
- }
-
- output = StrListDelete([]string{"item1", "item2", "item3"}, "item3")
- if StrListContains(output, "item3") {
- t.Fatal("bad: 'item3' should not have been present")
- }
-
- output = StrListDelete([]string{"item1", "item1", "item3"}, "item1")
- if !StrListContains(output, "item1") {
- t.Fatal("bad: 'item1' should have been present")
- }
-
- output = StrListDelete(output, "item1")
- if StrListContains(output, "item1") {
- t.Fatal("bad: 'item1' should not have been present")
- }
-
- output = StrListDelete(output, "random")
- if len(output) != 1 {
- t.Fatalf("bad: expected: 1, actual: %d", len(output))
- }
-
- output = StrListDelete(output, "item3")
- if StrListContains(output, "item3") {
- t.Fatal("bad: 'item3' should not have been present")
- }
-}
-
-func TestStrutil_EquivalentSlices(t *testing.T) {
- slice1 := []string{"test2", "test1", "test3"}
- slice2 := []string{"test3", "test2", "test1"}
- if !EquivalentSlices(slice1, slice2) {
- t.Fatalf("bad: expected a match")
- }
-
- slice2 = append(slice2, "test4")
- if EquivalentSlices(slice1, slice2) {
- t.Fatalf("bad: expected a mismatch")
- }
-}
-
-func TestStrutil_ListContains(t *testing.T) {
- haystack := []string{
- "dev",
- "ops",
- "prod",
- "root",
- }
- if StrListContains(haystack, "tubez") {
- t.Fatalf("Bad")
- }
- if !StrListContains(haystack, "root") {
- t.Fatalf("Bad")
- }
-}
-
-func TestStrutil_ListSubset(t *testing.T) {
- parent := []string{
- "dev",
- "ops",
- "prod",
- "root",
- }
- child := []string{
- "prod",
- "ops",
- }
- if !StrListSubset(parent, child) {
- t.Fatalf("Bad")
- }
- if !StrListSubset(parent, parent) {
- t.Fatalf("Bad")
- }
- if !StrListSubset(child, child) {
- t.Fatalf("Bad")
- }
- if !StrListSubset(child, nil) {
- t.Fatalf("Bad")
- }
- if StrListSubset(child, parent) {
- t.Fatalf("Bad")
- }
- if StrListSubset(nil, child) {
- t.Fatalf("Bad")
- }
-}
-
-func TestStrutil_ParseKeyValues(t *testing.T) {
- actual := make(map[string]string)
- expected := map[string]string{
- "key1": "value1",
- "key2": "value2",
- }
- var input string
- var err error
-
- input = "key1=value1,key2=value2"
- err = ParseKeyValues(input, actual, ",")
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
- }
- for k, _ := range actual {
- delete(actual, k)
- }
-
- input = "key1 = value1, key2 = value2"
- err = ParseKeyValues(input, actual, ",")
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
- }
- for k, _ := range actual {
- delete(actual, k)
- }
-
- input = "key1 = value1, key2 = "
- err = ParseKeyValues(input, actual, ",")
- if err == nil {
- t.Fatalf("expected an error")
- }
- for k, _ := range actual {
- delete(actual, k)
- }
-
- input = "key1 = value1, = value2 "
- err = ParseKeyValues(input, actual, ",")
- if err == nil {
- t.Fatalf("expected an error")
- }
- for k, _ := range actual {
- delete(actual, k)
- }
-
- input = "key1"
- err = ParseKeyValues(input, actual, ",")
- if err == nil {
- t.Fatalf("expected an error")
- }
-}
-
-func TestStrutil_ParseArbitraryKeyValues(t *testing.T) {
- actual := make(map[string]string)
- expected := map[string]string{
- "key1": "value1",
- "key2": "value2",
- }
- var input string
- var err error
-
- // Test = as comma separated string
- input = "key1=value1,key2=value2"
- err = ParseArbitraryKeyValues(input, actual, ",")
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
- }
- for k, _ := range actual {
- delete(actual, k)
- }
-
- // Test = as base64 encoded comma separated string
- input = base64.StdEncoding.EncodeToString([]byte(input))
- err = ParseArbitraryKeyValues(input, actual, ",")
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
- }
- for k, _ := range actual {
- delete(actual, k)
- }
-
- // Test JSON encoded = tuples
- input = `{"key1":"value1", "key2":"value2"}`
- err = ParseArbitraryKeyValues(input, actual, ",")
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
- }
- for k, _ := range actual {
- delete(actual, k)
- }
-
- // Test base64 encoded JSON string of = tuples
- input = base64.StdEncoding.EncodeToString([]byte(input))
- err = ParseArbitraryKeyValues(input, actual, ",")
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
- }
- for k, _ := range actual {
- delete(actual, k)
- }
-}
-
-func TestStrutil_ParseArbitraryStringSlice(t *testing.T) {
- input := `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';GRANT "foo-role" TO "{{name}}";ALTER ROLE "{{name}}" SET search_path = foo;GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`
-
- jsonExpected := []string{
- `DO $$
-BEGIN
- IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
- CREATE ROLE "foo-role";
- CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
- ALTER ROLE "foo-role" SET search_path = foo;
- GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
- GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
- END IF;
-END
-$$`,
- `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'`,
- `GRANT "foo-role" TO "{{name}}"`,
- `ALTER ROLE "{{name}}" SET search_path = foo`,
- `GRANT CONNECT ON DATABASE "postgres" TO "{{name}}"`,
- ``,
- }
-
- nonJSONExpected := jsonExpected[1:]
-
- var actual []string
- var inputB64 string
- var err error
-
- // Test non-JSON string
- actual = ParseArbitraryStringSlice(input, ";")
- if !reflect.DeepEqual(nonJSONExpected, actual) {
- t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", nonJSONExpected, actual)
- }
-
- // Test base64-encoded non-JSON string
- inputB64 = base64.StdEncoding.EncodeToString([]byte(input))
- actual = ParseArbitraryStringSlice(inputB64, ";")
- if !reflect.DeepEqual(nonJSONExpected, actual) {
- t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", nonJSONExpected, actual)
- }
-
- // Test JSON encoded
- inputJSON, err := json.Marshal(jsonExpected)
- if err != nil {
- t.Fatal(err)
- }
-
- actual = ParseArbitraryStringSlice(string(inputJSON), ";")
- if !reflect.DeepEqual(jsonExpected, actual) {
- t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", string(inputJSON), actual)
- }
-
- // Test base64 encoded JSON string of = tuples
- inputB64 = base64.StdEncoding.EncodeToString(inputJSON)
- actual = ParseArbitraryStringSlice(inputB64, ";")
- if !reflect.DeepEqual(jsonExpected, actual) {
- t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", jsonExpected, actual)
- }
-}
-
-func TestGlobbedStringsMatch(t *testing.T) {
- type tCase struct {
- item string
- val string
- expect bool
- }
-
- tCases := []tCase{
- tCase{"", "", true},
- tCase{"*", "*", true},
- tCase{"**", "**", true},
- tCase{"*t", "t", true},
- tCase{"*t", "test", true},
- tCase{"t*", "test", true},
- tCase{"*test", "test", true},
- tCase{"*test", "a test", true},
- tCase{"test", "a test", false},
- tCase{"*test", "tests", false},
- tCase{"test*", "test", true},
- tCase{"test*", "testsss", true},
- tCase{"test**", "testsss", false},
- tCase{"test**", "test*", true},
- tCase{"**test", "*test", true},
- tCase{"TEST", "test", false},
- tCase{"test", "test", true},
- }
-
- for _, tc := range tCases {
- actual := GlobbedStringsMatch(tc.item, tc.val)
-
- if actual != tc.expect {
- t.Fatalf("Bad testcase %#v, expected %b, got %b", tc, tc.expect, actual)
- }
- }
-}
-
-func TestTrimStrings(t *testing.T) {
- input := []string{"abc", "123", "abcd ", "123 "}
- expected := []string{"abc", "123", "abcd", "123"}
- actual := TrimStrings(input)
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("Bad TrimStrings: expected:%#v, got:%#v", expected, actual)
- }
-}
-
-func TestStrutil_AppendIfMissing(t *testing.T) {
- keys := []string{}
-
- keys = AppendIfMissing(keys, "foo")
-
- if len(keys) != 1 {
- t.Fatalf("expected slice to be length of 1: %v", keys)
- }
- if keys[0] != "foo" {
- t.Fatalf("expected slice to contain key 'foo': %v", keys)
- }
-
- keys = AppendIfMissing(keys, "bar")
-
- if len(keys) != 2 {
- t.Fatalf("expected slice to be length of 2: %v", keys)
- }
- if keys[0] != "foo" {
- t.Fatalf("expected slice to contain key 'foo': %v", keys)
- }
- if keys[1] != "bar" {
- t.Fatalf("expected slice to contain key 'bar': %v", keys)
- }
-
- keys = AppendIfMissing(keys, "foo")
-
- if len(keys) != 2 {
- t.Fatalf("expected slice to still be length of 2: %v", keys)
- }
- if keys[0] != "foo" {
- t.Fatalf("expected slice to still contain key 'foo': %v", keys)
- }
- if keys[1] != "bar" {
- t.Fatalf("expected slice to still contain key 'bar': %v", keys)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil.go b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil.go
deleted file mode 100644
index 08b3ebd..0000000
--- a/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package tlsutil
-
-import (
- "crypto/tls"
- "fmt"
-
- "github.com/hashicorp/vault/helper/strutil"
-)
-
-// TLSLookup maps the tls_min_version configuration to the internal value
-var TLSLookup = map[string]uint16{
- "tls10": tls.VersionTLS10,
- "tls11": tls.VersionTLS11,
- "tls12": tls.VersionTLS12,
-}
-
-// ParseCiphers parse ciphersuites from the comma-separated string into recognized slice
-func ParseCiphers(cipherStr string) ([]uint16, error) {
- suites := []uint16{}
- ciphers := strutil.ParseStringSlice(cipherStr, ",")
- cipherMap := map[string]uint16{
- "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
- "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
- "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
- "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
- "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
- "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
- "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- }
- for _, cipher := range ciphers {
- if v, ok := cipherMap[cipher]; ok {
- suites = append(suites, v)
- } else {
- return suites, fmt.Errorf("unsupported cipher %q", cipher)
- }
- }
-
- return suites, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go
deleted file mode 100644
index 79aac9b..0000000
--- a/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package tlsutil
-
-import (
- "crypto/tls"
- "reflect"
- "testing"
-)
-
-func TestParseCiphers(t *testing.T) {
- testOk := "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"
- v, err := ParseCiphers(testOk)
- if err != nil {
- t.Fatal(err)
- }
- if len(v) != 17 {
- t.Fatal("missed ciphers after parse")
- }
-
- testBad := "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,cipherX"
- if _, err := ParseCiphers(testBad); err == nil {
- t.Fatal("should fail on unsupported cipherX")
- }
-
- testOrder := "TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
- v, _ = ParseCiphers(testOrder)
- expected := []uint16{tls.TLS_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_RSA_WITH_AES_128_GCM_SHA256}
- if !reflect.DeepEqual(expected, v) {
- t.Fatal("cipher order is not preserved")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/wrapping/wrapinfo.go b/vendor/github.com/hashicorp/vault/helper/wrapping/wrapinfo.go
deleted file mode 100644
index 2242c7b..0000000
--- a/vendor/github.com/hashicorp/vault/helper/wrapping/wrapinfo.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package wrapping
-
-import "time"
-
-type ResponseWrapInfo struct {
- // Setting to non-zero specifies that the response should be wrapped.
- // Specifies the desired TTL of the wrapping token.
- TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
-
- // The token containing the wrapped response
- Token string `json:"token" structs:"token" mapstructure:"token"`
-
- // The creation time. This can be used with the TTL to figure out an
- // expected expiration.
- CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
-
- // If the contained response is the output of a token creation call, the
- // created token's accessor will be accessible here
- WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor"`
-
- // The format to use. This doesn't get returned, it's only internal.
- Format string `json:"format" structs:"format" mapstructure:"format"`
-
- // CreationPath is the original request path that was used to create
- // the wrapped response.
- CreationPath string `json:"creation_path" structs:"creation_path" mapstructure:"creation_path"`
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/xor/xor.go b/vendor/github.com/hashicorp/vault/helper/xor/xor.go
deleted file mode 100644
index 4c5f88c..0000000
--- a/vendor/github.com/hashicorp/vault/helper/xor/xor.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package xor
-
-import (
- "encoding/base64"
- "fmt"
-)
-
-// XORBytes takes two byte slices and XORs them together, returning the final
-// byte slice. It is an error to pass in two byte slices that do not have the
-// same length.
-func XORBytes(a, b []byte) ([]byte, error) {
- if len(a) != len(b) {
- return nil, fmt.Errorf("length of byte slices is not equivalent: %d != %d", len(a), len(b))
- }
-
- buf := make([]byte, len(a))
-
- for i, _ := range a {
- buf[i] = a[i] ^ b[i]
- }
-
- return buf, nil
-}
-
-// XORBase64 takes two base64-encoded strings and XORs the decoded byte slices
-// together, returning the final byte slice. It is an error to pass in two
-// strings that do not have the same length to their base64-decoded byte slice.
-func XORBase64(a, b string) ([]byte, error) {
- aBytes, err := base64.StdEncoding.DecodeString(a)
- if err != nil {
- return nil, fmt.Errorf("error decoding first base64 value: %v", err)
- }
- if aBytes == nil || len(aBytes) == 0 {
- return nil, fmt.Errorf("decoded first base64 value is nil or empty")
- }
-
- bBytes, err := base64.StdEncoding.DecodeString(b)
- if err != nil {
- return nil, fmt.Errorf("error decoding second base64 value: %v", err)
- }
- if bBytes == nil || len(bBytes) == 0 {
- return nil, fmt.Errorf("decoded second base64 value is nil or empty")
- }
-
- return XORBytes(aBytes, bBytes)
-}
diff --git a/vendor/github.com/hashicorp/vault/helper/xor/xor_test.go b/vendor/github.com/hashicorp/vault/helper/xor/xor_test.go
deleted file mode 100644
index f50f525..0000000
--- a/vendor/github.com/hashicorp/vault/helper/xor/xor_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package xor
-
-import (
- "encoding/base64"
- "testing"
-)
-
-const (
- tokenB64 = "ZGE0N2JiODkzYjhkMDYxYw=="
- xorB64 = "iGiQYG9L0nIp+jRL5+Zk2w=="
- expectedB64 = "7AmkVw0p6ksamAwv19BVuA=="
-)
-
-func TestBase64XOR(t *testing.T) {
- ret, err := XORBase64(tokenB64, xorB64)
- if err != nil {
- t.Fatal(err)
- }
- if res := base64.StdEncoding.EncodeToString(ret); res != expectedB64 {
- t.Fatalf("bad: %s", res)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/auth_token_test.go b/vendor/github.com/hashicorp/vault/http/auth_token_test.go
deleted file mode 100644
index 9822f7d..0000000
--- a/vendor/github.com/hashicorp/vault/http/auth_token_test.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package http
-
-import (
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/vault"
-)
-
-func TestAuthTokenCreate(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- config := api.DefaultConfig()
- config.Address = addr
-
- client, err := api.NewClient(config)
- if err != nil {
- t.Fatal(err)
- }
- client.SetToken(token)
-
- secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Lease: "1h",
- })
- if err != nil {
- t.Fatal(err)
- }
- if secret.Auth.LeaseDuration != 3600 {
- t.Errorf("expected 1h, got %q", secret.Auth.LeaseDuration)
- }
-
- renewCreateRequest := &api.TokenCreateRequest{
- TTL: "1h",
- Renewable: new(bool),
- }
-
- secret, err = client.Auth().Token().Create(renewCreateRequest)
- if err != nil {
- t.Fatal(err)
- }
- if secret.Auth.LeaseDuration != 3600 {
- t.Errorf("expected 1h, got %q", secret.Auth.LeaseDuration)
- }
- if secret.Auth.Renewable {
- t.Errorf("expected non-renewable token")
- }
-
- *renewCreateRequest.Renewable = true
- secret, err = client.Auth().Token().Create(renewCreateRequest)
- if err != nil {
- t.Fatal(err)
- }
- if secret.Auth.LeaseDuration != 3600 {
- t.Errorf("expected 1h, got %q", secret.Auth.LeaseDuration)
- }
- if !secret.Auth.Renewable {
- t.Errorf("expected renewable token")
- }
-
- explicitMaxCreateRequest := &api.TokenCreateRequest{
- TTL: "1h",
- ExplicitMaxTTL: "1800s",
- }
-
- secret, err = client.Auth().Token().Create(explicitMaxCreateRequest)
- if err != nil {
- t.Fatal(err)
- }
- if secret.Auth.LeaseDuration != 1800 {
- t.Errorf("expected 1800 seconds, got %q", secret.Auth.LeaseDuration)
- }
-
- explicitMaxCreateRequest.ExplicitMaxTTL = "2h"
- secret, err = client.Auth().Token().Create(explicitMaxCreateRequest)
- if err != nil {
- t.Fatal(err)
- }
- if secret.Auth.LeaseDuration != 3600 {
- t.Errorf("expected 3600 seconds, got %q", secret.Auth.LeaseDuration)
- }
-}
-
-func TestAuthTokenLookup(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- config := api.DefaultConfig()
- config.Address = addr
-
- client, err := api.NewClient(config)
- if err != nil {
- t.Fatal(err)
- }
- client.SetToken(token)
-
- // Create a new token ...
- secret2, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Lease: "1h",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // lookup details of this token
- secret, err := client.Auth().Token().Lookup(secret2.Auth.ClientToken)
- if err != nil {
- t.Fatalf("unable to lookup details of token, err = %v", err)
- }
-
- if secret.Data["id"] != secret2.Auth.ClientToken {
- t.Errorf("Did not get back details about our provided token, id returned=%s", secret.Data["id"])
- }
-
-}
-
-func TestAuthTokenLookupSelf(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- config := api.DefaultConfig()
- config.Address = addr
-
- client, err := api.NewClient(config)
- if err != nil {
- t.Fatal(err)
- }
- client.SetToken(token)
-
- // you should be able to lookup your own token
- secret, err := client.Auth().Token().LookupSelf()
- if err != nil {
- t.Fatalf("should be allowed to lookup self, err = %v", err)
- }
-
- if secret.Data["id"] != token {
- t.Errorf("Did not get back details about our own (self) token, id returned=%s", secret.Data["id"])
- }
- if secret.Data["display_name"] != "root" {
- t.Errorf("Did not get back details about our own (self) token, display_name returned=%s", secret.Data["display_name"])
- }
-
-}
-
-func TestAuthTokenRenew(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- config := api.DefaultConfig()
- config.Address = addr
-
- client, err := api.NewClient(config)
- if err != nil {
- t.Fatal(err)
- }
- client.SetToken(token)
-
- // The default root token is not renewable, so this should not work
- _, err = client.Auth().Token().Renew(token, 0)
- if err == nil {
- t.Fatal("should not be allowed to renew root token")
- }
- if !strings.Contains(err.Error(), "lease is not renewable") {
- t.Fatalf("wrong error; got %v", err)
- }
-
- // Create a new token that should be renewable
- secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
- Lease: "1h",
- })
- if err != nil {
- t.Fatal(err)
- }
- client.SetToken(secret.Auth.ClientToken)
-
- // Now attempt a renew with the new token
- secret, err = client.Auth().Token().Renew(secret.Auth.ClientToken, 3600)
- if err != nil {
- t.Fatal(err)
- }
-
- if secret.Auth.LeaseDuration != 3600 {
- t.Errorf("expected 1h, got %v", secret.Auth.LeaseDuration)
- }
-
- if secret.Auth.Renewable != true {
- t.Error("expected lease to be renewable")
- }
-
- // Do the same thing with the self variant
- secret, err = client.Auth().Token().RenewSelf(3600)
- if err != nil {
- t.Fatal(err)
- }
-
- if secret.Auth.LeaseDuration != 3600 {
- t.Errorf("expected 1h, got %v", secret.Auth.LeaseDuration)
- }
-
- if secret.Auth.Renewable != true {
- t.Error("expected lease to be renewable")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/cors.go b/vendor/github.com/hashicorp/vault/http/cors.go
deleted file mode 100644
index a01228b..0000000
--- a/vendor/github.com/hashicorp/vault/http/cors.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package http
-
-import (
- "fmt"
- "net/http"
- "strings"
-
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/vault"
-)
-
-var allowedMethods = []string{
- http.MethodDelete,
- http.MethodGet,
- http.MethodOptions,
- http.MethodPost,
- http.MethodPut,
- "LIST", // LIST is not an official HTTP method, but Vault supports it.
-}
-
-func wrapCORSHandler(h http.Handler, core *vault.Core) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- corsConf := core.CORSConfig()
-
- origin := req.Header.Get("Origin")
- requestMethod := req.Header.Get("Access-Control-Request-Method")
-
- // If CORS is not enabled or if no Origin header is present (i.e. the request
- // is from the Vault CLI. A browser will always send an Origin header), then
- // just return a 204.
- if !corsConf.IsEnabled() || origin == "" {
- h.ServeHTTP(w, req)
- return
- }
-
- // Return a 403 if the origin is not allowed to make cross-origin requests.
- if !corsConf.IsValidOrigin(origin) {
- respondError(w, http.StatusForbidden, fmt.Errorf("origin not allowed"))
- return
- }
-
- if req.Method == http.MethodOptions && !strutil.StrListContains(allowedMethods, requestMethod) {
- w.WriteHeader(http.StatusMethodNotAllowed)
- return
- }
-
- w.Header().Set("Access-Control-Allow-Origin", origin)
- w.Header().Set("Vary", "Origin")
-
- // apply headers for preflight requests
- if req.Method == http.MethodOptions {
- w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ","))
- w.Header().Set("Access-Control-Allow-Headers", strings.Join(corsConf.AllowedHeaders, ","))
- w.Header().Set("Access-Control-Max-Age", "300")
-
- return
- }
-
- h.ServeHTTP(w, req)
- return
- })
-}
diff --git a/vendor/github.com/hashicorp/vault/http/forwarding_test.go b/vendor/github.com/hashicorp/vault/http/forwarding_test.go
deleted file mode 100644
index 4f1aefe..0000000
--- a/vendor/github.com/hashicorp/vault/http/forwarding_test.go
+++ /dev/null
@@ -1,575 +0,0 @@
-package http
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io"
- "math/rand"
- "net/http"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-
- "golang.org/x/net/http2"
-
- cleanhttp "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/api"
- credCert "github.com/hashicorp/vault/builtin/credential/cert"
- "github.com/hashicorp/vault/builtin/logical/transit"
- "github.com/hashicorp/vault/helper/keysutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/vault"
-)
-
-func TestHTTP_Fallback_Bad_Address(t *testing.T) {
- coreConfig := &vault.CoreConfig{
- LogicalBackends: map[string]logical.Factory{
- "transit": transit.Factory,
- },
- ClusterAddr: "https://127.3.4.1:8382",
- }
-
- cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
- HandlerFunc: Handler,
- })
- cluster.Start()
- defer cluster.Cleanup()
- cores := cluster.Cores
-
- // make it easy to get access to the active
- core := cores[0].Core
- vault.TestWaitActive(t, core)
-
- addrs := []string{
- fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port),
- fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port),
- }
-
- for _, addr := range addrs {
- config := api.DefaultConfig()
- config.Address = addr
- config.HttpClient = cleanhttp.DefaultClient()
- config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig
- client, err := api.NewClient(config)
- if err != nil {
- t.Fatal(err)
- }
- client.SetToken(cluster.RootToken)
-
- secret, err := client.Auth().Token().LookupSelf()
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil {
- t.Fatal("secret is nil")
- }
- if secret.Data["id"].(string) != cluster.RootToken {
- t.Fatal("token mismatch")
- }
- }
-}
-
-func TestHTTP_Fallback_Disabled(t *testing.T) {
- coreConfig := &vault.CoreConfig{
- LogicalBackends: map[string]logical.Factory{
- "transit": transit.Factory,
- },
- ClusterAddr: "empty",
- }
-
- cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
- HandlerFunc: Handler,
- })
- cluster.Start()
- defer cluster.Cleanup()
- cores := cluster.Cores
-
- // make it easy to get access to the active
- core := cores[0].Core
- vault.TestWaitActive(t, core)
-
- addrs := []string{
- fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port),
- fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port),
- }
-
- for _, addr := range addrs {
- config := api.DefaultConfig()
- config.Address = addr
- config.HttpClient = cleanhttp.DefaultClient()
- config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig
- client, err := api.NewClient(config)
- if err != nil {
- t.Fatal(err)
- }
- client.SetToken(cluster.RootToken)
-
- secret, err := client.Auth().Token().LookupSelf()
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil {
- t.Fatal("secret is nil")
- }
- if secret.Data["id"].(string) != cluster.RootToken {
- t.Fatal("token mismatch")
- }
- }
-}
-
-// This function recreates the fuzzy testing from transit to pipe a large
-// number of requests from the standbys to the active node.
-func TestHTTP_Forwarding_Stress(t *testing.T) {
- testHTTP_Forwarding_Stress_Common(t, false, 50)
- testHTTP_Forwarding_Stress_Common(t, true, 50)
-}
-
-func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64) {
- testPlaintext := "the quick brown fox"
- testPlaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA=="
-
- coreConfig := &vault.CoreConfig{
- LogicalBackends: map[string]logical.Factory{
- "transit": transit.Factory,
- },
- }
-
- cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
- HandlerFunc: Handler,
- })
- cluster.Start()
- defer cluster.Cleanup()
- cores := cluster.Cores
-
- // make it easy to get access to the active
- core := cores[0].Core
- vault.TestWaitActive(t, core)
-
- wg := sync.WaitGroup{}
-
- funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"}
- keys := []string{"test1", "test2", "test3"}
-
- hosts := []string{
- fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[1].Listeners[0].Address.Port),
- fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[2].Listeners[0].Address.Port),
- }
-
- transport := &http.Transport{
- TLSClientConfig: cores[0].TLSConfig,
- }
- if err := http2.ConfigureTransport(transport); err != nil {
- t.Fatal(err)
- }
-
- client := &http.Client{
- Transport: transport,
- CheckRedirect: func(*http.Request, []*http.Request) error {
- return fmt.Errorf("redirects not allowed in this test")
- },
- }
-
- //core.Logger().Printf("[TRACE] mounting transit")
- req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/mounts/transit", cores[0].Listeners[0].Address.Port),
- bytes.NewBuffer([]byte("{\"type\": \"transit\"}")))
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set(AuthHeaderName, cluster.RootToken)
- _, err = client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- //core.Logger().Printf("[TRACE] done mounting transit")
-
- var totalOps uint64
- var successfulOps uint64
- var key1ver int64 = 1
- var key2ver int64 = 1
- var key3ver int64 = 1
- var numWorkers uint64 = 50
- var numWorkersStarted uint64
- var waitLock sync.Mutex
- waitCond := sync.NewCond(&waitLock)
-
- // This is the goroutine loop
- doFuzzy := func(id int, parallel bool) {
- var myTotalOps uint64
- var mySuccessfulOps uint64
- var keyVer int64 = 1
- // Check for panics, otherwise notify we're done
- defer func() {
- if err := recover(); err != nil {
- core.Logger().Error("got a panic: %v", err)
- t.Fail()
- }
- atomic.AddUint64(&totalOps, myTotalOps)
- atomic.AddUint64(&successfulOps, mySuccessfulOps)
- wg.Done()
- }()
-
- // Holds the latest encrypted value for each key
- latestEncryptedText := map[string]string{}
-
- client := &http.Client{
- Transport: transport,
- }
-
- var chosenFunc, chosenKey, chosenHost string
-
- myRand := rand.New(rand.NewSource(int64(id) * 400))
-
- doReq := func(method, url string, body io.Reader) (*http.Response, error) {
- req, err := http.NewRequest(method, url, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set(AuthHeaderName, cluster.RootToken)
- resp, err := client.Do(req)
- if err != nil {
- return nil, err
- }
- return resp, nil
- }
-
- doResp := func(resp *http.Response) (*api.Secret, error) {
- if resp == nil {
- return nil, fmt.Errorf("nil response")
- }
- defer resp.Body.Close()
-
- // Make sure we weren't redirected
- if resp.StatusCode > 300 && resp.StatusCode < 400 {
- return nil, fmt.Errorf("got status code %d, resp was %#v", resp.StatusCode, *resp)
- }
-
- result := &api.Response{Response: resp}
- err := result.Error()
- if err != nil {
- return nil, err
- }
-
- secret, err := api.ParseSecret(result.Body)
- if err != nil {
- return nil, err
- }
-
- return secret, nil
- }
-
- for _, chosenHost := range hosts {
- for _, chosenKey := range keys {
- // Try to write the key to make sure it exists
- _, err := doReq("POST", chosenHost+"keys/"+fmt.Sprintf("%s-%t", chosenKey, parallel), bytes.NewBuffer([]byte("{}")))
- if err != nil {
- panic(err)
- }
- }
- }
-
- if !parallel {
- chosenHost = hosts[id%len(hosts)]
- chosenKey = fmt.Sprintf("key-%t-%d", parallel, id)
-
- _, err := doReq("POST", chosenHost+"keys/"+chosenKey, bytes.NewBuffer([]byte("{}")))
- if err != nil {
- panic(err)
- }
- }
-
- atomic.AddUint64(&numWorkersStarted, 1)
-
- waitCond.L.Lock()
- for atomic.LoadUint64(&numWorkersStarted) != numWorkers {
- waitCond.Wait()
- }
- waitCond.L.Unlock()
- waitCond.Broadcast()
-
- core.Logger().Trace("Starting goroutine", "id", id)
-
- startTime := time.Now()
- for {
- // Stop after 10 seconds
- if time.Now().Sub(startTime) > 10*time.Second {
- return
- }
-
- myTotalOps++
-
- // Pick a function and a key
- chosenFunc = funcs[myRand.Int()%len(funcs)]
- if parallel {
- chosenKey = fmt.Sprintf("%s-%t", keys[myRand.Int()%len(keys)], parallel)
- chosenHost = hosts[myRand.Int()%len(hosts)]
- }
-
- switch chosenFunc {
- // Encrypt our plaintext and store the result
- case "encrypt":
- //core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id)
- resp, err := doReq("POST", chosenHost+"encrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"plaintext\": \"%s\"}", testPlaintextB64))))
- if err != nil {
- panic(err)
- }
-
- secret, err := doResp(resp)
- if err != nil {
- panic(err)
- }
-
- latest := secret.Data["ciphertext"].(string)
- if latest == "" {
- panic(fmt.Errorf("bad ciphertext"))
- }
- latestEncryptedText[chosenKey] = secret.Data["ciphertext"].(string)
-
- mySuccessfulOps++
-
- // Decrypt the ciphertext and compare the result
- case "decrypt":
- ct := latestEncryptedText[chosenKey]
- if ct == "" {
- mySuccessfulOps++
- continue
- }
-
- //core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id)
- resp, err := doReq("POST", chosenHost+"decrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"ciphertext\": \"%s\"}", ct))))
- if err != nil {
- panic(err)
- }
-
- secret, err := doResp(resp)
- if err != nil {
- // This could well happen since the min version is jumping around
- if strings.Contains(err.Error(), keysutil.ErrTooOld) {
- mySuccessfulOps++
- continue
- }
- panic(err)
- }
-
- ptb64 := secret.Data["plaintext"].(string)
- pt, err := base64.StdEncoding.DecodeString(ptb64)
- if err != nil {
- panic(fmt.Errorf("got an error decoding base64 plaintext: %v", err))
- }
- if string(pt) != testPlaintext {
- panic(fmt.Errorf("got bad plaintext back: %s", pt))
- }
-
- mySuccessfulOps++
-
- // Rotate to a new key version
- case "rotate":
- //core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id)
- _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/rotate", bytes.NewBuffer([]byte("{}")))
- if err != nil {
- panic(err)
- }
- if parallel {
- switch chosenKey {
- case "test1":
- atomic.AddInt64(&key1ver, 1)
- case "test2":
- atomic.AddInt64(&key2ver, 1)
- case "test3":
- atomic.AddInt64(&key3ver, 1)
- }
- } else {
- keyVer++
- }
-
- mySuccessfulOps++
-
- // Change the min version, which also tests the archive functionality
- case "change_min_version":
- var latestVersion int64 = keyVer
- if parallel {
- switch chosenKey {
- case "test1":
- latestVersion = atomic.LoadInt64(&key1ver)
- case "test2":
- latestVersion = atomic.LoadInt64(&key2ver)
- case "test3":
- latestVersion = atomic.LoadInt64(&key3ver)
- }
- }
-
- setVersion := (myRand.Int63() % latestVersion) + 1
-
- //core.Logger().Printf("[TRACE] %s, %s, %d, new min version %d", chosenFunc, chosenKey, id, setVersion)
-
- _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/config", bytes.NewBuffer([]byte(fmt.Sprintf("{\"min_decryption_version\": %d}", setVersion))))
- if err != nil {
- panic(err)
- }
-
- mySuccessfulOps++
- }
- }
- }
-
- atomic.StoreUint64(&numWorkers, num)
-
- // Spawn some of these workers for 10 seconds
- for i := 0; i < int(atomic.LoadUint64(&numWorkers)); i++ {
- wg.Add(1)
- //core.Logger().Printf("[TRACE] spawning %d", i)
- go doFuzzy(i+1, parallel)
- }
-
- // Wait for them all to finish
- wg.Wait()
-
- if totalOps == 0 || totalOps != successfulOps {
- t.Fatalf("total/successful ops zero or mismatch: %d/%d; parallel: %t, num %d", totalOps, successfulOps, parallel, num)
- }
- t.Logf("total operations tried: %d, total successful: %d; parallel: %t, num %d", totalOps, successfulOps, parallel, num)
-}
-
-// This tests TLS connection state forwarding by ensuring that we can use a
-// client TLS to authenticate against the cert backend
-func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
- coreConfig := &vault.CoreConfig{
- CredentialBackends: map[string]logical.Factory{
- "cert": credCert.Factory,
- },
- }
-
- cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
- HandlerFunc: Handler,
- })
- cluster.Start()
- defer cluster.Cleanup()
- cores := cluster.Cores
-
- // make it easy to get access to the active
- core := cores[0].Core
- vault.TestWaitActive(t, core)
-
- transport := cleanhttp.DefaultTransport()
- transport.TLSClientConfig = cores[0].TLSConfig
- if err := http2.ConfigureTransport(transport); err != nil {
- t.Fatal(err)
- }
-
- client := &http.Client{
- Transport: transport,
- }
-
- req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/auth/cert", cores[0].Listeners[0].Address.Port),
- bytes.NewBuffer([]byte("{\"type\": \"cert\"}")))
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set(AuthHeaderName, cluster.RootToken)
- _, err = client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
-
- type certConfig struct {
- Certificate string `json:"certificate"`
- Policies string `json:"policies"`
- }
- encodedCertConfig, err := json.Marshal(&certConfig{
- Certificate: string(cluster.CACertPEM),
- Policies: "default",
- })
- if err != nil {
- t.Fatal(err)
- }
- req, err = http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/auth/cert/certs/test", cores[0].Listeners[0].Address.Port),
- bytes.NewBuffer(encodedCertConfig))
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set(AuthHeaderName, cluster.RootToken)
- _, err = client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
-
- addrs := []string{
- fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port),
- fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port),
- }
-
- // Ensure we can't possibly use lingering connections even though it should be to a different address
-
- transport = cleanhttp.DefaultTransport()
- transport.TLSClientConfig = cores[0].TLSConfig
-
- client = &http.Client{
- Transport: transport,
- CheckRedirect: func(*http.Request, []*http.Request) error {
- return fmt.Errorf("redirects not allowed in this test")
- },
- }
-
- //cores[0].Logger().Printf("cluster.RootToken token is %s", cluster.RootToken)
- //time.Sleep(4 * time.Hour)
-
- for _, addr := range addrs {
- client := cores[0].Client
- client.SetAddress(addr)
-
- secret, err := client.Logical().Write("auth/cert/login", nil)
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil {
- t.Fatal("secret is nil")
- }
- if secret.Auth == nil {
- t.Fatal("auth is nil")
- }
- if secret.Auth.Policies == nil || len(secret.Auth.Policies) == 0 || secret.Auth.Policies[0] != "default" {
- t.Fatalf("bad policies: %#v", secret.Auth.Policies)
- }
- if secret.Auth.ClientToken == "" {
- t.Fatalf("bad client token: %#v", *secret.Auth)
- }
- client.SetToken(secret.Auth.ClientToken)
- secret, err = client.Auth().Token().LookupSelf()
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil {
- t.Fatal("secret is nil")
- }
- if secret.Data == nil || len(secret.Data) == 0 {
- t.Fatal("secret data was empty")
- }
- }
-}
-
-func TestHTTP_Forwarding_HelpOperation(t *testing.T) {
- cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
- HandlerFunc: Handler,
- })
- cluster.Start()
- defer cluster.Cleanup()
- cores := cluster.Cores
-
- vault.TestWaitActive(t, cores[0].Core)
-
- testHelp := func(client *api.Client) {
- help, err := client.Help("auth/token")
- if err != nil {
- t.Fatal(err)
- }
- if help == nil {
- t.Fatal("help was nil")
- }
- }
-
- testHelp(cores[0].Client)
- testHelp(cores[1].Client)
-}
diff --git a/vendor/github.com/hashicorp/vault/http/handler.go b/vendor/github.com/hashicorp/vault/http/handler.go
deleted file mode 100644
index 6290768..0000000
--- a/vendor/github.com/hashicorp/vault/http/handler.go
+++ /dev/null
@@ -1,355 +0,0 @@
-package http
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "strings"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/vault"
-)
-
-const (
- // AuthHeaderName is the name of the header containing the token.
- AuthHeaderName = "X-Vault-Token"
-
- // WrapTTLHeaderName is the name of the header containing a directive to
- // wrap the response
- WrapTTLHeaderName = "X-Vault-Wrap-TTL"
-
- // WrapFormatHeaderName is the name of the header containing the format to
- // wrap in; has no effect if the wrap TTL is not set
- WrapFormatHeaderName = "X-Vault-Wrap-Format"
-
- // NoRequestForwardingHeaderName is the name of the header telling Vault
- // not to use request forwarding
- NoRequestForwardingHeaderName = "X-Vault-No-Request-Forwarding"
-
- // MaxRequestSize is the maximum accepted request size. This is to prevent
- // a denial of service attack where no Content-Length is provided and the server
- // is fed ever more data until it exhausts memory.
- MaxRequestSize = 32 * 1024 * 1024
-)
-
-// Handler returns an http.Handler for the API. This can be used on
-// its own to mount the Vault API within another web server.
-func Handler(core *vault.Core) http.Handler {
- // Create the muxer to handle the actual endpoints
- mux := http.NewServeMux()
- mux.Handle("/v1/sys/init", handleSysInit(core))
- mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core))
- mux.Handle("/v1/sys/seal", handleSysSeal(core))
- mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core)))
- mux.Handle("/v1/sys/unseal", handleSysUnseal(core))
- mux.Handle("/v1/sys/renew", handleRequestForwarding(core, handleLogical(core, false, nil)))
- mux.Handle("/v1/sys/renew/", handleRequestForwarding(core, handleLogical(core, false, nil)))
- mux.Handle("/v1/sys/leases/", handleRequestForwarding(core, handleLogical(core, false, nil)))
- mux.Handle("/v1/sys/leader", handleSysLeader(core))
- mux.Handle("/v1/sys/health", handleSysHealth(core))
- mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core, handleSysGenerateRootAttempt(core)))
- mux.Handle("/v1/sys/generate-root/update", handleRequestForwarding(core, handleSysGenerateRootUpdate(core)))
- mux.Handle("/v1/sys/rekey/init", handleRequestForwarding(core, handleSysRekeyInit(core, false)))
- mux.Handle("/v1/sys/rekey/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, false)))
- mux.Handle("/v1/sys/rekey-recovery-key/init", handleRequestForwarding(core, handleSysRekeyInit(core, true)))
- mux.Handle("/v1/sys/rekey-recovery-key/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, true)))
- mux.Handle("/v1/sys/wrapping/lookup", handleRequestForwarding(core, handleLogical(core, false, wrappingVerificationFunc)))
- mux.Handle("/v1/sys/wrapping/rewrap", handleRequestForwarding(core, handleLogical(core, false, wrappingVerificationFunc)))
- mux.Handle("/v1/sys/wrapping/unwrap", handleRequestForwarding(core, handleLogical(core, false, wrappingVerificationFunc)))
- mux.Handle("/v1/sys/capabilities-self", handleRequestForwarding(core, handleLogical(core, true, nil)))
- mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core, true, nil)))
- mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core, false, nil)))
-
- // Wrap the handler in another handler to trigger all help paths.
- helpWrappedHandler := wrapHelpHandler(mux, core)
- corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core)
-
- // Wrap the help wrapped handler with another layer with a generic
- // handler
- genericWrappedHandler := wrapGenericHandler(corsWrappedHandler)
-
- return genericWrappedHandler
-}
-
-// wrapGenericHandler wraps the handler with an extra layer of handler where
-// tasks that should be commonly handled for all the requests and/or responses
-// are performed.
-func wrapGenericHandler(h http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // Set the Cache-Control header for all the responses returned
- // by Vault
- w.Header().Set("Cache-Control", "no-store")
- h.ServeHTTP(w, r)
- return
- })
-}
-
-// A lookup on a token that is about to expire returns nil, which means by the
-// time we can validate a wrapping token lookup will return nil since it will
-// be revoked after the call. So we have to do the validation here.
-func wrappingVerificationFunc(core *vault.Core, req *logical.Request) error {
- if req == nil {
- return fmt.Errorf("invalid request")
- }
-
- valid, err := core.ValidateWrappingToken(req)
- if err != nil {
- return fmt.Errorf("error validating wrapping token: %v", err)
- }
- if !valid {
- return fmt.Errorf("wrapping token is not valid or does not exist")
- }
-
- return nil
-}
-
-// stripPrefix is a helper to strip a prefix from the path. It will
-// return false from the second return value if it the prefix doesn't exist.
-func stripPrefix(prefix, path string) (string, bool) {
- if !strings.HasPrefix(path, prefix) {
- return "", false
- }
-
- path = path[len(prefix):]
- if path == "" {
- return "", false
- }
-
- return path, true
-}
-
-func parseRequest(r *http.Request, w http.ResponseWriter, out interface{}) error {
- // Limit the maximum number of bytes to MaxRequestSize to protect
- // against an indefinite amount of data being read.
- limit := http.MaxBytesReader(w, r.Body, MaxRequestSize)
- err := jsonutil.DecodeJSONFromReader(limit, out)
- if err != nil && err != io.EOF {
- return errwrap.Wrapf("failed to parse JSON input: {{err}}", err)
- }
- return err
-}
-
-// handleRequestForwarding determines whether to forward a request or not,
-// falling back on the older behavior of redirecting the client
-func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.Header.Get(vault.IntNoForwardingHeaderName) != "" {
- handler.ServeHTTP(w, r)
- return
- }
-
- if r.Header.Get(NoRequestForwardingHeaderName) != "" {
- // Forwarding explicitly disabled, fall back to previous behavior
- core.Logger().Trace("http/handleRequestForwarding: forwarding disabled by client request")
- handler.ServeHTTP(w, r)
- return
- }
-
- // Note: in an HA setup, this call will also ensure that connections to
- // the leader are set up, as that happens once the advertised cluster
- // values are read during this function
- isLeader, leaderAddr, _, err := core.Leader()
- if err != nil {
- if err == vault.ErrHANotEnabled {
- // Standalone node, serve request normally
- handler.ServeHTTP(w, r)
- return
- }
- // Some internal error occurred
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- if isLeader {
- // No forwarding needed, we're leader
- handler.ServeHTTP(w, r)
- return
- }
- if leaderAddr == "" {
- respondError(w, http.StatusInternalServerError, fmt.Errorf("local node not active but active cluster node not found"))
- return
- }
-
- // Attempt forwarding the request. If we cannot forward -- perhaps it's
- // been disabled on the active node -- this will return with an
- // ErrCannotForward and we simply fall back
- statusCode, header, retBytes, err := core.ForwardRequest(r)
- if err != nil {
- if err == vault.ErrCannotForward {
- core.Logger().Trace("http/handleRequestForwarding: cannot forward (possibly disabled on active node), falling back")
- } else {
- core.Logger().Error("http/handleRequestForwarding: error forwarding request", "error", err)
- }
-
- // Fall back to redirection
- handler.ServeHTTP(w, r)
- return
- }
-
- if header != nil {
- for k, v := range header {
- for _, j := range v {
- w.Header().Add(k, j)
- }
- }
- }
-
- w.WriteHeader(statusCode)
- w.Write(retBytes)
- return
- })
-}
-
-// request is a helper to perform a request and properly exit in the
-// case of an error.
-func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool) {
- resp, err := core.HandleRequest(r)
- if errwrap.Contains(err, consts.ErrStandby.Error()) {
- respondStandby(core, w, rawReq.URL)
- return resp, false
- }
- if respondErrorCommon(w, r, resp, err) {
- return resp, false
- }
-
- return resp, true
-}
-
-// respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby
-func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) {
- // Request the leader address
- _, redirectAddr, _, err := core.Leader()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
-
- // If there is no leader, generate a 503 error
- if redirectAddr == "" {
- err = fmt.Errorf("no active Vault instance found")
- respondError(w, http.StatusServiceUnavailable, err)
- return
- }
-
- // Parse the redirect location
- redirectURL, err := url.Parse(redirectAddr)
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
-
- // Generate a redirect URL
- finalURL := url.URL{
- Scheme: redirectURL.Scheme,
- Host: redirectURL.Host,
- Path: reqURL.Path,
- RawQuery: reqURL.RawQuery,
- }
-
- // Ensure there is a scheme, default to https
- if finalURL.Scheme == "" {
- finalURL.Scheme = "https"
- }
-
- // If we have an address, redirect! We use a 307 code
- // because we don't actually know if its permanent and
- // the request method should be preserved.
- w.Header().Set("Location", finalURL.String())
- w.WriteHeader(307)
-}
-
-// requestAuth adds the token to the logical.Request if it exists.
-func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) *logical.Request {
- // Attach the header value if we have it
- if v := r.Header.Get(AuthHeaderName); v != "" {
- req.ClientToken = v
-
- // Also attach the accessor if we have it. This doesn't fail if it
- // doesn't exist because the request may be to an unauthenticated
- // endpoint/login endpoint where a bad current token doesn't matter, or
- // a token from a Vault version pre-accessors.
- te, err := core.LookupToken(v)
- if err == nil && te != nil {
- req.ClientTokenAccessor = te.Accessor
- req.ClientTokenRemainingUses = te.NumUses
- }
- }
-
- return req
-}
-
-// requestWrapInfo adds the WrapInfo value to the logical.Request if wrap info exists
-func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, error) {
- // First try for the header value
- wrapTTL := r.Header.Get(WrapTTLHeaderName)
- if wrapTTL == "" {
- return req, nil
- }
-
- // If it has an allowed suffix parse as a duration string
- dur, err := parseutil.ParseDurationSecond(wrapTTL)
- if err != nil {
- return req, err
- }
- if int64(dur) < 0 {
- return req, fmt.Errorf("requested wrap ttl cannot be negative")
- }
-
- req.WrapInfo = &logical.RequestWrapInfo{
- TTL: dur,
- }
-
- wrapFormat := r.Header.Get(WrapFormatHeaderName)
- switch wrapFormat {
- case "jwt":
- req.WrapInfo.Format = "jwt"
- }
-
- return req, nil
-}
-
-func respondError(w http.ResponseWriter, status int, err error) {
- logical.AdjustErrorStatusCode(&status, err)
-
- w.Header().Add("Content-Type", "application/json")
- w.WriteHeader(status)
-
- resp := &ErrorResponse{Errors: make([]string, 0, 1)}
- if err != nil {
- resp.Errors = append(resp.Errors, err.Error())
- }
-
- enc := json.NewEncoder(w)
- enc.Encode(resp)
-}
-
-func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logical.Response, err error) bool {
- statusCode, newErr := logical.RespondErrorCommon(req, resp, err)
- if newErr == nil && statusCode == 0 {
- return false
- }
-
- respondError(w, statusCode, newErr)
- return true
-}
-
-func respondOk(w http.ResponseWriter, body interface{}) {
- w.Header().Add("Content-Type", "application/json")
-
- if body == nil {
- w.WriteHeader(http.StatusNoContent)
- } else {
- w.WriteHeader(http.StatusOK)
- enc := json.NewEncoder(w)
- enc.Encode(body)
- }
-}
-
-type ErrorResponse struct {
- Errors []string `json:"errors"`
-}
diff --git a/vendor/github.com/hashicorp/vault/http/handler_test.go b/vendor/github.com/hashicorp/vault/http/handler_test.go
deleted file mode 100644
index 8eae984..0000000
--- a/vendor/github.com/hashicorp/vault/http/handler_test.go
+++ /dev/null
@@ -1,340 +0,0 @@
-package http
-
-import (
- "encoding/json"
- "errors"
- "net/http"
- "net/http/httptest"
- "reflect"
- "strings"
- "testing"
-
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/vault"
-)
-
-func TestHandler_cors(t *testing.T) {
- core, _, _ := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- // Enable CORS and allow from any origin for testing.
- corsConfig := core.CORSConfig()
- err := corsConfig.Enable([]string{addr}, nil)
- if err != nil {
- t.Fatalf("Error enabling CORS: %s", err)
- }
-
- req, err := http.NewRequest(http.MethodOptions, addr+"/v1/sys/seal-status", nil)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- req.Header.Set("Origin", "BAD ORIGIN")
-
- // Requests from unacceptable origins will be rejected with a 403.
- client := cleanhttp.DefaultClient()
- resp, err := client.Do(req)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if resp.StatusCode != http.StatusForbidden {
- t.Fatalf("Bad status:\nexpected: 403 Forbidden\nactual: %s", resp.Status)
- }
-
- //
- // Test preflight requests
- //
-
- // Set a valid origin
- req.Header.Set("Origin", addr)
-
- // Server should NOT accept arbitrary methods.
- req.Header.Set("Access-Control-Request-Method", "FOO")
-
- client = cleanhttp.DefaultClient()
- resp, err = client.Do(req)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Fail if an arbitrary method is accepted.
- if resp.StatusCode != http.StatusMethodNotAllowed {
- t.Fatalf("Bad status:\nexpected: 405 Method Not Allowed\nactual: %s", resp.Status)
- }
-
- // Server SHOULD accept acceptable methods.
- req.Header.Set("Access-Control-Request-Method", http.MethodPost)
-
- client = cleanhttp.DefaultClient()
- resp, err = client.Do(req)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- //
- // Test that the CORS headers are applied correctly.
- //
- expHeaders := map[string]string{
- "Access-Control-Allow-Origin": addr,
- "Access-Control-Allow-Headers": strings.Join(vault.StdAllowedHeaders, ","),
- "Access-Control-Max-Age": "300",
- "Vary": "Origin",
- }
-
- for expHeader, expected := range expHeaders {
- actual := resp.Header.Get(expHeader)
- if actual == "" {
- t.Fatalf("bad:\nHeader: %#v was not on response.", expHeader)
- }
-
- if actual != expected {
- t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n", expected, actual)
- }
- }
-}
-
-func TestHandler_CacheControlNoStore(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- req, err := http.NewRequest("GET", addr+"/v1/sys/mounts", nil)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- req.Header.Set(AuthHeaderName, token)
- req.Header.Set(WrapTTLHeaderName, "60s")
-
- client := cleanhttp.DefaultClient()
- resp, err := client.Do(req)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if resp == nil {
- t.Fatalf("nil response")
- }
-
- actual := resp.Header.Get("Cache-Control")
-
- if actual == "" {
- t.Fatalf("missing 'Cache-Control' header entry in response writer")
- }
-
- if actual != "no-store" {
- t.Fatalf("bad: Cache-Control. Expected: 'no-store', Actual: %q", actual)
- }
-}
-
-// We use this test to verify header auth
-func TestSysMounts_headerAuth(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- req, err := http.NewRequest("GET", addr+"/v1/sys/mounts", nil)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- req.Header.Set(AuthHeaderName, token)
-
- client := cleanhttp.DefaultClient()
- resp, err := client.Do(req)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- },
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- expected["request_id"] = actual["request_id"]
- for k, v := range actual["data"].(map[string]interface{}) {
- if v.(map[string]interface{})["accessor"] == "" {
- t.Fatalf("no accessor from %s", k)
- }
- expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n", expected, actual)
- }
-}
-
-// We use this test to verify header auth wrapping
-func TestSysMounts_headerAuth_Wrapped(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- req, err := http.NewRequest("GET", addr+"/v1/sys/mounts", nil)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- req.Header.Set(AuthHeaderName, token)
- req.Header.Set(WrapTTLHeaderName, "60s")
-
- client := cleanhttp.DefaultClient()
- resp, err := client.Do(req)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "request_id": "",
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "data": nil,
- "wrap_info": map[string]interface{}{
- "ttl": json.Number("60"),
- },
- "warnings": nil,
- "auth": nil,
- }
-
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- actualToken, ok := actual["wrap_info"].(map[string]interface{})["token"]
- if !ok || actualToken == "" {
- t.Fatal("token missing in wrap info")
- }
- expected["wrap_info"].(map[string]interface{})["token"] = actualToken
-
- actualCreationTime, ok := actual["wrap_info"].(map[string]interface{})["creation_time"]
- if !ok || actualCreationTime == "" {
- t.Fatal("creation_time missing in wrap info")
- }
- expected["wrap_info"].(map[string]interface{})["creation_time"] = actualCreationTime
-
- actualCreationPath, ok := actual["wrap_info"].(map[string]interface{})["creation_path"]
- if !ok || actualCreationPath == "" {
- t.Fatal("creation_path missing in wrap info")
- }
- expected["wrap_info"].(map[string]interface{})["creation_path"] = actualCreationPath
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n%T %T", expected, actual, actual["warnings"], actual["data"])
- }
-}
-
-func TestHandler_sealed(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- core.Seal(token)
-
- resp, err := http.Get(addr + "/v1/secret/foo")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- testResponseStatus(t, resp, 503)
-}
-
-func TestHandler_error(t *testing.T) {
- w := httptest.NewRecorder()
-
- respondError(w, 500, errors.New("Test Error"))
-
- if w.Code != 500 {
- t.Fatalf("expected 500, got %d", w.Code)
- }
-
- // The code inside of the error should override
- // the argument to respondError
- w2 := httptest.NewRecorder()
- e := logical.CodedError(403, "error text")
-
- respondError(w2, 500, e)
-
- if w2.Code != 403 {
- t.Fatalf("expected 403, got %d", w2.Code)
- }
-
- // vault.ErrSealed is a special case
- w3 := httptest.NewRecorder()
-
- respondError(w3, 400, consts.ErrSealed)
-
- if w3.Code != 503 {
- t.Fatalf("expected 503, got %d", w3.Code)
- }
-
-}
diff --git a/vendor/github.com/hashicorp/vault/http/help.go b/vendor/github.com/hashicorp/vault/http/help.go
deleted file mode 100644
index 1c3a956..0000000
--- a/vendor/github.com/hashicorp/vault/http/help.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package http
-
-import (
- "net/http"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/vault"
-)
-
-func wrapHelpHandler(h http.Handler, core *vault.Core) http.Handler {
- return http.HandlerFunc(func(writer http.ResponseWriter, req *http.Request) {
- // If the help parameter is not blank, then show the help. We request
- // forward because standby nodes do not have mounts and other state.
- if v := req.URL.Query().Get("help"); v != "" || req.Method == "HELP" {
- handleRequestForwarding(core,
- http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- handleHelp(core, w, r)
- })).ServeHTTP(writer, req)
- return
- }
-
- h.ServeHTTP(writer, req)
- return
- })
-}
-
-func handleHelp(core *vault.Core, w http.ResponseWriter, req *http.Request) {
- path, ok := stripPrefix("/v1/", req.URL.Path)
- if !ok {
- respondError(w, http.StatusNotFound, nil)
- return
- }
-
- lreq := requestAuth(core, req, &logical.Request{
- Operation: logical.HelpOperation,
- Path: path,
- Connection: getConnection(req),
- })
-
- resp, err := core.HandleRequest(lreq)
- if err != nil {
- respondErrorCommon(w, lreq, resp, err)
- return
- }
-
- respondOk(w, resp.Data)
-}
diff --git a/vendor/github.com/hashicorp/vault/http/help_test.go b/vendor/github.com/hashicorp/vault/http/help_test.go
deleted file mode 100644
index cc3879b..0000000
--- a/vendor/github.com/hashicorp/vault/http/help_test.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package http
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/vault"
-)
-
-func TestHelp(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpGet(t, token, addr+"/v1/sys/mounts?help=1")
-
- var actual map[string]interface{}
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if _, ok := actual["help"]; !ok {
- t.Fatalf("bad: %#v", actual)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/http_test.go b/vendor/github.com/hashicorp/vault/http/http_test.go
deleted file mode 100644
index eb43817..0000000
--- a/vendor/github.com/hashicorp/vault/http/http_test.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package http
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "regexp"
- "strings"
- "testing"
- "time"
-
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/helper/jsonutil"
-)
-
-func testHttpGet(t *testing.T, token string, addr string) *http.Response {
- t.Logf("Token is %s", token)
- return testHttpData(t, "GET", token, addr, nil, false)
-}
-
-func testHttpDelete(t *testing.T, token string, addr string) *http.Response {
- return testHttpData(t, "DELETE", token, addr, nil, false)
-}
-
-// Go 1.8+ clients redirect automatically which breaks our 307 standby testing
-func testHttpDeleteDisableRedirect(t *testing.T, token string, addr string) *http.Response {
- return testHttpData(t, "DELETE", token, addr, nil, true)
-}
-
-func testHttpPost(t *testing.T, token string, addr string, body interface{}) *http.Response {
- return testHttpData(t, "POST", token, addr, body, false)
-}
-
-func testHttpPut(t *testing.T, token string, addr string, body interface{}) *http.Response {
- return testHttpData(t, "PUT", token, addr, body, false)
-}
-
-// Go 1.8+ clients redirect automatically which breaks our 307 standby testing
-func testHttpPutDisableRedirect(t *testing.T, token string, addr string, body interface{}) *http.Response {
- return testHttpData(t, "PUT", token, addr, body, true)
-}
-
-func testHttpData(t *testing.T, method string, token string, addr string, body interface{}, disableRedirect bool) *http.Response {
- bodyReader := new(bytes.Buffer)
- if body != nil {
- enc := json.NewEncoder(bodyReader)
- if err := enc.Encode(body); err != nil {
- t.Fatalf("err:%s", err)
- }
- }
-
- req, err := http.NewRequest(method, addr, bodyReader)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Get the address of the local listener in order to attach it to an Origin header.
- // This will allow for the testing of requests that require CORS, without using a browser.
- hostURLRegexp, _ := regexp.Compile("http[s]?://.+:[0-9]+")
- req.Header.Set("Origin", hostURLRegexp.FindString(addr))
-
- req.Header.Set("Content-Type", "application/json")
-
- if len(token) != 0 {
- req.Header.Set("X-Vault-Token", token)
- }
-
- client := cleanhttp.DefaultClient()
- client.Timeout = 60 * time.Second
-
- // From https://github.com/michiwend/gomusicbrainz/pull/4/files
- defaultRedirectLimit := 30
-
- client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
- if disableRedirect {
- return fmt.Errorf("checkRedirect disabled for test")
- }
- if len(via) > defaultRedirectLimit {
- return fmt.Errorf("%d consecutive requests(redirects)", len(via))
- }
- if len(via) == 0 {
- // No redirects
- return nil
- }
- // mutate the subsequent redirect requests with the first Header
- if token := via[0].Header.Get("X-Vault-Token"); len(token) != 0 {
- req.Header.Set("X-Vault-Token", token)
- }
- return nil
- }
-
- resp, err := client.Do(req)
- if err != nil && !strings.Contains(err.Error(), "checkRedirect disabled for test") {
- t.Fatalf("err: %s", err)
- }
-
- return resp
-}
-
-func testResponseStatus(t *testing.T, resp *http.Response, code int) {
- if resp.StatusCode != code {
- body := new(bytes.Buffer)
- io.Copy(body, resp.Body)
- resp.Body.Close()
-
- t.Fatalf(
- "Expected status %d, got %d. Body:\n\n%s",
- code, resp.StatusCode, body.String())
- }
-}
-
-func testResponseBody(t *testing.T, resp *http.Response, out interface{}) {
- defer resp.Body.Close()
-
- if err := jsonutil.DecodeJSONFromReader(resp.Body, out); err != nil {
- t.Fatalf("err: %s", err)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/logical.go b/vendor/github.com/hashicorp/vault/http/logical.go
deleted file mode 100644
index 642314e..0000000
--- a/vendor/github.com/hashicorp/vault/http/logical.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package http
-
-import (
- "io"
- "net"
- "net/http"
- "strconv"
- "strings"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/vault"
-)
-
-type PrepareRequestFunc func(*vault.Core, *logical.Request) error
-
-func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) (*logical.Request, int, error) {
- // Determine the path...
- if !strings.HasPrefix(r.URL.Path, "/v1/") {
- return nil, http.StatusNotFound, nil
- }
- path := r.URL.Path[len("/v1/"):]
- if path == "" {
- return nil, http.StatusNotFound, nil
- }
-
- // Determine the operation
- var op logical.Operation
- switch r.Method {
- case "DELETE":
- op = logical.DeleteOperation
- case "GET":
- op = logical.ReadOperation
- // Need to call ParseForm to get query params loaded
- queryVals := r.URL.Query()
- listStr := queryVals.Get("list")
- if listStr != "" {
- list, err := strconv.ParseBool(listStr)
- if err != nil {
- return nil, http.StatusBadRequest, nil
- }
- if list {
- op = logical.ListOperation
- }
- }
- case "POST", "PUT":
- op = logical.UpdateOperation
- case "LIST":
- op = logical.ListOperation
- case "OPTIONS":
- default:
- return nil, http.StatusMethodNotAllowed, nil
- }
-
- if op == logical.ListOperation {
- if !strings.HasSuffix(path, "/") {
- path += "/"
- }
- }
-
- // Parse the request if we can
- var data map[string]interface{}
- if op == logical.UpdateOperation {
- err := parseRequest(r, w, &data)
- if err == io.EOF {
- data = nil
- err = nil
- }
- if err != nil {
- return nil, http.StatusBadRequest, err
- }
- }
-
- var err error
- request_id, err := uuid.GenerateUUID()
- if err != nil {
- return nil, http.StatusBadRequest, errwrap.Wrapf("failed to generate identifier for the request: {{err}}", err)
- }
-
- req := requestAuth(core, r, &logical.Request{
- ID: request_id,
- Operation: op,
- Path: path,
- Data: data,
- Connection: getConnection(r),
- Headers: r.Header,
- })
-
- req, err = requestWrapInfo(r, req)
- if err != nil {
- return nil, http.StatusBadRequest, errwrap.Wrapf("error parsing X-Vault-Wrap-TTL header: {{err}}", err)
- }
-
- return req, 0, nil
-}
-
-func handleLogical(core *vault.Core, injectDataIntoTopLevel bool, prepareRequestCallback PrepareRequestFunc) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- req, statusCode, err := buildLogicalRequest(core, w, r)
- if err != nil || statusCode != 0 {
- respondError(w, statusCode, err)
- return
- }
-
- // Certain endpoints may require changes to the request object. They
- // will have a callback registered to do the needed operations, so
- // invoke it before proceeding.
- if prepareRequestCallback != nil {
- if err := prepareRequestCallback(core, req); err != nil {
- respondError(w, http.StatusBadRequest, err)
- return
- }
- }
-
- // Make the internal request. We attach the connection info
- // as well in case this is an authentication request that requires
- // it. Vault core handles stripping this if we need to. This also
- // handles all error cases; if we hit respondLogical, the request is a
- // success.
- resp, ok := request(core, w, r, req)
- if !ok {
- return
- }
-
- // Build the proper response
- respondLogical(w, r, req, injectDataIntoTopLevel, resp)
- })
-}
-
-func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request, injectDataIntoTopLevel bool, resp *logical.Response) {
- var httpResp *logical.HTTPResponse
- var ret interface{}
-
- if resp != nil {
- if resp.Redirect != "" {
- // If we have a redirect, redirect! We use a 307 code
- // because we don't actually know if its permanent.
- http.Redirect(w, r, resp.Redirect, 307)
- return
- }
-
- // Check if this is a raw response
- if _, ok := resp.Data[logical.HTTPStatusCode]; ok {
- respondRaw(w, r, resp)
- return
- }
-
- if resp.WrapInfo != nil && resp.WrapInfo.Token != "" {
- httpResp = &logical.HTTPResponse{
- WrapInfo: &logical.HTTPWrapInfo{
- Token: resp.WrapInfo.Token,
- TTL: int(resp.WrapInfo.TTL.Seconds()),
- CreationTime: resp.WrapInfo.CreationTime.Format(time.RFC3339Nano),
- CreationPath: resp.WrapInfo.CreationPath,
- WrappedAccessor: resp.WrapInfo.WrappedAccessor,
- },
- }
- } else {
- httpResp = logical.LogicalResponseToHTTPResponse(resp)
- httpResp.RequestID = req.ID
- }
-
- ret = httpResp
-
- if injectDataIntoTopLevel {
- injector := logical.HTTPSysInjector{
- Response: httpResp,
- }
- ret = injector
- }
- }
-
- // Respond
- respondOk(w, ret)
- return
-}
-
-// respondRaw is used when the response is using HTTPContentType and HTTPRawBody
-// to change the default response handling. This is only used for specific things like
-// returning the CRL information on the PKI backends.
-func respondRaw(w http.ResponseWriter, r *http.Request, resp *logical.Response) {
- retErr := func(w http.ResponseWriter, err string) {
- w.Header().Set("X-Vault-Raw-Error", err)
- w.WriteHeader(http.StatusInternalServerError)
- w.Write(nil)
- }
-
- // Ensure this is never a secret or auth response
- if resp.Secret != nil || resp.Auth != nil {
- retErr(w, "raw responses cannot contain secrets or auth")
- return
- }
-
- // Get the status code
- statusRaw, ok := resp.Data[logical.HTTPStatusCode]
- if !ok {
- retErr(w, "no status code given")
- return
- }
- status, ok := statusRaw.(int)
- if !ok {
- retErr(w, "cannot decode status code")
- return
- }
-
- nonEmpty := status != http.StatusNoContent
-
- var contentType string
- var body []byte
-
- // Get the content type header; don't require it if the body is empty
- contentTypeRaw, ok := resp.Data[logical.HTTPContentType]
- if !ok && !nonEmpty {
- retErr(w, "no content type given")
- return
- }
- if ok {
- contentType, ok = contentTypeRaw.(string)
- if !ok {
- retErr(w, "cannot decode content type")
- return
- }
- }
-
- if nonEmpty {
- // Get the body
- bodyRaw, ok := resp.Data[logical.HTTPRawBody]
- if !ok {
- retErr(w, "no body given")
- return
- }
- body, ok = bodyRaw.([]byte)
- if !ok {
- retErr(w, "cannot decode body")
- return
- }
- }
-
- // Write the response
- if contentType != "" {
- w.Header().Set("Content-Type", contentType)
- }
-
- w.WriteHeader(status)
- w.Write(body)
-}
-
-// getConnection is used to format the connection information for
-// attaching to a logical request
-func getConnection(r *http.Request) (connection *logical.Connection) {
- var remoteAddr string
-
- remoteAddr, _, err := net.SplitHostPort(r.RemoteAddr)
- if err != nil {
- remoteAddr = ""
- }
-
- connection = &logical.Connection{
- RemoteAddr: remoteAddr,
- ConnState: r.TLS,
- }
- return
-}
diff --git a/vendor/github.com/hashicorp/vault/http/logical_test.go b/vendor/github.com/hashicorp/vault/http/logical_test.go
deleted file mode 100644
index e4101a5..0000000
--- a/vendor/github.com/hashicorp/vault/http/logical_test.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package http
-
-import (
- "bytes"
- "encoding/json"
- "io"
- "net/http"
- "reflect"
- "strconv"
- "strings"
- "testing"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/physical/inmem"
- "github.com/hashicorp/vault/vault"
-)
-
-func TestLogical(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- // WRITE
- resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
- "data": "bar",
- })
- testResponseStatus(t, resp, 204)
-
- // READ
- // Bad token should return a 403
- resp = testHttpGet(t, token+"bad", addr+"/v1/secret/foo")
- testResponseStatus(t, resp, 403)
-
- resp = testHttpGet(t, token, addr+"/v1/secret/foo")
- var actual map[string]interface{}
- var nilWarnings interface{}
- expected := map[string]interface{}{
- "renewable": false,
- "lease_duration": json.Number(strconv.Itoa(int((32 * 24 * time.Hour) / time.Second))),
- "data": map[string]interface{}{
- "data": "bar",
- },
- "auth": nil,
- "wrap_info": nil,
- "warnings": nilWarnings,
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- delete(actual, "lease_id")
- expected["request_id"] = actual["request_id"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\nactual:\n%#v\nexpected:\n%#v", actual, expected)
- }
-
- // DELETE
- resp = testHttpDelete(t, token, addr+"/v1/secret/foo")
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/secret/foo")
- testResponseStatus(t, resp, 404)
-}
-
-func TestLogical_noExist(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpGet(t, token, addr+"/v1/secret/foo")
- testResponseStatus(t, resp, 404)
-}
-
-func TestLogical_StandbyRedirect(t *testing.T) {
- ln1, addr1 := TestListener(t)
- defer ln1.Close()
- ln2, addr2 := TestListener(t)
- defer ln2.Close()
-
- // Create an HA Vault
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- inmha, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- conf := &vault.CoreConfig{
- Physical: inmha,
- HAPhysical: inmha.(physical.HABackend),
- RedirectAddr: addr1,
- DisableMlock: true,
- }
- core1, err := vault.NewCore(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- keys, root := vault.TestCoreInit(t, core1)
- for _, key := range keys {
- if _, err := core1.Unseal(vault.TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Attempt to fix raciness in this test by giving the first core a chance
- // to grab the lock
- time.Sleep(2 * time.Second)
-
- // Create a second HA Vault
- conf2 := &vault.CoreConfig{
- Physical: inmha,
- HAPhysical: inmha.(physical.HABackend),
- RedirectAddr: addr2,
- DisableMlock: true,
- }
- core2, err := vault.NewCore(conf2)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- for _, key := range keys {
- if _, err := core2.Unseal(vault.TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- TestServerWithListener(t, ln1, addr1, core1)
- TestServerWithListener(t, ln2, addr2, core2)
- TestServerAuth(t, addr1, root)
-
- // WRITE to STANDBY
- resp := testHttpPutDisableRedirect(t, root, addr2+"/v1/secret/foo", map[string]interface{}{
- "data": "bar",
- })
- logger.Trace("307 test one starting")
- testResponseStatus(t, resp, 307)
- logger.Trace("307 test one stopping")
-
- //// READ to standby
- resp = testHttpGet(t, root, addr2+"/v1/auth/token/lookup-self")
- var actual map[string]interface{}
- var nilWarnings interface{}
- expected := map[string]interface{}{
- "renewable": false,
- "lease_duration": json.Number("0"),
- "data": map[string]interface{}{
- "meta": nil,
- "num_uses": json.Number("0"),
- "path": "auth/token/root",
- "policies": []interface{}{"root"},
- "display_name": "root",
- "orphan": true,
- "id": root,
- "ttl": json.Number("0"),
- "creation_ttl": json.Number("0"),
- "explicit_max_ttl": json.Number("0"),
- "expire_time": nil,
- },
- "warnings": nilWarnings,
- "wrap_info": nil,
- "auth": nil,
- }
-
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- actualDataMap := actual["data"].(map[string]interface{})
- delete(actualDataMap, "creation_time")
- delete(actualDataMap, "accessor")
- actual["data"] = actualDataMap
- expected["request_id"] = actual["request_id"]
- delete(actual, "lease_id")
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: got %#v; expected %#v", actual, expected)
- }
-
- //// DELETE to standby
- resp = testHttpDeleteDisableRedirect(t, root, addr2+"/v1/secret/foo")
- logger.Trace("307 test two starting")
- testResponseStatus(t, resp, 307)
- logger.Trace("307 test two stopping")
-}
-
-func TestLogical_CreateToken(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- // WRITE
- resp := testHttpPut(t, token, addr+"/v1/auth/token/create", map[string]interface{}{
- "data": "bar",
- })
-
- var actual map[string]interface{}
- var nilWarnings interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "data": nil,
- "wrap_info": nil,
- "auth": map[string]interface{}{
- "policies": []interface{}{"root"},
- "metadata": nil,
- "lease_duration": json.Number("0"),
- "renewable": false,
- },
- "warnings": nilWarnings,
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- delete(actual["auth"].(map[string]interface{}), "client_token")
- delete(actual["auth"].(map[string]interface{}), "accessor")
- expected["request_id"] = actual["request_id"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\nexpected:\n%#v\nactual:\n%#v", expected, actual)
- }
-}
-
-func TestLogical_RawHTTP(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
- "type": "http",
- })
- testResponseStatus(t, resp, 204)
-
- // Get the raw response
- resp = testHttpGet(t, token, addr+"/v1/foo/raw")
- testResponseStatus(t, resp, 200)
-
- // Test the headers
- if resp.Header.Get("Content-Type") != "plain/text" {
- t.Fatalf("Bad: %#v", resp.Header)
- }
-
- // Get the body
- body := new(bytes.Buffer)
- io.Copy(body, resp.Body)
- if string(body.Bytes()) != "hello world" {
- t.Fatalf("Bad: %s", body.Bytes())
- }
-}
-
-func TestLogical_RequestSizeLimit(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- // Write a very large object, should fail
- resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
- "data": make([]byte, MaxRequestSize),
- })
- testResponseStatus(t, resp, 413)
-}
-
-func TestLogical_ListSuffix(t *testing.T) {
- core, _, _ := vault.TestCoreUnsealed(t)
- req, _ := http.NewRequest("GET", "http://127.0.0.1:8200/v1/secret/foo", nil)
- lreq, status, err := buildLogicalRequest(core, nil, req)
- if err != nil {
- t.Fatal(err)
- }
- if status != 0 {
- t.Fatalf("got status %d", status)
- }
- if strings.HasSuffix(lreq.Path, "/") {
- t.Fatal("trailing slash found on path")
- }
-
- req, _ = http.NewRequest("GET", "http://127.0.0.1:8200/v1/secret/foo?list=true", nil)
- lreq, status, err = buildLogicalRequest(core, nil, req)
- if err != nil {
- t.Fatal(err)
- }
- if status != 0 {
- t.Fatalf("got status %d", status)
- }
- if !strings.HasSuffix(lreq.Path, "/") {
- t.Fatal("trailing slash not found on path")
- }
-
- req, _ = http.NewRequest("LIST", "http://127.0.0.1:8200/v1/secret/foo", nil)
- lreq, status, err = buildLogicalRequest(core, nil, req)
- if err != nil {
- t.Fatal(err)
- }
- if status != 0 {
- t.Fatalf("got status %d", status)
- }
- if !strings.HasSuffix(lreq.Path, "/") {
- t.Fatal("trailing slash not found on path")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_audit_test.go b/vendor/github.com/hashicorp/vault/http/sys_audit_test.go
deleted file mode 100644
index 58873bf..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_audit_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package http
-
-import (
- "encoding/json"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysAudit(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/audit/noop", map[string]interface{}{
- "type": "noop",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/audit")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "noop/": map[string]interface{}{
- "path": "noop/",
- "type": "noop",
- "description": "",
- "options": map[string]interface{}{},
- "local": false,
- },
- },
- "noop/": map[string]interface{}{
- "path": "noop/",
- "type": "noop",
- "description": "",
- "options": map[string]interface{}{},
- "local": false,
- },
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- expected["request_id"] = actual["request_id"]
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:\n%#v actual:\n%#v\n", expected, actual)
- }
-}
-
-func TestSysDisableAudit(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/audit/foo", map[string]interface{}{
- "type": "noop",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpDelete(t, token, addr+"/v1/sys/audit/foo")
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/audit")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{},
- }
-
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- expected["request_id"] = actual["request_id"]
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\nactual: %#v\nexpected: %#v\n", actual, expected)
- }
-}
-
-func TestSysAuditHash(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/audit/noop", map[string]interface{}{
- "type": "noop",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpPost(t, token, addr+"/v1/sys/audit-hash/noop", map[string]interface{}{
- "input": "bar",
- })
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "hash": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
- },
- "hash": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- expected["request_id"] = actual["request_id"]
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:\n%#v\n, got:\n%#v\n", expected, actual)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_auth_test.go b/vendor/github.com/hashicorp/vault/http/sys_auth_test.go
deleted file mode 100644
index fa3c692..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_auth_test.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package http
-
-import (
- "encoding/json"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysAuth(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpGet(t, token, addr+"/v1/sys/auth")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "token/": map[string]interface{}{
- "description": "token based credentials",
- "type": "token",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- },
- "local": false,
- },
- },
- "token/": map[string]interface{}{
- "description": "token based credentials",
- "type": "token",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- },
- "local": false,
- },
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- expected["request_id"] = actual["request_id"]
- for k, v := range actual["data"].(map[string]interface{}) {
- if v.(map[string]interface{})["accessor"] == "" {
- t.Fatalf("no accessor from %s", k)
- }
- expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
- }
-}
-
-func TestSysEnableAuth(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/auth/foo", map[string]interface{}{
- "type": "noop",
- "description": "foo",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/auth")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "foo/": map[string]interface{}{
- "description": "foo",
- "type": "noop",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- },
- "local": false,
- },
- "token/": map[string]interface{}{
- "description": "token based credentials",
- "type": "token",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- },
- "local": false,
- },
- },
- "foo/": map[string]interface{}{
- "description": "foo",
- "type": "noop",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- },
- "local": false,
- },
- "token/": map[string]interface{}{
- "description": "token based credentials",
- "type": "token",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- },
- "local": false,
- },
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- expected["request_id"] = actual["request_id"]
- for k, v := range actual["data"].(map[string]interface{}) {
- if v.(map[string]interface{})["accessor"] == "" {
- t.Fatalf("no accessor from %s", k)
- }
- expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
- }
-}
-
-func TestSysDisableAuth(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/auth/foo", map[string]interface{}{
- "type": "noop",
- "description": "foo",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpDelete(t, token, addr+"/v1/sys/auth/foo")
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/auth")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "token/": map[string]interface{}{
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- },
- "description": "token based credentials",
- "type": "token",
- "local": false,
- },
- },
- "token/": map[string]interface{}{
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- },
- "description": "token based credentials",
- "type": "token",
- "local": false,
- },
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- expected["request_id"] = actual["request_id"]
- for k, v := range actual["data"].(map[string]interface{}) {
- if v.(map[string]interface{})["accessor"] == "" {
- t.Fatalf("no accessor from %s", k)
- }
- expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_config_cors_test.go b/vendor/github.com/hashicorp/vault/http/sys_config_cors_test.go
deleted file mode 100644
index bd6c7ae..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_config_cors_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package http
-
-import (
- "encoding/json"
- "net/http"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysConfigCors(t *testing.T) {
- var resp *http.Response
-
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- corsConf := core.CORSConfig()
-
- // Try to enable CORS without providing a value for allowed_origins
- resp = testHttpPut(t, token, addr+"/v1/sys/config/cors", map[string]interface{}{
- "allowed_headers": "X-Custom-Header",
- })
-
- testResponseStatus(t, resp, 500)
-
- // Enable CORS, but provide an origin this time.
- resp = testHttpPut(t, token, addr+"/v1/sys/config/cors", map[string]interface{}{
- "allowed_origins": addr,
- "allowed_headers": "X-Custom-Header",
- })
-
- testResponseStatus(t, resp, 204)
-
- // Read the CORS configuration
- resp = testHttpGet(t, token, addr+"/v1/sys/config/cors")
- testResponseStatus(t, resp, 200)
-
- var actual map[string]interface{}
- var expected map[string]interface{}
-
- lenStdHeaders := len(corsConf.AllowedHeaders)
-
- expectedHeaders := make([]interface{}, lenStdHeaders)
-
- for i := range corsConf.AllowedHeaders {
- expectedHeaders[i] = corsConf.AllowedHeaders[i]
- }
-
- expected = map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "enabled": true,
- "allowed_origins": []interface{}{addr},
- "allowed_headers": expectedHeaders,
- },
- "enabled": true,
- "allowed_origins": []interface{}{addr},
- "allowed_headers": expectedHeaders,
- }
-
- testResponseStatus(t, resp, 200)
-
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
- }
-
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_generate_root.go b/vendor/github.com/hashicorp/vault/http/sys_generate_root.go
deleted file mode 100644
index 3697f80..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_generate_root.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package http
-
-import (
- "encoding/base64"
- "encoding/hex"
- "errors"
- "fmt"
- "net/http"
-
- "github.com/hashicorp/vault/vault"
-)
-
-func handleSysGenerateRootAttempt(core *vault.Core) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- switch r.Method {
- case "GET":
- handleSysGenerateRootAttemptGet(core, w, r)
- case "POST", "PUT":
- handleSysGenerateRootAttemptPut(core, w, r)
- case "DELETE":
- handleSysGenerateRootAttemptDelete(core, w, r)
- default:
- respondError(w, http.StatusMethodNotAllowed, nil)
- }
- })
-}
-
-func handleSysGenerateRootAttemptGet(core *vault.Core, w http.ResponseWriter, r *http.Request) {
- // Get the current seal configuration
- barrierConfig, err := core.SealAccess().BarrierConfig()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- if barrierConfig == nil {
- respondError(w, http.StatusBadRequest, fmt.Errorf(
- "server is not yet initialized"))
- return
- }
-
- sealConfig := barrierConfig
- if core.SealAccess().RecoveryKeySupported() {
- sealConfig, err = core.SealAccess().RecoveryConfig()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- }
-
- // Get the generation configuration
- generationConfig, err := core.GenerateRootConfiguration()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
-
- // Get the progress
- progress, err := core.GenerateRootProgress()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
-
- // Format the status
- status := &GenerateRootStatusResponse{
- Started: false,
- Progress: progress,
- Required: sealConfig.SecretThreshold,
- Complete: false,
- }
- if generationConfig != nil {
- status.Nonce = generationConfig.Nonce
- status.Started = true
- status.PGPFingerprint = generationConfig.PGPFingerprint
- }
-
- respondOk(w, status)
-}
-
-func handleSysGenerateRootAttemptPut(core *vault.Core, w http.ResponseWriter, r *http.Request) {
- // Parse the request
- var req GenerateRootInitRequest
- if err := parseRequest(r, w, &req); err != nil {
- respondError(w, http.StatusBadRequest, err)
- return
- }
-
- if len(req.OTP) > 0 && len(req.PGPKey) > 0 {
- respondError(w, http.StatusBadRequest, fmt.Errorf("only one of \"otp\" and \"pgp_key\" must be specified"))
- return
- }
-
- // Attemptialize the generation
- err := core.GenerateRootInit(req.OTP, req.PGPKey)
- if err != nil {
- respondError(w, http.StatusBadRequest, err)
- return
- }
-
- handleSysGenerateRootAttemptGet(core, w, r)
-}
-
-func handleSysGenerateRootAttemptDelete(core *vault.Core, w http.ResponseWriter, r *http.Request) {
- err := core.GenerateRootCancel()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- respondOk(w, nil)
-}
-
-func handleSysGenerateRootUpdate(core *vault.Core) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // Parse the request
- var req GenerateRootUpdateRequest
- if err := parseRequest(r, w, &req); err != nil {
- respondError(w, http.StatusBadRequest, err)
- return
- }
- if req.Key == "" {
- respondError(
- w, http.StatusBadRequest,
- errors.New("'key' must be specified in request body as JSON"))
- return
- }
-
- // Decode the key, which is base64 or hex encoded
- min, max := core.BarrierKeyLength()
- key, err := hex.DecodeString(req.Key)
- // We check min and max here to ensure that a string that is base64
- // encoded but also valid hex will not be valid and we instead base64
- // decode it
- if err != nil || len(key) < min || len(key) > max {
- key, err = base64.StdEncoding.DecodeString(req.Key)
- if err != nil {
- respondError(
- w, http.StatusBadRequest,
- errors.New("'key' must be a valid hex or base64 string"))
- return
- }
- }
-
- // Use the key to make progress on root generation
- result, err := core.GenerateRootUpdate(key, req.Nonce)
- if err != nil {
- respondError(w, http.StatusBadRequest, err)
- return
- }
-
- resp := &GenerateRootStatusResponse{
- Complete: result.Progress == result.Required,
- Nonce: req.Nonce,
- Progress: result.Progress,
- Required: result.Required,
- Started: true,
- EncodedRootToken: result.EncodedRootToken,
- PGPFingerprint: result.PGPFingerprint,
- }
-
- respondOk(w, resp)
- })
-}
-
-type GenerateRootInitRequest struct {
- OTP string `json:"otp"`
- PGPKey string `json:"pgp_key"`
-}
-
-type GenerateRootStatusResponse struct {
- Nonce string `json:"nonce"`
- Started bool `json:"started"`
- Progress int `json:"progress"`
- Required int `json:"required"`
- Complete bool `json:"complete"`
- EncodedRootToken string `json:"encoded_root_token"`
- PGPFingerprint string `json:"pgp_fingerprint"`
-}
-
-type GenerateRootUpdateRequest struct {
- Nonce string
- Key string
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_generate_root_test.go b/vendor/github.com/hashicorp/vault/http/sys_generate_root_test.go
deleted file mode 100644
index 41cb2a5..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_generate_root_test.go
+++ /dev/null
@@ -1,418 +0,0 @@
-package http
-
-import (
- "encoding/base64"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/helper/xor"
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysGenerateRootAttempt_Status(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp, err := http.Get(addr + "/v1/sys/generate-root/attempt")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "started": false,
- "progress": json.Number("0"),
- "required": json.Number("3"),
- "complete": false,
- "encoded_root_token": "",
- "pgp_fingerprint": "",
- "nonce": "",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-}
-
-func TestSysGenerateRootAttempt_Setup_OTP(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- otpBytes, err := vault.GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
- otp := base64.StdEncoding.EncodeToString(otpBytes)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
- "otp": otp,
- })
- testResponseStatus(t, resp, 200)
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "started": true,
- "progress": json.Number("0"),
- "required": json.Number("3"),
- "complete": false,
- "encoded_root_token": "",
- "pgp_fingerprint": "",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if actual["nonce"].(string) == "" {
- t.Fatalf("nonce was empty")
- }
- expected["nonce"] = actual["nonce"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-
- resp = testHttpGet(t, token, addr+"/v1/sys/generate-root/attempt")
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "started": true,
- "progress": json.Number("0"),
- "required": json.Number("3"),
- "complete": false,
- "encoded_root_token": "",
- "pgp_fingerprint": "",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if actual["nonce"].(string) == "" {
- t.Fatalf("nonce was empty")
- }
- expected["nonce"] = actual["nonce"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-}
-
-func TestSysGenerateRootAttempt_Setup_PGP(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
- "pgp_key": pgpkeys.TestPubKey1,
- })
- testResponseStatus(t, resp, 200)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/generate-root/attempt")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "started": true,
- "progress": json.Number("0"),
- "required": json.Number("3"),
- "complete": false,
- "encoded_root_token": "",
- "pgp_fingerprint": "816938b8a29146fbe245dd29e7cbaf8e011db793",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if actual["nonce"].(string) == "" {
- t.Fatalf("nonce was empty")
- }
- expected["nonce"] = actual["nonce"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-}
-
-func TestSysGenerateRootAttempt_Cancel(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- otpBytes, err := vault.GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
- otp := base64.StdEncoding.EncodeToString(otpBytes)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
- "otp": otp,
- })
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "started": true,
- "progress": json.Number("0"),
- "required": json.Number("3"),
- "complete": false,
- "encoded_root_token": "",
- "pgp_fingerprint": "",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if actual["nonce"].(string) == "" {
- t.Fatalf("nonce was empty")
- }
- expected["nonce"] = actual["nonce"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-
- resp = testHttpDelete(t, token, addr+"/v1/sys/generate-root/attempt")
- testResponseStatus(t, resp, 204)
-
- resp, err = http.Get(addr + "/v1/sys/generate-root/attempt")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "started": false,
- "progress": json.Number("0"),
- "required": json.Number("3"),
- "complete": false,
- "encoded_root_token": "",
- "pgp_fingerprint": "",
- "nonce": "",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-}
-
-func TestSysGenerateRoot_badKey(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- otpBytes, err := vault.GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
- otp := base64.StdEncoding.EncodeToString(otpBytes)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/update", map[string]interface{}{
- "key": "0123",
- "otp": otp,
- })
- testResponseStatus(t, resp, 400)
-}
-
-func TestSysGenerateRoot_ReAttemptUpdate(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- otpBytes, err := vault.GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
- otp := base64.StdEncoding.EncodeToString(otpBytes)
- resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
- "otp": otp,
- })
- testResponseStatus(t, resp, 200)
-
- resp = testHttpDelete(t, token, addr+"/v1/sys/generate-root/attempt")
- testResponseStatus(t, resp, 204)
-
- resp = testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
- "pgp_key": pgpkeys.TestPubKey1,
- })
-
- testResponseStatus(t, resp, 200)
-}
-
-func TestSysGenerateRoot_Update_OTP(t *testing.T) {
- core, keys, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- otpBytes, err := vault.GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
- otp := base64.StdEncoding.EncodeToString(otpBytes)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
- "otp": otp,
- })
- var rootGenerationStatus map[string]interface{}
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &rootGenerationStatus)
-
- var actual map[string]interface{}
- var expected map[string]interface{}
- for i, key := range keys {
- resp = testHttpPut(t, token, addr+"/v1/sys/generate-root/update", map[string]interface{}{
- "nonce": rootGenerationStatus["nonce"].(string),
- "key": hex.EncodeToString(key),
- })
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "complete": false,
- "nonce": rootGenerationStatus["nonce"].(string),
- "progress": json.Number(fmt.Sprintf("%d", i+1)),
- "required": json.Number(fmt.Sprintf("%d", len(keys))),
- "started": true,
- "pgp_fingerprint": "",
- }
- if i+1 == len(keys) {
- expected["complete"] = true
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- }
-
- if actual["encoded_root_token"] == nil {
- t.Fatalf("no encoded root token found in response")
- }
- expected["encoded_root_token"] = actual["encoded_root_token"]
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-
- decodedToken, err := xor.XORBase64(otp, actual["encoded_root_token"].(string))
- if err != nil {
- t.Fatal(err)
- }
- newRootToken, err := uuid.FormatUUID(decodedToken)
- if err != nil {
- t.Fatal(err)
- }
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "id": newRootToken,
- "display_name": "root",
- "meta": interface{}(nil),
- "num_uses": json.Number("0"),
- "policies": []interface{}{"root"},
- "orphan": true,
- "creation_ttl": json.Number("0"),
- "ttl": json.Number("0"),
- "path": "auth/token/root",
- "explicit_max_ttl": json.Number("0"),
- "expire_time": nil,
- }
-
- resp = testHttpGet(t, newRootToken, addr+"/v1/auth/token/lookup-self")
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- expected["creation_time"] = actual["data"].(map[string]interface{})["creation_time"]
- expected["accessor"] = actual["data"].(map[string]interface{})["accessor"]
-
- if !reflect.DeepEqual(actual["data"], expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual["data"])
- }
-}
-
-func TestSysGenerateRoot_Update_PGP(t *testing.T) {
- core, keys, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
- "pgp_key": pgpkeys.TestPubKey1,
- })
- testResponseStatus(t, resp, 200)
-
- // We need to get the nonce first before we update
- resp, err := http.Get(addr + "/v1/sys/generate-root/attempt")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- var rootGenerationStatus map[string]interface{}
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &rootGenerationStatus)
-
- var actual map[string]interface{}
- var expected map[string]interface{}
- for i, key := range keys {
- resp = testHttpPut(t, token, addr+"/v1/sys/generate-root/update", map[string]interface{}{
- "nonce": rootGenerationStatus["nonce"].(string),
- "key": hex.EncodeToString(key),
- })
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "complete": false,
- "nonce": rootGenerationStatus["nonce"].(string),
- "progress": json.Number(fmt.Sprintf("%d", i+1)),
- "required": json.Number(fmt.Sprintf("%d", len(keys))),
- "started": true,
- "pgp_fingerprint": "816938b8a29146fbe245dd29e7cbaf8e011db793",
- }
- if i+1 == len(keys) {
- expected["complete"] = true
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- }
-
- if actual["encoded_root_token"] == nil {
- t.Fatalf("no encoded root token found in response")
- }
- expected["encoded_root_token"] = actual["encoded_root_token"]
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-
- decodedTokenBuf, err := pgpkeys.DecryptBytes(actual["encoded_root_token"].(string), pgpkeys.TestPrivKey1)
- if err != nil {
- t.Fatal(err)
- }
- if decodedTokenBuf == nil {
- t.Fatal("decoded root token buffer is nil")
- }
-
- newRootToken := decodedTokenBuf.String()
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "id": newRootToken,
- "display_name": "root",
- "meta": interface{}(nil),
- "num_uses": json.Number("0"),
- "policies": []interface{}{"root"},
- "orphan": true,
- "creation_ttl": json.Number("0"),
- "ttl": json.Number("0"),
- "path": "auth/token/root",
- "explicit_max_ttl": json.Number("0"),
- "expire_time": nil,
- }
-
- resp = testHttpGet(t, newRootToken, addr+"/v1/auth/token/lookup-self")
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- expected["creation_time"] = actual["data"].(map[string]interface{})["creation_time"]
- expected["accessor"] = actual["data"].(map[string]interface{})["accessor"]
-
- if !reflect.DeepEqual(actual["data"], expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual["data"])
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_health.go b/vendor/github.com/hashicorp/vault/http/sys_health.go
deleted file mode 100644
index 40797be..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_health.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package http
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "strconv"
- "time"
-
- "github.com/hashicorp/vault/vault"
- "github.com/hashicorp/vault/version"
-)
-
-func handleSysHealth(core *vault.Core) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- switch r.Method {
- case "GET":
- handleSysHealthGet(core, w, r)
- case "HEAD":
- handleSysHealthHead(core, w, r)
- default:
- respondError(w, http.StatusMethodNotAllowed, nil)
- }
- })
-}
-
-func fetchStatusCode(r *http.Request, field string) (int, bool, bool) {
- var err error
- statusCode := http.StatusOK
- if statusCodeStr, statusCodeOk := r.URL.Query()[field]; statusCodeOk {
- statusCode, err = strconv.Atoi(statusCodeStr[0])
- if err != nil || len(statusCodeStr) < 1 {
- return http.StatusBadRequest, false, false
- }
- return statusCode, true, true
- }
- return statusCode, false, true
-}
-
-func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request) {
- code, body, err := getSysHealth(core, r)
- if err != nil {
- respondError(w, http.StatusInternalServerError, nil)
- return
- }
-
- if body == nil {
- respondError(w, code, nil)
- return
- }
-
- w.Header().Add("Content-Type", "application/json")
- w.WriteHeader(code)
-
- // Generate the response
- enc := json.NewEncoder(w)
- enc.Encode(body)
-}
-
-func handleSysHealthHead(core *vault.Core, w http.ResponseWriter, r *http.Request) {
- code, body, err := getSysHealth(core, r)
- if err != nil {
- code = http.StatusInternalServerError
- }
-
- if body != nil {
- w.Header().Add("Content-Type", "application/json")
- }
- w.WriteHeader(code)
-}
-
-func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, error) {
- // Check if being a standby is allowed for the purpose of a 200 OK
- _, standbyOK := r.URL.Query()["standbyok"]
-
- uninitCode := http.StatusNotImplemented
- if code, found, ok := fetchStatusCode(r, "uninitcode"); !ok {
- return http.StatusBadRequest, nil, nil
- } else if found {
- uninitCode = code
- }
-
- sealedCode := http.StatusServiceUnavailable
- if code, found, ok := fetchStatusCode(r, "sealedcode"); !ok {
- return http.StatusBadRequest, nil, nil
- } else if found {
- sealedCode = code
- }
-
- standbyCode := http.StatusTooManyRequests // Consul warning code
- if code, found, ok := fetchStatusCode(r, "standbycode"); !ok {
- return http.StatusBadRequest, nil, nil
- } else if found {
- standbyCode = code
- }
-
- activeCode := http.StatusOK
- if code, found, ok := fetchStatusCode(r, "activecode"); !ok {
- return http.StatusBadRequest, nil, nil
- } else if found {
- activeCode = code
- }
-
- // Check system status
- sealed, _ := core.Sealed()
- standby, _ := core.Standby()
- init, err := core.Initialized()
- if err != nil {
- return http.StatusInternalServerError, nil, err
- }
-
- // Determine the status code
- code := activeCode
- switch {
- case !init:
- code = uninitCode
- case sealed:
- code = sealedCode
- case !standbyOK && standby:
- code = standbyCode
- }
-
- // Fetch the local cluster name and identifier
- var clusterName, clusterID string
- if !sealed {
- cluster, err := core.Cluster()
- if err != nil {
- return http.StatusInternalServerError, nil, err
- }
- if cluster == nil {
- return http.StatusInternalServerError, nil, fmt.Errorf("failed to fetch cluster details")
- }
- clusterName = cluster.Name
- clusterID = cluster.ID
- }
-
- // Format the body
- body := &HealthResponse{
- Initialized: init,
- Sealed: sealed,
- Standby: standby,
- ServerTimeUTC: time.Now().UTC().Unix(),
- Version: version.GetVersion().VersionNumber(),
- ClusterName: clusterName,
- ClusterID: clusterID,
- }
- return code, body, nil
-}
-
-type HealthResponse struct {
- Initialized bool `json:"initialized"`
- Sealed bool `json:"sealed"`
- Standby bool `json:"standby"`
- ServerTimeUTC int64 `json:"server_time_utc"`
- Version string `json:"version"`
- ClusterName string `json:"cluster_name,omitempty"`
- ClusterID string `json:"cluster_id,omitempty"`
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_health_test.go b/vendor/github.com/hashicorp/vault/http/sys_health_test.go
deleted file mode 100644
index 056ea5f..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_health_test.go
+++ /dev/null
@@ -1,255 +0,0 @@
-package http
-
-import (
- "io/ioutil"
-
- "net/http"
- "net/url"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysHealth_get(t *testing.T) {
- core := vault.TestCore(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- resp, err := http.Get(addr + "/v1/sys/health")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "initialized": false,
- "sealed": true,
- "standby": true,
- }
- testResponseStatus(t, resp, 501)
- testResponseBody(t, resp, &actual)
- expected["server_time_utc"] = actual["server_time_utc"]
- expected["version"] = actual["version"]
- if actual["cluster_name"] == nil {
- delete(expected, "cluster_name")
- } else {
- expected["cluster_name"] = actual["cluster_name"]
- }
- if actual["cluster_id"] == nil {
- delete(expected, "cluster_id")
- } else {
- expected["cluster_id"] = actual["cluster_id"]
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
- }
-
- keys, _ := vault.TestCoreInit(t, core)
- resp, err = http.Get(addr + "/v1/sys/health")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "initialized": true,
- "sealed": true,
- "standby": true,
- }
- testResponseStatus(t, resp, 503)
- testResponseBody(t, resp, &actual)
- expected["server_time_utc"] = actual["server_time_utc"]
- expected["version"] = actual["version"]
- if actual["cluster_name"] == nil {
- delete(expected, "cluster_name")
- } else {
- expected["cluster_name"] = actual["cluster_name"]
- }
- if actual["cluster_id"] == nil {
- delete(expected, "cluster_id")
- } else {
- expected["cluster_id"] = actual["cluster_id"]
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
- }
-
- for _, key := range keys {
- if _, err := vault.TestCoreUnseal(core, vault.TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
- resp, err = http.Get(addr + "/v1/sys/health")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "initialized": true,
- "sealed": false,
- "standby": false,
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["server_time_utc"] = actual["server_time_utc"]
- expected["version"] = actual["version"]
- if actual["cluster_name"] == nil {
- delete(expected, "cluster_name")
- } else {
- expected["cluster_name"] = actual["cluster_name"]
- }
- if actual["cluster_id"] == nil {
- delete(expected, "cluster_id")
- } else {
- expected["cluster_id"] = actual["cluster_id"]
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
- }
-
-}
-
-func TestSysHealth_customcodes(t *testing.T) {
- core := vault.TestCore(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- queryurl, err := url.Parse(addr + "/v1/sys/health?uninitcode=581&sealedcode=523&activecode=202")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- resp, err := http.Get(queryurl.String())
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "initialized": false,
- "sealed": true,
- "standby": true,
- }
- testResponseStatus(t, resp, 581)
- testResponseBody(t, resp, &actual)
-
- expected["server_time_utc"] = actual["server_time_utc"]
- expected["version"] = actual["version"]
- if actual["cluster_name"] == nil {
- delete(expected, "cluster_name")
- } else {
- expected["cluster_name"] = actual["cluster_name"]
- }
- if actual["cluster_id"] == nil {
- delete(expected, "cluster_id")
- } else {
- expected["cluster_id"] = actual["cluster_id"]
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
- }
-
- keys, _ := vault.TestCoreInit(t, core)
- resp, err = http.Get(queryurl.String())
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "initialized": true,
- "sealed": true,
- "standby": true,
- }
- testResponseStatus(t, resp, 523)
- testResponseBody(t, resp, &actual)
-
- expected["server_time_utc"] = actual["server_time_utc"]
- expected["version"] = actual["version"]
- if actual["cluster_name"] == nil {
- delete(expected, "cluster_name")
- } else {
- expected["cluster_name"] = actual["cluster_name"]
- }
- if actual["cluster_id"] == nil {
- delete(expected, "cluster_id")
- } else {
- expected["cluster_id"] = actual["cluster_id"]
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
- }
-
- for _, key := range keys {
- if _, err := vault.TestCoreUnseal(core, vault.TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
- resp, err = http.Get(queryurl.String())
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "initialized": true,
- "sealed": false,
- "standby": false,
- }
- testResponseStatus(t, resp, 202)
- testResponseBody(t, resp, &actual)
- expected["server_time_utc"] = actual["server_time_utc"]
- expected["version"] = actual["version"]
- if actual["cluster_name"] == nil {
- delete(expected, "cluster_name")
- } else {
- expected["cluster_name"] = actual["cluster_name"]
- }
- if actual["cluster_id"] == nil {
- delete(expected, "cluster_id")
- } else {
- expected["cluster_id"] = actual["cluster_id"]
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
- }
-}
-
-func TestSysHealth_head(t *testing.T) {
- core, _, _ := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- testData := []struct {
- uri string
- code int
- }{
- {"", 200},
- {"?activecode=503", 503},
- {"?activecode=notacode", 400},
- }
-
- for _, tt := range testData {
- queryurl, err := url.Parse(addr + "/v1/sys/health" + tt.uri)
- if err != nil {
- t.Fatalf("err on %v: %s", queryurl, err)
- }
- resp, err := http.Head(queryurl.String())
- if err != nil {
- t.Fatalf("err on %v: %s", queryurl, err)
- }
-
- if resp.StatusCode != tt.code {
- t.Fatalf("HEAD %v expected code %d, got %d.", queryurl, tt.code, resp.StatusCode)
- }
-
- data, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatalf("err on %v: %s", queryurl, err)
- }
- if len(data) > 0 {
- t.Fatalf("HEAD %v expected no body, received \"%v\".", queryurl, data)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_init.go b/vendor/github.com/hashicorp/vault/http/sys_init.go
deleted file mode 100644
index c29b040..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_init.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package http
-
-import (
- "encoding/base64"
- "encoding/hex"
- "fmt"
- "net/http"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/vault"
-)
-
-func handleSysInit(core *vault.Core) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- switch r.Method {
- case "GET":
- handleSysInitGet(core, w, r)
- case "PUT", "POST":
- handleSysInitPut(core, w, r)
- default:
- respondError(w, http.StatusMethodNotAllowed, nil)
- }
- })
-}
-
-func handleSysInitGet(core *vault.Core, w http.ResponseWriter, r *http.Request) {
- init, err := core.Initialized()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
-
- respondOk(w, &InitStatusResponse{
- Initialized: init,
- })
-}
-
-func handleSysInitPut(core *vault.Core, w http.ResponseWriter, r *http.Request) {
- // Parse the request
- var req InitRequest
- if err := parseRequest(r, w, &req); err != nil {
- respondError(w, http.StatusBadRequest, err)
- return
- }
-
- // Initialize
- barrierConfig := &vault.SealConfig{
- SecretShares: req.SecretShares,
- SecretThreshold: req.SecretThreshold,
- StoredShares: req.StoredShares,
- PGPKeys: req.PGPKeys,
- }
-
- recoveryConfig := &vault.SealConfig{
- SecretShares: req.RecoveryShares,
- SecretThreshold: req.RecoveryThreshold,
- PGPKeys: req.RecoveryPGPKeys,
- }
-
- if core.SealAccess().StoredKeysSupported() {
- if barrierConfig.SecretShares != 1 {
- respondError(w, http.StatusBadRequest, fmt.Errorf("secret shares must be 1"))
- return
- }
- if barrierConfig.SecretThreshold != barrierConfig.SecretShares {
- respondError(w, http.StatusBadRequest, fmt.Errorf("secret threshold must be same as secret shares"))
- return
- }
- if barrierConfig.StoredShares != barrierConfig.SecretShares {
- respondError(w, http.StatusBadRequest, fmt.Errorf("stored shares must be same as secret shares"))
- return
- }
- if barrierConfig.PGPKeys != nil && len(barrierConfig.PGPKeys) > 0 {
- respondError(w, http.StatusBadRequest, fmt.Errorf("PGP keys not supported when storing shares"))
- return
- }
- } else {
- if barrierConfig.StoredShares > 0 {
- respondError(w, http.StatusBadRequest, fmt.Errorf("stored keys are not supported"))
- return
- }
- }
-
- if len(barrierConfig.PGPKeys) > 0 && len(barrierConfig.PGPKeys) != barrierConfig.SecretShares-barrierConfig.StoredShares {
- respondError(w, http.StatusBadRequest, fmt.Errorf("incorrect number of PGP keys"))
- return
- }
-
- if core.SealAccess().RecoveryKeySupported() {
- if len(recoveryConfig.PGPKeys) > 0 && len(recoveryConfig.PGPKeys) != recoveryConfig.SecretShares-recoveryConfig.StoredShares {
- respondError(w, http.StatusBadRequest, fmt.Errorf("incorrect number of PGP keys for recovery"))
- return
- }
- }
-
- initParams := &vault.InitParams{
- BarrierConfig: barrierConfig,
- RecoveryConfig: recoveryConfig,
- RootTokenPGPKey: req.RootTokenPGPKey,
- }
-
- result, initErr := core.Initialize(initParams)
- if initErr != nil {
- if !errwrap.ContainsType(initErr, new(vault.NonFatalError)) {
- respondError(w, http.StatusBadRequest, initErr)
- return
- } else {
- // Add a warnings field? The error will be logged in the vault log
- // already.
- }
- }
-
- // Encode the keys
- keys := make([]string, 0, len(result.SecretShares))
- keysB64 := make([]string, 0, len(result.SecretShares))
- for _, k := range result.SecretShares {
- keys = append(keys, hex.EncodeToString(k))
- keysB64 = append(keysB64, base64.StdEncoding.EncodeToString(k))
- }
-
- resp := &InitResponse{
- Keys: keys,
- KeysB64: keysB64,
- RootToken: result.RootToken,
- }
-
- if len(result.RecoveryShares) > 0 {
- resp.RecoveryKeys = make([]string, 0, len(result.RecoveryShares))
- resp.RecoveryKeysB64 = make([]string, 0, len(result.RecoveryShares))
- for _, k := range result.RecoveryShares {
- resp.RecoveryKeys = append(resp.RecoveryKeys, hex.EncodeToString(k))
- resp.RecoveryKeysB64 = append(resp.RecoveryKeysB64, base64.StdEncoding.EncodeToString(k))
- }
- }
-
- core.UnsealWithStoredKeys()
-
- respondOk(w, resp)
-}
-
-type InitRequest struct {
- SecretShares int `json:"secret_shares"`
- SecretThreshold int `json:"secret_threshold"`
- StoredShares int `json:"stored_shares"`
- PGPKeys []string `json:"pgp_keys"`
- RecoveryShares int `json:"recovery_shares"`
- RecoveryThreshold int `json:"recovery_threshold"`
- RecoveryPGPKeys []string `json:"recovery_pgp_keys"`
- RootTokenPGPKey string `json:"root_token_pgp_key"`
-}
-
-type InitResponse struct {
- Keys []string `json:"keys"`
- KeysB64 []string `json:"keys_base64"`
- RecoveryKeys []string `json:"recovery_keys,omitempty"`
- RecoveryKeysB64 []string `json:"recovery_keys_base64,omitempty"`
- RootToken string `json:"root_token"`
-}
-
-type InitStatusResponse struct {
- Initialized bool `json:"initialized"`
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_init_test.go b/vendor/github.com/hashicorp/vault/http/sys_init_test.go
deleted file mode 100644
index 9dfa776..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_init_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package http
-
-import (
- "encoding/hex"
- "net/http"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysInit_get(t *testing.T) {
- core := vault.TestCore(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- {
- // Pre-init
- resp, err := http.Get(addr + "/v1/sys/init")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "initialized": false,
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
- }
-
- vault.TestCoreInit(t, core)
-
- {
- // Post-init
- resp, err := http.Get(addr + "/v1/sys/init")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "initialized": true,
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
- }
-}
-
-// Test to check if the API errors out when wrong number of PGP keys are
-// supplied
-func TestSysInit_pgpKeysEntries(t *testing.T) {
- core := vault.TestCore(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{
- "secret_shares": 5,
- "secret_threhold": 3,
- "pgp_keys": []string{"pgpkey1"},
- })
- testResponseStatus(t, resp, 400)
-}
-
-// Test to check if the API errors out when wrong number of PGP keys are
-// supplied for recovery config
-func TestSysInit_pgpKeysEntriesForRecovery(t *testing.T) {
- core := vault.TestCoreNewSeal(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{
- "secret_shares": 1,
- "secret_threshold": 1,
- "stored_shares": 1,
- "recovery_shares": 5,
- "recovery_threshold": 3,
- "recovery_pgp_keys": []string{"pgpkey1"},
- })
- testResponseStatus(t, resp, 400)
-}
-
-func TestSysInit_put(t *testing.T) {
- core := vault.TestCore(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{
- "secret_shares": 5,
- "secret_threshold": 3,
- })
-
- var actual map[string]interface{}
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- keysRaw, ok := actual["keys"]
- if !ok {
- t.Fatalf("no keys: %#v", actual)
- }
-
- if _, ok := actual["root_token"]; !ok {
- t.Fatal("no root token")
- }
-
- for _, key := range keysRaw.([]interface{}) {
- keySlice, err := hex.DecodeString(key.(string))
- if err != nil {
- t.Fatalf("bad: %s", err)
- }
-
- if _, err := core.Unseal(keySlice); err != nil {
- t.Fatalf("bad: %s", err)
- }
- }
-
- seal, err := core.Sealed()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if seal {
- t.Fatal("should not be sealed")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_leader.go b/vendor/github.com/hashicorp/vault/http/sys_leader.go
deleted file mode 100644
index 98eb04a..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_leader.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package http
-
-import (
- "net/http"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/vault"
-)
-
-func handleSysLeader(core *vault.Core) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- switch r.Method {
- case "GET":
- handleSysLeaderGet(core, w, r)
- default:
- respondError(w, http.StatusMethodNotAllowed, nil)
- }
- })
-}
-
-func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request) {
- haEnabled := true
- isLeader, address, clusterAddr, err := core.Leader()
- if errwrap.Contains(err, vault.ErrHANotEnabled.Error()) {
- haEnabled = false
- err = nil
- }
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
-
- respondOk(w, &LeaderResponse{
- HAEnabled: haEnabled,
- IsSelf: isLeader,
- LeaderAddress: address,
- LeaderClusterAddress: clusterAddr,
- })
-}
-
-type LeaderResponse struct {
- HAEnabled bool `json:"ha_enabled"`
- IsSelf bool `json:"is_self"`
- LeaderAddress string `json:"leader_address"`
- LeaderClusterAddress string `json:"leader_cluster_address"`
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_leader_test.go b/vendor/github.com/hashicorp/vault/http/sys_leader_test.go
deleted file mode 100644
index afe0dbd..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_leader_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package http
-
-import (
- "net/http"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysLeader_get(t *testing.T) {
- core, _, _ := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- resp, err := http.Get(addr + "/v1/sys/leader")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "ha_enabled": false,
- "is_self": false,
- "leader_address": "",
- "leader_cluster_address": "",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_lease_test.go b/vendor/github.com/hashicorp/vault/http/sys_lease_test.go
deleted file mode 100644
index de1dc6c..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_lease_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package http
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysRenew(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- // write secret
- resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
- "data": "bar",
- "lease": "1h",
- })
- testResponseStatus(t, resp, 204)
-
- // read secret
- resp = testHttpGet(t, token, addr+"/v1/secret/foo")
- var result struct {
- LeaseID string `json:"lease_id"`
- }
- if err := jsonutil.DecodeJSONFromReader(resp.Body, &result); err != nil {
- t.Fatalf("bad: %s", err)
- }
-
- var renewResult struct {
- LeaseID string `json:"lease_id"`
- Data map[string]interface{} `json:"data"`
- }
- resp = testHttpPut(t, token, addr+"/v1/sys/renew/"+result.LeaseID, nil)
- testResponseStatus(t, resp, 200)
- if err := jsonutil.DecodeJSONFromReader(resp.Body, &renewResult); err != nil {
- t.Fatal(err)
- }
- if result.LeaseID != renewResult.LeaseID {
- t.Fatal("lease id changed in renew request")
- }
-
- resp = testHttpPut(t, token, addr+"/v1/sys/leases/renew/"+result.LeaseID, nil)
- testResponseStatus(t, resp, 200)
- if err := jsonutil.DecodeJSONFromReader(resp.Body, &renewResult); err != nil {
- t.Fatal(err)
- }
- if result.LeaseID != renewResult.LeaseID {
- t.Fatal("lease id changed in renew request")
- }
-}
-
-func TestSysRevoke(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/revoke/secret/foo/1234", nil)
- testResponseStatus(t, resp, 204)
-}
-
-func TestSysRevokePrefix(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/revoke-prefix/secret/foo/1234", nil)
- testResponseStatus(t, resp, 204)
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_mount_test.go b/vendor/github.com/hashicorp/vault/http/sys_mount_test.go
deleted file mode 100644
index 57f6dd7..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_mount_test.go
+++ /dev/null
@@ -1,842 +0,0 @@
-package http
-
-import (
- "encoding/json"
- "reflect"
- "testing"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysMounts(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpGet(t, token, addr+"/v1/sys/mounts")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- },
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- for k, v := range actual["data"].(map[string]interface{}) {
- if v.(map[string]interface{})["accessor"] == "" {
- t.Fatalf("no accessor from %s", k)
- }
- expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
-
-func TestSysMount(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
- "type": "kv",
- "description": "foo",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "foo/": map[string]interface{}{
- "description": "foo",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- },
- "foo/": map[string]interface{}{
- "description": "foo",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- for k, v := range actual["data"].(map[string]interface{}) {
- if v.(map[string]interface{})["accessor"] == "" {
- t.Fatalf("no accessor from %s", k)
- }
- expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
-
-func TestSysMount_put(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
- "type": "kv",
- "description": "foo",
- })
- testResponseStatus(t, resp, 204)
-
- // The TestSysMount test tests the thing is actually created. See that test
- // for more info.
-}
-
-func TestSysRemount(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
- "type": "kv",
- "description": "foo",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{
- "from": "foo",
- "to": "bar",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "bar/": map[string]interface{}{
- "description": "foo",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- },
- "bar/": map[string]interface{}{
- "description": "foo",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- for k, v := range actual["data"].(map[string]interface{}) {
- if v.(map[string]interface{})["accessor"] == "" {
- t.Fatalf("no accessor from %s", k)
- }
- expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
-
-func TestSysUnmount(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
- "type": "kv",
- "description": "foo",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpDelete(t, token, addr+"/v1/sys/mounts/foo")
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- },
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- for k, v := range actual["data"].(map[string]interface{}) {
- if v.(map[string]interface{})["accessor"] == "" {
- t.Fatalf("no accessor from %s", k)
- }
- expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
-
-func TestSysTuneMount(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
- "type": "kv",
- "description": "foo",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "foo/": map[string]interface{}{
- "description": "foo",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- },
- "foo/": map[string]interface{}{
- "description": "foo",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- for k, v := range actual["data"].(map[string]interface{}) {
- if v.(map[string]interface{})["accessor"] == "" {
- t.Fatalf("no accessor from %s", k)
- }
- expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-
- // Shorter than system default
- resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
- "default_lease_ttl": "72h",
- })
- testResponseStatus(t, resp, 204)
-
- // Longer than system max
- resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
- "default_lease_ttl": "72000h",
- })
- testResponseStatus(t, resp, 204)
-
- // Longer than system default
- resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
- "max_lease_ttl": "72000h",
- })
- testResponseStatus(t, resp, 204)
-
- // Longer than backend max
- resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
- "default_lease_ttl": "72001h",
- })
- testResponseStatus(t, resp, 400)
-
- // Shorter than backend default
- resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
- "max_lease_ttl": "1h",
- })
- testResponseStatus(t, resp, 400)
-
- // Shorter than backend max, longer than system max
- resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
- "default_lease_ttl": "71999h",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
- expected = map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "foo/": map[string]interface{}{
- "description": "foo",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("259196400"),
- "max_lease_ttl": json.Number("259200000"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- },
- "foo/": map[string]interface{}{
- "description": "foo",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("259196400"),
- "max_lease_ttl": json.Number("259200000"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "secret/": map[string]interface{}{
- "description": "key/value secret storage",
- "type": "kv",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "sys/": map[string]interface{}{
- "description": "system endpoints used for control, policy and debugging",
- "type": "system",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": false,
- },
- "cubbyhole/": map[string]interface{}{
- "description": "per-token private secret storage",
- "type": "cubbyhole",
- "config": map[string]interface{}{
- "default_lease_ttl": json.Number("0"),
- "max_lease_ttl": json.Number("0"),
- "force_no_cache": false,
- },
- "local": true,
- },
- }
-
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- for k, v := range actual["data"].(map[string]interface{}) {
- if v.(map[string]interface{})["accessor"] == "" {
- t.Fatalf("no accessor from %s", k)
- }
- expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual)
- }
-
- // Check simple configuration endpoint
- resp = testHttpGet(t, token, addr+"/v1/sys/mounts/foo/tune")
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "default_lease_ttl": json.Number("259196400"),
- "max_lease_ttl": json.Number("259200000"),
- "force_no_cache": false,
- },
- "default_lease_ttl": json.Number("259196400"),
- "max_lease_ttl": json.Number("259200000"),
- "force_no_cache": false,
- }
-
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual)
- }
-
- // Set a low max
- resp = testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{
- "default_lease_ttl": "40s",
- "max_lease_ttl": "80s",
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/mounts/secret/tune")
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "default_lease_ttl": json.Number("40"),
- "max_lease_ttl": json.Number("80"),
- "force_no_cache": false,
- },
- "default_lease_ttl": json.Number("40"),
- "max_lease_ttl": json.Number("80"),
- "force_no_cache": false,
- }
-
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual)
- }
-
- // First try with lease above backend max
- resp = testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
- "data": "bar",
- "ttl": "28347h",
- })
- testResponseStatus(t, resp, 204)
-
- // read secret
- resp = testHttpGet(t, token, addr+"/v1/secret/foo")
- var result struct {
- LeaseID string `json:"lease_id" structs:"lease_id"`
- LeaseDuration int `json:"lease_duration" structs:"lease_duration"`
- }
-
- testResponseBody(t, resp, &result)
-
- expected = map[string]interface{}{
- "lease_duration": int(80),
- "lease_id": result.LeaseID,
- }
-
- if !reflect.DeepEqual(structs.Map(result), expected) {
- t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, structs.Map(result))
- }
-
- // Now with lease TTL unspecified
- resp = testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
- "data": "bar",
- })
- testResponseStatus(t, resp, 204)
-
- // read secret
- resp = testHttpGet(t, token, addr+"/v1/secret/foo")
-
- testResponseBody(t, resp, &result)
-
- expected = map[string]interface{}{
- "lease_duration": int(40),
- "lease_id": result.LeaseID,
- }
-
- if !reflect.DeepEqual(structs.Map(result), expected) {
- t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, structs.Map(result))
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go b/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go
deleted file mode 100644
index 53e4996..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package http
-
-import (
- "fmt"
- "math/rand"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysMountConfig(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- config := api.DefaultConfig()
- config.Address = addr
-
- client, err := api.NewClient(config)
- if err != nil {
- t.Fatal(err)
- }
- client.SetToken(token)
-
- // Set up a test mount
- path, err := testMount(client)
- if err != nil {
- t.Fatal(err)
- }
- defer client.Sys().Unmount(path)
-
- // Get config info for this mount
- mountConfig, err := client.Sys().MountConfig(path)
- if err != nil {
- t.Fatal(err)
- }
-
- expectedDefaultTTL := 2764800
- if mountConfig.DefaultLeaseTTL != expectedDefaultTTL {
- t.Fatalf("Expected default lease TTL: %d, got %d",
- expectedDefaultTTL, mountConfig.DefaultLeaseTTL)
- }
-
- expectedMaxTTL := 2764800
- if mountConfig.MaxLeaseTTL != expectedMaxTTL {
- t.Fatalf("Expected default lease TTL: %d, got %d",
- expectedMaxTTL, mountConfig.MaxLeaseTTL)
- }
-
- if mountConfig.ForceNoCache == true {
- t.Fatalf("did not expect force cache")
- }
-}
-
-// testMount sets up a test mount of a kv backend w/ a random path; caller
-// is responsible for unmounting
-func testMount(client *api.Client) (string, error) {
- rand.Seed(time.Now().UTC().UnixNano())
- randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
- path := fmt.Sprintf("testmount-%d", randInt)
- err := client.Sys().Mount(path, &api.MountInput{Type: "kv"})
- return path, err
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_policy_test.go b/vendor/github.com/hashicorp/vault/http/sys_policy_test.go
deleted file mode 100644
index 42c1e4b..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_policy_test.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package http
-
-import (
- "encoding/json"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysPolicies(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpGet(t, token, addr+"/v1/sys/policy")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "policies": []interface{}{"default", "root"},
- "keys": []interface{}{"default", "root"},
- },
- "policies": []interface{}{"default", "root"},
- "keys": []interface{}{"default", "root"},
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
- }
-}
-
-func TestSysReadPolicy(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpGet(t, token, addr+"/v1/sys/policy/root")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "name": "root",
- "rules": "",
- },
- "name": "root",
- "rules": "",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
- }
-}
-
-func TestSysWritePolicy(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{
- "rules": `path "*" { capabilities = ["read"] }`,
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/policy")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "policies": []interface{}{"default", "foo", "root"},
- "keys": []interface{}{"default", "foo", "root"},
- },
- "policies": []interface{}{"default", "foo", "root"},
- "keys": []interface{}{"default", "foo", "root"},
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
- }
-
- resp = testHttpPost(t, token, addr+"/v1/sys/policy/response-wrapping", map[string]interface{}{
- "rules": ``,
- })
- testResponseStatus(t, resp, 400)
-}
-
-func TestSysDeletePolicy(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{
- "rules": `path "*" { capabilities = ["read"] }`,
- })
- testResponseStatus(t, resp, 204)
-
- resp = testHttpDelete(t, token, addr+"/v1/sys/policy/foo")
- testResponseStatus(t, resp, 204)
-
- // Also attempt to delete these since they should not be allowed (ignore
- // responses, if they exist later that's sufficient)
- resp = testHttpDelete(t, token, addr+"/v1/sys/policy/default")
- resp = testHttpDelete(t, token, addr+"/v1/sys/policy/response-wrapping")
-
- resp = testHttpGet(t, token, addr+"/v1/sys/policy")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "policies": []interface{}{"default", "root"},
- "keys": []interface{}{"default", "root"},
- },
- "policies": []interface{}{"default", "root"},
- "keys": []interface{}{"default", "root"},
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- expected["request_id"] = actual["request_id"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_rekey.go b/vendor/github.com/hashicorp/vault/http/sys_rekey.go
deleted file mode 100644
index 9f26f3b..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_rekey.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package http
-
-import (
- "encoding/base64"
- "encoding/hex"
- "errors"
- "fmt"
- "net/http"
-
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/vault"
-)
-
-func handleSysRekeyInit(core *vault.Core, recovery bool) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- standby, _ := core.Standby()
- if standby {
- respondStandby(core, w, r.URL)
- return
- }
-
- repState := core.ReplicationState()
- if repState.HasState(consts.ReplicationPerformanceSecondary) {
- respondError(w, http.StatusBadRequest,
- fmt.Errorf("rekeying can only be performed on the primary cluster when replication is activated"))
- return
- }
-
- switch {
- case recovery && !core.SealAccess().RecoveryKeySupported():
- respondError(w, http.StatusBadRequest, fmt.Errorf("recovery rekeying not supported"))
- case r.Method == "GET":
- handleSysRekeyInitGet(core, recovery, w, r)
- case r.Method == "POST" || r.Method == "PUT":
- handleSysRekeyInitPut(core, recovery, w, r)
- case r.Method == "DELETE":
- handleSysRekeyInitDelete(core, recovery, w, r)
- default:
- respondError(w, http.StatusMethodNotAllowed, nil)
- }
- })
-}
-
-func handleSysRekeyInitGet(core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) {
- barrierConfig, err := core.SealAccess().BarrierConfig()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- if barrierConfig == nil {
- respondError(w, http.StatusBadRequest, fmt.Errorf(
- "server is not yet initialized"))
- return
- }
-
- // Get the rekey configuration
- rekeyConf, err := core.RekeyConfig(recovery)
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
-
- // Get the progress
- progress, err := core.RekeyProgress(recovery)
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
-
- sealThreshold, err := core.RekeyThreshold(recovery)
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
-
- // Format the status
- status := &RekeyStatusResponse{
- Started: false,
- T: 0,
- N: 0,
- Progress: progress,
- Required: sealThreshold,
- }
- if rekeyConf != nil {
- status.Nonce = rekeyConf.Nonce
- status.Started = true
- status.T = rekeyConf.SecretThreshold
- status.N = rekeyConf.SecretShares
- if rekeyConf.PGPKeys != nil && len(rekeyConf.PGPKeys) != 0 {
- pgpFingerprints, err := pgpkeys.GetFingerprints(rekeyConf.PGPKeys, nil)
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- status.PGPFingerprints = pgpFingerprints
- status.Backup = rekeyConf.Backup
- }
- }
- respondOk(w, status)
-}
-
-func handleSysRekeyInitPut(core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) {
- // Parse the request
- var req RekeyRequest
- if err := parseRequest(r, w, &req); err != nil {
- respondError(w, http.StatusBadRequest, err)
- return
- }
-
- if req.Backup && len(req.PGPKeys) == 0 {
- respondError(w, http.StatusBadRequest, fmt.Errorf("cannot request a backup of the new keys without providing PGP keys for encryption"))
- return
- }
-
- // Right now we don't support this, but the rest of the code is ready for
- // when we do, hence the check below for this to be false if
- // StoredShares is greater than zero
- if core.SealAccess().StoredKeysSupported() && !recovery {
- respondError(w, http.StatusBadRequest, fmt.Errorf("rekeying of barrier not supported when stored key support is available"))
- return
- }
-
- if len(req.PGPKeys) > 0 && len(req.PGPKeys) != req.SecretShares-req.StoredShares {
- respondError(w, http.StatusBadRequest, fmt.Errorf("incorrect number of PGP keys for rekey"))
- return
- }
-
- // Initialize the rekey
- err := core.RekeyInit(&vault.SealConfig{
- SecretShares: req.SecretShares,
- SecretThreshold: req.SecretThreshold,
- StoredShares: req.StoredShares,
- PGPKeys: req.PGPKeys,
- Backup: req.Backup,
- }, recovery)
- if err != nil {
- respondError(w, http.StatusBadRequest, err)
- return
- }
-
- handleSysRekeyInitGet(core, recovery, w, r)
-}
-
-func handleSysRekeyInitDelete(core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) {
- err := core.RekeyCancel(recovery)
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- respondOk(w, nil)
-}
-
-func handleSysRekeyUpdate(core *vault.Core, recovery bool) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- standby, _ := core.Standby()
- if standby {
- respondStandby(core, w, r.URL)
- return
- }
-
- // Parse the request
- var req RekeyUpdateRequest
- if err := parseRequest(r, w, &req); err != nil {
- respondError(w, http.StatusBadRequest, err)
- return
- }
- if req.Key == "" {
- respondError(
- w, http.StatusBadRequest,
- errors.New("'key' must be specified in request body as JSON"))
- return
- }
-
- // Decode the key, which is base64 or hex encoded
- min, max := core.BarrierKeyLength()
- key, err := hex.DecodeString(req.Key)
- // We check min and max here to ensure that a string that is base64
- // encoded but also valid hex will not be valid and we instead base64
- // decode it
- if err != nil || len(key) < min || len(key) > max {
- key, err = base64.StdEncoding.DecodeString(req.Key)
- if err != nil {
- respondError(
- w, http.StatusBadRequest,
- errors.New("'key' must be a valid hex or base64 string"))
- return
- }
- }
-
- // Use the key to make progress on rekey
- result, err := core.RekeyUpdate(key, req.Nonce, recovery)
- if err != nil {
- respondError(w, http.StatusBadRequest, err)
- return
- }
-
- // Format the response
- resp := &RekeyUpdateResponse{}
- if result != nil {
- resp.Complete = true
- resp.Nonce = req.Nonce
- resp.Backup = result.Backup
- resp.PGPFingerprints = result.PGPFingerprints
-
- // Encode the keys
- keys := make([]string, 0, len(result.SecretShares))
- keysB64 := make([]string, 0, len(result.SecretShares))
- for _, k := range result.SecretShares {
- keys = append(keys, hex.EncodeToString(k))
- keysB64 = append(keysB64, base64.StdEncoding.EncodeToString(k))
- }
- resp.Keys = keys
- resp.KeysB64 = keysB64
- respondOk(w, resp)
- } else {
- handleSysRekeyInitGet(core, recovery, w, r)
- }
- })
-}
-
-type RekeyRequest struct {
- SecretShares int `json:"secret_shares"`
- SecretThreshold int `json:"secret_threshold"`
- StoredShares int `json:"stored_shares"`
- PGPKeys []string `json:"pgp_keys"`
- Backup bool `json:"backup"`
-}
-
-type RekeyStatusResponse struct {
- Nonce string `json:"nonce"`
- Started bool `json:"started"`
- T int `json:"t"`
- N int `json:"n"`
- Progress int `json:"progress"`
- Required int `json:"required"`
- PGPFingerprints []string `json:"pgp_fingerprints"`
- Backup bool `json:"backup"`
-}
-
-type RekeyUpdateRequest struct {
- Nonce string
- Key string
-}
-
-type RekeyUpdateResponse struct {
- Nonce string `json:"nonce"`
- Complete bool `json:"complete"`
- Keys []string `json:"keys"`
- KeysB64 []string `json:"keys_base64"`
- PGPFingerprints []string `json:"pgp_fingerprints"`
- Backup bool `json:"backup"`
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_rekey_test.go b/vendor/github.com/hashicorp/vault/http/sys_rekey_test.go
deleted file mode 100644
index b4a49ad..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_rekey_test.go
+++ /dev/null
@@ -1,260 +0,0 @@
-package http
-
-import (
- "encoding/hex"
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/vault"
-)
-
-// Test to check if the API errors out when wrong number of PGP keys are
-// supplied for rekey
-func TestSysRekeyInit_pgpKeysEntriesForRekey(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
- "secret_shares": 5,
- "secret_threshold": 3,
- "pgp_keys": []string{"pgpkey1"},
- })
- testResponseStatus(t, resp, 400)
-}
-
-func TestSysRekeyInit_Status(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp, err := http.Get(addr + "/v1/sys/rekey/init")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "started": false,
- "t": json.Number("0"),
- "n": json.Number("0"),
- "progress": json.Number("0"),
- "required": json.Number("3"),
- "pgp_fingerprints": interface{}(nil),
- "backup": false,
- "nonce": "",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-}
-
-func TestSysRekeyInit_Setup(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
- "secret_shares": 5,
- "secret_threshold": 3,
- })
- testResponseStatus(t, resp, 200)
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "started": true,
- "t": json.Number("3"),
- "n": json.Number("5"),
- "progress": json.Number("0"),
- "required": json.Number("3"),
- "pgp_fingerprints": interface{}(nil),
- "backup": false,
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if actual["nonce"].(string) == "" {
- t.Fatalf("nonce was empty")
- }
- expected["nonce"] = actual["nonce"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-
- resp = testHttpGet(t, token, addr+"/v1/sys/rekey/init")
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "started": true,
- "t": json.Number("3"),
- "n": json.Number("5"),
- "progress": json.Number("0"),
- "required": json.Number("3"),
- "pgp_fingerprints": interface{}(nil),
- "backup": false,
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if actual["nonce"].(string) == "" {
- t.Fatalf("nonce was empty")
- }
- if actual["nonce"].(string) == "" {
- t.Fatalf("nonce was empty")
- }
- expected["nonce"] = actual["nonce"]
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-}
-
-func TestSysRekeyInit_Cancel(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
- "secret_shares": 5,
- "secret_threshold": 3,
- })
- testResponseStatus(t, resp, 200)
-
- resp = testHttpDelete(t, token, addr+"/v1/sys/rekey/init")
- testResponseStatus(t, resp, 204)
-
- resp, err := http.Get(addr + "/v1/sys/rekey/init")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "started": false,
- "t": json.Number("0"),
- "n": json.Number("0"),
- "progress": json.Number("0"),
- "required": json.Number("3"),
- "pgp_fingerprints": interface{}(nil),
- "backup": false,
- "nonce": "",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
- }
-}
-
-func TestSysRekey_badKey(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/rekey/update", map[string]interface{}{
- "key": "0123",
- })
- testResponseStatus(t, resp, 400)
-}
-
-func TestSysRekey_Update(t *testing.T) {
- core, keys, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
- "secret_shares": 5,
- "secret_threshold": 3,
- })
- var rekeyStatus map[string]interface{}
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &rekeyStatus)
-
- var actual map[string]interface{}
- var expected map[string]interface{}
-
- for i, key := range keys {
- resp = testHttpPut(t, token, addr+"/v1/sys/rekey/update", map[string]interface{}{
- "nonce": rekeyStatus["nonce"].(string),
- "key": hex.EncodeToString(key),
- })
-
- actual = map[string]interface{}{}
- expected = map[string]interface{}{
- "started": true,
- "nonce": rekeyStatus["nonce"].(string),
- "backup": false,
- "pgp_fingerprints": interface{}(nil),
- "required": json.Number("3"),
- "t": json.Number("3"),
- "n": json.Number("5"),
- "progress": json.Number(fmt.Sprintf("%d", i+1)),
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- if i+1 == len(keys) {
- delete(expected, "started")
- delete(expected, "required")
- delete(expected, "t")
- delete(expected, "n")
- delete(expected, "progress")
- expected["complete"] = true
- expected["keys"] = actual["keys"]
- expected["keys_base64"] = actual["keys_base64"]
- }
-
- if i+1 < len(keys) && (actual["nonce"] == nil || actual["nonce"].(string) == "") {
- t.Fatalf("expected a nonce, i is %d, actual is %#v", i, actual)
- }
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected: \n%#v\nactual: \n%#v", expected, actual)
- }
- }
-
- retKeys := actual["keys"].([]interface{})
- if len(retKeys) != 5 {
- t.Fatalf("bad: %#v", retKeys)
- }
- keysB64 := actual["keys_base64"].([]interface{})
- if len(keysB64) != 5 {
- t.Fatalf("bad: %#v", keysB64)
- }
-}
-
-func TestSysRekey_ReInitUpdate(t *testing.T) {
- core, keys, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
- "secret_shares": 5,
- "secret_threshold": 3,
- })
- testResponseStatus(t, resp, 200)
-
- resp = testHttpDelete(t, token, addr+"/v1/sys/rekey/init")
- testResponseStatus(t, resp, 204)
-
- resp = testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
- "secret_shares": 5,
- "secret_threshold": 3,
- })
- testResponseStatus(t, resp, 200)
-
- resp = testHttpPut(t, token, addr+"/v1/sys/rekey/update", map[string]interface{}{
- "key": hex.EncodeToString(keys[0]),
- })
-
- testResponseStatus(t, resp, 400)
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_rotate_test.go b/vendor/github.com/hashicorp/vault/http/sys_rotate_test.go
deleted file mode 100644
index cbb8a38..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_rotate_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package http
-
-import (
- "encoding/json"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysRotate(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPost(t, token, addr+"/v1/sys/rotate", map[string]interface{}{})
- testResponseStatus(t, resp, 204)
-
- resp = testHttpGet(t, token, addr+"/v1/sys/key-status")
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "lease_id": "",
- "renewable": false,
- "lease_duration": json.Number("0"),
- "wrap_info": nil,
- "warnings": nil,
- "auth": nil,
- "data": map[string]interface{}{
- "term": json.Number("2"),
- },
- "term": json.Number("2"),
- }
-
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
-
- actualInstallTime, ok := actual["data"].(map[string]interface{})["install_time"]
- if !ok || actualInstallTime == "" {
- t.Fatal("install_time missing in data")
- }
- expected["data"].(map[string]interface{})["install_time"] = actualInstallTime
- expected["install_time"] = actualInstallTime
-
- expected["request_id"] = actual["request_id"]
-
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad:\nexpected: %#v\nactual: %#v", expected, actual)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_seal.go b/vendor/github.com/hashicorp/vault/http/sys_seal.go
deleted file mode 100644
index ef24304..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_seal.go
+++ /dev/null
@@ -1,218 +0,0 @@
-package http
-
-import (
- "encoding/base64"
- "encoding/hex"
- "errors"
- "fmt"
- "net/http"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/vault"
- "github.com/hashicorp/vault/version"
-)
-
-func handleSysSeal(core *vault.Core) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- req, statusCode, err := buildLogicalRequest(core, w, r)
- if err != nil || statusCode != 0 {
- respondError(w, statusCode, err)
- return
- }
-
- switch req.Operation {
- case logical.UpdateOperation:
- default:
- respondError(w, http.StatusMethodNotAllowed, nil)
- return
- }
-
- // Seal with the token above
- if err := core.SealWithRequest(req); err != nil {
- if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
- respondError(w, http.StatusForbidden, err)
- return
- } else {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- }
-
- respondOk(w, nil)
- })
-}
-
-func handleSysStepDown(core *vault.Core) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- req, statusCode, err := buildLogicalRequest(core, w, r)
- if err != nil || statusCode != 0 {
- respondError(w, statusCode, err)
- return
- }
-
- switch req.Operation {
- case logical.UpdateOperation:
- default:
- respondError(w, http.StatusMethodNotAllowed, nil)
- return
- }
-
- // Seal with the token above
- if err := core.StepDown(req); err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
-
- respondOk(w, nil)
- })
-}
-
-func handleSysUnseal(core *vault.Core) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- switch r.Method {
- case "PUT":
- case "POST":
- default:
- respondError(w, http.StatusMethodNotAllowed, nil)
- return
- }
-
- // Parse the request
- var req UnsealRequest
- if err := parseRequest(r, w, &req); err != nil {
- respondError(w, http.StatusBadRequest, err)
- return
- }
- if !req.Reset && req.Key == "" {
- respondError(
- w, http.StatusBadRequest,
- errors.New("'key' must be specified in request body as JSON, or 'reset' set to true"))
- return
- }
-
- if req.Reset {
- sealed, err := core.Sealed()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- if !sealed {
- respondError(w, http.StatusBadRequest, errors.New("vault is unsealed"))
- return
- }
- core.ResetUnsealProcess()
- } else {
- // Decode the key, which is base64 or hex encoded
- min, max := core.BarrierKeyLength()
- key, err := hex.DecodeString(req.Key)
- // We check min and max here to ensure that a string that is base64
- // encoded but also valid hex will not be valid and we instead base64
- // decode it
- if err != nil || len(key) < min || len(key) > max {
- key, err = base64.StdEncoding.DecodeString(req.Key)
- if err != nil {
- respondError(
- w, http.StatusBadRequest,
- errors.New("'key' must be a valid hex or base64 string"))
- return
- }
- }
-
- // Attempt the unseal
- if _, err := core.Unseal(key); err != nil {
- switch {
- case errwrap.ContainsType(err, new(vault.ErrInvalidKey)):
- case errwrap.Contains(err, vault.ErrBarrierInvalidKey.Error()):
- case errwrap.Contains(err, vault.ErrBarrierNotInit.Error()):
- case errwrap.Contains(err, vault.ErrBarrierSealed.Error()):
- case errwrap.Contains(err, consts.ErrStandby.Error()):
- default:
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- respondError(w, http.StatusBadRequest, err)
- return
- }
- }
-
- // Return the seal status
- handleSysSealStatusRaw(core, w, r)
- })
-}
-
-func handleSysSealStatus(core *vault.Core) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.Method != "GET" {
- respondError(w, http.StatusMethodNotAllowed, nil)
- return
- }
-
- handleSysSealStatusRaw(core, w, r)
- })
-}
-
-func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) {
- sealed, err := core.Sealed()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
-
- sealConfig, err := core.SealAccess().BarrierConfig()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- if sealConfig == nil {
- respondError(w, http.StatusBadRequest, fmt.Errorf(
- "server is not yet initialized"))
- return
- }
-
- // Fetch the local cluster name and identifier
- var clusterName, clusterID string
- if !sealed {
- cluster, err := core.Cluster()
- if err != nil {
- respondError(w, http.StatusInternalServerError, err)
- return
- }
- if cluster == nil {
- respondError(w, http.StatusInternalServerError, fmt.Errorf("failed to fetch cluster details"))
- return
- }
- clusterName = cluster.Name
- clusterID = cluster.ID
- }
-
- progress, nonce := core.SecretProgress()
-
- respondOk(w, &SealStatusResponse{
- Sealed: sealed,
- T: sealConfig.SecretThreshold,
- N: sealConfig.SecretShares,
- Progress: progress,
- Nonce: nonce,
- Version: version.GetVersion().VersionNumber(),
- ClusterName: clusterName,
- ClusterID: clusterID,
- })
-}
-
-type SealStatusResponse struct {
- Sealed bool `json:"sealed"`
- T int `json:"t"`
- N int `json:"n"`
- Progress int `json:"progress"`
- Nonce string `json:"nonce"`
- Version string `json:"version"`
- ClusterName string `json:"cluster_name,omitempty"`
- ClusterID string `json:"cluster_id,omitempty"`
-}
-
-type UnsealRequest struct {
- Key string
- Reset bool
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_seal_test.go b/vendor/github.com/hashicorp/vault/http/sys_seal_test.go
deleted file mode 100644
index 8124614..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_seal_test.go
+++ /dev/null
@@ -1,381 +0,0 @@
-package http
-
-import (
- "encoding/hex"
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "strconv"
- "testing"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/vault"
-)
-
-func TestSysSealStatus(t *testing.T) {
- core := vault.TestCore(t)
- vault.TestCoreInit(t, core)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- resp, err := http.Get(addr + "/v1/sys/seal-status")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "sealed": true,
- "t": json.Number("3"),
- "n": json.Number("3"),
- "progress": json.Number("0"),
- "nonce": "",
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if actual["version"] == nil {
- t.Fatalf("expected version information")
- }
- expected["version"] = actual["version"]
- if actual["cluster_name"] == nil {
- delete(expected, "cluster_name")
- } else {
- expected["cluster_name"] = actual["cluster_name"]
- }
- if actual["cluster_id"] == nil {
- delete(expected, "cluster_id")
- } else {
- expected["cluster_id"] = actual["cluster_id"]
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
- }
-}
-
-func TestSysSealStatus_uninit(t *testing.T) {
- core := vault.TestCore(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- resp, err := http.Get(addr + "/v1/sys/seal-status")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- testResponseStatus(t, resp, 400)
-}
-
-func TestSysSeal(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/seal", nil)
- testResponseStatus(t, resp, 204)
-
- check, err := core.Sealed()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if !check {
- t.Fatal("should be sealed")
- }
-}
-
-func TestSysSeal_unsealed(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/seal", nil)
- testResponseStatus(t, resp, 204)
-
- check, err := core.Sealed()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if !check {
- t.Fatal("should be sealed")
- }
-}
-
-func TestSysUnseal(t *testing.T) {
- core := vault.TestCore(t)
- keys, _ := vault.TestCoreInit(t, core)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- for i, key := range keys {
- resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{
- "key": hex.EncodeToString(key),
- })
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "sealed": true,
- "t": json.Number("3"),
- "n": json.Number("3"),
- "progress": json.Number(fmt.Sprintf("%d", i+1)),
- "nonce": "",
- }
- if i == len(keys)-1 {
- expected["sealed"] = false
- expected["progress"] = json.Number("0")
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if i < len(keys)-1 && (actual["nonce"] == nil || actual["nonce"].(string) == "") {
- t.Fatalf("got nil nonce, actual is %#v", actual)
- } else {
- expected["nonce"] = actual["nonce"]
- }
- if actual["version"] == nil {
- t.Fatalf("expected version information")
- }
- expected["version"] = actual["version"]
- if actual["cluster_name"] == nil {
- delete(expected, "cluster_name")
- } else {
- expected["cluster_name"] = actual["cluster_name"]
- }
- if actual["cluster_id"] == nil {
- delete(expected, "cluster_id")
- } else {
- expected["cluster_id"] = actual["cluster_id"]
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: expected: \n%#v\nactual: \n%#v", expected, actual)
- }
- }
-}
-
-func TestSysUnseal_badKey(t *testing.T) {
- core := vault.TestCore(t)
- vault.TestCoreInit(t, core)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{
- "key": "0123",
- })
- testResponseStatus(t, resp, 400)
-}
-
-func TestSysUnseal_Reset(t *testing.T) {
- core := vault.TestCore(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
-
- thresh := 3
- resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{
- "secret_shares": 5,
- "secret_threshold": thresh,
- })
-
- var actual map[string]interface{}
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- keysRaw, ok := actual["keys"]
- if !ok {
- t.Fatalf("no keys: %#v", actual)
- }
- for i, key := range keysRaw.([]interface{}) {
- if i > thresh-2 {
- break
- }
-
- resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{
- "key": key.(string),
- })
-
- var actual map[string]interface{}
- expected := map[string]interface{}{
- "sealed": true,
- "t": json.Number("3"),
- "n": json.Number("5"),
- "progress": json.Number(strconv.Itoa(i + 1)),
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if actual["version"] == nil {
- t.Fatalf("expected version information")
- }
- expected["version"] = actual["version"]
- if actual["nonce"] == "" && expected["sealed"].(bool) {
- t.Fatalf("expected a nonce")
- }
- expected["nonce"] = actual["nonce"]
- if actual["cluster_name"] == nil {
- delete(expected, "cluster_name")
- } else {
- expected["cluster_name"] = actual["cluster_name"]
- }
- if actual["cluster_id"] == nil {
- delete(expected, "cluster_id")
- } else {
- expected["cluster_id"] = actual["cluster_id"]
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected:\n%#v\nactual:\n%#v\n", expected, actual)
- }
- }
-
- resp = testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{
- "reset": true,
- })
-
- actual = map[string]interface{}{}
- expected := map[string]interface{}{
- "sealed": true,
- "t": json.Number("3"),
- "n": json.Number("5"),
- "progress": json.Number("0"),
- }
- testResponseStatus(t, resp, 200)
- testResponseBody(t, resp, &actual)
- if actual["version"] == nil {
- t.Fatalf("expected version information")
- }
- expected["version"] = actual["version"]
- expected["nonce"] = actual["nonce"]
- if actual["cluster_name"] == nil {
- delete(expected, "cluster_name")
- } else {
- expected["cluster_name"] = actual["cluster_name"]
- }
- if actual["cluster_id"] == nil {
- delete(expected, "cluster_id")
- } else {
- expected["cluster_id"] = actual["cluster_id"]
- }
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("\nexpected:\n%#v\nactual:\n%#v\n", expected, actual)
- }
-
-}
-
-// Test Seal's permissions logic, which is slightly different than normal code
-// paths in that it queries the ACL rather than having checkToken do it. This
-// is because it was abusing RootPaths in logical_system, but that caused some
-// haywire with code paths that expected there to be an actual corresponding
-// logical.Path for it. This way is less hacky, but this test ensures that we
-// have not opened up a permissions hole.
-func TestSysSeal_Permissions(t *testing.T) {
- core, _, root := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, root)
-
- // Set the 'test' policy object to permit write access to sys/seal
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/policy/test",
- Data: map[string]interface{}{
- "rules": `path "sys/seal" { capabilities = ["read"] }`,
- },
- ClientToken: root,
- }
- resp, err := core.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Create a non-root token with access to that policy
- req.Path = "auth/token/create"
- req.Data = map[string]interface{}{
- "id": "child",
- "policies": []string{"test"},
- }
-
- resp, err = core.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v %v", err, resp)
- }
- if resp.Auth.ClientToken != "child" {
- t.Fatalf("bad: %#v", resp)
- }
-
- // We must go through the HTTP interface since seal doesn't go through HandleRequest
-
- // We expect this to fail since it needs update and sudo
- httpResp := testHttpPut(t, "child", addr+"/v1/sys/seal", nil)
- testResponseStatus(t, httpResp, 403)
-
- // Now modify to add update capability
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/policy/test",
- Data: map[string]interface{}{
- "rules": `path "sys/seal" { capabilities = ["update"] }`,
- },
- ClientToken: root,
- }
- resp, err = core.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // We expect this to fail since it needs sudo
- httpResp = testHttpPut(t, "child", addr+"/v1/sys/seal", nil)
- testResponseStatus(t, httpResp, 403)
-
- // Now modify to just sudo capability
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/policy/test",
- Data: map[string]interface{}{
- "rules": `path "sys/seal" { capabilities = ["sudo"] }`,
- },
- ClientToken: root,
- }
- resp, err = core.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // We expect this to fail since it needs update
- httpResp = testHttpPut(t, "child", addr+"/v1/sys/seal", nil)
- testResponseStatus(t, httpResp, 403)
-
- // Now modify to add all needed capabilities
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/policy/test",
- Data: map[string]interface{}{
- "rules": `path "sys/seal" { capabilities = ["update", "sudo"] }`,
- },
- ClientToken: root,
- }
- resp, err = core.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // We expect this to work
- httpResp = testHttpPut(t, "child", addr+"/v1/sys/seal", nil)
- testResponseStatus(t, httpResp, 204)
-}
-
-func TestSysStepDown(t *testing.T) {
- core, _, token := vault.TestCoreUnsealed(t)
- ln, addr := TestServer(t, core)
- defer ln.Close()
- TestServerAuth(t, addr, token)
-
- resp := testHttpPut(t, token, addr+"/v1/sys/step-down", nil)
- testResponseStatus(t, resp, 204)
-}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go b/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go
deleted file mode 100644
index 7ab2143..0000000
--- a/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go
+++ /dev/null
@@ -1,354 +0,0 @@
-package http
-
-import (
- "encoding/json"
- "reflect"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/vault"
-)
-
-// Test wrapping functionality
-func TestHTTP_Wrapping(t *testing.T) {
- cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
- HandlerFunc: Handler,
- })
- cluster.Start()
- defer cluster.Cleanup()
-
- cores := cluster.Cores
-
- // make it easy to get access to the active
- core := cores[0].Core
- vault.TestWaitActive(t, core)
-
- client := cores[0].Client
- client.SetToken(cluster.RootToken)
-
- // Write a value that we will use with wrapping for lookup
- _, err := client.Logical().Write("secret/foo", map[string]interface{}{
- "zip": "zap",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // Set a wrapping lookup function for reads on that path
- client.SetWrappingLookupFunc(func(operation, path string) string {
- if operation == "GET" && path == "secret/foo" {
- return "5m"
- }
-
- return api.DefaultWrappingLookupFunc(operation, path)
- })
-
- // First test: basic things that should fail, lookup edition
- // Root token isn't a wrapping token
- _, err = client.Logical().Write("sys/wrapping/lookup", nil)
- if err == nil {
- t.Fatal("expected error")
- }
- // Not supplied
- _, err = client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{
- "foo": "bar",
- })
- if err == nil {
- t.Fatal("expected error")
- }
- // Nonexistent token isn't a wrapping token
- _, err = client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{
- "token": "bar",
- })
- if err == nil {
- t.Fatal("expected error")
- }
-
- // Second: basic things that should fail, unwrap edition
- // Root token isn't a wrapping token
- _, err = client.Logical().Unwrap(cluster.RootToken)
- if err == nil {
- t.Fatal("expected error")
- }
- // Root token isn't a wrapping token
- _, err = client.Logical().Write("sys/wrapping/unwrap", nil)
- if err == nil {
- t.Fatal("expected error")
- }
- // Not supplied
- _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
- "foo": "bar",
- })
- if err == nil {
- t.Fatal("expected error")
- }
- // Nonexistent token isn't a wrapping token
- _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
- "token": "bar",
- })
- if err == nil {
- t.Fatal("expected error")
- }
-
- //
- // Test lookup
- //
-
- // Create a wrapping token
- secret, err := client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil || secret.WrapInfo == nil {
- t.Fatal("secret or wrap info is nil")
- }
- wrapInfo := secret.WrapInfo
-
- // Test this twice to ensure no ill effect to the wrapping token as a result of the lookup
- for i := 0; i < 2; i++ {
- secret, err = client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{
- "token": wrapInfo.Token,
- })
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil || secret.Data == nil {
- t.Fatal("secret or secret data is nil")
- }
- creationTTL, _ := secret.Data["creation_ttl"].(json.Number).Int64()
- if int(creationTTL) != wrapInfo.TTL {
- t.Fatalf("mistmatched ttls: %d vs %d", creationTTL, wrapInfo.TTL)
- }
- if secret.Data["creation_time"].(string) != wrapInfo.CreationTime.Format(time.RFC3339Nano) {
- t.Fatalf("mistmatched creation times: %d vs %d", secret.Data["creation_time"].(string), wrapInfo.CreationTime.Format(time.RFC3339Nano))
- }
- }
-
- //
- // Test unwrap
- //
-
- // Create a wrapping token
- secret, err = client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil || secret.WrapInfo == nil {
- t.Fatal("secret or wrap info is nil")
- }
- wrapInfo = secret.WrapInfo
-
- // Test unwrap via the client token
- client.SetToken(wrapInfo.Token)
- secret, err = client.Logical().Write("sys/wrapping/unwrap", nil)
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil || secret.Data == nil {
- t.Fatal("secret or secret data is nil")
- }
- ret1 := secret
- // Should be expired and fail
- _, err = client.Logical().Write("sys/wrapping/unwrap", nil)
- if err == nil {
- t.Fatal("expected err")
- }
-
- // Create a wrapping token
- client.SetToken(cluster.RootToken)
- secret, err = client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil || secret.WrapInfo == nil {
- t.Fatal("secret or wrap info is nil")
- }
- wrapInfo = secret.WrapInfo
-
- // Test as a separate token
- secret, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
- "token": wrapInfo.Token,
- })
- if err != nil {
- t.Fatal(err)
- }
- ret2 := secret
- // Should be expired and fail
- _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
- "token": wrapInfo.Token,
- })
- if err == nil {
- t.Fatal("expected err")
- }
-
- // Create a wrapping token
- secret, err = client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil || secret.WrapInfo == nil {
- t.Fatal("secret or wrap info is nil")
- }
- wrapInfo = secret.WrapInfo
-
- // Read response directly
- client.SetToken(wrapInfo.Token)
- secret, err = client.Logical().Read("cubbyhole/response")
- if err != nil {
- t.Fatal(err)
- }
- ret3 := secret
- // Should be expired and fail
- _, err = client.Logical().Write("cubbyhole/response", nil)
- if err == nil {
- t.Fatal("expected err")
- }
-
- // Create a wrapping token
- client.SetToken(cluster.RootToken)
- secret, err = client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil || secret.WrapInfo == nil {
- t.Fatal("secret or wrap info is nil")
- }
- wrapInfo = secret.WrapInfo
-
- // Read via Unwrap method
- secret, err = client.Logical().Unwrap(wrapInfo.Token)
- if err != nil {
- t.Fatal(err)
- }
- ret4 := secret
- // Should be expired and fail
- _, err = client.Logical().Unwrap(wrapInfo.Token)
- if err == nil {
- t.Fatal("expected err")
- }
-
- if !reflect.DeepEqual(ret1.Data, map[string]interface{}{
- "zip": "zap",
- }) {
- t.Fatalf("ret1 data did not match expected: %#v", ret1.Data)
- }
- if !reflect.DeepEqual(ret2.Data, map[string]interface{}{
- "zip": "zap",
- }) {
- t.Fatalf("ret2 data did not match expected: %#v", ret2.Data)
- }
- var ret3Secret api.Secret
- err = jsonutil.DecodeJSON([]byte(ret3.Data["response"].(string)), &ret3Secret)
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(ret3Secret.Data, map[string]interface{}{
- "zip": "zap",
- }) {
- t.Fatalf("ret3 data did not match expected: %#v", ret3Secret.Data)
- }
- if !reflect.DeepEqual(ret4.Data, map[string]interface{}{
- "zip": "zap",
- }) {
- t.Fatalf("ret4 data did not match expected: %#v", ret4.Data)
- }
-
- //
- // Custom wrapping
- //
-
- client.SetToken(cluster.RootToken)
- data := map[string]interface{}{
- "zip": "zap",
- "three": json.Number("2"),
- }
-
- // Don't set a request TTL on that path, should fail
- client.SetWrappingLookupFunc(func(operation, path string) string {
- return ""
- })
- secret, err = client.Logical().Write("sys/wrapping/wrap", data)
- if err == nil {
- t.Fatal("expected error")
- }
-
- // Re-set the lookup function
- client.SetWrappingLookupFunc(func(operation, path string) string {
- if operation == "GET" && path == "secret/foo" {
- return "5m"
- }
-
- return api.DefaultWrappingLookupFunc(operation, path)
- })
- secret, err = client.Logical().Write("sys/wrapping/wrap", data)
- if err != nil {
- t.Fatal(err)
- }
- secret, err = client.Logical().Unwrap(secret.WrapInfo.Token)
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(data, secret.Data) {
- t.Fatalf("custom wrap did not match expected: %#v", secret.Data)
- }
-
- //
- // Test rewrap
- //
-
- // Create a wrapping token
- secret, err = client.Logical().Read("secret/foo")
- if err != nil {
- t.Fatal(err)
- }
- if secret == nil || secret.WrapInfo == nil {
- t.Fatal("secret or wrap info is nil")
- }
- wrapInfo = secret.WrapInfo
-
- // Check for correct CreationPath before rewrap
- if wrapInfo.CreationPath != "secret/foo" {
- t.Fatal("error on wrapInfo.CreationPath: expected: secret/foo, got: %s", wrapInfo.CreationPath)
- }
-
- // Test rewrapping
- secret, err = client.Logical().Write("sys/wrapping/rewrap", map[string]interface{}{
- "token": wrapInfo.Token,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // Check for correct Creation path after rewrap
- if wrapInfo.CreationPath != "secret/foo" {
- t.Fatal("error on wrapInfo.CreationPath: expected: secret/foo, got: %s", wrapInfo.CreationPath)
- }
-
- // Should be expired and fail
- _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
- "token": wrapInfo.Token,
- })
- if err == nil {
- t.Fatal("expected err")
- }
-
- // Attempt unwrapping the rewrapped token
- wrapToken := secret.WrapInfo.Token
- secret, err = client.Logical().Unwrap(wrapToken)
- if err != nil {
- t.Fatal(err)
- }
- // Should be expired and fail
- _, err = client.Logical().Unwrap(wrapToken)
- if err == nil {
- t.Fatal("expected err")
- }
-
- if !reflect.DeepEqual(secret.Data, map[string]interface{}{
- "zip": "zap",
- }) {
- t.Fatalf("secret data did not match expected: %#v", secret.Data)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/http/testing.go b/vendor/github.com/hashicorp/vault/http/testing.go
deleted file mode 100644
index 543b3e6..0000000
--- a/vendor/github.com/hashicorp/vault/http/testing.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package http
-
-import (
- "fmt"
- "net"
- "net/http"
- "testing"
-
- "golang.org/x/net/http2"
-
- "github.com/hashicorp/vault/vault"
-)
-
-func TestListener(t *testing.T) (net.Listener, string) {
- fail := func(format string, args ...interface{}) {
- panic(fmt.Sprintf(format, args...))
- }
- if t != nil {
- fail = t.Fatalf
- }
-
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- fail("err: %s", err)
- }
- addr := "http://" + ln.Addr().String()
- return ln, addr
-}
-
-func TestServerWithListener(t *testing.T, ln net.Listener, addr string, core *vault.Core) {
- // Create a muxer to handle our requests so that we can authenticate
- // for tests.
- mux := http.NewServeMux()
- mux.Handle("/_test/auth", http.HandlerFunc(testHandleAuth))
- mux.Handle("/", Handler(core))
-
- server := &http.Server{
- Addr: ln.Addr().String(),
- Handler: mux,
- }
- if err := http2.ConfigureServer(server, nil); err != nil {
- t.Fatal(err)
- }
- go server.Serve(ln)
-}
-
-func TestServer(t *testing.T, core *vault.Core) (net.Listener, string) {
- ln, addr := TestListener(t)
- TestServerWithListener(t, ln, addr, core)
- return ln, addr
-}
-
-func TestServerAuth(t *testing.T, addr string, token string) {
- if _, err := http.Get(addr + "/_test/auth?token=" + token); err != nil {
- t.Fatalf("error authenticating: %s", err)
- }
-}
-
-func testHandleAuth(w http.ResponseWriter, req *http.Request) {
- respondOk(w, nil)
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/auth.go b/vendor/github.com/hashicorp/vault/logical/auth.go
deleted file mode 100644
index 09694c4..0000000
--- a/vendor/github.com/hashicorp/vault/logical/auth.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package logical
-
-import (
- "fmt"
- "time"
-)
-
-// Auth is the resulting authentication information that is part of
-// Response for credential backends.
-type Auth struct {
- LeaseOptions
-
- // InternalData is JSON-encodable data that is stored with the auth struct.
- // This will be sent back during a Renew/Revoke for storing internal data
- // used for those operations.
- InternalData map[string]interface{} `json:"internal_data" mapstructure:"internal_data" structs:"internal_data"`
-
- // DisplayName is a non-security sensitive identifier that is
- // applicable to this Auth. It is used for logging and prefixing
- // of dynamic secrets. For example, DisplayName may be "armon" for
- // the github credential backend. If the client token is used to
- // generate a SQL credential, the user may be "github-armon-uuid".
- // This is to help identify the source without using audit tables.
- DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"`
-
- // Policies is the list of policies that the authenticated user
- // is associated with.
- Policies []string `json:"policies" mapstructure:"policies" structs:"policies"`
-
- // Metadata is used to attach arbitrary string-type metadata to
- // an authenticated user. This metadata will be outputted into the
- // audit log.
- Metadata map[string]string `json:"metadata" mapstructure:"metadata" structs:"metadata"`
-
- // ClientToken is the token that is generated for the authentication.
- // This will be filled in by Vault core when an auth structure is
- // returned. Setting this manually will have no effect.
- ClientToken string `json:"client_token" mapstructure:"client_token" structs:"client_token"`
-
- // Accessor is the identifier for the ClientToken. This can be used
- // to perform management functionalities (especially revocation) when
- // ClientToken in the audit logs are obfuscated. Accessor can be used
- // to revoke a ClientToken and to lookup the capabilities of the ClientToken,
- // both without actually knowing the ClientToken.
- Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor"`
-
- // Period indicates that the token generated using this Auth object
- // should never expire. The token should be renewed within the duration
- // specified by this period.
- Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
-
- // Number of allowed uses of the issued token
- NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
-
- // Persona is the information about the authenticated client returned by
- // the auth backend
- Persona *Persona `json:"persona" structs:"persona" mapstructure:"persona"`
-}
-
-func (a *Auth) GoString() string {
- return fmt.Sprintf("*%#v", *a)
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/connection.go b/vendor/github.com/hashicorp/vault/logical/connection.go
deleted file mode 100644
index d54a0f5..0000000
--- a/vendor/github.com/hashicorp/vault/logical/connection.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package logical
-
-import (
- "crypto/tls"
-)
-
-// Connection represents the connection information for a request. This
-// is present on the Request structure for credential backends.
-type Connection struct {
- // RemoteAddr is the network address that sent the request.
- RemoteAddr string `json:"remote_addr"`
-
- // ConnState is the TLS connection state if applicable.
- ConnState *tls.ConnectionState
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/error.go b/vendor/github.com/hashicorp/vault/logical/error.go
deleted file mode 100644
index 19e3e2d..0000000
--- a/vendor/github.com/hashicorp/vault/logical/error.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package logical
-
-type HTTPCodedError interface {
- Error() string
- Code() int
-}
-
-func CodedError(c int, s string) HTTPCodedError {
- return &codedError{s, c}
-}
-
-type codedError struct {
- s string
- code int
-}
-
-func (e *codedError) Error() string {
- return e.s
-}
-
-func (e *codedError) Code() int {
- return e.code
-}
-
-// Struct to identify user input errors. This is helpful in responding the
-// appropriate status codes to clients from the HTTP endpoints.
-type StatusBadRequest struct {
- Err string
-}
-
-// Implementing error interface
-func (s *StatusBadRequest) Error() string {
- return s.Err
-}
-
-// This is a new type declared to not cause potential compatibility problems if
-// the logic around the HTTPCodedError interface changes; in particular for
-// logical request paths it is basically ignored, and changing that behavior
-// might cause unforseen issues.
-type ReplicationCodedError struct {
- Msg string
- Code int
-}
-
-func (r *ReplicationCodedError) Error() string {
- return r.Msg
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/backend.go b/vendor/github.com/hashicorp/vault/logical/framework/backend.go
deleted file mode 100644
index 477a926..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/backend.go
+++ /dev/null
@@ -1,653 +0,0 @@
-package framework
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "regexp"
- "sort"
- "strings"
- "sync"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// Backend is an implementation of logical.Backend that allows
-// the implementer to code a backend using a much more programmer-friendly
-// framework that handles a lot of the routing and validation for you.
-//
-// This is recommended over implementing logical.Backend directly.
-type Backend struct {
- // Help is the help text that is shown when a help request is made
- // on the root of this resource. The root help is special since we
- // show all the paths that can be requested.
- Help string
-
- // Paths are the various routes that the backend responds to.
- // This cannot be modified after construction (i.e. dynamically changing
- // paths, including adding or removing, is not allowed once the
- // backend is in use).
- //
- // PathsSpecial is the list of path patterns that denote the
- // paths above that require special privileges. These can't be
- // regular expressions, it is either exact match or prefix match.
- // For prefix match, append '*' as a suffix.
- Paths []*Path
- PathsSpecial *logical.Paths
-
- // Secrets is the list of secret types that this backend can
- // return. It is used to automatically generate proper responses,
- // and ease specifying callbacks for revocation, renewal, etc.
- Secrets []*Secret
-
- // PeriodicFunc is the callback, which if set, will be invoked when the
- // periodic timer of RollbackManager ticks. This can be used by
- // backends to do anything it wishes to do periodically.
- //
- // PeriodicFunc can be invoked to, say to periodically delete stale
- // entries in backend's storage, while the backend is still being used.
- // (Note the different of this action from what `Clean` does, which is
- // invoked just before the backend is unmounted).
- PeriodicFunc periodicFunc
-
- // WALRollback is called when a WAL entry (see wal.go) has to be rolled
- // back. It is called with the data from the entry.
- //
- // WALRollbackMinAge is the minimum age of a WAL entry before it is attempted
- // to be rolled back. This should be longer than the maximum time it takes
- // to successfully create a secret.
- WALRollback WALRollbackFunc
- WALRollbackMinAge time.Duration
-
- // Clean is called on unload to clean up e.g any existing connections
- // to the backend, if required.
- Clean CleanupFunc
-
- // Initialize is called after a backend is created. Storage should not be
- // written to before this function is called.
- Init InitializeFunc
-
- // Invalidate is called when a keys is modified if required
- Invalidate InvalidateFunc
-
- // AuthRenew is the callback to call when a RenewRequest for an
- // authentication comes in. By default, renewal won't be allowed.
- // See the built-in AuthRenew helpers in lease.go for common callbacks.
- AuthRenew OperationFunc
-
- // LicenseRegistration is called to register the license for a backend.
- LicenseRegistration LicenseRegistrationFunc
-
- // Type is the logical.BackendType for the backend implementation
- BackendType logical.BackendType
-
- logger log.Logger
- system logical.SystemView
- once sync.Once
- pathsRe []*regexp.Regexp
-}
-
-// periodicFunc is the callback called when the RollbackManager's timer ticks.
-// This can be utilized by the backends to do anything it wants.
-type periodicFunc func(*logical.Request) error
-
-// OperationFunc is the callback called for an operation on a path.
-type OperationFunc func(*logical.Request, *FieldData) (*logical.Response, error)
-
-// WALRollbackFunc is the callback for rollbacks.
-type WALRollbackFunc func(*logical.Request, string, interface{}) error
-
-// CleanupFunc is the callback for backend unload.
-type CleanupFunc func()
-
-// InitializeFunc is the callback for backend creation.
-type InitializeFunc func() error
-
-// InvalidateFunc is the callback for backend key invalidation.
-type InvalidateFunc func(string)
-
-// LicenseRegistrationFunc is the callback for backend license registration.
-type LicenseRegistrationFunc func(interface{}) error
-
-// HandleExistenceCheck is the logical.Backend implementation.
-func (b *Backend) HandleExistenceCheck(req *logical.Request) (checkFound bool, exists bool, err error) {
- b.once.Do(b.init)
-
- // Ensure we are only doing this when one of the correct operations is in play
- switch req.Operation {
- case logical.CreateOperation:
- case logical.UpdateOperation:
- default:
- return false, false, fmt.Errorf("incorrect operation type %v for an existence check", req.Operation)
- }
-
- // Find the matching route
- path, captures := b.route(req.Path)
- if path == nil {
- return false, false, logical.ErrUnsupportedPath
- }
-
- if path.ExistenceCheck == nil {
- return false, false, nil
- }
-
- checkFound = true
-
- // Build up the data for the route, with the URL taking priority
- // for the fields over the PUT data.
- raw := make(map[string]interface{}, len(path.Fields))
- for k, v := range req.Data {
- raw[k] = v
- }
- for k, v := range captures {
- raw[k] = v
- }
-
- fd := FieldData{
- Raw: raw,
- Schema: path.Fields}
-
- err = fd.Validate()
- if err != nil {
- return false, false, errutil.UserError{Err: err.Error()}
- }
-
- // Call the callback with the request and the data
- exists, err = path.ExistenceCheck(req, &fd)
- return
-}
-
-// HandleRequest is the logical.Backend implementation.
-func (b *Backend) HandleRequest(req *logical.Request) (*logical.Response, error) {
- b.once.Do(b.init)
-
- // Check for special cased global operations. These don't route
- // to a specific Path.
- switch req.Operation {
- case logical.RenewOperation:
- fallthrough
- case logical.RevokeOperation:
- return b.handleRevokeRenew(req)
- case logical.RollbackOperation:
- return b.handleRollback(req)
- }
-
- // If the path is empty and it is a help operation, handle that.
- if req.Path == "" && req.Operation == logical.HelpOperation {
- return b.handleRootHelp()
- }
-
- // Find the matching route
- path, captures := b.route(req.Path)
- if path == nil {
- return nil, logical.ErrUnsupportedPath
- }
-
- // Build up the data for the route, with the URL taking priority
- // for the fields over the PUT data.
- raw := make(map[string]interface{}, len(path.Fields))
- for k, v := range req.Data {
- raw[k] = v
- }
- for k, v := range captures {
- raw[k] = v
- }
-
- // Look up the callback for this operation
- var callback OperationFunc
- var ok bool
- if path.Callbacks != nil {
- callback, ok = path.Callbacks[req.Operation]
- }
- if !ok {
- if req.Operation == logical.HelpOperation {
- callback = path.helpCallback
- ok = true
- }
- }
- if !ok {
- return nil, logical.ErrUnsupportedOperation
- }
-
- fd := FieldData{
- Raw: raw,
- Schema: path.Fields}
-
- if req.Operation != logical.HelpOperation {
- err := fd.Validate()
- if err != nil {
- return nil, err
- }
- }
-
- // Call the callback with the request and the data
- return callback(req, &fd)
-}
-
-// SpecialPaths is the logical.Backend implementation.
-func (b *Backend) SpecialPaths() *logical.Paths {
- return b.PathsSpecial
-}
-
-// Cleanup is used to release resources and prepare to stop the backend
-func (b *Backend) Cleanup() {
- if b.Clean != nil {
- b.Clean()
- }
-}
-
-// Initialize calls the backend's Init func if set.
-func (b *Backend) Initialize() error {
- if b.Init != nil {
- return b.Init()
- }
-
- return nil
-}
-
-// InvalidateKey is used to clear caches and reset internal state on key changes
-func (b *Backend) InvalidateKey(key string) {
- if b.Invalidate != nil {
- b.Invalidate(key)
- }
-}
-
-// Setup is used to initialize the backend with the initial backend configuration
-func (b *Backend) Setup(config *logical.BackendConfig) error {
- b.logger = config.Logger
- b.system = config.System
- return nil
-}
-
-// Logger can be used to get the logger. If no logger has been set,
-// the logs will be discarded.
-func (b *Backend) Logger() log.Logger {
- if b.logger != nil {
- return b.logger
- }
-
- return logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelOff)
-}
-
-// System returns the backend's system view.
-func (b *Backend) System() logical.SystemView {
- return b.system
-}
-
-// Type returns the backend type
-func (b *Backend) Type() logical.BackendType {
- return b.BackendType
-}
-
-// RegisterLicense performs backend license registration.
-func (b *Backend) RegisterLicense(license interface{}) error {
- if b.LicenseRegistration == nil {
- return nil
- }
- return b.LicenseRegistration(license)
-}
-
-// SanitizeTTLStr takes in the TTL and MaxTTL values provided by the user,
-// compares those with the SystemView values. If they are empty a value of 0 is
-// set, which will cause initial secret or LeaseExtend operations to use the
-// mount/system defaults. If they are set, their boundaries are validated.
-func (b *Backend) SanitizeTTLStr(ttlStr, maxTTLStr string) (ttl, maxTTL time.Duration, err error) {
- if len(ttlStr) == 0 || ttlStr == "0" {
- ttl = 0
- } else {
- ttl, err = time.ParseDuration(ttlStr)
- if err != nil {
- return 0, 0, fmt.Errorf("Invalid ttl: %s", err)
- }
- }
-
- if len(maxTTLStr) == 0 || maxTTLStr == "0" {
- maxTTL = 0
- } else {
- maxTTL, err = time.ParseDuration(maxTTLStr)
- if err != nil {
- return 0, 0, fmt.Errorf("Invalid max_ttl: %s", err)
- }
- }
-
- ttl, maxTTL, err = b.SanitizeTTL(ttl, maxTTL)
-
- return
-}
-
-// SanitizeTTL caps the boundaries of ttl and max_ttl values to the
-// backend mount's max_ttl value.
-func (b *Backend) SanitizeTTL(ttl, maxTTL time.Duration) (time.Duration, time.Duration, error) {
- sysMaxTTL := b.System().MaxLeaseTTL()
- if ttl > sysMaxTTL {
- return 0, 0, fmt.Errorf("\"ttl\" value must be less than allowed max lease TTL value '%s'", sysMaxTTL.String())
- }
- if maxTTL > sysMaxTTL {
- return 0, 0, fmt.Errorf("\"max_ttl\" value must be less than allowed max lease TTL value '%s'", sysMaxTTL.String())
- }
- if ttl > maxTTL && maxTTL != 0 {
- ttl = maxTTL
- }
- return ttl, maxTTL, nil
-}
-
-// Route looks up the path that would be used for a given path string.
-func (b *Backend) Route(path string) *Path {
- result, _ := b.route(path)
- return result
-}
-
-// Secret is used to look up the secret with the given type.
-func (b *Backend) Secret(k string) *Secret {
- for _, s := range b.Secrets {
- if s.Type == k {
- return s
- }
- }
-
- return nil
-}
-
-func (b *Backend) init() {
- b.pathsRe = make([]*regexp.Regexp, len(b.Paths))
- for i, p := range b.Paths {
- if len(p.Pattern) == 0 {
- panic(fmt.Sprintf("Routing pattern cannot be blank"))
- }
- // Automatically anchor the pattern
- if p.Pattern[0] != '^' {
- p.Pattern = "^" + p.Pattern
- }
- if p.Pattern[len(p.Pattern)-1] != '$' {
- p.Pattern = p.Pattern + "$"
- }
- b.pathsRe[i] = regexp.MustCompile(p.Pattern)
- }
-}
-
-func (b *Backend) route(path string) (*Path, map[string]string) {
- b.once.Do(b.init)
-
- for i, re := range b.pathsRe {
- matches := re.FindStringSubmatch(path)
- if matches == nil {
- continue
- }
-
- // We have a match, determine the mapping of the captures and
- // store that for returning.
- var captures map[string]string
- path := b.Paths[i]
- if captureNames := re.SubexpNames(); len(captureNames) > 1 {
- captures = make(map[string]string, len(captureNames))
- for i, name := range captureNames {
- if name != "" {
- captures[name] = matches[i]
- }
- }
- }
-
- return path, captures
- }
-
- return nil, nil
-}
-
-func (b *Backend) handleRootHelp() (*logical.Response, error) {
- // Build a mapping of the paths and get the paths alphabetized to
- // make the output prettier.
- pathsMap := make(map[string]*Path)
- paths := make([]string, 0, len(b.Paths))
- for i, p := range b.pathsRe {
- paths = append(paths, p.String())
- pathsMap[p.String()] = b.Paths[i]
- }
- sort.Strings(paths)
-
- // Build the path data
- pathData := make([]rootHelpTemplatePath, 0, len(paths))
- for _, route := range paths {
- p := pathsMap[route]
- pathData = append(pathData, rootHelpTemplatePath{
- Path: route,
- Help: strings.TrimSpace(p.HelpSynopsis),
- })
- }
-
- help, err := executeTemplate(rootHelpTemplate, &rootHelpTemplateData{
- Help: strings.TrimSpace(b.Help),
- Paths: pathData,
- })
- if err != nil {
- return nil, err
- }
-
- return logical.HelpResponse(help, nil), nil
-}
-
-func (b *Backend) handleRevokeRenew(
- req *logical.Request) (*logical.Response, error) {
- // Special case renewal of authentication for credential backends
- if req.Operation == logical.RenewOperation && req.Auth != nil {
- return b.handleAuthRenew(req)
- }
-
- if req.Secret == nil {
- return nil, fmt.Errorf("request has no secret")
- }
-
- rawSecretType, ok := req.Secret.InternalData["secret_type"]
- if !ok {
- return nil, fmt.Errorf("secret is unsupported by this backend")
- }
- secretType, ok := rawSecretType.(string)
- if !ok {
- return nil, fmt.Errorf("secret is unsupported by this backend")
- }
-
- secret := b.Secret(secretType)
- if secret == nil {
- return nil, fmt.Errorf("secret is unsupported by this backend")
- }
-
- switch req.Operation {
- case logical.RenewOperation:
- return secret.HandleRenew(req)
- case logical.RevokeOperation:
- return secret.HandleRevoke(req)
- default:
- return nil, fmt.Errorf(
- "invalid operation for revoke/renew: %s", req.Operation)
- }
-}
-
-// handleRollback invokes the PeriodicFunc set on the backend. It also does a WAL rollback operation.
-func (b *Backend) handleRollback(
- req *logical.Request) (*logical.Response, error) {
- // Response is not expected from the periodic operation.
- if b.PeriodicFunc != nil {
- if err := b.PeriodicFunc(req); err != nil {
- return nil, err
- }
- }
-
- return b.handleWALRollback(req)
-}
-
-func (b *Backend) handleAuthRenew(req *logical.Request) (*logical.Response, error) {
- if b.AuthRenew == nil {
- return logical.ErrorResponse("this auth type doesn't support renew"), nil
- }
-
- return b.AuthRenew(req, nil)
-}
-
-func (b *Backend) handleWALRollback(
- req *logical.Request) (*logical.Response, error) {
- if b.WALRollback == nil {
- return nil, logical.ErrUnsupportedOperation
- }
-
- var merr error
- keys, err := ListWAL(req.Storage)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
- if len(keys) == 0 {
- return nil, nil
- }
-
- // Calculate the minimum time that the WAL entries could be
- // created in order to be rolled back.
- age := b.WALRollbackMinAge
- if age == 0 {
- age = 10 * time.Minute
- }
- minAge := time.Now().Add(-1 * age)
- if _, ok := req.Data["immediate"]; ok {
- minAge = time.Now().Add(1000 * time.Hour)
- }
-
- for _, k := range keys {
- entry, err := GetWAL(req.Storage, k)
- if err != nil {
- merr = multierror.Append(merr, err)
- continue
- }
- if entry == nil {
- continue
- }
-
- // If the entry isn't old enough, then don't roll it back
- if !time.Unix(entry.CreatedAt, 0).Before(minAge) {
- continue
- }
-
- // Attempt a WAL rollback
- err = b.WALRollback(req, entry.Kind, entry.Data)
- if err != nil {
- err = fmt.Errorf(
- "Error rolling back '%s' entry: %s", entry.Kind, err)
- }
- if err == nil {
- err = DeleteWAL(req.Storage, k)
- }
- if err != nil {
- merr = multierror.Append(merr, err)
- }
- }
-
- if merr == nil {
- return nil, nil
- }
-
- return logical.ErrorResponse(merr.Error()), nil
-}
-
-// FieldSchema is a basic schema to describe the format of a path field.
-type FieldSchema struct {
- Type FieldType
- Default interface{}
- Description string
-}
-
-// DefaultOrZero returns the default value if it is set, or otherwise
-// the zero value of the type.
-func (s *FieldSchema) DefaultOrZero() interface{} {
- if s.Default != nil {
- switch s.Type {
- case TypeDurationSecond:
- var result int
- switch inp := s.Default.(type) {
- case nil:
- return s.Type.Zero()
- case int:
- result = inp
- case int64:
- result = int(inp)
- case float32:
- result = int(inp)
- case float64:
- result = int(inp)
- case string:
- dur, err := parseutil.ParseDurationSecond(inp)
- if err != nil {
- return s.Type.Zero()
- }
- result = int(dur.Seconds())
- case json.Number:
- valInt64, err := inp.Int64()
- if err != nil {
- return s.Type.Zero()
- }
- result = int(valInt64)
- default:
- return s.Type.Zero()
- }
- return result
-
- default:
- return s.Default
- }
- }
-
- return s.Type.Zero()
-}
-
-// Zero returns the correct zero-value for a specific FieldType
-func (t FieldType) Zero() interface{} {
- switch t {
- case TypeString:
- return ""
- case TypeInt:
- return 0
- case TypeBool:
- return false
- case TypeMap:
- return map[string]interface{}{}
- case TypeDurationSecond:
- return 0
- case TypeSlice:
- return []interface{}{}
- case TypeStringSlice, TypeCommaStringSlice:
- return []string{}
- default:
- panic("unknown type: " + t.String())
- }
-}
-
-type rootHelpTemplateData struct {
- Help string
- Paths []rootHelpTemplatePath
-}
-
-type rootHelpTemplatePath struct {
- Path string
- Help string
-}
-
-const rootHelpTemplate = `
-## DESCRIPTION
-
-{{.Help}}
-
-## PATHS
-
-The following paths are supported by this backend. To view help for
-any of the paths below, use the help command with any route matching
-the path pattern. Note that depending on the policy of your auth token,
-you may or may not be able to access certain paths.
-
-{{range .Paths}}{{indent 4 .Path}}
-{{indent 8 .Help}}
-
-{{end}}
-
-`
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/backend_test.go b/vendor/github.com/hashicorp/vault/logical/framework/backend_test.go
deleted file mode 100644
index d94beed..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/backend_test.go
+++ /dev/null
@@ -1,580 +0,0 @@
-package framework
-
-import (
- "reflect"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func BenchmarkBackendRoute(b *testing.B) {
- patterns := []string{
- "foo",
- "bar/(?P.+?)",
- "baz/(?Pwhat)",
- `aws/policy/(?P\w)`,
- `aws/(?P\w)`,
- }
-
- backend := &Backend{Paths: make([]*Path, 0, len(patterns))}
- for _, p := range patterns {
- backend.Paths = append(backend.Paths, &Path{Pattern: p})
- }
-
- // Warm any caches
- backend.Route("aws/policy/foo")
-
- // Reset the timer since we did a lot above
- b.ResetTimer()
-
- // Run through and route. We do a sanity check of the return value
- for i := 0; i < b.N; i++ {
- if p := backend.Route("aws/policy/foo"); p == nil {
- b.Fatal("p should not be nil")
- }
- }
-}
-
-func TestBackend_impl(t *testing.T) {
- var _ logical.Backend = new(Backend)
-}
-
-func TestBackendHandleRequest(t *testing.T) {
- callback := func(req *logical.Request, data *FieldData) (*logical.Response, error) {
- return &logical.Response{
- Data: map[string]interface{}{
- "value": data.Get("value"),
- },
- }, nil
- }
-
- b := &Backend{
- Paths: []*Path{
- &Path{
- Pattern: "foo/bar",
- Fields: map[string]*FieldSchema{
- "value": &FieldSchema{Type: TypeInt},
- },
- Callbacks: map[logical.Operation]OperationFunc{
- logical.ReadOperation: callback,
- },
- },
- },
- }
-
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "foo/bar",
- Data: map[string]interface{}{"value": "42"},
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if resp.Data["value"] != 42 {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestBackendHandleRequest_badwrite(t *testing.T) {
- callback := func(req *logical.Request, data *FieldData) (*logical.Response, error) {
- return &logical.Response{
- Data: map[string]interface{}{
- "value": data.Get("value").(bool),
- },
- }, nil
- }
-
- b := &Backend{
- Paths: []*Path{
- &Path{
- Pattern: "foo/bar",
- Fields: map[string]*FieldSchema{
- "value": &FieldSchema{Type: TypeBool},
- },
- Callbacks: map[logical.Operation]OperationFunc{
- logical.UpdateOperation: callback,
- },
- },
- },
- }
-
- _, err := b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "foo/bar",
- Data: map[string]interface{}{"value": "3false3"},
- })
-
- if err == nil {
- t.Fatalf("should have thrown a conversion error")
- }
-
-}
-
-func TestBackendHandleRequest_404(t *testing.T) {
- callback := func(req *logical.Request, data *FieldData) (*logical.Response, error) {
- return &logical.Response{
- Data: map[string]interface{}{
- "value": data.Get("value"),
- },
- }, nil
- }
-
- b := &Backend{
- Paths: []*Path{
- &Path{
- Pattern: `foo/bar`,
- Fields: map[string]*FieldSchema{
- "value": &FieldSchema{Type: TypeInt},
- },
- Callbacks: map[logical.Operation]OperationFunc{
- logical.ReadOperation: callback,
- },
- },
- },
- }
-
- _, err := b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "foo/baz",
- Data: map[string]interface{}{"value": "84"},
- })
- if err != logical.ErrUnsupportedPath {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestBackendHandleRequest_help(t *testing.T) {
- b := &Backend{
- Paths: []*Path{
- &Path{
- Pattern: "foo/bar",
- Fields: map[string]*FieldSchema{
- "value": &FieldSchema{Type: TypeInt},
- },
- HelpSynopsis: "foo",
- HelpDescription: "bar",
- },
- },
- }
-
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.HelpOperation,
- Path: "foo/bar",
- Data: map[string]interface{}{"value": "42"},
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if resp.Data["help"] == nil {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestBackendHandleRequest_helpRoot(t *testing.T) {
- b := &Backend{
- Help: "42",
- }
-
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.HelpOperation,
- Path: "",
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if resp.Data["help"] == nil {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestBackendHandleRequest_renewAuth(t *testing.T) {
- b := &Backend{}
-
- resp, err := b.HandleRequest(logical.RenewAuthRequest(
- "/foo", &logical.Auth{}, nil))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if !resp.IsError() {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestBackendHandleRequest_renewAuthCallback(t *testing.T) {
- var called uint32
- callback := func(*logical.Request, *FieldData) (*logical.Response, error) {
- atomic.AddUint32(&called, 1)
- return nil, nil
- }
-
- b := &Backend{
- AuthRenew: callback,
- }
-
- _, err := b.HandleRequest(logical.RenewAuthRequest(
- "/foo", &logical.Auth{}, nil))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if v := atomic.LoadUint32(&called); v != 1 {
- t.Fatalf("bad: %#v", v)
- }
-}
-func TestBackendHandleRequest_renew(t *testing.T) {
- var called uint32
- callback := func(*logical.Request, *FieldData) (*logical.Response, error) {
- atomic.AddUint32(&called, 1)
- return nil, nil
- }
-
- secret := &Secret{
- Type: "foo",
- Renew: callback,
- }
- b := &Backend{
- Secrets: []*Secret{secret},
- }
-
- _, err := b.HandleRequest(logical.RenewRequest(
- "/foo", secret.Response(nil, nil).Secret, nil))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if v := atomic.LoadUint32(&called); v != 1 {
- t.Fatalf("bad: %#v", v)
- }
-}
-
-func TestBackendHandleRequest_renewExtend(t *testing.T) {
- sysView := logical.StaticSystemView{
- DefaultLeaseTTLVal: 5 * time.Minute,
- MaxLeaseTTLVal: 30 * time.Hour,
- }
-
- secret := &Secret{
- Type: "foo",
- Renew: LeaseExtend(0, 0, sysView),
- DefaultDuration: 5 * time.Minute,
- }
- b := &Backend{
- Secrets: []*Secret{secret},
- }
-
- req := logical.RenewRequest("/foo", secret.Response(nil, nil).Secret, nil)
- req.Secret.IssueTime = time.Now()
- req.Secret.Increment = 1 * time.Hour
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if resp == nil || resp.Secret == nil {
- t.Fatal("should have secret")
- }
-
- if resp.Secret.TTL < 59*time.Minute || resp.Secret.TTL > 61*time.Minute {
- t.Fatalf("bad: %s", resp.Secret.TTL)
- }
-}
-
-func TestBackendHandleRequest_revoke(t *testing.T) {
- var called uint32
- callback := func(*logical.Request, *FieldData) (*logical.Response, error) {
- atomic.AddUint32(&called, 1)
- return nil, nil
- }
-
- secret := &Secret{
- Type: "foo",
- Revoke: callback,
- }
- b := &Backend{
- Secrets: []*Secret{secret},
- }
-
- _, err := b.HandleRequest(logical.RevokeRequest(
- "/foo", secret.Response(nil, nil).Secret, nil))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if v := atomic.LoadUint32(&called); v != 1 {
- t.Fatalf("bad: %#v", v)
- }
-}
-
-func TestBackendHandleRequest_rollback(t *testing.T) {
- var called uint32
- callback := func(req *logical.Request, kind string, data interface{}) error {
- if data == "foo" {
- atomic.AddUint32(&called, 1)
- }
-
- return nil
- }
-
- b := &Backend{
- WALRollback: callback,
- WALRollbackMinAge: 1 * time.Millisecond,
- }
-
- storage := new(logical.InmemStorage)
- if _, err := PutWAL(storage, "kind", "foo"); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- time.Sleep(10 * time.Millisecond)
-
- _, err := b.HandleRequest(&logical.Request{
- Operation: logical.RollbackOperation,
- Path: "",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if v := atomic.LoadUint32(&called); v != 1 {
- t.Fatalf("bad: %#v", v)
- }
-}
-
-func TestBackendHandleRequest_rollbackMinAge(t *testing.T) {
- var called uint32
- callback := func(req *logical.Request, kind string, data interface{}) error {
- if data == "foo" {
- atomic.AddUint32(&called, 1)
- }
-
- return nil
- }
-
- b := &Backend{
- WALRollback: callback,
- WALRollbackMinAge: 5 * time.Second,
- }
-
- storage := new(logical.InmemStorage)
- if _, err := PutWAL(storage, "kind", "foo"); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- _, err := b.HandleRequest(&logical.Request{
- Operation: logical.RollbackOperation,
- Path: "",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if v := atomic.LoadUint32(&called); v != 0 {
- t.Fatalf("bad: %#v", v)
- }
-}
-
-func TestBackendHandleRequest_unsupportedOperation(t *testing.T) {
- callback := func(req *logical.Request, data *FieldData) (*logical.Response, error) {
- return &logical.Response{
- Data: map[string]interface{}{
- "value": data.Get("value"),
- },
- }, nil
- }
-
- b := &Backend{
- Paths: []*Path{
- &Path{
- Pattern: `foo/bar`,
- Fields: map[string]*FieldSchema{
- "value": &FieldSchema{Type: TypeInt},
- },
- Callbacks: map[logical.Operation]OperationFunc{
- logical.ReadOperation: callback,
- },
- },
- },
- }
-
- _, err := b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "foo/bar",
- Data: map[string]interface{}{"value": "84"},
- })
- if err != logical.ErrUnsupportedOperation {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestBackendHandleRequest_urlPriority(t *testing.T) {
- callback := func(req *logical.Request, data *FieldData) (*logical.Response, error) {
- return &logical.Response{
- Data: map[string]interface{}{
- "value": data.Get("value"),
- },
- }, nil
- }
-
- b := &Backend{
- Paths: []*Path{
- &Path{
- Pattern: `foo/(?P\d+)`,
- Fields: map[string]*FieldSchema{
- "value": &FieldSchema{Type: TypeInt},
- },
- Callbacks: map[logical.Operation]OperationFunc{
- logical.ReadOperation: callback,
- },
- },
- },
- }
-
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "foo/42",
- Data: map[string]interface{}{"value": "84"},
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if resp.Data["value"] != 42 {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestBackendRoute(t *testing.T) {
- cases := map[string]struct {
- Patterns []string
- Path string
- Match string
- }{
- "no match": {
- []string{"foo"},
- "bar",
- "",
- },
-
- "exact": {
- []string{"foo"},
- "foo",
- "^foo$",
- },
-
- "regexp": {
- []string{"fo+"},
- "foo",
- "^fo+$",
- },
-
- "anchor-start": {
- []string{"bar"},
- "foobar",
- "",
- },
-
- "anchor-end": {
- []string{"bar"},
- "barfoo",
- "",
- },
-
- "anchor-ambiguous": {
- []string{"mounts", "sys/mounts"},
- "sys/mounts",
- "^sys/mounts$",
- },
- }
-
- for n, tc := range cases {
- paths := make([]*Path, len(tc.Patterns))
- for i, pattern := range tc.Patterns {
- paths[i] = &Path{Pattern: pattern}
- }
-
- b := &Backend{Paths: paths}
- result := b.Route(tc.Path)
- match := ""
- if result != nil {
- match = result.Pattern
- }
-
- if match != tc.Match {
- t.Fatalf("bad: %s\n\nExpected: %s\nGot: %s",
- n, tc.Match, match)
- }
- }
-}
-
-func TestBackendSecret(t *testing.T) {
- cases := map[string]struct {
- Secrets []*Secret
- Search string
- Match bool
- }{
- "no match": {
- []*Secret{&Secret{Type: "foo"}},
- "bar",
- false,
- },
-
- "match": {
- []*Secret{&Secret{Type: "foo"}},
- "foo",
- true,
- },
- }
-
- for n, tc := range cases {
- b := &Backend{Secrets: tc.Secrets}
- result := b.Secret(tc.Search)
- if tc.Match != (result != nil) {
- t.Fatalf("bad: %s\n\nExpected match: %v", n, tc.Match)
- }
- if result != nil && result.Type != tc.Search {
- t.Fatalf("bad: %s\n\nExpected matching type: %#v", n, result)
- }
- }
-}
-
-func TestFieldSchemaDefaultOrZero(t *testing.T) {
- cases := map[string]struct {
- Schema *FieldSchema
- Value interface{}
- }{
- "default set": {
- &FieldSchema{Type: TypeString, Default: "foo"},
- "foo",
- },
-
- "default not set": {
- &FieldSchema{Type: TypeString},
- "",
- },
-
- "default duration set": {
- &FieldSchema{Type: TypeDurationSecond, Default: 60},
- 60,
- },
-
- "default duration int64": {
- &FieldSchema{Type: TypeDurationSecond, Default: int64(60)},
- 60,
- },
-
- "default duration string": {
- &FieldSchema{Type: TypeDurationSecond, Default: "60s"},
- 60,
- },
-
- "default duration not set": {
- &FieldSchema{Type: TypeDurationSecond},
- 0,
- },
- }
-
- for name, tc := range cases {
- actual := tc.Schema.DefaultOrZero()
- if !reflect.DeepEqual(actual, tc.Value) {
- t.Fatalf("bad: %s\n\nExpected: %#v\nGot: %#v",
- name, tc.Value, actual)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_data.go b/vendor/github.com/hashicorp/vault/logical/framework/field_data.go
deleted file mode 100644
index 7fac976..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/field_data.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package framework
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "regexp"
-
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/mitchellh/mapstructure"
-)
-
-// FieldData is the structure passed to the callback to handle a path
-// containing the populated parameters for fields. This should be used
-// instead of the raw (*vault.Request).Data to access data in a type-safe
-// way.
-type FieldData struct {
- Raw map[string]interface{}
- Schema map[string]*FieldSchema
-}
-
-// Validate cycles through raw data and validate conversions in
-// the schema, so we don't get an error/panic later when
-// trying to get data out. Data not in the schema is not
-// an error at this point, so we don't worry about it.
-func (d *FieldData) Validate() error {
- for field, value := range d.Raw {
-
- schema, ok := d.Schema[field]
- if !ok {
- continue
- }
-
- switch schema.Type {
- case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString,
- TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice:
- _, _, err := d.getPrimitive(field, schema)
- if err != nil {
- return fmt.Errorf("Error converting input %v for field %s: %s", value, field, err)
- }
- default:
- return fmt.Errorf("unknown field type %s for field %s",
- schema.Type, field)
- }
- }
-
- return nil
-}
-
-// Get gets the value for the given field. If the key is an invalid field,
-// FieldData will panic. If you want a safer version of this method, use
-// GetOk. If the field k is not set, the default value (if set) will be
-// returned, otherwise the zero value will be returned.
-func (d *FieldData) Get(k string) interface{} {
- schema, ok := d.Schema[k]
- if !ok {
- panic(fmt.Sprintf("field %s not in the schema", k))
- }
-
- value, ok := d.GetOk(k)
- if !ok {
- value = schema.DefaultOrZero()
- }
-
- return value
-}
-
-// GetDefaultOrZero gets the default value set on the schema for the given
-// field. If there is no default value set, the zero value of the type
-// will be returned.
-func (d *FieldData) GetDefaultOrZero(k string) interface{} {
- schema, ok := d.Schema[k]
- if !ok {
- panic(fmt.Sprintf("field %s not in the schema", k))
- }
-
- return schema.DefaultOrZero()
-}
-
-// GetOk gets the value for the given field. The second return value
-// will be false if the key is invalid or the key is not set at all.
-func (d *FieldData) GetOk(k string) (interface{}, bool) {
- schema, ok := d.Schema[k]
- if !ok {
- return nil, false
- }
-
- result, ok, err := d.GetOkErr(k)
- if err != nil {
- panic(fmt.Sprintf("error reading %s: %s", k, err))
- }
-
- if ok && result == nil {
- result = schema.DefaultOrZero()
- }
-
- return result, ok
-}
-
-// GetOkErr is the most conservative of all the Get methods. It returns
-// whether key is set or not, but also an error value. The error value is
-// non-nil if the field doesn't exist or there was an error parsing the
-// field value.
-func (d *FieldData) GetOkErr(k string) (interface{}, bool, error) {
- schema, ok := d.Schema[k]
- if !ok {
- return nil, false, fmt.Errorf("unknown field: %s", k)
- }
-
- switch schema.Type {
- case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString,
- TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice:
- return d.getPrimitive(k, schema)
- default:
- return nil, false,
- fmt.Errorf("unknown field type %s for field %s", schema.Type, k)
- }
-}
-
-func (d *FieldData) getPrimitive(
- k string, schema *FieldSchema) (interface{}, bool, error) {
- raw, ok := d.Raw[k]
- if !ok {
- return nil, false, nil
- }
-
- switch schema.Type {
- case TypeBool:
- var result bool
- if err := mapstructure.WeakDecode(raw, &result); err != nil {
- return nil, true, err
- }
- return result, true, nil
-
- case TypeInt:
- var result int
- if err := mapstructure.WeakDecode(raw, &result); err != nil {
- return nil, true, err
- }
- return result, true, nil
-
- case TypeString:
- var result string
- if err := mapstructure.WeakDecode(raw, &result); err != nil {
- return nil, true, err
- }
- return result, true, nil
-
- case TypeNameString:
- var result string
- if err := mapstructure.WeakDecode(raw, &result); err != nil {
- return nil, true, err
- }
- matched, err := regexp.MatchString("^\\w(([\\w-.]+)?\\w)?$", result)
- if err != nil {
- return nil, true, err
- }
- if !matched {
- return nil, true, errors.New("field does not match the formatting rules")
- }
- return result, true, nil
-
- case TypeMap:
- var result map[string]interface{}
- if err := mapstructure.WeakDecode(raw, &result); err != nil {
- return nil, true, err
- }
- return result, true, nil
-
- case TypeDurationSecond:
- var result int
- switch inp := raw.(type) {
- case nil:
- return nil, false, nil
- case int:
- result = inp
- case int32:
- result = int(inp)
- case int64:
- result = int(inp)
- case uint:
- result = int(inp)
- case uint32:
- result = int(inp)
- case uint64:
- result = int(inp)
- case float32:
- result = int(inp)
- case float64:
- result = int(inp)
- case string:
- dur, err := parseutil.ParseDurationSecond(inp)
- if err != nil {
- return nil, true, err
- }
- result = int(dur.Seconds())
- case json.Number:
- valInt64, err := inp.Int64()
- if err != nil {
- return nil, true, err
- }
- result = int(valInt64)
- default:
- return nil, false, fmt.Errorf("invalid input '%v'", raw)
- }
- return result, true, nil
-
- case TypeSlice:
- var result []interface{}
- if err := mapstructure.WeakDecode(raw, &result); err != nil {
- return nil, true, err
- }
- return result, true, nil
-
- case TypeStringSlice:
- var result []string
- if err := mapstructure.WeakDecode(raw, &result); err != nil {
- return nil, true, err
- }
- return strutil.TrimStrings(result), true, nil
-
- case TypeCommaStringSlice:
- var result []string
- config := &mapstructure.DecoderConfig{
- Result: &result,
- WeaklyTypedInput: true,
- DecodeHook: mapstructure.StringToSliceHookFunc(","),
- }
- decoder, err := mapstructure.NewDecoder(config)
- if err != nil {
- return nil, false, err
- }
- if err := decoder.Decode(raw); err != nil {
- return nil, false, err
- }
- return strutil.TrimStrings(result), true, nil
-
- default:
- panic(fmt.Sprintf("Unknown type: %s", schema.Type))
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go b/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go
deleted file mode 100644
index a9bc474..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go
+++ /dev/null
@@ -1,357 +0,0 @@
-package framework
-
-import (
- "reflect"
- "testing"
-)
-
-func TestFieldDataGet(t *testing.T) {
- cases := map[string]struct {
- Schema map[string]*FieldSchema
- Raw map[string]interface{}
- Key string
- Value interface{}
- }{
- "string type, string value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeString},
- },
- map[string]interface{}{
- "foo": "bar",
- },
- "foo",
- "bar",
- },
-
- "string type, int value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeString},
- },
- map[string]interface{}{
- "foo": 42,
- },
- "foo",
- "42",
- },
-
- "string type, unset value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeString},
- },
- map[string]interface{}{},
- "foo",
- "",
- },
-
- "string type, unset value with default": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{
- Type: TypeString,
- Default: "bar",
- },
- },
- map[string]interface{}{},
- "foo",
- "bar",
- },
-
- "int type, int value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeInt},
- },
- map[string]interface{}{
- "foo": 42,
- },
- "foo",
- 42,
- },
-
- "bool type, bool value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeBool},
- },
- map[string]interface{}{
- "foo": false,
- },
- "foo",
- false,
- },
-
- "map type, map value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeMap},
- },
- map[string]interface{}{
- "foo": map[string]interface{}{
- "child": true,
- },
- },
- "foo",
- map[string]interface{}{
- "child": true,
- },
- },
-
- "duration type, string value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeDurationSecond},
- },
- map[string]interface{}{
- "foo": "42",
- },
- "foo",
- 42,
- },
-
- "duration type, string duration value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeDurationSecond},
- },
- map[string]interface{}{
- "foo": "42m",
- },
- "foo",
- 2520,
- },
-
- "duration type, int value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeDurationSecond},
- },
- map[string]interface{}{
- "foo": 42,
- },
- "foo",
- 42,
- },
-
- "duration type, float value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeDurationSecond},
- },
- map[string]interface{}{
- "foo": 42.0,
- },
- "foo",
- 42,
- },
-
- "duration type, nil value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeDurationSecond},
- },
- map[string]interface{}{
- "foo": nil,
- },
- "foo",
- 0,
- },
-
- "slice type, empty slice": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeSlice},
- },
- map[string]interface{}{
- "foo": []interface{}{},
- },
- "foo",
- []interface{}{},
- },
-
- "slice type, filled, mixed slice": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeSlice},
- },
- map[string]interface{}{
- "foo": []interface{}{123, "abc"},
- },
- "foo",
- []interface{}{123, "abc"},
- },
-
- "string slice type, filled slice": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeStringSlice},
- },
- map[string]interface{}{
- "foo": []interface{}{123, "abc"},
- },
- "foo",
- []string{"123", "abc"},
- },
-
- "string slice type, single value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeStringSlice},
- },
- map[string]interface{}{
- "foo": "abc",
- },
- "foo",
- []string{"abc"},
- },
-
- "comma string slice type, comma string with one value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeCommaStringSlice},
- },
- map[string]interface{}{
- "foo": "value1",
- },
- "foo",
- []string{"value1"},
- },
-
- "comma string slice type, comma string with multi value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeCommaStringSlice},
- },
- map[string]interface{}{
- "foo": "value1,value2,value3",
- },
- "foo",
- []string{"value1", "value2", "value3"},
- },
-
- "comma string slice type, nil string slice value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeCommaStringSlice},
- },
- map[string]interface{}{
- "foo": "",
- },
- "foo",
- []string{},
- },
-
- "commma string slice type, string slice with one value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeCommaStringSlice},
- },
- map[string]interface{}{
- "foo": []interface{}{"value1"},
- },
- "foo",
- []string{"value1"},
- },
-
- "comma string slice type, string slice with multi value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeCommaStringSlice},
- },
- map[string]interface{}{
- "foo": []interface{}{"value1", "value2", "value3"},
- },
- "foo",
- []string{"value1", "value2", "value3"},
- },
-
- "comma string slice type, empty string slice value": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeCommaStringSlice},
- },
- map[string]interface{}{
- "foo": []interface{}{},
- },
- "foo",
- []string{},
- },
-
- "name string type, valid string": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeNameString},
- },
- map[string]interface{}{
- "foo": "bar",
- },
- "foo",
- "bar",
- },
-
- "name string type, valid value with special characters": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeNameString},
- },
- map[string]interface{}{
- "foo": "bar.baz-bay123",
- },
- "foo",
- "bar.baz-bay123",
- },
- }
-
- for name, tc := range cases {
- data := &FieldData{
- Raw: tc.Raw,
- Schema: tc.Schema,
- }
-
- if err := data.Validate(); err != nil {
- t.Fatalf("bad: %#v", err)
- }
-
- actual := data.Get(tc.Key)
- if !reflect.DeepEqual(actual, tc.Value) {
- t.Fatalf(
- "bad: %s\n\nExpected: %#v\nGot: %#v",
- name, tc.Value, actual)
- }
- }
-}
-
-func TestFieldDataGet_Error(t *testing.T) {
- cases := map[string]struct {
- Schema map[string]*FieldSchema
- Raw map[string]interface{}
- Key string
- }{
- "name string type, invalid value with invalid characters": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeNameString},
- },
- map[string]interface{}{
- "foo": "bar baz",
- },
- "foo",
- },
- "name string type, invalid value with special characters at beginning": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeNameString},
- },
- map[string]interface{}{
- "foo": ".barbaz",
- },
- "foo",
- },
- "name string type, invalid value with special characters at end": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeNameString},
- },
- map[string]interface{}{
- "foo": "barbaz-",
- },
- "foo",
- },
- "name string type, empty string": {
- map[string]*FieldSchema{
- "foo": &FieldSchema{Type: TypeNameString},
- },
- map[string]interface{}{
- "foo": "",
- },
- "foo",
- },
- }
-
- for _, tc := range cases {
- data := &FieldData{
- Raw: tc.Raw,
- Schema: tc.Schema,
- }
-
- _, _, err := data.GetOkErr(tc.Key)
- if err == nil {
- t.Fatalf("error expected, none received")
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_type.go b/vendor/github.com/hashicorp/vault/logical/framework/field_type.go
deleted file mode 100644
index 304d45f..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/field_type.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package framework
-
-// FieldType is the enum of types that a field can be.
-type FieldType uint
-
-const (
- TypeInvalid FieldType = 0
- TypeString FieldType = iota
- TypeInt
- TypeBool
- TypeMap
-
- // TypeDurationSecond represent as seconds, this can be either an
- // integer or go duration format string (e.g. 24h)
- TypeDurationSecond
-
- // TypeSlice represents a slice of any type
- TypeSlice
- // TypeStringSlice is a helper for TypeSlice that returns a sanitized
- // slice of strings
- TypeStringSlice
- // TypeCommaStringSlice is a helper for TypeSlice that returns a sanitized
- // slice of strings and also supports parsing a comma-separated list in
- // a string field
- TypeCommaStringSlice
-
- // TypeNameString represents a name that is URI safe and follows specific
- // rules. These rules include start and end with an alphanumeric
- // character and characters in the middle can be alphanumeric or . or -.
- TypeNameString
-)
-
-func (t FieldType) String() string {
- switch t {
- case TypeString:
- return "string"
- case TypeNameString:
- return "name string"
- case TypeInt:
- return "int"
- case TypeBool:
- return "bool"
- case TypeMap:
- return "map"
- case TypeDurationSecond:
- return "duration (sec)"
- case TypeSlice, TypeStringSlice, TypeCommaStringSlice:
- return "slice"
- default:
- return "unknown type"
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/lease.go b/vendor/github.com/hashicorp/vault/logical/framework/lease.go
deleted file mode 100644
index 4fd2ac9..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/lease.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package framework
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// LeaseExtend returns an OperationFunc that can be used to simply extend the
-// lease of the auth/secret for the duration that was requested.
-//
-// backendIncrement is the backend's requested increment -- perhaps from a user
-// request, perhaps from a role/config value. If not set, uses the mount/system
-// value.
-//
-// backendMax is the backend's requested increment -- this can be more
-// restrictive than the mount/system value but not less.
-//
-// systemView is the system view from the calling backend, used to determine
-// and/or correct default/max times.
-func LeaseExtend(backendIncrement, backendMax time.Duration, systemView logical.SystemView) OperationFunc {
- return func(req *logical.Request, data *FieldData) (*logical.Response, error) {
- var leaseOpts *logical.LeaseOptions
- switch {
- case req.Auth != nil:
- leaseOpts = &req.Auth.LeaseOptions
- case req.Secret != nil:
- leaseOpts = &req.Secret.LeaseOptions
- default:
- return nil, fmt.Errorf("no lease options for request")
- }
-
- // Use the mount's configured max unless the backend specifies
- // something more restrictive (perhaps from a role configuration
- // parameter)
- max := systemView.MaxLeaseTTL()
- if backendMax > 0 && backendMax < max {
- max = backendMax
- }
-
- // Should never happen, but guard anyways
- if max < 0 {
- return nil, fmt.Errorf("max TTL is negative")
- }
-
- // We cannot go past this time
- maxValidTime := leaseOpts.IssueTime.Add(max)
-
- // Get the current time
- now := time.Now()
-
- // If we are past the max TTL, we shouldn't be in this function...but
- // fast path out if we are
- if maxValidTime.Before(now) {
- return nil, fmt.Errorf("past the max TTL, cannot renew")
- }
-
- // Basic max safety checks have passed, now let's figure out our
- // increment. We'll use the user-supplied value first, then backend-provided default if possible, or the
- // mount/system default if not.
- increment := leaseOpts.Increment
- if increment <= 0 {
- if backendIncrement > 0 {
- increment = backendIncrement
- } else {
- increment = systemView.DefaultLeaseTTL()
- }
- }
-
- // We are proposing a time of the current time plus the increment
- proposedExpiration := now.Add(increment)
-
- // If the proposed expiration is after the maximum TTL of the lease,
- // cap the increment to whatever is left
- if maxValidTime.Before(proposedExpiration) {
- increment = maxValidTime.Sub(now)
- }
-
- // Set the lease
- leaseOpts.TTL = increment
-
- return &logical.Response{Auth: req.Auth, Secret: req.Secret}, nil
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/lease_test.go b/vendor/github.com/hashicorp/vault/logical/framework/lease_test.go
deleted file mode 100644
index b45b9c7..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/lease_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package framework
-
-import (
- "testing"
- "time"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestLeaseExtend(t *testing.T) {
-
- testSysView := logical.StaticSystemView{
- DefaultLeaseTTLVal: 5 * time.Hour,
- MaxLeaseTTLVal: 30 * time.Hour,
- }
-
- now := time.Now().Round(time.Hour)
-
- cases := map[string]struct {
- BackendDefault time.Duration
- BackendMax time.Duration
- Increment time.Duration
- Result time.Duration
- Error bool
- }{
- "valid request, good bounds, increment is preferred": {
- BackendDefault: 30 * time.Hour,
- Increment: 1 * time.Hour,
- Result: 1 * time.Hour,
- },
-
- "valid request, zero backend default, uses increment": {
- BackendDefault: 0,
- Increment: 1 * time.Hour,
- Result: 1 * time.Hour,
- },
-
- "lease increment is zero, uses backend default": {
- BackendDefault: 30 * time.Hour,
- Increment: 0,
- Result: 30 * time.Hour,
- },
-
- "lease increment and default are zero, uses systemview": {
- BackendDefault: 0,
- Increment: 0,
- Result: 5 * time.Hour,
- },
-
- "backend max and associated request are too long": {
- BackendDefault: 40 * time.Hour,
- BackendMax: 45 * time.Hour,
- Result: 30 * time.Hour,
- },
-
- "all request values are larger than the system view, so the system view limits": {
- BackendDefault: 40 * time.Hour,
- BackendMax: 50 * time.Hour,
- Increment: 40 * time.Hour,
- Result: 30 * time.Hour,
- },
-
- "request within backend max": {
- BackendDefault: 9 * time.Hour,
- BackendMax: 5 * time.Hour,
- Increment: 4 * time.Hour,
- Result: 4 * time.Hour,
- },
-
- "request outside backend max": {
- BackendDefault: 9 * time.Hour,
- BackendMax: 4 * time.Hour,
- Increment: 5 * time.Hour,
- Result: 4 * time.Hour,
- },
-
- "request is negative, no backend default, use sysview": {
- Increment: -7 * time.Hour,
- Result: 5 * time.Hour,
- },
-
- "lease increment too large": {
- Increment: 40 * time.Hour,
- Result: 30 * time.Hour,
- },
- }
-
- for name, tc := range cases {
- req := &logical.Request{
- Auth: &logical.Auth{
- LeaseOptions: logical.LeaseOptions{
- TTL: 1 * time.Hour,
- IssueTime: now,
- Increment: tc.Increment,
- },
- },
- }
-
- callback := LeaseExtend(tc.BackendDefault, tc.BackendMax, testSysView)
- resp, err := callback(req, nil)
- if (err != nil) != tc.Error {
- t.Fatalf("bad: %s\nerr: %s", name, err)
- }
- if tc.Error {
- continue
- }
-
- // Round it to the nearest hour
- lease := now.Add(resp.Auth.TTL).Round(time.Hour).Sub(now)
- if lease != tc.Result {
- t.Fatalf("bad: %s\nlease: %s", name, lease)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path.go b/vendor/github.com/hashicorp/vault/logical/framework/path.go
deleted file mode 100644
index fa0c037..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/path.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package framework
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// Helper which returns a generic regex string for creating endpoint patterns
-// that are identified by the given name in the backends
-func GenericNameRegex(name string) string {
- return fmt.Sprintf("(?P<%s>\\w(([\\w-.]+)?\\w)?)", name)
-}
-
-// Helper which returns a regex string for optionally accepting the a field
-// from the API URL
-func OptionalParamRegex(name string) string {
- return fmt.Sprintf("(/(?P<%s>.+))?", name)
-}
-
-// PathAppend is a helper for appending lists of paths into a single
-// list.
-func PathAppend(paths ...[]*Path) []*Path {
- result := make([]*Path, 0, 10)
- for _, ps := range paths {
- result = append(result, ps...)
- }
-
- return result
-}
-
-// Path is a single path that the backend responds to.
-type Path struct {
- // Pattern is the pattern of the URL that matches this path.
- //
- // This should be a valid regular expression. Named captures will be
- // exposed as fields that should map to a schema in Fields. If a named
- // capture is not a field in the Fields map, then it will be ignored.
- Pattern string
-
- // Fields is the mapping of data fields to a schema describing that
- // field. Named captures in the Pattern also map to fields. If a named
- // capture name matches a PUT body name, the named capture takes
- // priority.
- //
- // Note that only named capture fields are available in every operation,
- // whereas all fields are available in the Write operation.
- Fields map[string]*FieldSchema
-
- // Callbacks are the set of callbacks that are called for a given
- // operation. If a callback for a specific operation is not present,
- // then logical.ErrUnsupportedOperation is automatically generated.
- //
- // The help operation is the only operation that the Path will
- // automatically handle if the Help field is set. If both the Help
- // field is set and there is a callback registered here, then the
- // callback will be called.
- Callbacks map[logical.Operation]OperationFunc
-
- // ExistenceCheck, if implemented, is used to query whether a given
- // resource exists or not. This is used for ACL purposes: if an Update
- // action is specified, and the existence check returns false, the action
- // is not allowed since the resource must first be created. The reverse is
- // also true. If not specified, the Update action is forced and the user
- // must have UpdateCapability on the path.
- ExistenceCheck func(*logical.Request, *FieldData) (bool, error)
-
- // Help is text describing how to use this path. This will be used
- // to auto-generate the help operation. The Path will automatically
- // generate a parameter listing and URL structure based on the
- // regular expression, so the help text should just contain a description
- // of what happens.
- //
- // HelpSynopsis is a one-sentence description of the path. This will
- // be automatically line-wrapped at 80 characters.
- //
- // HelpDescription is a long-form description of the path. This will
- // be automatically line-wrapped at 80 characters.
- HelpSynopsis string
- HelpDescription string
-}
-
-func (p *Path) helpCallback(
- req *logical.Request, data *FieldData) (*logical.Response, error) {
- var tplData pathTemplateData
- tplData.Request = req.Path
- tplData.RoutePattern = p.Pattern
- tplData.Synopsis = strings.TrimSpace(p.HelpSynopsis)
- if tplData.Synopsis == "" {
- tplData.Synopsis = ""
- }
- tplData.Description = strings.TrimSpace(p.HelpDescription)
- if tplData.Description == "" {
- tplData.Description = ""
- }
-
- // Alphabetize the fields
- fieldKeys := make([]string, 0, len(p.Fields))
- for k, _ := range p.Fields {
- fieldKeys = append(fieldKeys, k)
- }
- sort.Strings(fieldKeys)
-
- // Build the field help
- tplData.Fields = make([]pathTemplateFieldData, len(fieldKeys))
- for i, k := range fieldKeys {
- schema := p.Fields[k]
- description := strings.TrimSpace(schema.Description)
- if description == "" {
- description = ""
- }
-
- tplData.Fields[i] = pathTemplateFieldData{
- Key: k,
- Type: schema.Type.String(),
- Description: description,
- }
- }
-
- help, err := executeTemplate(pathHelpTemplate, &tplData)
- if err != nil {
- return nil, fmt.Errorf("error executing template: %s", err)
- }
-
- return logical.HelpResponse(help, nil), nil
-}
-
-type pathTemplateData struct {
- Request string
- RoutePattern string
- Synopsis string
- Description string
- Fields []pathTemplateFieldData
-}
-
-type pathTemplateFieldData struct {
- Key string
- Type string
- Description string
- URL bool
-}
-
-const pathHelpTemplate = `
-Request: {{.Request}}
-Matching Route: {{.RoutePattern}}
-
-{{.Synopsis}}
-
-{{ if .Fields -}}
-## PARAMETERS
-{{range .Fields}}
-{{indent 4 .Key}} ({{.Type}})
-{{indent 8 .Description}}
-{{end}}{{end}}
-## DESCRIPTION
-
-{{.Description}}
-`
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_map.go b/vendor/github.com/hashicorp/vault/logical/framework/path_map.go
deleted file mode 100644
index f9fa3a6..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/path_map.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package framework
-
-import (
- "fmt"
- "strings"
- "sync"
-
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-// PathMap can be used to generate a path that stores mappings in the
-// storage. It is a structure that also exports functions for querying the
-// mappings.
-//
-// The primary use case for this is for credential providers to do their
-// mapping to policies.
-type PathMap struct {
- Prefix string
- Name string
- Schema map[string]*FieldSchema
- CaseSensitive bool
- Salt *salt.Salt
- SaltFunc func() (*salt.Salt, error)
-
- once sync.Once
-}
-
-func (p *PathMap) init() {
- if p.Prefix == "" {
- p.Prefix = "map"
- }
-
- if p.Schema == nil {
- p.Schema = map[string]*FieldSchema{
- "value": &FieldSchema{
- Type: TypeString,
- Description: fmt.Sprintf("Value for %s mapping", p.Name),
- },
- }
- }
-}
-
-// pathStruct returns the pathStruct for this mapping
-func (p *PathMap) pathStruct(s logical.Storage, k string) (*PathStruct, error) {
- p.once.Do(p.init)
-
- // If we don't care about casing, store everything lowercase
- if !p.CaseSensitive {
- k = strings.ToLower(k)
- }
-
- // The original key before any salting
- origKey := k
-
- // If we have a salt, apply it before lookup
- salt := p.Salt
- var err error
- if p.SaltFunc != nil {
- salt, err = p.SaltFunc()
- if err != nil {
- return nil, err
- }
- }
- if salt != nil {
- k = salt.SaltID(k)
- }
-
- finalName := fmt.Sprintf("map/%s/%s", p.Name, k)
- ps := &PathStruct{
- Name: finalName,
- Schema: p.Schema,
- }
-
- // Check for unsalted version and upgrade if so
- if k != origKey {
- // Generate the unsalted name
- unsaltedName := fmt.Sprintf("map/%s/%s", p.Name, origKey)
- // Set the path struct to use the unsalted name
- ps.Name = unsaltedName
- // Ensure that no matter what happens what is returned is the final
- // path
- defer func() {
- ps.Name = finalName
- }()
- val, err := ps.Get(s)
- if err != nil {
- return nil, err
- }
- // If not nil, we have an unsalted entry -- upgrade it
- if val != nil {
- // Set the path struct to use the desired final name
- ps.Name = finalName
- err = ps.Put(s, val)
- if err != nil {
- return nil, err
- }
- // Set it back to the old path and delete
- ps.Name = unsaltedName
- err = ps.Delete(s)
- if err != nil {
- return nil, err
- }
- // We'll set this in the deferred function but doesn't hurt here
- ps.Name = finalName
- }
- }
-
- return ps, nil
-}
-
-// Get reads a value out of the mapping
-func (p *PathMap) Get(s logical.Storage, k string) (map[string]interface{}, error) {
- ps, err := p.pathStruct(s, k)
- if err != nil {
- return nil, err
- }
- return ps.Get(s)
-}
-
-// Put writes a value into the mapping
-func (p *PathMap) Put(s logical.Storage, k string, v map[string]interface{}) error {
- ps, err := p.pathStruct(s, k)
- if err != nil {
- return err
- }
- return ps.Put(s, v)
-}
-
-// Delete removes a value from the mapping
-func (p *PathMap) Delete(s logical.Storage, k string) error {
- ps, err := p.pathStruct(s, k)
- if err != nil {
- return err
- }
- return ps.Delete(s)
-}
-
-// List reads the keys under a given path
-func (p *PathMap) List(s logical.Storage, prefix string) ([]string, error) {
- stripPrefix := fmt.Sprintf("struct/map/%s/", p.Name)
- fullPrefix := fmt.Sprintf("%s%s", stripPrefix, prefix)
- out, err := s.List(fullPrefix)
- if err != nil {
- return nil, err
- }
- stripped := make([]string, len(out))
- for idx, k := range out {
- stripped[idx] = strings.TrimPrefix(k, stripPrefix)
- }
- return stripped, nil
-}
-
-// Paths are the paths to append to the Backend paths.
-func (p *PathMap) Paths() []*Path {
- p.once.Do(p.init)
-
- // Build the schema by simply adding the "key"
- schema := make(map[string]*FieldSchema)
- for k, v := range p.Schema {
- schema[k] = v
- }
- schema["key"] = &FieldSchema{
- Type: TypeString,
- Description: fmt.Sprintf("Key for the %s mapping", p.Name),
- }
-
- return []*Path{
- &Path{
- Pattern: fmt.Sprintf("%s/%s/?$", p.Prefix, p.Name),
-
- Callbacks: map[logical.Operation]OperationFunc{
- logical.ListOperation: p.pathList,
- logical.ReadOperation: p.pathList,
- },
-
- HelpSynopsis: fmt.Sprintf("Read mappings for %s", p.Name),
- },
-
- &Path{
- Pattern: fmt.Sprintf(`%s/%s/(?P[-\w]+)`, p.Prefix, p.Name),
-
- Fields: schema,
-
- Callbacks: map[logical.Operation]OperationFunc{
- logical.CreateOperation: p.pathSingleWrite,
- logical.ReadOperation: p.pathSingleRead,
- logical.UpdateOperation: p.pathSingleWrite,
- logical.DeleteOperation: p.pathSingleDelete,
- },
-
- HelpSynopsis: fmt.Sprintf("Read/write/delete a single %s mapping", p.Name),
-
- ExistenceCheck: p.pathSingleExistenceCheck,
- },
- }
-}
-
-func (p *PathMap) pathList(
- req *logical.Request, d *FieldData) (*logical.Response, error) {
- keys, err := p.List(req.Storage, "")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(keys), nil
-}
-
-func (p *PathMap) pathSingleRead(
- req *logical.Request, d *FieldData) (*logical.Response, error) {
- v, err := p.Get(req.Storage, d.Get("key").(string))
- if err != nil {
- return nil, err
- }
-
- return &logical.Response{
- Data: v,
- }, nil
-}
-
-func (p *PathMap) pathSingleWrite(
- req *logical.Request, d *FieldData) (*logical.Response, error) {
- err := p.Put(req.Storage, d.Get("key").(string), d.Raw)
- return nil, err
-}
-
-func (p *PathMap) pathSingleDelete(
- req *logical.Request, d *FieldData) (*logical.Response, error) {
- err := p.Delete(req.Storage, d.Get("key").(string))
- return nil, err
-}
-
-func (p *PathMap) pathSingleExistenceCheck(
- req *logical.Request, d *FieldData) (bool, error) {
- v, err := p.Get(req.Storage, d.Get("key").(string))
- if err != nil {
- return false, err
- }
- return v != nil, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go b/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go
deleted file mode 100644
index ce9215b..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go
+++ /dev/null
@@ -1,445 +0,0 @@
-package framework
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-func TestPathMap(t *testing.T) {
- p := &PathMap{Name: "foo"}
- storage := new(logical.InmemStorage)
- var b logical.Backend = &Backend{Paths: p.Paths()}
-
- // Write via HTTP
- _, err := b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "map/foo/a",
- Data: map[string]interface{}{
- "value": "bar",
- },
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
-
- // Read via HTTP
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "map/foo/a",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if resp.Data["value"] != "bar" {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Read via API
- v, err := p.Get(storage, "a")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v["value"] != "bar" {
- t.Fatalf("bad: %#v", v)
- }
-
- // Read via API with other casing
- v, err = p.Get(storage, "A")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v["value"] != "bar" {
- t.Fatalf("bad: %#v", v)
- }
-
- // Verify List
- keys, err := p.List(storage, "")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if len(keys) != 1 || keys[0] != "a" {
- t.Fatalf("bad: %#v", keys)
- }
-
- // LIST via HTTP
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ListOperation,
- Path: "map/foo/",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if len(resp.Data) != 1 || len(resp.Data["keys"].([]string)) != 1 ||
- resp.Data["keys"].([]string)[0] != "a" {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Delete via HTTP
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.DeleteOperation,
- Path: "map/foo/a",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Re-read via HTTP
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "map/foo/a",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if _, ok := resp.Data["value"]; ok {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Re-read via API
- v, err = p.Get(storage, "a")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v != nil {
- t.Fatalf("bad: %#v", v)
- }
-}
-
-func TestPathMap_getInvalid(t *testing.T) {
- p := &PathMap{Name: "foo"}
- storage := new(logical.InmemStorage)
-
- v, err := p.Get(storage, "nope")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v != nil {
- t.Fatalf("bad: %#v", v)
- }
-}
-
-func TestPathMap_routes(t *testing.T) {
- p := &PathMap{Name: "foo"}
- TestBackendRoutes(t, &Backend{Paths: p.Paths()}, []string{
- "map/foo", // Normal
- "map/foo/bar", // Normal
- "map/foo/bar-baz", // Hyphen key
- })
-}
-
-func TestPathMap_Salted(t *testing.T) {
- storage := new(logical.InmemStorage)
- salt, err := salt.NewSalt(storage, &salt.Config{
- HashFunc: salt.SHA1Hash,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- p := &PathMap{Name: "foo", Salt: salt}
- var b logical.Backend = &Backend{Paths: p.Paths()}
-
- // Write via HTTP
- _, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "map/foo/a",
- Data: map[string]interface{}{
- "value": "bar",
- },
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
-
- // Non-salted version should not be there
- out, err := storage.Get("struct/map/foo/a")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("non-salted key found")
- }
-
- // Ensure the path is salted
- expect := salt.SaltID("a")
- out, err = storage.Get("struct/map/foo/" + expect)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("missing salted key")
- }
-
- // Read via HTTP
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "map/foo/a",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if resp.Data["value"] != "bar" {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Read via API
- v, err := p.Get(storage, "a")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v["value"] != "bar" {
- t.Fatalf("bad: %#v", v)
- }
-
- // Read via API with other casing
- v, err = p.Get(storage, "A")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v["value"] != "bar" {
- t.Fatalf("bad: %#v", v)
- }
-
- // Verify List
- keys, err := p.List(storage, "")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if len(keys) != 1 || keys[0] != expect {
- t.Fatalf("bad: %#v", keys)
- }
-
- // Delete via HTTP
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.DeleteOperation,
- Path: "map/foo/a",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Re-read via HTTP
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "map/foo/a",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if _, ok := resp.Data["value"]; ok {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Re-read via API
- v, err = p.Get(storage, "a")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v != nil {
- t.Fatalf("bad: %#v", v)
- }
-
- // Put in a non-salted version and make sure that after reading it's been
- // upgraded
- err = storage.Put(&logical.StorageEntry{
- Key: "struct/map/foo/b",
- Value: []byte(`{"foo": "bar"}`),
- })
- if err != nil {
- t.Fatal("err: %v", err)
- }
- // A read should transparently upgrade
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "map/foo/b",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- list, _ := storage.List("struct/map/foo/")
- if len(list) != 1 {
- t.Fatalf("unexpected number of entries left after upgrade; expected 1, got %d", len(list))
- }
- found := false
- for _, v := range list {
- if v == salt.SaltID("b") {
- found = true
- break
- }
- }
- if !found {
- t.Fatal("did not find upgraded value")
- }
-}
-
-func TestPathMap_SaltFunc(t *testing.T) {
- storage := new(logical.InmemStorage)
- locSalt, err := salt.NewSalt(storage, &salt.Config{
- HashFunc: salt.SHA1Hash,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- saltFunc := func() (*salt.Salt, error) {
- return locSalt, nil
- }
- p := &PathMap{Name: "foo", SaltFunc: saltFunc}
- var b logical.Backend = &Backend{Paths: p.Paths()}
-
- // Write via HTTP
- _, err = b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "map/foo/a",
- Data: map[string]interface{}{
- "value": "bar",
- },
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
-
- // Non-salted version should not be there
- out, err := storage.Get("struct/map/foo/a")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("non-salted key found")
- }
-
- // Ensure the path is salted
- expect := locSalt.SaltID("a")
- out, err = storage.Get("struct/map/foo/" + expect)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("missing salted key")
- }
-
- // Read via HTTP
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "map/foo/a",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if resp.Data["value"] != "bar" {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Read via API
- v, err := p.Get(storage, "a")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v["value"] != "bar" {
- t.Fatalf("bad: %#v", v)
- }
-
- // Read via API with other casing
- v, err = p.Get(storage, "A")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v["value"] != "bar" {
- t.Fatalf("bad: %#v", v)
- }
-
- // Verify List
- keys, err := p.List(storage, "")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if len(keys) != 1 || keys[0] != expect {
- t.Fatalf("bad: %#v", keys)
- }
-
- // Delete via HTTP
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.DeleteOperation,
- Path: "map/foo/a",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Re-read via HTTP
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "map/foo/a",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if _, ok := resp.Data["value"]; ok {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Re-read via API
- v, err = p.Get(storage, "a")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v != nil {
- t.Fatalf("bad: %#v", v)
- }
-
- // Put in a non-salted version and make sure that after reading it's been
- // upgraded
- err = storage.Put(&logical.StorageEntry{
- Key: "struct/map/foo/b",
- Value: []byte(`{"foo": "bar"}`),
- })
- if err != nil {
- t.Fatal("err: %v", err)
- }
- // A read should transparently upgrade
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "map/foo/b",
- Storage: storage,
- })
- if err != nil {
- t.Fatal(err)
- }
- list, _ := storage.List("struct/map/foo/")
- if len(list) != 1 {
- t.Fatalf("unexpected number of entries left after upgrade; expected 1, got %d", len(list))
- }
- found := false
- for _, v := range list {
- if v == locSalt.SaltID("b") {
- found = true
- break
- }
- }
- if !found {
- t.Fatal("did not find upgraded value")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_struct.go b/vendor/github.com/hashicorp/vault/logical/framework/path_struct.go
deleted file mode 100644
index ae4f8d2..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/path_struct.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package framework
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// PathStruct can be used to generate a path that stores a struct
-// in the storage. This structure is a map[string]interface{} but the
-// types are set according to the schema in this structure.
-type PathStruct struct {
- Name string
- Path string
- Schema map[string]*FieldSchema
- HelpSynopsis string
- HelpDescription string
-
- Read bool
-}
-
-// Get reads the structure.
-func (p *PathStruct) Get(s logical.Storage) (map[string]interface{}, error) {
- entry, err := s.Get(fmt.Sprintf("struct/%s", p.Name))
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result map[string]interface{}
- if err := jsonutil.DecodeJSON(entry.Value, &result); err != nil {
- return nil, err
- }
-
- return result, nil
-}
-
-// Put writes the structure.
-func (p *PathStruct) Put(s logical.Storage, v map[string]interface{}) error {
- bytes, err := json.Marshal(v)
- if err != nil {
- return err
- }
-
- return s.Put(&logical.StorageEntry{
- Key: fmt.Sprintf("struct/%s", p.Name),
- Value: bytes,
- })
-}
-
-// Delete removes the structure.
-func (p *PathStruct) Delete(s logical.Storage) error {
- return s.Delete(fmt.Sprintf("struct/%s", p.Name))
-}
-
-// Paths are the paths to append to the Backend paths.
-func (p *PathStruct) Paths() []*Path {
- // The single path we support to read/write this config
- path := &Path{
- Pattern: p.Path,
- Fields: p.Schema,
-
- Callbacks: map[logical.Operation]OperationFunc{
- logical.CreateOperation: p.pathWrite,
- logical.UpdateOperation: p.pathWrite,
- logical.DeleteOperation: p.pathDelete,
- },
-
- ExistenceCheck: p.pathExistenceCheck,
-
- HelpSynopsis: p.HelpSynopsis,
- HelpDescription: p.HelpDescription,
- }
-
- // If we support reads, add that
- if p.Read {
- path.Callbacks[logical.ReadOperation] = p.pathRead
- }
-
- return []*Path{path}
-}
-
-func (p *PathStruct) pathRead(
- req *logical.Request, d *FieldData) (*logical.Response, error) {
- v, err := p.Get(req.Storage)
- if err != nil {
- return nil, err
- }
-
- return &logical.Response{
- Data: v,
- }, nil
-}
-
-func (p *PathStruct) pathWrite(
- req *logical.Request, d *FieldData) (*logical.Response, error) {
- err := p.Put(req.Storage, d.Raw)
- return nil, err
-}
-
-func (p *PathStruct) pathDelete(
- req *logical.Request, d *FieldData) (*logical.Response, error) {
- err := p.Delete(req.Storage)
- return nil, err
-}
-
-func (p *PathStruct) pathExistenceCheck(
- req *logical.Request, d *FieldData) (bool, error) {
- v, err := p.Get(req.Storage)
- if err != nil {
- return false, err
- }
-
- return v != nil, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_struct_test.go b/vendor/github.com/hashicorp/vault/logical/framework/path_struct_test.go
deleted file mode 100644
index 48233d3..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/path_struct_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package framework
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestPathStruct(t *testing.T) {
- p := &PathStruct{
- Name: "foo",
- Path: "bar",
- Schema: map[string]*FieldSchema{
- "value": &FieldSchema{Type: TypeString},
- },
- Read: true,
- }
-
- storage := new(logical.InmemStorage)
- var b logical.Backend = &Backend{Paths: p.Paths()}
-
- // Write via HTTP
- _, err := b.HandleRequest(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "bar",
- Data: map[string]interface{}{
- "value": "baz",
- },
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
-
- // Read via HTTP
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "bar",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if resp.Data["value"] != "baz" {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Read via API
- v, err := p.Get(storage)
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v["value"] != "baz" {
- t.Fatalf("bad: %#v", v)
- }
-
- // Delete via HTTP
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.DeleteOperation,
- Path: "bar",
- Data: nil,
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Re-read via HTTP
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "bar",
- Storage: storage,
- })
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if _, ok := resp.Data["value"]; ok {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Re-read via API
- v, err = p.Get(storage)
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
- if v != nil {
- t.Fatalf("bad: %#v", v)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/policy_map.go b/vendor/github.com/hashicorp/vault/logical/framework/policy_map.go
deleted file mode 100644
index fa6b4bc..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/policy_map.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package framework
-
-import (
- "sort"
- "strings"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// PolicyMap is a specialization of PathMap that expects the values to
-// be lists of policies. This assists in querying and loading policies
-// from the PathMap.
-type PolicyMap struct {
- PathMap
-
- DefaultKey string
- PolicyKey string
-}
-
-func (p *PolicyMap) Policies(s logical.Storage, names ...string) ([]string, error) {
- policyKey := "value"
- if p.PolicyKey != "" {
- policyKey = p.PolicyKey
- }
-
- if p.DefaultKey != "" {
- newNames := make([]string, len(names)+1)
- newNames[0] = p.DefaultKey
- copy(newNames[1:], names)
- names = newNames
- }
-
- set := make(map[string]struct{})
- for _, name := range names {
- v, err := p.Get(s, name)
- if err != nil {
- return nil, err
- }
-
- valuesRaw, ok := v[policyKey]
- if !ok {
- continue
- }
-
- values, ok := valuesRaw.(string)
- if !ok {
- continue
- }
-
- for _, p := range strings.Split(values, ",") {
- if p = strings.TrimSpace(p); p != "" {
- set[p] = struct{}{}
- }
- }
- }
-
- list := make([]string, 0, len(set))
- for k, _ := range set {
- list = append(list, k)
- }
- sort.Strings(list)
-
- return list, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/policy_map_test.go b/vendor/github.com/hashicorp/vault/logical/framework/policy_map_test.go
deleted file mode 100644
index 14d8f66..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/policy_map_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package framework
-
-import (
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestPolicyMap(t *testing.T) {
- p := &PolicyMap{}
- p.PathMap.Name = "foo"
- s := new(logical.InmemStorage)
-
- p.Put(s, "foo", map[string]interface{}{"value": "bar"})
- p.Put(s, "bar", map[string]interface{}{"value": "foo,baz "})
-
- // Read via API
- actual, err := p.Policies(s, "foo", "bar")
- if err != nil {
- t.Fatalf("bad: %#v", err)
- }
-
- expected := []string{"bar", "baz", "foo"}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: %#v", actual)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/secret.go b/vendor/github.com/hashicorp/vault/logical/framework/secret.go
deleted file mode 100644
index c4f71ee..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/secret.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package framework
-
-import (
- "time"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// Secret is a type of secret that can be returned from a backend.
-type Secret struct {
- // Type is the name of this secret type. This is used to setup the
- // vault ID and to look up the proper secret structure when revocation/
- // renewal happens. Once this is set this should not be changed.
- //
- // The format of this must match (case insensitive): ^a-Z0-9_$
- Type string
-
- // Fields is the mapping of data fields and schema that comprise
- // the structure of this secret.
- Fields map[string]*FieldSchema
-
- // DefaultDuration is the default value for the duration of the lease for
- // this secret. This can be manually overwritten with the result of
- // Response().
- //
- // If these aren't set, Vault core will set a default lease period which
- // may come from a mount tuning.
- DefaultDuration time.Duration
-
- // Renew is the callback called to renew this secret. If Renew is
- // not specified then renewable is set to false in the secret.
- // See lease.go for helpers for this value.
- Renew OperationFunc
-
- // Revoke is the callback called to revoke this secret. This is required.
- Revoke OperationFunc
-}
-
-func (s *Secret) Renewable() bool {
- return s.Renew != nil
-}
-
-func (s *Secret) Response(
- data, internal map[string]interface{}) *logical.Response {
- internalData := make(map[string]interface{})
- for k, v := range internal {
- internalData[k] = v
- }
- internalData["secret_type"] = s.Type
-
- return &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: s.DefaultDuration,
- Renewable: s.Renewable(),
- },
- InternalData: internalData,
- },
-
- Data: data,
- }
-}
-
-// HandleRenew is the request handler for renewing this secret.
-func (s *Secret) HandleRenew(req *logical.Request) (*logical.Response, error) {
- if !s.Renewable() {
- return nil, logical.ErrUnsupportedOperation
- }
-
- data := &FieldData{
- Raw: req.Data,
- Schema: s.Fields,
- }
-
- return s.Renew(req, data)
-}
-
-// HandleRevoke is the request handler for renewing this secret.
-func (s *Secret) HandleRevoke(req *logical.Request) (*logical.Response, error) {
- data := &FieldData{
- Raw: req.Data,
- Schema: s.Fields,
- }
-
- if s.Revoke != nil {
- return s.Revoke(req, data)
- }
-
- return nil, logical.ErrUnsupportedOperation
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/secret_test.go b/vendor/github.com/hashicorp/vault/logical/framework/secret_test.go
deleted file mode 100644
index 83af475..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/secret_test.go
+++ /dev/null
@@ -1 +0,0 @@
-package framework
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/template.go b/vendor/github.com/hashicorp/vault/logical/framework/template.go
deleted file mode 100644
index 5ac82ef..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/template.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package framework
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strings"
- "text/template"
-)
-
-func executeTemplate(tpl string, data interface{}) (string, error) {
- // Define the functions
- funcs := map[string]interface{}{
- "indent": funcIndent,
- }
-
- // Parse the help template
- t, err := template.New("root").Funcs(funcs).Parse(tpl)
- if err != nil {
- return "", fmt.Errorf("error parsing template: %s", err)
- }
-
- // Execute the template and store the output
- var buf bytes.Buffer
- if err := t.Execute(&buf, data); err != nil {
- return "", fmt.Errorf("error executing template: %s", err)
- }
-
- return strings.TrimSpace(buf.String()), nil
-}
-
-func funcIndent(count int, text string) string {
- var buf bytes.Buffer
- prefix := strings.Repeat(" ", count)
- scan := bufio.NewScanner(strings.NewReader(text))
- for scan.Scan() {
- buf.WriteString(prefix + scan.Text() + "\n")
- }
-
- return strings.TrimRight(buf.String(), "\n")
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/testing.go b/vendor/github.com/hashicorp/vault/logical/framework/testing.go
deleted file mode 100644
index a00a324..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/testing.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package framework
-
-import (
- "testing"
-)
-
-// TestBackendRoutes is a helper to test that all the given routes will
-// route properly in the backend.
-func TestBackendRoutes(t *testing.T, b *Backend, rs []string) {
- for _, r := range rs {
- if b.Route(r) == nil {
- t.Fatalf("bad route: %s", r)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/wal.go b/vendor/github.com/hashicorp/vault/logical/framework/wal.go
deleted file mode 100644
index 4e37aec..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/wal.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package framework
-
-import (
- "encoding/json"
- "strings"
- "time"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// WALPrefix is the prefix within Storage where WAL entries will be written.
-const WALPrefix = "wal/"
-
-type WALEntry struct {
- ID string `json:"-"`
- Kind string `json:"type"`
- Data interface{} `json:"data"`
- CreatedAt int64 `json:"created_at"`
-}
-
-// PutWAL writes some data to the WAL.
-//
-// The kind parameter is used by the framework to allow users to store
-// multiple kinds of WAL data and to easily disambiguate what data they're
-// expecting.
-//
-// Data within the WAL that is uncommitted (CommitWAL hasn't be called)
-// will be given to the rollback callback when an rollback operation is
-// received, allowing the backend to clean up some partial states.
-//
-// The data must be JSON encodable.
-//
-// This returns a unique ID that can be used to reference this WAL data.
-// WAL data cannot be modified. You can only add to the WAL and commit existing
-// WAL entries.
-func PutWAL(s logical.Storage, kind string, data interface{}) (string, error) {
- value, err := json.Marshal(&WALEntry{
- Kind: kind,
- Data: data,
- CreatedAt: time.Now().UTC().Unix(),
- })
- if err != nil {
- return "", err
- }
-
- id, err := uuid.GenerateUUID()
- if err != nil {
- return "", err
- }
-
- return id, s.Put(&logical.StorageEntry{
- Key: WALPrefix + id,
- Value: value,
- })
-}
-
-// GetWAL reads a specific entry from the WAL. If the entry doesn't exist,
-// then nil value is returned.
-//
-// The kind, value, and error are returned.
-func GetWAL(s logical.Storage, id string) (*WALEntry, error) {
- entry, err := s.Get(WALPrefix + id)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var raw WALEntry
- if err := jsonutil.DecodeJSON(entry.Value, &raw); err != nil {
- return nil, err
- }
- raw.ID = id
-
- return &raw, nil
-}
-
-// DeleteWAL commits the WAL entry with the given ID. Once committed,
-// it is assumed that the operation was a success and doesn't need to
-// be rolled back.
-func DeleteWAL(s logical.Storage, id string) error {
- return s.Delete(WALPrefix + id)
-}
-
-// ListWAL lists all the entries in the WAL.
-func ListWAL(s logical.Storage) ([]string, error) {
- keys, err := s.List(WALPrefix)
- if err != nil {
- return nil, err
- }
-
- for i, k := range keys {
- keys[i] = strings.TrimPrefix(k, WALPrefix)
- }
-
- return keys, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/wal_test.go b/vendor/github.com/hashicorp/vault/logical/framework/wal_test.go
deleted file mode 100644
index 8ee12dc..0000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/wal_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package framework
-
-import (
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestWAL(t *testing.T) {
- s := new(logical.InmemStorage)
-
- // WAL should be empty to start
- keys, err := ListWAL(s)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if len(keys) > 0 {
- t.Fatalf("bad: %#v", keys)
- }
-
- // Write an entry to the WAL
- id, err := PutWAL(s, "foo", "bar")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // The key should be in the WAL
- keys, err = ListWAL(s)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if !reflect.DeepEqual(keys, []string{id}) {
- t.Fatalf("bad: %#v", keys)
- }
-
- // Should be able to get the value
- entry, err := GetWAL(s, id)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if entry.Kind != "foo" {
- t.Fatalf("bad: %#v", entry)
- }
- if entry.Data != "bar" {
- t.Fatalf("bad: %#v", entry)
- }
-
- // Should be able to delete the value
- if err := DeleteWAL(s, id); err != nil {
- t.Fatalf("err: %s", err)
- }
- entry, err = GetWAL(s, id)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if entry != nil {
- t.Fatalf("bad: %#v", entry)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/identity.go b/vendor/github.com/hashicorp/vault/logical/identity.go
deleted file mode 100644
index fbc4fbb..0000000
--- a/vendor/github.com/hashicorp/vault/logical/identity.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package logical
-
-// Persona represents the information used by core to create implicit entity.
-// Implicit entities get created when a client authenticates successfully from
-// any of the authentication backends (except token backend).
-//
-// This is applicable to enterprise binaries only. Persona should be set in the
-// Auth response returned by the credential backends. This structure is placed
-// in the open source repository only to enable custom authetication plugins to
-// be used along with enterprise binary. The custom auth plugins should make
-// use of this and fill out the Persona information in the authentication
-// response.
-type Persona struct {
- // MountType is the backend mount's type to which this identity belongs
- // to.
- MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type"`
-
- // MountAccessor is the identifier of the mount entry to which
- // this identity
- // belongs to.
- MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor"`
-
- // Name is the identifier of this identity in its
- // authentication source.
- Name string `json:"name" structs:"name" mapstructure:"name"`
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/lease.go b/vendor/github.com/hashicorp/vault/logical/lease.go
deleted file mode 100644
index ed0b26b..0000000
--- a/vendor/github.com/hashicorp/vault/logical/lease.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package logical
-
-import "time"
-
-// LeaseOptions is an embeddable struct to capture common lease
-// settings between a Secret and Auth
-type LeaseOptions struct {
- // Lease is the duration that this secret is valid for. Vault
- // will automatically revoke it after the duration.
- TTL time.Duration `json:"lease"`
-
- // Renewable, if true, means that this secret can be renewed.
- Renewable bool `json:"renewable"`
-
- // Increment will be the lease increment that the user requested.
- // This is only available on a Renew operation and has no effect
- // when returning a response.
- Increment time.Duration `json:"-"`
-
- // IssueTime is the time of issue for the original lease. This is
- // only available on a Renew operation and has no effect when returning
- // a response. It can be used to enforce maximum lease periods by
- // a logical backend.
- IssueTime time.Time `json:"-"`
-}
-
-// LeaseEnabled checks if leasing is enabled
-func (l *LeaseOptions) LeaseEnabled() bool {
- return l.TTL > 0
-}
-
-// LeaseTotal is the lease duration with a guard against a negative TTL
-func (l *LeaseOptions) LeaseTotal() time.Duration {
- if l.TTL <= 0 {
- return 0
- }
-
- return l.TTL
-}
-
-// ExpirationTime computes the time until expiration including the grace period
-func (l *LeaseOptions) ExpirationTime() time.Time {
- var expireTime time.Time
- if l.LeaseEnabled() {
- expireTime = time.Now().Add(l.LeaseTotal())
- }
- return expireTime
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/lease_test.go b/vendor/github.com/hashicorp/vault/logical/lease_test.go
deleted file mode 100644
index 050b7db..0000000
--- a/vendor/github.com/hashicorp/vault/logical/lease_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package logical
-
-import (
- "testing"
- "time"
-)
-
-func TestLeaseOptionsLeaseTotal(t *testing.T) {
- var l LeaseOptions
- l.TTL = 1 * time.Hour
-
- actual := l.LeaseTotal()
- expected := l.TTL
- if actual != expected {
- t.Fatalf("bad: %s", actual)
- }
-}
-
-func TestLeaseOptionsLeaseTotal_grace(t *testing.T) {
- var l LeaseOptions
- l.TTL = 1 * time.Hour
-
- actual := l.LeaseTotal()
- if actual != l.TTL {
- t.Fatalf("bad: %s", actual)
- }
-}
-
-func TestLeaseOptionsLeaseTotal_negLease(t *testing.T) {
- var l LeaseOptions
- l.TTL = -1 * 1 * time.Hour
-
- actual := l.LeaseTotal()
- expected := time.Duration(0)
- if actual != expected {
- t.Fatalf("bad: %s", actual)
- }
-}
-
-func TestLeaseOptionsExpirationTime(t *testing.T) {
- var l LeaseOptions
- l.TTL = 1 * time.Hour
-
- limit := time.Now().Add(time.Hour)
- exp := l.ExpirationTime()
- if exp.Before(limit) {
- t.Fatalf("bad: %s", exp)
- }
-}
-
-func TestLeaseOptionsExpirationTime_noLease(t *testing.T) {
- var l LeaseOptions
- if !l.ExpirationTime().IsZero() {
- t.Fatal("should be zero")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/logical.go b/vendor/github.com/hashicorp/vault/logical/logical.go
deleted file mode 100644
index 9ce0d85..0000000
--- a/vendor/github.com/hashicorp/vault/logical/logical.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package logical
-
-import log "github.com/mgutz/logxi/v1"
-
-// BackendType is the type of backend that is being implemented
-type BackendType uint32
-
-// The these are the types of backends that can be derived from
-// logical.Backend
-const (
- TypeUnknown BackendType = 0 // This is also the zero-value for BackendType
- TypeLogical BackendType = 1
- TypeCredential BackendType = 2
-)
-
-// Stringer implementation
-func (b BackendType) String() string {
- switch b {
- case TypeLogical:
- return "secret"
- case TypeCredential:
- return "auth"
- }
-
- return "unknown"
-}
-
-// Backend interface must be implemented to be "mountable" at
-// a given path. Requests flow through a router which has various mount
-// points that flow to a logical backend. The logic of each backend is flexible,
-// and this is what allows materialized keys to function. There can be specialized
-// logical backends for various upstreams (Consul, PostgreSQL, MySQL, etc) that can
-// interact with remote APIs to generate keys dynamically. This interface also
-// allows for a "procfs" like interaction, as internal state can be exposed by
-// acting like a logical backend and being mounted.
-type Backend interface {
- // HandleRequest is used to handle a request and generate a response.
- // The backends must check the operation type and handle appropriately.
- HandleRequest(*Request) (*Response, error)
-
- // SpecialPaths is a list of paths that are special in some way.
- // See PathType for the types of special paths. The key is the type
- // of the special path, and the value is a list of paths for this type.
- // This is not a regular expression but is an exact match. If the path
- // ends in '*' then it is a prefix-based match. The '*' can only appear
- // at the end.
- SpecialPaths() *Paths
-
- // System provides an interface to access certain system configuration
- // information, such as globally configured default and max lease TTLs.
- System() SystemView
-
- // Logger provides an interface to access the underlying logger. This
- // is useful when a struct embeds a Backend-implemented struct that
- // contains a private instance of logger.
- Logger() log.Logger
-
- // HandleExistenceCheck is used to handle a request and generate a response
- // indicating whether the given path exists or not; this is used to
- // understand whether the request must have a Create or Update capability
- // ACL applied. The first bool indicates whether an existence check
- // function was found for the backend; the second indicates whether, if an
- // existence check function was found, the item exists or not.
- HandleExistenceCheck(*Request) (bool, bool, error)
-
- // Cleanup is invoked during an unmount of a backend to allow it to
- // handle any cleanup like connection closing or releasing of file handles.
- Cleanup()
-
- // Initialize is invoked after a backend is created. It is the place to run
- // any operations requiring storage; these should not be in the factory.
- Initialize() error
-
- // InvalidateKey may be invoked when an object is modified that belongs
- // to the backend. The backend can use this to clear any caches or reset
- // internal state as needed.
- InvalidateKey(key string)
-
- // Setup is used to set up the backend based on the provided backend
- // configuration.
- Setup(*BackendConfig) error
-
- // Type returns the BackendType for the particular backend
- Type() BackendType
-
- // RegisterLicense performs backend license registration
- RegisterLicense(interface{}) error
-}
-
-// BackendConfig is provided to the factory to initialize the backend
-type BackendConfig struct {
- // View should not be stored, and should only be used for initialization
- StorageView Storage
-
- // The backend should use this logger. The log should not contain any secrets.
- Logger log.Logger
-
- // System provides a view into a subset of safe system information that
- // is useful for backends, such as the default/max lease TTLs
- System SystemView
-
- // Config is the opaque user configuration provided when mounting
- Config map[string]string
-}
-
-// Factory is the factory function to create a logical backend.
-type Factory func(*BackendConfig) (Backend, error)
-
-// Paths is the structure of special paths that is used for SpecialPaths.
-type Paths struct {
- // Root are the paths that require a root token to access
- Root []string
-
- // Unauthenticated are the paths that can be accessed without any auth.
- Unauthenticated []string
-
- // LocalStorage are paths (prefixes) that are local to this instance; this
- // indicates that these paths should not be replicated
- LocalStorage []string
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend.go
deleted file mode 100644
index 081922c..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/backend.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package plugin
-
-import (
- "net/rpc"
-
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/logical"
-)
-
-// BackendPlugin is the plugin.Plugin implementation
-type BackendPlugin struct {
- Factory func(*logical.BackendConfig) (logical.Backend, error)
- metadataMode bool
-}
-
-// Server gets called when on plugin.Serve()
-func (b *BackendPlugin) Server(broker *plugin.MuxBroker) (interface{}, error) {
- return &backendPluginServer{factory: b.Factory, broker: broker}, nil
-}
-
-// Client gets called on plugin.NewClient()
-func (b BackendPlugin) Client(broker *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
- return &backendPluginClient{client: c, broker: broker, metadataMode: b.metadataMode}, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go
deleted file mode 100644
index cc2d83b..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go
+++ /dev/null
@@ -1,285 +0,0 @@
-package plugin
-
-import (
- "errors"
- "net/rpc"
-
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/logical"
- log "github.com/mgutz/logxi/v1"
-)
-
-var (
- ErrClientInMetadataMode = errors.New("plugin client can not perform action while in metadata mode")
-)
-
-// backendPluginClient implements logical.Backend and is the
-// go-plugin client.
-type backendPluginClient struct {
- broker *plugin.MuxBroker
- client *rpc.Client
- metadataMode bool
-
- system logical.SystemView
- logger log.Logger
-}
-
-// HandleRequestArgs is the args for HandleRequest method.
-type HandleRequestArgs struct {
- StorageID uint32
- Request *logical.Request
-}
-
-// HandleRequestReply is the reply for HandleRequest method.
-type HandleRequestReply struct {
- Response *logical.Response
- Error *plugin.BasicError
-}
-
-// SpecialPathsReply is the reply for SpecialPaths method.
-type SpecialPathsReply struct {
- Paths *logical.Paths
-}
-
-// SystemReply is the reply for System method.
-type SystemReply struct {
- SystemView logical.SystemView
- Error *plugin.BasicError
-}
-
-// HandleExistenceCheckArgs is the args for HandleExistenceCheck method.
-type HandleExistenceCheckArgs struct {
- StorageID uint32
- Request *logical.Request
-}
-
-// HandleExistenceCheckReply is the reply for HandleExistenceCheck method.
-type HandleExistenceCheckReply struct {
- CheckFound bool
- Exists bool
- Error *plugin.BasicError
-}
-
-// SetupArgs is the args for Setup method.
-type SetupArgs struct {
- StorageID uint32
- LoggerID uint32
- SysViewID uint32
- Config map[string]string
-}
-
-// SetupReply is the reply for Setup method.
-type SetupReply struct {
- Error *plugin.BasicError
-}
-
-// TypeReply is the reply for the Type method.
-type TypeReply struct {
- Type logical.BackendType
-}
-
-// RegisterLicenseArgs is the args for the RegisterLicense method.
-type RegisterLicenseArgs struct {
- License interface{}
-}
-
-// RegisterLicenseReply is the reply for the RegisterLicense method.
-type RegisterLicenseReply struct {
- Error *plugin.BasicError
-}
-
-func (b *backendPluginClient) HandleRequest(req *logical.Request) (*logical.Response, error) {
- if b.metadataMode {
- return nil, ErrClientInMetadataMode
- }
-
- // Do not send the storage, since go-plugin cannot serialize
- // interfaces. The server will pick up the storage from the shim.
- req.Storage = nil
- args := &HandleRequestArgs{
- Request: req,
- }
- var reply HandleRequestReply
-
- if req.Connection != nil {
- oldConnState := req.Connection.ConnState
- req.Connection.ConnState = nil
- defer func() {
- req.Connection.ConnState = oldConnState
- }()
- }
-
- err := b.client.Call("Plugin.HandleRequest", args, &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- if reply.Error.Error() == logical.ErrUnsupportedOperation.Error() {
- return nil, logical.ErrUnsupportedOperation
- }
- return nil, reply.Error
- }
-
- return reply.Response, nil
-}
-
-func (b *backendPluginClient) SpecialPaths() *logical.Paths {
- var reply SpecialPathsReply
- err := b.client.Call("Plugin.SpecialPaths", new(interface{}), &reply)
- if err != nil {
- return nil
- }
-
- return reply.Paths
-}
-
-// System returns vault's system view. The backend client stores the view during
-// Setup, so there is no need to shim the system just to get it back.
-func (b *backendPluginClient) System() logical.SystemView {
- return b.system
-}
-
-// Logger returns vault's logger. The backend client stores the logger during
-// Setup, so there is no need to shim the logger just to get it back.
-func (b *backendPluginClient) Logger() log.Logger {
- return b.logger
-}
-
-func (b *backendPluginClient) HandleExistenceCheck(req *logical.Request) (bool, bool, error) {
- if b.metadataMode {
- return false, false, ErrClientInMetadataMode
- }
-
- // Do not send the storage, since go-plugin cannot serialize
- // interfaces. The server will pick up the storage from the shim.
- req.Storage = nil
- args := &HandleExistenceCheckArgs{
- Request: req,
- }
- var reply HandleExistenceCheckReply
-
- if req.Connection != nil {
- oldConnState := req.Connection.ConnState
- req.Connection.ConnState = nil
- defer func() {
- req.Connection.ConnState = oldConnState
- }()
- }
-
- err := b.client.Call("Plugin.HandleExistenceCheck", args, &reply)
- if err != nil {
- return false, false, err
- }
- if reply.Error != nil {
- // THINKING: Should be be a switch on all error types?
- if reply.Error.Error() == logical.ErrUnsupportedPath.Error() {
- return false, false, logical.ErrUnsupportedPath
- }
- return false, false, reply.Error
- }
-
- return reply.CheckFound, reply.Exists, nil
-}
-
-func (b *backendPluginClient) Cleanup() {
- b.client.Call("Plugin.Cleanup", new(interface{}), &struct{}{})
-}
-
-func (b *backendPluginClient) Initialize() error {
- if b.metadataMode {
- return ErrClientInMetadataMode
- }
- err := b.client.Call("Plugin.Initialize", new(interface{}), &struct{}{})
- return err
-}
-
-func (b *backendPluginClient) InvalidateKey(key string) {
- if b.metadataMode {
- return
- }
- b.client.Call("Plugin.InvalidateKey", key, &struct{}{})
-}
-
-func (b *backendPluginClient) Setup(config *logical.BackendConfig) error {
- // Shim logical.Storage
- storageImpl := config.StorageView
- if b.metadataMode {
- storageImpl = &NOOPStorage{}
- }
- storageID := b.broker.NextId()
- go b.broker.AcceptAndServe(storageID, &StorageServer{
- impl: storageImpl,
- })
-
- // Shim log.Logger
- loggerImpl := config.Logger
- if b.metadataMode {
- loggerImpl = log.NullLog
- }
- loggerID := b.broker.NextId()
- go b.broker.AcceptAndServe(loggerID, &LoggerServer{
- logger: loggerImpl,
- })
-
- // Shim logical.SystemView
- sysViewImpl := config.System
- if b.metadataMode {
- sysViewImpl = &logical.StaticSystemView{}
- }
- sysViewID := b.broker.NextId()
- go b.broker.AcceptAndServe(sysViewID, &SystemViewServer{
- impl: sysViewImpl,
- })
-
- args := &SetupArgs{
- StorageID: storageID,
- LoggerID: loggerID,
- SysViewID: sysViewID,
- Config: config.Config,
- }
- var reply SetupReply
-
- err := b.client.Call("Plugin.Setup", args, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
-
- // Set system and logger for getter methods
- b.system = config.System
- b.logger = config.Logger
-
- return nil
-}
-
-func (b *backendPluginClient) Type() logical.BackendType {
- var reply TypeReply
- err := b.client.Call("Plugin.Type", new(interface{}), &reply)
- if err != nil {
- return logical.TypeUnknown
- }
-
- return logical.BackendType(reply.Type)
-}
-
-func (b *backendPluginClient) RegisterLicense(license interface{}) error {
- if b.metadataMode {
- return ErrClientInMetadataMode
- }
-
- var reply RegisterLicenseReply
- args := RegisterLicenseArgs{
- License: license,
- }
- err := b.client.Call("Plugin.RegisterLicense", args, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go
deleted file mode 100644
index 47045b1..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package plugin
-
-import (
- "errors"
- "net/rpc"
- "os"
-
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/logical"
-)
-
-var (
- ErrServerInMetadataMode = errors.New("plugin server can not perform action while in metadata mode")
-)
-
-// backendPluginServer is the RPC server that backendPluginClient talks to,
-// it methods conforming to requirements by net/rpc
-type backendPluginServer struct {
- broker *plugin.MuxBroker
- backend logical.Backend
- factory func(*logical.BackendConfig) (logical.Backend, error)
-
- loggerClient *rpc.Client
- sysViewClient *rpc.Client
- storageClient *rpc.Client
-}
-
-func inMetadataMode() bool {
- return os.Getenv(pluginutil.PluginMetadaModeEnv) == "true"
-}
-
-func (b *backendPluginServer) HandleRequest(args *HandleRequestArgs, reply *HandleRequestReply) error {
- if inMetadataMode() {
- return ErrServerInMetadataMode
- }
-
- storage := &StorageClient{client: b.storageClient}
- args.Request.Storage = storage
-
- resp, err := b.backend.HandleRequest(args.Request)
- *reply = HandleRequestReply{
- Response: resp,
- Error: plugin.NewBasicError(err),
- }
-
- return nil
-}
-
-func (b *backendPluginServer) SpecialPaths(_ interface{}, reply *SpecialPathsReply) error {
- *reply = SpecialPathsReply{
- Paths: b.backend.SpecialPaths(),
- }
- return nil
-}
-
-func (b *backendPluginServer) HandleExistenceCheck(args *HandleExistenceCheckArgs, reply *HandleExistenceCheckReply) error {
- if inMetadataMode() {
- return ErrServerInMetadataMode
- }
-
- storage := &StorageClient{client: b.storageClient}
- args.Request.Storage = storage
-
- checkFound, exists, err := b.backend.HandleExistenceCheck(args.Request)
- *reply = HandleExistenceCheckReply{
- CheckFound: checkFound,
- Exists: exists,
- Error: plugin.NewBasicError(err),
- }
-
- return nil
-}
-
-func (b *backendPluginServer) Cleanup(_ interface{}, _ *struct{}) error {
- b.backend.Cleanup()
-
- // Close rpc clients
- b.loggerClient.Close()
- b.sysViewClient.Close()
- b.storageClient.Close()
- return nil
-}
-
-func (b *backendPluginServer) Initialize(_ interface{}, _ *struct{}) error {
- if inMetadataMode() {
- return ErrServerInMetadataMode
- }
-
- err := b.backend.Initialize()
- return err
-}
-
-func (b *backendPluginServer) InvalidateKey(args string, _ *struct{}) error {
- if inMetadataMode() {
- return ErrServerInMetadataMode
- }
-
- b.backend.InvalidateKey(args)
- return nil
-}
-
-// Setup dials into the plugin's broker to get a shimmed storage, logger, and
-// system view of the backend. This method also instantiates the underlying
-// backend through its factory func for the server side of the plugin.
-func (b *backendPluginServer) Setup(args *SetupArgs, reply *SetupReply) error {
- // Dial for storage
- storageConn, err := b.broker.Dial(args.StorageID)
- if err != nil {
- *reply = SetupReply{
- Error: plugin.NewBasicError(err),
- }
- return nil
- }
- rawStorageClient := rpc.NewClient(storageConn)
- b.storageClient = rawStorageClient
-
- storage := &StorageClient{client: rawStorageClient}
-
- // Dial for logger
- loggerConn, err := b.broker.Dial(args.LoggerID)
- if err != nil {
- *reply = SetupReply{
- Error: plugin.NewBasicError(err),
- }
- return nil
- }
- rawLoggerClient := rpc.NewClient(loggerConn)
- b.loggerClient = rawLoggerClient
-
- logger := &LoggerClient{client: rawLoggerClient}
-
- // Dial for sys view
- sysViewConn, err := b.broker.Dial(args.SysViewID)
- if err != nil {
- *reply = SetupReply{
- Error: plugin.NewBasicError(err),
- }
- return nil
- }
- rawSysViewClient := rpc.NewClient(sysViewConn)
- b.sysViewClient = rawSysViewClient
-
- sysView := &SystemViewClient{client: rawSysViewClient}
-
- config := &logical.BackendConfig{
- StorageView: storage,
- Logger: logger,
- System: sysView,
- Config: args.Config,
- }
-
- // Call the underlying backend factory after shims have been created
- // to set b.backend
- backend, err := b.factory(config)
- if err != nil {
- *reply = SetupReply{
- Error: plugin.NewBasicError(err),
- }
- }
- b.backend = backend
-
- return nil
-}
-
-func (b *backendPluginServer) Type(_ interface{}, reply *TypeReply) error {
- *reply = TypeReply{
- Type: b.backend.Type(),
- }
-
- return nil
-}
-
-func (b *backendPluginServer) RegisterLicense(args *RegisterLicenseArgs, reply *RegisterLicenseReply) error {
- if inMetadataMode() {
- return ErrServerInMetadataMode
- }
-
- err := b.backend.RegisterLicense(args.License)
- if err != nil {
- *reply = RegisterLicenseReply{
- Error: plugin.NewBasicError(err),
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend_test.go
deleted file mode 100644
index deb5b63..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/backend_test.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package plugin
-
-import (
- "testing"
- "time"
-
- gplugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/plugin/mock"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestBackendPlugin_impl(t *testing.T) {
- var _ gplugin.Plugin = new(BackendPlugin)
- var _ logical.Backend = new(backendPluginClient)
-}
-
-func TestBackendPlugin_HandleRequest(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "kv/foo",
- Data: map[string]interface{}{
- "value": "bar",
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp.Data["value"] != "bar" {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestBackendPlugin_SpecialPaths(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- paths := b.SpecialPaths()
- if paths == nil {
- t.Fatal("SpecialPaths() returned nil")
- }
-}
-
-func TestBackendPlugin_System(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- sys := b.System()
- if sys == nil {
- t.Fatal("System() returned nil")
- }
-
- actual := sys.DefaultLeaseTTL()
- expected := 300 * time.Second
-
- if actual != expected {
- t.Fatalf("bad: %v, expected %v", actual, expected)
- }
-}
-
-func TestBackendPlugin_Logger(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- logger := b.Logger()
- if logger == nil {
- t.Fatal("Logger() returned nil")
- }
-}
-
-func TestBackendPlugin_HandleExistenceCheck(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- checkFound, exists, err := b.HandleExistenceCheck(&logical.Request{
- Operation: logical.CreateOperation,
- Path: "kv/foo",
- Data: map[string]interface{}{"value": "bar"},
- })
- if err != nil {
- t.Fatal(err)
- }
- if !checkFound {
- t.Fatal("existence check not found for path 'kv/foo")
- }
- if exists {
- t.Fatal("existence check should have returned 'false' for 'kv/foo'")
- }
-}
-
-func TestBackendPlugin_Cleanup(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- b.Cleanup()
-}
-
-func TestBackendPlugin_Initialize(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- err := b.Initialize()
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBackendPlugin_InvalidateKey(t *testing.T) {
- b, cleanup := testBackend(t)
- defer cleanup()
-
- resp, err := b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "internal",
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp.Data["value"] == "" {
- t.Fatalf("bad: %#v, expected non-empty value", resp)
- }
-
- b.InvalidateKey("internal")
-
- resp, err = b.HandleRequest(&logical.Request{
- Operation: logical.ReadOperation,
- Path: "internal",
- })
- if err != nil {
- t.Fatal(err)
- }
- if resp.Data["value"] != "" {
- t.Fatalf("bad: expected empty response data, got %#v", resp)
- }
-}
-
-func TestBackendPlugin_Setup(t *testing.T) {
- _, cleanup := testBackend(t)
- defer cleanup()
-}
-
-func testBackend(t *testing.T) (logical.Backend, func()) {
- // Create a mock provider
- pluginMap := map[string]gplugin.Plugin{
- "backend": &BackendPlugin{
- Factory: mock.Factory,
- },
- }
- client, _ := gplugin.TestPluginRPCConn(t, pluginMap)
- cleanup := func() {
- client.Close()
- }
-
- // Request the backend
- raw, err := client.Dispense(BackendPluginName)
- if err != nil {
- t.Fatal(err)
- }
- b := raw.(logical.Backend)
-
- err = b.Setup(&logical.BackendConfig{
- Logger: logformat.NewVaultLogger(log.LevelTrace),
- System: &logical.StaticSystemView{
- DefaultLeaseTTLVal: 300 * time.Second,
- MaxLeaseTTLVal: 1800 * time.Second,
- },
- StorageView: &logical.InmemStorage{},
- })
- if err != nil {
- t.Fatal(err)
- }
-
- return b, cleanup
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/logger.go b/vendor/github.com/hashicorp/vault/logical/plugin/logger.go
deleted file mode 100644
index ceb8947..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/logger.go
+++ /dev/null
@@ -1,205 +0,0 @@
-package plugin
-
-import (
- "net/rpc"
-
- plugin "github.com/hashicorp/go-plugin"
- log "github.com/mgutz/logxi/v1"
-)
-
-type LoggerClient struct {
- client *rpc.Client
-}
-
-func (l *LoggerClient) Trace(msg string, args ...interface{}) {
- cArgs := &LoggerArgs{
- Msg: msg,
- Args: args,
- }
- l.client.Call("Plugin.Trace", cArgs, &struct{}{})
-}
-
-func (l *LoggerClient) Debug(msg string, args ...interface{}) {
- cArgs := &LoggerArgs{
- Msg: msg,
- Args: args,
- }
- l.client.Call("Plugin.Debug", cArgs, &struct{}{})
-}
-
-func (l *LoggerClient) Info(msg string, args ...interface{}) {
- cArgs := &LoggerArgs{
- Msg: msg,
- Args: args,
- }
- l.client.Call("Plugin.Info", cArgs, &struct{}{})
-}
-func (l *LoggerClient) Warn(msg string, args ...interface{}) error {
- var reply LoggerReply
- cArgs := &LoggerArgs{
- Msg: msg,
- Args: args,
- }
- err := l.client.Call("Plugin.Warn", cArgs, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
-
- return nil
-}
-func (l *LoggerClient) Error(msg string, args ...interface{}) error {
- var reply LoggerReply
- cArgs := &LoggerArgs{
- Msg: msg,
- Args: args,
- }
- err := l.client.Call("Plugin.Error", cArgs, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
-
- return nil
-}
-
-func (l *LoggerClient) Fatal(msg string, args ...interface{}) {
- // NOOP since it's not actually used within vault
- return
-}
-
-func (l *LoggerClient) Log(level int, msg string, args []interface{}) {
- cArgs := &LoggerArgs{
- Level: level,
- Msg: msg,
- Args: args,
- }
- l.client.Call("Plugin.Log", cArgs, &struct{}{})
-}
-
-func (l *LoggerClient) SetLevel(level int) {
- l.client.Call("Plugin.SetLevel", level, &struct{}{})
-}
-
-func (l *LoggerClient) IsTrace() bool {
- var reply LoggerReply
- l.client.Call("Plugin.IsTrace", new(interface{}), &reply)
- return reply.IsTrue
-}
-func (l *LoggerClient) IsDebug() bool {
- var reply LoggerReply
- l.client.Call("Plugin.IsDebug", new(interface{}), &reply)
- return reply.IsTrue
-}
-
-func (l *LoggerClient) IsInfo() bool {
- var reply LoggerReply
- l.client.Call("Plugin.IsInfo", new(interface{}), &reply)
- return reply.IsTrue
-}
-
-func (l *LoggerClient) IsWarn() bool {
- var reply LoggerReply
- l.client.Call("Plugin.IsWarn", new(interface{}), &reply)
- return reply.IsTrue
-}
-
-type LoggerServer struct {
- logger log.Logger
-}
-
-func (l *LoggerServer) Trace(args *LoggerArgs, _ *struct{}) error {
- l.logger.Trace(args.Msg, args.Args)
- return nil
-}
-
-func (l *LoggerServer) Debug(args *LoggerArgs, _ *struct{}) error {
- l.logger.Debug(args.Msg, args.Args)
- return nil
-}
-
-func (l *LoggerServer) Info(args *LoggerArgs, _ *struct{}) error {
- l.logger.Info(args.Msg, args.Args)
- return nil
-}
-
-func (l *LoggerServer) Warn(args *LoggerArgs, reply *LoggerReply) error {
- err := l.logger.Warn(args.Msg, args.Args)
- if err != nil {
- *reply = LoggerReply{
- Error: plugin.NewBasicError(err),
- }
- return nil
- }
- return nil
-}
-
-func (l *LoggerServer) Error(args *LoggerArgs, reply *LoggerReply) error {
- err := l.logger.Error(args.Msg, args.Args)
- if err != nil {
- *reply = LoggerReply{
- Error: plugin.NewBasicError(err),
- }
- return nil
- }
- return nil
-}
-
-func (l *LoggerServer) Log(args *LoggerArgs, _ *struct{}) error {
- l.logger.Log(args.Level, args.Msg, args.Args)
- return nil
-}
-
-func (l *LoggerServer) SetLevel(args int, _ *struct{}) error {
- l.logger.SetLevel(args)
- return nil
-}
-
-func (l *LoggerServer) IsTrace(args interface{}, reply *LoggerReply) error {
- result := l.logger.IsTrace()
- *reply = LoggerReply{
- IsTrue: result,
- }
- return nil
-}
-
-func (l *LoggerServer) IsDebug(args interface{}, reply *LoggerReply) error {
- result := l.logger.IsDebug()
- *reply = LoggerReply{
- IsTrue: result,
- }
- return nil
-}
-
-func (l *LoggerServer) IsInfo(args interface{}, reply *LoggerReply) error {
- result := l.logger.IsInfo()
- *reply = LoggerReply{
- IsTrue: result,
- }
- return nil
-}
-
-func (l *LoggerServer) IsWarn(args interface{}, reply *LoggerReply) error {
- result := l.logger.IsWarn()
- *reply = LoggerReply{
- IsTrue: result,
- }
- return nil
-}
-
-type LoggerArgs struct {
- Level int
- Msg string
- Args []interface{}
-}
-
-// LoggerReply contains the RPC reply. Not all fields may be used
-// for a particular RPC call.
-type LoggerReply struct {
- IsTrue bool
- Error *plugin.BasicError
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/logger_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/logger_test.go
deleted file mode 100644
index 10b389c..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/logger_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package plugin
-
-import (
- "bufio"
- "bytes"
- "io/ioutil"
- "strings"
- "testing"
-
- plugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/logformat"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestLogger_impl(t *testing.T) {
- var _ log.Logger = new(LoggerClient)
-}
-
-func TestLogger_levels(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- var buf bytes.Buffer
- writer := bufio.NewWriter(&buf)
-
- l := logformat.NewVaultLoggerWithWriter(writer, log.LevelTrace)
-
- server.RegisterName("Plugin", &LoggerServer{
- logger: l,
- })
-
- expected := "foobar"
- testLogger := &LoggerClient{client: client}
-
- // Test trace
- testLogger.Trace(expected)
- if err := writer.Flush(); err != nil {
- t.Fatal(err)
- }
- result := buf.String()
- buf.Reset()
- if !strings.Contains(result, expected) {
- t.Fatalf("expected log to contain %s, got %s", expected, result)
- }
-
- // Test debug
- testLogger.Debug(expected)
- if err := writer.Flush(); err != nil {
- t.Fatal(err)
- }
- result = buf.String()
- buf.Reset()
- if !strings.Contains(result, expected) {
- t.Fatalf("expected log to contain %s, got %s", expected, result)
- }
-
- // Test debug
- testLogger.Info(expected)
- if err := writer.Flush(); err != nil {
- t.Fatal(err)
- }
- result = buf.String()
- buf.Reset()
- if !strings.Contains(result, expected) {
- t.Fatalf("expected log to contain %s, got %s", expected, result)
- }
-
- // Test warn
- testLogger.Warn(expected)
- if err := writer.Flush(); err != nil {
- t.Fatal(err)
- }
- result = buf.String()
- buf.Reset()
- if !strings.Contains(result, expected) {
- t.Fatalf("expected log to contain %s, got %s", expected, result)
- }
-
- // Test error
- testLogger.Error(expected)
- if err := writer.Flush(); err != nil {
- t.Fatal(err)
- }
- result = buf.String()
- buf.Reset()
- if !strings.Contains(result, expected) {
- t.Fatalf("expected log to contain %s, got %s", expected, result)
- }
-
- // Test fatal
- testLogger.Fatal(expected)
- if err := writer.Flush(); err != nil {
- t.Fatal(err)
- }
- result = buf.String()
- buf.Reset()
- if result != "" {
- t.Fatalf("expected log Fatal() to be no-op, got %s", result)
- }
-}
-
-func TestLogger_isLevels(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- l := logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelAll)
-
- server.RegisterName("Plugin", &LoggerServer{
- logger: l,
- })
-
- testLogger := &LoggerClient{client: client}
-
- if !testLogger.IsDebug() || !testLogger.IsInfo() || !testLogger.IsTrace() || !testLogger.IsWarn() {
- t.Fatal("expected logger to return true for all logger level checks")
- }
-}
-
-func TestLogger_log(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- var buf bytes.Buffer
- writer := bufio.NewWriter(&buf)
-
- l := logformat.NewVaultLoggerWithWriter(writer, log.LevelTrace)
-
- server.RegisterName("Plugin", &LoggerServer{
- logger: l,
- })
-
- expected := "foobar"
- testLogger := &LoggerClient{client: client}
-
- // Test trace
- testLogger.Log(log.LevelInfo, expected, nil)
- if err := writer.Flush(); err != nil {
- t.Fatal(err)
- }
- result := buf.String()
- if !strings.Contains(result, expected) {
- t.Fatalf("expected log to contain %s, got %s", expected, result)
- }
-
-}
-
-func TestLogger_setLevel(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- l := log.NewLogger(ioutil.Discard, "test-logger")
-
- server.RegisterName("Plugin", &LoggerServer{
- logger: l,
- })
-
- testLogger := &LoggerClient{client: client}
- testLogger.SetLevel(log.LevelWarn)
-
- if !testLogger.IsWarn() {
- t.Fatal("expected logger to support warn level")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend.go
deleted file mode 100644
index ac8c0ba..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package mock
-
-import (
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// New returns a new backend as an interface. This func
-// is only necessary for builtin backend plugins.
-func New() (interface{}, error) {
- return Backend(), nil
-}
-
-// Factory returns a new backend as logical.Backend.
-func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-// FactoryType is a wrapper func that allows the Factory func to specify
-// the backend type for the mock backend plugin instance.
-func FactoryType(backendType logical.BackendType) func(*logical.BackendConfig) (logical.Backend, error) {
- return func(conf *logical.BackendConfig) (logical.Backend, error) {
- b := Backend()
- b.BackendType = backendType
- if err := b.Setup(conf); err != nil {
- return nil, err
- }
- return b, nil
- }
-}
-
-// Backend returns a private embedded struct of framework.Backend.
-func Backend() *backend {
- var b backend
- b.Backend = &framework.Backend{
- Help: "",
- Paths: framework.PathAppend(
- errorPaths(&b),
- kvPaths(&b),
- []*framework.Path{
- pathInternal(&b),
- pathSpecial(&b),
- },
- ),
- PathsSpecial: &logical.Paths{
- Unauthenticated: []string{
- "special",
- },
- },
- Secrets: []*framework.Secret{},
- Invalidate: b.invalidate,
- BackendType: logical.TypeLogical,
- }
- b.internal = "bar"
- return &b
-}
-
-type backend struct {
- *framework.Backend
-
- // internal is used to test invalidate
- internal string
-}
-
-func (b *backend) invalidate(key string) {
- switch key {
- case "internal":
- b.internal = ""
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend_test.go
deleted file mode 100644
index 075911c..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package mock
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestBackend_impl(t *testing.T) {
- var _ logical.Backend = new(backend)
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/mock-plugin/main.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/mock-plugin/main.go
deleted file mode 100644
index b1b7fbd..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/mock/mock-plugin/main.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package main
-
-import (
- "log"
- "os"
-
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/plugin"
- "github.com/hashicorp/vault/logical/plugin/mock"
-)
-
-func main() {
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(os.Args[1:]) // Ignore command, strictly parse flags
-
- tlsConfig := apiClientMeta.GetTLSConfig()
- tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig)
-
- factoryFunc := mock.FactoryType(logical.TypeLogical)
-
- err := plugin.Serve(&plugin.ServeOpts{
- BackendFactoryFunc: factoryFunc,
- TLSProviderFunc: tlsProviderFunc,
- })
- if err != nil {
- log.Println(err)
- os.Exit(1)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_errors.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_errors.go
deleted file mode 100644
index 00c4e3d..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_errors.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package mock
-
-import (
- "net/rpc"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// pathInternal is used to test viewing internal backend values. In this case,
-// it is used to test the invalidate func.
-func errorPaths(b *backend) []*framework.Path {
- return []*framework.Path{
- &framework.Path{
- Pattern: "errors/rpc",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathErrorRPCRead,
- },
- },
- &framework.Path{
- Pattern: "errors/kill",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathErrorRPCRead,
- },
- },
- }
-}
-
-func (b *backend) pathErrorRPCRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return nil, rpc.ErrShutdown
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_internal.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_internal.go
deleted file mode 100644
index 92c4f8b..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_internal.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package mock
-
-import (
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// pathInternal is used to test viewing internal backend values. In this case,
-// it is used to test the invalidate func.
-func pathInternal(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "internal",
- Fields: map[string]*framework.FieldSchema{
- "value": &framework.FieldSchema{Type: framework.TypeString},
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathInternalUpdate,
- logical.ReadOperation: b.pathInternalRead,
- },
- }
-}
-
-func (b *backend) pathInternalUpdate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- value := data.Get("value").(string)
- b.internal = value
- // Return the secret
- return nil, nil
-
-}
-
-func (b *backend) pathInternalRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Return the secret
- return &logical.Response{
- Data: map[string]interface{}{
- "value": b.internal,
- },
- }, nil
-
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_kv.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_kv.go
deleted file mode 100644
index badede2..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_kv.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package mock
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// kvPaths is used to test CRUD and List operations. It is a simplified
-// version of the passthrough backend that only accepts string values.
-func kvPaths(b *backend) []*framework.Path {
- return []*framework.Path{
- &framework.Path{
- Pattern: "kv/?",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.pathKVList,
- },
- },
- &framework.Path{
- Pattern: "kv/" + framework.GenericNameRegex("key"),
- Fields: map[string]*framework.FieldSchema{
- "key": &framework.FieldSchema{Type: framework.TypeString},
- "value": &framework.FieldSchema{Type: framework.TypeString},
- },
- ExistenceCheck: b.pathExistenceCheck,
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathKVRead,
- logical.CreateOperation: b.pathKVCreateUpdate,
- logical.UpdateOperation: b.pathKVCreateUpdate,
- logical.DeleteOperation: b.pathKVDelete,
- },
- },
- }
-}
-
-func (b *backend) pathExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
- out, err := req.Storage.Get(req.Path)
- if err != nil {
- return false, fmt.Errorf("existence check failed: %v", err)
- }
-
- return out != nil, nil
-}
-
-func (b *backend) pathKVRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entry, err := req.Storage.Get(req.Path)
- if err != nil {
- return nil, err
- }
-
- if entry == nil {
- return nil, nil
- }
-
- value := string(entry.Value)
-
- // Return the secret
- return &logical.Response{
- Data: map[string]interface{}{
- "value": value,
- },
- }, nil
-}
-
-func (b *backend) pathKVCreateUpdate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- value := data.Get("value").(string)
-
- entry := &logical.StorageEntry{
- Key: req.Path,
- Value: []byte(value),
- }
-
- s := req.Storage
- err := s.Put(entry)
- if err != nil {
- return nil, err
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "value": value,
- },
- }, nil
-}
-
-func (b *backend) pathKVDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if err := req.Storage.Delete(req.Path); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *backend) pathKVList(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- vals, err := req.Storage.List("kv/")
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(vals), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_special.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_special.go
deleted file mode 100644
index f695e20..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_special.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package mock
-
-import (
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// pathSpecial is used to test special paths.
-func pathSpecial(b *backend) *framework.Path {
- return &framework.Path{
- Pattern: "special",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.pathSpecialRead,
- },
- }
-}
-
-func (b *backend) pathSpecialRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Return the secret
- return &logical.Response{
- Data: map[string]interface{}{
- "data": "foo",
- },
- }, nil
-
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go b/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go
deleted file mode 100644
index ede0622..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package plugin
-
-import (
- "crypto/ecdsa"
- "crypto/rsa"
- "encoding/gob"
- "fmt"
- "time"
-
- "sync"
-
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/logical"
- log "github.com/mgutz/logxi/v1"
-)
-
-// Register these types since we have to serialize and de-serialize tls.ConnectionState
-// over the wire as part of logical.Request.Connection.
-func init() {
- gob.Register(rsa.PublicKey{})
- gob.Register(ecdsa.PublicKey{})
- gob.Register(time.Duration(0))
-}
-
-// BackendPluginClient is a wrapper around backendPluginClient
-// that also contains its plugin.Client instance. It's primarily
-// used to cleanly kill the client on Cleanup()
-type BackendPluginClient struct {
- client *plugin.Client
- sync.Mutex
-
- *backendPluginClient
-}
-
-// Cleanup calls the RPC client's Cleanup() func and also calls
-// the go-plugin's client Kill() func
-func (b *BackendPluginClient) Cleanup() {
- b.backendPluginClient.Cleanup()
- b.client.Kill()
-}
-
-// NewBackend will return an instance of an RPC-based client implementation of the backend for
-// external plugins, or a concrete implementation of the backend if it is a builtin backend.
-// The backend is returned as a logical.Backend interface. The isMetadataMode param determines whether
-// the plugin should run in metadata mode.
-func NewBackend(pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger, isMetadataMode bool) (logical.Backend, error) {
- // Look for plugin in the plugin catalog
- pluginRunner, err := sys.LookupPlugin(pluginName)
- if err != nil {
- return nil, err
- }
-
- var backend logical.Backend
- if pluginRunner.Builtin {
- // Plugin is builtin so we can retrieve an instance of the interface
- // from the pluginRunner. Then cast it to logical.Backend.
- backendRaw, err := pluginRunner.BuiltinFactory()
- if err != nil {
- return nil, fmt.Errorf("error getting plugin type: %s", err)
- }
-
- var ok bool
- backend, ok = backendRaw.(logical.Backend)
- if !ok {
- return nil, fmt.Errorf("unsuported backend type: %s", pluginName)
- }
-
- } else {
- // create a backendPluginClient instance
- backend, err = newPluginClient(sys, pluginRunner, logger, isMetadataMode)
- if err != nil {
- return nil, err
- }
- }
-
- return backend, nil
-}
-
-func newPluginClient(sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (logical.Backend, error) {
- // pluginMap is the map of plugins we can dispense.
- pluginMap := map[string]plugin.Plugin{
- "backend": &BackendPlugin{
- metadataMode: isMetadataMode,
- },
- }
-
- var client *plugin.Client
- var err error
- if isMetadataMode {
- client, err = pluginRunner.RunMetadataMode(sys, pluginMap, handshakeConfig, []string{}, logger)
- } else {
- client, err = pluginRunner.Run(sys, pluginMap, handshakeConfig, []string{}, logger)
- }
- if err != nil {
- return nil, err
- }
-
- // Connect via RPC
- rpcClient, err := client.Client()
- if err != nil {
- return nil, err
- }
-
- // Request the plugin
- raw, err := rpcClient.Dispense("backend")
- if err != nil {
- return nil, err
- }
-
- // We should have a logical backend type now. This feels like a normal interface
- // implementation but is in fact over an RPC connection.
- backendRPC := raw.(*backendPluginClient)
-
- return &BackendPluginClient{
- client: client,
- backendPluginClient: backendRPC,
- }, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/serve.go b/vendor/github.com/hashicorp/vault/logical/plugin/serve.go
deleted file mode 100644
index 1d70b3a..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/serve.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package plugin
-
-import (
- "crypto/tls"
-
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// BackendPluginName is the name of the plugin that can be
-// dispensed rom the plugin server.
-const BackendPluginName = "backend"
-
-type BackendFactoryFunc func(*logical.BackendConfig) (logical.Backend, error)
-type TLSProdiverFunc func() (*tls.Config, error)
-
-type ServeOpts struct {
- BackendFactoryFunc BackendFactoryFunc
- TLSProviderFunc TLSProdiverFunc
-}
-
-// Serve is a helper function used to serve a backend plugin. This
-// should be ran on the plugin's main process.
-func Serve(opts *ServeOpts) error {
- // pluginMap is the map of plugins we can dispense.
- var pluginMap = map[string]plugin.Plugin{
- "backend": &BackendPlugin{
- Factory: opts.BackendFactoryFunc,
- },
- }
-
- err := pluginutil.OptionallyEnableMlock()
- if err != nil {
- return err
- }
-
- // If FetchMetadata is true, run without TLSProvider
- plugin.Serve(&plugin.ServeConfig{
- HandshakeConfig: handshakeConfig,
- Plugins: pluginMap,
- TLSProvider: opts.TLSProviderFunc,
- })
-
- return nil
-}
-
-// handshakeConfigs are used to just do a basic handshake between
-// a plugin and host. If the handshake fails, a user friendly error is shown.
-// This prevents users from executing bad plugins or executing a plugin
-// directory. It is a UX feature, not a security feature.
-var handshakeConfig = plugin.HandshakeConfig{
- ProtocolVersion: 2,
- MagicCookieKey: "VAULT_BACKEND_PLUGIN",
- MagicCookieValue: "6669da05-b1c8-4f49-97d9-c8e5bed98e20",
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/storage.go b/vendor/github.com/hashicorp/vault/logical/plugin/storage.go
deleted file mode 100644
index 99c21f6..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/storage.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package plugin
-
-import (
- "net/rpc"
-
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/logical"
-)
-
-// StorageClient is an implementation of logical.Storage that communicates
-// over RPC.
-type StorageClient struct {
- client *rpc.Client
-}
-
-func (s *StorageClient) List(prefix string) ([]string, error) {
- var reply StorageListReply
- err := s.client.Call("Plugin.List", prefix, &reply)
- if err != nil {
- return reply.Keys, err
- }
- if reply.Error != nil {
- return reply.Keys, reply.Error
- }
- return reply.Keys, nil
-}
-
-func (s *StorageClient) Get(key string) (*logical.StorageEntry, error) {
- var reply StorageGetReply
- err := s.client.Call("Plugin.Get", key, &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- return nil, reply.Error
- }
- return reply.StorageEntry, nil
-}
-
-func (s *StorageClient) Put(entry *logical.StorageEntry) error {
- var reply StoragePutReply
- err := s.client.Call("Plugin.Put", entry, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
- return nil
-}
-
-func (s *StorageClient) Delete(key string) error {
- var reply StorageDeleteReply
- err := s.client.Call("Plugin.Delete", key, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
- return nil
-}
-
-// StorageServer is a net/rpc compatible structure for serving
-type StorageServer struct {
- impl logical.Storage
-}
-
-func (s *StorageServer) List(prefix string, reply *StorageListReply) error {
- keys, err := s.impl.List(prefix)
- *reply = StorageListReply{
- Keys: keys,
- Error: plugin.NewBasicError(err),
- }
- return nil
-}
-
-func (s *StorageServer) Get(key string, reply *StorageGetReply) error {
- storageEntry, err := s.impl.Get(key)
- *reply = StorageGetReply{
- StorageEntry: storageEntry,
- Error: plugin.NewBasicError(err),
- }
- return nil
-}
-
-func (s *StorageServer) Put(entry *logical.StorageEntry, reply *StoragePutReply) error {
- err := s.impl.Put(entry)
- *reply = StoragePutReply{
- Error: plugin.NewBasicError(err),
- }
- return nil
-}
-
-func (s *StorageServer) Delete(key string, reply *StorageDeleteReply) error {
- err := s.impl.Delete(key)
- *reply = StorageDeleteReply{
- Error: plugin.NewBasicError(err),
- }
- return nil
-}
-
-type StorageListReply struct {
- Keys []string
- Error *plugin.BasicError
-}
-
-type StorageGetReply struct {
- StorageEntry *logical.StorageEntry
- Error *plugin.BasicError
-}
-
-type StoragePutReply struct {
- Error *plugin.BasicError
-}
-
-type StorageDeleteReply struct {
- Error *plugin.BasicError
-}
-
-// NOOPStorage is used to deny access to the storage interface while running a
-// backend plugin in metadata mode.
-type NOOPStorage struct{}
-
-func (s *NOOPStorage) List(prefix string) ([]string, error) {
- return []string{}, nil
-}
-
-func (s *NOOPStorage) Get(key string) (*logical.StorageEntry, error) {
- return nil, nil
-}
-
-func (s *NOOPStorage) Put(entry *logical.StorageEntry) error {
- return nil
-}
-
-func (s *NOOPStorage) Delete(key string) error {
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/storage_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/storage_test.go
deleted file mode 100644
index 9899a82..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/storage_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package plugin
-
-import (
- "testing"
-
- plugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/logical"
-)
-
-func TestStorage_impl(t *testing.T) {
- var _ logical.Storage = new(StorageClient)
-}
-
-func TestStorage_operations(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- storage := &logical.InmemStorage{}
-
- server.RegisterName("Plugin", &StorageServer{
- impl: storage,
- })
-
- testStorage := &StorageClient{client: client}
-
- logical.TestStorage(t, testStorage)
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/system.go b/vendor/github.com/hashicorp/vault/logical/plugin/system.go
deleted file mode 100644
index 16f67df..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/system.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package plugin
-
-import (
- "net/rpc"
- "time"
-
- "fmt"
-
- plugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
-)
-
-type SystemViewClient struct {
- client *rpc.Client
-}
-
-func (s *SystemViewClient) DefaultLeaseTTL() time.Duration {
- var reply DefaultLeaseTTLReply
- err := s.client.Call("Plugin.DefaultLeaseTTL", new(interface{}), &reply)
- if err != nil {
- return 0
- }
-
- return reply.DefaultLeaseTTL
-}
-
-func (s *SystemViewClient) MaxLeaseTTL() time.Duration {
- var reply MaxLeaseTTLReply
- err := s.client.Call("Plugin.MaxLeaseTTL", new(interface{}), &reply)
- if err != nil {
- return 0
- }
-
- return reply.MaxLeaseTTL
-}
-
-func (s *SystemViewClient) SudoPrivilege(path string, token string) bool {
- var reply SudoPrivilegeReply
- args := &SudoPrivilegeArgs{
- Path: path,
- Token: token,
- }
-
- err := s.client.Call("Plugin.SudoPrivilege", args, &reply)
- if err != nil {
- return false
- }
-
- return reply.Sudo
-}
-
-func (s *SystemViewClient) Tainted() bool {
- var reply TaintedReply
-
- err := s.client.Call("Plugin.Tainted", new(interface{}), &reply)
- if err != nil {
- return false
- }
-
- return reply.Tainted
-}
-
-func (s *SystemViewClient) CachingDisabled() bool {
- var reply CachingDisabledReply
-
- err := s.client.Call("Plugin.CachingDisabled", new(interface{}), &reply)
- if err != nil {
- return false
- }
-
- return reply.CachingDisabled
-}
-
-func (s *SystemViewClient) ReplicationState() consts.ReplicationState {
- var reply ReplicationStateReply
-
- err := s.client.Call("Plugin.ReplicationState", new(interface{}), &reply)
- if err != nil {
- return consts.ReplicationDisabled
- }
-
- return reply.ReplicationState
-}
-
-func (s *SystemViewClient) ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
- var reply ResponseWrapDataReply
- // Do not allow JWTs to be returned
- args := &ResponseWrapDataArgs{
- Data: data,
- TTL: ttl,
- JWT: false,
- }
-
- err := s.client.Call("Plugin.ResponseWrapData", args, &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- return nil, reply.Error
- }
-
- return reply.ResponseWrapInfo, nil
-}
-
-func (s *SystemViewClient) LookupPlugin(name string) (*pluginutil.PluginRunner, error) {
- return nil, fmt.Errorf("cannot call LookupPlugin from a plugin backend")
-}
-
-func (s *SystemViewClient) MlockEnabled() bool {
- var reply MlockEnabledReply
- err := s.client.Call("Plugin.MlockEnabled", new(interface{}), &reply)
- if err != nil {
- return false
- }
-
- return reply.MlockEnabled
-}
-
-type SystemViewServer struct {
- impl logical.SystemView
-}
-
-func (s *SystemViewServer) DefaultLeaseTTL(_ interface{}, reply *DefaultLeaseTTLReply) error {
- ttl := s.impl.DefaultLeaseTTL()
- *reply = DefaultLeaseTTLReply{
- DefaultLeaseTTL: ttl,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) MaxLeaseTTL(_ interface{}, reply *MaxLeaseTTLReply) error {
- ttl := s.impl.MaxLeaseTTL()
- *reply = MaxLeaseTTLReply{
- MaxLeaseTTL: ttl,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) SudoPrivilege(args *SudoPrivilegeArgs, reply *SudoPrivilegeReply) error {
- sudo := s.impl.SudoPrivilege(args.Path, args.Token)
- *reply = SudoPrivilegeReply{
- Sudo: sudo,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) Tainted(_ interface{}, reply *TaintedReply) error {
- tainted := s.impl.Tainted()
- *reply = TaintedReply{
- Tainted: tainted,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) CachingDisabled(_ interface{}, reply *CachingDisabledReply) error {
- cachingDisabled := s.impl.CachingDisabled()
- *reply = CachingDisabledReply{
- CachingDisabled: cachingDisabled,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) ReplicationState(_ interface{}, reply *ReplicationStateReply) error {
- replicationState := s.impl.ReplicationState()
- *reply = ReplicationStateReply{
- ReplicationState: replicationState,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) ResponseWrapData(args *ResponseWrapDataArgs, reply *ResponseWrapDataReply) error {
- // Do not allow JWTs to be returned
- info, err := s.impl.ResponseWrapData(args.Data, args.TTL, false)
- if err != nil {
- *reply = ResponseWrapDataReply{
- Error: plugin.NewBasicError(err),
- }
- return nil
- }
- *reply = ResponseWrapDataReply{
- ResponseWrapInfo: info,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) MlockEnabled(_ interface{}, reply *MlockEnabledReply) error {
- enabled := s.impl.MlockEnabled()
- *reply = MlockEnabledReply{
- MlockEnabled: enabled,
- }
-
- return nil
-}
-
-type DefaultLeaseTTLReply struct {
- DefaultLeaseTTL time.Duration
-}
-
-type MaxLeaseTTLReply struct {
- MaxLeaseTTL time.Duration
-}
-
-type SudoPrivilegeArgs struct {
- Path string
- Token string
-}
-
-type SudoPrivilegeReply struct {
- Sudo bool
-}
-
-type TaintedReply struct {
- Tainted bool
-}
-
-type CachingDisabledReply struct {
- CachingDisabled bool
-}
-
-type ReplicationStateReply struct {
- ReplicationState consts.ReplicationState
-}
-
-type ResponseWrapDataArgs struct {
- Data map[string]interface{}
- TTL time.Duration
- JWT bool
-}
-
-type ResponseWrapDataReply struct {
- ResponseWrapInfo *wrapping.ResponseWrapInfo
- Error *plugin.BasicError
-}
-
-type MlockEnabledReply struct {
- MlockEnabled bool
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/system_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/system_test.go
deleted file mode 100644
index 57e386b..0000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/system_test.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package plugin
-
-import (
- "testing"
-
- "reflect"
-
- plugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/logical"
-)
-
-func Test_impl(t *testing.T) {
- var _ logical.SystemView = new(SystemViewClient)
-}
-
-func TestSystem_defaultLeaseTTL(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.DefaultLeaseTTL()
- actual := testSystemView.DefaultLeaseTTL()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_maxLeaseTTL(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.MaxLeaseTTL()
- actual := testSystemView.MaxLeaseTTL()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_sudoPrivilege(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.SudoPrivilegeVal = true
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.SudoPrivilege("foo", "bar")
- actual := testSystemView.SudoPrivilege("foo", "bar")
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_tainted(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.TaintedVal = true
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.Tainted()
- actual := testSystemView.Tainted()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_cachingDisabled(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.CachingDisabledVal = true
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.CachingDisabled()
- actual := testSystemView.CachingDisabled()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_replicationState(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.ReplicationStateVal = consts.ReplicationPerformancePrimary
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.ReplicationState()
- actual := testSystemView.ReplicationState()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
-
-func TestSystem_responseWrapData(t *testing.T) {
- t.SkipNow()
-}
-
-func TestSystem_lookupPlugin(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- if _, err := testSystemView.LookupPlugin("foo"); err == nil {
- t.Fatal("LookPlugin(): expected error on due to unsupported call from plugin")
- }
-}
-
-func TestSystem_mlockEnabled(t *testing.T) {
- client, server := plugin.TestRPCConn(t)
- defer client.Close()
-
- sys := logical.TestSystemView()
- sys.EnableMlock = true
-
- server.RegisterName("Plugin", &SystemViewServer{
- impl: sys,
- })
-
- testSystemView := &SystemViewClient{client: client}
-
- expected := sys.MlockEnabled()
- actual := testSystemView.MlockEnabled()
- if !reflect.DeepEqual(expected, actual) {
- t.Fatalf("expected: %v, got: %v", expected, actual)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/request.go b/vendor/github.com/hashicorp/vault/logical/request.go
deleted file mode 100644
index cee0f0c..0000000
--- a/vendor/github.com/hashicorp/vault/logical/request.go
+++ /dev/null
@@ -1,205 +0,0 @@
-package logical
-
-import (
- "errors"
- "fmt"
- "time"
-)
-
-// RequestWrapInfo is a struct that stores information about desired response
-// wrapping behavior
-type RequestWrapInfo struct {
- // Setting to non-zero specifies that the response should be wrapped.
- // Specifies the desired TTL of the wrapping token.
- TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
-
- // The format to use for the wrapped response; if not specified it's a bare
- // token
- Format string `json:"format" structs:"format" mapstructure:"format"`
-}
-
-// Request is a struct that stores the parameters and context
-// of a request being made to Vault. It is used to abstract
-// the details of the higher level request protocol from the handlers.
-type Request struct {
- // Id is the uuid associated with each request
- ID string `json:"id" structs:"id" mapstructure:"id"`
-
- // If set, the name given to the replication secondary where this request
- // originated
- ReplicationCluster string `json:"replication_cluster" structs:"replication_cluster", mapstructure:"replication_cluster"`
-
- // Operation is the requested operation type
- Operation Operation `json:"operation" structs:"operation" mapstructure:"operation"`
-
- // Path is the part of the request path not consumed by the
- // routing. As an example, if the original request path is "prod/aws/foo"
- // and the AWS logical backend is mounted at "prod/aws/", then the
- // final path is "foo" since the mount prefix is trimmed.
- Path string `json:"path" structs:"path" mapstructure:"path"`
-
- // Request data is an opaque map that must have string keys.
- Data map[string]interface{} `json:"map" structs:"data" mapstructure:"data"`
-
- // Storage can be used to durably store and retrieve state.
- Storage Storage `json:"-"`
-
- // Secret will be non-nil only for Revoke and Renew operations
- // to represent the secret that was returned prior.
- Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret"`
-
- // Auth will be non-nil only for Renew operations
- // to represent the auth that was returned prior.
- Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth"`
-
- // Headers will contain the http headers from the request. This value will
- // be used in the audit broker to ensure we are auditing only the allowed
- // headers.
- Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers"`
-
- // Connection will be non-nil only for credential providers to
- // inspect the connection information and potentially use it for
- // authentication/protection.
- Connection *Connection `json:"connection" structs:"connection" mapstructure:"connection"`
-
- // ClientToken is provided to the core so that the identity
- // can be verified and ACLs applied. This value is passed
- // through to the logical backends but after being salted and
- // hashed.
- ClientToken string `json:"client_token" structs:"client_token" mapstructure:"client_token"`
-
- // ClientTokenAccessor is provided to the core so that the it can get
- // logged as part of request audit logging.
- ClientTokenAccessor string `json:"client_token_accessor" structs:"client_token_accessor" mapstructure:"client_token_accessor"`
-
- // DisplayName is provided to the logical backend to help associate
- // dynamic secrets with the source entity. This is not a sensitive
- // name, but is useful for operators.
- DisplayName string `json:"display_name" structs:"display_name" mapstructure:"display_name"`
-
- // MountPoint is provided so that a logical backend can generate
- // paths relative to itself. The `Path` is effectively the client
- // request path with the MountPoint trimmed off.
- MountPoint string `json:"mount_point" structs:"mount_point" mapstructure:"mount_point"`
-
- // MountType is provided so that a logical backend can make decisions
- // based on the specific mount type (e.g., if a mount type has different
- // aliases, generating different defaults depending on the alias)
- MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type"`
-
- // WrapInfo contains requested response wrapping parameters
- WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`
-
- // ClientTokenRemainingUses represents the allowed number of uses left on the
- // token supplied
- ClientTokenRemainingUses int `json:"client_token_remaining_uses" structs:"client_token_remaining_uses" mapstructure:"client_token_remaining_uses"`
-
- // For replication, contains the last WAL on the remote side after handling
- // the request, used for best-effort avoidance of stale read-after-write
- lastRemoteWAL uint64
-}
-
-// Get returns a data field and guards for nil Data
-func (r *Request) Get(key string) interface{} {
- if r.Data == nil {
- return nil
- }
- return r.Data[key]
-}
-
-// GetString returns a data field as a string
-func (r *Request) GetString(key string) string {
- raw := r.Get(key)
- s, _ := raw.(string)
- return s
-}
-
-func (r *Request) GoString() string {
- return fmt.Sprintf("*%#v", *r)
-}
-
-func (r *Request) LastRemoteWAL() uint64 {
- return r.lastRemoteWAL
-}
-
-func (r *Request) SetLastRemoteWAL(last uint64) {
- r.lastRemoteWAL = last
-}
-
-// RenewRequest creates the structure of the renew request.
-func RenewRequest(
- path string, secret *Secret, data map[string]interface{}) *Request {
- return &Request{
- Operation: RenewOperation,
- Path: path,
- Data: data,
- Secret: secret,
- }
-}
-
-// RenewAuthRequest creates the structure of the renew request for an auth.
-func RenewAuthRequest(
- path string, auth *Auth, data map[string]interface{}) *Request {
- return &Request{
- Operation: RenewOperation,
- Path: path,
- Data: data,
- Auth: auth,
- }
-}
-
-// RevokeRequest creates the structure of the revoke request.
-func RevokeRequest(
- path string, secret *Secret, data map[string]interface{}) *Request {
- return &Request{
- Operation: RevokeOperation,
- Path: path,
- Data: data,
- Secret: secret,
- }
-}
-
-// RollbackRequest creates the structure of the revoke request.
-func RollbackRequest(path string) *Request {
- return &Request{
- Operation: RollbackOperation,
- Path: path,
- Data: make(map[string]interface{}),
- }
-}
-
-// Operation is an enum that is used to specify the type
-// of request being made
-type Operation string
-
-const (
- // The operations below are called per path
- CreateOperation Operation = "create"
- ReadOperation = "read"
- UpdateOperation = "update"
- DeleteOperation = "delete"
- ListOperation = "list"
- HelpOperation = "help"
- PersonaLookaheadOperation = "persona-lookahead"
-
- // The operations below are called globally, the path is less relevant.
- RevokeOperation Operation = "revoke"
- RenewOperation = "renew"
- RollbackOperation = "rollback"
-)
-
-var (
- // ErrUnsupportedOperation is returned if the operation is not supported
- // by the logical backend.
- ErrUnsupportedOperation = errors.New("unsupported operation")
-
- // ErrUnsupportedPath is returned if the path is not supported
- // by the logical backend.
- ErrUnsupportedPath = errors.New("unsupported path")
-
- // ErrInvalidRequest is returned if the request is invalid
- ErrInvalidRequest = errors.New("invalid request")
-
- // ErrPermissionDenied is returned if the client is not authorized
- ErrPermissionDenied = errors.New("permission denied")
-)
diff --git a/vendor/github.com/hashicorp/vault/logical/response.go b/vendor/github.com/hashicorp/vault/logical/response.go
deleted file mode 100644
index 6ee452b..0000000
--- a/vendor/github.com/hashicorp/vault/logical/response.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package logical
-
-import (
- "errors"
-
- "github.com/hashicorp/vault/helper/wrapping"
-)
-
-const (
- // HTTPContentType can be specified in the Data field of a Response
- // so that the HTTP front end can specify a custom Content-Type associated
- // with the HTTPRawBody. This can only be used for non-secrets, and should
- // be avoided unless absolutely necessary, such as implementing a specification.
- // The value must be a string.
- HTTPContentType = "http_content_type"
-
- // HTTPRawBody is the raw content of the HTTP body that goes with the HTTPContentType.
- // This can only be specified for non-secrets, and should should be similarly
- // avoided like the HTTPContentType. The value must be a byte slice.
- HTTPRawBody = "http_raw_body"
-
- // HTTPStatusCode is the response code of the HTTP body that goes with the HTTPContentType.
- // This can only be specified for non-secrets, and should should be similarly
- // avoided like the HTTPContentType. The value must be an integer.
- HTTPStatusCode = "http_status_code"
-)
-
-// Response is a struct that stores the response of a request.
-// It is used to abstract the details of the higher level request protocol.
-type Response struct {
- // Secret, if not nil, denotes that this response represents a secret.
- Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret"`
-
- // Auth, if not nil, contains the authentication information for
- // this response. This is only checked and means something for
- // credential backends.
- Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth"`
-
- // Response data is an opaque map that must have string keys. For
- // secrets, this data is sent down to the user as-is. To store internal
- // data that you don't want the user to see, store it in
- // Secret.InternalData.
- Data map[string]interface{} `json:"data" structs:"data" mapstructure:"data"`
-
- // Redirect is an HTTP URL to redirect to for further authentication.
- // This is only valid for credential backends. This will be blanked
- // for any logical backend and ignored.
- Redirect string `json:"redirect" structs:"redirect" mapstructure:"redirect"`
-
- // Warnings allow operations or backends to return warnings in response
- // to user actions without failing the action outright.
- Warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"`
-
- // Information for wrapping the response in a cubbyhole
- WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`
-}
-
-// AddWarning adds a warning into the response's warning list
-func (r *Response) AddWarning(warning string) {
- if r.Warnings == nil {
- r.Warnings = make([]string, 0, 1)
- }
- r.Warnings = append(r.Warnings, warning)
-}
-
-// IsError returns true if this response seems to indicate an error.
-func (r *Response) IsError() bool {
- return r != nil && r.Data != nil && len(r.Data) == 1 && r.Data["error"] != nil
-}
-
-func (r *Response) Error() error {
- if !r.IsError() {
- return nil
- }
- switch r.Data["error"].(type) {
- case string:
- return errors.New(r.Data["error"].(string))
- case error:
- return r.Data["error"].(error)
- }
- return nil
-}
-
-// HelpResponse is used to format a help response
-func HelpResponse(text string, seeAlso []string) *Response {
- return &Response{
- Data: map[string]interface{}{
- "help": text,
- "see_also": seeAlso,
- },
- }
-}
-
-// ErrorResponse is used to format an error response
-func ErrorResponse(text string) *Response {
- return &Response{
- Data: map[string]interface{}{
- "error": text,
- },
- }
-}
-
-// ListResponse is used to format a response to a list operation.
-func ListResponse(keys []string) *Response {
- resp := &Response{
- Data: map[string]interface{}{},
- }
- if len(keys) != 0 {
- resp.Data["keys"] = keys
- }
- return resp
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/response_util.go b/vendor/github.com/hashicorp/vault/logical/response_util.go
deleted file mode 100644
index a3fd2bf..0000000
--- a/vendor/github.com/hashicorp/vault/logical/response_util.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package logical
-
-import (
- "errors"
- "fmt"
- "net/http"
-
- "github.com/hashicorp/errwrap"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/helper/consts"
-)
-
-// RespondErrorCommon pulls most of the functionality from http's
-// respondErrorCommon and some of http's handleLogical and makes it available
-// to both the http package and elsewhere.
-func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) {
- if err == nil && (resp == nil || !resp.IsError()) {
- switch {
- case req.Operation == ReadOperation:
- if resp == nil {
- return http.StatusNotFound, nil
- }
-
- // Basically: if we have empty "keys" or no keys at all, 404. This
- // provides consistency with GET.
- case req.Operation == ListOperation && resp.WrapInfo == nil:
- if resp == nil || len(resp.Data) == 0 {
- return http.StatusNotFound, nil
- }
- keysRaw, ok := resp.Data["keys"]
- if !ok || keysRaw == nil {
- return http.StatusNotFound, nil
- }
- keys, ok := keysRaw.([]string)
- if !ok {
- return http.StatusInternalServerError, nil
- }
- if len(keys) == 0 {
- return http.StatusNotFound, nil
- }
- }
-
- return 0, nil
- }
-
- if errwrap.ContainsType(err, new(ReplicationCodedError)) {
- var allErrors error
- codedErr := errwrap.GetType(err, new(ReplicationCodedError)).(*ReplicationCodedError)
- errwrap.Walk(err, func(inErr error) {
- newErr, ok := inErr.(*ReplicationCodedError)
- if !ok {
- allErrors = multierror.Append(allErrors, newErr)
- }
- })
- if allErrors != nil {
- return codedErr.Code, multierror.Append(errors.New(fmt.Sprintf("errors from both primary and secondary; primary error was %v; secondary errors follow", codedErr.Msg)), allErrors)
- }
- return codedErr.Code, errors.New(codedErr.Msg)
- }
-
- // Start out with internal server error since in most of these cases there
- // won't be a response so this won't be overridden
- statusCode := http.StatusInternalServerError
- // If we actually have a response, start out with bad request
- if resp != nil {
- statusCode = http.StatusBadRequest
- }
-
- // Now, check the error itself; if it has a specific logical error, set the
- // appropriate code
- if err != nil {
- switch {
- case errwrap.ContainsType(err, new(StatusBadRequest)):
- statusCode = http.StatusBadRequest
- case errwrap.Contains(err, ErrPermissionDenied.Error()):
- statusCode = http.StatusForbidden
- case errwrap.Contains(err, ErrUnsupportedOperation.Error()):
- statusCode = http.StatusMethodNotAllowed
- case errwrap.Contains(err, ErrUnsupportedPath.Error()):
- statusCode = http.StatusNotFound
- case errwrap.Contains(err, ErrInvalidRequest.Error()):
- statusCode = http.StatusBadRequest
- }
- }
-
- if resp != nil && resp.IsError() {
- err = fmt.Errorf("%s", resp.Data["error"].(string))
- }
-
- return statusCode, err
-}
-
-// AdjustErrorStatusCode adjusts the status that will be sent in error
-// conditions in a way that can be shared across http's respondError and other
-// locations.
-func AdjustErrorStatusCode(status *int, err error) {
- // Adjust status code when sealed
- if errwrap.Contains(err, consts.ErrSealed.Error()) {
- *status = http.StatusServiceUnavailable
- }
-
- // Adjust status code on
- if errwrap.Contains(err, "http: request body too large") {
- *status = http.StatusRequestEntityTooLarge
- }
-
- // Allow HTTPCoded error passthrough to specify a code
- if t, ok := err.(HTTPCodedError); ok {
- *status = t.Code()
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/secret.go b/vendor/github.com/hashicorp/vault/logical/secret.go
deleted file mode 100644
index 27ad8d9..0000000
--- a/vendor/github.com/hashicorp/vault/logical/secret.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package logical
-
-import "fmt"
-
-// Secret represents the secret part of a response.
-type Secret struct {
- LeaseOptions
-
- // InternalData is JSON-encodable data that is stored with the secret.
- // This will be sent back during a Renew/Revoke for storing internal data
- // used for those operations.
- InternalData map[string]interface{} `json:"internal_data"`
-
- // LeaseID is the ID returned to the user to manage this secret.
- // This is generated by Vault core. Any set value will be ignored.
- // For requests, this will always be blank.
- LeaseID string
-}
-
-func (s *Secret) Validate() error {
- if s.TTL < 0 {
- return fmt.Errorf("ttl duration must not be less than zero")
- }
-
- return nil
-}
-
-func (s *Secret) GoString() string {
- return fmt.Sprintf("*%#v", *s)
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/storage.go b/vendor/github.com/hashicorp/vault/logical/storage.go
deleted file mode 100644
index 51487de..0000000
--- a/vendor/github.com/hashicorp/vault/logical/storage.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package logical
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/jsonutil"
-)
-
-// ErrReadOnly is returned when a backend does not support
-// writing. This can be caused by a read-only replica or secondary
-// cluster operation.
-var ErrReadOnly = errors.New("Cannot write to readonly storage")
-
-// Storage is the way that logical backends are able read/write data.
-type Storage interface {
- List(prefix string) ([]string, error)
- Get(string) (*StorageEntry, error)
- Put(*StorageEntry) error
- Delete(string) error
-}
-
-// StorageEntry is the entry for an item in a Storage implementation.
-type StorageEntry struct {
- Key string
- Value []byte
-}
-
-// DecodeJSON decodes the 'Value' present in StorageEntry.
-func (e *StorageEntry) DecodeJSON(out interface{}) error {
- return jsonutil.DecodeJSON(e.Value, out)
-}
-
-// StorageEntryJSON creates a StorageEntry with a JSON-encoded value.
-func StorageEntryJSON(k string, v interface{}) (*StorageEntry, error) {
- encodedBytes, err := jsonutil.EncodeJSON(v)
- if err != nil {
- return nil, fmt.Errorf("failed to encode storage entry: %v", err)
- }
-
- return &StorageEntry{
- Key: k,
- Value: encodedBytes,
- }, nil
-}
-
-type ClearableView interface {
- List(string) ([]string, error)
- Delete(string) error
-}
-
-// ScanView is used to scan all the keys in a view iteratively
-func ScanView(view ClearableView, cb func(path string)) error {
- frontier := []string{""}
- for len(frontier) > 0 {
- n := len(frontier)
- current := frontier[n-1]
- frontier = frontier[:n-1]
-
- // List the contents
- contents, err := view.List(current)
- if err != nil {
- return fmt.Errorf("list failed at path '%s': %v", current, err)
- }
-
- // Handle the contents in the directory
- for _, c := range contents {
- fullPath := current + c
- if strings.HasSuffix(c, "/") {
- frontier = append(frontier, fullPath)
- } else {
- cb(fullPath)
- }
- }
- }
- return nil
-}
-
-// CollectKeys is used to collect all the keys in a view
-func CollectKeys(view ClearableView) ([]string, error) {
- // Accumulate the keys
- var existing []string
- cb := func(path string) {
- existing = append(existing, path)
- }
-
- // Scan for all the keys
- if err := ScanView(view, cb); err != nil {
- return nil, err
- }
- return existing, nil
-}
-
-// ClearView is used to delete all the keys in a view
-func ClearView(view ClearableView) error {
- // Collect all the keys
- keys, err := CollectKeys(view)
- if err != nil {
- return err
- }
-
- // Delete all the keys
- for _, key := range keys {
- if err := view.Delete(key); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/storage_inmem.go b/vendor/github.com/hashicorp/vault/logical/storage_inmem.go
deleted file mode 100644
index 0112ae2..0000000
--- a/vendor/github.com/hashicorp/vault/logical/storage_inmem.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package logical
-
-import (
- "strings"
- "sync"
-
- radix "github.com/armon/go-radix"
-)
-
-// InmemStorage implements Storage and stores all data in memory. It is
-// basically a straight copy of physical.Inmem, but it prevents backends from
-// having to load all of physical's dependencies (which are legion) just to
-// have some testing storage.
-type InmemStorage struct {
- sync.RWMutex
- root *radix.Tree
- once sync.Once
-}
-
-func (s *InmemStorage) Get(key string) (*StorageEntry, error) {
- s.once.Do(s.init)
-
- s.RLock()
- defer s.RUnlock()
-
- if raw, ok := s.root.Get(key); ok {
- se := raw.(*StorageEntry)
- return &StorageEntry{
- Key: se.Key,
- Value: se.Value,
- }, nil
- }
-
- return nil, nil
-}
-
-func (s *InmemStorage) Put(entry *StorageEntry) error {
- s.once.Do(s.init)
-
- s.Lock()
- defer s.Unlock()
-
- s.root.Insert(entry.Key, &StorageEntry{
- Key: entry.Key,
- Value: entry.Value,
- })
- return nil
-}
-
-func (s *InmemStorage) Delete(key string) error {
- s.once.Do(s.init)
-
- s.Lock()
- defer s.Unlock()
-
- s.root.Delete(key)
- return nil
-}
-
-func (s *InmemStorage) List(prefix string) ([]string, error) {
- s.once.Do(s.init)
-
- s.RLock()
- defer s.RUnlock()
-
- var out []string
- seen := make(map[string]interface{})
- walkFn := func(s string, v interface{}) bool {
- trimmed := strings.TrimPrefix(s, prefix)
- sep := strings.Index(trimmed, "/")
- if sep == -1 {
- out = append(out, trimmed)
- } else {
- trimmed = trimmed[:sep+1]
- if _, ok := seen[trimmed]; !ok {
- out = append(out, trimmed)
- seen[trimmed] = struct{}{}
- }
- }
- return false
- }
- s.root.WalkPrefix(prefix, walkFn)
-
- return out, nil
-
-}
-
-func (s *InmemStorage) init() {
- s.root = radix.New()
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/storage_inmem_test.go b/vendor/github.com/hashicorp/vault/logical/storage_inmem_test.go
deleted file mode 100644
index 8e0964f..0000000
--- a/vendor/github.com/hashicorp/vault/logical/storage_inmem_test.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package logical
-
-import (
- "testing"
-)
-
-func TestInmemStorage(t *testing.T) {
- TestStorage(t, new(InmemStorage))
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/system_view.go b/vendor/github.com/hashicorp/vault/logical/system_view.go
deleted file mode 100644
index 64fc51c..0000000
--- a/vendor/github.com/hashicorp/vault/logical/system_view.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package logical
-
-import (
- "errors"
- "time"
-
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/helper/wrapping"
-)
-
-// SystemView exposes system configuration information in a safe way
-// for logical backends to consume
-type SystemView interface {
- // DefaultLeaseTTL returns the default lease TTL set in Vault configuration
- DefaultLeaseTTL() time.Duration
-
- // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend
- // authors should take care not to issue credentials that last longer than
- // this value, as Vault will revoke them
- MaxLeaseTTL() time.Duration
-
- // SudoPrivilege returns true if given path has sudo privileges
- // for the given client token
- SudoPrivilege(path string, token string) bool
-
- // Returns true if the mount is tainted. A mount is tainted if it is in the
- // process of being unmounted. This should only be used in special
- // circumstances; a primary use-case is as a guard in revocation functions.
- // If revocation of a backend's leases fails it can keep the unmounting
- // process from being successful. If the reason for this failure is not
- // relevant when the mount is tainted (for instance, saving a CRL to disk
- // when the stored CRL will be removed during the unmounting process
- // anyways), we can ignore the errors to allow unmounting to complete.
- Tainted() bool
-
- // Returns true if caching is disabled. If true, no caches should be used,
- // despite known slowdowns.
- CachingDisabled() bool
-
- // ReplicationState indicates the state of cluster replication
- ReplicationState() consts.ReplicationState
-
- // ResponseWrapData wraps the given data in a cubbyhole and returns the
- // token used to unwrap.
- ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error)
-
- // LookupPlugin looks into the plugin catalog for a plugin with the given
- // name. Returns a PluginRunner or an error if a plugin can not be found.
- LookupPlugin(string) (*pluginutil.PluginRunner, error)
-
- // MlockEnabled returns the configuration setting for enabling mlock on
- // plugins.
- MlockEnabled() bool
-}
-
-type StaticSystemView struct {
- DefaultLeaseTTLVal time.Duration
- MaxLeaseTTLVal time.Duration
- SudoPrivilegeVal bool
- TaintedVal bool
- CachingDisabledVal bool
- Primary bool
- EnableMlock bool
- ReplicationStateVal consts.ReplicationState
-}
-
-func (d StaticSystemView) DefaultLeaseTTL() time.Duration {
- return d.DefaultLeaseTTLVal
-}
-
-func (d StaticSystemView) MaxLeaseTTL() time.Duration {
- return d.MaxLeaseTTLVal
-}
-
-func (d StaticSystemView) SudoPrivilege(path string, token string) bool {
- return d.SudoPrivilegeVal
-}
-
-func (d StaticSystemView) Tainted() bool {
- return d.TaintedVal
-}
-
-func (d StaticSystemView) CachingDisabled() bool {
- return d.CachingDisabledVal
-}
-
-func (d StaticSystemView) ReplicationState() consts.ReplicationState {
- return d.ReplicationStateVal
-}
-
-func (d StaticSystemView) ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
- return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView")
-}
-
-func (d StaticSystemView) LookupPlugin(name string) (*pluginutil.PluginRunner, error) {
- return nil, errors.New("LookupPlugin is not implemented in StaticSystemView")
-}
-
-func (d StaticSystemView) MlockEnabled() bool {
- return d.EnableMlock
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/testing.go b/vendor/github.com/hashicorp/vault/logical/testing.go
deleted file mode 100644
index 5bb60bb..0000000
--- a/vendor/github.com/hashicorp/vault/logical/testing.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package logical
-
-import (
- "reflect"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/logformat"
- log "github.com/mgutz/logxi/v1"
-)
-
-// TestRequest is a helper to create a purely in-memory Request struct.
-func TestRequest(t *testing.T, op Operation, path string) *Request {
- return &Request{
- Operation: op,
- Path: path,
- Data: make(map[string]interface{}),
- Storage: new(InmemStorage),
- }
-}
-
-// TestStorage is a helper that can be used from unit tests to verify
-// the behavior of a Storage impl.
-func TestStorage(t *testing.T, s Storage) {
- keys, err := s.List("")
- if err != nil {
- t.Fatalf("list error: %s", err)
- }
- if len(keys) > 0 {
- t.Fatalf("should have no keys to start: %#v", keys)
- }
-
- entry := &StorageEntry{Key: "foo", Value: []byte("bar")}
- if err := s.Put(entry); err != nil {
- t.Fatalf("put error: %s", err)
- }
-
- actual, err := s.Get("foo")
- if err != nil {
- t.Fatalf("get error: %s", err)
- }
- if !reflect.DeepEqual(actual, entry) {
- t.Fatalf("wrong value. Expected: %#v\nGot: %#v", entry, actual)
- }
-
- keys, err = s.List("")
- if err != nil {
- t.Fatalf("list error: %s", err)
- }
- if !reflect.DeepEqual(keys, []string{"foo"}) {
- t.Fatalf("bad keys: %#v", keys)
- }
-
- if err := s.Delete("foo"); err != nil {
- t.Fatalf("put error: %s", err)
- }
-
- keys, err = s.List("")
- if err != nil {
- t.Fatalf("list error: %s", err)
- }
- if len(keys) > 0 {
- t.Fatalf("should have no keys to start: %#v", keys)
- }
-}
-
-func TestSystemView() *StaticSystemView {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 2
- return &StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- }
-}
-
-func TestBackendConfig() *BackendConfig {
- bc := &BackendConfig{
- Logger: logformat.NewVaultLogger(log.LevelTrace),
- System: TestSystemView(),
- }
- bc.Logger.SetLevel(log.LevelTrace)
-
- return bc
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/testing/testing.go b/vendor/github.com/hashicorp/vault/logical/testing/testing.go
deleted file mode 100644
index ca52cdd..0000000
--- a/vendor/github.com/hashicorp/vault/logical/testing/testing.go
+++ /dev/null
@@ -1,412 +0,0 @@
-package testing
-
-import (
- "crypto/tls"
- "fmt"
- "os"
- "reflect"
- "sort"
- "testing"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/http"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical/inmem"
- "github.com/hashicorp/vault/vault"
-)
-
-// TestEnvVar must be set to a non-empty value for acceptance tests to run.
-const TestEnvVar = "VAULT_ACC"
-
-// TestCase is a single set of tests to run for a backend. A TestCase
-// should generally map 1:1 to each test method for your acceptance
-// tests.
-type TestCase struct {
- // Precheck, if non-nil, will be called once before the test case
- // runs at all. This can be used for some validation prior to the
- // test running.
- PreCheck func()
-
- // Backend is the backend that will be mounted.
- Backend logical.Backend
-
- // Factory can be used instead of Backend if the
- // backend requires more construction
- Factory logical.Factory
-
- // Steps are the set of operations that are run for this test case.
- Steps []TestStep
-
- // Teardown will be called before the test case is over regardless
- // of if the test succeeded or failed. This should return an error
- // in the case that the test can't guarantee all resources were
- // properly cleaned up.
- Teardown TestTeardownFunc
-
- // AcceptanceTest, if set, the test case will be run only if
- // the environment variable VAULT_ACC is set. If not this test case
- // will be run as a unit test.
- AcceptanceTest bool
-}
-
-// TestStep is a single step within a TestCase.
-type TestStep struct {
- // Operation is the operation to execute
- Operation logical.Operation
-
- // Path is the request path. The mount prefix will be automatically added.
- Path string
-
- // Arguments to pass in
- Data map[string]interface{}
-
- // Check is called after this step is executed in order to test that
- // the step executed successfully. If this is not set, then the next
- // step will be called
- Check TestCheckFunc
-
- // PreFlight is called directly before execution of the request, allowing
- // modification of the request parameters (e.g. Path) with dynamic values.
- PreFlight PreFlightFunc
-
- // ErrorOk, if true, will let erroneous responses through to the check
- ErrorOk bool
-
- // Unauthenticated, if true, will make the request unauthenticated.
- Unauthenticated bool
-
- // RemoteAddr, if set, will set the remote addr on the request.
- RemoteAddr string
-
- // ConnState, if set, will set the tls conneciton state
- ConnState *tls.ConnectionState
-}
-
-// TestCheckFunc is the callback used for Check in TestStep.
-type TestCheckFunc func(*logical.Response) error
-
-// PreFlightFunc is used to modify request parameters directly before execution
-// in each TestStep.
-type PreFlightFunc func(*logical.Request) error
-
-// TestTeardownFunc is the callback used for Teardown in TestCase.
-type TestTeardownFunc func() error
-
-// Test performs an acceptance test on a backend with the given test case.
-//
-// Tests are not run unless an environmental variable "VAULT_ACC" is
-// set to some non-empty value. This is to avoid test cases surprising
-// a user by creating real resources.
-//
-// Tests will fail unless the verbose flag (`go test -v`, or explicitly
-// the "-test.v" flag) is set. Because some acceptance tests take quite
-// long, we require the verbose flag so users are able to see progress
-// output.
-func Test(tt TestT, c TestCase) {
- // We only run acceptance tests if an env var is set because they're
- // slow and generally require some outside configuration.
- if c.AcceptanceTest && os.Getenv(TestEnvVar) == "" {
- tt.Skip(fmt.Sprintf(
- "Acceptance tests skipped unless env '%s' set",
- TestEnvVar))
- return
- }
-
- // We require verbose mode so that the user knows what is going on.
- if c.AcceptanceTest && !testTesting && !testing.Verbose() {
- tt.Fatal("Acceptance tests must be run with the -v flag on tests")
- return
- }
-
- // Run the PreCheck if we have it
- if c.PreCheck != nil {
- c.PreCheck()
- }
-
- // Check that something is provided
- if c.Backend == nil && c.Factory == nil {
- tt.Fatal("Must provide either Backend or Factory")
- return
- }
-
- // Create an in-memory Vault core
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- phys, err := inmem.NewInmem(nil, logger)
- if err != nil {
- tt.Fatal(err)
- return
- }
-
- core, err := vault.NewCore(&vault.CoreConfig{
- Physical: phys,
- LogicalBackends: map[string]logical.Factory{
- "test": func(conf *logical.BackendConfig) (logical.Backend, error) {
- if c.Backend != nil {
- return c.Backend, nil
- }
- return c.Factory(conf)
- },
- },
- DisableMlock: true,
- })
- if err != nil {
- tt.Fatal("error initializing core: ", err)
- return
- }
-
- // Initialize the core
- init, err := core.Initialize(&vault.InitParams{
- BarrierConfig: &vault.SealConfig{
- SecretShares: 1,
- SecretThreshold: 1,
- },
- RecoveryConfig: nil,
- })
- if err != nil {
- tt.Fatal("error initializing core: ", err)
- return
- }
-
- // Unseal the core
- if unsealed, err := core.Unseal(init.SecretShares[0]); err != nil {
- tt.Fatal("error unsealing core: ", err)
- return
- } else if !unsealed {
- tt.Fatal("vault shouldn't be sealed")
- return
- }
-
- // Create an HTTP API server and client
- ln, addr := http.TestServer(nil, core)
- defer ln.Close()
- clientConfig := api.DefaultConfig()
- clientConfig.Address = addr
- client, err := api.NewClient(clientConfig)
- if err != nil {
- tt.Fatal("error initializing HTTP client: ", err)
- return
- }
-
- // Set the token so we're authenticated
- client.SetToken(init.RootToken)
-
- // Mount the backend
- prefix := "mnt"
- mountInfo := &api.MountInput{
- Type: "test",
- Description: "acceptance test",
- }
- if err := client.Sys().Mount(prefix, mountInfo); err != nil {
- tt.Fatal("error mounting backend: ", err)
- return
- }
-
- // Make requests
- var revoke []*logical.Request
- for i, s := range c.Steps {
- if log.IsWarn() {
- log.Warn("Executing test step", "step_number", i+1)
- }
-
- // Create the request
- req := &logical.Request{
- Operation: s.Operation,
- Path: s.Path,
- Data: s.Data,
- }
- if !s.Unauthenticated {
- req.ClientToken = client.Token()
- }
- if s.RemoteAddr != "" {
- req.Connection = &logical.Connection{RemoteAddr: s.RemoteAddr}
- }
- if s.ConnState != nil {
- req.Connection = &logical.Connection{ConnState: s.ConnState}
- }
-
- if s.PreFlight != nil {
- ct := req.ClientToken
- req.ClientToken = ""
- if err := s.PreFlight(req); err != nil {
- tt.Error(fmt.Sprintf("Failed preflight for step %d: %s", i+1, err))
- break
- }
- req.ClientToken = ct
- }
-
- // Make sure to prefix the path with where we mounted the thing
- req.Path = fmt.Sprintf("%s/%s", prefix, req.Path)
-
- // Make the request
- resp, err := core.HandleRequest(req)
- if resp != nil && resp.Secret != nil {
- // Revoke this secret later
- revoke = append(revoke, &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/revoke/" + resp.Secret.LeaseID,
- })
- }
-
- // Test step returned an error.
- if err != nil {
- // But if an error is expected, do not fail the test step,
- // regardless of whether the error is a 'logical.ErrorResponse'
- // or not. Set the err to nil. If the error is a logical.ErrorResponse,
- // it will be handled later.
- if s.ErrorOk {
- err = nil
- } else {
- // If the error is not expected, fail right away.
- tt.Error(fmt.Sprintf("Failed step %d: %s", i+1, err))
- break
- }
- }
-
- // If the error is a 'logical.ErrorResponse' and if error was not expected,
- // set the error so that this can be caught below.
- if resp.IsError() && !s.ErrorOk {
- err = fmt.Errorf("Erroneous response:\n\n%#v", resp)
- }
-
- // Either the 'err' was nil or if an error was expected, it was set to nil.
- // Call the 'Check' function if there is one.
- //
- // TODO: This works perfectly for now, but it would be better if 'Check'
- // function takes in both the response object and the error, and decide on
- // the action on its own.
- if err == nil && s.Check != nil {
- // Call the test method
- err = s.Check(resp)
- }
-
- if err != nil {
- tt.Error(fmt.Sprintf("Failed step %d: %s", i+1, err))
- break
- }
- }
-
- // Revoke any secrets we might have.
- var failedRevokes []*logical.Secret
- for _, req := range revoke {
- if log.IsWarn() {
- log.Warn("Revoking secret", "secret", fmt.Sprintf("%#v", req))
- }
- req.ClientToken = client.Token()
- resp, err := core.HandleRequest(req)
- if err == nil && resp.IsError() {
- err = fmt.Errorf("Erroneous response:\n\n%#v", resp)
- }
- if err != nil {
- failedRevokes = append(failedRevokes, req.Secret)
- tt.Error(fmt.Sprintf("Revoke error: %s", err))
- }
- }
-
- // Perform any rollbacks. This should no-op if there aren't any.
- // We set the "immediate" flag here that any backend can pick up on
- // to do all rollbacks immediately even if the WAL entries are new.
- log.Warn("Requesting RollbackOperation")
- req := logical.RollbackRequest(prefix + "/")
- req.Data["immediate"] = true
- req.ClientToken = client.Token()
- resp, err := core.HandleRequest(req)
- if err == nil && resp.IsError() {
- err = fmt.Errorf("Erroneous response:\n\n%#v", resp)
- }
- if err != nil {
- if !errwrap.Contains(err, logical.ErrUnsupportedOperation.Error()) {
- tt.Error(fmt.Sprintf("[ERR] Rollback error: %s", err))
- }
- }
-
- // If we have any failed revokes, log it.
- if len(failedRevokes) > 0 {
- for _, s := range failedRevokes {
- tt.Error(fmt.Sprintf(
- "WARNING: Revoking the following secret failed. It may\n"+
- "still exist. Please verify:\n\n%#v",
- s))
- }
- }
-
- // Cleanup
- if c.Teardown != nil {
- c.Teardown()
- }
-}
-
-// TestCheckMulti is a helper to have multiple checks.
-func TestCheckMulti(fs ...TestCheckFunc) TestCheckFunc {
- return func(resp *logical.Response) error {
- for _, f := range fs {
- if err := f(resp); err != nil {
- return err
- }
- }
-
- return nil
- }
-}
-
-// TestCheckAuth is a helper to check that a request generated an
-// auth token with the proper policies.
-func TestCheckAuth(policies []string) TestCheckFunc {
- return func(resp *logical.Response) error {
- if resp == nil || resp.Auth == nil {
- return fmt.Errorf("no auth in response")
- }
- expected := make([]string, len(policies))
- copy(expected, policies)
- sort.Strings(expected)
- ret := make([]string, len(resp.Auth.Policies))
- copy(ret, resp.Auth.Policies)
- sort.Strings(ret)
- if !reflect.DeepEqual(ret, expected) {
- return fmt.Errorf("invalid policies: expected %#v, got %#v", expected, ret)
- }
-
- return nil
- }
-}
-
-// TestCheckAuthDisplayName is a helper to check that a request generated a
-// valid display name.
-func TestCheckAuthDisplayName(n string) TestCheckFunc {
- return func(resp *logical.Response) error {
- if resp.Auth == nil {
- return fmt.Errorf("no auth in response")
- }
- if n != "" && resp.Auth.DisplayName != "mnt-"+n {
- return fmt.Errorf("invalid display name: %#v", resp.Auth.DisplayName)
- }
-
- return nil
- }
-}
-
-// TestCheckError is a helper to check that a response is an error.
-func TestCheckError() TestCheckFunc {
- return func(resp *logical.Response) error {
- if !resp.IsError() {
- return fmt.Errorf("response should be error")
- }
-
- return nil
- }
-}
-
-// TestT is the interface used to handle the test lifecycle of a test.
-//
-// Users should just use a *testing.T object, which implements this.
-type TestT interface {
- Error(args ...interface{})
- Fatal(args ...interface{})
- Skip(args ...interface{})
-}
-
-var testTesting = false
diff --git a/vendor/github.com/hashicorp/vault/logical/testing/testing_test.go b/vendor/github.com/hashicorp/vault/logical/testing/testing_test.go
deleted file mode 100644
index 5a4096b..0000000
--- a/vendor/github.com/hashicorp/vault/logical/testing/testing_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package testing
-
-import (
- "os"
- "testing"
-)
-
-func init() {
- testTesting = true
-
- if err := os.Setenv(TestEnvVar, "1"); err != nil {
- panic(err)
- }
-}
-
-func TestTest_noEnv(t *testing.T) {
- // Unset the variable
- if err := os.Setenv(TestEnvVar, ""); err != nil {
- t.Fatalf("err: %s", err)
- }
- defer os.Setenv(TestEnvVar, "1")
-
- mt := new(mockT)
- Test(mt, TestCase{
- AcceptanceTest: true,
- })
-
- if !mt.SkipCalled {
- t.Fatal("skip not called")
- }
-}
-
-func TestTest_preCheck(t *testing.T) {
- called := false
-
- mt := new(mockT)
- Test(mt, TestCase{
- PreCheck: func() { called = true },
- })
-
- if !called {
- t.Fatal("precheck should be called")
- }
-}
-
-// mockT implements TestT for testing
-type mockT struct {
- ErrorCalled bool
- ErrorArgs []interface{}
- FatalCalled bool
- FatalArgs []interface{}
- SkipCalled bool
- SkipArgs []interface{}
-
- f bool
-}
-
-func (t *mockT) Error(args ...interface{}) {
- t.ErrorCalled = true
- t.ErrorArgs = args
- t.f = true
-}
-
-func (t *mockT) Fatal(args ...interface{}) {
- t.FatalCalled = true
- t.FatalArgs = args
- t.f = true
-}
-
-func (t *mockT) Skip(args ...interface{}) {
- t.SkipCalled = true
- t.SkipArgs = args
- t.f = true
-}
-
-func (t *mockT) failed() bool {
- return t.f
-}
-
-func (t *mockT) failMessage() string {
- if t.FatalCalled {
- return t.FatalArgs[0].(string)
- } else if t.ErrorCalled {
- return t.ErrorArgs[0].(string)
- } else if t.SkipCalled {
- return t.SkipArgs[0].(string)
- }
-
- return "unknown"
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/translate_response.go b/vendor/github.com/hashicorp/vault/logical/translate_response.go
deleted file mode 100644
index d3d7271..0000000
--- a/vendor/github.com/hashicorp/vault/logical/translate_response.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package logical
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "time"
-)
-
-// This logic was pulled from the http package so that it can be used for
-// encoding wrapped responses as well. It simply translates the logical request
-// to an http response, with the values we want and omitting the values we
-// don't.
-func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse {
- httpResp := &HTTPResponse{
- Data: input.Data,
- Warnings: input.Warnings,
- }
-
- if input.Secret != nil {
- httpResp.LeaseID = input.Secret.LeaseID
- httpResp.Renewable = input.Secret.Renewable
- httpResp.LeaseDuration = int(input.Secret.TTL.Seconds())
- }
-
- // If we have authentication information, then
- // set up the result structure.
- if input.Auth != nil {
- httpResp.Auth = &HTTPAuth{
- ClientToken: input.Auth.ClientToken,
- Accessor: input.Auth.Accessor,
- Policies: input.Auth.Policies,
- Metadata: input.Auth.Metadata,
- LeaseDuration: int(input.Auth.TTL.Seconds()),
- Renewable: input.Auth.Renewable,
- }
- }
-
- return httpResp
-}
-
-func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response {
- logicalResp := &Response{
- Data: input.Data,
- Warnings: input.Warnings,
- }
-
- if input.LeaseID != "" {
- logicalResp.Secret = &Secret{
- LeaseID: input.LeaseID,
- }
- logicalResp.Secret.Renewable = input.Renewable
- logicalResp.Secret.TTL = time.Second * time.Duration(input.LeaseDuration)
- }
-
- if input.Auth != nil {
- logicalResp.Auth = &Auth{
- ClientToken: input.Auth.ClientToken,
- Accessor: input.Auth.Accessor,
- Policies: input.Auth.Policies,
- Metadata: input.Auth.Metadata,
- }
- logicalResp.Auth.Renewable = input.Auth.Renewable
- logicalResp.Auth.TTL = time.Second * time.Duration(input.Auth.LeaseDuration)
- }
-
- return logicalResp
-}
-
-type HTTPResponse struct {
- RequestID string `json:"request_id"`
- LeaseID string `json:"lease_id"`
- Renewable bool `json:"renewable"`
- LeaseDuration int `json:"lease_duration"`
- Data map[string]interface{} `json:"data"`
- WrapInfo *HTTPWrapInfo `json:"wrap_info"`
- Warnings []string `json:"warnings"`
- Auth *HTTPAuth `json:"auth"`
-}
-
-type HTTPAuth struct {
- ClientToken string `json:"client_token"`
- Accessor string `json:"accessor"`
- Policies []string `json:"policies"`
- Metadata map[string]string `json:"metadata"`
- LeaseDuration int `json:"lease_duration"`
- Renewable bool `json:"renewable"`
-}
-
-type HTTPWrapInfo struct {
- Token string `json:"token"`
- TTL int `json:"ttl"`
- CreationTime string `json:"creation_time"`
- CreationPath string `json:"creation_path"`
- WrappedAccessor string `json:"wrapped_accessor,omitempty"`
-}
-
-type HTTPSysInjector struct {
- Response *HTTPResponse
-}
-
-func (h HTTPSysInjector) MarshalJSON() ([]byte, error) {
- j, err := json.Marshal(h.Response)
- if err != nil {
- return nil, err
- }
-
- // Fast path no data or empty data
- if h.Response.Data == nil || len(h.Response.Data) == 0 {
- return j, nil
- }
-
- // Marshaling a response will always be a JSON object, meaning it will
- // always start with '{', so we hijack this to prepend necessary values
-
- // Make a guess at the capacity, and write the object opener
- buf := bytes.NewBuffer(make([]byte, 0, len(j)*2))
- buf.WriteRune('{')
-
- for k, v := range h.Response.Data {
- // Marshal each key/value individually
- mk, err := json.Marshal(k)
- if err != nil {
- return nil, err
- }
- mv, err := json.Marshal(v)
- if err != nil {
- return nil, err
- }
- // Write into the final buffer. We'll never have a valid response
- // without any fields so we can unconditionally add a comma after each.
- buf.WriteString(fmt.Sprintf("%s: %s, ", mk, mv))
- }
-
- // Add the rest, without the first '{'
- buf.Write(j[1:])
-
- return buf.Bytes(), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/main.go b/vendor/github.com/hashicorp/vault/main.go
deleted file mode 100644
index 6cd34fe..0000000
--- a/vendor/github.com/hashicorp/vault/main.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package main // import "github.com/hashicorp/vault"
-
-import (
- "os"
-
- "github.com/hashicorp/vault/cli"
-)
-
-func main() {
- os.Exit(cli.Run(os.Args[1:]))
-}
diff --git a/vendor/github.com/hashicorp/vault/main_test.go b/vendor/github.com/hashicorp/vault/main_test.go
deleted file mode 100644
index 4c4c79a..0000000
--- a/vendor/github.com/hashicorp/vault/main_test.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package main // import "github.com/hashicorp/vault"
-
-// This file is intentionally empty to force early versions of Go
-// to test compilation for tests.
diff --git a/vendor/github.com/hashicorp/vault/make.bat b/vendor/github.com/hashicorp/vault/make.bat
deleted file mode 100644
index 34adbfd..0000000
--- a/vendor/github.com/hashicorp/vault/make.bat
+++ /dev/null
@@ -1,107 +0,0 @@
-@echo off
-setlocal
-
-set _EXITCODE=0
-
-REM If no target is provided, default to test.
-if [%1]==[] goto test
-
-set _TARGETS=bin,dev,generate,test,testacc,testrace,vet
-
-REM Run target.
-for %%a in (%_TARGETS%) do (if x%1==x%%a goto %%a)
-goto usage
-
-REM bin generates the releaseable binaries for Vault
-:bin
- call :generate
- call .\scripts\windows\build.bat "%CD%"
- goto :eof
-
-REM dev creates binaries for testing Vault locally. These are put
-REM into ./bin/ as well as %GOPATH%/bin
-:dev
- call :generate
- call .\scripts\windows\build.bat "%CD%" VAULT_DEV
- goto :eof
-
-REM generate runs `go generate` to build the dynamically generated
-REM source files.
-:generate
- go list ./... | findstr /v vendor | go generate
- goto :eof
-
-REM test runs the unit tests and vets the code.
-:test
- call :testsetup
- go test %_TEST% %TESTARGS% -timeout=30s -parallel=4
- call :setMaxExitCode %ERRORLEVEL%
- echo.
- goto vet
-
-REM testacc runs acceptance tests.
-:testacc
- call :testsetup
- if x%_TEST% == x./... goto testacc_fail
- if x%_TEST% == x.\... goto testacc_fail
- set VAULT_ACC=1
- go test %_TEST% -v %TESTARGS% -timeout 45m
- exit /b %ERRORLEVEL%
-:testacc_fail
- echo ERROR: Set %%TEST%% to a specific package.
- exit /b 1
-
-REM testrace runs the race checker.
-:testrace
- call :testsetup
- go test -race %_TEST% %TESTARGS%
- exit /b %ERRORLEVEL%
-
-REM testsetup calls `go generate` and defines the variables VAULT_ACC
-REM and _TEST. VAULT_ACC is always cleared. _TEST defaults to the value
-REM of the TEST environment variable, provided that TEST is defined,
-REM otherwise _TEST it is set to "./...".
-:testsetup
- call :generate
- set VAULT_ACC=
- set _TEST=./...
- if defined TEST set _TEST=%TEST%
- goto :eof
-
-REM vet runs the Go source code static analysis tool `vet` to find
-REM any common errors.
-:vet
- set _VETARGS=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
- if defined VETARGS set _VETARGS=%VETARGS%
-
- go tool vet 2>nul
- if %ERRORLEVEL% equ 3 go get golang.org/x/tools/cmd/vet
-
- set _vetExitCode=0
- set _VAULT_PKG_DIRS=%TEMP%\vault-pkg-dirs.txt
-
- go list -f {{.Dir}} ./... | findstr /v vendor >"%_VAULT_PKG_DIRS%"
- REM Skip the first row, which is the main vault package (.*github.com/hashicorp/vault$)
- for /f "delims= skip=1" %%d in ("%_VAULT_PKG_DIRS%") do (
- go tool vet %_VETARGS% "%%d"
- if ERRORLEVEL 1 set _vetExitCode=1
- call :setMaxExitCode %_vetExitCode%
- )
- del /f "%_VAULT_PKG_DIRS%" 2>NUL
- if %_vetExitCode% equ 0 exit /b %_EXITCODE%
- echo.
- echo Vet found suspicious constructs. Please check the reported constructs
- echo and fix them if necessary before submitting the code for reviewal.
- exit /b %_EXITCODE%
-
-:setMaxExitCode
- if %1 gtr %_EXITCODE% set _EXITCODE=%1
- goto :eof
-
-:usage
- echo usage: make [target]
- echo.
- echo target is in {%_TARGETS%}.
- echo target defaults to test if none is provided.
- exit /b 2
- goto :eof
diff --git a/vendor/github.com/hashicorp/vault/meta/meta.go b/vendor/github.com/hashicorp/vault/meta/meta.go
deleted file mode 100644
index a81cbde..0000000
--- a/vendor/github.com/hashicorp/vault/meta/meta.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package meta
-
-import (
- "bufio"
- "flag"
- "io"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/command/token"
- "github.com/mitchellh/cli"
-)
-
-// FlagSetFlags is an enum to define what flags are present in the
-// default FlagSet returned by Meta.FlagSet.
-type FlagSetFlags uint
-
-type TokenHelperFunc func() (token.TokenHelper, error)
-
-const (
- FlagSetNone FlagSetFlags = 0
- FlagSetServer FlagSetFlags = 1 << iota
- FlagSetDefault = FlagSetServer
-)
-
-var (
- additionalOptionsUsage = func() string {
- return `
- -wrap-ttl="" Indicates that the response should be wrapped in a
- cubbyhole token with the requested TTL. The response
- can be fetched by calling the "sys/wrapping/unwrap"
- endpoint, passing in the wrapping token's ID. This
- is a numeric string with an optional suffix
- "s", "m", or "h"; if no suffix is specified it will
- be parsed as seconds. May also be specified via
- VAULT_WRAP_TTL.
-`
- }
-)
-
-// Meta contains the meta-options and functionality that nearly every
-// Vault command inherits.
-type Meta struct {
- ClientToken string
- Ui cli.Ui
-
- // The things below can be set, but aren't common
- ForceAddress string // Address to force for API clients
-
- // These are set by the command line flags.
- flagAddress string
- flagCACert string
- flagCAPath string
- flagClientCert string
- flagClientKey string
- flagWrapTTL string
- flagInsecure bool
-
- // Queried if no token can be found
- TokenHelper TokenHelperFunc
-}
-
-func (m *Meta) DefaultWrappingLookupFunc(operation, path string) string {
- if m.flagWrapTTL != "" {
- return m.flagWrapTTL
- }
-
- return api.DefaultWrappingLookupFunc(operation, path)
-}
-
-// Client returns the API client to a Vault server given the configured
-// flag settings for this command.
-func (m *Meta) Client() (*api.Client, error) {
- config := api.DefaultConfig()
-
- err := config.ReadEnvironment()
- if err != nil {
- return nil, errwrap.Wrapf("error reading environment: {{err}}", err)
- }
-
- if m.flagAddress != "" {
- config.Address = m.flagAddress
- }
- if m.ForceAddress != "" {
- config.Address = m.ForceAddress
- }
- // If we need custom TLS configuration, then set it
- if m.flagCACert != "" || m.flagCAPath != "" || m.flagClientCert != "" || m.flagClientKey != "" || m.flagInsecure {
- t := &api.TLSConfig{
- CACert: m.flagCACert,
- CAPath: m.flagCAPath,
- ClientCert: m.flagClientCert,
- ClientKey: m.flagClientKey,
- TLSServerName: "",
- Insecure: m.flagInsecure,
- }
- config.ConfigureTLS(t)
- }
-
- // Build the client
- client, err := api.NewClient(config)
- if err != nil {
- return nil, err
- }
-
- client.SetWrappingLookupFunc(m.DefaultWrappingLookupFunc)
-
- // If we have a token directly, then set that
- token := m.ClientToken
-
- // Try to set the token to what is already stored
- if token == "" {
- token = client.Token()
- }
-
- // If we don't have a token, check the token helper
- if token == "" {
- if m.TokenHelper != nil {
- // If we have a token, then set that
- tokenHelper, err := m.TokenHelper()
- if err != nil {
- return nil, err
- }
- token, err = tokenHelper.Get()
- if err != nil {
- return nil, err
- }
- }
- }
-
- // Set the token
- if token != "" {
- client.SetToken(token)
- }
-
- return client, nil
-}
-
-// FlagSet returns a FlagSet with the common flags that every
-// command implements. The exact behavior of FlagSet can be configured
-// using the flags as the second parameter, for example to disable
-// server settings on the commands that don't talk to a server.
-func (m *Meta) FlagSet(n string, fs FlagSetFlags) *flag.FlagSet {
- f := flag.NewFlagSet(n, flag.ContinueOnError)
-
- // FlagSetServer tells us to enable the settings for selecting
- // the server information.
- if fs&FlagSetServer != 0 {
- f.StringVar(&m.flagAddress, "address", "", "")
- f.StringVar(&m.flagCACert, "ca-cert", "", "")
- f.StringVar(&m.flagCAPath, "ca-path", "", "")
- f.StringVar(&m.flagClientCert, "client-cert", "", "")
- f.StringVar(&m.flagClientKey, "client-key", "", "")
- f.StringVar(&m.flagWrapTTL, "wrap-ttl", "", "")
- f.BoolVar(&m.flagInsecure, "insecure", false, "")
- f.BoolVar(&m.flagInsecure, "tls-skip-verify", false, "")
- }
-
- // Create an io.Writer that writes to our Ui properly for errors.
- // This is kind of a hack, but it does the job. Basically: create
- // a pipe, use a scanner to break it into lines, and output each line
- // to the UI. Do this forever.
- errR, errW := io.Pipe()
- errScanner := bufio.NewScanner(errR)
- go func() {
- for errScanner.Scan() {
- m.Ui.Error(errScanner.Text())
- }
- }()
- f.SetOutput(errW)
-
- return f
-}
-
-// GeneralOptionsUsage returns the usage documentation for commonly
-// available options
-func GeneralOptionsUsage() string {
- general := `
- -address=addr The address of the Vault server.
- Overrides the VAULT_ADDR environment variable if set.
-
- -ca-cert=path Path to a PEM encoded CA cert file to use to
- verify the Vault server SSL certificate.
- Overrides the VAULT_CACERT environment variable if set.
-
- -ca-path=path Path to a directory of PEM encoded CA cert files
- to verify the Vault server SSL certificate. If both
- -ca-cert and -ca-path are specified, -ca-cert is used.
- Overrides the VAULT_CAPATH environment variable if set.
-
- -client-cert=path Path to a PEM encoded client certificate for TLS
- authentication to the Vault server. Must also specify
- -client-key. Overrides the VAULT_CLIENT_CERT
- environment variable if set.
-
- -client-key=path Path to an unencrypted PEM encoded private key
- matching the client certificate from -client-cert.
- Overrides the VAULT_CLIENT_KEY environment variable
- if set.
-
- -tls-skip-verify Do not verify TLS certificate. This is highly
- not recommended. Verification will also be skipped
- if VAULT_SKIP_VERIFY is set.
-`
-
- general += additionalOptionsUsage()
- return general
-}
diff --git a/vendor/github.com/hashicorp/vault/meta/meta_test.go b/vendor/github.com/hashicorp/vault/meta/meta_test.go
deleted file mode 100644
index 1ef9c13..0000000
--- a/vendor/github.com/hashicorp/vault/meta/meta_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package meta
-
-import (
- "flag"
- "reflect"
- "sort"
- "testing"
-)
-
-func TestFlagSet(t *testing.T) {
- cases := []struct {
- Flags FlagSetFlags
- Expected []string
- }{
- {
- FlagSetNone,
- []string{},
- },
- {
- FlagSetServer,
- []string{"address", "ca-cert", "ca-path", "client-cert", "client-key", "insecure", "tls-skip-verify", "wrap-ttl"},
- },
- }
-
- for i, tc := range cases {
- var m Meta
- fs := m.FlagSet("foo", tc.Flags)
-
- actual := make([]string, 0, 0)
- fs.VisitAll(func(f *flag.Flag) {
- actual = append(actual, f.Name)
- })
- sort.Strings(actual)
- sort.Strings(tc.Expected)
-
- if !reflect.DeepEqual(actual, tc.Expected) {
- t.Fatalf("%d: flags: %#v\n\nExpected: %#v\nGot: %#v",
- i, tc.Flags, tc.Expected, actual)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/azure/azure.go b/vendor/github.com/hashicorp/vault/physical/azure/azure.go
deleted file mode 100644
index f938ae4..0000000
--- a/vendor/github.com/hashicorp/vault/physical/azure/azure.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package azure
-
-import (
- "encoding/base64"
- "fmt"
- "io/ioutil"
- "os"
- "sort"
- "strconv"
- "strings"
- "time"
-
- storage "github.com/Azure/azure-sdk-for-go/storage"
- log "github.com/mgutz/logxi/v1"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
- cleanhttp "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/physical"
-)
-
-// MaxBlobSize at this time
-var MaxBlobSize = 1024 * 1024 * 4
-
-// AzureBackend is a physical backend that stores data
-// within an Azure blob container.
-type AzureBackend struct {
- container *storage.Container
- logger log.Logger
- permitPool *physical.PermitPool
-}
-
-// NewAzureBackend constructs an Azure backend using a pre-existing
-// bucket. Credentials can be provided to the backend, sourced
-// from the environment, AWS credential files or by IAM role.
-func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- name := os.Getenv("AZURE_BLOB_CONTAINER")
- if name == "" {
- name = conf["container"]
- if name == "" {
- return nil, fmt.Errorf("'container' must be set")
- }
- }
-
- accountName := os.Getenv("AZURE_ACCOUNT_NAME")
- if accountName == "" {
- accountName = conf["accountName"]
- if accountName == "" {
- return nil, fmt.Errorf("'accountName' must be set")
- }
- }
-
- accountKey := os.Getenv("AZURE_ACCOUNT_KEY")
- if accountKey == "" {
- accountKey = conf["accountKey"]
- if accountKey == "" {
- return nil, fmt.Errorf("'accountKey' must be set")
- }
- }
-
- client, err := storage.NewBasicClient(accountName, accountKey)
- if err != nil {
- return nil, fmt.Errorf("failed to create Azure client: %v", err)
- }
- client.HTTPClient = cleanhttp.DefaultPooledClient()
-
- blobClient := client.GetBlobService()
- container := blobClient.GetContainerReference(name)
- _, err = container.CreateIfNotExists(&storage.CreateContainerOptions{
- Access: storage.ContainerAccessTypePrivate,
- })
- if err != nil {
- return nil, fmt.Errorf("failed to create %q container: %v", name, err)
- }
-
- maxParStr, ok := conf["max_parallel"]
- var maxParInt int
- if ok {
- maxParInt, err = strconv.Atoi(maxParStr)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
- }
- if logger.IsDebug() {
- logger.Debug("azure: max_parallel set", "max_parallel", maxParInt)
- }
- }
-
- a := &AzureBackend{
- container: container,
- logger: logger,
- permitPool: physical.NewPermitPool(maxParInt),
- }
- return a, nil
-}
-
-// Put is used to insert or update an entry
-func (a *AzureBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"azure", "put"}, time.Now())
-
- if len(entry.Value) >= MaxBlobSize {
- return fmt.Errorf("value is bigger than the current supported limit of 4MBytes")
- }
-
- blockID := base64.StdEncoding.EncodeToString([]byte("AAAA"))
- blocks := make([]storage.Block, 1)
- blocks[0] = storage.Block{ID: blockID, Status: storage.BlockStatusLatest}
-
- a.permitPool.Acquire()
- defer a.permitPool.Release()
-
- blob := &storage.Blob{
- Container: a.container,
- Name: entry.Key,
- }
- if err := blob.PutBlock(blockID, entry.Value, nil); err != nil {
- return err
- }
-
- return blob.PutBlockList(blocks, nil)
-}
-
-// Get is used to fetch an entry
-func (a *AzureBackend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"azure", "get"}, time.Now())
-
- a.permitPool.Acquire()
- defer a.permitPool.Release()
-
- blob := &storage.Blob{
- Container: a.container,
- Name: key,
- }
- exists, err := blob.Exists()
- if err != nil {
- return nil, err
- }
- if !exists {
- return nil, nil
- }
-
- reader, err := blob.Get(nil)
- if err != nil {
- return nil, err
- }
- defer reader.Close()
- data, err := ioutil.ReadAll(reader)
-
- ent := &physical.Entry{
- Key: key,
- Value: data,
- }
-
- return ent, err
-}
-
-// Delete is used to permanently delete an entry
-func (a *AzureBackend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"azure", "delete"}, time.Now())
-
- blob := &storage.Blob{
- Container: a.container,
- Name: key,
- }
-
- a.permitPool.Acquire()
- defer a.permitPool.Release()
-
- _, err := blob.DeleteIfExists(nil)
- return err
-}
-
-// List is used to list all the keys under a given
-// prefix, up to the next prefix.
-func (a *AzureBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"azure", "list"}, time.Now())
-
- a.permitPool.Acquire()
- list, err := a.container.ListBlobs(storage.ListBlobsParameters{Prefix: prefix})
- if err != nil {
- // Break early.
- a.permitPool.Release()
- return nil, err
- }
- a.permitPool.Release()
-
- keys := []string{}
- for _, blob := range list.Blobs {
- key := strings.TrimPrefix(blob.Name, prefix)
- if i := strings.Index(key, "/"); i == -1 {
- keys = append(keys, key)
- } else {
- keys = strutil.AppendIfMissing(keys, key[:i+1])
- }
- }
-
- sort.Strings(keys)
- return keys, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/azure/azure_test.go b/vendor/github.com/hashicorp/vault/physical/azure/azure_test.go
deleted file mode 100644
index eb0c510..0000000
--- a/vendor/github.com/hashicorp/vault/physical/azure/azure_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package azure
-
-import (
- "fmt"
- "os"
- "testing"
- "time"
-
- cleanhttp "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- storage "github.com/Azure/azure-sdk-for-go/storage"
-)
-
-func TestAzureBackend(t *testing.T) {
- if os.Getenv("AZURE_ACCOUNT_NAME") == "" ||
- os.Getenv("AZURE_ACCOUNT_KEY") == "" {
- t.SkipNow()
- }
-
- accountName := os.Getenv("AZURE_ACCOUNT_NAME")
- accountKey := os.Getenv("AZURE_ACCOUNT_KEY")
-
- ts := time.Now().UnixNano()
- name := fmt.Sprintf("vault-test-%d", ts)
-
- cleanupClient, _ := storage.NewBasicClient(accountName, accountKey)
- cleanupClient.HTTPClient = cleanhttp.DefaultPooledClient()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- backend, err := NewAzureBackend(map[string]string{
- "container": name,
- "accountName": accountName,
- "accountKey": accountKey,
- }, logger)
-
- defer func() {
- blobService := cleanupClient.GetBlobService()
- container := blobService.GetContainerReference(name)
- container.DeleteIfExists(nil)
- }()
-
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- physical.ExerciseBackend(t, backend)
- physical.ExerciseBackend_ListPrefix(t, backend)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/cache.go b/vendor/github.com/hashicorp/vault/physical/cache.go
deleted file mode 100644
index fc44d09..0000000
--- a/vendor/github.com/hashicorp/vault/physical/cache.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package physical
-
-import (
- "strings"
-
- "github.com/hashicorp/golang-lru"
- "github.com/hashicorp/vault/helper/locksutil"
- log "github.com/mgutz/logxi/v1"
-)
-
-const (
- // DefaultCacheSize is used if no cache size is specified for NewCache
- DefaultCacheSize = 32 * 1024
-)
-
-// Cache is used to wrap an underlying physical backend
-// and provide an LRU cache layer on top. Most of the reads done by
-// Vault are for policy objects so there is a large read reduction
-// by using a simple write-through cache.
-type Cache struct {
- backend Backend
- lru *lru.TwoQueueCache
- locks []*locksutil.LockEntry
- logger log.Logger
-}
-
-// TransactionalCache is a Cache that wraps the physical that is transactional
-type TransactionalCache struct {
- *Cache
- Transactional
-}
-
-// NewCache returns a physical cache of the given size.
-// If no size is provided, the default size is used.
-func NewCache(b Backend, size int, logger log.Logger) *Cache {
- if size <= 0 {
- size = DefaultCacheSize
- }
- if logger.IsTrace() {
- logger.Trace("physical/cache: creating LRU cache", "size", size)
- }
- cache, _ := lru.New2Q(size)
- c := &Cache{
- backend: b,
- lru: cache,
- locks: locksutil.CreateLocks(),
- logger: logger,
- }
-
- return c
-}
-
-func NewTransactionalCache(b Backend, size int, logger log.Logger) *TransactionalCache {
- c := &TransactionalCache{
- Cache: NewCache(b, size, logger),
- Transactional: b.(Transactional),
- }
- return c
-}
-
-// Purge is used to clear the cache
-func (c *Cache) Purge() {
- // Lock the world
- for _, lock := range c.locks {
- lock.Lock()
- defer lock.Unlock()
- }
-
- c.lru.Purge()
-}
-
-func (c *Cache) Put(entry *Entry) error {
- lock := locksutil.LockForKey(c.locks, entry.Key)
- lock.Lock()
- defer lock.Unlock()
-
- err := c.backend.Put(entry)
- if err == nil && !strings.HasPrefix(entry.Key, "core/") {
- c.lru.Add(entry.Key, entry)
- }
- return err
-}
-
-func (c *Cache) Get(key string) (*Entry, error) {
- lock := locksutil.LockForKey(c.locks, key)
- lock.RLock()
- defer lock.RUnlock()
-
- // We do NOT cache negative results for keys in the 'core/' prefix
- // otherwise we risk certain race conditions upstream. The primary issue is
- // with the HA mode, we could potentially negatively cache the leader entry
- // and cause leader discovery to fail.
- if strings.HasPrefix(key, "core/") {
- return c.backend.Get(key)
- }
-
- // Check the LRU first
- if raw, ok := c.lru.Get(key); ok {
- if raw == nil {
- return nil, nil
- } else {
- return raw.(*Entry), nil
- }
- }
-
- // Read from the underlying backend
- ent, err := c.backend.Get(key)
- if err != nil {
- return nil, err
- }
-
- // Cache the result
- if ent != nil {
- c.lru.Add(key, ent)
- }
-
- return ent, nil
-}
-
-func (c *Cache) Delete(key string) error {
- lock := locksutil.LockForKey(c.locks, key)
- lock.Lock()
- defer lock.Unlock()
-
- err := c.backend.Delete(key)
- if err == nil && !strings.HasPrefix(key, "core/") {
- c.lru.Remove(key)
- }
- return err
-}
-
-func (c *Cache) List(prefix string) ([]string, error) {
- // Always pass-through as this would be difficult to cache. For the same
- // reason we don't lock as we can't reasonably know which locks to readlock
- // ahead of time.
- return c.backend.List(prefix)
-}
-
-func (c *TransactionalCache) Transaction(txns []TxnEntry) error {
- // Lock the world
- for _, lock := range c.locks {
- lock.Lock()
- defer lock.Unlock()
- }
-
- if err := c.Transactional.Transaction(txns); err != nil {
- return err
- }
-
- for _, txn := range txns {
- switch txn.Operation {
- case PutOperation:
- c.lru.Add(txn.Entry.Key, txn.Entry)
- case DeleteOperation:
- c.lru.Remove(txn.Entry.Key)
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra.go b/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra.go
deleted file mode 100644
index 493e156..0000000
--- a/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package cassandra
-
-import (
- "crypto/tls"
- "fmt"
- "io/ioutil"
- "net"
- "strconv"
- "strings"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/armon/go-metrics"
- "github.com/gocql/gocql"
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/physical"
-)
-
-// CassandraBackend is a physical backend that stores data in Cassandra.
-type CassandraBackend struct {
- sess *gocql.Session
- table string
-
- logger log.Logger
-}
-
-// NewCassandraBackend constructs a Cassandra backend using a pre-existing
-// keyspace and table.
-func NewCassandraBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- splitArray := func(v string) []string {
- return strings.FieldsFunc(v, func(r rune) bool {
- return r == ','
- })
- }
-
- var (
- hosts = splitArray(conf["hosts"])
- port = 9042
- explicitPort = false
- keyspace = conf["keyspace"]
- table = conf["table"]
- consistency = gocql.LocalQuorum
- )
-
- if len(hosts) == 0 {
- hosts = []string{"localhost"}
- }
- for i, hp := range hosts {
- h, ps, err := net.SplitHostPort(hp)
- if err != nil {
- continue
- }
- p, err := strconv.Atoi(ps)
- if err != nil {
- return nil, err
- }
-
- if explicitPort && p != port {
- return nil, fmt.Errorf("all hosts must have the same port")
- }
- hosts[i], port = h, p
- explicitPort = true
- }
-
- if keyspace == "" {
- keyspace = "vault"
- }
- if table == "" {
- table = "entries"
- }
- if cs, ok := conf["consistency"]; ok {
- switch cs {
- case "ANY":
- consistency = gocql.Any
- case "ONE":
- consistency = gocql.One
- case "TWO":
- consistency = gocql.Two
- case "THREE":
- consistency = gocql.Three
- case "QUORUM":
- consistency = gocql.Quorum
- case "ALL":
- consistency = gocql.All
- case "LOCAL_QUORUM":
- consistency = gocql.LocalQuorum
- case "EACH_QUORUM":
- consistency = gocql.EachQuorum
- case "LOCAL_ONE":
- consistency = gocql.LocalOne
- default:
- return nil, fmt.Errorf("'consistency' must be one of {ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_QUORUM, EACH_QUORUM, LOCAL_ONE}")
- }
- }
-
- connectStart := time.Now()
- cluster := gocql.NewCluster(hosts...)
- cluster.Port = port
- cluster.Keyspace = keyspace
-
- cluster.ProtoVersion = 2
- if protoVersionStr, ok := conf["protocol_version"]; ok {
- protoVersion, err := strconv.Atoi(protoVersionStr)
- if err != nil {
- return nil, fmt.Errorf("'protocol_version' must be an integer")
- }
- cluster.ProtoVersion = protoVersion
- }
-
- if username, ok := conf["username"]; ok {
- if cluster.ProtoVersion < 2 {
- return nil, fmt.Errorf("Authentication is not supported with protocol version < 2")
- }
- authenticator := gocql.PasswordAuthenticator{Username: username}
- if password, ok := conf["password"]; ok {
- authenticator.Password = password
- }
- cluster.Authenticator = authenticator
- }
-
- if connTimeoutStr, ok := conf["connection_timeout"]; ok {
- connectionTimeout, err := strconv.Atoi(connTimeoutStr)
- if err != nil {
- return nil, fmt.Errorf("'connection_timeout' must be an integer")
- }
- cluster.Timeout = time.Duration(connectionTimeout) * time.Second
- }
-
- if err := setupCassandraTLS(conf, cluster); err != nil {
- return nil, err
- }
-
- sess, err := cluster.CreateSession()
- if err != nil {
- return nil, err
- }
- metrics.MeasureSince([]string{"cassandra", "connect"}, connectStart)
- sess.SetConsistency(consistency)
-
- impl := &CassandraBackend{
- sess: sess,
- table: table,
- logger: logger}
- return impl, nil
-}
-
-func setupCassandraTLS(conf map[string]string, cluster *gocql.ClusterConfig) error {
- tlsOnStr, ok := conf["tls"]
- if !ok {
- return nil
- }
-
- tlsOn, err := strconv.Atoi(tlsOnStr)
- if err != nil {
- return fmt.Errorf("'tls' must be an integer (0 or 1)")
- }
-
- if tlsOn == 0 {
- return nil
- }
-
- var tlsConfig = &tls.Config{}
- if pemBundlePath, ok := conf["pem_bundle_file"]; ok {
- pemBundleData, err := ioutil.ReadFile(pemBundlePath)
- if err != nil {
- return fmt.Errorf("Error reading pem bundle from %s: %v", pemBundlePath, err)
- }
- pemBundle, err := certutil.ParsePEMBundle(string(pemBundleData))
- if err != nil {
- return fmt.Errorf("Error parsing 'pem_bundle': %v", err)
- }
- tlsConfig, err = pemBundle.GetTLSConfig(certutil.TLSClient)
- if err != nil {
- return err
- }
- } else {
- if pemJSONPath, ok := conf["pem_json_file"]; ok {
- pemJSONData, err := ioutil.ReadFile(pemJSONPath)
- if err != nil {
- return fmt.Errorf("Error reading json bundle from %s: %v", pemJSONPath, err)
- }
- pemJSON, err := certutil.ParsePKIJSON([]byte(pemJSONData))
- if err != nil {
- return err
- }
- tlsConfig, err = pemJSON.GetTLSConfig(certutil.TLSClient)
- if err != nil {
- return err
- }
- }
- }
-
- if tlsSkipVerifyStr, ok := conf["tls_skip_verify"]; ok {
- tlsSkipVerify, err := strconv.Atoi(tlsSkipVerifyStr)
- if err != nil {
- return fmt.Errorf("'tls_skip_verify' must be an integer (0 or 1)")
- }
- if tlsSkipVerify == 0 {
- tlsConfig.InsecureSkipVerify = false
- } else {
- tlsConfig.InsecureSkipVerify = true
- }
- }
-
- if tlsMinVersion, ok := conf["tls_min_version"]; ok {
- switch tlsMinVersion {
- case "tls10":
- tlsConfig.MinVersion = tls.VersionTLS10
- case "tls11":
- tlsConfig.MinVersion = tls.VersionTLS11
- case "tls12":
- tlsConfig.MinVersion = tls.VersionTLS12
- default:
- return fmt.Errorf("'tls_min_version' must be one of `tls10`, `tls11` or `tls12`")
- }
- }
-
- cluster.SslOpts = &gocql.SslOptions{
- Config: tlsConfig.Clone()}
- return nil
-}
-
-// bucketName sanitises a bucket name for Cassandra
-func (c *CassandraBackend) bucketName(name string) string {
- if name == "" {
- name = "."
- }
- return strings.TrimRight(name, "/")
-}
-
-// bucket returns all the prefix buckets the key should be stored at
-func (c *CassandraBackend) buckets(key string) []string {
- vals := append([]string{""}, physical.Prefixes(key)...)
- for i, v := range vals {
- vals[i] = c.bucketName(v)
- }
- return vals
-}
-
-// bucket returns the most specific bucket for the key
-func (c *CassandraBackend) bucket(key string) string {
- bs := c.buckets(key)
- return bs[len(bs)-1]
-}
-
-// Put is used to insert or update an entry
-func (c *CassandraBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"cassandra", "put"}, time.Now())
-
- // Execute inserts to each key prefix simultaneously
- stmt := fmt.Sprintf(`INSERT INTO "%s" (bucket, key, value) VALUES (?, ?, ?)`, c.table)
- results := make(chan error)
- buckets := c.buckets(entry.Key)
- for _, _bucket := range buckets {
- go func(bucket string) {
- results <- c.sess.Query(stmt, bucket, entry.Key, entry.Value).Exec()
- }(_bucket)
- }
- for i := 0; i < len(buckets); i++ {
- if err := <-results; err != nil {
- return err
- }
- }
- return nil
-}
-
-// Get is used to fetch an entry
-func (c *CassandraBackend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"cassandra", "get"}, time.Now())
-
- v := []byte(nil)
- stmt := fmt.Sprintf(`SELECT value FROM "%s" WHERE bucket = ? AND key = ? LIMIT 1`, c.table)
- q := c.sess.Query(stmt, c.bucket(key), key)
- if err := q.Scan(&v); err != nil {
- if err == gocql.ErrNotFound {
- return nil, nil
- }
- return nil, err
- }
-
- return &physical.Entry{
- Key: key,
- Value: v,
- }, nil
-}
-
-// Delete is used to permanently delete an entry
-func (c *CassandraBackend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"cassandra", "delete"}, time.Now())
-
- stmt := fmt.Sprintf(`DELETE FROM "%s" WHERE bucket = ? AND key = ?`, c.table)
- batch := gocql.NewBatch(gocql.LoggedBatch)
- for _, bucket := range c.buckets(key) {
- batch.Entries = append(batch.Entries, gocql.BatchEntry{
- Stmt: stmt,
- Args: []interface{}{bucket, key}})
- }
- return c.sess.ExecuteBatch(batch)
-}
-
-// List is used ot list all the keys under a given
-// prefix, up to the next prefix.
-func (c *CassandraBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"cassandra", "list"}, time.Now())
-
- stmt := fmt.Sprintf(`SELECT key FROM "%s" WHERE bucket = ?`, c.table)
- q := c.sess.Query(stmt, c.bucketName(prefix))
- iter := q.Iter()
- k, keys := "", []string{}
- for iter.Scan(&k) {
- // Only return the next "component" (with a trailing slash if it has children)
- k = strings.TrimPrefix(k, prefix)
- if parts := strings.SplitN(k, "/", 2); len(parts) > 1 {
- k = parts[0] + "/"
- } else {
- k = parts[0]
- }
-
- // Deduplicate; this works because the keys are sorted
- if len(keys) > 0 && keys[len(keys)-1] == k {
- continue
- }
- keys = append(keys, k)
- }
- return keys, iter.Close()
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra_test.go b/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra_test.go
deleted file mode 100644
index 1c9b1f1..0000000
--- a/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra_test.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package cassandra
-
-import (
- "fmt"
- "os"
- "reflect"
- "strconv"
- "testing"
- "time"
-
- "github.com/gocql/gocql"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
- dockertest "gopkg.in/ory-am/dockertest.v3"
-)
-
-func TestCassandraBackend(t *testing.T) {
- if testing.Short() {
- t.Skipf("skipping in short mode")
- }
-
- cleanup, hosts := prepareCassandraTestContainer(t)
- defer cleanup()
-
- // Run vault tests
- logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewCassandraBackend(map[string]string{
- "hosts": hosts,
- "protocol_version": "3",
- }, logger)
-
- if err != nil {
- t.Fatalf("Failed to create new backend: %v", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
-
-func TestCassandraBackendBuckets(t *testing.T) {
- expectations := map[string][]string{
- "": {"."},
- "a": {"."},
- "a/b": {".", "a"},
- "a/b/c/d/e": {".", "a", "a/b", "a/b/c", "a/b/c/d"}}
-
- b := &CassandraBackend{}
- for input, expected := range expectations {
- actual := b.buckets(input)
- if !reflect.DeepEqual(actual, expected) {
- t.Errorf("bad: %v expected: %v", actual, expected)
- }
- }
-}
-
-func prepareCassandraTestContainer(t *testing.T) (func(), string) {
- if os.Getenv("CASSANDRA_HOSTS") != "" {
- return func() {}, os.Getenv("CASSANDRA_HOSTS")
- }
-
- pool, err := dockertest.NewPool("")
- if err != nil {
- t.Fatalf("cassandra: failed to connect to docker: %s", err)
- }
-
- resource, err := pool.Run("cassandra", "3.11", []string{"CASSANDRA_BROADCAST_ADDRESS=127.0.0.1"})
- if err != nil {
- t.Fatalf("cassandra: could not start container: %s", err)
- }
-
- cleanup := func() {
- pool.Purge(resource)
- }
-
- setup := func() error {
- cluster := gocql.NewCluster("127.0.0.1")
- p, _ := strconv.Atoi(resource.GetPort("9042/tcp"))
- cluster.Port = p
- cluster.Timeout = 5 * time.Second
- sess, err := cluster.CreateSession()
- if err != nil {
- return err
- }
- defer sess.Close()
-
- // Create keyspace
- q := sess.Query(`CREATE KEYSPACE "vault" WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };`)
- if err := q.Exec(); err != nil {
- t.Fatalf("could not create cassandra keyspace: %v", err)
- }
-
- // Create table
- q = sess.Query(`CREATE TABLE "vault"."entries" (
- bucket text,
- key text,
- value blob,
- PRIMARY KEY (bucket, key)
- ) WITH CLUSTERING ORDER BY (key ASC);`)
- if err := q.Exec(); err != nil {
- t.Fatalf("could not create cassandra table: %v", err)
- }
-
- return nil
- }
- if pool.Retry(setup); err != nil {
- cleanup()
- t.Fatalf("cassandra: could not setup container: %s", err)
- }
-
- return cleanup, fmt.Sprintf("127.0.0.1:%s", resource.GetPort("9042/tcp"))
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb.go b/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb.go
deleted file mode 100644
index 395c2da..0000000
--- a/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package cockroachdb
-
-import (
- "context"
- "database/sql"
- "fmt"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/armon/go-metrics"
- "github.com/cockroachdb/cockroach-go/crdb"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- // CockroachDB uses the Postgres SQL driver
- _ "github.com/lib/pq"
-)
-
-// CockroachDBBackend Backend is a physical backend that stores data
-// within a CockroachDB database.
-type CockroachDBBackend struct {
- table string
- client *sql.DB
- rawStatements map[string]string
- statements map[string]*sql.Stmt
- logger log.Logger
- permitPool *physical.PermitPool
-}
-
-// NewCockroachDBBackend constructs a CockroachDB backend using the given
-// API client, server address, credentials, and database.
-func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- // Get the CockroachDB credentials to perform read/write operations.
- connURL, ok := conf["connection_url"]
- if !ok || connURL == "" {
- return nil, fmt.Errorf("missing connection_url")
- }
-
- dbTable, ok := conf["table"]
- if !ok {
- dbTable = "vault_kv_store"
- }
-
- maxParStr, ok := conf["max_parallel"]
- var maxParInt int
- var err error
- if ok {
- maxParInt, err = strconv.Atoi(maxParStr)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
- }
- if logger.IsDebug() {
- logger.Debug("cockroachdb: max_parallel set", "max_parallel", maxParInt)
- }
- }
-
- // Create CockroachDB handle for the database.
- db, err := sql.Open("postgres", connURL)
- if err != nil {
- return nil, fmt.Errorf("failed to connect to cockroachdb: %v", err)
- }
-
- // Create the required table if it doesn't exists.
- createQuery := "CREATE TABLE IF NOT EXISTS " + dbTable +
- " (path STRING, value BYTES, PRIMARY KEY (path))"
- if _, err := db.Exec(createQuery); err != nil {
- return nil, fmt.Errorf("failed to create mysql table: %v", err)
- }
-
- // Setup the backend
- c := &CockroachDBBackend{
- table: dbTable,
- client: db,
- rawStatements: map[string]string{
- "put": "INSERT INTO " + dbTable + " VALUES($1, $2)" +
- " ON CONFLICT (path) DO " +
- " UPDATE SET (path, value) = ($1, $2)",
- "get": "SELECT value FROM " + dbTable + " WHERE path = $1",
- "delete": "DELETE FROM " + dbTable + " WHERE path = $1",
- "list": "SELECT path FROM " + dbTable + " WHERE path LIKE $1",
- },
- statements: make(map[string]*sql.Stmt),
- logger: logger,
- permitPool: physical.NewPermitPool(maxParInt),
- }
-
- // Prepare all the statements required
- for name, query := range c.rawStatements {
- if err := c.prepare(name, query); err != nil {
- return nil, err
- }
- }
- return c, nil
-}
-
-// prepare is a helper to prepare a query for future execution
-func (c *CockroachDBBackend) prepare(name, query string) error {
- stmt, err := c.client.Prepare(query)
- if err != nil {
- return fmt.Errorf("failed to prepare '%s': %v", name, err)
- }
- c.statements[name] = stmt
- return nil
-}
-
-// Put is used to insert or update an entry.
-func (c *CockroachDBBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"cockroachdb", "put"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- _, err := c.statements["put"].Exec(entry.Key, entry.Value)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Get is used to fetch and entry.
-func (c *CockroachDBBackend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"cockroachdb", "get"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- var result []byte
- err := c.statements["get"].QueryRow(key).Scan(&result)
- if err == sql.ErrNoRows {
- return nil, nil
- }
- if err != nil {
- return nil, err
- }
-
- ent := &physical.Entry{
- Key: key,
- Value: result,
- }
- return ent, nil
-}
-
-// Delete is used to permanently delete an entry
-func (c *CockroachDBBackend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"cockroachdb", "delete"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- _, err := c.statements["delete"].Exec(key)
- if err != nil {
- return err
- }
- return nil
-}
-
-// List is used to list all the keys under a given
-// prefix, up to the next prefix.
-func (c *CockroachDBBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"cockroachdb", "list"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- likePrefix := prefix + "%"
- rows, err := c.statements["list"].Query(likePrefix)
- if err != nil {
- return nil, err
- }
- defer rows.Close()
-
- var keys []string
- for rows.Next() {
- var key string
- err = rows.Scan(&key)
- if err != nil {
- return nil, fmt.Errorf("failed to scan rows: %v", err)
- }
-
- key = strings.TrimPrefix(key, prefix)
- if i := strings.Index(key, "/"); i == -1 {
- // Add objects only from the current 'folder'
- keys = append(keys, key)
- } else if i != -1 {
- // Add truncated 'folder' paths
- keys = strutil.AppendIfMissing(keys, string(key[:i+1]))
- }
- }
-
- sort.Strings(keys)
- return keys, nil
-}
-
-// Transaction is used to run multiple entries via a transaction
-func (c *CockroachDBBackend) Transaction(txns []physical.TxnEntry) error {
- defer metrics.MeasureSince([]string{"cockroachdb", "transaction"}, time.Now())
- if len(txns) == 0 {
- return nil
- }
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- return crdb.ExecuteTx(context.Background(), c.client, nil, func(tx *sql.Tx) error {
- return c.transaction(tx, txns)
- })
-}
-
-func (c *CockroachDBBackend) transaction(tx *sql.Tx, txns []physical.TxnEntry) error {
- deleteStmt, err := tx.Prepare(c.rawStatements["delete"])
- if err != nil {
- return err
- }
- putStmt, err := tx.Prepare(c.rawStatements["put"])
- if err != nil {
- return err
- }
-
- for _, op := range txns {
- switch op.Operation {
- case physical.DeleteOperation:
- _, err = deleteStmt.Exec(op.Entry.Key)
- case physical.PutOperation:
- _, err = putStmt.Exec(op.Entry.Key, op.Entry.Value)
- default:
- return fmt.Errorf("%q is not a supported transaction operation", op.Operation)
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb_test.go b/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb_test.go
deleted file mode 100644
index 35bcecf..0000000
--- a/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package cockroachdb
-
-import (
- "database/sql"
- "fmt"
- "os"
- "testing"
-
- dockertest "gopkg.in/ory-am/dockertest.v3"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- _ "github.com/lib/pq"
-)
-
-func prepareCockroachDBTestContainer(t *testing.T) (cleanup func(), retURL, tableName string) {
- tableName = os.Getenv("CR_TABLE")
- if tableName == "" {
- tableName = "vault_kv_store"
- }
- retURL = os.Getenv("CR_URL")
- if retURL != "" {
- return func() {}, retURL, tableName
- }
-
- pool, err := dockertest.NewPool("")
- if err != nil {
- t.Fatalf("Failed to connect to docker: %s", err)
- }
-
- dockerOptions := &dockertest.RunOptions{
- Repository: "cockroachdb/cockroach",
- Tag: "release-1.0",
- Cmd: []string{"start", "--insecure"},
- }
- resource, err := pool.RunWithOptions(dockerOptions)
- if err != nil {
- t.Fatalf("Could not start local CockroachDB docker container: %s", err)
- }
-
- cleanup = func() {
- err := pool.Purge(resource)
- if err != nil {
- t.Fatalf("Failed to cleanup local container: %s", err)
- }
- }
-
- retURL = fmt.Sprintf("postgresql://root@localhost:%s/?sslmode=disable", resource.GetPort("26257/tcp"))
- database := "database"
- tableName = database + ".vault_kv"
-
- // exponential backoff-retry
- if err = pool.Retry(func() error {
- var err error
- db, err := sql.Open("postgres", retURL)
- if err != nil {
- return err
- }
- _, err = db.Exec("CREATE DATABASE database")
- return err
- }); err != nil {
- cleanup()
- t.Fatalf("Could not connect to docker: %s", err)
- }
- return cleanup, retURL, tableName
-}
-
-func TestCockroachDBBackend(t *testing.T) {
- cleanup, connURL, table := prepareCockroachDBTestContainer(t)
- defer cleanup()
-
- // Run vault tests
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewCockroachDBBackend(map[string]string{
- "connection_url": connURL,
- "table": table,
- }, logger)
-
- if err != nil {
- t.Fatalf("Failed to create new backend: %v", err)
- }
-
- defer func() {
- truncate(t, b)
- }()
-
- physical.ExerciseBackend(t, b)
- truncate(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
- truncate(t, b)
- physical.ExerciseTransactionalBackend(t, b)
-}
-
-func truncate(t *testing.T, b physical.Backend) {
- crdb := b.(*CockroachDBBackend)
- _, err := crdb.client.Exec("TRUNCATE TABLE " + crdb.table)
- if err != nil {
- t.Fatalf("Failed to drop table: %v", err)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/consul/consul.go b/vendor/github.com/hashicorp/vault/physical/consul/consul.go
deleted file mode 100644
index 6c31466..0000000
--- a/vendor/github.com/hashicorp/vault/physical/consul/consul.go
+++ /dev/null
@@ -1,793 +0,0 @@
-package consul
-
-import (
- "errors"
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/http2"
-
- log "github.com/mgutz/logxi/v1"
-
- "crypto/tls"
- "crypto/x509"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/consul/api"
- "github.com/hashicorp/consul/lib"
- "github.com/hashicorp/errwrap"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/helper/tlsutil"
- "github.com/hashicorp/vault/physical"
-)
-
-const (
- // checkJitterFactor specifies the jitter factor used to stagger checks
- checkJitterFactor = 16
-
- // checkMinBuffer specifies provides a guarantee that a check will not
- // be executed too close to the TTL check timeout
- checkMinBuffer = 100 * time.Millisecond
-
- // consulRetryInterval specifies the retry duration to use when an
- // API call to the Consul agent fails.
- consulRetryInterval = 1 * time.Second
-
- // defaultCheckTimeout changes the timeout of TTL checks
- defaultCheckTimeout = 5 * time.Second
-
- // DefaultServiceName is the default Consul service name used when
- // advertising a Vault instance.
- DefaultServiceName = "vault"
-
- // reconcileTimeout is how often Vault should query Consul to detect
- // and fix any state drift.
- reconcileTimeout = 60 * time.Second
-
- // consistencyModeDefault is the configuration value used to tell
- // consul to use default consistency.
- consistencyModeDefault = "default"
-
- // consistencyModeStrong is the configuration value used to tell
- // consul to use strong consistency.
- consistencyModeStrong = "strong"
-)
-
-type notifyEvent struct{}
-
-// ConsulBackend is a physical backend that stores data at specific
-// prefix within Consul. It is used for most production situations as
-// it allows Vault to run on multiple machines in a highly-available manner.
-type ConsulBackend struct {
- path string
- logger log.Logger
- client *api.Client
- kv *api.KV
- permitPool *physical.PermitPool
- serviceLock sync.RWMutex
- redirectHost string
- redirectPort int64
- serviceName string
- serviceTags []string
- disableRegistration bool
- checkTimeout time.Duration
- consistencyMode string
-
- notifyActiveCh chan notifyEvent
- notifySealedCh chan notifyEvent
-}
-
-// NewConsulBackend constructs a Consul backend using the given API client
-// and the prefix in the KV store.
-func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- // Get the path in Consul
- path, ok := conf["path"]
- if !ok {
- path = "vault/"
- }
- if logger.IsDebug() {
- logger.Debug("physical/consul: config path set", "path", path)
- }
-
- // Ensure path is suffixed but not prefixed
- if !strings.HasSuffix(path, "/") {
- logger.Warn("physical/consul: appending trailing forward slash to path")
- path += "/"
- }
- if strings.HasPrefix(path, "/") {
- logger.Warn("physical/consul: trimming path of its forward slash")
- path = strings.TrimPrefix(path, "/")
- }
-
- // Allow admins to disable consul integration
- disableReg, ok := conf["disable_registration"]
- var disableRegistration bool
- if ok && disableReg != "" {
- b, err := strconv.ParseBool(disableReg)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing disable_registration parameter: {{err}}", err)
- }
- disableRegistration = b
- }
- if logger.IsDebug() {
- logger.Debug("physical/consul: config disable_registration set", "disable_registration", disableRegistration)
- }
-
- // Get the service name to advertise in Consul
- service, ok := conf["service"]
- if !ok {
- service = DefaultServiceName
- }
- if logger.IsDebug() {
- logger.Debug("physical/consul: config service set", "service", service)
- }
-
- // Get the additional tags to attach to the registered service name
- tags := conf["service_tags"]
-
- if logger.IsDebug() {
- logger.Debug("physical/consul: config service_tags set", "service_tags", tags)
- }
-
- checkTimeout := defaultCheckTimeout
- checkTimeoutStr, ok := conf["check_timeout"]
- if ok {
- d, err := time.ParseDuration(checkTimeoutStr)
- if err != nil {
- return nil, err
- }
-
- min, _ := lib.DurationMinusBufferDomain(d, checkMinBuffer, checkJitterFactor)
- if min < checkMinBuffer {
- return nil, fmt.Errorf("Consul check_timeout must be greater than %v", min)
- }
-
- checkTimeout = d
- if logger.IsDebug() {
- logger.Debug("physical/consul: config check_timeout set", "check_timeout", d)
- }
- }
-
- // Configure the client
- consulConf := api.DefaultConfig()
- // Set MaxIdleConnsPerHost to the number of processes used in expiration.Restore
- consulConf.Transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
-
- if addr, ok := conf["address"]; ok {
- consulConf.Address = addr
- if logger.IsDebug() {
- logger.Debug("physical/consul: config address set", "address", addr)
- }
- }
- if scheme, ok := conf["scheme"]; ok {
- consulConf.Scheme = scheme
- if logger.IsDebug() {
- logger.Debug("physical/consul: config scheme set", "scheme", scheme)
- }
- }
- if token, ok := conf["token"]; ok {
- consulConf.Token = token
- logger.Debug("physical/consul: config token set")
- }
-
- if consulConf.Scheme == "https" {
- tlsClientConfig, err := setupTLSConfig(conf)
- if err != nil {
- return nil, err
- }
-
- consulConf.Transport.TLSClientConfig = tlsClientConfig
- if err := http2.ConfigureTransport(consulConf.Transport); err != nil {
- return nil, err
- }
- logger.Debug("physical/consul: configured TLS")
- }
-
- consulConf.HttpClient = &http.Client{Transport: consulConf.Transport}
- client, err := api.NewClient(consulConf)
- if err != nil {
- return nil, errwrap.Wrapf("client setup failed: {{err}}", err)
- }
-
- maxParStr, ok := conf["max_parallel"]
- var maxParInt int
- if ok {
- maxParInt, err = strconv.Atoi(maxParStr)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
- }
- if logger.IsDebug() {
- logger.Debug("physical/consul: max_parallel set", "max_parallel", maxParInt)
- }
- }
-
- consistencyMode, ok := conf["consistency_mode"]
- if ok {
- switch consistencyMode {
- case consistencyModeDefault, consistencyModeStrong:
- default:
- return nil, fmt.Errorf("invalid consistency_mode value: %s", consistencyMode)
- }
- } else {
- consistencyMode = consistencyModeDefault
- }
-
- // Setup the backend
- c := &ConsulBackend{
- path: path,
- logger: logger,
- client: client,
- kv: client.KV(),
- permitPool: physical.NewPermitPool(maxParInt),
- serviceName: service,
- serviceTags: strutil.ParseDedupLowercaseAndSortStrings(tags, ","),
- checkTimeout: checkTimeout,
- disableRegistration: disableRegistration,
- consistencyMode: consistencyMode,
- notifyActiveCh: make(chan notifyEvent),
- notifySealedCh: make(chan notifyEvent),
- }
- return c, nil
-}
-
-func setupTLSConfig(conf map[string]string) (*tls.Config, error) {
- serverName, _, err := net.SplitHostPort(conf["address"])
- switch {
- case err == nil:
- case strings.Contains(err.Error(), "missing port"):
- serverName = conf["address"]
- default:
- return nil, err
- }
-
- insecureSkipVerify := false
- if _, ok := conf["tls_skip_verify"]; ok {
- insecureSkipVerify = true
- }
-
- tlsMinVersionStr, ok := conf["tls_min_version"]
- if !ok {
- // Set the default value
- tlsMinVersionStr = "tls12"
- }
-
- tlsMinVersion, ok := tlsutil.TLSLookup[tlsMinVersionStr]
- if !ok {
- return nil, fmt.Errorf("invalid 'tls_min_version'")
- }
-
- tlsClientConfig := &tls.Config{
- MinVersion: tlsMinVersion,
- InsecureSkipVerify: insecureSkipVerify,
- ServerName: serverName,
- }
-
- _, okCert := conf["tls_cert_file"]
- _, okKey := conf["tls_key_file"]
-
- if okCert && okKey {
- tlsCert, err := tls.LoadX509KeyPair(conf["tls_cert_file"], conf["tls_key_file"])
- if err != nil {
- return nil, fmt.Errorf("client tls setup failed: %v", err)
- }
-
- tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
- }
-
- if tlsCaFile, ok := conf["tls_ca_file"]; ok {
- caPool := x509.NewCertPool()
-
- data, err := ioutil.ReadFile(tlsCaFile)
- if err != nil {
- return nil, fmt.Errorf("failed to read CA file: %v", err)
- }
-
- if !caPool.AppendCertsFromPEM(data) {
- return nil, fmt.Errorf("failed to parse CA certificate")
- }
-
- tlsClientConfig.RootCAs = caPool
- }
-
- return tlsClientConfig, nil
-}
-
-// Used to run multiple entries via a transaction
-func (c *ConsulBackend) Transaction(txns []physical.TxnEntry) error {
- if len(txns) == 0 {
- return nil
- }
-
- ops := make([]*api.KVTxnOp, 0, len(txns))
-
- for _, op := range txns {
- cop := &api.KVTxnOp{
- Key: c.path + op.Entry.Key,
- }
- switch op.Operation {
- case physical.DeleteOperation:
- cop.Verb = api.KVDelete
- case physical.PutOperation:
- cop.Verb = api.KVSet
- cop.Value = op.Entry.Value
- default:
- return fmt.Errorf("%q is not a supported transaction operation", op.Operation)
- }
-
- ops = append(ops, cop)
- }
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- ok, resp, _, err := c.kv.Txn(ops, nil)
- if err != nil {
- return err
- }
- if ok {
- return nil
- }
-
- var retErr *multierror.Error
- for _, res := range resp.Errors {
- retErr = multierror.Append(retErr, errors.New(res.What))
- }
-
- return retErr
-}
-
-// Put is used to insert or update an entry
-func (c *ConsulBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"consul", "put"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- pair := &api.KVPair{
- Key: c.path + entry.Key,
- Value: entry.Value,
- }
-
- _, err := c.kv.Put(pair, nil)
- return err
-}
-
-// Get is used to fetch an entry
-func (c *ConsulBackend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"consul", "get"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- var queryOptions *api.QueryOptions
- if c.consistencyMode == consistencyModeStrong {
- queryOptions = &api.QueryOptions{
- RequireConsistent: true,
- }
- }
-
- pair, _, err := c.kv.Get(c.path+key, queryOptions)
- if err != nil {
- return nil, err
- }
- if pair == nil {
- return nil, nil
- }
- ent := &physical.Entry{
- Key: key,
- Value: pair.Value,
- }
- return ent, nil
-}
-
-// Delete is used to permanently delete an entry
-func (c *ConsulBackend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"consul", "delete"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- _, err := c.kv.Delete(c.path+key, nil)
- return err
-}
-
-// List is used to list all the keys under a given
-// prefix, up to the next prefix.
-func (c *ConsulBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"consul", "list"}, time.Now())
- scan := c.path + prefix
-
- // The TrimPrefix call below will not work correctly if we have "//" at the
- // end. This can happen in cases where you are e.g. listing the root of a
- // prefix in a logical backend via "/" instead of ""
- if strings.HasSuffix(scan, "//") {
- scan = scan[:len(scan)-1]
- }
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- out, _, err := c.kv.Keys(scan, "/", nil)
- for idx, val := range out {
- out[idx] = strings.TrimPrefix(val, scan)
- }
-
- return out, err
-}
-
-// Lock is used for mutual exclusion based on the given key.
-func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) {
- // Create the lock
- opts := &api.LockOptions{
- Key: c.path + key,
- Value: []byte(value),
- SessionName: "Vault Lock",
- MonitorRetries: 5,
- }
- lock, err := c.client.LockOpts(opts)
- if err != nil {
- return nil, fmt.Errorf("failed to create lock: %v", err)
- }
- cl := &ConsulLock{
- client: c.client,
- key: c.path + key,
- lock: lock,
- consistencyMode: c.consistencyMode,
- }
- return cl, nil
-}
-
-// HAEnabled indicates whether the HA functionality should be exposed.
-// Currently always returns true.
-func (c *ConsulBackend) HAEnabled() bool {
- return true
-}
-
-// DetectHostAddr is used to detect the host address by asking the Consul agent
-func (c *ConsulBackend) DetectHostAddr() (string, error) {
- agent := c.client.Agent()
- self, err := agent.Self()
- if err != nil {
- return "", err
- }
- addr, ok := self["Member"]["Addr"].(string)
- if !ok {
- return "", fmt.Errorf("Unable to convert an address to string")
- }
- return addr, nil
-}
-
-// ConsulLock is used to provide the Lock interface backed by Consul
-type ConsulLock struct {
- client *api.Client
- key string
- lock *api.Lock
- consistencyMode string
-}
-
-func (c *ConsulLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
- return c.lock.Lock(stopCh)
-}
-
-func (c *ConsulLock) Unlock() error {
- return c.lock.Unlock()
-}
-
-func (c *ConsulLock) Value() (bool, string, error) {
- kv := c.client.KV()
-
- var queryOptions *api.QueryOptions
- if c.consistencyMode == consistencyModeStrong {
- queryOptions = &api.QueryOptions{
- RequireConsistent: true,
- }
- }
-
- pair, _, err := kv.Get(c.key, queryOptions)
- if err != nil {
- return false, "", err
- }
- if pair == nil {
- return false, "", nil
- }
- held := pair.Session != ""
- value := string(pair.Value)
- return held, value, nil
-}
-
-func (c *ConsulBackend) NotifyActiveStateChange() error {
- select {
- case c.notifyActiveCh <- notifyEvent{}:
- default:
- // NOTE: If this occurs Vault's active status could be out of
- // sync with Consul until reconcileTimer expires.
- c.logger.Warn("physical/consul: Concurrent state change notify dropped")
- }
-
- return nil
-}
-
-func (c *ConsulBackend) NotifySealedStateChange() error {
- select {
- case c.notifySealedCh <- notifyEvent{}:
- default:
- // NOTE: If this occurs Vault's sealed status could be out of
- // sync with Consul until checkTimer expires.
- c.logger.Warn("physical/consul: Concurrent sealed state change notify dropped")
- }
-
- return nil
-}
-
-func (c *ConsulBackend) checkDuration() time.Duration {
- return lib.DurationMinusBuffer(c.checkTimeout, checkMinBuffer, checkJitterFactor)
-}
-
-func (c *ConsulBackend) RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh physical.ShutdownChannel, redirectAddr string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) (err error) {
- if err := c.setRedirectAddr(redirectAddr); err != nil {
- return err
- }
-
- // 'server' command will wait for the below goroutine to complete
- waitGroup.Add(1)
-
- go c.runEventDemuxer(waitGroup, shutdownCh, redirectAddr, activeFunc, sealedFunc)
-
- return nil
-}
-
-func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh physical.ShutdownChannel, redirectAddr string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) {
- // This defer statement should be executed last. So push it first.
- defer waitGroup.Done()
-
- // Fire the reconcileTimer immediately upon starting the event demuxer
- reconcileTimer := time.NewTimer(0)
- defer reconcileTimer.Stop()
-
- // Schedule the first check. Consul TTL checks are passing by
- // default, checkTimer does not need to be run immediately.
- checkTimer := time.NewTimer(c.checkDuration())
- defer checkTimer.Stop()
-
- // Use a reactor pattern to handle and dispatch events to singleton
- // goroutine handlers for execution. It is not acceptable to drop
- // inbound events from Notify*().
- //
- // goroutines are dispatched if the demuxer can acquire a lock (via
- // an atomic CAS incr) on the handler. Handlers are responsible for
- // deregistering themselves (atomic CAS decr). Handlers and the
- // demuxer share a lock to synchronize information at the beginning
- // and end of a handler's life (or after a handler wakes up from
- // sleeping during a back-off/retry).
- var shutdown bool
- var checkLock int64
- var registeredServiceID string
- var serviceRegLock int64
-
- for !shutdown {
- select {
- case <-c.notifyActiveCh:
- // Run reconcile immediately upon active state change notification
- reconcileTimer.Reset(0)
- case <-c.notifySealedCh:
- // Run check timer immediately upon a seal state change notification
- checkTimer.Reset(0)
- case <-reconcileTimer.C:
- // Unconditionally rearm the reconcileTimer
- reconcileTimer.Reset(reconcileTimeout - lib.RandomStagger(reconcileTimeout/checkJitterFactor))
-
- // Abort if service discovery is disabled or a
- // reconcile handler is already active
- if !c.disableRegistration && atomic.CompareAndSwapInt64(&serviceRegLock, 0, 1) {
- // Enter handler with serviceRegLock held
- go func() {
- defer atomic.CompareAndSwapInt64(&serviceRegLock, 1, 0)
- for !shutdown {
- serviceID, err := c.reconcileConsul(registeredServiceID, activeFunc, sealedFunc)
- if err != nil {
- if c.logger.IsWarn() {
- c.logger.Warn("physical/consul: reconcile unable to talk with Consul backend", "error", err)
- }
- time.Sleep(consulRetryInterval)
- continue
- }
-
- c.serviceLock.Lock()
- defer c.serviceLock.Unlock()
-
- registeredServiceID = serviceID
- return
- }
- }()
- }
- case <-checkTimer.C:
- checkTimer.Reset(c.checkDuration())
- // Abort if service discovery is disabled or a
- // reconcile handler is active
- if !c.disableRegistration && atomic.CompareAndSwapInt64(&checkLock, 0, 1) {
- // Enter handler with checkLock held
- go func() {
- defer atomic.CompareAndSwapInt64(&checkLock, 1, 0)
- for !shutdown {
- sealed := sealedFunc()
- if err := c.runCheck(sealed); err != nil {
- if c.logger.IsWarn() {
- c.logger.Warn("physical/consul: check unable to talk with Consul backend", "error", err)
- }
- time.Sleep(consulRetryInterval)
- continue
- }
- return
- }
- }()
- }
- case <-shutdownCh:
- c.logger.Info("physical/consul: Shutting down consul backend")
- shutdown = true
- }
- }
-
- c.serviceLock.RLock()
- defer c.serviceLock.RUnlock()
- if err := c.client.Agent().ServiceDeregister(registeredServiceID); err != nil {
- if c.logger.IsWarn() {
- c.logger.Warn("physical/consul: service deregistration failed", "error", err)
- }
- }
-}
-
-// checkID returns the ID used for a Consul Check. Assume at least a read
-// lock is held.
-func (c *ConsulBackend) checkID() string {
- return fmt.Sprintf("%s:vault-sealed-check", c.serviceID())
-}
-
-// serviceID returns the Vault ServiceID for use in Consul. Assume at least
-// a read lock is held.
-func (c *ConsulBackend) serviceID() string {
- return fmt.Sprintf("%s:%s:%d", c.serviceName, c.redirectHost, c.redirectPort)
-}
-
-// reconcileConsul queries the state of Vault Core and Consul and fixes up
-// Consul's state according to what's in Vault. reconcileConsul is called
-// without any locks held and can be run concurrently, therefore no changes
-// to ConsulBackend can be made in this method (i.e. wtb const receiver for
-// compiler enforced safety).
-func (c *ConsulBackend) reconcileConsul(registeredServiceID string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) (serviceID string, err error) {
- // Query vault Core for its current state
- active := activeFunc()
- sealed := sealedFunc()
-
- agent := c.client.Agent()
- catalog := c.client.Catalog()
-
- serviceID = c.serviceID()
-
- // Get the current state of Vault from Consul
- var currentVaultService *api.CatalogService
- if services, _, err := catalog.Service(c.serviceName, "", &api.QueryOptions{AllowStale: true}); err == nil {
- for _, service := range services {
- if serviceID == service.ServiceID {
- currentVaultService = service
- break
- }
- }
- }
-
- tags := c.fetchServiceTags(active)
-
- var reregister bool
-
- switch {
- case currentVaultService == nil, registeredServiceID == "":
- reregister = true
- default:
- switch {
- case !strutil.EquivalentSlices(currentVaultService.ServiceTags, tags):
- reregister = true
- }
- }
-
- if !reregister {
- // When re-registration is not required, return a valid serviceID
- // to avoid registration in the next cycle.
- return serviceID, nil
- }
-
- service := &api.AgentServiceRegistration{
- ID: serviceID,
- Name: c.serviceName,
- Tags: tags,
- Port: int(c.redirectPort),
- Address: c.redirectHost,
- EnableTagOverride: false,
- }
-
- checkStatus := api.HealthCritical
- if !sealed {
- checkStatus = api.HealthPassing
- }
-
- sealedCheck := &api.AgentCheckRegistration{
- ID: c.checkID(),
- Name: "Vault Sealed Status",
- Notes: "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server",
- ServiceID: serviceID,
- AgentServiceCheck: api.AgentServiceCheck{
- TTL: c.checkTimeout.String(),
- Status: checkStatus,
- },
- }
-
- if err := agent.ServiceRegister(service); err != nil {
- return "", errwrap.Wrapf(`service registration failed: {{err}}`, err)
- }
-
- if err := agent.CheckRegister(sealedCheck); err != nil {
- return serviceID, errwrap.Wrapf(`service check registration failed: {{err}}`, err)
- }
-
- return serviceID, nil
-}
-
-// runCheck immediately pushes a TTL check.
-func (c *ConsulBackend) runCheck(sealed bool) error {
- // Run a TTL check
- agent := c.client.Agent()
- if !sealed {
- return agent.PassTTL(c.checkID(), "Vault Unsealed")
- } else {
- return agent.FailTTL(c.checkID(), "Vault Sealed")
- }
-}
-
-// fetchServiceTags returns all of the relevant tags for Consul.
-func (c *ConsulBackend) fetchServiceTags(active bool) []string {
- activeTag := "standby"
- if active {
- activeTag = "active"
- }
- return append(c.serviceTags, activeTag)
-}
-
-func (c *ConsulBackend) setRedirectAddr(addr string) (err error) {
- if addr == "" {
- return fmt.Errorf("redirect address must not be empty")
- }
-
- url, err := url.Parse(addr)
- if err != nil {
- return errwrap.Wrapf(fmt.Sprintf(`failed to parse redirect URL "%v": {{err}}`, addr), err)
- }
-
- var portStr string
- c.redirectHost, portStr, err = net.SplitHostPort(url.Host)
- if err != nil {
- if url.Scheme == "http" {
- portStr = "80"
- } else if url.Scheme == "https" {
- portStr = "443"
- } else if url.Scheme == "unix" {
- portStr = "-1"
- c.redirectHost = url.Path
- } else {
- return errwrap.Wrapf(fmt.Sprintf(`failed to find a host:port in redirect address "%v": {{err}}`, url.Host), err)
- }
- }
- c.redirectPort, err = strconv.ParseInt(portStr, 10, 0)
- if err != nil || c.redirectPort < -1 || c.redirectPort > 65535 {
- return errwrap.Wrapf(fmt.Sprintf(`failed to parse valid port "%v": {{err}}`, portStr), err)
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/consul/consul_test.go b/vendor/github.com/hashicorp/vault/physical/consul/consul_test.go
deleted file mode 100644
index 4d3230c..0000000
--- a/vendor/github.com/hashicorp/vault/physical/consul/consul_test.go
+++ /dev/null
@@ -1,541 +0,0 @@
-package consul
-
-import (
- "fmt"
- "math/rand"
- "os"
- "reflect"
- "sync"
- "testing"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/hashicorp/consul/api"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/physical"
- dockertest "gopkg.in/ory-am/dockertest.v2"
-)
-
-type consulConf map[string]string
-
-var (
- addrCount int = 0
- testImagePull sync.Once
-)
-
-func testHostIP() string {
- a := addrCount
- addrCount++
- return fmt.Sprintf("127.0.0.%d", a)
-}
-
-func testConsulBackend(t *testing.T) *ConsulBackend {
- return testConsulBackendConfig(t, &consulConf{})
-}
-
-func testConsulBackendConfig(t *testing.T, conf *consulConf) *ConsulBackend {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- be, err := NewConsulBackend(*conf, logger)
- if err != nil {
- t.Fatalf("Expected Consul to initialize: %v", err)
- }
-
- c, ok := be.(*ConsulBackend)
- if !ok {
- t.Fatalf("Expected ConsulBackend")
- }
-
- return c
-}
-
-func testConsul_testConsulBackend(t *testing.T) {
- c := testConsulBackend(t)
- if c == nil {
- t.Fatalf("bad")
- }
-}
-
-func testActiveFunc(activePct float64) physical.ActiveFunction {
- return func() bool {
- var active bool
- standbyProb := rand.Float64()
- if standbyProb > activePct {
- active = true
- }
- return active
- }
-}
-
-func testSealedFunc(sealedPct float64) physical.SealedFunction {
- return func() bool {
- var sealed bool
- unsealedProb := rand.Float64()
- if unsealedProb > sealedPct {
- sealed = true
- }
- return sealed
- }
-}
-
-func TestConsul_ServiceTags(t *testing.T) {
- consulConfig := map[string]string{
- "path": "seaTech/",
- "service": "astronomy",
- "service_tags": "deadbeef, cafeefac, deadc0de, feedface",
- "redirect_addr": "http://127.0.0.2:8200",
- "check_timeout": "6s",
- "address": "127.0.0.2",
- "scheme": "https",
- "token": "deadbeef-cafeefac-deadc0de-feedface",
- "max_parallel": "4",
- "disable_registration": "false",
- }
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- be, err := NewConsulBackend(consulConfig, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- c, ok := be.(*ConsulBackend)
- if !ok {
- t.Fatalf("failed to create physical Consul backend")
- }
-
- expected := []string{"deadbeef", "cafeefac", "deadc0de", "feedface"}
- actual := c.fetchServiceTags(false)
- if !strutil.EquivalentSlices(actual, append(expected, "standby")) {
- t.Fatalf("bad: expected:%s actual:%s", append(expected, "standby"), actual)
- }
-
- actual = c.fetchServiceTags(true)
- if !strutil.EquivalentSlices(actual, append(expected, "active")) {
- t.Fatalf("bad: expected:%s actual:%s", append(expected, "active"), actual)
- }
-}
-
-func TestConsul_newConsulBackend(t *testing.T) {
- tests := []struct {
- name string
- consulConfig map[string]string
- fail bool
- redirectAddr string
- checkTimeout time.Duration
- path string
- service string
- address string
- scheme string
- token string
- max_parallel int
- disableReg bool
- consistencyMode string
- }{
- {
- name: "Valid default config",
- consulConfig: map[string]string{},
- checkTimeout: 5 * time.Second,
- redirectAddr: "http://127.0.0.1:8200",
- path: "vault/",
- service: "vault",
- address: "127.0.0.1:8500",
- scheme: "http",
- token: "",
- max_parallel: 4,
- disableReg: false,
- consistencyMode: "default",
- },
- {
- name: "Valid modified config",
- consulConfig: map[string]string{
- "path": "seaTech/",
- "service": "astronomy",
- "redirect_addr": "http://127.0.0.2:8200",
- "check_timeout": "6s",
- "address": "127.0.0.2",
- "scheme": "https",
- "token": "deadbeef-cafeefac-deadc0de-feedface",
- "max_parallel": "4",
- "disable_registration": "false",
- "consistency_mode": "strong",
- },
- checkTimeout: 6 * time.Second,
- path: "seaTech/",
- service: "astronomy",
- redirectAddr: "http://127.0.0.2:8200",
- address: "127.0.0.2",
- scheme: "https",
- token: "deadbeef-cafeefac-deadc0de-feedface",
- max_parallel: 4,
- consistencyMode: "strong",
- },
- {
- name: "check timeout too short",
- fail: true,
- consulConfig: map[string]string{
- "check_timeout": "99ms",
- },
- },
- }
-
- for _, test := range tests {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- be, err := NewConsulBackend(test.consulConfig, logger)
- if test.fail {
- if err == nil {
- t.Fatalf(`Expected config "%s" to fail`, test.name)
- } else {
- continue
- }
- } else if !test.fail && err != nil {
- t.Fatalf("Expected config %s to not fail: %v", test.name, err)
- }
-
- c, ok := be.(*ConsulBackend)
- if !ok {
- t.Fatalf("Expected ConsulBackend: %s", test.name)
- }
- c.disableRegistration = true
-
- if c.disableRegistration == false {
- addr := os.Getenv("CONSUL_HTTP_ADDR")
- if addr == "" {
- continue
- }
- }
-
- var shutdownCh physical.ShutdownChannel
- waitGroup := &sync.WaitGroup{}
- if err := c.RunServiceDiscovery(waitGroup, shutdownCh, test.redirectAddr, testActiveFunc(0.5), testSealedFunc(0.5)); err != nil {
- t.Fatalf("bad: %v", err)
- }
-
- if test.checkTimeout != c.checkTimeout {
- t.Errorf("bad: %v != %v", test.checkTimeout, c.checkTimeout)
- }
-
- if test.path != c.path {
- t.Errorf("bad: %s %v != %v", test.name, test.path, c.path)
- }
-
- if test.service != c.serviceName {
- t.Errorf("bad: %v != %v", test.service, c.serviceName)
- }
-
- if test.consistencyMode != c.consistencyMode {
- t.Errorf("bad consistency_mode value: %v != %v", test.consistencyMode, c.consistencyMode)
- }
-
- // FIXME(sean@): Unable to test max_parallel
- // if test.max_parallel != cap(c.permitPool) {
- // t.Errorf("bad: %v != %v", test.max_parallel, cap(c.permitPool))
- // }
- }
-}
-
-func TestConsul_serviceTags(t *testing.T) {
- tests := []struct {
- active bool
- tags []string
- }{
- {
- active: true,
- tags: []string{"active"},
- },
- {
- active: false,
- tags: []string{"standby"},
- },
- }
-
- c := testConsulBackend(t)
-
- for _, test := range tests {
- tags := c.fetchServiceTags(test.active)
- if !reflect.DeepEqual(tags[:], test.tags[:]) {
- t.Errorf("Bad %v: %v %v", test.active, tags, test.tags)
- }
- }
-}
-
-func TestConsul_setRedirectAddr(t *testing.T) {
- tests := []struct {
- addr string
- host string
- port int64
- pass bool
- }{
- {
- addr: "http://127.0.0.1:8200/",
- host: "127.0.0.1",
- port: 8200,
- pass: true,
- },
- {
- addr: "http://127.0.0.1:8200",
- host: "127.0.0.1",
- port: 8200,
- pass: true,
- },
- {
- addr: "https://127.0.0.1:8200",
- host: "127.0.0.1",
- port: 8200,
- pass: true,
- },
- {
- addr: "unix:///tmp/.vault.addr.sock",
- host: "/tmp/.vault.addr.sock",
- port: -1,
- pass: true,
- },
- {
- addr: "127.0.0.1:8200",
- pass: false,
- },
- {
- addr: "127.0.0.1",
- pass: false,
- },
- }
- for _, test := range tests {
- c := testConsulBackend(t)
- err := c.setRedirectAddr(test.addr)
- if test.pass {
- if err != nil {
- t.Fatalf("bad: %v", err)
- }
- } else {
- if err == nil {
- t.Fatalf("bad, expected fail")
- } else {
- continue
- }
- }
-
- if c.redirectHost != test.host {
- t.Fatalf("bad: %v != %v", c.redirectHost, test.host)
- }
-
- if c.redirectPort != test.port {
- t.Fatalf("bad: %v != %v", c.redirectPort, test.port)
- }
- }
-}
-
-func TestConsul_NotifyActiveStateChange(t *testing.T) {
- c := testConsulBackend(t)
-
- if err := c.NotifyActiveStateChange(); err != nil {
- t.Fatalf("bad: %v", err)
- }
-}
-
-func TestConsul_NotifySealedStateChange(t *testing.T) {
- c := testConsulBackend(t)
-
- if err := c.NotifySealedStateChange(); err != nil {
- t.Fatalf("bad: %v", err)
- }
-}
-
-func TestConsul_serviceID(t *testing.T) {
- passingTests := []struct {
- name string
- redirectAddr string
- serviceName string
- expected string
- }{
- {
- name: "valid host w/o slash",
- redirectAddr: "http://127.0.0.1:8200",
- serviceName: "sea-tech-astronomy",
- expected: "sea-tech-astronomy:127.0.0.1:8200",
- },
- {
- name: "valid host w/ slash",
- redirectAddr: "http://127.0.0.1:8200/",
- serviceName: "sea-tech-astronomy",
- expected: "sea-tech-astronomy:127.0.0.1:8200",
- },
- {
- name: "valid https host w/ slash",
- redirectAddr: "https://127.0.0.1:8200/",
- serviceName: "sea-tech-astronomy",
- expected: "sea-tech-astronomy:127.0.0.1:8200",
- },
- }
-
- for _, test := range passingTests {
- c := testConsulBackendConfig(t, &consulConf{
- "service": test.serviceName,
- })
-
- if err := c.setRedirectAddr(test.redirectAddr); err != nil {
- t.Fatalf("bad: %s %v", test.name, err)
- }
-
- serviceID := c.serviceID()
- if serviceID != test.expected {
- t.Fatalf("bad: %v != %v", serviceID, test.expected)
- }
- }
-}
-
-func TestConsulBackend(t *testing.T) {
- var token string
- addr := os.Getenv("CONSUL_HTTP_ADDR")
- if addr == "" {
- cid, connURL := prepareTestContainer(t)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- addr = connURL
- token = dockertest.ConsulACLMasterToken
- }
-
- conf := api.DefaultConfig()
- conf.Address = addr
- conf.Token = token
- client, err := api.NewClient(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
- defer func() {
- client.KV().DeleteTree(randPath, nil)
- }()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewConsulBackend(map[string]string{
- "address": conf.Address,
- "path": randPath,
- "max_parallel": "256",
- "token": conf.Token,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
-
-func TestConsulHABackend(t *testing.T) {
- var token string
- addr := os.Getenv("CONSUL_HTTP_ADDR")
- if addr == "" {
- cid, connURL := prepareTestContainer(t)
- if cid != "" {
- defer cleanupTestContainer(t, cid)
- }
- addr = connURL
- token = dockertest.ConsulACLMasterToken
- }
-
- conf := api.DefaultConfig()
- conf.Address = addr
- conf.Token = token
- client, err := api.NewClient(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
- defer func() {
- client.KV().DeleteTree(randPath, nil)
- }()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewConsulBackend(map[string]string{
- "address": conf.Address,
- "path": randPath,
- "max_parallel": "-1",
- "token": conf.Token,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- ha, ok := b.(physical.HABackend)
- if !ok {
- t.Fatalf("consul does not implement HABackend")
- }
- physical.ExerciseHABackend(t, ha, ha)
-
- detect, ok := b.(physical.RedirectDetect)
- if !ok {
- t.Fatalf("consul does not implement RedirectDetect")
- }
- host, err := detect.DetectHostAddr()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if host == "" {
- t.Fatalf("bad addr: %v", host)
- }
-}
-
-func prepareTestContainer(t *testing.T) (cid dockertest.ContainerID, retAddress string) {
- if os.Getenv("CONSUL_HTTP_ADDR") != "" {
- return "", os.Getenv("CONSUL_HTTP_ADDR")
- }
-
- // Without this the checks for whether the container has started seem to
- // never actually pass. There's really no reason to expose the test
- // containers, so don't.
- dockertest.BindDockerToLocalhost = "yep"
-
- testImagePull.Do(func() {
- dockertest.Pull(dockertest.ConsulImageName)
- })
-
- try := 0
- cid, connErr := dockertest.ConnectToConsul(60, 500*time.Millisecond, func(connAddress string) bool {
- try += 1
- // Build a client and verify that the credentials work
- config := api.DefaultConfig()
- config.Address = connAddress
- config.Token = dockertest.ConsulACLMasterToken
- client, err := api.NewClient(config)
- if err != nil {
- if try > 50 {
- panic(err)
- }
- return false
- }
-
- _, err = client.KV().Put(&api.KVPair{
- Key: "setuptest",
- Value: []byte("setuptest"),
- }, nil)
- if err != nil {
- if try > 50 {
- panic(err)
- }
- return false
- }
-
- retAddress = connAddress
- return true
- })
-
- if connErr != nil {
- t.Fatalf("could not connect to consul: %v", connErr)
- }
-
- return
-}
-
-func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
- err := cid.KillRemove()
- if err != nil {
- t.Fatal(err)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb.go b/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb.go
deleted file mode 100644
index e7f945f..0000000
--- a/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb.go
+++ /dev/null
@@ -1,305 +0,0 @@
-package couchdb
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "os"
- "strconv"
- "strings"
- "time"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
- cleanhttp "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-// CouchDBBackend allows the management of couchdb users
-type CouchDBBackend struct {
- logger log.Logger
- client *couchDBClient
- permitPool *physical.PermitPool
-}
-
-type couchDBClient struct {
- endpoint string
- username string
- password string
- *http.Client
-}
-
-type couchDBListItem struct {
- ID string `json:"id"`
- Key string `json:"key"`
- Value struct {
- Revision string
- } `json:"value"`
-}
-
-type couchDBList struct {
- TotalRows int `json:"total_rows"`
- Offset int `json:"offset"`
- Rows []couchDBListItem `json:"rows"`
-}
-
-func (m *couchDBClient) rev(key string) (string, error) {
- req, err := http.NewRequest("HEAD", fmt.Sprintf("%s/%s", m.endpoint, key), nil)
- if err != nil {
- return "", err
- }
- req.SetBasicAuth(m.username, m.password)
-
- resp, err := m.Client.Do(req)
- if err != nil {
- return "", err
- }
- resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- return "", nil
- }
- etag := resp.Header.Get("Etag")
- if len(etag) < 2 {
- return "", nil
- }
- return etag[1 : len(etag)-1], nil
-}
-
-func (m *couchDBClient) put(e couchDBEntry) error {
- bs, err := json.Marshal(e)
- if err != nil {
- return err
- }
-
- req, err := http.NewRequest("PUT", fmt.Sprintf("%s/%s", m.endpoint, e.ID), bytes.NewReader(bs))
- if err != nil {
- return err
- }
- req.SetBasicAuth(m.username, m.password)
- _, err = m.Client.Do(req)
-
- return err
-}
-
-func (m *couchDBClient) get(key string) (*physical.Entry, error) {
- req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", m.endpoint, url.PathEscape(key)), nil)
- if err != nil {
- return nil, err
- }
- req.SetBasicAuth(m.username, m.password)
- resp, err := m.Client.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- if resp.StatusCode == http.StatusNotFound {
- return nil, nil
- } else if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("GET returned %s", resp.Status)
- }
- bs, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
- entry := couchDBEntry{}
- if err := json.Unmarshal(bs, &entry); err != nil {
- return nil, err
- }
- return entry.Entry, nil
-}
-
-func (m *couchDBClient) list(prefix string) ([]couchDBListItem, error) {
- req, _ := http.NewRequest("GET", fmt.Sprintf("%s/_all_docs", m.endpoint), nil)
- req.SetBasicAuth(m.username, m.password)
- values := req.URL.Query()
- values.Set("skip", "0")
- values.Set("limit", "100")
- values.Set("include_docs", "false")
- if prefix != "" {
- values.Set("startkey", fmt.Sprintf("%q", prefix))
- values.Set("endkey", fmt.Sprintf("%q", prefix+"{}"))
- }
- req.URL.RawQuery = values.Encode()
-
- resp, err := m.Client.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- data, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
-
- results := couchDBList{}
- if err := json.Unmarshal(data, &results); err != nil {
- return nil, err
- }
-
- return results.Rows, nil
-}
-
-func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBackend, error) {
- endpoint := os.Getenv("COUCHDB_ENDPOINT")
- if endpoint == "" {
- endpoint = conf["endpoint"]
- }
- if endpoint == "" {
- return nil, fmt.Errorf("missing endpoint")
- }
-
- username := os.Getenv("COUCHDB_USERNAME")
- if username == "" {
- username = conf["username"]
- }
-
- password := os.Getenv("COUCHDB_PASSWORD")
- if password == "" {
- password = conf["password"]
- }
-
- maxParStr, ok := conf["max_parallel"]
- var maxParInt int
- var err error
- if ok {
- maxParInt, err = strconv.Atoi(maxParStr)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
- }
- if logger.IsDebug() {
- logger.Debug("couchdb: max_parallel set", "max_parallel", maxParInt)
- }
- }
-
- return &CouchDBBackend{
- client: &couchDBClient{
- endpoint: endpoint,
- username: username,
- password: password,
- Client: cleanhttp.DefaultPooledClient(),
- },
- logger: logger,
- permitPool: physical.NewPermitPool(maxParInt),
- }, nil
-}
-
-func NewCouchDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- return buildCouchDBBackend(conf, logger)
-}
-
-type couchDBEntry struct {
- Entry *physical.Entry `json:"entry"`
- Rev string `json:"_rev,omitempty"`
- ID string `json:"_id"`
- Deleted *bool `json:"_deleted,omitempty"`
-}
-
-// Put is used to insert or update an entry
-func (m *CouchDBBackend) Put(entry *physical.Entry) error {
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- return m.PutInternal(entry)
-}
-
-// Get is used to fetch an entry
-func (m *CouchDBBackend) Get(key string) (*physical.Entry, error) {
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- return m.GetInternal(key)
-}
-
-// Delete is used to permanently delete an entry
-func (m *CouchDBBackend) Delete(key string) error {
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- return m.DeleteInternal(key)
-}
-
-// List is used to list all the keys under a given prefix
-func (m *CouchDBBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"couchdb", "list"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- items, err := m.client.list(prefix)
- if err != nil {
- return nil, err
- }
-
- var out []string
- seen := make(map[string]interface{})
- for _, result := range items {
- trimmed := strings.TrimPrefix(result.ID, prefix)
- sep := strings.Index(trimmed, "/")
- if sep == -1 {
- out = append(out, trimmed)
- } else {
- trimmed = trimmed[:sep+1]
- if _, ok := seen[trimmed]; !ok {
- out = append(out, trimmed)
- seen[trimmed] = struct{}{}
- }
- }
- }
- return out, nil
-}
-
-// TransactionalCouchDBBackend creates a couchdb backend that forces all operations to happen
-// in serial
-type TransactionalCouchDBBackend struct {
- CouchDBBackend
-}
-
-func NewTransactionalCouchDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- backend, err := buildCouchDBBackend(conf, logger)
- if err != nil {
- return nil, err
- }
- backend.permitPool = physical.NewPermitPool(1)
-
- return &TransactionalCouchDBBackend{
- CouchDBBackend: *backend,
- }, nil
-}
-
-// GetInternal is used to fetch an entry
-func (m *CouchDBBackend) GetInternal(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"couchdb", "get"}, time.Now())
-
- return m.client.get(key)
-}
-
-// PutInternal is used to insert or update an entry
-func (m *CouchDBBackend) PutInternal(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"couchdb", "put"}, time.Now())
-
- revision, _ := m.client.rev(url.PathEscape(entry.Key))
-
- return m.client.put(couchDBEntry{
- Entry: entry,
- Rev: revision,
- ID: url.PathEscape(entry.Key),
- })
-}
-
-// DeleteInternal is used to permanently delete an entry
-func (m *CouchDBBackend) DeleteInternal(key string) error {
- defer metrics.MeasureSince([]string{"couchdb", "delete"}, time.Now())
-
- revision, _ := m.client.rev(url.PathEscape(key))
- deleted := true
- return m.client.put(couchDBEntry{
- ID: url.PathEscape(key),
- Rev: revision,
- Deleted: &deleted,
- })
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb_test.go b/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb_test.go
deleted file mode 100644
index de4d05d..0000000
--- a/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package couchdb
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "strings"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
- dockertest "gopkg.in/ory-am/dockertest.v3"
-)
-
-func TestCouchDBBackend(t *testing.T) {
- cleanup, endpoint, username, password := prepareCouchdbDBTestContainer(t)
- defer cleanup()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewCouchDBBackend(map[string]string{
- "endpoint": endpoint,
- "username": username,
- "password": password,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
-
-func TestTransactionalCouchDBBackend(t *testing.T) {
- cleanup, endpoint, username, password := prepareCouchdbDBTestContainer(t)
- defer cleanup()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewTransactionalCouchDBBackend(map[string]string{
- "endpoint": endpoint,
- "username": username,
- "password": password,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
-
-func prepareCouchdbDBTestContainer(t *testing.T) (cleanup func(), retAddress, username, password string) {
- // If environment variable is set, assume caller wants to target a real
- // DynamoDB.
- if os.Getenv("COUCHDB_ENDPOINT") != "" {
- return func() {}, os.Getenv("COUCHDB_ENDPOINT"), os.Getenv("COUCHDB_USERNAME"), os.Getenv("COUCHDB_PASSWORD")
- }
-
- pool, err := dockertest.NewPool("")
- if err != nil {
- t.Fatalf("Failed to connect to docker: %s", err)
- }
-
- resource, err := pool.Run("couchdb", "1.6", []string{})
- if err != nil {
- t.Fatalf("Could not start local DynamoDB: %s", err)
- }
-
- retAddress = "http://localhost:" + resource.GetPort("5984/tcp")
- cleanup = func() {
- err := pool.Purge(resource)
- if err != nil {
- t.Fatalf("Failed to cleanup local DynamoDB: %s", err)
- }
- }
-
- // exponential backoff-retry, because the couchDB may not be able to accept
- // connections yet
- if err := pool.Retry(func() error {
- var err error
- resp, err := http.Get(retAddress)
- if err != nil {
- return err
- }
- if resp.StatusCode != http.StatusOK {
- return fmt.Errorf("Expected couchdb to return status code 200, got (%s) instead.", resp.Status)
- }
- return nil
- }); err != nil {
- t.Fatalf("Could not connect to docker: %s", err)
- }
-
- dbName := fmt.Sprintf("vault-test-%d", time.Now().Unix())
- {
- req, err := http.NewRequest("PUT", fmt.Sprintf("%s/%s", retAddress, dbName), nil)
- if err != nil {
- t.Fatalf("Could not create create database request: %q", err)
- }
-
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Could not create database: %q", err)
- }
- defer resp.Body.Close()
- if resp.StatusCode != http.StatusCreated {
- bs, _ := ioutil.ReadAll(resp.Body)
- t.Fatalf("Failed to create database: %s %s\n", resp.Status, string(bs))
- }
- }
- {
- req, err := http.NewRequest("PUT", fmt.Sprintf("%s/_config/admins/admin", retAddress), strings.NewReader(`"admin"`))
- if err != nil {
- t.Fatalf("Could not create admin user request: %q", err)
- }
-
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Could not create admin user: %q", err)
- }
- defer resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- bs, _ := ioutil.ReadAll(resp.Body)
- t.Fatalf("Failed to create admin user: %s %s\n", resp.Status, string(bs))
- }
- }
-
- return cleanup, retAddress + "/" + dbName, "admin", "admin"
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb.go b/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb.go
deleted file mode 100644
index c0b3f3e..0000000
--- a/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb.go
+++ /dev/null
@@ -1,777 +0,0 @@
-package dynamodb
-
-import (
- "fmt"
- "math"
- "net/http"
- "os"
- pkgPath "path"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/armon/go-metrics"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
- "github.com/hashicorp/errwrap"
- cleanhttp "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/awsutil"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/physical"
-)
-
-const (
- // DefaultDynamoDBRegion is used when no region is configured
- // explicitly.
- DefaultDynamoDBRegion = "us-east-1"
- // DefaultDynamoDBTableName is used when no table name
- // is configured explicitly.
- DefaultDynamoDBTableName = "vault-dynamodb-backend"
-
- // DefaultDynamoDBReadCapacity is the default read capacity
- // that is used when none is configured explicitly.
- DefaultDynamoDBReadCapacity = 5
- // DefaultDynamoDBWriteCapacity is the default write capacity
- // that is used when none is configured explicitly.
- DefaultDynamoDBWriteCapacity = 5
-
- // DynamoDBEmptyPath is the string that is used instead of
- // empty strings when stored in DynamoDB.
- DynamoDBEmptyPath = " "
- // DynamoDBLockPrefix is the prefix used to mark DynamoDB records
- // as locks. This prefix causes them not to be returned by
- // List operations.
- DynamoDBLockPrefix = "_"
-
- // The lock TTL matches the default that Consul API uses, 15 seconds.
- DynamoDBLockTTL = 15 * time.Second
-
- // The amount of time to wait between the lock renewals
- DynamoDBLockRenewInterval = 5 * time.Second
-
- // DynamoDBLockRetryInterval is the amount of time to wait
- // if a lock fails before trying again.
- DynamoDBLockRetryInterval = time.Second
- // DynamoDBWatchRetryMax is the number of times to re-try a
- // failed watch before signaling that leadership is lost.
- DynamoDBWatchRetryMax = 5
- // DynamoDBWatchRetryInterval is the amount of time to wait
- // if a watch fails before trying again.
- DynamoDBWatchRetryInterval = 5 * time.Second
-)
-
-// DynamoDBBackend is a physical backend that stores data in
-// a DynamoDB table. It can be run in high-availability mode
-// as DynamoDB has locking capabilities.
-type DynamoDBBackend struct {
- table string
- client *dynamodb.DynamoDB
- recovery bool
- logger log.Logger
- haEnabled bool
- permitPool *physical.PermitPool
-}
-
-// DynamoDBRecord is the representation of a vault entry in
-// DynamoDB. The vault key is split up into two components
-// (Path and Key) in order to allow more efficient listings.
-type DynamoDBRecord struct {
- Path string
- Key string
- Value []byte
-}
-
-// DynamoDBLock implements a lock using an DynamoDB client.
-type DynamoDBLock struct {
- backend *DynamoDBBackend
- value, key string
- identity string
- held bool
- lock sync.Mutex
- recovery bool
- // Allow modifying the Lock durations for ease of unit testing.
- renewInterval time.Duration
- ttl time.Duration
- watchRetryInterval time.Duration
-}
-
-type DynamoDBLockRecord struct {
- Path string
- Key string
- Value []byte
- Identity []byte
- Expires int64
-}
-
-// NewDynamoDBBackend constructs a DynamoDB backend. If the
-// configured DynamoDB table does not exist, it creates it.
-func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- table := os.Getenv("AWS_DYNAMODB_TABLE")
- if table == "" {
- table = conf["table"]
- if table == "" {
- table = DefaultDynamoDBTableName
- }
- }
- readCapacityString := os.Getenv("AWS_DYNAMODB_READ_CAPACITY")
- if readCapacityString == "" {
- readCapacityString = conf["read_capacity"]
- if readCapacityString == "" {
- readCapacityString = "0"
- }
- }
- readCapacity, err := strconv.Atoi(readCapacityString)
- if err != nil {
- return nil, fmt.Errorf("invalid read capacity: %s", readCapacityString)
- }
- if readCapacity == 0 {
- readCapacity = DefaultDynamoDBReadCapacity
- }
-
- writeCapacityString := os.Getenv("AWS_DYNAMODB_WRITE_CAPACITY")
- if writeCapacityString == "" {
- writeCapacityString = conf["write_capacity"]
- if writeCapacityString == "" {
- writeCapacityString = "0"
- }
- }
- writeCapacity, err := strconv.Atoi(writeCapacityString)
- if err != nil {
- return nil, fmt.Errorf("invalid write capacity: %s", writeCapacityString)
- }
- if writeCapacity == 0 {
- writeCapacity = DefaultDynamoDBWriteCapacity
- }
-
- accessKey := os.Getenv("AWS_ACCESS_KEY_ID")
- if accessKey == "" {
- accessKey = conf["access_key"]
- }
- secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
- if secretKey == "" {
- secretKey = conf["secret_key"]
- }
- sessionToken := os.Getenv("AWS_SESSION_TOKEN")
- if sessionToken == "" {
- sessionToken = conf["session_token"]
- }
-
- endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT")
- if endpoint == "" {
- endpoint = conf["endpoint"]
- }
- region := os.Getenv("AWS_REGION")
- if region == "" {
- region = os.Getenv("AWS_DEFAULT_REGION")
- if region == "" {
- region = conf["region"]
- if region == "" {
- region = DefaultDynamoDBRegion
- }
- }
- }
-
- credsConfig := &awsutil.CredentialsConfig{
- AccessKey: accessKey,
- SecretKey: secretKey,
- SessionToken: sessionToken,
- }
- creds, err := credsConfig.GenerateCredentialChain()
- if err != nil {
- return nil, err
- }
-
- pooledTransport := cleanhttp.DefaultPooledTransport()
- pooledTransport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
-
- awsConf := aws.NewConfig().
- WithCredentials(creds).
- WithRegion(region).
- WithEndpoint(endpoint).
- WithHTTPClient(&http.Client{
- Transport: pooledTransport,
- })
- client := dynamodb.New(session.New(awsConf))
-
- if err := ensureTableExists(client, table, readCapacity, writeCapacity); err != nil {
- return nil, err
- }
-
- haEnabled := os.Getenv("DYNAMODB_HA_ENABLED")
- if haEnabled == "" {
- haEnabled = conf["ha_enabled"]
- }
- haEnabledBool, _ := strconv.ParseBool(haEnabled)
-
- recoveryMode := os.Getenv("RECOVERY_MODE")
- if recoveryMode == "" {
- recoveryMode = conf["recovery_mode"]
- }
- recoveryModeBool, _ := strconv.ParseBool(recoveryMode)
-
- maxParStr, ok := conf["max_parallel"]
- var maxParInt int
- if ok {
- maxParInt, err = strconv.Atoi(maxParStr)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
- }
- if logger.IsDebug() {
- logger.Debug("physical/dynamodb: max_parallel set", "max_parallel", maxParInt)
- }
- }
-
- return &DynamoDBBackend{
- table: table,
- client: client,
- permitPool: physical.NewPermitPool(maxParInt),
- recovery: recoveryModeBool,
- haEnabled: haEnabledBool,
- logger: logger,
- }, nil
-}
-
-// Put is used to insert or update an entry
-func (d *DynamoDBBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"dynamodb", "put"}, time.Now())
-
- record := DynamoDBRecord{
- Path: recordPathForVaultKey(entry.Key),
- Key: recordKeyForVaultKey(entry.Key),
- Value: entry.Value,
- }
- item, err := dynamodbattribute.ConvertToMap(record)
- if err != nil {
- return fmt.Errorf("could not convert prefix record to DynamoDB item: %s", err)
- }
- requests := []*dynamodb.WriteRequest{{
- PutRequest: &dynamodb.PutRequest{
- Item: item,
- },
- }}
-
- for _, prefix := range physical.Prefixes(entry.Key) {
- record = DynamoDBRecord{
- Path: recordPathForVaultKey(prefix),
- Key: fmt.Sprintf("%s/", recordKeyForVaultKey(prefix)),
- }
- item, err := dynamodbattribute.ConvertToMap(record)
- if err != nil {
- return fmt.Errorf("could not convert prefix record to DynamoDB item: %s", err)
- }
- requests = append(requests, &dynamodb.WriteRequest{
- PutRequest: &dynamodb.PutRequest{
- Item: item,
- },
- })
- }
-
- return d.batchWriteRequests(requests)
-}
-
-// Get is used to fetch an entry
-func (d *DynamoDBBackend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"dynamodb", "get"}, time.Now())
-
- d.permitPool.Acquire()
- defer d.permitPool.Release()
-
- resp, err := d.client.GetItem(&dynamodb.GetItemInput{
- TableName: aws.String(d.table),
- ConsistentRead: aws.Bool(true),
- Key: map[string]*dynamodb.AttributeValue{
- "Path": {S: aws.String(recordPathForVaultKey(key))},
- "Key": {S: aws.String(recordKeyForVaultKey(key))},
- },
- })
- if err != nil {
- return nil, err
- }
- if resp.Item == nil {
- return nil, nil
- }
-
- record := &DynamoDBRecord{}
- if err := dynamodbattribute.ConvertFromMap(resp.Item, record); err != nil {
- return nil, err
- }
-
- return &physical.Entry{
- Key: vaultKey(record),
- Value: record.Value,
- }, nil
-}
-
-// Delete is used to permanently delete an entry
-func (d *DynamoDBBackend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"dynamodb", "delete"}, time.Now())
-
- requests := []*dynamodb.WriteRequest{{
- DeleteRequest: &dynamodb.DeleteRequest{
- Key: map[string]*dynamodb.AttributeValue{
- "Path": {S: aws.String(recordPathForVaultKey(key))},
- "Key": {S: aws.String(recordKeyForVaultKey(key))},
- },
- },
- }}
-
- // clean up now empty 'folders'
- prefixes := physical.Prefixes(key)
- sort.Sort(sort.Reverse(sort.StringSlice(prefixes)))
- for _, prefix := range prefixes {
- hasChildren, err := d.hasChildren(prefix)
- if err != nil {
- return err
- }
- if !hasChildren {
- requests = append(requests, &dynamodb.WriteRequest{
- DeleteRequest: &dynamodb.DeleteRequest{
- Key: map[string]*dynamodb.AttributeValue{
- "Path": {S: aws.String(recordPathForVaultKey(prefix))},
- "Key": {S: aws.String(fmt.Sprintf("%s/", recordKeyForVaultKey(prefix)))},
- },
- },
- })
- }
- }
-
- return d.batchWriteRequests(requests)
-}
-
-// List is used to list all the keys under a given
-// prefix, up to the next prefix.
-func (d *DynamoDBBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"dynamodb", "list"}, time.Now())
-
- prefix = strings.TrimSuffix(prefix, "/")
-
- keys := []string{}
- prefix = escapeEmptyPath(prefix)
- queryInput := &dynamodb.QueryInput{
- TableName: aws.String(d.table),
- ConsistentRead: aws.Bool(true),
- KeyConditions: map[string]*dynamodb.Condition{
- "Path": {
- ComparisonOperator: aws.String("EQ"),
- AttributeValueList: []*dynamodb.AttributeValue{{
- S: aws.String(prefix),
- }},
- },
- },
- }
-
- d.permitPool.Acquire()
- defer d.permitPool.Release()
-
- err := d.client.QueryPages(queryInput, func(out *dynamodb.QueryOutput, lastPage bool) bool {
- var record DynamoDBRecord
- for _, item := range out.Items {
- dynamodbattribute.ConvertFromMap(item, &record)
- if !strings.HasPrefix(record.Key, DynamoDBLockPrefix) {
- keys = append(keys, record.Key)
- }
- }
- return !lastPage
- })
- if err != nil {
- return nil, err
- }
-
- return keys, nil
-}
-
-// hasChildren returns true if there exist items below a certain path prefix.
-// To do so, the method fetches such items from DynamoDB. If there are more
-// than one item (which is the "directory" item), there are children.
-func (d *DynamoDBBackend) hasChildren(prefix string) (bool, error) {
- prefix = strings.TrimSuffix(prefix, "/")
- prefix = escapeEmptyPath(prefix)
-
- queryInput := &dynamodb.QueryInput{
- TableName: aws.String(d.table),
- ConsistentRead: aws.Bool(true),
- KeyConditions: map[string]*dynamodb.Condition{
- "Path": {
- ComparisonOperator: aws.String("EQ"),
- AttributeValueList: []*dynamodb.AttributeValue{{
- S: aws.String(prefix),
- }},
- },
- },
- // Avoid fetching too many items from DynamoDB for performance reasons.
- // We need at least two because one is the directory item, all others
- // are children.
- Limit: aws.Int64(2),
- }
-
- d.permitPool.Acquire()
- defer d.permitPool.Release()
-
- out, err := d.client.Query(queryInput)
- if err != nil {
- return false, err
- }
- return len(out.Items) > 1, nil
-}
-
-// LockWith is used for mutual exclusion based on the given key.
-func (d *DynamoDBBackend) LockWith(key, value string) (physical.Lock, error) {
- identity, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
- return &DynamoDBLock{
- backend: d,
- key: pkgPath.Join(pkgPath.Dir(key), DynamoDBLockPrefix+pkgPath.Base(key)),
- value: value,
- identity: identity,
- recovery: d.recovery,
- renewInterval: DynamoDBLockRenewInterval,
- ttl: DynamoDBLockTTL,
- watchRetryInterval: DynamoDBWatchRetryInterval,
- }, nil
-}
-
-func (d *DynamoDBBackend) HAEnabled() bool {
- return d.haEnabled
-}
-
-// batchWriteRequests takes a list of write requests and executes them in badges
-// with a maximum size of 25 (which is the limit of BatchWriteItem requests).
-func (d *DynamoDBBackend) batchWriteRequests(requests []*dynamodb.WriteRequest) error {
- for len(requests) > 0 {
- batchSize := int(math.Min(float64(len(requests)), 25))
- batch := requests[:batchSize]
- requests = requests[batchSize:]
-
- d.permitPool.Acquire()
- _, err := d.client.BatchWriteItem(&dynamodb.BatchWriteItemInput{
- RequestItems: map[string][]*dynamodb.WriteRequest{
- d.table: batch,
- },
- })
- d.permitPool.Release()
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// Lock tries to acquire the lock by repeatedly trying to create
-// a record in the DynamoDB table. It will block until either the
-// stop channel is closed or the lock could be acquired successfully.
-// The returned channel will be closed once the lock is deleted or
-// changed in the DynamoDB table.
-func (l *DynamoDBLock) Lock(stopCh <-chan struct{}) (doneCh <-chan struct{}, retErr error) {
- l.lock.Lock()
- defer l.lock.Unlock()
- if l.held {
- return nil, fmt.Errorf("lock already held")
- }
-
- done := make(chan struct{})
- // close done channel even in case of error
- defer func() {
- if retErr != nil {
- close(done)
- }
- }()
-
- var (
- stop = make(chan struct{})
- success = make(chan struct{})
- errors = make(chan error)
- leader = make(chan struct{})
- )
- // try to acquire the lock asynchronously
- go l.tryToLock(stop, success, errors)
-
- select {
- case <-success:
- l.held = true
- // after acquiring it successfully, we must renew the lock periodically,
- // and watch the lock in order to close the leader channel
- // once it is lost.
- go l.periodicallyRenewLock(leader)
- go l.watch(leader)
- case retErr = <-errors:
- close(stop)
- return nil, retErr
- case <-stopCh:
- close(stop)
- return nil, nil
- }
-
- return leader, retErr
-}
-
-// Unlock releases the lock by deleting the lock record from the
-// DynamoDB table.
-func (l *DynamoDBLock) Unlock() error {
- l.lock.Lock()
- defer l.lock.Unlock()
- if !l.held {
- return nil
- }
-
- l.held = false
- if err := l.backend.Delete(l.key); err != nil {
- return err
- }
- return nil
-}
-
-// Value checks whether or not the lock is held by any instance of DynamoDBLock,
-// including this one, and returns the current value.
-func (l *DynamoDBLock) Value() (bool, string, error) {
- entry, err := l.backend.Get(l.key)
- if err != nil {
- return false, "", err
- }
- if entry == nil {
- return false, "", nil
- }
-
- return true, string(entry.Value), nil
-}
-
-// tryToLock tries to create a new item in DynamoDB
-// every `DynamoDBLockRetryInterval`. As long as the item
-// cannot be created (because it already exists), it will
-// be retried. If the operation fails due to an error, it
-// is sent to the errors channel.
-// When the lock could be acquired successfully, the success
-// channel is closed.
-func (l *DynamoDBLock) tryToLock(stop, success chan struct{}, errors chan error) {
- ticker := time.NewTicker(DynamoDBLockRetryInterval)
-
- for {
- select {
- case <-stop:
- ticker.Stop()
- case <-ticker.C:
- err := l.writeItem()
- if err != nil {
- if err, ok := err.(awserr.Error); ok {
- // Don't report a condition check failure, this means that the lock
- // is already being held.
- if err.Code() != dynamodb.ErrCodeConditionalCheckFailedException {
- errors <- err
- }
- } else {
- // Its not an AWS error, and is probably not transient, bail out.
- errors <- err
- return
- }
- } else {
- ticker.Stop()
- close(success)
- return
- }
- }
- }
-}
-
-func (l *DynamoDBLock) periodicallyRenewLock(done chan struct{}) {
- ticker := time.NewTicker(l.renewInterval)
- for {
- select {
- case <-ticker.C:
- l.writeItem()
- case <-done:
- ticker.Stop()
- return
- }
- }
-}
-
-// Attempts to put/update the dynamodb item using condition expressions to
-// evaluate the TTL.
-func (l *DynamoDBLock) writeItem() error {
- now := time.Now()
-
- _, err := l.backend.client.UpdateItem(&dynamodb.UpdateItemInput{
- TableName: aws.String(l.backend.table),
- Key: map[string]*dynamodb.AttributeValue{
- "Path": &dynamodb.AttributeValue{S: aws.String(recordPathForVaultKey(l.key))},
- "Key": &dynamodb.AttributeValue{S: aws.String(recordKeyForVaultKey(l.key))},
- },
- UpdateExpression: aws.String("SET #value=:value, #identity=:identity, #expires=:expires"),
- // If both key and path already exist, we can only write if
- // A. identity is equal to our identity (or the identity doesn't exist)
- // or
- // B. The ttl on the item is <= to the current time
- ConditionExpression: aws.String(
- "attribute_not_exists(#path) or " +
- "attribute_not_exists(#key) or " +
- // To work when upgrading from older versions that did not include the
- // Identity attribute, we first check if the attr doesn't exist, and if
- // it does, then we check if the identity is equal to our own.
- "(attribute_not_exists(#identity) or #identity = :identity) or " +
- "#expires <= :now",
- ),
- ExpressionAttributeNames: map[string]*string{
- "#path": aws.String("Path"),
- "#key": aws.String("Key"),
- "#identity": aws.String("Identity"),
- "#expires": aws.String("Expires"),
- "#value": aws.String("Value"),
- },
- ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
- ":identity": &dynamodb.AttributeValue{B: []byte(l.identity)},
- ":value": &dynamodb.AttributeValue{B: []byte(l.value)},
- ":now": &dynamodb.AttributeValue{N: aws.String(strconv.FormatInt(now.UnixNano(), 10))},
- ":expires": &dynamodb.AttributeValue{N: aws.String(strconv.FormatInt(now.Add(l.ttl).UnixNano(), 10))},
- },
- })
- return err
-}
-
-// watch checks whether the lock has changed in the
-// DynamoDB table and closes the leader channel if so.
-// The interval is set by `DynamoDBWatchRetryInterval`.
-// If an error occurs during the check, watch will retry
-// the operation for `DynamoDBWatchRetryMax` times and
-// close the leader channel if it can't succeed.
-func (l *DynamoDBLock) watch(lost chan struct{}) {
- retries := DynamoDBWatchRetryMax
-
- ticker := time.NewTicker(l.watchRetryInterval)
-WatchLoop:
- for {
- select {
- case <-ticker.C:
- resp, err := l.backend.client.GetItem(&dynamodb.GetItemInput{
- TableName: aws.String(l.backend.table),
- ConsistentRead: aws.Bool(true),
- Key: map[string]*dynamodb.AttributeValue{
- "Path": {S: aws.String(recordPathForVaultKey(l.key))},
- "Key": {S: aws.String(recordKeyForVaultKey(l.key))},
- },
- })
- if err != nil {
- retries--
- if retries == 0 {
- break WatchLoop
- }
- continue
- }
-
- if resp == nil {
- break WatchLoop
- }
- record := &DynamoDBLockRecord{}
- err = dynamodbattribute.UnmarshalMap(resp.Item, record)
- if err != nil || string(record.Identity) != l.identity {
- break WatchLoop
- }
- }
- }
-
- close(lost)
-}
-
-// ensureTableExists creates a DynamoDB table with a given
-// DynamoDB client. If the table already exists, it is not
-// being reconfigured.
-func ensureTableExists(client *dynamodb.DynamoDB, table string, readCapacity, writeCapacity int) error {
- _, err := client.DescribeTable(&dynamodb.DescribeTableInput{
- TableName: aws.String(table),
- })
- if awserr, ok := err.(awserr.Error); ok {
- if awserr.Code() == "ResourceNotFoundException" {
- _, err = client.CreateTable(&dynamodb.CreateTableInput{
- TableName: aws.String(table),
- ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
- ReadCapacityUnits: aws.Int64(int64(readCapacity)),
- WriteCapacityUnits: aws.Int64(int64(writeCapacity)),
- },
- KeySchema: []*dynamodb.KeySchemaElement{{
- AttributeName: aws.String("Path"),
- KeyType: aws.String("HASH"),
- }, {
- AttributeName: aws.String("Key"),
- KeyType: aws.String("RANGE"),
- }},
- AttributeDefinitions: []*dynamodb.AttributeDefinition{{
- AttributeName: aws.String("Path"),
- AttributeType: aws.String("S"),
- }, {
- AttributeName: aws.String("Key"),
- AttributeType: aws.String("S"),
- }},
- })
- if err != nil {
- return err
- }
-
- err = client.WaitUntilTableExists(&dynamodb.DescribeTableInput{
- TableName: aws.String(table),
- })
- if err != nil {
- return err
- }
- }
- }
- if err != nil {
- return err
- }
- return nil
-}
-
-// recordPathForVaultKey transforms a vault key into
-// a value suitable for the `DynamoDBRecord`'s `Path`
-// property. This path equals the the vault key without
-// its last component.
-func recordPathForVaultKey(key string) string {
- if strings.Contains(key, "/") {
- return pkgPath.Dir(key)
- }
- return DynamoDBEmptyPath
-}
-
-// recordKeyForVaultKey transforms a vault key into
-// a value suitable for the `DynamoDBRecord`'s `Key`
-// property. This path equals the the vault key's
-// last component.
-func recordKeyForVaultKey(key string) string {
- return pkgPath.Base(key)
-}
-
-// vaultKey returns the vault key for a given record
-// from the DynamoDB table. This is the combination of
-// the records Path and Key.
-func vaultKey(record *DynamoDBRecord) string {
- path := unescapeEmptyPath(record.Path)
- if path == "" {
- return record.Key
- }
- return pkgPath.Join(record.Path, record.Key)
-}
-
-// escapeEmptyPath is used to escape the root key's path
-// with a value that can be stored in DynamoDB. DynamoDB
-// does not allow values to be empty strings.
-func escapeEmptyPath(s string) string {
- if s == "" {
- return DynamoDBEmptyPath
- }
- return s
-}
-
-// unescapeEmptyPath is the opposite of `escapeEmptyPath`.
-func unescapeEmptyPath(s string) string {
- if s == DynamoDBEmptyPath {
- return ""
- }
- return s
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb_test.go b/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb_test.go
deleted file mode 100644
index 426f23f..0000000
--- a/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb_test.go
+++ /dev/null
@@ -1,266 +0,0 @@
-package dynamodb
-
-import (
- "fmt"
- "math/rand"
- "net/http"
- "os"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
- dockertest "gopkg.in/ory-am/dockertest.v3"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/dynamodb"
-)
-
-func TestDynamoDBBackend(t *testing.T) {
- cleanup, endpoint, credsProvider := prepareDynamoDBTestContainer(t)
- defer cleanup()
-
- creds, err := credsProvider.Get()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- region := os.Getenv("AWS_DEFAULT_REGION")
- if region == "" {
- region = "us-east-1"
- }
-
- conn := dynamodb.New(session.New(&aws.Config{
- Credentials: credsProvider,
- Endpoint: aws.String(endpoint),
- Region: aws.String(region),
- }))
-
- var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
- table := fmt.Sprintf("vault-dynamodb-testacc-%d", randInt)
-
- defer func() {
- conn.DeleteTable(&dynamodb.DeleteTableInput{
- TableName: aws.String(table),
- })
- }()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewDynamoDBBackend(map[string]string{
- "access_key": creds.AccessKeyID,
- "secret_key": creds.SecretAccessKey,
- "session_token": creds.SessionToken,
- "table": table,
- "region": region,
- "endpoint": endpoint,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
-
-func TestDynamoDBHABackend(t *testing.T) {
- cleanup, endpoint, credsProvider := prepareDynamoDBTestContainer(t)
- defer cleanup()
-
- creds, err := credsProvider.Get()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- region := os.Getenv("AWS_DEFAULT_REGION")
- if region == "" {
- region = "us-east-1"
- }
-
- conn := dynamodb.New(session.New(&aws.Config{
- Credentials: credsProvider,
- Endpoint: aws.String(endpoint),
- Region: aws.String(region),
- }))
-
- var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
- table := fmt.Sprintf("vault-dynamodb-testacc-%d", randInt)
-
- defer func() {
- conn.DeleteTable(&dynamodb.DeleteTableInput{
- TableName: aws.String(table),
- })
- }()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewDynamoDBBackend(map[string]string{
- "access_key": creds.AccessKeyID,
- "secret_key": creds.SecretAccessKey,
- "session_token": creds.SessionToken,
- "table": table,
- "region": region,
- "endpoint": endpoint,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- ha, ok := b.(physical.HABackend)
- if !ok {
- t.Fatalf("dynamodb does not implement HABackend")
- }
- physical.ExerciseHABackend(t, ha, ha)
- testDynamoDBLockTTL(t, ha)
-}
-
-// Similar to testHABackend, but using internal implementation details to
-// trigger the lock failure scenario by setting the lock renew period for one
-// of the locks to a higher value than the lock TTL.
-func testDynamoDBLockTTL(t *testing.T, ha physical.HABackend) {
- // Set much smaller lock times to speed up the test.
- lockTTL := time.Second * 3
- renewInterval := time.Second * 1
- watchInterval := time.Second * 1
-
- // Get the lock
- origLock, err := ha.LockWith("dynamodbttl", "bar")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- // set the first lock renew period to double the expected TTL.
- lock := origLock.(*DynamoDBLock)
- lock.renewInterval = lockTTL * 2
- lock.ttl = lockTTL
- lock.watchRetryInterval = watchInterval
-
- // Attempt to lock
- leaderCh, err := lock.Lock(nil)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if leaderCh == nil {
- t.Fatalf("failed to get leader ch")
- }
-
- // Check the value
- held, val, err := lock.Value()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !held {
- t.Fatalf("should be held")
- }
- if val != "bar" {
- t.Fatalf("bad value: %v", err)
- }
-
- // Second acquisition should succeed because the first lock should
- // not renew within the 3 sec TTL.
- origLock2, err := ha.LockWith("dynamodbttl", "baz")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- lock2 := origLock2.(*DynamoDBLock)
- lock2.renewInterval = renewInterval
- lock2.ttl = lockTTL
- lock2.watchRetryInterval = watchInterval
-
- // Cancel attempt in 6 sec so as not to block unit tests forever
- stopCh := make(chan struct{})
- time.AfterFunc(lockTTL*2, func() {
- close(stopCh)
- })
-
- // Attempt to lock should work
- leaderCh2, err := lock2.Lock(stopCh)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if leaderCh2 == nil {
- t.Fatalf("should get leader ch")
- }
-
- // Check the value
- held, val, err = lock2.Value()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !held {
- t.Fatalf("should be held")
- }
- if val != "baz" {
- t.Fatalf("bad value: %v", err)
- }
-
- // The first lock should have lost the leader channel
- leaderChClosed := false
- blocking := make(chan struct{})
- // Attempt to read from the leader or the blocking channel, which ever one
- // happens first.
- go func() {
- select {
- case <-time.After(watchInterval * 3):
- return
- case <-leaderCh:
- leaderChClosed = true
- close(blocking)
- case <-blocking:
- return
- }
- }()
-
- <-blocking
- if !leaderChClosed {
- t.Fatalf("original lock did not have its leader channel closed.")
- }
-
- // Cleanup
- lock2.Unlock()
-}
-
-func prepareDynamoDBTestContainer(t *testing.T) (cleanup func(), retAddress string, creds *credentials.Credentials) {
- // If environment variable is set, assume caller wants to target a real
- // DynamoDB.
- if os.Getenv("AWS_DYNAMODB_ENDPOINT") != "" {
- return func() {}, os.Getenv("AWS_DYNAMODB_ENDPOINT"), credentials.NewEnvCredentials()
- }
-
- pool, err := dockertest.NewPool("")
- if err != nil {
- t.Fatalf("Failed to connect to docker: %s", err)
- }
-
- resource, err := pool.Run("deangiberson/aws-dynamodb-local", "latest", []string{})
- if err != nil {
- t.Fatalf("Could not start local DynamoDB: %s", err)
- }
-
- retAddress = "http://localhost:" + resource.GetPort("8000/tcp")
- cleanup = func() {
- err := pool.Purge(resource)
- if err != nil {
- t.Fatalf("Failed to cleanup local DynamoDB: %s", err)
- }
- }
-
- // exponential backoff-retry, because the DynamoDB may not be able to accept
- // connections yet
- if err := pool.Retry(func() error {
- var err error
- resp, err := http.Get(retAddress)
- if err != nil {
- return err
- }
- if resp.StatusCode != 400 {
- return fmt.Errorf("Expected DynamoDB to return status code 400, got (%s) instead.", resp.Status)
- }
- return nil
- }); err != nil {
- t.Fatalf("Could not connect to docker: %s", err)
- }
- return cleanup, retAddress, credentials.NewStaticCredentials("fake", "fake", "")
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd/etcd.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd.go
deleted file mode 100644
index 5d9c26d..0000000
--- a/vendor/github.com/hashicorp/vault/physical/etcd/etcd.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package etcd
-
-import (
- "context"
- "errors"
- "fmt"
- "net/url"
- "os"
- "strings"
-
- "github.com/coreos/etcd/client"
- "github.com/coreos/go-semver/semver"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-var (
- EtcdSyncConfigError = errors.New("client setup failed: unable to parse etcd sync field in config")
- EtcdSyncClusterError = errors.New("client setup failed: unable to sync etcd cluster")
- EtcdMultipleBootstrapError = errors.New("client setup failed: multiple discovery or bootstrap flags specified, use either \"address\" or \"discovery_srv\"")
- EtcdAddressError = errors.New("client setup failed: address must be valid URL (ex. 'scheme://host:port')")
- EtcdSemaphoreKeysEmptyError = errors.New("lock queue is empty")
- EtcdLockHeldError = errors.New("lock already held")
- EtcdLockNotHeldError = errors.New("lock not held")
- EtcdSemaphoreKeyRemovedError = errors.New("semaphore key removed before lock aquisition")
- EtcdVersionUnknown = errors.New("etcd: unknown API version")
-)
-
-// NewEtcdBackend constructs a etcd backend using a given machine address.
-func NewEtcdBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- var (
- apiVersion string
- ok bool
- )
-
- // v2 client can talk to both etcd2 and etcd3 thought API v2
- c, err := newEtcdV2Client(conf)
- if err != nil {
- return nil, errors.New("failed to create etcd client: " + err.Error())
- }
-
- remoteAPIVersion, err := getEtcdAPIVersion(c)
- if err != nil {
- return nil, errors.New("failed to get etcd API version: " + err.Error())
- }
-
- if apiVersion, ok = conf["etcd_api"]; !ok {
- apiVersion = os.Getenv("ETCD_API")
- }
-
- if apiVersion == "" {
- path, ok := conf["path"]
- if !ok {
- path = "/vault"
- }
- kAPI := client.NewKeysAPI(c)
-
- // keep using v2 if vault data exists in v2 and user does not explicitly
- // ask for v3.
- _, err := kAPI.Get(context.Background(), path, &client.GetOptions{})
- if errorIsMissingKey(err) {
- apiVersion = remoteAPIVersion
- } else if err == nil {
- apiVersion = "2"
- } else {
- return nil, errors.New("failed to check etcd status: " + err.Error())
- }
- }
-
- switch apiVersion {
- case "2", "etcd2", "v2":
- return newEtcd2Backend(conf, logger)
- case "3", "etcd3", "v3":
- if remoteAPIVersion == "2" {
- return nil, errors.New("etcd3 is required: etcd2 is running")
- }
- return newEtcd3Backend(conf, logger)
- default:
- return nil, EtcdVersionUnknown
- }
-}
-
-// getEtcdAPIVersion gets the latest supported API version.
-// If etcd cluster version >= 3.1, "3" will be returned.
-// Otherwise, "2" will be returned.
-func getEtcdAPIVersion(c client.Client) (string, error) {
- v, err := c.GetVersion(context.Background())
- if err != nil {
- return "", err
- }
-
- sv, err := semver.NewVersion(v.Cluster)
- if err != nil {
- return "", nil
- }
-
- if sv.LessThan(*semver.Must(semver.NewVersion("3.1.0"))) {
- return "2", nil
- }
-
- return "3", nil
-}
-
-// Retrieves the config option in order of priority:
-// 1. The named environment variable if it exist
-// 2. The key in the config map
-func getEtcdOption(conf map[string]string, confKey, envVar string) (string, bool) {
- confVal, inConf := conf[confKey]
- envVal, inEnv := os.LookupEnv(envVar)
- if inEnv {
- return envVal, true
- }
- return confVal, inConf
-}
-
-func getEtcdEndpoints(conf map[string]string) ([]string, error) {
- address, staticBootstrap := getEtcdOption(conf, "address", "ETCD_ADDR")
- domain, useSrv := getEtcdOption(conf, "discovery_srv", "ETCD_DISCOVERY_SRV")
- if useSrv && staticBootstrap {
- return nil, EtcdMultipleBootstrapError
- }
-
- if staticBootstrap {
- endpoints := strings.Split(address, Etcd2MachineDelimiter)
- // Verify that the machines are valid URLs
- for _, e := range endpoints {
- u, urlErr := url.Parse(e)
- if urlErr != nil || u.Scheme == "" {
- return nil, EtcdAddressError
- }
- }
- return endpoints, nil
- }
-
- if useSrv {
- discoverer := client.NewSRVDiscover()
- endpoints, err := discoverer.Discover(domain)
- if err != nil {
- return nil, fmt.Errorf("failed to discover etcd endpoints through SRV discovery: %v", err)
- }
- return endpoints, nil
- }
-
- // Set a default endpoints list if no option was set
- return []string{"http://127.0.0.1:2379"}, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd/etcd2.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd2.go
deleted file mode 100644
index 4e08615..0000000
--- a/vendor/github.com/hashicorp/vault/physical/etcd/etcd2.go
+++ /dev/null
@@ -1,597 +0,0 @@
-package etcd
-
-import (
- "context"
- "encoding/base64"
- "fmt"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "time"
-
- metrics "github.com/armon/go-metrics"
- "github.com/coreos/etcd/client"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-const (
- // Ideally, this prefix would match the "_" used in the file backend, but
- // that prefix has special meaining in etcd. Specifically, it excludes those
- // entries from directory listings.
- Etcd2NodeFilePrefix = "."
-
- // The lock prefix can (and probably should) cause an entry to be excluded
- // from diretory listings, so "_" works here.
- Etcd2NodeLockPrefix = "_"
-
- // The delimiter is the same as the `-C` flag of etcdctl.
- Etcd2MachineDelimiter = ","
-
- // The lock TTL matches the default that Consul API uses, 15 seconds.
- Etcd2LockTTL = 15 * time.Second
-
- // The amount of time to wait between the semaphore key renewals
- Etcd2LockRenewInterval = 5 * time.Second
-
- // The amount of time to wait if a watch fails before trying again.
- Etcd2WatchRetryInterval = time.Second
-
- // The number of times to re-try a failed watch before signaling that leadership is lost.
- Etcd2WatchRetryMax = 5
-)
-
-// Etcd2Backend is a physical backend that stores data at specific
-// prefix within etcd. It is used for most production situations as
-// it allows Vault to run on multiple machines in a highly-available manner.
-type Etcd2Backend struct {
- path string
- kAPI client.KeysAPI
- permitPool *physical.PermitPool
- logger log.Logger
- haEnabled bool
-}
-
-func newEtcd2Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- // Get the etcd path form the configuration.
- path, ok := conf["path"]
- if !ok {
- path = "/vault"
- }
-
- // Ensure path is prefixed.
- if !strings.HasPrefix(path, "/") {
- path = "/" + path
- }
-
- c, err := newEtcdV2Client(conf)
- if err != nil {
- return nil, err
- }
-
- haEnabled := os.Getenv("ETCD_HA_ENABLED")
- if haEnabled == "" {
- haEnabled = conf["ha_enabled"]
- }
- if haEnabled == "" {
- haEnabled = "false"
- }
- haEnabledBool, err := strconv.ParseBool(haEnabled)
- if err != nil {
- return nil, fmt.Errorf("value [%v] of 'ha_enabled' could not be understood", haEnabled)
- }
-
- // Should we sync the cluster state? There are three available options
- // for our client library: don't sync (required for some proxies), sync
- // once, or sync periodically with AutoSync. We currently support the
- // first two.
- sync, ok := conf["sync"]
- if !ok {
- sync = "yes"
- }
- switch sync {
- case "yes", "true", "y", "1":
- ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
- syncErr := c.Sync(ctx)
- cancel()
- if syncErr != nil {
- return nil, fmt.Errorf("%s: %s", EtcdSyncClusterError, syncErr)
- }
- case "no", "false", "n", "0":
- default:
- return nil, fmt.Errorf("value of 'sync' could not be understood")
- }
-
- kAPI := client.NewKeysAPI(c)
-
- // Setup the backend.
- return &Etcd2Backend{
- path: path,
- kAPI: kAPI,
- permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
- logger: logger,
- haEnabled: haEnabledBool,
- }, nil
-}
-
-func newEtcdV2Client(conf map[string]string) (client.Client, error) {
- endpoints, err := getEtcdEndpoints(conf)
- if err != nil {
- return nil, err
- }
-
- // Create a new client from the supplied address and attempt to sync with the
- // cluster.
- var cTransport client.CancelableTransport
- cert, hasCert := conf["tls_cert_file"]
- key, hasKey := conf["tls_key_file"]
- ca, hasCa := conf["tls_ca_file"]
- if (hasCert && hasKey) || hasCa {
- var transportErr error
- tls := transport.TLSInfo{
- CAFile: ca,
- CertFile: cert,
- KeyFile: key,
- }
- cTransport, transportErr = transport.NewTransport(tls, 30*time.Second)
-
- if transportErr != nil {
- return nil, transportErr
- }
- } else {
- cTransport = client.DefaultTransport
- }
-
- cfg := client.Config{
- Endpoints: endpoints,
- Transport: cTransport,
- }
-
- // Set credentials.
- username := os.Getenv("ETCD_USERNAME")
- if username == "" {
- username, _ = conf["username"]
- }
-
- password := os.Getenv("ETCD_PASSWORD")
- if password == "" {
- password, _ = conf["password"]
- }
-
- if username != "" && password != "" {
- cfg.Username = username
- cfg.Password = password
- }
-
- return client.New(cfg)
-}
-
-// Put is used to insert or update an entry.
-func (c *Etcd2Backend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now())
- value := base64.StdEncoding.EncodeToString(entry.Value)
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- _, err := c.kAPI.Set(context.Background(), c.nodePath(entry.Key), value, nil)
- return err
-}
-
-// Get is used to fetch an entry.
-func (c *Etcd2Backend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- getOpts := &client.GetOptions{
- Recursive: false,
- Sort: false,
- }
- response, err := c.kAPI.Get(context.Background(), c.nodePath(key), getOpts)
- if err != nil {
- if errorIsMissingKey(err) {
- return nil, nil
- }
- return nil, err
- }
-
- // Decode the stored value from base-64.
- value, err := base64.StdEncoding.DecodeString(response.Node.Value)
- if err != nil {
- return nil, err
- }
-
- // Construct and return a new entry.
- return &physical.Entry{
- Key: key,
- Value: value,
- }, nil
-}
-
-// Delete is used to permanently delete an entry.
-func (c *Etcd2Backend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"etcd", "delete"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- // Remove the key, non-recursively.
- delOpts := &client.DeleteOptions{
- Recursive: false,
- }
- _, err := c.kAPI.Delete(context.Background(), c.nodePath(key), delOpts)
- if err != nil && !errorIsMissingKey(err) {
- return err
- }
- return nil
-}
-
-// List is used to list all the keys under a given prefix, up to the next
-// prefix.
-func (c *Etcd2Backend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"etcd", "list"}, time.Now())
-
- // Set a directory path from the given prefix.
- path := c.nodePathDir(prefix)
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- // Get the directory, non-recursively, from etcd. If the directory is
- // missing, we just return an empty list of contents.
- getOpts := &client.GetOptions{
- Recursive: false,
- Sort: true,
- }
- response, err := c.kAPI.Get(context.Background(), path, getOpts)
- if err != nil {
- if errorIsMissingKey(err) {
- return []string{}, nil
- }
- return nil, err
- }
-
- out := make([]string, len(response.Node.Nodes))
- for i, node := range response.Node.Nodes {
-
- // etcd keys include the full path, so let's trim the prefix directory
- // path.
- name := strings.TrimPrefix(node.Key, path)
-
- // Check if this node is itself a directory. If it is, add a trailing
- // slash; if it isn't remove the node file prefix.
- if node.Dir {
- out[i] = name + "/"
- } else {
- out[i] = name[1:]
- }
- }
- return out, nil
-}
-
-// nodePath returns an etcd filepath based on the given key.
-func (b *Etcd2Backend) nodePath(key string) string {
- return filepath.Join(b.path, filepath.Dir(key), Etcd2NodeFilePrefix+filepath.Base(key))
-}
-
-// nodePathDir returns an etcd directory path based on the given key.
-func (b *Etcd2Backend) nodePathDir(key string) string {
- return filepath.Join(b.path, key) + "/"
-}
-
-// nodePathLock returns an etcd directory path used specifically for semaphore
-// indicies based on the given key.
-func (b *Etcd2Backend) nodePathLock(key string) string {
- return filepath.Join(b.path, filepath.Dir(key), Etcd2NodeLockPrefix+filepath.Base(key)+"/")
-}
-
-// Lock is used for mutual exclusion based on the given key.
-func (c *Etcd2Backend) LockWith(key, value string) (physical.Lock, error) {
- return &Etcd2Lock{
- kAPI: c.kAPI,
- value: value,
- semaphoreDirKey: c.nodePathLock(key),
- }, nil
-}
-
-// HAEnabled indicates whether the HA functionality should be exposed.
-// Currently always returns true.
-func (e *Etcd2Backend) HAEnabled() bool {
- return e.haEnabled
-}
-
-// Etcd2Lock emplements a lock using and Etcd2 backend.
-type Etcd2Lock struct {
- kAPI client.KeysAPI
- value, semaphoreDirKey, semaphoreKey string
- lock sync.Mutex
-}
-
-// addSemaphoreKey acquires a new ordered semaphore key.
-func (c *Etcd2Lock) addSemaphoreKey() (string, uint64, error) {
- // CreateInOrder is an atomic operation that can be used to enqueue a
- // request onto a semaphore. In the rest of the comments, we refer to the
- // resulting key as a "semaphore key".
- // https://coreos.com/etcd/docs/2.0.8/api.html#atomically-creating-in-order-keys
- opts := &client.CreateInOrderOptions{
- TTL: Etcd2LockTTL,
- }
- response, err := c.kAPI.CreateInOrder(context.Background(), c.semaphoreDirKey, c.value, opts)
- if err != nil {
- return "", 0, err
- }
- return response.Node.Key, response.Index, nil
-}
-
-// renewSemaphoreKey renews an existing semaphore key.
-func (c *Etcd2Lock) renewSemaphoreKey() (string, uint64, error) {
- setOpts := &client.SetOptions{
- TTL: Etcd2LockTTL,
- PrevExist: client.PrevExist,
- }
- response, err := c.kAPI.Set(context.Background(), c.semaphoreKey, c.value, setOpts)
- if err != nil {
- return "", 0, err
- }
- return response.Node.Key, response.Index, nil
-}
-
-// getSemaphoreKey determines which semaphore key holder has acquired the lock
-// and its value.
-func (c *Etcd2Lock) getSemaphoreKey() (string, string, uint64, error) {
- // Get the list of waiters in order to see if we are next.
- getOpts := &client.GetOptions{
- Recursive: false,
- Sort: true,
- }
- response, err := c.kAPI.Get(context.Background(), c.semaphoreDirKey, getOpts)
- if err != nil {
- return "", "", 0, err
- }
-
- // Make sure the list isn't empty.
- if response.Node.Nodes.Len() == 0 {
- return "", "", response.Index, nil
- }
- return response.Node.Nodes[0].Key, response.Node.Nodes[0].Value, response.Index, nil
-}
-
-// isHeld determines if we are the current holders of the lock.
-func (c *Etcd2Lock) isHeld() (bool, error) {
- if c.semaphoreKey == "" {
- return false, nil
- }
-
- // Get the key of the curren holder of the lock.
- currentSemaphoreKey, _, _, err := c.getSemaphoreKey()
- if err != nil {
- return false, err
- }
- return c.semaphoreKey == currentSemaphoreKey, nil
-}
-
-// assertHeld determines whether or not we are the current holders of the lock
-// and returns an Etcd2LockNotHeldError if we are not.
-func (c *Etcd2Lock) assertHeld() error {
- held, err := c.isHeld()
- if err != nil {
- return err
- }
-
- // Check if we don't hold the lock.
- if !held {
- return EtcdLockNotHeldError
- }
- return nil
-}
-
-// assertNotHeld determines whether or not we are the current holders of the
-// lock and returns an Etcd2LockHeldError if we are.
-func (c *Etcd2Lock) assertNotHeld() error {
- held, err := c.isHeld()
- if err != nil {
- return err
- }
-
- // Check if we hold the lock.
- if held {
- return EtcdLockHeldError
- }
- return nil
-}
-
-// periodically renew our semaphore key so that it doesn't expire
-func (c *Etcd2Lock) periodicallyRenewSemaphoreKey(stopCh chan struct{}) {
- for {
- select {
- case <-time.After(Etcd2LockRenewInterval):
- c.renewSemaphoreKey()
- case <-stopCh:
- return
- }
- }
-}
-
-// watchForKeyRemoval continuously watches a single non-directory key starting
-// from the provided etcd index and closes the provided channel when it's
-// deleted, expires, or appears to be missing.
-func (c *Etcd2Lock) watchForKeyRemoval(key string, etcdIndex uint64, closeCh chan struct{}) {
- retries := Etcd2WatchRetryMax
-
- for {
- // Start a non-recursive watch of the given key.
- w := c.kAPI.Watcher(key, &client.WatcherOptions{AfterIndex: etcdIndex, Recursive: false})
- response, err := w.Next(context.TODO())
- if err != nil {
-
- // If the key is just missing, we can exit the loop.
- if errorIsMissingKey(err) {
- break
- }
-
- // If the error is something else, there's nothing we can do but retry
- // the watch. Check that we still have retries left.
- retries -= 1
- if retries == 0 {
- break
- }
-
- // Sleep for a period of time to avoid slamming etcd.
- time.Sleep(Etcd2WatchRetryInterval)
- continue
- }
-
- // Check if the key we are concerned with has been removed. If it has, we
- // can exit the loop.
- if response.Node.Key == key &&
- (response.Action == "delete" || response.Action == "expire") {
- break
- }
-
- // Update the etcd index.
- etcdIndex = response.Index + 1
- }
-
- // Regardless of what happened, we need to close the close channel.
- close(closeCh)
-}
-
-// Lock attempts to acquire the lock by waiting for a new semaphore key in etcd
-// to become the first in the queue and will block until it is successful or
-// it receives a signal on the provided channel. The returned channel will be
-// closed when the lock is lost, either by an explicit call to Unlock or by
-// the associated semaphore key in etcd otherwise being deleted or expiring.
-//
-// If the lock is currently held by this instance of Etcd2Lock, Lock will
-// return an Etcd2LockHeldError error.
-func (c *Etcd2Lock) Lock(stopCh <-chan struct{}) (doneCh <-chan struct{}, retErr error) {
- // Get the local lock before interacting with etcd.
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // Check if the lock is already held.
- if err := c.assertNotHeld(); err != nil {
- return nil, err
- }
-
- // Add a new semaphore key that we will track.
- semaphoreKey, _, err := c.addSemaphoreKey()
- if err != nil {
- return nil, err
- }
- c.semaphoreKey = semaphoreKey
-
- // Get the current semaphore key.
- currentSemaphoreKey, _, currentEtcdIndex, err := c.getSemaphoreKey()
- if err != nil {
- return nil, err
- }
-
- // Create an etcd-compatible boolean stop channel from the provided
- // interface stop channel.
- ctx, cancel := context.WithCancel(context.Background())
- go func() {
- <-stopCh
- cancel()
- }()
- defer cancel()
-
- // Create a channel to signal when we lose the semaphore key.
- done := make(chan struct{})
- defer func() {
- if retErr != nil {
- close(done)
- }
- }()
-
- go c.periodicallyRenewSemaphoreKey(done)
-
- // Loop until the we current semaphore key matches ours.
- for semaphoreKey != currentSemaphoreKey {
- var err error
-
- // Start a watch of the entire lock directory
- w := c.kAPI.Watcher(c.semaphoreDirKey, &client.WatcherOptions{AfterIndex: currentEtcdIndex, Recursive: true})
- response, err := w.Next(ctx)
- if err != nil {
-
- // If the error is not an etcd error, we can assume it's a notification
- // of the stop channel having closed. In this scenario, we also want to
- // remove our semaphore key as we are no longer waiting to acquire the
- // lock.
- if _, ok := err.(*client.Error); !ok {
- delOpts := &client.DeleteOptions{
- Recursive: false,
- }
- _, err = c.kAPI.Delete(context.Background(), c.semaphoreKey, delOpts)
- }
- return nil, err
- }
-
- // Make sure the index we are waiting for has not been removed. If it has,
- // this is an error and nothing else needs to be done.
- if response.Node.Key == semaphoreKey &&
- (response.Action == "delete" || response.Action == "expire") {
- return nil, EtcdSemaphoreKeyRemovedError
- }
-
- // Get the current semaphore key and etcd index.
- currentSemaphoreKey, _, currentEtcdIndex, err = c.getSemaphoreKey()
- if err != nil {
- return nil, err
- }
- }
-
- go c.watchForKeyRemoval(c.semaphoreKey, currentEtcdIndex, done)
- return done, nil
-}
-
-// Unlock releases the lock by deleting the associated semaphore key in etcd.
-//
-// If the lock is not currently held by this instance of Etcd2Lock, Unlock will
-// return an Etcd2LockNotHeldError error.
-func (c *Etcd2Lock) Unlock() error {
- // Get the local lock before interacting with etcd.
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // Check that the lock is held.
- if err := c.assertHeld(); err != nil {
- return err
- }
-
- // Delete our semaphore key.
- delOpts := &client.DeleteOptions{
- Recursive: false,
- }
- if _, err := c.kAPI.Delete(context.Background(), c.semaphoreKey, delOpts); err != nil {
- return err
- }
- return nil
-}
-
-// Value checks whether or not the lock is held by any instance of Etcd2Lock,
-// including this one, and returns the current value.
-func (c *Etcd2Lock) Value() (bool, string, error) {
- semaphoreKey, semaphoreValue, _, err := c.getSemaphoreKey()
- if err != nil {
- return false, "", err
- }
-
- if semaphoreKey == "" {
- return false, "", nil
- }
- return true, semaphoreValue, nil
-}
-
-// errorIsMissingKey returns true if the given error is an etcd error with an
-// error code corresponding to a missing key.
-func errorIsMissingKey(err error) bool {
- etcdErr, ok := err.(client.Error)
- return ok && etcdErr.Code == client.ErrorCodeKeyNotFound
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd/etcd3.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd3.go
deleted file mode 100644
index 04944e5..0000000
--- a/vendor/github.com/hashicorp/vault/physical/etcd/etcd3.go
+++ /dev/null
@@ -1,322 +0,0 @@
-package etcd
-
-import (
- "errors"
- "fmt"
- "os"
- "path"
- "strconv"
- "strings"
- "sync"
- "time"
-
- metrics "github.com/armon/go-metrics"
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/clientv3/concurrency"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
- "golang.org/x/net/context"
-)
-
-// EtcdBackend is a physical backend that stores data at specific
-// prefix within etcd. It is used for most production situations as
-// it allows Vault to run on multiple machines in a highly-available manner.
-type EtcdBackend struct {
- logger log.Logger
- path string
- haEnabled bool
-
- permitPool *physical.PermitPool
-
- etcd *clientv3.Client
-}
-
-const (
- // etcd3 default lease duration is 60s. set to 15s for faster recovery.
- etcd3LockTimeoutInSeconds = 15
- // etcd3 default request timeout is set to 5s. It should be long enough
- // for most cases, even with internal retry.
- etcd3RequestTimeout = 5 * time.Second
-)
-
-// newEtcd3Backend constructs a etcd3 backend.
-func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- // Get the etcd path form the configuration.
- path, ok := conf["path"]
- if !ok {
- path = "/vault"
- }
-
- // Ensure path is prefixed.
- if !strings.HasPrefix(path, "/") {
- path = "/" + path
- }
-
- endpoints, err := getEtcdEndpoints(conf)
- if err != nil {
- return nil, err
- }
-
- cfg := clientv3.Config{
- Endpoints: endpoints,
- }
-
- haEnabled := os.Getenv("ETCD_HA_ENABLED")
- if haEnabled == "" {
- haEnabled = conf["ha_enabled"]
- }
- if haEnabled == "" {
- haEnabled = "false"
- }
- haEnabledBool, err := strconv.ParseBool(haEnabled)
- if err != nil {
- return nil, fmt.Errorf("value [%v] of 'ha_enabled' could not be understood", haEnabled)
- }
-
- cert, hasCert := conf["tls_cert_file"]
- key, hasKey := conf["tls_key_file"]
- ca, hasCa := conf["tls_ca_file"]
- if (hasCert && hasKey) || hasCa {
- tls := transport.TLSInfo{
- CAFile: ca,
- CertFile: cert,
- KeyFile: key,
- }
-
- tlscfg, err := tls.ClientConfig()
- if err != nil {
- return nil, err
- }
- cfg.TLS = tlscfg
- }
-
- // Set credentials.
- username := os.Getenv("ETCD_USERNAME")
- if username == "" {
- username, _ = conf["username"]
- }
-
- password := os.Getenv("ETCD_PASSWORD")
- if password == "" {
- password, _ = conf["password"]
- }
-
- if username != "" && password != "" {
- cfg.Username = username
- cfg.Password = password
- }
-
- etcd, err := clientv3.New(cfg)
- if err != nil {
- return nil, err
- }
-
- ssync, ok := conf["sync"]
- if !ok {
- ssync = "true"
- }
- sync, err := strconv.ParseBool(ssync)
- if err != nil {
- return nil, fmt.Errorf("value of 'sync' (%v) could not be understood", err)
- }
-
- if sync {
- ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
- err := etcd.Sync(ctx)
- cancel()
- if err != nil {
- return nil, err
- }
- }
-
- return &EtcdBackend{
- path: path,
- etcd: etcd,
- permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
- logger: logger,
- haEnabled: haEnabledBool,
- }, nil
-}
-
-func (c *EtcdBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
- defer cancel()
- _, err := c.etcd.Put(ctx, path.Join(c.path, entry.Key), string(entry.Value))
- return err
-}
-
-func (c *EtcdBackend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
- defer cancel()
- resp, err := c.etcd.Get(ctx, path.Join(c.path, key))
- if err != nil {
- return nil, err
- }
-
- if len(resp.Kvs) == 0 {
- return nil, nil
- }
- if len(resp.Kvs) > 1 {
- return nil, errors.New("unexpected number of keys from a get request")
- }
- return &physical.Entry{
- Key: key,
- Value: resp.Kvs[0].Value,
- }, nil
-}
-
-func (c *EtcdBackend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"etcd", "delete"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
- defer cancel()
- _, err := c.etcd.Delete(ctx, path.Join(c.path, key))
- if err != nil {
- return err
- }
- return nil
-}
-
-func (c *EtcdBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"etcd", "list"}, time.Now())
-
- c.permitPool.Acquire()
- defer c.permitPool.Release()
-
- ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
- defer cancel()
- prefix = path.Join(c.path, prefix)
- resp, err := c.etcd.Get(ctx, prefix, clientv3.WithPrefix())
- if err != nil {
- return nil, err
- }
-
- keys := []string{}
- for _, kv := range resp.Kvs {
- key := strings.TrimPrefix(string(kv.Key), prefix)
- key = strings.TrimPrefix(key, "/")
-
- if len(key) == 0 {
- continue
- }
-
- if i := strings.Index(key, "/"); i == -1 {
- keys = append(keys, key)
- } else if i != -1 {
- keys = strutil.AppendIfMissing(keys, key[:i+1])
- }
- }
- return keys, nil
-}
-
-func (e *EtcdBackend) HAEnabled() bool {
- return e.haEnabled
-}
-
-// EtcdLock emplements a lock using and etcd backend.
-type EtcdLock struct {
- lock sync.Mutex
- held bool
-
- etcdSession *concurrency.Session
- etcdMu *concurrency.Mutex
-
- prefix string
- value string
-
- etcd *clientv3.Client
-}
-
-// Lock is used for mutual exclusion based on the given key.
-func (c *EtcdBackend) LockWith(key, value string) (physical.Lock, error) {
- session, err := concurrency.NewSession(c.etcd, concurrency.WithTTL(etcd3LockTimeoutInSeconds))
- if err != nil {
- return nil, err
- }
-
- p := path.Join(c.path, key)
- return &EtcdLock{
- etcdSession: session,
- etcdMu: concurrency.NewMutex(session, p),
- prefix: p,
- value: value,
- etcd: c.etcd,
- }, nil
-}
-
-func (c *EtcdLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- if c.held {
- return nil, EtcdLockHeldError
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- go func() {
- <-stopCh
- cancel()
- }()
- if err := c.etcdMu.Lock(ctx); err != nil {
- if err == context.Canceled {
- return nil, nil
- }
- return nil, err
- }
-
- pctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
- defer cancel()
- if _, err := c.etcd.Put(pctx, c.etcdMu.Key(), c.value, clientv3.WithLease(c.etcdSession.Lease())); err != nil {
- return nil, err
- }
-
- c.held = true
-
- return c.etcdSession.Done(), nil
-}
-
-func (c *EtcdLock) Unlock() error {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- if !c.held {
- return EtcdLockNotHeldError
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
- defer cancel()
- return c.etcdMu.Unlock(ctx)
-}
-
-func (c *EtcdLock) Value() (bool, string, error) {
- ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
- defer cancel()
-
- resp, err := c.etcd.Get(ctx,
- c.prefix, clientv3.WithPrefix(),
- clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortAscend))
-
- if err != nil {
- return false, "", err
- }
- if len(resp.Kvs) == 0 {
- return false, "", nil
- }
-
- return true, string(resp.Kvs[0].Value), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd/etcd3_test.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd3_test.go
deleted file mode 100644
index fbd842d..0000000
--- a/vendor/github.com/hashicorp/vault/physical/etcd/etcd3_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package etcd
-
-import (
- "fmt"
- "os"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestEtcd3Backend(t *testing.T) {
- addr := os.Getenv("ETCD_ADDR")
- if addr == "" {
- t.Skipf("Skipped. No etcd3 server found")
- }
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewEtcdBackend(map[string]string{
- "path": fmt.Sprintf("/vault-%d", time.Now().Unix()),
- "etcd_api": "3",
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-
- ha, ok := b.(physical.HABackend)
- if !ok {
- t.Fatalf("etcd3 does not implement HABackend")
- }
- physical.ExerciseHABackend(t, ha, ha)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd/etcd_test.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd_test.go
deleted file mode 100644
index d5c30bb..0000000
--- a/vendor/github.com/hashicorp/vault/physical/etcd/etcd_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package etcd
-
-import (
- "fmt"
- "os"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- "github.com/coreos/etcd/client"
- "golang.org/x/net/context"
-)
-
-func TestEtcdBackend(t *testing.T) {
- addr := os.Getenv("ETCD_ADDR")
- if addr == "" {
- t.SkipNow()
- }
-
- cfg := client.Config{
- Endpoints: []string{addr},
- Transport: client.DefaultTransport,
- }
-
- c, err := client.New(cfg)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
- syncErr := c.Sync(ctx)
- cancel()
- if syncErr != nil {
- t.Fatalf("err: %v", EtcdSyncClusterError)
- }
-
- kAPI := client.NewKeysAPI(c)
-
- randPath := fmt.Sprintf("/vault-%d", time.Now().Unix())
- defer func() {
- delOpts := &client.DeleteOptions{
- Recursive: true,
- }
- if _, err := kAPI.Delete(context.Background(), randPath, delOpts); err != nil {
- t.Fatalf("err: %v", err)
- }
- }()
-
- // Generate new etcd backend. The etcd address is read from ETCD_ADDR. No
- // need to provide it explicitly.
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewEtcdBackend(map[string]string{
- "path": randPath,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-
- ha, ok := b.(physical.HABackend)
- if !ok {
- t.Fatalf("etcd does not implement HABackend")
- }
- physical.ExerciseHABackend(t, ha, ha)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/file/file.go b/vendor/github.com/hashicorp/vault/physical/file/file.go
deleted file mode 100644
index df05dba..0000000
--- a/vendor/github.com/hashicorp/vault/physical/file/file.go
+++ /dev/null
@@ -1,284 +0,0 @@
-package file
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "strings"
- "sync"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/physical"
-)
-
-// FileBackend is a physical backend that stores data on disk
-// at a given file path. It can be used for durable single server
-// situations, or to develop locally where durability is not critical.
-//
-// WARNING: the file backend implementation is currently extremely unsafe
-// and non-performant. It is meant mostly for local testing and development.
-// It can be improved in the future.
-type FileBackend struct {
- sync.RWMutex
- path string
- logger log.Logger
- permitPool *physical.PermitPool
-}
-
-type TransactionalFileBackend struct {
- FileBackend
-}
-
-// NewFileBackend constructs a FileBackend using the given directory
-func NewFileBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- path, ok := conf["path"]
- if !ok {
- return nil, fmt.Errorf("'path' must be set")
- }
-
- return &FileBackend{
- path: path,
- logger: logger,
- permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
- }, nil
-}
-
-func NewTransactionalFileBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- path, ok := conf["path"]
- if !ok {
- return nil, fmt.Errorf("'path' must be set")
- }
-
- // Create a pool of size 1 so only one operation runs at a time
- return &TransactionalFileBackend{
- FileBackend: FileBackend{
- path: path,
- logger: logger,
- permitPool: physical.NewPermitPool(1),
- },
- }, nil
-}
-
-func (b *FileBackend) Delete(path string) error {
- b.permitPool.Acquire()
- defer b.permitPool.Release()
-
- b.Lock()
- defer b.Unlock()
-
- return b.DeleteInternal(path)
-}
-
-func (b *FileBackend) DeleteInternal(path string) error {
- if path == "" {
- return nil
- }
-
- if err := b.validatePath(path); err != nil {
- return err
- }
-
- basePath, key := b.expandPath(path)
- fullPath := filepath.Join(basePath, key)
-
- err := os.Remove(fullPath)
- if err != nil && !os.IsNotExist(err) {
- return fmt.Errorf("Failed to remove %q: %v", fullPath, err)
- }
-
- err = b.cleanupLogicalPath(path)
-
- return err
-}
-
-// cleanupLogicalPath is used to remove all empty nodes, begining with deepest
-// one, aborting on first non-empty one, up to top-level node.
-func (b *FileBackend) cleanupLogicalPath(path string) error {
- nodes := strings.Split(path, fmt.Sprintf("%c", os.PathSeparator))
- for i := len(nodes) - 1; i > 0; i-- {
- fullPath := filepath.Join(b.path, filepath.Join(nodes[:i]...))
-
- dir, err := os.Open(fullPath)
- if err != nil {
- if dir != nil {
- dir.Close()
- }
- if os.IsNotExist(err) {
- return nil
- } else {
- return err
- }
- }
-
- list, err := dir.Readdir(1)
- dir.Close()
- if err != nil && err != io.EOF {
- return err
- }
-
- // If we have no entries, it's an empty directory; remove it
- if err == io.EOF || list == nil || len(list) == 0 {
- err = os.Remove(fullPath)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (b *FileBackend) Get(k string) (*physical.Entry, error) {
- b.permitPool.Acquire()
- defer b.permitPool.Release()
-
- b.RLock()
- defer b.RUnlock()
-
- return b.GetInternal(k)
-}
-
-func (b *FileBackend) GetInternal(k string) (*physical.Entry, error) {
- if err := b.validatePath(k); err != nil {
- return nil, err
- }
-
- path, key := b.expandPath(k)
- path = filepath.Join(path, key)
-
- f, err := os.Open(path)
- if f != nil {
- defer f.Close()
- }
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
-
- return nil, err
- }
-
- var entry physical.Entry
- if err := jsonutil.DecodeJSONFromReader(f, &entry); err != nil {
- return nil, err
- }
-
- return &entry, nil
-}
-
-func (b *FileBackend) Put(entry *physical.Entry) error {
- b.permitPool.Acquire()
- defer b.permitPool.Release()
-
- b.Lock()
- defer b.Unlock()
-
- return b.PutInternal(entry)
-}
-
-func (b *FileBackend) PutInternal(entry *physical.Entry) error {
- if err := b.validatePath(entry.Key); err != nil {
- return err
- }
-
- path, key := b.expandPath(entry.Key)
-
- // Make the parent tree
- if err := os.MkdirAll(path, 0755); err != nil {
- return err
- }
-
- // JSON encode the entry and write it
- f, err := os.OpenFile(
- filepath.Join(path, key),
- os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
- 0600)
- if f != nil {
- defer f.Close()
- }
- if err != nil {
- return err
- }
- enc := json.NewEncoder(f)
- return enc.Encode(entry)
-}
-
-func (b *FileBackend) List(prefix string) ([]string, error) {
- b.permitPool.Acquire()
- defer b.permitPool.Release()
-
- b.RLock()
- defer b.RUnlock()
-
- return b.ListInternal(prefix)
-}
-
-func (b *FileBackend) ListInternal(prefix string) ([]string, error) {
- if err := b.validatePath(prefix); err != nil {
- return nil, err
- }
-
- path := b.path
- if prefix != "" {
- path = filepath.Join(path, prefix)
- }
-
- // Read the directory contents
- f, err := os.Open(path)
- if f != nil {
- defer f.Close()
- }
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
-
- return nil, err
- }
-
- names, err := f.Readdirnames(-1)
- if err != nil {
- return nil, err
- }
-
- for i, name := range names {
- if name[0] == '_' {
- names[i] = name[1:]
- } else {
- names[i] = name + "/"
- }
- }
-
- return names, nil
-}
-
-func (b *FileBackend) expandPath(k string) (string, string) {
- path := filepath.Join(b.path, k)
- key := filepath.Base(path)
- path = filepath.Dir(path)
- return path, "_" + key
-}
-
-func (b *FileBackend) validatePath(path string) error {
- switch {
- case strings.Contains(path, ".."):
- return consts.ErrPathContainsParentReferences
- }
-
- return nil
-}
-
-func (b *TransactionalFileBackend) Transaction(txns []physical.TxnEntry) error {
- b.permitPool.Acquire()
- defer b.permitPool.Release()
-
- b.Lock()
- defer b.Unlock()
-
- return physical.GenericTransactionHandler(b, txns)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/file/file_test.go b/vendor/github.com/hashicorp/vault/physical/file/file_test.go
deleted file mode 100644
index 6438e21..0000000
--- a/vendor/github.com/hashicorp/vault/physical/file/file_test.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package file
-
-import (
- "encoding/json"
- "io/ioutil"
- "os"
- "path/filepath"
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestFileBackend_Base64URLEncoding(t *testing.T) {
- backendPath, err := ioutil.TempDir("", "vault")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- defer os.RemoveAll(backendPath)
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewFileBackend(map[string]string{
- "path": backendPath,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // List the entries. Length should be zero.
- keys, err := b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 0 {
- t.Fatalf("bad: len(keys): expected: 0, actual: %d", len(keys))
- }
-
- // Create a storage entry without base64 encoding the file name
- rawFullPath := filepath.Join(backendPath, "_foo")
- e := &physical.Entry{Key: "foo", Value: []byte("test")}
- f, err := os.OpenFile(
- rawFullPath,
- os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
- 0600)
- if err != nil {
- t.Fatal(err)
- }
- json.NewEncoder(f).Encode(e)
- f.Close()
-
- // Get should work
- out, err := b.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !reflect.DeepEqual(out, e) {
- t.Fatalf("bad: %v expected: %v", out, e)
- }
-
- // List the entries. There should be one entry.
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 1 {
- t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys))
- }
-
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // List the entries again. There should still be one entry.
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 1 {
- t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys))
- }
-
- // Get should work
- out, err = b.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !reflect.DeepEqual(out, e) {
- t.Fatalf("bad: %v expected: %v", out, e)
- }
-
- err = b.Delete("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- out, err = b.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("bad: entry: expected: nil, actual: %#v", e)
- }
-
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 0 {
- t.Fatalf("bad: len(keys): expected: 0, actual: %d", len(keys))
- }
-
- f, err = os.OpenFile(
- rawFullPath,
- os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
- 0600)
- if err != nil {
- t.Fatal(err)
- }
- json.NewEncoder(f).Encode(e)
- f.Close()
-
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 1 {
- t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys))
- }
-}
-
-func TestFileBackend_ValidatePath(t *testing.T) {
- dir, err := ioutil.TempDir("", "vault")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- defer os.RemoveAll(dir)
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewFileBackend(map[string]string{
- "path": dir,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := b.Delete("foo/bar/../zip"); err == nil {
- t.Fatal("expected error")
- }
- if err := b.Delete("foo/bar/zip"); err != nil {
- t.Fatal("did not expect error")
- }
-}
-
-func TestFileBackend(t *testing.T) {
- dir, err := ioutil.TempDir("", "vault")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- defer os.RemoveAll(dir)
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewFileBackend(map[string]string{
- "path": dir,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/gcs/gcs.go b/vendor/github.com/hashicorp/vault/physical/gcs/gcs.go
deleted file mode 100644
index 5e7fc78..0000000
--- a/vendor/github.com/hashicorp/vault/physical/gcs/gcs.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package gcs
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- "cloud.google.com/go/storage"
- "github.com/armon/go-metrics"
- "golang.org/x/net/context"
- "google.golang.org/api/iterator"
- "google.golang.org/api/option"
-)
-
-// GCSBackend is a physical backend that stores data
-// within an Google Cloud Storage bucket.
-type GCSBackend struct {
- bucketName string
- client *storage.Client
- permitPool *physical.PermitPool
- logger log.Logger
-}
-
-// NewGCSBackend constructs a Google Cloud Storage backend using a pre-existing
-// bucket. Credentials can be provided to the backend, sourced
-// from environment variables or a service account file
-func NewGCSBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- bucketName := os.Getenv("GOOGLE_STORAGE_BUCKET")
-
- if bucketName == "" {
- bucketName = conf["bucket"]
- if bucketName == "" {
- return nil, fmt.Errorf("env var GOOGLE_STORAGE_BUCKET or configuration parameter 'bucket' must be set")
- }
- }
-
- ctx := context.Background()
- client, err := newGCSClient(ctx, conf, logger)
- if err != nil {
- return nil, errwrap.Wrapf("error establishing strorage client: {{err}}", err)
- }
-
- // check client connectivity by getting bucket attributes
- _, err = client.Bucket(bucketName).Attrs(ctx)
- if err != nil {
- return nil, fmt.Errorf("unable to access bucket '%s': '%v'", bucketName, err)
- }
-
- maxParStr, ok := conf["max_parallel"]
- var maxParInt int
- if ok {
- maxParInt, err = strconv.Atoi(maxParStr)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
- }
- if logger.IsDebug() {
- logger.Debug("physical/gcs: max_parallel set", "max_parallel", maxParInt)
- }
- }
-
- g := GCSBackend{
- bucketName: bucketName,
- client: client,
- permitPool: physical.NewPermitPool(maxParInt),
- logger: logger,
- }
-
- return &g, nil
-}
-
-func newGCSClient(ctx context.Context, conf map[string]string, logger log.Logger) (*storage.Client, error) {
- // if credentials_file is configured, try to use it
- // else use application default credentials
- credentialsFile, ok := conf["credentials_file"]
- if ok {
- client, err := storage.NewClient(
- ctx,
- option.WithServiceAccountFile(credentialsFile),
- )
-
- if err != nil {
- return nil, fmt.Errorf("error with provided credentials: '%v'", err)
- }
- return client, nil
- }
-
- client, err := storage.NewClient(ctx)
- if err != nil {
- return nil, errwrap.Wrapf("error with application default credentials: {{err}}", err)
- }
- return client, nil
-}
-
-// Put is used to insert or update an entry
-func (g *GCSBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"gcs", "put"}, time.Now())
-
- bucket := g.client.Bucket(g.bucketName)
- writer := bucket.Object(entry.Key).NewWriter(context.Background())
-
- g.permitPool.Acquire()
- defer g.permitPool.Release()
-
- defer writer.Close()
- _, err := writer.Write(entry.Value)
-
- return err
-}
-
-// Get is used to fetch an entry
-func (g *GCSBackend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"gcs", "get"}, time.Now())
-
- bucket := g.client.Bucket(g.bucketName)
- reader, err := bucket.Object(key).NewReader(context.Background())
-
- // return (nil, nil) if object doesn't exist
- if err == storage.ErrObjectNotExist {
- return nil, nil
- } else if err != nil {
- return nil, fmt.Errorf("error creating bucket reader: '%v'", err)
- }
-
- g.permitPool.Acquire()
- defer g.permitPool.Release()
-
- defer reader.Close()
- value, err := ioutil.ReadAll(reader)
- if err != nil {
- return nil, fmt.Errorf("error reading object '%v': '%v'", key, err)
- }
-
- ent := physical.Entry{
- Key: key,
- Value: value,
- }
-
- return &ent, nil
-}
-
-// Delete is used to permanently delete an entry
-func (g *GCSBackend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"gcs", "delete"}, time.Now())
-
- bucket := g.client.Bucket(g.bucketName)
-
- g.permitPool.Acquire()
- defer g.permitPool.Release()
-
- err := bucket.Object(key).Delete(context.Background())
-
- // deletion of non existent object is OK
- if err == storage.ErrObjectNotExist {
- return nil
- } else if err != nil {
- return fmt.Errorf("error deleting object '%v': '%v'", key, err)
- }
-
- return nil
-}
-
-// List is used to list all the keys under a given
-// prefix, up to the next prefix.
-func (g *GCSBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"gcs", "list"}, time.Now())
-
- bucket := g.client.Bucket(g.bucketName)
-
- objects_it := bucket.Objects(
- context.Background(),
- &storage.Query{
- Prefix: prefix,
- Delimiter: "/",
- Versions: false,
- })
-
- keys := []string{}
-
- g.permitPool.Acquire()
- defer g.permitPool.Release()
-
- for {
- objAttrs, err := objects_it.Next()
- if err == iterator.Done {
- break
- }
- if err != nil {
- return nil, fmt.Errorf("error listing bucket '%v': '%v'", g.bucketName, err)
- }
-
- path := ""
- if objAttrs.Prefix != "" {
- // "subdirectory"
- path = objAttrs.Prefix
- } else {
- // file
- path = objAttrs.Name
- }
-
- // get relative file/dir just like "basename"
- key := strings.TrimPrefix(path, prefix)
- keys = append(keys, key)
- }
-
- sort.Strings(keys)
-
- return keys, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/gcs/gcs_test.go b/vendor/github.com/hashicorp/vault/physical/gcs/gcs_test.go
deleted file mode 100644
index dda6eed..0000000
--- a/vendor/github.com/hashicorp/vault/physical/gcs/gcs_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package gcs
-
-import (
- "fmt"
- "math/rand"
- "os"
- "testing"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "cloud.google.com/go/storage"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- "golang.org/x/net/context"
- "google.golang.org/api/iterator"
- "google.golang.org/api/option"
-)
-
-func TestGCSBackend(t *testing.T) {
- credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
-
- // projectID is only required for creating a bucket for this test
- projectID := os.Getenv("GOOGLE_PROJECT_ID")
-
- if credentialsFile == "" || projectID == "" {
- t.SkipNow()
- }
-
- client, err := storage.NewClient(
- context.Background(),
- option.WithServiceAccountFile(credentialsFile),
- )
-
- if err != nil {
- t.Fatalf("error creating storage client: '%v'", err)
- }
-
- var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
- bucketName := fmt.Sprintf("vault-gcs-testacc-%d", randInt)
-
- bucket := client.Bucket(bucketName)
- err = bucket.Create(context.Background(), projectID, nil)
-
- if err != nil {
- t.Fatalf("error creating bucket '%v': '%v'", bucketName, err)
- }
-
- // test bucket teardown
- defer func() {
- objects_it := bucket.Objects(context.Background(), nil)
-
- // have to delete all objects before deleting bucket
- for {
- objAttrs, err := objects_it.Next()
- if err == iterator.Done {
- break
- }
- if err != nil {
- t.Fatalf("error listing bucket '%v' contents: '%v'", bucketName, err)
- }
-
- // ignore errors in deleting a single object, we only care about deleting the bucket
- // occassionally we get "storage: object doesn't exist" which is fine
- bucket.Object(objAttrs.Name).Delete(context.Background())
- }
-
- err := bucket.Delete(context.Background())
- if err != nil {
- t.Fatalf("error deleting bucket '%s': '%v'", bucketName, err)
- }
- }()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewGCSBackend(map[string]string{
- "bucket": bucketName,
- "credentials_file": credentialsFile,
- }, logger)
-
- if err != nil {
- t.Fatalf("error creating google cloud storage backend: '%s'", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/cache_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/cache_test.go
deleted file mode 100644
index c771f03..0000000
--- a/vendor/github.com/hashicorp/vault/physical/inmem/cache_test.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package inmem
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestCache(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- inm, err := NewInmem(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- cache := physical.NewCache(inm, 0, logger)
- physical.ExerciseBackend(t, cache)
- physical.ExerciseBackend_ListPrefix(t, cache)
-}
-
-func TestCache_Purge(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- inm, err := NewInmem(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- cache := physical.NewCache(inm, 0, logger)
-
- ent := &physical.Entry{
- Key: "foo",
- Value: []byte("bar"),
- }
- err = cache.Put(ent)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Delete from under
- inm.Delete("foo")
-
- // Read should work
- out, err := cache.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("should have key")
- }
-
- // Clear the cache
- cache.Purge()
-
- // Read should fail
- out, err = cache.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("should not have key")
- }
-}
-
-func TestCache_IgnoreCore(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- inm, err := NewInmem(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- cache := physical.NewCache(inm, 0, logger)
-
- var ent *physical.Entry
-
- // First try normal handling
- ent = &physical.Entry{
- Key: "foo",
- Value: []byte("bar"),
- }
- if err := cache.Put(ent); err != nil {
- t.Fatal(err)
- }
- ent = &physical.Entry{
- Key: "foo",
- Value: []byte("foobar"),
- }
- if err := inm.Put(ent); err != nil {
- t.Fatal(err)
- }
- ent, err = cache.Get("foo")
- if err != nil {
- t.Fatal(err)
- }
- if string(ent.Value) != "bar" {
- t.Fatal("expected cached value")
- }
-
- // Now try core path
- ent = &physical.Entry{
- Key: "core/foo",
- Value: []byte("bar"),
- }
- if err := cache.Put(ent); err != nil {
- t.Fatal(err)
- }
- ent = &physical.Entry{
- Key: "core/foo",
- Value: []byte("foobar"),
- }
- if err := inm.Put(ent); err != nil {
- t.Fatal(err)
- }
- ent, err = cache.Get("core/foo")
- if err != nil {
- t.Fatal(err)
- }
- if string(ent.Value) != "foobar" {
- t.Fatal("expected cached value")
- }
-
- // Now make sure looked-up values aren't added
- ent = &physical.Entry{
- Key: "core/zip",
- Value: []byte("zap"),
- }
- if err := inm.Put(ent); err != nil {
- t.Fatal(err)
- }
- ent, err = cache.Get("core/zip")
- if err != nil {
- t.Fatal(err)
- }
- if string(ent.Value) != "zap" {
- t.Fatal("expected non-cached value")
- }
- ent = &physical.Entry{
- Key: "core/zip",
- Value: []byte("zipzap"),
- }
- if err := inm.Put(ent); err != nil {
- t.Fatal(err)
- }
- ent, err = cache.Get("core/zip")
- if err != nil {
- t.Fatal(err)
- }
- if string(ent.Value) != "zipzap" {
- t.Fatal("expected non-cached value")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/inmem.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem.go
deleted file mode 100644
index d4f9201..0000000
--- a/vendor/github.com/hashicorp/vault/physical/inmem/inmem.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package inmem
-
-import (
- "strings"
- "sync"
-
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- "github.com/armon/go-radix"
-)
-
-// InmemBackend is an in-memory only physical backend. It is useful
-// for testing and development situations where the data is not
-// expected to be durable.
-type InmemBackend struct {
- sync.RWMutex
- root *radix.Tree
- permitPool *physical.PermitPool
- logger log.Logger
-}
-
-type TransactionalInmemBackend struct {
- InmemBackend
-}
-
-// NewInmem constructs a new in-memory backend
-func NewInmem(_ map[string]string, logger log.Logger) (physical.Backend, error) {
- in := &InmemBackend{
- root: radix.New(),
- permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
- logger: logger,
- }
- return in, nil
-}
-
-// Basically for now just creates a permit pool of size 1 so only one operation
-// can run at a time
-func NewTransactionalInmem(_ map[string]string, logger log.Logger) (physical.Backend, error) {
- in := &TransactionalInmemBackend{
- InmemBackend: InmemBackend{
- root: radix.New(),
- permitPool: physical.NewPermitPool(1),
- logger: logger,
- },
- }
- return in, nil
-}
-
-// Put is used to insert or update an entry
-func (i *InmemBackend) Put(entry *physical.Entry) error {
- i.permitPool.Acquire()
- defer i.permitPool.Release()
-
- i.Lock()
- defer i.Unlock()
-
- return i.PutInternal(entry)
-}
-
-func (i *InmemBackend) PutInternal(entry *physical.Entry) error {
- i.root.Insert(entry.Key, entry)
- return nil
-}
-
-// Get is used to fetch an entry
-func (i *InmemBackend) Get(key string) (*physical.Entry, error) {
- i.permitPool.Acquire()
- defer i.permitPool.Release()
-
- i.RLock()
- defer i.RUnlock()
-
- return i.GetInternal(key)
-}
-
-func (i *InmemBackend) GetInternal(key string) (*physical.Entry, error) {
- if raw, ok := i.root.Get(key); ok {
- return raw.(*physical.Entry), nil
- }
- return nil, nil
-}
-
-// Delete is used to permanently delete an entry
-func (i *InmemBackend) Delete(key string) error {
- i.permitPool.Acquire()
- defer i.permitPool.Release()
-
- i.Lock()
- defer i.Unlock()
-
- return i.DeleteInternal(key)
-}
-
-func (i *InmemBackend) DeleteInternal(key string) error {
- i.root.Delete(key)
- return nil
-}
-
-// List is used ot list all the keys under a given
-// prefix, up to the next prefix.
-func (i *InmemBackend) List(prefix string) ([]string, error) {
- i.permitPool.Acquire()
- defer i.permitPool.Release()
-
- i.RLock()
- defer i.RUnlock()
-
- return i.ListInternal(prefix)
-}
-
-func (i *InmemBackend) ListInternal(prefix string) ([]string, error) {
- var out []string
- seen := make(map[string]interface{})
- walkFn := func(s string, v interface{}) bool {
- trimmed := strings.TrimPrefix(s, prefix)
- sep := strings.Index(trimmed, "/")
- if sep == -1 {
- out = append(out, trimmed)
- } else {
- trimmed = trimmed[:sep+1]
- if _, ok := seen[trimmed]; !ok {
- out = append(out, trimmed)
- seen[trimmed] = struct{}{}
- }
- }
- return false
- }
- i.root.WalkPrefix(prefix, walkFn)
-
- return out, nil
-}
-
-// Implements the transaction interface
-func (t *TransactionalInmemBackend) Transaction(txns []physical.TxnEntry) error {
- t.permitPool.Acquire()
- defer t.permitPool.Release()
-
- t.Lock()
- defer t.Unlock()
-
- return physical.GenericTransactionHandler(t, txns)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go
deleted file mode 100644
index d322da2..0000000
--- a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package inmem
-
-import (
- "fmt"
- "sync"
-
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-type InmemHABackend struct {
- physical.Backend
- locks map[string]string
- l sync.Mutex
- cond *sync.Cond
- logger log.Logger
-}
-
-type TransactionalInmemHABackend struct {
- physical.Transactional
- InmemHABackend
-}
-
-// NewInmemHA constructs a new in-memory HA backend. This is only for testing.
-func NewInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) {
- be, err := NewInmem(nil, logger)
- if err != nil {
- return nil, err
- }
-
- in := &InmemHABackend{
- Backend: be,
- locks: make(map[string]string),
- logger: logger,
- }
- in.cond = sync.NewCond(&in.l)
- return in, nil
-}
-
-func NewTransactionalInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) {
- transInmem, err := NewTransactionalInmem(nil, logger)
- if err != nil {
- return nil, err
- }
- inmemHA := InmemHABackend{
- Backend: transInmem,
- locks: make(map[string]string),
- logger: logger,
- }
-
- in := &TransactionalInmemHABackend{
- InmemHABackend: inmemHA,
- Transactional: transInmem.(physical.Transactional),
- }
- in.cond = sync.NewCond(&in.l)
- return in, nil
-}
-
-// LockWith is used for mutual exclusion based on the given key.
-func (i *InmemHABackend) LockWith(key, value string) (physical.Lock, error) {
- l := &InmemLock{
- in: i,
- key: key,
- value: value,
- }
- return l, nil
-}
-
-// LockMapSize is used in some tests to determine whether this backend has ever
-// been used for HA purposes rather than simply for storage
-func (i *InmemHABackend) LockMapSize() int {
- return len(i.locks)
-}
-
-// HAEnabled indicates whether the HA functionality should be exposed.
-// Currently always returns true.
-func (i *InmemHABackend) HAEnabled() bool {
- return true
-}
-
-// InmemLock is an in-memory Lock implementation for the HABackend
-type InmemLock struct {
- in *InmemHABackend
- key string
- value string
-
- held bool
- leaderCh chan struct{}
- l sync.Mutex
-}
-
-func (i *InmemLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
- i.l.Lock()
- defer i.l.Unlock()
- if i.held {
- return nil, fmt.Errorf("lock already held")
- }
-
- // Attempt an async acquisition
- didLock := make(chan struct{})
- releaseCh := make(chan bool, 1)
- go func() {
- // Wait to acquire the lock
- i.in.l.Lock()
- _, ok := i.in.locks[i.key]
- for ok {
- i.in.cond.Wait()
- _, ok = i.in.locks[i.key]
- }
- i.in.locks[i.key] = i.value
- i.in.l.Unlock()
-
- // Signal that lock is held
- close(didLock)
-
- // Handle an early abort
- release := <-releaseCh
- if release {
- i.in.l.Lock()
- delete(i.in.locks, i.key)
- i.in.l.Unlock()
- i.in.cond.Broadcast()
- }
- }()
-
- // Wait for lock acquisition or shutdown
- select {
- case <-didLock:
- releaseCh <- false
- case <-stopCh:
- releaseCh <- true
- return nil, nil
- }
-
- // Create the leader channel
- i.held = true
- i.leaderCh = make(chan struct{})
- return i.leaderCh, nil
-}
-
-func (i *InmemLock) Unlock() error {
- i.l.Lock()
- defer i.l.Unlock()
-
- if !i.held {
- return nil
- }
-
- close(i.leaderCh)
- i.leaderCh = nil
- i.held = false
-
- i.in.l.Lock()
- delete(i.in.locks, i.key)
- i.in.l.Unlock()
- i.in.cond.Broadcast()
- return nil
-}
-
-func (i *InmemLock) Value() (bool, string, error) {
- i.in.l.Lock()
- val, ok := i.in.locks[i.key]
- i.in.l.Unlock()
- return ok, val, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha_test.go
deleted file mode 100644
index 8288595..0000000
--- a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package inmem
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestInmemHA(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- inm, err := NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- physical.ExerciseHABackend(t, inm.(physical.HABackend), inm.(physical.HABackend))
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_test.go
deleted file mode 100644
index 998061b..0000000
--- a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_test.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package inmem
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestInmem(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- inm, err := NewInmem(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- physical.ExerciseBackend(t, inm)
- physical.ExerciseBackend_ListPrefix(t, inm)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/physical_view_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/physical_view_test.go
deleted file mode 100644
index 719642a..0000000
--- a/vendor/github.com/hashicorp/vault/physical/inmem/physical_view_test.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package inmem
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestPhysicalView_impl(t *testing.T) {
- var _ physical.Backend = new(physical.View)
-}
-
-func newInmemTestBackend() (physical.Backend, error) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
- return NewInmem(nil, logger)
-}
-
-func TestPhysicalView_BadKeysKeys(t *testing.T) {
- backend, err := newInmemTestBackend()
- if err != nil {
- t.Fatal(err)
- }
- view := physical.NewView(backend, "foo/")
-
- _, err = view.List("../")
- if err == nil {
- t.Fatalf("expected error")
- }
-
- _, err = view.Get("../")
- if err == nil {
- t.Fatalf("expected error")
- }
-
- err = view.Delete("../foo")
- if err == nil {
- t.Fatalf("expected error")
- }
-
- le := &physical.Entry{
- Key: "../foo",
- Value: []byte("test"),
- }
- err = view.Put(le)
- if err == nil {
- t.Fatalf("expected error")
- }
-}
-
-func TestPhysicalView(t *testing.T) {
- backend, err := newInmemTestBackend()
- if err != nil {
- t.Fatal(err)
- }
-
- view := physical.NewView(backend, "foo/")
-
- // Write a key outside of foo/
- entry := &physical.Entry{Key: "test", Value: []byte("test")}
- if err := backend.Put(entry); err != nil {
- t.Fatalf("bad: %v", err)
- }
-
- // List should have no visibility
- keys, err := view.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 0 {
- t.Fatalf("bad: %v", err)
- }
-
- // Get should have no visibility
- out, err := view.Get("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Try to put the same entry via the view
- if err := view.Put(entry); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check it is nested
- entry, err = backend.Get("foo/test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if entry == nil {
- t.Fatalf("missing nested foo/test")
- }
-
- // Delete nested
- if err := view.Delete("test"); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check the nested key
- entry, err = backend.Get("foo/test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if entry != nil {
- t.Fatalf("nested foo/test should be gone")
- }
-
- // Check the non-nested key
- entry, err = backend.Get("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if entry == nil {
- t.Fatalf("root test missing")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/transactions_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/transactions_test.go
deleted file mode 100644
index 5565fbe..0000000
--- a/vendor/github.com/hashicorp/vault/physical/inmem/transactions_test.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package inmem
-
-import (
- "fmt"
- "reflect"
- "sort"
- "testing"
-
- radix "github.com/armon/go-radix"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-type faultyPseudo struct {
- underlying InmemBackend
- faultyPaths map[string]struct{}
-}
-
-func (f *faultyPseudo) Get(key string) (*physical.Entry, error) {
- return f.underlying.Get(key)
-}
-
-func (f *faultyPseudo) Put(entry *physical.Entry) error {
- return f.underlying.Put(entry)
-}
-
-func (f *faultyPseudo) Delete(key string) error {
- return f.underlying.Delete(key)
-}
-
-func (f *faultyPseudo) GetInternal(key string) (*physical.Entry, error) {
- if _, ok := f.faultyPaths[key]; ok {
- return nil, fmt.Errorf("fault")
- }
- return f.underlying.GetInternal(key)
-}
-
-func (f *faultyPseudo) PutInternal(entry *physical.Entry) error {
- if _, ok := f.faultyPaths[entry.Key]; ok {
- return fmt.Errorf("fault")
- }
- return f.underlying.PutInternal(entry)
-}
-
-func (f *faultyPseudo) DeleteInternal(key string) error {
- if _, ok := f.faultyPaths[key]; ok {
- return fmt.Errorf("fault")
- }
- return f.underlying.DeleteInternal(key)
-}
-
-func (f *faultyPseudo) List(prefix string) ([]string, error) {
- return f.underlying.List(prefix)
-}
-
-func (f *faultyPseudo) Transaction(txns []physical.TxnEntry) error {
- f.underlying.permitPool.Acquire()
- defer f.underlying.permitPool.Release()
-
- f.underlying.Lock()
- defer f.underlying.Unlock()
-
- return physical.GenericTransactionHandler(f, txns)
-}
-
-func newFaultyPseudo(logger log.Logger, faultyPaths []string) *faultyPseudo {
- out := &faultyPseudo{
- underlying: InmemBackend{
- root: radix.New(),
- permitPool: physical.NewPermitPool(1),
- logger: logger,
- },
- faultyPaths: make(map[string]struct{}, len(faultyPaths)),
- }
- for _, v := range faultyPaths {
- out.faultyPaths[v] = struct{}{}
- }
- return out
-}
-
-func TestPseudo_Basic(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
- p := newFaultyPseudo(logger, nil)
- physical.ExerciseBackend(t, p)
- physical.ExerciseBackend_ListPrefix(t, p)
-}
-
-func TestPseudo_SuccessfulTransaction(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
- p := newFaultyPseudo(logger, nil)
-
- physical.ExerciseTransactionalBackend(t, p)
-}
-
-func TestPseudo_FailedTransaction(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
- p := newFaultyPseudo(logger, []string{"zip"})
-
- txns := physical.SetupTestingTransactions(t, p)
- if err := p.Transaction(txns); err == nil {
- t.Fatal("expected error during transaction")
- }
-
- keys, err := p.List("")
- if err != nil {
- t.Fatal(err)
- }
-
- expected := []string{"foo", "zip", "deleteme", "deleteme2"}
-
- sort.Strings(keys)
- sort.Strings(expected)
- if !reflect.DeepEqual(keys, expected) {
- t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys)
- }
-
- entry, err := p.Get("foo")
- if err != nil {
- t.Fatal(err)
- }
- if entry == nil {
- t.Fatal("got nil entry")
- }
- if entry.Value == nil {
- t.Fatal("got nil value")
- }
- if string(entry.Value) != "bar" {
- t.Fatal("values did not rollback correctly")
- }
-
- entry, err = p.Get("zip")
- if err != nil {
- t.Fatal(err)
- }
- if entry == nil {
- t.Fatal("got nil entry")
- }
- if entry.Value == nil {
- t.Fatal("got nil value")
- }
- if string(entry.Value) != "zap" {
- t.Fatal("values did not rollback correctly")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/latency.go b/vendor/github.com/hashicorp/vault/physical/latency.go
deleted file mode 100644
index 3253036..0000000
--- a/vendor/github.com/hashicorp/vault/physical/latency.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package physical
-
-import (
- "math/rand"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-)
-
-const (
- // DefaultJitterPercent is used if no cache size is specified for NewCache
- DefaultJitterPercent = 20
-)
-
-// LatencyInjector is used to add latency into underlying physical requests
-type LatencyInjector struct {
- backend Backend
- latency time.Duration
- jitterPercent int
- random *rand.Rand
-}
-
-// TransactionalLatencyInjector is the transactional version of the latency
-// injector
-type TransactionalLatencyInjector struct {
- *LatencyInjector
- Transactional
-}
-
-// NewLatencyInjector returns a wrapped physical backend to simulate latency
-func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector {
- if jitter < 0 || jitter > 100 {
- jitter = DefaultJitterPercent
- }
- logger.Info("physical/latency: creating latency injector")
-
- return &LatencyInjector{
- backend: b,
- latency: latency,
- jitterPercent: jitter,
- random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
- }
-}
-
-// NewTransactionalLatencyInjector creates a new transactional LatencyInjector
-func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector {
- return &TransactionalLatencyInjector{
- LatencyInjector: NewLatencyInjector(b, latency, jitter, logger),
- Transactional: b.(Transactional),
- }
-}
-
-func (l *LatencyInjector) addLatency() {
- // Calculate a value between 1 +- jitter%
- min := 100 - l.jitterPercent
- max := 100 + l.jitterPercent
- percent := l.random.Intn(max-min) + min
- latencyDuration := time.Duration(int(l.latency) * percent / 100)
- time.Sleep(latencyDuration)
-}
-
-// Put is a latent put request
-func (l *LatencyInjector) Put(entry *Entry) error {
- l.addLatency()
- return l.backend.Put(entry)
-}
-
-// Get is a latent get request
-func (l *LatencyInjector) Get(key string) (*Entry, error) {
- l.addLatency()
- return l.backend.Get(key)
-}
-
-// Delete is a latent delete request
-func (l *LatencyInjector) Delete(key string) error {
- l.addLatency()
- return l.backend.Delete(key)
-}
-
-// List is a latent list request
-func (l *LatencyInjector) List(prefix string) ([]string, error) {
- l.addLatency()
- return l.backend.List(prefix)
-}
-
-// Transaction is a latent transaction request
-func (l *TransactionalLatencyInjector) Transaction(txns []TxnEntry) error {
- l.addLatency()
- return l.Transactional.Transaction(txns)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/mssql/mssql.go b/vendor/github.com/hashicorp/vault/physical/mssql/mssql.go
deleted file mode 100644
index 16228d6..0000000
--- a/vendor/github.com/hashicorp/vault/physical/mssql/mssql.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package mssql
-
-import (
- "database/sql"
- "fmt"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/armon/go-metrics"
- _ "github.com/denisenkom/go-mssqldb"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-)
-
-type MSSQLBackend struct {
- dbTable string
- client *sql.DB
- statements map[string]*sql.Stmt
- logger log.Logger
- permitPool *physical.PermitPool
-}
-
-func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- username, ok := conf["username"]
- if !ok {
- username = ""
- }
-
- password, ok := conf["password"]
- if !ok {
- password = ""
- }
-
- server, ok := conf["server"]
- if !ok || server == "" {
- return nil, fmt.Errorf("missing server")
- }
-
- maxParStr, ok := conf["max_parallel"]
- var maxParInt int
- var err error
- if ok {
- maxParInt, err = strconv.Atoi(maxParStr)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
- }
- if logger.IsDebug() {
- logger.Debug("mysql: max_parallel set", "max_parallel", maxParInt)
- }
- } else {
- maxParInt = physical.DefaultParallelOperations
- }
-
- database, ok := conf["database"]
- if !ok {
- database = "Vault"
- }
-
- table, ok := conf["table"]
- if !ok {
- table = "Vault"
- }
-
- appname, ok := conf["appname"]
- if !ok {
- appname = "Vault"
- }
-
- connectionTimeout, ok := conf["connectiontimeout"]
- if !ok {
- connectionTimeout = "30"
- }
-
- logLevel, ok := conf["loglevel"]
- if !ok {
- logLevel = "0"
- }
-
- schema, ok := conf["schema"]
- if !ok || schema == "" {
- schema = "dbo"
- }
-
- connectionString := fmt.Sprintf("server=%s;app name=%s;connection timeout=%s;log=%s", server, appname, connectionTimeout, logLevel)
- if username != "" {
- connectionString += ";user id=" + username
- }
-
- if password != "" {
- connectionString += ";password=" + password
- }
-
- db, err := sql.Open("mssql", connectionString)
- if err != nil {
- return nil, fmt.Errorf("failed to connect to mssql: %v", err)
- }
-
- db.SetMaxOpenConns(maxParInt)
-
- if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = '" + database + "') CREATE DATABASE " + database); err != nil {
- return nil, fmt.Errorf("failed to create mssql database: %v", err)
- }
-
- dbTable := database + "." + schema + "." + table
- createQuery := "IF NOT EXISTS(SELECT 1 FROM " + database + ".INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_NAME='" + table + "' AND TABLE_SCHEMA='" + schema +
- "') CREATE TABLE " + dbTable + " (Path VARCHAR(512) PRIMARY KEY, Value VARBINARY(MAX))"
-
- if schema != "dbo" {
- if _, err := db.Exec("USE " + database); err != nil {
- return nil, fmt.Errorf("failed to switch mssql database: %v", err)
- }
-
- var num int
- err = db.QueryRow("SELECT 1 FROM sys.schemas WHERE name = '" + schema + "'").Scan(&num)
-
- switch {
- case err == sql.ErrNoRows:
- if _, err := db.Exec("CREATE SCHEMA " + schema); err != nil {
- return nil, fmt.Errorf("failed to create mssql schema: %v", err)
- }
-
- case err != nil:
- return nil, fmt.Errorf("failed to check if mssql schema exists: %v", err)
- }
- }
-
- if _, err := db.Exec(createQuery); err != nil {
- return nil, fmt.Errorf("failed to create mssql table: %v", err)
- }
-
- m := &MSSQLBackend{
- dbTable: dbTable,
- client: db,
- statements: make(map[string]*sql.Stmt),
- logger: logger,
- permitPool: physical.NewPermitPool(maxParInt),
- }
-
- statements := map[string]string{
- "put": "IF EXISTS(SELECT 1 FROM " + dbTable + " WHERE Path = ?) UPDATE " + dbTable + " SET Value = ? WHERE Path = ?" +
- " ELSE INSERT INTO " + dbTable + " VALUES(?, ?)",
- "get": "SELECT Value FROM " + dbTable + " WHERE Path = ?",
- "delete": "DELETE FROM " + dbTable + " WHERE Path = ?",
- "list": "SELECT Path FROM " + dbTable + " WHERE Path LIKE ?",
- }
-
- for name, query := range statements {
- if err := m.prepare(name, query); err != nil {
- return nil, err
- }
- }
-
- return m, nil
-}
-
-func (m *MSSQLBackend) prepare(name, query string) error {
- stmt, err := m.client.Prepare(query)
- if err != nil {
- return fmt.Errorf("failed to prepare '%s': %v", name, err)
- }
-
- m.statements[name] = stmt
-
- return nil
-}
-
-func (m *MSSQLBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"mssql", "put"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- _, err := m.statements["put"].Exec(entry.Key, entry.Value, entry.Key, entry.Key, entry.Value)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *MSSQLBackend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"mssql", "get"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- var result []byte
- err := m.statements["get"].QueryRow(key).Scan(&result)
- if err == sql.ErrNoRows {
- return nil, nil
- }
-
- if err != nil {
- return nil, err
- }
-
- ent := &physical.Entry{
- Key: key,
- Value: result,
- }
-
- return ent, nil
-}
-
-func (m *MSSQLBackend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"mssql", "delete"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- _, err := m.statements["delete"].Exec(key)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *MSSQLBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"mssql", "list"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- likePrefix := prefix + "%"
- rows, err := m.statements["list"].Query(likePrefix)
- if err != nil {
- return nil, err
- }
- var keys []string
- for rows.Next() {
- var key string
- err = rows.Scan(&key)
- if err != nil {
- return nil, fmt.Errorf("failed to scan rows: %v", err)
- }
-
- key = strings.TrimPrefix(key, prefix)
- if i := strings.Index(key, "/"); i == -1 {
- keys = append(keys, key)
- } else if i != -1 {
- keys = strutil.AppendIfMissing(keys, string(key[:i+1]))
- }
- }
-
- sort.Strings(keys)
-
- return keys, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/mssql/mssql_test.go b/vendor/github.com/hashicorp/vault/physical/mssql/mssql_test.go
deleted file mode 100644
index 7e1446e..0000000
--- a/vendor/github.com/hashicorp/vault/physical/mssql/mssql_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package mssql
-
-import (
- "os"
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- _ "github.com/denisenkom/go-mssqldb"
-)
-
-func TestMSSQLBackend(t *testing.T) {
- server := os.Getenv("MSSQL_SERVER")
- if server == "" {
- t.SkipNow()
- }
-
- database := os.Getenv("MSSQL_DB")
- if database == "" {
- database = "test"
- }
-
- table := os.Getenv("MSSQL_TABLE")
- if table == "" {
- table = "test"
- }
-
- username := os.Getenv("MSSQL_USERNAME")
- password := os.Getenv("MSSQL_PASSWORD")
-
- // Run vault tests
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewMSSQLBackend(map[string]string{
- "server": server,
- "database": database,
- "table": table,
- "username": username,
- "password": password,
- }, logger)
-
- if err != nil {
- t.Fatalf("Failed to create new backend: %v", err)
- }
-
- defer func() {
- mssql := b.(*MSSQLBackend)
- _, err := mssql.client.Exec("DROP TABLE " + mssql.dbTable)
- if err != nil {
- t.Fatalf("Failed to drop table: %v", err)
- }
- }()
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/mysql/mysql.go b/vendor/github.com/hashicorp/vault/physical/mysql/mysql.go
deleted file mode 100644
index 87daa9a..0000000
--- a/vendor/github.com/hashicorp/vault/physical/mysql/mysql.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package mysql
-
-import (
- "crypto/tls"
- "crypto/x509"
- "database/sql"
- "fmt"
- "io/ioutil"
- "net/url"
- "sort"
- "strconv"
- "strings"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/armon/go-metrics"
- mysql "github.com/go-sql-driver/mysql"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/physical"
-)
-
-// Unreserved tls key
-// Reserved values are "true", "false", "skip-verify"
-const mysqlTLSKey = "default"
-
-// MySQLBackend is a physical backend that stores data
-// within MySQL database.
-type MySQLBackend struct {
- dbTable string
- client *sql.DB
- statements map[string]*sql.Stmt
- logger log.Logger
- permitPool *physical.PermitPool
-}
-
-// NewMySQLBackend constructs a MySQL backend using the given API client and
-// server address and credential for accessing mysql database.
-func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- var err error
-
- // Get the MySQL credentials to perform read/write operations.
- username, ok := conf["username"]
- if !ok || username == "" {
- return nil, fmt.Errorf("missing username")
- }
- password, ok := conf["password"]
- if !ok || username == "" {
- return nil, fmt.Errorf("missing password")
- }
-
- // Get or set MySQL server address. Defaults to localhost and default port(3306)
- address, ok := conf["address"]
- if !ok {
- address = "127.0.0.1:3306"
- }
-
- // Get the MySQL database and table details.
- database, ok := conf["database"]
- if !ok {
- database = "vault"
- }
- table, ok := conf["table"]
- if !ok {
- table = "vault"
- }
- dbTable := database + "." + table
-
- maxParStr, ok := conf["max_parallel"]
- var maxParInt int
- if ok {
- maxParInt, err = strconv.Atoi(maxParStr)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
- }
- if logger.IsDebug() {
- logger.Debug("mysql: max_parallel set", "max_parallel", maxParInt)
- }
- } else {
- maxParInt = physical.DefaultParallelOperations
- }
-
- dsnParams := url.Values{}
- tlsCaFile, ok := conf["tls_ca_file"]
- if ok {
- if err := setupMySQLTLSConfig(tlsCaFile); err != nil {
- return nil, fmt.Errorf("failed register TLS config: %v", err)
- }
-
- dsnParams.Add("tls", mysqlTLSKey)
- }
-
- // Create MySQL handle for the database.
- dsn := username + ":" + password + "@tcp(" + address + ")/?" + dsnParams.Encode()
- db, err := sql.Open("mysql", dsn)
- if err != nil {
- return nil, fmt.Errorf("failed to connect to mysql: %v", err)
- }
-
- db.SetMaxOpenConns(maxParInt)
-
- // Create the required database if it doesn't exists.
- if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS " + database); err != nil {
- return nil, fmt.Errorf("failed to create mysql database: %v", err)
- }
-
- // Create the required table if it doesn't exists.
- create_query := "CREATE TABLE IF NOT EXISTS " + dbTable +
- " (vault_key varbinary(512), vault_value mediumblob, PRIMARY KEY (vault_key))"
- if _, err := db.Exec(create_query); err != nil {
- return nil, fmt.Errorf("failed to create mysql table: %v", err)
- }
-
- // Setup the backend.
- m := &MySQLBackend{
- dbTable: dbTable,
- client: db,
- statements: make(map[string]*sql.Stmt),
- logger: logger,
- permitPool: physical.NewPermitPool(maxParInt),
- }
-
- // Prepare all the statements required
- statements := map[string]string{
- "put": "INSERT INTO " + dbTable +
- " VALUES( ?, ? ) ON DUPLICATE KEY UPDATE vault_value=VALUES(vault_value)",
- "get": "SELECT vault_value FROM " + dbTable + " WHERE vault_key = ?",
- "delete": "DELETE FROM " + dbTable + " WHERE vault_key = ?",
- "list": "SELECT vault_key FROM " + dbTable + " WHERE vault_key LIKE ?",
- }
- for name, query := range statements {
- if err := m.prepare(name, query); err != nil {
- return nil, err
- }
- }
-
- return m, nil
-}
-
-// prepare is a helper to prepare a query for future execution
-func (m *MySQLBackend) prepare(name, query string) error {
- stmt, err := m.client.Prepare(query)
- if err != nil {
- return fmt.Errorf("failed to prepare '%s': %v", name, err)
- }
- m.statements[name] = stmt
- return nil
-}
-
-// Put is used to insert or update an entry.
-func (m *MySQLBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"mysql", "put"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- _, err := m.statements["put"].Exec(entry.Key, entry.Value)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Get is used to fetch and entry.
-func (m *MySQLBackend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"mysql", "get"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- var result []byte
- err := m.statements["get"].QueryRow(key).Scan(&result)
- if err == sql.ErrNoRows {
- return nil, nil
- }
- if err != nil {
- return nil, err
- }
-
- ent := &physical.Entry{
- Key: key,
- Value: result,
- }
- return ent, nil
-}
-
-// Delete is used to permanently delete an entry
-func (m *MySQLBackend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"mysql", "delete"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- _, err := m.statements["delete"].Exec(key)
- if err != nil {
- return err
- }
- return nil
-}
-
-// List is used to list all the keys under a given
-// prefix, up to the next prefix.
-func (m *MySQLBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"mysql", "list"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- // Add the % wildcard to the prefix to do the prefix search
- likePrefix := prefix + "%"
- rows, err := m.statements["list"].Query(likePrefix)
- if err != nil {
- return nil, fmt.Errorf("failed to execute statement: %v", err)
- }
-
- var keys []string
- for rows.Next() {
- var key string
- err = rows.Scan(&key)
- if err != nil {
- return nil, fmt.Errorf("failed to scan rows: %v", err)
- }
-
- key = strings.TrimPrefix(key, prefix)
- if i := strings.Index(key, "/"); i == -1 {
- // Add objects only from the current 'folder'
- keys = append(keys, key)
- } else if i != -1 {
- // Add truncated 'folder' paths
- keys = strutil.AppendIfMissing(keys, string(key[:i+1]))
- }
- }
-
- sort.Strings(keys)
- return keys, nil
-}
-
-// Establish a TLS connection with a given CA certificate
-// Register a tsl.Config associted with the same key as the dns param from sql.Open
-// foo:bar@tcp(127.0.0.1:3306)/dbname?tls=default
-func setupMySQLTLSConfig(tlsCaFile string) error {
- rootCertPool := x509.NewCertPool()
-
- pem, err := ioutil.ReadFile(tlsCaFile)
- if err != nil {
- return err
- }
-
- if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
- return err
- }
-
- err = mysql.RegisterTLSConfig(mysqlTLSKey, &tls.Config{
- RootCAs: rootCertPool,
- })
- if err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/mysql/mysql_test.go b/vendor/github.com/hashicorp/vault/physical/mysql/mysql_test.go
deleted file mode 100644
index ecf8431..0000000
--- a/vendor/github.com/hashicorp/vault/physical/mysql/mysql_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package mysql
-
-import (
- "os"
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- _ "github.com/go-sql-driver/mysql"
-)
-
-func TestMySQLBackend(t *testing.T) {
- address := os.Getenv("MYSQL_ADDR")
- if address == "" {
- t.SkipNow()
- }
-
- database := os.Getenv("MYSQL_DB")
- if database == "" {
- database = "test"
- }
-
- table := os.Getenv("MYSQL_TABLE")
- if table == "" {
- table = "test"
- }
-
- username := os.Getenv("MYSQL_USERNAME")
- password := os.Getenv("MYSQL_PASSWORD")
-
- // Run vault tests
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewMySQLBackend(map[string]string{
- "address": address,
- "database": database,
- "table": table,
- "username": username,
- "password": password,
- }, logger)
-
- if err != nil {
- t.Fatalf("Failed to create new backend: %v", err)
- }
-
- defer func() {
- mysql := b.(*MySQLBackend)
- _, err := mysql.client.Exec("DROP TABLE " + mysql.dbTable)
- if err != nil {
- t.Fatalf("Failed to drop table: %v", err)
- }
- }()
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/physical.go b/vendor/github.com/hashicorp/vault/physical/physical.go
deleted file mode 100644
index 088a86b..0000000
--- a/vendor/github.com/hashicorp/vault/physical/physical.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package physical
-
-import (
- "strings"
- "sync"
-
- log "github.com/mgutz/logxi/v1"
-)
-
-const DefaultParallelOperations = 128
-
-// The operation type
-type Operation string
-
-const (
- DeleteOperation Operation = "delete"
- GetOperation = "get"
- ListOperation = "list"
- PutOperation = "put"
-)
-
-// ShutdownSignal
-type ShutdownChannel chan struct{}
-
-// Backend is the interface required for a physical
-// backend. A physical backend is used to durably store
-// data outside of Vault. As such, it is completely untrusted,
-// and is only accessed via a security barrier. The backends
-// must represent keys in a hierarchical manner. All methods
-// are expected to be thread safe.
-type Backend interface {
- // Put is used to insert or update an entry
- Put(entry *Entry) error
-
- // Get is used to fetch an entry
- Get(key string) (*Entry, error)
-
- // Delete is used to permanently delete an entry
- Delete(key string) error
-
- // List is used ot list all the keys under a given
- // prefix, up to the next prefix.
- List(prefix string) ([]string, error)
-}
-
-// HABackend is an extensions to the standard physical
-// backend to support high-availability. Vault only expects to
-// use mutual exclusion to allow multiple instances to act as a
-// hot standby for a leader that services all requests.
-type HABackend interface {
- // LockWith is used for mutual exclusion based on the given key.
- LockWith(key, value string) (Lock, error)
-
- // Whether or not HA functionality is enabled
- HAEnabled() bool
-}
-
-// Purgable is an optional interface for backends that support
-// purging of their caches.
-type Purgable interface {
- Purge()
-}
-
-// RedirectDetect is an optional interface that an HABackend
-// can implement. If they do, a redirect address can be automatically
-// detected.
-type RedirectDetect interface {
- // DetectHostAddr is used to detect the host address
- DetectHostAddr() (string, error)
-}
-
-// Callback signatures for RunServiceDiscovery
-type ActiveFunction func() bool
-type SealedFunction func() bool
-
-// ServiceDiscovery is an optional interface that an HABackend can implement.
-// If they do, the state of a backend is advertised to the service discovery
-// network.
-type ServiceDiscovery interface {
- // NotifyActiveStateChange is used by Core to notify a backend
- // capable of ServiceDiscovery that this Vault instance has changed
- // its status to active or standby.
- NotifyActiveStateChange() error
-
- // NotifySealedStateChange is used by Core to notify a backend
- // capable of ServiceDiscovery that Vault has changed its Sealed
- // status to sealed or unsealed.
- NotifySealedStateChange() error
-
- // Run executes any background service discovery tasks until the
- // shutdown channel is closed.
- RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc ActiveFunction, sealedFunc SealedFunction) error
-}
-
-type Lock interface {
- // Lock is used to acquire the given lock
- // The stopCh is optional and if closed should interrupt the lock
- // acquisition attempt. The return struct should be closed when
- // leadership is lost.
- Lock(stopCh <-chan struct{}) (<-chan struct{}, error)
-
- // Unlock is used to release the lock
- Unlock() error
-
- // Returns the value of the lock and if it is held
- Value() (bool, string, error)
-}
-
-// Entry is used to represent data stored by the physical backend
-type Entry struct {
- Key string
- Value []byte
-}
-
-// Factory is the factory function to create a physical backend.
-type Factory func(config map[string]string, logger log.Logger) (Backend, error)
-
-// PermitPool is used to limit maximum outstanding requests
-type PermitPool struct {
- sem chan int
-}
-
-// NewPermitPool returns a new permit pool with the provided
-// number of permits
-func NewPermitPool(permits int) *PermitPool {
- if permits < 1 {
- permits = DefaultParallelOperations
- }
- return &PermitPool{
- sem: make(chan int, permits),
- }
-}
-
-// Acquire returns when a permit has been acquired
-func (c *PermitPool) Acquire() {
- c.sem <- 1
-}
-
-// Release returns a permit to the pool
-func (c *PermitPool) Release() {
- <-c.sem
-}
-
-// Prefixes is a shared helper function returns all parent 'folders' for a
-// given vault key.
-// e.g. for 'foo/bar/baz', it returns ['foo', 'foo/bar']
-func Prefixes(s string) []string {
- components := strings.Split(s, "/")
- result := []string{}
- for i := 1; i < len(components); i++ {
- result = append(result, strings.Join(components[:i], "/"))
- }
- return result
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/physical_view.go b/vendor/github.com/hashicorp/vault/physical/physical_view.go
deleted file mode 100644
index 38c16e5..0000000
--- a/vendor/github.com/hashicorp/vault/physical/physical_view.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package physical
-
-import (
- "errors"
- "strings"
-)
-
-var (
- ErrRelativePath = errors.New("relative paths not supported")
-)
-
-// View represents a prefixed view of a physical backend
-type View struct {
- backend Backend
- prefix string
-}
-
-// NewView takes an underlying physical backend and returns
-// a view of it that can only operate with the given prefix.
-func NewView(backend Backend, prefix string) *View {
- return &View{
- backend: backend,
- prefix: prefix,
- }
-}
-
-// List the contents of the prefixed view
-func (v *View) List(prefix string) ([]string, error) {
- if err := v.sanityCheck(prefix); err != nil {
- return nil, err
- }
- return v.backend.List(v.expandKey(prefix))
-}
-
-// Get the key of the prefixed view
-func (v *View) Get(key string) (*Entry, error) {
- if err := v.sanityCheck(key); err != nil {
- return nil, err
- }
- entry, err := v.backend.Get(v.expandKey(key))
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
- if entry != nil {
- entry.Key = v.truncateKey(entry.Key)
- }
-
- return &Entry{
- Key: entry.Key,
- Value: entry.Value,
- }, nil
-}
-
-// Put the entry into the prefix view
-func (v *View) Put(entry *Entry) error {
- if err := v.sanityCheck(entry.Key); err != nil {
- return err
- }
-
- nested := &Entry{
- Key: v.expandKey(entry.Key),
- Value: entry.Value,
- }
- return v.backend.Put(nested)
-}
-
-// Delete the entry from the prefix view
-func (v *View) Delete(key string) error {
- if err := v.sanityCheck(key); err != nil {
- return err
- }
- return v.backend.Delete(v.expandKey(key))
-}
-
-// sanityCheck is used to perform a sanity check on a key
-func (v *View) sanityCheck(key string) error {
- if strings.Contains(key, "..") {
- return ErrRelativePath
- }
- return nil
-}
-
-// expandKey is used to expand to the full key path with the prefix
-func (v *View) expandKey(suffix string) string {
- return v.prefix + suffix
-}
-
-// truncateKey is used to remove the prefix of the key
-func (v *View) truncateKey(full string) string {
- return strings.TrimPrefix(full, v.prefix)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql.go b/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql.go
deleted file mode 100644
index cb35782..0000000
--- a/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package postgresql
-
-import (
- "database/sql"
- "fmt"
- "strconv"
- "strings"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- "github.com/armon/go-metrics"
- "github.com/lib/pq"
-)
-
-// PostgreSQL Backend is a physical backend that stores data
-// within a PostgreSQL database.
-type PostgreSQLBackend struct {
- table string
- client *sql.DB
- put_query string
- get_query string
- delete_query string
- list_query string
- logger log.Logger
- permitPool *physical.PermitPool
-}
-
-// NewPostgreSQLBackend constructs a PostgreSQL backend using the given
-// API client, server address, credentials, and database.
-func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- // Get the PostgreSQL credentials to perform read/write operations.
- connURL, ok := conf["connection_url"]
- if !ok || connURL == "" {
- return nil, fmt.Errorf("missing connection_url")
- }
-
- unquoted_table, ok := conf["table"]
- if !ok {
- unquoted_table = "vault_kv_store"
- }
- quoted_table := pq.QuoteIdentifier(unquoted_table)
-
- maxParStr, ok := conf["max_parallel"]
- var maxParInt int
- var err error
- if ok {
- maxParInt, err = strconv.Atoi(maxParStr)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
- }
- if logger.IsDebug() {
- logger.Debug("postgres: max_parallel set", "max_parallel", maxParInt)
- }
- } else {
- maxParInt = physical.DefaultParallelOperations
- }
-
- // Create PostgreSQL handle for the database.
- db, err := sql.Open("postgres", connURL)
- if err != nil {
- return nil, fmt.Errorf("failed to connect to postgres: %v", err)
- }
- db.SetMaxOpenConns(maxParInt)
-
- // Determine if we should use an upsert function (versions < 9.5)
- var upsert_required bool
- upsert_required_query := "SELECT string_to_array(setting, '.')::int[] < '{9,5}' FROM pg_settings WHERE name = 'server_version'"
- if err := db.QueryRow(upsert_required_query).Scan(&upsert_required); err != nil {
- return nil, fmt.Errorf("failed to check for native upsert: %v", err)
- }
-
- // Setup our put strategy based on the presence or absence of a native
- // upsert.
- var put_query string
- if upsert_required {
- put_query = "SELECT vault_kv_put($1, $2, $3, $4)"
- } else {
- put_query = "INSERT INTO " + quoted_table + " VALUES($1, $2, $3, $4)" +
- " ON CONFLICT (path, key) DO " +
- " UPDATE SET (parent_path, path, key, value) = ($1, $2, $3, $4)"
- }
-
- // Setup the backend.
- m := &PostgreSQLBackend{
- table: quoted_table,
- client: db,
- put_query: put_query,
- get_query: "SELECT value FROM " + quoted_table + " WHERE path = $1 AND key = $2",
- delete_query: "DELETE FROM " + quoted_table + " WHERE path = $1 AND key = $2",
- list_query: "SELECT key FROM " + quoted_table + " WHERE path = $1" +
- "UNION SELECT DISTINCT substring(substr(path, length($1)+1) from '^.*?/') FROM " +
- quoted_table + " WHERE parent_path LIKE $1 || '%'",
- logger: logger,
- permitPool: physical.NewPermitPool(maxParInt),
- }
-
- return m, nil
-}
-
-// splitKey is a helper to split a full path key into individual
-// parts: parentPath, path, key
-func (m *PostgreSQLBackend) splitKey(fullPath string) (string, string, string) {
- var parentPath string
- var path string
-
- pieces := strings.Split(fullPath, "/")
- depth := len(pieces)
- key := pieces[depth-1]
-
- if depth == 1 {
- parentPath = ""
- path = "/"
- } else if depth == 2 {
- parentPath = "/"
- path = "/" + pieces[0] + "/"
- } else {
- parentPath = "/" + strings.Join(pieces[:depth-2], "/") + "/"
- path = "/" + strings.Join(pieces[:depth-1], "/") + "/"
- }
-
- return parentPath, path, key
-}
-
-// Put is used to insert or update an entry.
-func (m *PostgreSQLBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"postgres", "put"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- parentPath, path, key := m.splitKey(entry.Key)
-
- _, err := m.client.Exec(m.put_query, parentPath, path, key, entry.Value)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Get is used to fetch and entry.
-func (m *PostgreSQLBackend) Get(fullPath string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"postgres", "get"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- _, path, key := m.splitKey(fullPath)
-
- var result []byte
- err := m.client.QueryRow(m.get_query, path, key).Scan(&result)
- if err == sql.ErrNoRows {
- return nil, nil
- }
- if err != nil {
- return nil, err
- }
-
- ent := &physical.Entry{
- Key: key,
- Value: result,
- }
- return ent, nil
-}
-
-// Delete is used to permanently delete an entry
-func (m *PostgreSQLBackend) Delete(fullPath string) error {
- defer metrics.MeasureSince([]string{"postgres", "delete"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- _, path, key := m.splitKey(fullPath)
-
- _, err := m.client.Exec(m.delete_query, path, key)
- if err != nil {
- return err
- }
- return nil
-}
-
-// List is used to list all the keys under a given
-// prefix, up to the next prefix.
-func (m *PostgreSQLBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"postgres", "list"}, time.Now())
-
- m.permitPool.Acquire()
- defer m.permitPool.Release()
-
- rows, err := m.client.Query(m.list_query, "/"+prefix)
- if err != nil {
- return nil, err
- }
- defer rows.Close()
-
- var keys []string
- for rows.Next() {
- var key string
- err = rows.Scan(&key)
- if err != nil {
- return nil, fmt.Errorf("failed to scan rows: %v", err)
- }
-
- keys = append(keys, key)
- }
-
- return keys, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql_test.go b/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql_test.go
deleted file mode 100644
index 940d0e2..0000000
--- a/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package postgresql
-
-import (
- "os"
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- _ "github.com/lib/pq"
-)
-
-func TestPostgreSQLBackend(t *testing.T) {
- connURL := os.Getenv("PGURL")
- if connURL == "" {
- t.SkipNow()
- }
-
- table := os.Getenv("PGTABLE")
- if table == "" {
- table = "vault_kv_store"
- }
-
- // Run vault tests
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewPostgreSQLBackend(map[string]string{
- "connection_url": connURL,
- "table": table,
- }, logger)
- if err != nil {
- t.Fatalf("Failed to create new backend: %v", err)
- }
-
- defer func() {
- pg := b.(*PostgreSQLBackend)
- _, err := pg.client.Exec("TRUNCATE TABLE " + pg.table)
- if err != nil {
- t.Fatalf("Failed to drop table: %v", err)
- }
- }()
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/s3/s3.go b/vendor/github.com/hashicorp/vault/physical/s3/s3.go
deleted file mode 100644
index 7118e7d..0000000
--- a/vendor/github.com/hashicorp/vault/physical/s3/s3.go
+++ /dev/null
@@ -1,252 +0,0 @@
-package s3
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "os"
- "sort"
- "strconv"
- "strings"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/armon/go-metrics"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/hashicorp/errwrap"
- cleanhttp "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/helper/awsutil"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/physical"
-)
-
-// S3Backend is a physical backend that stores data
-// within an S3 bucket.
-type S3Backend struct {
- bucket string
- client *s3.S3
- logger log.Logger
- permitPool *physical.PermitPool
-}
-
-// NewS3Backend constructs a S3 backend using a pre-existing
-// bucket. Credentials can be provided to the backend, sourced
-// from the environment, AWS credential files or by IAM role.
-func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- bucket := os.Getenv("AWS_S3_BUCKET")
- if bucket == "" {
- bucket = conf["bucket"]
- if bucket == "" {
- return nil, fmt.Errorf("'bucket' must be set")
- }
- }
-
- accessKey, ok := conf["access_key"]
- if !ok {
- accessKey = ""
- }
- secretKey, ok := conf["secret_key"]
- if !ok {
- secretKey = ""
- }
- sessionToken, ok := conf["session_token"]
- if !ok {
- sessionToken = ""
- }
- endpoint := os.Getenv("AWS_S3_ENDPOINT")
- if endpoint == "" {
- endpoint = conf["endpoint"]
- }
- region := os.Getenv("AWS_REGION")
- if region == "" {
- region = os.Getenv("AWS_DEFAULT_REGION")
- if region == "" {
- region = conf["region"]
- if region == "" {
- region = "us-east-1"
- }
- }
- }
-
- credsConfig := &awsutil.CredentialsConfig{
- AccessKey: accessKey,
- SecretKey: secretKey,
- SessionToken: sessionToken,
- }
- creds, err := credsConfig.GenerateCredentialChain()
- if err != nil {
- return nil, err
- }
-
- pooledTransport := cleanhttp.DefaultPooledTransport()
- pooledTransport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
-
- s3conn := s3.New(session.New(&aws.Config{
- Credentials: creds,
- HTTPClient: &http.Client{
- Transport: pooledTransport,
- },
- Endpoint: aws.String(endpoint),
- Region: aws.String(region),
- }))
-
- _, err = s3conn.ListObjects(&s3.ListObjectsInput{Bucket: &bucket})
- if err != nil {
- return nil, fmt.Errorf("unable to access bucket '%s' in region %s: %v", bucket, region, err)
- }
-
- maxParStr, ok := conf["max_parallel"]
- var maxParInt int
- if ok {
- maxParInt, err = strconv.Atoi(maxParStr)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
- }
- if logger.IsDebug() {
- logger.Debug("s3: max_parallel set", "max_parallel", maxParInt)
- }
- }
-
- s := &S3Backend{
- client: s3conn,
- bucket: bucket,
- logger: logger,
- permitPool: physical.NewPermitPool(maxParInt),
- }
- return s, nil
-}
-
-// Put is used to insert or update an entry
-func (s *S3Backend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"s3", "put"}, time.Now())
-
- s.permitPool.Acquire()
- defer s.permitPool.Release()
-
- _, err := s.client.PutObject(&s3.PutObjectInput{
- Bucket: aws.String(s.bucket),
- Key: aws.String(entry.Key),
- Body: bytes.NewReader(entry.Value),
- })
-
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// Get is used to fetch an entry
-func (s *S3Backend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"s3", "get"}, time.Now())
-
- s.permitPool.Acquire()
- defer s.permitPool.Release()
-
- resp, err := s.client.GetObject(&s3.GetObjectInput{
- Bucket: aws.String(s.bucket),
- Key: aws.String(key),
- })
- if awsErr, ok := err.(awserr.RequestFailure); ok {
- // Return nil on 404s, error on anything else
- if awsErr.StatusCode() == 404 {
- return nil, nil
- }
- return nil, err
- }
- if err != nil {
- return nil, err
- }
- if resp == nil {
- return nil, fmt.Errorf("got nil response from S3 but no error")
- }
-
- data := make([]byte, *resp.ContentLength)
- _, err = io.ReadFull(resp.Body, data)
- if err != nil {
- return nil, err
- }
-
- ent := &physical.Entry{
- Key: key,
- Value: data,
- }
-
- return ent, nil
-}
-
-// Delete is used to permanently delete an entry
-func (s *S3Backend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"s3", "delete"}, time.Now())
-
- s.permitPool.Acquire()
- defer s.permitPool.Release()
-
- _, err := s.client.DeleteObject(&s3.DeleteObjectInput{
- Bucket: aws.String(s.bucket),
- Key: aws.String(key),
- })
-
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// List is used to list all the keys under a given
-// prefix, up to the next prefix.
-func (s *S3Backend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"s3", "list"}, time.Now())
-
- s.permitPool.Acquire()
- defer s.permitPool.Release()
-
- params := &s3.ListObjectsV2Input{
- Bucket: aws.String(s.bucket),
- Prefix: aws.String(prefix),
- Delimiter: aws.String("/"),
- }
-
- keys := []string{}
-
- err := s.client.ListObjectsV2Pages(params,
- func(page *s3.ListObjectsV2Output, lastPage bool) bool {
- if page != nil {
- // Add truncated 'folder' paths
- for _, commonPrefix := range page.CommonPrefixes {
- // Avoid panic
- if commonPrefix == nil {
- continue
- }
-
- commonPrefix := strings.TrimPrefix(*commonPrefix.Prefix, prefix)
- keys = append(keys, commonPrefix)
- }
- // Add objects only from the current 'folder'
- for _, key := range page.Contents {
- // Avoid panic
- if key == nil {
- continue
- }
-
- key := strings.TrimPrefix(*key.Key, prefix)
- keys = append(keys, key)
- }
- }
- return true
- })
-
- if err != nil {
- return nil, err
- }
-
- sort.Strings(keys)
-
- return keys, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/s3/s3_test.go b/vendor/github.com/hashicorp/vault/physical/s3/s3_test.go
deleted file mode 100644
index dbe4c93..0000000
--- a/vendor/github.com/hashicorp/vault/physical/s3/s3_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package s3
-
-import (
- "fmt"
- "math/rand"
- "os"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/awsutil"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/s3"
-)
-
-func TestS3Backend(t *testing.T) {
- credsConfig := &awsutil.CredentialsConfig{}
-
- credsChain, err := credsConfig.GenerateCredentialChain()
- if err != nil {
- t.SkipNow()
- }
-
- _, err = credsChain.Get()
- if err != nil {
- t.SkipNow()
- }
-
- // If the variable is empty or doesn't exist, the default
- // AWS endpoints will be used
- endpoint := os.Getenv("AWS_S3_ENDPOINT")
-
- region := os.Getenv("AWS_DEFAULT_REGION")
- if region == "" {
- region = "us-east-1"
- }
-
- s3conn := s3.New(session.New(&aws.Config{
- Credentials: credsChain,
- Endpoint: aws.String(endpoint),
- Region: aws.String(region),
- }))
-
- var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
- bucket := fmt.Sprintf("vault-s3-testacc-%d", randInt)
-
- _, err = s3conn.CreateBucket(&s3.CreateBucketInput{
- Bucket: aws.String(bucket),
- })
- if err != nil {
- t.Fatalf("unable to create test bucket: %s", err)
- }
-
- defer func() {
- // Gotta list all the objects and delete them
- // before being able to delete the bucket
- listResp, _ := s3conn.ListObjects(&s3.ListObjectsInput{
- Bucket: aws.String(bucket),
- })
-
- objects := &s3.Delete{}
- for _, key := range listResp.Contents {
- oi := &s3.ObjectIdentifier{Key: key.Key}
- objects.Objects = append(objects.Objects, oi)
- }
-
- s3conn.DeleteObjects(&s3.DeleteObjectsInput{
- Bucket: aws.String(bucket),
- Delete: objects,
- })
-
- _, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{Bucket: aws.String(bucket)})
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- }()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- // This uses the same logic to find the AWS credentials as we did at the beginning of the test
- b, err := NewS3Backend(map[string]string{
- "bucket": bucket,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/swift/swift.go b/vendor/github.com/hashicorp/vault/physical/swift/swift.go
deleted file mode 100644
index 30d7e66..0000000
--- a/vendor/github.com/hashicorp/vault/physical/swift/swift.go
+++ /dev/null
@@ -1,218 +0,0 @@
-package swift
-
-import (
- "fmt"
- "os"
- "sort"
- "strconv"
- "strings"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/physical"
- "github.com/ncw/swift"
-)
-
-// SwiftBackend is a physical backend that stores data
-// within an OpenStack Swift container.
-type SwiftBackend struct {
- container string
- client *swift.Connection
- logger log.Logger
- permitPool *physical.PermitPool
-}
-
-// NewSwiftBackend constructs a Swift backend using a pre-existing
-// container. Credentials can be provided to the backend, sourced
-// from the environment.
-func NewSwiftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- var ok bool
-
- username := os.Getenv("OS_USERNAME")
- if username == "" {
- username = conf["username"]
- if username == "" {
- return nil, fmt.Errorf("missing username")
- }
- }
- password := os.Getenv("OS_PASSWORD")
- if password == "" {
- password = conf["password"]
- if password == "" {
- return nil, fmt.Errorf("missing password")
- }
- }
- authUrl := os.Getenv("OS_AUTH_URL")
- if authUrl == "" {
- authUrl = conf["auth_url"]
- if authUrl == "" {
- return nil, fmt.Errorf("missing auth_url")
- }
- }
- container := os.Getenv("OS_CONTAINER")
- if container == "" {
- container = conf["container"]
- if container == "" {
- return nil, fmt.Errorf("missing container")
- }
- }
- project := os.Getenv("OS_PROJECT_NAME")
- if project == "" {
- if project, ok = conf["project"]; !ok {
- // Check for KeyStone naming prior to V3
- project = os.Getenv("OS_TENANT_NAME")
- if project == "" {
- project = conf["tenant"]
- }
- }
- }
-
- domain := os.Getenv("OS_USER_DOMAIN_NAME")
- if domain == "" {
- domain = conf["domain"]
- }
- projectDomain := os.Getenv("OS_PROJECT_DOMAIN_NAME")
- if projectDomain == "" {
- projectDomain = conf["project-domain"]
- }
-
- c := swift.Connection{
- Domain: domain,
- UserName: username,
- ApiKey: password,
- AuthUrl: authUrl,
- Tenant: project,
- TenantDomain: projectDomain,
- Transport: cleanhttp.DefaultPooledTransport(),
- }
-
- err := c.Authenticate()
- if err != nil {
- return nil, err
- }
-
- _, _, err = c.Container(container)
- if err != nil {
- return nil, fmt.Errorf("Unable to access container '%s': %v", container, err)
- }
-
- maxParStr, ok := conf["max_parallel"]
- var maxParInt int
- if ok {
- maxParInt, err = strconv.Atoi(maxParStr)
- if err != nil {
- return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
- }
- if logger.IsDebug() {
- logger.Debug("swift: max_parallel set", "max_parallel", maxParInt)
- }
- }
-
- s := &SwiftBackend{
- client: &c,
- container: container,
- logger: logger,
- permitPool: physical.NewPermitPool(maxParInt),
- }
- return s, nil
-}
-
-// Put is used to insert or update an entry
-func (s *SwiftBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"swift", "put"}, time.Now())
-
- s.permitPool.Acquire()
- defer s.permitPool.Release()
-
- err := s.client.ObjectPutBytes(s.container, entry.Key, entry.Value, "")
-
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// Get is used to fetch an entry
-func (s *SwiftBackend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"swift", "get"}, time.Now())
-
- s.permitPool.Acquire()
- defer s.permitPool.Release()
-
- //Do a list of names with the key first since eventual consistency means
- //it might be deleted, but a node might return a read of bytes which fails
- //the physical test
- list, err := s.client.ObjectNames(s.container, &swift.ObjectsOpts{Prefix: key})
- if err != nil {
- return nil, err
- }
- if 0 == len(list) {
- return nil, nil
- }
- data, err := s.client.ObjectGetBytes(s.container, key)
- if err == swift.ObjectNotFound {
- return nil, nil
- }
- if err != nil {
- return nil, err
- }
- ent := &physical.Entry{
- Key: key,
- Value: data,
- }
-
- return ent, nil
-}
-
-// Delete is used to permanently delete an entry
-func (s *SwiftBackend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"swift", "delete"}, time.Now())
-
- s.permitPool.Acquire()
- defer s.permitPool.Release()
-
- err := s.client.ObjectDelete(s.container, key)
-
- if err != nil && err != swift.ObjectNotFound {
- return err
- }
-
- return nil
-}
-
-// List is used to list all the keys under a given
-// prefix, up to the next prefix.
-func (s *SwiftBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"swift", "list"}, time.Now())
-
- s.permitPool.Acquire()
- defer s.permitPool.Release()
-
- list, err := s.client.ObjectNamesAll(s.container, &swift.ObjectsOpts{Prefix: prefix})
- if nil != err {
- return nil, err
- }
-
- keys := []string{}
- for _, key := range list {
- key := strings.TrimPrefix(key, prefix)
-
- if i := strings.Index(key, "/"); i == -1 {
- // Add objects only from the current 'folder'
- keys = append(keys, key)
- } else if i != -1 {
- // Add truncated 'folder' paths
- keys = strutil.AppendIfMissing(keys, key[:i+1])
- }
- }
-
- sort.Strings(keys)
-
- return keys, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/swift/swift_test.go b/vendor/github.com/hashicorp/vault/physical/swift/swift_test.go
deleted file mode 100644
index 5aa2ec9..0000000
--- a/vendor/github.com/hashicorp/vault/physical/swift/swift_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package swift
-
-import (
- "fmt"
- "os"
- "testing"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- "github.com/ncw/swift"
-)
-
-func TestSwiftBackend(t *testing.T) {
- if os.Getenv("OS_USERNAME") == "" || os.Getenv("OS_PASSWORD") == "" ||
- os.Getenv("OS_AUTH_URL") == "" {
- t.SkipNow()
- }
- username := os.Getenv("OS_USERNAME")
- password := os.Getenv("OS_PASSWORD")
- authUrl := os.Getenv("OS_AUTH_URL")
- project := os.Getenv("OS_PROJECT_NAME")
- domain := os.Getenv("OS_USER_DOMAIN_NAME")
- projectDomain := os.Getenv("OS_PROJECT_DOMAIN_NAME")
-
- ts := time.Now().UnixNano()
- container := fmt.Sprintf("vault-test-%d", ts)
-
- cleaner := swift.Connection{
- Domain: domain,
- UserName: username,
- ApiKey: password,
- AuthUrl: authUrl,
- Tenant: project,
- TenantDomain: projectDomain,
- Transport: cleanhttp.DefaultPooledTransport(),
- }
-
- err := cleaner.Authenticate()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- err = cleaner.ContainerCreate(container, nil)
- if nil != err {
- t.Fatalf("Unable to create test container '%s': %v", container, err)
- }
- defer func() {
- newObjects, err := cleaner.ObjectNamesAll(container, nil)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- for _, o := range newObjects {
- err := cleaner.ObjectDelete(container, o)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- }
- err = cleaner.ContainerDelete(container)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- }()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewSwiftBackend(map[string]string{
- "username": username,
- "password": password,
- "container": container,
- "auth_url": authUrl,
- "project": project,
- "domain": domain,
- "project-domain": projectDomain,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/testing.go b/vendor/github.com/hashicorp/vault/physical/testing.go
deleted file mode 100644
index 69f7167..0000000
--- a/vendor/github.com/hashicorp/vault/physical/testing.go
+++ /dev/null
@@ -1,460 +0,0 @@
-package physical
-
-import (
- "reflect"
- "sort"
- "testing"
- "time"
-)
-
-func ExerciseBackend(t *testing.T, b Backend) {
- // Should be empty
- keys, err := b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 0 {
- t.Fatalf("bad: %v", keys)
- }
-
- // Delete should work if it does not exist
- err = b.Delete("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Get should fail
- out, err := b.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Make an entry
- e := &Entry{Key: "foo", Value: []byte("test")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Get should work
- out, err = b.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !reflect.DeepEqual(out, e) {
- t.Fatalf("bad: %v expected: %v", out, e)
- }
-
- // List should not be empty
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 1 {
- t.Fatalf("bad: %v", keys)
- }
- if keys[0] != "foo" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Delete should work
- err = b.Delete("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should be empty
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 0 {
- t.Fatalf("bad: %v", keys)
- }
-
- // Get should fail
- out, err = b.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Multiple Puts should work; GH-189
- e = &Entry{Key: "foo", Value: []byte("test")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- e = &Entry{Key: "foo", Value: []byte("test")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Make a nested entry
- e = &Entry{Key: "foo/bar", Value: []byte("baz")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 2 {
- t.Fatalf("bad: %v", keys)
- }
- sort.Strings(keys)
- if keys[0] != "foo" || keys[1] != "foo/" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Delete with children should work
- err = b.Delete("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Get should return the child
- out, err = b.Get("foo/bar")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("missing child")
- }
-
- // Removal of nested secret should not leave artifacts
- e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- err = b.Delete("foo/nested1/nested2/nested3")
- if err != nil {
- t.Fatalf("failed to remove nested secret: %v", err)
- }
-
- keys, err = b.List("foo/")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if len(keys) != 1 {
- t.Fatalf("there should be only one key left after deleting nested "+
- "secret: %v", keys)
- }
-
- if keys[0] != "bar" {
- t.Fatalf("bad keys after deleting nested: %v", keys)
- }
-
- // Make a second nested entry to test prefix removal
- e = &Entry{Key: "foo/zip", Value: []byte("zap")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Delete should not remove the prefix
- err = b.Delete("foo/bar")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 1 {
- t.Fatalf("bad: %v", keys)
- }
- if keys[0] != "foo/" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Delete should remove the prefix
- err = b.Delete("foo/zip")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 0 {
- t.Fatalf("bad: %v", keys)
- }
-}
-
-func ExerciseBackend_ListPrefix(t *testing.T, b Backend) {
- e1 := &Entry{Key: "foo", Value: []byte("test")}
- e2 := &Entry{Key: "foo/bar", Value: []byte("test")}
- e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")}
-
- defer func() {
- b.Delete("foo")
- b.Delete("foo/bar")
- b.Delete("foo/bar/baz")
- }()
-
- err := b.Put(e1)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- err = b.Put(e2)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- err = b.Put(e3)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Scan the root
- keys, err := b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 2 {
- t.Fatalf("bad: %v", keys)
- }
- sort.Strings(keys)
- if keys[0] != "foo" {
- t.Fatalf("bad: %v", keys)
- }
- if keys[1] != "foo/" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Scan foo/
- keys, err = b.List("foo/")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 2 {
- t.Fatalf("bad: %v", keys)
- }
- sort.Strings(keys)
- if keys[0] != "bar" {
- t.Fatalf("bad: %v", keys)
- }
- if keys[1] != "bar/" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Scan foo/bar/
- keys, err = b.List("foo/bar/")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- sort.Strings(keys)
- if len(keys) != 1 {
- t.Fatalf("bad: %v", keys)
- }
- if keys[0] != "baz" {
- t.Fatalf("bad: %v", keys)
- }
-}
-
-func ExerciseHABackend(t *testing.T, b HABackend, b2 HABackend) {
- // Get the lock
- lock, err := b.LockWith("foo", "bar")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Attempt to lock
- leaderCh, err := lock.Lock(nil)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if leaderCh == nil {
- t.Fatalf("failed to get leader ch")
- }
-
- // Check the value
- held, val, err := lock.Value()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !held {
- t.Fatalf("should be held")
- }
- if val != "bar" {
- t.Fatalf("bad value: %v", err)
- }
-
- // Second acquisition should fail
- lock2, err := b2.LockWith("foo", "baz")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Cancel attempt in 50 msec
- stopCh := make(chan struct{})
- time.AfterFunc(50*time.Millisecond, func() {
- close(stopCh)
- })
-
- // Attempt to lock
- leaderCh2, err := lock2.Lock(stopCh)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if leaderCh2 != nil {
- t.Fatalf("should not get leader ch")
- }
-
- // Release the first lock
- lock.Unlock()
-
- // Attempt to lock should work
- leaderCh2, err = lock2.Lock(nil)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if leaderCh2 == nil {
- t.Fatalf("should get leader ch")
- }
-
- // Check the value
- held, val, err = lock.Value()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !held {
- t.Fatalf("should be held")
- }
- if val != "baz" {
- t.Fatalf("bad value: %v", err)
- }
- // Cleanup
- lock2.Unlock()
-}
-
-func ExerciseTransactionalBackend(t *testing.T, b Backend) {
- tb, ok := b.(Transactional)
- if !ok {
- t.Fatal("Not a transactional backend")
- }
-
- txns := SetupTestingTransactions(t, b)
-
- if err := tb.Transaction(txns); err != nil {
- t.Fatal(err)
- }
-
- keys, err := b.List("")
- if err != nil {
- t.Fatal(err)
- }
-
- expected := []string{"foo", "zip"}
-
- sort.Strings(keys)
- sort.Strings(expected)
- if !reflect.DeepEqual(keys, expected) {
- t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys)
- }
-
- entry, err := b.Get("foo")
- if err != nil {
- t.Fatal(err)
- }
- if entry == nil {
- t.Fatal("got nil entry")
- }
- if entry.Value == nil {
- t.Fatal("got nil value")
- }
- if string(entry.Value) != "bar3" {
- t.Fatal("updates did not apply correctly")
- }
-
- entry, err = b.Get("zip")
- if err != nil {
- t.Fatal(err)
- }
- if entry == nil {
- t.Fatal("got nil entry")
- }
- if entry.Value == nil {
- t.Fatal("got nil value")
- }
- if string(entry.Value) != "zap3" {
- t.Fatal("updates did not apply correctly")
- }
-}
-
-func SetupTestingTransactions(t *testing.T, b Backend) []TxnEntry {
- // Add a few keys so that we test rollback with deletion
- if err := b.Put(&Entry{
- Key: "foo",
- Value: []byte("bar"),
- }); err != nil {
- t.Fatal(err)
- }
- if err := b.Put(&Entry{
- Key: "zip",
- Value: []byte("zap"),
- }); err != nil {
- t.Fatal(err)
- }
- if err := b.Put(&Entry{
- Key: "deleteme",
- }); err != nil {
- t.Fatal(err)
- }
- if err := b.Put(&Entry{
- Key: "deleteme2",
- }); err != nil {
- t.Fatal(err)
- }
-
- txns := []TxnEntry{
- TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: "foo",
- Value: []byte("bar2"),
- },
- },
- TxnEntry{
- Operation: DeleteOperation,
- Entry: &Entry{
- Key: "deleteme",
- },
- },
- TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: "foo",
- Value: []byte("bar3"),
- },
- },
- TxnEntry{
- Operation: DeleteOperation,
- Entry: &Entry{
- Key: "deleteme2",
- },
- },
- TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: "zip",
- Value: []byte("zap3"),
- },
- },
- }
-
- return txns
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/transactions.go b/vendor/github.com/hashicorp/vault/physical/transactions.go
deleted file mode 100644
index f8668d2..0000000
--- a/vendor/github.com/hashicorp/vault/physical/transactions.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package physical
-
-import multierror "github.com/hashicorp/go-multierror"
-
-// TxnEntry is an operation that takes atomically as part of
-// a transactional update. Only supported by Transactional backends.
-type TxnEntry struct {
- Operation Operation
- Entry *Entry
-}
-
-// Transactional is an optional interface for backends that
-// support doing transactional updates of multiple keys. This is
-// required for some features such as replication.
-type Transactional interface {
- // The function to run a transaction
- Transaction([]TxnEntry) error
-}
-
-type PseudoTransactional interface {
- // An internal function should do no locking or permit pool acquisition.
- // Depending on the backend and if it natively supports transactions, these
- // may simply chain to the normal backend functions.
- GetInternal(string) (*Entry, error)
- PutInternal(*Entry) error
- DeleteInternal(string) error
-}
-
-// Implements the transaction interface
-func GenericTransactionHandler(t PseudoTransactional, txns []TxnEntry) (retErr error) {
- rollbackStack := make([]TxnEntry, 0, len(txns))
- var dirty bool
-
- // We walk the transactions in order; each successful operation goes into a
- // LIFO for rollback if we hit an error along the way
-TxnWalk:
- for _, txn := range txns {
- switch txn.Operation {
- case DeleteOperation:
- entry, err := t.GetInternal(txn.Entry.Key)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- dirty = true
- break TxnWalk
- }
- if entry == nil {
- // Nothing to delete or roll back
- continue
- }
- rollbackEntry := TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: entry.Key,
- Value: entry.Value,
- },
- }
- err = t.DeleteInternal(txn.Entry.Key)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- dirty = true
- break TxnWalk
- }
- rollbackStack = append([]TxnEntry{rollbackEntry}, rollbackStack...)
-
- case PutOperation:
- entry, err := t.GetInternal(txn.Entry.Key)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- dirty = true
- break TxnWalk
- }
- // Nothing existed so in fact rolling back requires a delete
- var rollbackEntry TxnEntry
- if entry == nil {
- rollbackEntry = TxnEntry{
- Operation: DeleteOperation,
- Entry: &Entry{
- Key: txn.Entry.Key,
- },
- }
- } else {
- rollbackEntry = TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: entry.Key,
- Value: entry.Value,
- },
- }
- }
- err = t.PutInternal(txn.Entry)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- dirty = true
- break TxnWalk
- }
- rollbackStack = append([]TxnEntry{rollbackEntry}, rollbackStack...)
- }
- }
-
- // Need to roll back because we hit an error along the way
- if dirty {
- // While traversing this, if we get an error, we continue anyways in
- // best-effort fashion
- for _, txn := range rollbackStack {
- switch txn.Operation {
- case DeleteOperation:
- err := t.DeleteInternal(txn.Entry.Key)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- }
- case PutOperation:
- err := t.PutInternal(txn.Entry)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- }
- }
- }
- }
-
- return
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper.go b/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper.go
deleted file mode 100644
index 8ecc0d6..0000000
--- a/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper.go
+++ /dev/null
@@ -1,452 +0,0 @@
-package zookeeper
-
-import (
- "fmt"
- "path/filepath"
- "sort"
- "strings"
- "sync"
- "time"
-
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- metrics "github.com/armon/go-metrics"
- "github.com/samuel/go-zookeeper/zk"
-)
-
-const (
- // ZKNodeFilePrefix is prefixed to any "files" in ZooKeeper,
- // so that they do not collide with directory entries. Otherwise,
- // we cannot delete a file if the path is a full-prefix of another
- // key.
- ZKNodeFilePrefix = "_"
-)
-
-// ZooKeeperBackend is a physical backend that stores data at specific
-// prefix within ZooKeeper. It is used in production situations as
-// it allows Vault to run on multiple machines in a highly-available manner.
-type ZooKeeperBackend struct {
- path string
- client *zk.Conn
- acl []zk.ACL
- logger log.Logger
-}
-
-// NewZooKeeperBackend constructs a ZooKeeper backend using the given API client
-// and the prefix in the KV store.
-func NewZooKeeperBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
- // Get the path in ZooKeeper
- path, ok := conf["path"]
- if !ok {
- path = "vault/"
- }
-
- // Ensure path is suffixed and prefixed (zk requires prefix /)
- if !strings.HasSuffix(path, "/") {
- path += "/"
- }
- if !strings.HasPrefix(path, "/") {
- path = "/" + path
- }
-
- // Configure the client, default to localhost instance
- var machines string
- machines, ok = conf["address"]
- if !ok {
- machines = "localhost:2181"
- }
-
- // zNode owner and schema.
- var owner string
- var schema string
- var schemaAndOwner string
- schemaAndOwner, ok = conf["znode_owner"]
- if !ok {
- owner = "anyone"
- schema = "world"
- } else {
- parsedSchemaAndOwner := strings.SplitN(schemaAndOwner, ":", 2)
- if len(parsedSchemaAndOwner) != 2 {
- return nil, fmt.Errorf("znode_owner expected format is 'schema:owner'")
- } else {
- schema = parsedSchemaAndOwner[0]
- owner = parsedSchemaAndOwner[1]
-
- // znode_owner is in config and structured correctly - but does it make any sense?
- // Either 'owner' or 'schema' was set but not both - this seems like a failed attempt
- // (e.g. ':MyUser' which omit the schema, or ':' omitting both)
- if owner == "" || schema == "" {
- return nil, fmt.Errorf("znode_owner expected format is 'schema:auth'")
- }
- }
- }
-
- acl := []zk.ACL{{zk.PermAll, schema, owner}}
-
- // Authnetication info
- var schemaAndUser string
- var useAddAuth bool
- schemaAndUser, useAddAuth = conf["auth_info"]
- if useAddAuth {
- parsedSchemaAndUser := strings.SplitN(schemaAndUser, ":", 2)
- if len(parsedSchemaAndUser) != 2 {
- return nil, fmt.Errorf("auth_info expected format is 'schema:auth'")
- } else {
- schema = parsedSchemaAndUser[0]
- owner = parsedSchemaAndUser[1]
-
- // auth_info is in config and structured correctly - but does it make any sense?
- // Either 'owner' or 'schema' was set but not both - this seems like a failed attempt
- // (e.g. ':MyUser' which omit the schema, or ':' omitting both)
- if owner == "" || schema == "" {
- return nil, fmt.Errorf("auth_info expected format is 'schema:auth'")
- }
- }
- }
-
- // We have all of the configuration in hand - let's try and connect to ZK
- client, _, err := zk.Connect(strings.Split(machines, ","), time.Second)
- if err != nil {
- return nil, fmt.Errorf("client setup failed: %v", err)
- }
-
- // ZK AddAuth API if the user asked for it
- if useAddAuth {
- err = client.AddAuth(schema, []byte(owner))
- if err != nil {
- return nil, fmt.Errorf("ZooKeeper rejected authentication information provided at auth_info: %v", err)
- }
- }
-
- // Setup the backend
- c := &ZooKeeperBackend{
- path: path,
- client: client,
- acl: acl,
- logger: logger,
- }
- return c, nil
-}
-
-// ensurePath is used to create each node in the path hierarchy.
-// We avoid calling this optimistically, and invoke it when we get
-// an error during an operation
-func (c *ZooKeeperBackend) ensurePath(path string, value []byte) error {
- nodes := strings.Split(path, "/")
- fullPath := ""
- for index, node := range nodes {
- if strings.TrimSpace(node) != "" {
- fullPath += "/" + node
- isLastNode := index+1 == len(nodes)
-
- // set parent nodes to nil, leaf to value
- // this block reduces round trips by being smart on the leaf create/set
- if exists, _, _ := c.client.Exists(fullPath); !isLastNode && !exists {
- if _, err := c.client.Create(fullPath, nil, int32(0), c.acl); err != nil {
- return err
- }
- } else if isLastNode && !exists {
- if _, err := c.client.Create(fullPath, value, int32(0), c.acl); err != nil {
- return err
- }
- } else if isLastNode && exists {
- if _, err := c.client.Set(fullPath, value, int32(-1)); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-// cleanupLogicalPath is used to remove all empty nodes, begining with deepest one,
-// aborting on first non-empty one, up to top-level node.
-func (c *ZooKeeperBackend) cleanupLogicalPath(path string) error {
- nodes := strings.Split(path, "/")
- for i := len(nodes) - 1; i > 0; i-- {
- fullPath := c.path + strings.Join(nodes[:i], "/")
-
- _, stat, err := c.client.Exists(fullPath)
- if err != nil {
- return fmt.Errorf("Failed to acquire node data: %s", err)
- }
-
- if stat.DataLength > 0 && stat.NumChildren > 0 {
- msgFmt := "Node %s is both of data and leaf type ??"
- panic(fmt.Sprintf(msgFmt, fullPath))
- } else if stat.DataLength > 0 {
- msgFmt := "Node %s is a data node, this is either a bug or " +
- "backend data is corrupted"
- panic(fmt.Sprintf(msgFmt, fullPath))
- } else if stat.NumChildren > 0 {
- return nil
- } else {
- // Empty node, lets clean it up!
- if err := c.client.Delete(fullPath, -1); err != nil && err != zk.ErrNoNode {
- msgFmt := "Removal of node `%s` failed: `%v`"
- return fmt.Errorf(msgFmt, fullPath, err)
- }
- }
- }
- return nil
-}
-
-// nodePath returns an zk path based on the given key.
-func (c *ZooKeeperBackend) nodePath(key string) string {
- return filepath.Join(c.path, filepath.Dir(key), ZKNodeFilePrefix+filepath.Base(key))
-}
-
-// Put is used to insert or update an entry
-func (c *ZooKeeperBackend) Put(entry *physical.Entry) error {
- defer metrics.MeasureSince([]string{"zookeeper", "put"}, time.Now())
-
- // Attempt to set the full path
- fullPath := c.nodePath(entry.Key)
- _, err := c.client.Set(fullPath, entry.Value, -1)
-
- // If we get ErrNoNode, we need to construct the path hierarchy
- if err == zk.ErrNoNode {
- return c.ensurePath(fullPath, entry.Value)
- }
- return err
-}
-
-// Get is used to fetch an entry
-func (c *ZooKeeperBackend) Get(key string) (*physical.Entry, error) {
- defer metrics.MeasureSince([]string{"zookeeper", "get"}, time.Now())
-
- // Attempt to read the full path
- fullPath := c.nodePath(key)
- value, _, err := c.client.Get(fullPath)
-
- // Ignore if the node does not exist
- if err == zk.ErrNoNode {
- err = nil
- }
- if err != nil {
- return nil, err
- }
-
- // Handle a non-existing value
- if value == nil {
- return nil, nil
- }
- ent := &physical.Entry{
- Key: key,
- Value: value,
- }
- return ent, nil
-}
-
-// Delete is used to permanently delete an entry
-func (c *ZooKeeperBackend) Delete(key string) error {
- defer metrics.MeasureSince([]string{"zookeeper", "delete"}, time.Now())
-
- if key == "" {
- return nil
- }
-
- // Delete the full path
- fullPath := c.nodePath(key)
- err := c.client.Delete(fullPath, -1)
-
- // Mask if the node does not exist
- if err != nil && err != zk.ErrNoNode {
- return fmt.Errorf("Failed to remove %q: %v", fullPath, err)
- }
-
- err = c.cleanupLogicalPath(key)
-
- return err
-}
-
-// List is used ot list all the keys under a given
-// prefix, up to the next prefix.
-func (c *ZooKeeperBackend) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"zookeeper", "list"}, time.Now())
-
- // Query the children at the full path
- fullPath := strings.TrimSuffix(c.path+prefix, "/")
- result, _, err := c.client.Children(fullPath)
-
- // If the path nodes are missing, no children!
- if err == zk.ErrNoNode {
- return []string{}, nil
- } else if err != nil {
- return []string{}, err
- }
-
- children := []string{}
- for _, key := range result {
- childPath := fullPath + "/" + key
- _, stat, err := c.client.Exists(childPath)
- if err != nil {
- // Node is ought to exists, so it must be something different
- return []string{}, err
- }
-
- // Check if this entry is a leaf of a node,
- // and append the slash which is what Vault depends on
- // for iteration
- if stat.DataLength > 0 && stat.NumChildren > 0 {
- if childPath == c.nodePath("core/lock") {
- // go-zookeeper Lock() breaks Vault semantics and creates a directory
- // under the lock file; just treat it like the file Vault expects
- children = append(children, key[1:])
- } else {
- msgFmt := "Node %q is both of data and leaf type ??"
- panic(fmt.Sprintf(msgFmt, childPath))
- }
- } else if stat.DataLength == 0 {
- // No, we cannot differentiate here on number of children as node
- // can have all it leafs remoed, and it still is a node.
- children = append(children, key+"/")
- } else {
- children = append(children, key[1:])
- }
- }
- sort.Strings(children)
- return children, nil
-}
-
-// LockWith is used for mutual exclusion based on the given key.
-func (c *ZooKeeperBackend) LockWith(key, value string) (physical.Lock, error) {
- l := &ZooKeeperHALock{
- in: c,
- key: key,
- value: value,
- }
- return l, nil
-}
-
-// HAEnabled indicates whether the HA functionality should be exposed.
-// Currently always returns true.
-func (c *ZooKeeperBackend) HAEnabled() bool {
- return true
-}
-
-// ZooKeeperHALock is a ZooKeeper Lock implementation for the HABackend
-type ZooKeeperHALock struct {
- in *ZooKeeperBackend
- key string
- value string
-
- held bool
- localLock sync.Mutex
- leaderCh chan struct{}
- zkLock *zk.Lock
-}
-
-func (i *ZooKeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
- i.localLock.Lock()
- defer i.localLock.Unlock()
- if i.held {
- return nil, fmt.Errorf("lock already held")
- }
-
- // Attempt an async acquisition
- didLock := make(chan struct{})
- failLock := make(chan error, 1)
- releaseCh := make(chan bool, 1)
- lockpath := i.in.nodePath(i.key)
- go i.attemptLock(lockpath, didLock, failLock, releaseCh)
-
- // Wait for lock acquisition, failure, or shutdown
- select {
- case <-didLock:
- releaseCh <- false
- case err := <-failLock:
- return nil, err
- case <-stopCh:
- releaseCh <- true
- return nil, nil
- }
-
- // Create the leader channel
- i.held = true
- i.leaderCh = make(chan struct{})
-
- // Watch for Events which could result in loss of our zkLock and close(i.leaderCh)
- currentVal, _, lockeventCh, err := i.in.client.GetW(lockpath)
- if err != nil {
- return nil, fmt.Errorf("unable to watch HA lock: %v", err)
- }
- if i.value != string(currentVal) {
- return nil, fmt.Errorf("lost HA lock immediately before watch")
- }
- go i.monitorLock(lockeventCh, i.leaderCh)
-
- return i.leaderCh, nil
-}
-
-func (i *ZooKeeperHALock) attemptLock(lockpath string, didLock chan struct{}, failLock chan error, releaseCh chan bool) {
- // Wait to acquire the lock in ZK
- lock := zk.NewLock(i.in.client, lockpath, i.in.acl)
- err := lock.Lock()
- if err != nil {
- failLock <- err
- return
- }
- // Set node value
- data := []byte(i.value)
- err = i.in.ensurePath(lockpath, data)
- if err != nil {
- failLock <- err
- lock.Unlock()
- return
- }
- i.zkLock = lock
-
- // Signal that lock is held
- close(didLock)
-
- // Handle an early abort
- release := <-releaseCh
- if release {
- lock.Unlock()
- }
-}
-
-func (i *ZooKeeperHALock) monitorLock(lockeventCh <-chan zk.Event, leaderCh chan struct{}) {
- for {
- select {
- case event := <-lockeventCh:
- // Lost connection?
- switch event.State {
- case zk.StateConnected:
- case zk.StateHasSession:
- default:
- close(leaderCh)
- return
- }
-
- // Lost lock?
- switch event.Type {
- case zk.EventNodeChildrenChanged:
- case zk.EventSession:
- default:
- close(leaderCh)
- return
- }
- }
- }
-}
-
-func (i *ZooKeeperHALock) Unlock() error {
- i.localLock.Lock()
- defer i.localLock.Unlock()
- if !i.held {
- return nil
- }
-
- i.held = false
- i.zkLock.Unlock()
- return nil
-}
-
-func (i *ZooKeeperHALock) Value() (bool, string, error) {
- lockpath := i.in.nodePath(i.key)
- value, _, err := i.in.client.Get(lockpath)
- return (value != nil), string(value), err
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper_test.go b/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper_test.go
deleted file mode 100644
index a85c27c..0000000
--- a/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package zookeeper
-
-import (
- "fmt"
- "os"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- log "github.com/mgutz/logxi/v1"
-
- "github.com/samuel/go-zookeeper/zk"
-)
-
-func TestZooKeeperBackend(t *testing.T) {
- addr := os.Getenv("ZOOKEEPER_ADDR")
- if addr == "" {
- t.SkipNow()
- }
-
- client, _, err := zk.Connect([]string{addr}, time.Second)
-
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- randPath := fmt.Sprintf("/vault-%d", time.Now().Unix())
- acl := zk.WorldACL(zk.PermAll)
- _, err = client.Create(randPath, []byte("hi"), int32(0), acl)
-
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- defer func() {
- client.Delete(randPath+"/foo/nested1/nested2/nested3", -1)
- client.Delete(randPath+"/foo/nested1/nested2", -1)
- client.Delete(randPath+"/foo/nested1", -1)
- client.Delete(randPath+"/foo/bar/baz", -1)
- client.Delete(randPath+"/foo/bar", -1)
- client.Delete(randPath+"/foo", -1)
- client.Delete(randPath, -1)
- client.Close()
- }()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewZooKeeperBackend(map[string]string{
- "address": addr + "," + addr,
- "path": randPath,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- physical.ExerciseBackend(t, b)
- physical.ExerciseBackend_ListPrefix(t, b)
-}
-
-func TestZooKeeperHABackend(t *testing.T) {
- addr := os.Getenv("ZOOKEEPER_ADDR")
- if addr == "" {
- t.SkipNow()
- }
-
- client, _, err := zk.Connect([]string{addr}, time.Second)
-
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- randPath := fmt.Sprintf("/vault-ha-%d", time.Now().Unix())
- acl := zk.WorldACL(zk.PermAll)
- _, err = client.Create(randPath, []byte("hi"), int32(0), acl)
-
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- defer func() {
- client.Delete(randPath+"/foo", -1)
- client.Delete(randPath, -1)
- client.Close()
- }()
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- b, err := NewZooKeeperBackend(map[string]string{
- "address": addr + "," + addr,
- "path": randPath,
- }, logger)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- ha, ok := b.(physical.HABackend)
- if !ok {
- t.Fatalf("zookeeper does not implement HABackend")
- }
- physical.ExerciseHABackend(t, ha, ha)
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra-database-plugin/main.go
deleted file mode 100644
index f9bfdeb..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra-database-plugin/main.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package main
-
-import (
- "log"
- "os"
-
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/plugins/database/cassandra"
-)
-
-func main() {
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(os.Args[1:])
-
- err := cassandra.Run(apiClientMeta.GetTLSConfig())
- if err != nil {
- log.Println(err)
- os.Exit(1)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra.go
deleted file mode 100644
index c0b5fd5..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package cassandra
-
-import (
- "strings"
- "time"
-
- "github.com/gocql/gocql"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/plugins"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
- "github.com/hashicorp/vault/plugins/helper/database/credsutil"
- "github.com/hashicorp/vault/plugins/helper/database/dbutil"
-)
-
-const (
- defaultUserCreationCQL = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;`
- defaultUserDeletionCQL = `DROP USER '{{username}}';`
- cassandraTypeName = "cassandra"
-)
-
-// Cassandra is an implementation of Database interface
-type Cassandra struct {
- connutil.ConnectionProducer
- credsutil.CredentialsProducer
-}
-
-// New returns a new Cassandra instance
-func New() (interface{}, error) {
- connProducer := &cassandraConnectionProducer{}
- connProducer.Type = cassandraTypeName
-
- credsProducer := &credsutil.SQLCredentialsProducer{
- DisplayNameLen: 15,
- RoleNameLen: 15,
- UsernameLen: 100,
- Separator: "_",
- }
-
- dbType := &Cassandra{
- ConnectionProducer: connProducer,
- CredentialsProducer: credsProducer,
- }
-
- return dbType, nil
-}
-
-// Run instantiates a Cassandra object, and runs the RPC server for the plugin
-func Run(apiTLSConfig *api.TLSConfig) error {
- dbType, err := New()
- if err != nil {
- return err
- }
-
- plugins.Serve(dbType.(*Cassandra), apiTLSConfig)
-
- return nil
-}
-
-// Type returns the TypeName for this backend
-func (c *Cassandra) Type() (string, error) {
- return cassandraTypeName, nil
-}
-
-func (c *Cassandra) getConnection() (*gocql.Session, error) {
- session, err := c.Connection()
- if err != nil {
- return nil, err
- }
-
- return session.(*gocql.Session), nil
-}
-
-// CreateUser generates the username/password on the underlying Cassandra secret backend as instructed by
-// the CreationStatement provided.
-func (c *Cassandra) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
- // Grab the lock
- c.Lock()
- defer c.Unlock()
-
- // Get the connection
- session, err := c.getConnection()
- if err != nil {
- return "", "", err
- }
-
- creationCQL := statements.CreationStatements
- if creationCQL == "" {
- creationCQL = defaultUserCreationCQL
- }
- rollbackCQL := statements.RollbackStatements
- if rollbackCQL == "" {
- rollbackCQL = defaultUserDeletionCQL
- }
-
- username, err = c.GenerateUsername(usernameConfig)
- username = strings.Replace(username, "-", "_", -1)
- if err != nil {
- return "", "", err
- }
- // Cassandra doesn't like the uppercase usernames
- username = strings.ToLower(username)
-
- password, err = c.GeneratePassword()
- if err != nil {
- return "", "", err
- }
-
- // Execute each query
- for _, query := range strutil.ParseArbitraryStringSlice(creationCQL, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- err = session.Query(dbutil.QueryHelper(query, map[string]string{
- "username": username,
- "password": password,
- })).Exec()
- if err != nil {
- for _, query := range strutil.ParseArbitraryStringSlice(rollbackCQL, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- session.Query(dbutil.QueryHelper(query, map[string]string{
- "username": username,
- })).Exec()
- }
- return "", "", err
- }
- }
-
- return username, password, nil
-}
-
-// RenewUser is not supported on Cassandra, so this is a no-op.
-func (c *Cassandra) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
- // NOOP
- return nil
-}
-
-// RevokeUser attempts to drop the specified user.
-func (c *Cassandra) RevokeUser(statements dbplugin.Statements, username string) error {
- // Grab the lock
- c.Lock()
- defer c.Unlock()
-
- session, err := c.getConnection()
- if err != nil {
- return err
- }
-
- revocationCQL := statements.RevocationStatements
- if revocationCQL == "" {
- revocationCQL = defaultUserDeletionCQL
- }
-
- var result *multierror.Error
- for _, query := range strutil.ParseArbitraryStringSlice(revocationCQL, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- err := session.Query(dbutil.QueryHelper(query, map[string]string{
- "username": username,
- })).Exec()
-
- result = multierror.Append(result, err)
- }
-
- return result.ErrorOrNil()
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra_test.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra_test.go
deleted file mode 100644
index 0f4d330..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra_test.go
+++ /dev/null
@@ -1,277 +0,0 @@
-package cassandra
-
-import (
- "os"
- "strconv"
- "testing"
- "time"
-
- "fmt"
-
- "github.com/gocql/gocql"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- dockertest "gopkg.in/ory-am/dockertest.v3"
-)
-
-func prepareCassandraTestContainer(t *testing.T) (func(), string, int) {
- if os.Getenv("CASSANDRA_HOST") != "" {
- return func() {}, os.Getenv("CASSANDRA_HOST"), 0
- }
-
- pool, err := dockertest.NewPool("")
- if err != nil {
- t.Fatalf("Failed to connect to docker: %s", err)
- }
-
- cwd, _ := os.Getwd()
- cassandraMountPath := fmt.Sprintf("%s/test-fixtures/:/etc/cassandra/", cwd)
-
- ro := &dockertest.RunOptions{
- Repository: "cassandra",
- Tag: "latest",
- Env: []string{"CASSANDRA_BROADCAST_ADDRESS=127.0.0.1"},
- Mounts: []string{cassandraMountPath},
- }
- resource, err := pool.RunWithOptions(ro)
- if err != nil {
- t.Fatalf("Could not start local cassandra docker container: %s", err)
- }
-
- cleanup := func() {
- err := pool.Purge(resource)
- if err != nil {
- t.Fatalf("Failed to cleanup local container: %s", err)
- }
- }
-
- port, _ := strconv.Atoi(resource.GetPort("9042/tcp"))
- address := fmt.Sprintf("127.0.0.1:%d", port)
-
- // exponential backoff-retry
- if err = pool.Retry(func() error {
- clusterConfig := gocql.NewCluster(address)
- clusterConfig.Authenticator = gocql.PasswordAuthenticator{
- Username: "cassandra",
- Password: "cassandra",
- }
- clusterConfig.ProtoVersion = 4
- clusterConfig.Port = port
-
- session, err := clusterConfig.CreateSession()
- if err != nil {
- return fmt.Errorf("error creating session: %s", err)
- }
- defer session.Close()
- return nil
- }); err != nil {
- cleanup()
- t.Fatalf("Could not connect to cassandra docker container: %s", err)
- }
- return cleanup, address, port
-}
-
-func TestCassandra_Initialize(t *testing.T) {
- if os.Getenv("TRAVIS") != "true" {
- t.SkipNow()
- }
- cleanup, address, port := prepareCassandraTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "hosts": address,
- "port": port,
- "username": "cassandra",
- "password": "cassandra",
- "protocol_version": 4,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*Cassandra)
- connProducer := db.ConnectionProducer.(*cassandraConnectionProducer)
-
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if !connProducer.Initialized {
- t.Fatal("Database should be initalized")
- }
-
- err = db.Close()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // test a string protocol
- connectionDetails = map[string]interface{}{
- "hosts": address,
- "port": strconv.Itoa(port),
- "username": "cassandra",
- "password": "cassandra",
- "protocol_version": "4",
- }
-
- err = db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestCassandra_CreateUser(t *testing.T) {
- if os.Getenv("TRAVIS") != "true" {
- t.SkipNow()
- }
- cleanup, address, port := prepareCassandraTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "hosts": address,
- "port": port,
- "username": "cassandra",
- "password": "cassandra",
- "protocol_version": 4,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*Cassandra)
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testCassandraRole,
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, address, port, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-}
-
-func TestMyCassandra_RenewUser(t *testing.T) {
- if os.Getenv("TRAVIS") != "true" {
- t.SkipNow()
- }
- cleanup, address, port := prepareCassandraTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "hosts": address,
- "port": port,
- "username": "cassandra",
- "password": "cassandra",
- "protocol_version": 4,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*Cassandra)
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testCassandraRole,
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, address, port, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- err = db.RenewUser(statements, username, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestCassandra_RevokeUser(t *testing.T) {
- if os.Getenv("TRAVIS") != "true" {
- t.SkipNow()
- }
- cleanup, address, port := prepareCassandraTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "hosts": address,
- "port": port,
- "username": "cassandra",
- "password": "cassandra",
- "protocol_version": 4,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*Cassandra)
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testCassandraRole,
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, address, port, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- // Test default revoke statememts
- err = db.RevokeUser(statements, username)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, address, port, username, password); err == nil {
- t.Fatal("Credentials were not revoked")
- }
-}
-
-func testCredsExist(t testing.TB, address string, port int, username, password string) error {
- clusterConfig := gocql.NewCluster(address)
- clusterConfig.Authenticator = gocql.PasswordAuthenticator{
- Username: username,
- Password: password,
- }
- clusterConfig.ProtoVersion = 4
- clusterConfig.Port = port
-
- session, err := clusterConfig.CreateSession()
- if err != nil {
- return fmt.Errorf("error creating session: %s", err)
- }
- defer session.Close()
- return nil
-}
-
-const testCassandraRole = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;
-GRANT ALL PERMISSIONS ON ALL KEYSPACES TO {{username}};`
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/connection_producer.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/connection_producer.go
deleted file mode 100644
index 44b0b7d..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/connection_producer.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package cassandra
-
-import (
- "crypto/tls"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/mitchellh/mapstructure"
-
- "github.com/gocql/gocql"
- "github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/tlsutil"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
-)
-
-// cassandraConnectionProducer implements ConnectionProducer and provides an
-// interface for cassandra databases to make connections.
-type cassandraConnectionProducer struct {
- Hosts string `json:"hosts" structs:"hosts" mapstructure:"hosts"`
- Port int `json:"port" structs:"port" mapstructure:"port"`
- Username string `json:"username" structs:"username" mapstructure:"username"`
- Password string `json:"password" structs:"password" mapstructure:"password"`
- TLS bool `json:"tls" structs:"tls" mapstructure:"tls"`
- InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"`
- ProtocolVersion int `json:"protocol_version" structs:"protocol_version" mapstructure:"protocol_version"`
- ConnectTimeoutRaw interface{} `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"`
- TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"`
- Consistency string `json:"consistency" structs:"consistency" mapstructure:"consistency"`
- PemBundle string `json:"pem_bundle" structs:"pem_bundle" mapstructure:"pem_bundle"`
- PemJSON string `json:"pem_json" structs:"pem_json" mapstructure:"pem_json"`
-
- connectTimeout time.Duration
- certificate string
- privateKey string
- issuingCA string
-
- Initialized bool
- Type string
- session *gocql.Session
- sync.Mutex
-}
-
-func (c *cassandraConnectionProducer) Initialize(conf map[string]interface{}, verifyConnection bool) error {
- c.Lock()
- defer c.Unlock()
-
- err := mapstructure.WeakDecode(conf, c)
- if err != nil {
- return err
- }
-
- if c.ConnectTimeoutRaw == nil {
- c.ConnectTimeoutRaw = "0s"
- }
- c.connectTimeout, err = parseutil.ParseDurationSecond(c.ConnectTimeoutRaw)
- if err != nil {
- return fmt.Errorf("invalid connect_timeout: %s", err)
- }
-
- switch {
- case len(c.Hosts) == 0:
- return fmt.Errorf("hosts cannot be empty")
- case len(c.Username) == 0:
- return fmt.Errorf("username cannot be empty")
- case len(c.Password) == 0:
- return fmt.Errorf("password cannot be empty")
- }
-
- var certBundle *certutil.CertBundle
- var parsedCertBundle *certutil.ParsedCertBundle
- switch {
- case len(c.PemJSON) != 0:
- parsedCertBundle, err = certutil.ParsePKIJSON([]byte(c.PemJSON))
- if err != nil {
- return fmt.Errorf("could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: %s", err)
- }
- certBundle, err = parsedCertBundle.ToCertBundle()
- if err != nil {
- return fmt.Errorf("Error marshaling PEM information: %s", err)
- }
- c.certificate = certBundle.Certificate
- c.privateKey = certBundle.PrivateKey
- c.issuingCA = certBundle.IssuingCA
- c.TLS = true
-
- case len(c.PemBundle) != 0:
- parsedCertBundle, err = certutil.ParsePEMBundle(c.PemBundle)
- if err != nil {
- return fmt.Errorf("Error parsing the given PEM information: %s", err)
- }
- certBundle, err = parsedCertBundle.ToCertBundle()
- if err != nil {
- return fmt.Errorf("Error marshaling PEM information: %s", err)
- }
- c.certificate = certBundle.Certificate
- c.privateKey = certBundle.PrivateKey
- c.issuingCA = certBundle.IssuingCA
- c.TLS = true
- }
-
- // Set initialized to true at this point since all fields are set,
- // and the connection can be established at a later time.
- c.Initialized = true
-
- if verifyConnection {
- if _, err := c.Connection(); err != nil {
- return fmt.Errorf("error verifying connection: %s", err)
- }
- }
-
- return nil
-}
-
-func (c *cassandraConnectionProducer) Connection() (interface{}, error) {
- if !c.Initialized {
- return nil, connutil.ErrNotInitialized
- }
-
- // If we already have a DB, return it
- if c.session != nil {
- return c.session, nil
- }
-
- session, err := c.createSession()
- if err != nil {
- return nil, err
- }
-
- // Store the session in backend for reuse
- c.session = session
-
- return session, nil
-}
-
-func (c *cassandraConnectionProducer) Close() error {
- // Grab the write lock
- c.Lock()
- defer c.Unlock()
-
- if c.session != nil {
- c.session.Close()
- }
-
- c.session = nil
-
- return nil
-}
-
-func (c *cassandraConnectionProducer) createSession() (*gocql.Session, error) {
- hosts := strings.Split(c.Hosts, ",")
- clusterConfig := gocql.NewCluster(hosts...)
- clusterConfig.Authenticator = gocql.PasswordAuthenticator{
- Username: c.Username,
- Password: c.Password,
- }
-
- if c.Port != 0 {
- clusterConfig.Port = c.Port
- }
-
- clusterConfig.ProtoVersion = c.ProtocolVersion
- if clusterConfig.ProtoVersion == 0 {
- clusterConfig.ProtoVersion = 2
- }
-
- clusterConfig.Timeout = c.connectTimeout
- if c.TLS {
- var tlsConfig *tls.Config
- if len(c.certificate) > 0 || len(c.issuingCA) > 0 {
- if len(c.certificate) > 0 && len(c.privateKey) == 0 {
- return nil, fmt.Errorf("found certificate for TLS authentication but no private key")
- }
-
- certBundle := &certutil.CertBundle{}
- if len(c.certificate) > 0 {
- certBundle.Certificate = c.certificate
- certBundle.PrivateKey = c.privateKey
- }
- if len(c.issuingCA) > 0 {
- certBundle.IssuingCA = c.issuingCA
- }
-
- parsedCertBundle, err := certBundle.ToParsedCertBundle()
- if err != nil {
- return nil, fmt.Errorf("failed to parse certificate bundle: %s", err)
- }
-
- tlsConfig, err = parsedCertBundle.GetTLSConfig(certutil.TLSClient)
- if err != nil || tlsConfig == nil {
- return nil, fmt.Errorf("failed to get TLS configuration: tlsConfig:%#v err:%v", tlsConfig, err)
- }
- tlsConfig.InsecureSkipVerify = c.InsecureTLS
-
- if c.TLSMinVersion != "" {
- var ok bool
- tlsConfig.MinVersion, ok = tlsutil.TLSLookup[c.TLSMinVersion]
- if !ok {
- return nil, fmt.Errorf("invalid 'tls_min_version' in config")
- }
- } else {
- // MinVersion was not being set earlier. Reset it to
- // zero to gracefully handle upgrades.
- tlsConfig.MinVersion = 0
- }
- }
-
- clusterConfig.SslOpts = &gocql.SslOptions{
- Config: tlsConfig,
- }
- }
-
- session, err := clusterConfig.CreateSession()
- if err != nil {
- return nil, fmt.Errorf("error creating session: %s", err)
- }
-
- // Set consistency
- if c.Consistency != "" {
- consistencyValue, err := gocql.ParseConsistencyWrapper(c.Consistency)
- if err != nil {
- return nil, err
- }
-
- session.SetConsistency(consistencyValue)
- }
-
- // Verify the info
- err = session.Query(`LIST USERS`).Exec()
- if err != nil {
- return nil, fmt.Errorf("error validating connection info: %s", err)
- }
-
- return session, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/test-fixtures/cassandra.yaml b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/test-fixtures/cassandra.yaml
deleted file mode 100644
index 7c28d84..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/test-fixtures/cassandra.yaml
+++ /dev/null
@@ -1,1146 +0,0 @@
-# Cassandra storage config YAML
-
-# NOTE:
-# See http://wiki.apache.org/cassandra/StorageConfiguration for
-# full explanations of configuration directives
-# /NOTE
-
-# The name of the cluster. This is mainly used to prevent machines in
-# one logical cluster from joining another.
-cluster_name: 'Test Cluster'
-
-# This defines the number of tokens randomly assigned to this node on the ring
-# The more tokens, relative to other nodes, the larger the proportion of data
-# that this node will store. You probably want all nodes to have the same number
-# of tokens assuming they have equal hardware capability.
-#
-# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
-# and will use the initial_token as described below.
-#
-# Specifying initial_token will override this setting on the node's initial start,
-# on subsequent starts, this setting will apply even if initial token is set.
-#
-# If you already have a cluster with 1 token per node, and wish to migrate to
-# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
-num_tokens: 256
-
-# Triggers automatic allocation of num_tokens tokens for this node. The allocation
-# algorithm attempts to choose tokens in a way that optimizes replicated load over
-# the nodes in the datacenter for the replication strategy used by the specified
-# keyspace.
-#
-# The load assigned to each node will be close to proportional to its number of
-# vnodes.
-#
-# Only supported with the Murmur3Partitioner.
-# allocate_tokens_for_keyspace: KEYSPACE
-
-# initial_token allows you to specify tokens manually. While you can use it with
-# vnodes (num_tokens > 1, above) -- in which case you should provide a
-# comma-separated list -- it's primarily used when adding nodes to legacy clusters
-# that do not have vnodes enabled.
-# initial_token:
-
-# See http://wiki.apache.org/cassandra/HintedHandoff
-# May either be "true" or "false" to enable globally
-hinted_handoff_enabled: true
-
-# When hinted_handoff_enabled is true, a black list of data centers that will not
-# perform hinted handoff
-# hinted_handoff_disabled_datacenters:
-# - DC1
-# - DC2
-
-# this defines the maximum amount of time a dead host will have hints
-# generated. After it has been dead this long, new hints for it will not be
-# created until it has been seen alive and gone down again.
-max_hint_window_in_ms: 10800000 # 3 hours
-
-# Maximum throttle in KBs per second, per delivery thread. This will be
-# reduced proportionally to the number of nodes in the cluster. (If there
-# are two nodes in the cluster, each delivery thread will use the maximum
-# rate; if there are three, each will throttle to half of the maximum,
-# since we expect two nodes to be delivering hints simultaneously.)
-hinted_handoff_throttle_in_kb: 1024
-
-# Number of threads with which to deliver hints;
-# Consider increasing this number when you have multi-dc deployments, since
-# cross-dc handoff tends to be slower
-max_hints_delivery_threads: 2
-
-# Directory where Cassandra should store hints.
-# If not set, the default directory is $CASSANDRA_HOME/data/hints.
-# hints_directory: /var/lib/cassandra/hints
-
-# How often hints should be flushed from the internal buffers to disk.
-# Will *not* trigger fsync.
-hints_flush_period_in_ms: 10000
-
-# Maximum size for a single hints file, in megabytes.
-max_hints_file_size_in_mb: 128
-
-# Compression to apply to the hint files. If omitted, hints files
-# will be written uncompressed. LZ4, Snappy, and Deflate compressors
-# are supported.
-#hints_compression:
-# - class_name: LZ4Compressor
-# parameters:
-# -
-
-# Maximum throttle in KBs per second, total. This will be
-# reduced proportionally to the number of nodes in the cluster.
-batchlog_replay_throttle_in_kb: 1024
-
-# Authentication backend, implementing IAuthenticator; used to identify users
-# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
-# PasswordAuthenticator}.
-#
-# - AllowAllAuthenticator performs no checks - set it to disable authentication.
-# - PasswordAuthenticator relies on username/password pairs to authenticate
-# users. It keeps usernames and hashed passwords in system_auth.credentials table.
-# Please increase system_auth keyspace replication factor if you use this authenticator.
-# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
-authenticator: PasswordAuthenticator
-
-# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
-# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
-# CassandraAuthorizer}.
-#
-# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
-# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
-# increase system_auth keyspace replication factor if you use this authorizer.
-authorizer: CassandraAuthorizer
-
-# Part of the Authentication & Authorization backend, implementing IRoleManager; used
-# to maintain grants and memberships between roles.
-# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager,
-# which stores role information in the system_auth keyspace. Most functions of the
-# IRoleManager require an authenticated login, so unless the configured IAuthenticator
-# actually implements authentication, most of this functionality will be unavailable.
-#
-# - CassandraRoleManager stores role data in the system_auth keyspace. Please
-# increase system_auth keyspace replication factor if you use this role manager.
-role_manager: CassandraRoleManager
-
-# Validity period for roles cache (fetching granted roles can be an expensive
-# operation depending on the role manager, CassandraRoleManager is one example)
-# Granted roles are cached for authenticated sessions in AuthenticatedUser and
-# after the period specified here, become eligible for (async) reload.
-# Defaults to 2000, set to 0 to disable caching entirely.
-# Will be disabled automatically for AllowAllAuthenticator.
-roles_validity_in_ms: 2000
-
-# Refresh interval for roles cache (if enabled).
-# After this interval, cache entries become eligible for refresh. Upon next
-# access, an async reload is scheduled and the old value returned until it
-# completes. If roles_validity_in_ms is non-zero, then this must be
-# also.
-# Defaults to the same value as roles_validity_in_ms.
-# roles_update_interval_in_ms: 2000
-
-# Validity period for permissions cache (fetching permissions can be an
-# expensive operation depending on the authorizer, CassandraAuthorizer is
-# one example). Defaults to 2000, set to 0 to disable.
-# Will be disabled automatically for AllowAllAuthorizer.
-permissions_validity_in_ms: 2000
-
-# Refresh interval for permissions cache (if enabled).
-# After this interval, cache entries become eligible for refresh. Upon next
-# access, an async reload is scheduled and the old value returned until it
-# completes. If permissions_validity_in_ms is non-zero, then this must be
-# also.
-# Defaults to the same value as permissions_validity_in_ms.
-# permissions_update_interval_in_ms: 2000
-
-# Validity period for credentials cache. This cache is tightly coupled to
-# the provided PasswordAuthenticator implementation of IAuthenticator. If
-# another IAuthenticator implementation is configured, this cache will not
-# be automatically used and so the following settings will have no effect.
-# Please note, credentials are cached in their encrypted form, so while
-# activating this cache may reduce the number of queries made to the
-# underlying table, it may not bring a significant reduction in the
-# latency of individual authentication attempts.
-# Defaults to 2000, set to 0 to disable credentials caching.
-credentials_validity_in_ms: 2000
-
-# Refresh interval for credentials cache (if enabled).
-# After this interval, cache entries become eligible for refresh. Upon next
-# access, an async reload is scheduled and the old value returned until it
-# completes. If credentials_validity_in_ms is non-zero, then this must be
-# also.
-# Defaults to the same value as credentials_validity_in_ms.
-# credentials_update_interval_in_ms: 2000
-
-# The partitioner is responsible for distributing groups of rows (by
-# partition key) across nodes in the cluster. You should leave this
-# alone for new clusters. The partitioner can NOT be changed without
-# reloading all data, so when upgrading you should set this to the
-# same partitioner you were already using.
-#
-# Besides Murmur3Partitioner, partitioners included for backwards
-# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
-# OrderPreservingPartitioner.
-#
-partitioner: org.apache.cassandra.dht.Murmur3Partitioner
-
-# Directories where Cassandra should store data on disk. Cassandra
-# will spread data evenly across them, subject to the granularity of
-# the configured compaction strategy.
-# If not set, the default directory is $CASSANDRA_HOME/data/data.
-data_file_directories:
- - /var/lib/cassandra/data
-
-# commit log. when running on magnetic HDD, this should be a
-# separate spindle than the data directories.
-# If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
-commitlog_directory: /var/lib/cassandra/commitlog
-
-# Enable / disable CDC functionality on a per-node basis. This modifies the logic used
-# for write path allocation rejection (standard: never reject. cdc: reject Mutation
-# containing a CDC-enabled table if at space limit in cdc_raw_directory).
-cdc_enabled: false
-
-# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the
-# segment contains mutations for a CDC-enabled table. This should be placed on a
-# separate spindle than the data directories. If not set, the default directory is
-# $CASSANDRA_HOME/data/cdc_raw.
-# cdc_raw_directory: /var/lib/cassandra/cdc_raw
-
-# Policy for data disk failures:
-#
-# die
-# shut down gossip and client transports and kill the JVM for any fs errors or
-# single-sstable errors, so the node can be replaced.
-#
-# stop_paranoid
-# shut down gossip and client transports even for single-sstable errors,
-# kill the JVM for errors during startup.
-#
-# stop
-# shut down gossip and client transports, leaving the node effectively dead, but
-# can still be inspected via JMX, kill the JVM for errors during startup.
-#
-# best_effort
-# stop using the failed disk and respond to requests based on
-# remaining available sstables. This means you WILL see obsolete
-# data at CL.ONE!
-#
-# ignore
-# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-disk_failure_policy: stop
-
-# Policy for commit disk failures:
-#
-# die
-# shut down gossip and Thrift and kill the JVM, so the node can be replaced.
-#
-# stop
-# shut down gossip and Thrift, leaving the node effectively dead, but
-# can still be inspected via JMX.
-#
-# stop_commit
-# shutdown the commit log, letting writes collect but
-# continuing to service reads, as in pre-2.0.5 Cassandra
-#
-# ignore
-# ignore fatal errors and let the batches fail
-commit_failure_policy: stop
-
-# Maximum size of the native protocol prepared statement cache
-#
-# Valid values are either "auto" (omitting the value) or a value greater 0.
-#
-# Note that specifying a too large value will result in long running GCs and possbily
-# out-of-memory errors. Keep the value at a small fraction of the heap.
-#
-# If you constantly see "prepared statements discarded in the last minute because
-# cache limit reached" messages, the first step is to investigate the root cause
-# of these messages and check whether prepared statements are used correctly -
-# i.e. use bind markers for variable parts.
-#
-# Do only change the default value, if you really have more prepared statements than
-# fit in the cache. In most cases it is not neccessary to change this value.
-# Constantly re-preparing statements is a performance penalty.
-#
-# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
-prepared_statements_cache_size_mb:
-
-# Maximum size of the Thrift prepared statement cache
-#
-# If you do not use Thrift at all, it is safe to leave this value at "auto".
-#
-# See description of 'prepared_statements_cache_size_mb' above for more information.
-#
-# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
-thrift_prepared_statements_cache_size_mb:
-
-# Maximum size of the key cache in memory.
-#
-# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
-# minimum, sometimes more. The key cache is fairly tiny for the amount of
-# time it saves, so it's worthwhile to use it at large numbers.
-# The row cache saves even more time, but must contain the entire row,
-# so it is extremely space-intensive. It's best to only use the
-# row cache if you have hot rows or static rows.
-#
-# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
-key_cache_size_in_mb:
-
-# Duration in seconds after which Cassandra should
-# save the key cache. Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 14400 or 4 hours.
-key_cache_save_period: 14400
-
-# Number of keys from the key cache to save
-# Disabled by default, meaning all keys are going to be saved
-# key_cache_keys_to_save: 100
-
-# Row cache implementation class name. Available implementations:
-#
-# org.apache.cassandra.cache.OHCProvider
-# Fully off-heap row cache implementation (default).
-#
-# org.apache.cassandra.cache.SerializingCacheProvider
-# This is the row cache implementation availabile
-# in previous releases of Cassandra.
-# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
-
-# Maximum size of the row cache in memory.
-# Please note that OHC cache implementation requires some additional off-heap memory to manage
-# the map structures and some in-flight memory during operations before/after cache entries can be
-# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
-# Do not specify more memory that the system can afford in the worst usual situation and leave some
-# headroom for OS block level cache. Do never allow your system to swap.
-#
-# Default value is 0, to disable row caching.
-row_cache_size_in_mb: 0
-
-# Duration in seconds after which Cassandra should save the row cache.
-# Caches are saved to saved_caches_directory as specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 0 to disable saving the row cache.
-row_cache_save_period: 0
-
-# Number of keys from the row cache to save.
-# Specify 0 (which is the default), meaning all keys are going to be saved
-# row_cache_keys_to_save: 100
-
-# Maximum size of the counter cache in memory.
-#
-# Counter cache helps to reduce counter locks' contention for hot counter cells.
-# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
-# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
-# of the lock hold, helping with hot counter cell updates, but will not allow skipping
-# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
-# in memory, not the whole counter, so it's relatively cheap.
-#
-# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
-# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
-counter_cache_size_in_mb:
-
-# Duration in seconds after which Cassandra should
-# save the counter cache (keys only). Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Default is 7200 or 2 hours.
-counter_cache_save_period: 7200
-
-# Number of keys from the counter cache to save
-# Disabled by default, meaning all keys are going to be saved
-# counter_cache_keys_to_save: 100
-
-# saved caches
-# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
-saved_caches_directory: /var/lib/cassandra/saved_caches
-
-# commitlog_sync may be either "periodic" or "batch."
-#
-# When in batch mode, Cassandra won't ack writes until the commit log
-# has been fsynced to disk. It will wait
-# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
-# This window should be kept short because the writer threads will
-# be unable to do extra work while waiting. (You may need to increase
-# concurrent_writes for the same reason.)
-#
-# commitlog_sync: batch
-# commitlog_sync_batch_window_in_ms: 2
-#
-# the other option is "periodic" where writes may be acked immediately
-# and the CommitLog is simply synced every commitlog_sync_period_in_ms
-# milliseconds.
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-
-# The size of the individual commitlog file segments. A commitlog
-# segment may be archived, deleted, or recycled once all the data
-# in it (potentially from each columnfamily in the system) has been
-# flushed to sstables.
-#
-# The default size is 32, which is almost always fine, but if you are
-# archiving commitlog segments (see commitlog_archiving.properties),
-# then you probably want a finer granularity of archiving; 8 or 16 MB
-# is reasonable.
-# Max mutation size is also configurable via max_mutation_size_in_kb setting in
-# cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024.
-#
-# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
-# be set to at least twice the size of max_mutation_size_in_kb / 1024
-#
-commitlog_segment_size_in_mb: 32
-
-# Compression to apply to the commit log. If omitted, the commit log
-# will be written uncompressed. LZ4, Snappy, and Deflate compressors
-# are supported.
-# commitlog_compression:
-# - class_name: LZ4Compressor
-# parameters:
-# -
-
-# any class that implements the SeedProvider interface and has a
-# constructor that takes a Map of parameters will do.
-seed_provider:
- # Addresses of hosts that are deemed contact points.
- # Cassandra nodes use this list of hosts to find each other and learn
- # the topology of the ring. You must change this if you are running
- # multiple nodes!
- - class_name: org.apache.cassandra.locator.SimpleSeedProvider
- parameters:
- # seeds is actually a comma-delimited list of addresses.
- # Ex: ",,"
- - seeds: "127.0.0.1"
-
-# For workloads with more data than can fit in memory, Cassandra's
-# bottleneck will be reads that need to fetch data from
-# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
-# order to allow the operations to enqueue low enough in the stack
-# that the OS and drives can reorder them. Same applies to
-# "concurrent_counter_writes", since counter writes read the current
-# values before incrementing and writing them back.
-#
-# On the other hand, since writes are almost never IO bound, the ideal
-# number of "concurrent_writes" is dependent on the number of cores in
-# your system; (8 * number_of_cores) is a good rule of thumb.
-concurrent_reads: 32
-concurrent_writes: 32
-concurrent_counter_writes: 32
-
-# For materialized view writes, as there is a read involved, so this should
-# be limited by the less of concurrent reads or concurrent writes.
-concurrent_materialized_view_writes: 32
-
-# Maximum memory to use for sstable chunk cache and buffer pooling.
-# 32MB of this are reserved for pooling buffers, the rest is used as an
-# cache that holds uncompressed sstable chunks.
-# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap,
-# so is in addition to the memory allocated for heap. The cache also has on-heap
-# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size
-# if the default 64k chunk size is used).
-# Memory is only allocated when needed.
-# file_cache_size_in_mb: 512
-
-# Flag indicating whether to allocate on or off heap when the sstable buffer
-# pool is exhausted, that is when it has exceeded the maximum memory
-# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.
-
-# buffer_pool_use_heap_if_exhausted: true
-
-# The strategy for optimizing disk read
-# Possible values are:
-# ssd (for solid state disks, the default)
-# spinning (for spinning disks)
-# disk_optimization_strategy: ssd
-
-# Total permitted memory to use for memtables. Cassandra will stop
-# accepting writes when the limit is exceeded until a flush completes,
-# and will trigger a flush based on memtable_cleanup_threshold
-# If omitted, Cassandra will set both to 1/4 the size of the heap.
-# memtable_heap_space_in_mb: 2048
-# memtable_offheap_space_in_mb: 2048
-
-# Ratio of occupied non-flushing memtable size to total permitted size
-# that will trigger a flush of the largest memtable. Larger mct will
-# mean larger flushes and hence less compaction, but also less concurrent
-# flush activity which can make it difficult to keep your disks fed
-# under heavy write load.
-#
-# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
-# memtable_cleanup_threshold: 0.11
-
-# Specify the way Cassandra allocates and manages memtable memory.
-# Options are:
-#
-# heap_buffers
-# on heap nio buffers
-#
-# offheap_buffers
-# off heap (direct) nio buffers
-#
-# offheap_objects
-# off heap objects
-memtable_allocation_type: heap_buffers
-
-# Total space to use for commit logs on disk.
-#
-# If space gets above this value, Cassandra will flush every dirty CF
-# in the oldest segment and remove it. So a small total commitlog space
-# will tend to cause more flush activity on less-active columnfamilies.
-#
-# The default value is the smaller of 8192, and 1/4 of the total space
-# of the commitlog volume.
-#
-# commitlog_total_space_in_mb: 8192
-
-# This sets the amount of memtable flush writer threads. These will
-# be blocked by disk io, and each one will hold a memtable in memory
-# while blocked.
-#
-# memtable_flush_writers defaults to one per data_file_directory.
-#
-# If your data directories are backed by SSD, you can increase this, but
-# avoid having memtable_flush_writers * data_file_directories > number of cores
-#memtable_flush_writers: 1
-
-# Total space to use for change-data-capture logs on disk.
-#
-# If space gets above this value, Cassandra will throw WriteTimeoutException
-# on Mutations including tables with CDC enabled. A CDCCompactor is responsible
-# for parsing the raw CDC logs and deleting them when parsing is completed.
-#
-# The default value is the min of 4096 mb and 1/8th of the total space
-# of the drive where cdc_raw_directory resides.
-# cdc_total_space_in_mb: 4096
-
-# When we hit our cdc_raw limit and the CDCCompactor is either running behind
-# or experiencing backpressure, we check at the following interval to see if any
-# new space for cdc-tracked tables has been made available. Default to 250ms
-# cdc_free_space_check_interval_ms: 250
-
-# A fixed memory pool size in MB for for SSTable index summaries. If left
-# empty, this will default to 5% of the heap size. If the memory usage of
-# all index summaries exceeds this limit, SSTables with low read rates will
-# shrink their index summaries in order to meet this limit. However, this
-# is a best-effort process. In extreme conditions Cassandra may need to use
-# more than this amount of memory.
-index_summary_capacity_in_mb:
-
-# How frequently index summaries should be resampled. This is done
-# periodically to redistribute memory from the fixed-size pool to sstables
-# proportional their recent read rates. Setting to -1 will disable this
-# process, leaving existing index summaries at their current sampling level.
-index_summary_resize_interval_in_minutes: 60
-
-# Whether to, when doing sequential writing, fsync() at intervals in
-# order to force the operating system to flush the dirty
-# buffers. Enable this to avoid sudden dirty buffer flushing from
-# impacting read latencies. Almost always a good idea on SSDs; not
-# necessarily on platters.
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-
-# TCP port, for commands and data
-# For security reasons, you should not expose this port to the internet. Firewall it if needed.
-storage_port: 7000
-
-# SSL port, for encrypted communication. Unused unless enabled in
-# encryption_options
-# For security reasons, you should not expose this port to the internet. Firewall it if needed.
-ssl_storage_port: 7001
-
-# Address or interface to bind to and tell other Cassandra nodes to connect to.
-# You _must_ change this if you want multiple nodes to be able to communicate!
-#
-# Set listen_address OR listen_interface, not both.
-#
-# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
-# will always do the Right Thing _if_ the node is properly configured
-# (hostname, name resolution, etc), and the Right Thing is to use the
-# address associated with the hostname (it might not be).
-#
-# Setting listen_address to 0.0.0.0 is always wrong.
-#
-listen_address: 172.17.0.5
-
-# Set listen_address OR listen_interface, not both. Interfaces must correspond
-# to a single address, IP aliasing is not supported.
-# listen_interface: eth0
-
-# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
-# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
-# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
-# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
-# listen_interface_prefer_ipv6: false
-
-# Address to broadcast to other Cassandra nodes
-# Leaving this blank will set it to the same value as listen_address
-broadcast_address: 127.0.0.1
-
-# When using multiple physical network interfaces, set this
-# to true to listen on broadcast_address in addition to
-# the listen_address, allowing nodes to communicate in both
-# interfaces.
-# Ignore this property if the network configuration automatically
-# routes between the public and private networks such as EC2.
-# listen_on_broadcast_address: false
-
-# Internode authentication backend, implementing IInternodeAuthenticator;
-# used to allow/disallow connections from peer nodes.
-# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
-
-# Whether to start the native transport server.
-# Please note that the address on which the native transport is bound is the
-# same as the rpc_address. The port however is different and specified below.
-start_native_transport: true
-# port for the CQL native transport to listen for clients on
-# For security reasons, you should not expose this port to the internet. Firewall it if needed.
-native_transport_port: 9042
-# Enabling native transport encryption in client_encryption_options allows you to either use
-# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
-# standard native_transport_port.
-# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
-# for native_transport_port. Setting native_transport_port_ssl to a different value
-# from native_transport_port will use encryption for native_transport_port_ssl while
-# keeping native_transport_port unencrypted.
-# native_transport_port_ssl: 9142
-# The maximum threads for handling requests when the native transport is used.
-# This is similar to rpc_max_threads though the default differs slightly (and
-# there is no native_transport_min_threads, idle threads will always be stopped
-# after 30 seconds).
-# native_transport_max_threads: 128
-#
-# The maximum size of allowed frame. Frame (requests) larger than this will
-# be rejected as invalid. The default is 256MB. If you're changing this parameter,
-# you may want to adjust max_value_size_in_mb accordingly.
-# native_transport_max_frame_size_in_mb: 256
-
-# The maximum number of concurrent client connections.
-# The default is -1, which means unlimited.
-# native_transport_max_concurrent_connections: -1
-
-# The maximum number of concurrent client connections per source ip.
-# The default is -1, which means unlimited.
-# native_transport_max_concurrent_connections_per_ip: -1
-
-# Whether to start the thrift rpc server.
-start_rpc: false
-
-# The address or interface to bind the Thrift RPC service and native transport
-# server to.
-#
-# Set rpc_address OR rpc_interface, not both.
-#
-# Leaving rpc_address blank has the same effect as on listen_address
-# (i.e. it will be based on the configured hostname of the node).
-#
-# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
-# set broadcast_rpc_address to a value other than 0.0.0.0.
-#
-# For security reasons, you should not expose this port to the internet. Firewall it if needed.
-rpc_address: 0.0.0.0
-
-# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
-# to a single address, IP aliasing is not supported.
-# rpc_interface: eth1
-
-# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
-# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
-# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
-# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
-# rpc_interface_prefer_ipv6: false
-
-# port for Thrift to listen for clients on
-rpc_port: 9160
-
-# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
-# be set to 0.0.0.0. If left blank, this will be set to the value of
-# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
-# be set.
-broadcast_rpc_address: 127.0.0.1
-
-# enable or disable keepalive on rpc/native connections
-rpc_keepalive: true
-
-# Cassandra provides two out-of-the-box options for the RPC Server:
-#
-# sync
-# One thread per thrift connection. For a very large number of clients, memory
-# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
-# per thread, and that will correspond to your use of virtual memory (but physical memory
-# may be limited depending on use of stack space).
-#
-# hsha
-# Stands for "half synchronous, half asynchronous." All thrift clients are handled
-# asynchronously using a small number of threads that does not vary with the amount
-# of thrift clients (and thus scales well to many clients). The rpc requests are still
-# synchronous (one thread per active request). If hsha is selected then it is essential
-# that rpc_max_threads is changed from the default value of unlimited.
-#
-# The default is sync because on Windows hsha is about 30% slower. On Linux,
-# sync/hsha performance is about the same, with hsha of course using less memory.
-#
-# Alternatively, can provide your own RPC server by providing the fully-qualified class name
-# of an o.a.c.t.TServerFactory that can create an instance of it.
-rpc_server_type: sync
-
-# Uncomment rpc_min|max_thread to set request pool size limits.
-#
-# Regardless of your choice of RPC server (see above), the number of maximum requests in the
-# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
-# RPC server, it also dictates the number of clients that can be connected at all).
-#
-# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
-# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
-# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
-#
-# rpc_min_threads: 16
-# rpc_max_threads: 2048
-
-# uncomment to set socket buffer sizes on rpc connections
-# rpc_send_buff_size_in_bytes:
-# rpc_recv_buff_size_in_bytes:
-
-# Uncomment to set socket buffer size for internode communication
-# Note that when setting this, the buffer size is limited by net.core.wmem_max
-# and when not setting it it is defined by net.ipv4.tcp_wmem
-# See also:
-# /proc/sys/net/core/wmem_max
-# /proc/sys/net/core/rmem_max
-# /proc/sys/net/ipv4/tcp_wmem
-# /proc/sys/net/ipv4/tcp_wmem
-# and 'man tcp'
-# internode_send_buff_size_in_bytes:
-
-# Uncomment to set socket buffer size for internode communication
-# Note that when setting this, the buffer size is limited by net.core.wmem_max
-# and when not setting it it is defined by net.ipv4.tcp_wmem
-# internode_recv_buff_size_in_bytes:
-
-# Frame size for thrift (maximum message length).
-thrift_framed_transport_size_in_mb: 15
-
-# Set to true to have Cassandra create a hard link to each sstable
-# flushed or streamed locally in a backups/ subdirectory of the
-# keyspace data. Removing these links is the operator's
-# responsibility.
-incremental_backups: false
-
-# Whether or not to take a snapshot before each compaction. Be
-# careful using this option, since Cassandra won't clean up the
-# snapshots for you. Mostly useful if you're paranoid when there
-# is a data format change.
-snapshot_before_compaction: false
-
-# Whether or not a snapshot is taken of the data before keyspace truncation
-# or dropping of column families. The STRONGLY advised default of true
-# should be used to provide data safety. If you set this flag to false, you will
-# lose data on truncation or drop.
-auto_snapshot: true
-
-# Granularity of the collation index of rows within a partition.
-# Increase if your rows are large, or if you have a very large
-# number of rows per partition. The competing goals are these:
-#
-# - a smaller granularity means more index entries are generated
-# and looking up rows withing the partition by collation column
-# is faster
-# - but, Cassandra will keep the collation index in memory for hot
-# rows (as part of the key cache), so a larger granularity means
-# you can cache more hot rows
-column_index_size_in_kb: 64
-
-# Per sstable indexed key cache entries (the collation index in memory
-# mentioned above) exceeding this size will not be held on heap.
-# This means that only partition information is held on heap and the
-# index entries are read from disk.
-#
-# Note that this size refers to the size of the
-# serialized index information and not the size of the partition.
-column_index_cache_size_in_kb: 2
-
-# Number of simultaneous compactions to allow, NOT including
-# validation "compactions" for anti-entropy repair. Simultaneous
-# compactions can help preserve read performance in a mixed read/write
-# workload, by mitigating the tendency of small sstables to accumulate
-# during a single long running compactions. The default is usually
-# fine and if you experience problems with compaction running too
-# slowly or too fast, you should look at
-# compaction_throughput_mb_per_sec first.
-#
-# concurrent_compactors defaults to the smaller of (number of disks,
-# number of cores), with a minimum of 2 and a maximum of 8.
-#
-# If your data directories are backed by SSD, you should increase this
-# to the number of cores.
-#concurrent_compactors: 1
-
-# Throttles compaction to the given total throughput across the entire
-# system. The faster you insert data, the faster you need to compact in
-# order to keep the sstable count down, but in general, setting this to
-# 16 to 32 times the rate you are inserting data is more than sufficient.
-# Setting this to 0 disables throttling. Note that this account for all types
-# of compaction, including validation compaction.
-compaction_throughput_mb_per_sec: 16
-
-# When compacting, the replacement sstable(s) can be opened before they
-# are completely written, and used in place of the prior sstables for
-# any range that has been written. This helps to smoothly transfer reads
-# between the sstables, reducing page cache churn and keeping hot rows hot
-sstable_preemptive_open_interval_in_mb: 50
-
-# Throttles all outbound streaming file transfers on this node to the
-# given total throughput in Mbps. This is necessary because Cassandra does
-# mostly sequential IO when streaming data during bootstrap or repair, which
-# can lead to saturating the network connection and degrading rpc performance.
-# When unset, the default is 200 Mbps or 25 MB/s.
-# stream_throughput_outbound_megabits_per_sec: 200
-
-# Throttles all streaming file transfer between the datacenters,
-# this setting allows users to throttle inter dc stream throughput in addition
-# to throttling all network stream traffic as configured with
-# stream_throughput_outbound_megabits_per_sec
-# When unset, the default is 200 Mbps or 25 MB/s
-# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
-
-# How long the coordinator should wait for read operations to complete
-read_request_timeout_in_ms: 5000
-# How long the coordinator should wait for seq or index scans to complete
-range_request_timeout_in_ms: 10000
-# How long the coordinator should wait for writes to complete
-write_request_timeout_in_ms: 2000
-# How long the coordinator should wait for counter writes to complete
-counter_write_request_timeout_in_ms: 5000
-# How long a coordinator should continue to retry a CAS operation
-# that contends with other proposals for the same row
-cas_contention_timeout_in_ms: 1000
-# How long the coordinator should wait for truncates to complete
-# (This can be much longer, because unless auto_snapshot is disabled
-# we need to flush first so we can snapshot before removing the data.)
-truncate_request_timeout_in_ms: 60000
-# The default timeout for other, miscellaneous operations
-request_timeout_in_ms: 10000
-
-# Enable operation timeout information exchange between nodes to accurately
-# measure request timeouts. If disabled, replicas will assume that requests
-# were forwarded to them instantly by the coordinator, which means that
-# under overload conditions we will waste that much extra time processing
-# already-timed-out requests.
-#
-# Warning: before enabling this property make sure to ntp is installed
-# and the times are synchronized between the nodes.
-cross_node_timeout: false
-
-# Set socket timeout for streaming operation.
-# The stream session is failed if no data/ack is received by any of the participants
-# within that period, which means this should also be sufficient to stream a large
-# sstable or rebuild table indexes.
-# Default value is 86400000ms, which means stale streams timeout after 24 hours.
-# A value of zero means stream sockets should never time out.
-# streaming_socket_timeout_in_ms: 86400000
-
-# phi value that must be reached for a host to be marked down.
-# most users should never need to adjust this.
-# phi_convict_threshold: 8
-
-# endpoint_snitch -- Set this to a class that implements
-# IEndpointSnitch. The snitch has two functions:
-#
-# - it teaches Cassandra enough about your network topology to route
-# requests efficiently
-# - it allows Cassandra to spread replicas around your cluster to avoid
-# correlated failures. It does this by grouping machines into
-# "datacenters" and "racks." Cassandra will do its best not to have
-# more than one replica on the same "rack" (which may not actually
-# be a physical location)
-#
-# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH
-# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss.
-# This means that if you start with the default SimpleSnitch, which
-# locates every node on "rack1" in "datacenter1", your only options
-# if you need to add another datacenter are GossipingPropertyFileSnitch
-# (and the older PFS). From there, if you want to migrate to an
-# incompatible snitch like Ec2Snitch you can do it by adding new nodes
-# under Ec2Snitch (which will locate them in a new "datacenter") and
-# decommissioning the old ones.
-#
-# Out of the box, Cassandra provides:
-#
-# SimpleSnitch:
-# Treats Strategy order as proximity. This can improve cache
-# locality when disabling read repair. Only appropriate for
-# single-datacenter deployments.
-#
-# GossipingPropertyFileSnitch
-# This should be your go-to snitch for production use. The rack
-# and datacenter for the local node are defined in
-# cassandra-rackdc.properties and propagated to other nodes via
-# gossip. If cassandra-topology.properties exists, it is used as a
-# fallback, allowing migration from the PropertyFileSnitch.
-#
-# PropertyFileSnitch:
-# Proximity is determined by rack and data center, which are
-# explicitly configured in cassandra-topology.properties.
-#
-# Ec2Snitch:
-# Appropriate for EC2 deployments in a single Region. Loads Region
-# and Availability Zone information from the EC2 API. The Region is
-# treated as the datacenter, and the Availability Zone as the rack.
-# Only private IPs are used, so this will not work across multiple
-# Regions.
-#
-# Ec2MultiRegionSnitch:
-# Uses public IPs as broadcast_address to allow cross-region
-# connectivity. (Thus, you should set seed addresses to the public
-# IP as well.) You will need to open the storage_port or
-# ssl_storage_port on the public IP firewall. (For intra-Region
-# traffic, Cassandra will switch to the private IP after
-# establishing a connection.)
-#
-# RackInferringSnitch:
-# Proximity is determined by rack and data center, which are
-# assumed to correspond to the 3rd and 2nd octet of each node's IP
-# address, respectively. Unless this happens to match your
-# deployment conventions, this is best used as an example of
-# writing a custom Snitch class and is provided in that spirit.
-#
-# You can use a custom Snitch by setting this to the full class name
-# of the snitch, which will be assumed to be on your classpath.
-endpoint_snitch: SimpleSnitch
-
-# controls how often to perform the more expensive part of host score
-# calculation
-dynamic_snitch_update_interval_in_ms: 100
-# controls how often to reset all host scores, allowing a bad host to
-# possibly recover
-dynamic_snitch_reset_interval_in_ms: 600000
-# if set greater than zero and read_repair_chance is < 1.0, this will allow
-# 'pinning' of replicas to hosts in order to increase cache capacity.
-# The badness threshold will control how much worse the pinned host has to be
-# before the dynamic snitch will prefer other replicas over it. This is
-# expressed as a double which represents a percentage. Thus, a value of
-# 0.2 means Cassandra would continue to prefer the static snitch values
-# until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.1
-
-# request_scheduler -- Set this to a class that implements
-# RequestScheduler, which will schedule incoming client requests
-# according to the specific policy. This is useful for multi-tenancy
-# with a single Cassandra cluster.
-# NOTE: This is specifically for requests from the client and does
-# not affect inter node communication.
-# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
-# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
-# client requests to a node with a separate queue for each
-# request_scheduler_id. The scheduler is further customized by
-# request_scheduler_options as described below.
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-
-# Scheduler Options vary based on the type of scheduler
-#
-# NoScheduler
-# Has no options
-#
-# RoundRobin
-# throttle_limit
-# The throttle_limit is the number of in-flight
-# requests per client. Requests beyond
-# that limit are queued up until
-# running requests can complete.
-# The value of 80 here is twice the number of
-# concurrent_reads + concurrent_writes.
-# default_weight
-# default_weight is optional and allows for
-# overriding the default which is 1.
-# weights
-# Weights are optional and will default to 1 or the
-# overridden default_weight. The weight translates into how
-# many requests are handled during each turn of the
-# RoundRobin, based on the scheduler id.
-#
-# request_scheduler_options:
-# throttle_limit: 80
-# default_weight: 5
-# weights:
-# Keyspace1: 1
-# Keyspace2: 5
-
-# request_scheduler_id -- An identifier based on which to perform
-# the request scheduling. Currently the only valid option is keyspace.
-# request_scheduler_id: keyspace
-
-# Enable or disable inter-node encryption
-# JVM defaults for supported SSL socket protocols and cipher suites can
-# be replaced using custom encryption options. This is not recommended
-# unless you have policies in place that dictate certain settings, or
-# need to disable vulnerable ciphers or protocols in case the JVM cannot
-# be updated.
-# FIPS compliant settings can be configured at JVM level and should not
-# involve changing encryption settings here:
-# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html
-# *NOTE* No custom encryption options are enabled at the moment
-# The available internode options are : all, none, dc, rack
-#
-# If set to dc cassandra will encrypt the traffic between the DCs
-# If set to rack cassandra will encrypt the traffic between the racks
-#
-# The passwords used in these options must match the passwords used when generating
-# the keystore and truststore. For instructions on generating these files, see:
-# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
-#
-server_encryption_options:
- internode_encryption: none
- keystore: conf/.keystore
- keystore_password: cassandra
- truststore: conf/.truststore
- truststore_password: cassandra
- # More advanced defaults below:
- # protocol: TLS
- # algorithm: SunX509
- # store_type: JKS
- # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
- # require_client_auth: false
- # require_endpoint_verification: false
-
-# enable or disable client/server encryption.
-client_encryption_options:
- enabled: false
- # If enabled and optional is set to true encrypted and unencrypted connections are handled.
- optional: false
- keystore: conf/.keystore
- keystore_password: cassandra
- # require_client_auth: false
- # Set trustore and truststore_password if require_client_auth is true
- # truststore: conf/.truststore
- # truststore_password: cassandra
- # More advanced defaults below:
- # protocol: TLS
- # algorithm: SunX509
- # store_type: JKS
- # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-# internode_compression controls whether traffic between nodes is
-# compressed.
-# Can be:
-#
-# all
-# all traffic is compressed
-#
-# dc
-# traffic between different datacenters is compressed
-#
-# none
-# nothing is compressed.
-internode_compression: dc
-
-# Enable or disable tcp_nodelay for inter-dc communication.
-# Disabling it will result in larger (but fewer) network packets being sent,
-# reducing overhead from the TCP protocol itself, at the cost of increasing
-# latency if you block for cross-datacenter responses.
-inter_dc_tcp_nodelay: false
-
-# TTL for different trace types used during logging of the repair process.
-tracetype_query_ttl: 86400
-tracetype_repair_ttl: 604800
-
-# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
-# This threshold can be adjusted to minimize logging if necessary
-# gc_log_threshold_in_ms: 200
-
-# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at
-# INFO level
-# UDFs (user defined functions) are disabled by default.
-# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
-enable_user_defined_functions: false
-
-# Enables scripted UDFs (JavaScript UDFs).
-# Java UDFs are always enabled, if enable_user_defined_functions is true.
-# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
-# This option has no effect, if enable_user_defined_functions is false.
-enable_scripted_user_defined_functions: false
-
-# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
-# Lowering this value on Windows can provide much tighter latency and better throughput, however
-# some virtualized environments may see a negative performance impact from changing this setting
-# below their system default. The sysinternals 'clockres' tool can confirm your system's default
-# setting.
-windows_timer_interval: 1
-
-
-# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
-# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
-# the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys
-# can still (and should!) be in the keystore and will be used on decrypt operations
-# (to handle the case of key rotation).
-#
-# It is strongly recommended to download and install Java Cryptography Extension (JCE)
-# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
-# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
-#
-# Currently, only the following file types are supported for transparent data encryption, although
-# more are coming in future cassandra releases: commitlog, hints
-transparent_data_encryption_options:
- enabled: false
- chunk_length_kb: 64
- cipher: AES/CBC/PKCS5Padding
- key_alias: testing:1
- # CBC IV length for AES needs to be 16 bytes (which is also the default size)
- # iv_length: 16
- key_provider:
- - class_name: org.apache.cassandra.security.JKSKeyProvider
- parameters:
- - keystore: conf/.keystore
- keystore_password: cassandra
- store_type: JCEKS
- key_password: cassandra
-
-
-#####################
-# SAFETY THRESHOLDS #
-#####################
-
-# When executing a scan, within or across a partition, we need to keep the
-# tombstones seen in memory so we can return them to the coordinator, which
-# will use them to make sure other replicas also know about the deleted rows.
-# With workloads that generate a lot of tombstones, this can cause performance
-# problems and even exaust the server heap.
-# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
-# Adjust the thresholds here if you understand the dangers and want to
-# scan more tombstones anyway. These thresholds may also be adjusted at runtime
-# using the StorageService mbean.
-tombstone_warn_threshold: 1000
-tombstone_failure_threshold: 100000
-
-# Log WARN on any batch size exceeding this value. 5kb per batch by default.
-# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
-batch_size_warn_threshold_in_kb: 5
-
-# Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
-batch_size_fail_threshold_in_kb: 50
-
-# Log WARN on any batches not of type LOGGED than span across more partitions than this limit
-unlogged_batch_across_partitions_warn_threshold: 10
-
-# Log a warning when compacting partitions larger than this value
-compaction_large_partition_warning_threshold_mb: 100
-
-# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
-# Adjust the threshold based on your application throughput requirement
-# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
-gc_warn_threshold_in_ms: 1000
-
-# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
-# early. Any value size larger than this threshold will result into marking an SSTable
-# as corrupted.
-# max_value_size_in_mb: 256
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana-database-plugin/main.go
deleted file mode 100644
index f995fe0..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana-database-plugin/main.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package main
-
-import (
- "log"
- "os"
-
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/plugins/database/hana"
-)
-
-func main() {
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(os.Args[1:])
-
- err := hana.Run(apiClientMeta.GetTLSConfig())
- if err != nil {
- log.Println(err)
- os.Exit(1)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana.go b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana.go
deleted file mode 100644
index aa2b53d..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana.go
+++ /dev/null
@@ -1,283 +0,0 @@
-package hana
-
-import (
- "database/sql"
- "fmt"
- "strings"
- "time"
-
- _ "github.com/SAP/go-hdb/driver"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/plugins"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
- "github.com/hashicorp/vault/plugins/helper/database/credsutil"
- "github.com/hashicorp/vault/plugins/helper/database/dbutil"
-)
-
-const (
- hanaTypeName = "hdb"
-)
-
-// HANA is an implementation of Database interface
-type HANA struct {
- connutil.ConnectionProducer
- credsutil.CredentialsProducer
-}
-
-// New implements builtinplugins.BuiltinFactory
-func New() (interface{}, error) {
- connProducer := &connutil.SQLConnectionProducer{}
- connProducer.Type = hanaTypeName
-
- credsProducer := &credsutil.SQLCredentialsProducer{
- DisplayNameLen: 32,
- RoleNameLen: 20,
- UsernameLen: 128,
- Separator: "_",
- }
-
- dbType := &HANA{
- ConnectionProducer: connProducer,
- CredentialsProducer: credsProducer,
- }
-
- return dbType, nil
-}
-
-// Run instantiates a HANA object, and runs the RPC server for the plugin
-func Run(apiTLSConfig *api.TLSConfig) error {
- dbType, err := New()
- if err != nil {
- return err
- }
-
- plugins.Serve(dbType.(*HANA), apiTLSConfig)
-
- return nil
-}
-
-// Type returns the TypeName for this backend
-func (h *HANA) Type() (string, error) {
- return hanaTypeName, nil
-}
-
-func (h *HANA) getConnection() (*sql.DB, error) {
- db, err := h.Connection()
- if err != nil {
- return nil, err
- }
-
- return db.(*sql.DB), nil
-}
-
-// CreateUser generates the username/password on the underlying HANA secret backend
-// as instructed by the CreationStatement provided.
-func (h *HANA) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
- // Grab the lock
- h.Lock()
- defer h.Unlock()
-
- // Get the connection
- db, err := h.getConnection()
- if err != nil {
- return "", "", err
- }
-
- if statements.CreationStatements == "" {
- return "", "", dbutil.ErrEmptyCreationStatement
- }
-
- // Generate username
- username, err = h.GenerateUsername(usernameConfig)
- if err != nil {
- return "", "", err
- }
-
- // HANA does not allow hyphens in usernames, and highly prefers capital letters
- username = strings.Replace(username, "-", "_", -1)
- username = strings.ToUpper(username)
-
- // Generate password
- password, err = h.GeneratePassword()
- if err != nil {
- return "", "", err
- }
- // Most HANA configurations have password constraints
- // Prefix with A1a to satisfy these constraints. User will be forced to change upon login
- password = strings.Replace(password, "-", "_", -1)
- password = "A1a" + password
-
- // If expiration is in the role SQL, HANA will deactivate the user when time is up,
- // regardless of whether vault is alive to revoke lease
- expirationStr, err := h.GenerateExpiration(expiration)
- if err != nil {
- return "", "", err
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return "", "", err
- }
- defer tx.Rollback()
-
- // Execute each query
- for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
- "name": username,
- "password": password,
- "expiration": expirationStr,
- }))
- if err != nil {
- return "", "", err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return "", "", err
- }
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return "", "", err
- }
-
- return username, password, nil
-}
-
-// Renewing hana user just means altering user's valid until property
-func (h *HANA) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
- // Get connection
- db, err := h.getConnection()
- if err != nil {
- return err
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- // If expiration is in the role SQL, HANA will deactivate the user when time is up,
- // regardless of whether vault is alive to revoke lease
- expirationStr, err := h.GenerateExpiration(expiration)
- if err != nil {
- return err
- }
-
- // Renew user's valid until property field
- stmt, err := tx.Prepare("ALTER USER " + username + " VALID UNTIL " + "'" + expirationStr + "'")
- if err != nil {
- return err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return err
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return err
- }
-
- return nil
-}
-
-// Revoking hana user will deactivate user and try to perform a soft drop
-func (h *HANA) RevokeUser(statements dbplugin.Statements, username string) error {
- // default revoke will be a soft drop on user
- if statements.RevocationStatements == "" {
- return h.revokeUserDefault(username)
- }
-
- // Get connection
- db, err := h.getConnection()
- if err != nil {
- return err
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- // Execute each query
- for _, query := range strutil.ParseArbitraryStringSlice(statements.RevocationStatements, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
- "name": username,
- }))
- if err != nil {
- return err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return err
- }
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (h *HANA) revokeUserDefault(username string) error {
- // Get connection
- db, err := h.getConnection()
- if err != nil {
- return err
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- // Disable server login for user
- disableStmt, err := tx.Prepare(fmt.Sprintf("ALTER USER %s DEACTIVATE USER NOW", username))
- if err != nil {
- return err
- }
- defer disableStmt.Close()
- if _, err := disableStmt.Exec(); err != nil {
- return err
- }
-
- // Invalidates current sessions and performs soft drop (drop if no dependencies)
- // if hard drop is desired, custom revoke statements should be written for role
- dropStmt, err := tx.Prepare(fmt.Sprintf("DROP USER %s RESTRICT", username))
- if err != nil {
- return err
- }
- defer dropStmt.Close()
- if _, err := dropStmt.Exec(); err != nil {
- return err
- }
-
- // Commit transaction
- if err := tx.Commit(); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana_test.go b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana_test.go
deleted file mode 100644
index 7cff7f1..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana_test.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package hana
-
-import (
- "database/sql"
- "fmt"
- "os"
- "strings"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
-)
-
-func TestHANA_Initialize(t *testing.T) {
- if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
- t.SkipNow()
- }
- connURL := os.Getenv("HANA_URL")
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*HANA)
-
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
- if !connProducer.Initialized {
- t.Fatal("Database should be initialized")
- }
-
- err = db.Close()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-// this test will leave a lingering user on the system
-func TestHANA_CreateUser(t *testing.T) {
- if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
- t.SkipNow()
- }
- connURL := os.Getenv("HANA_URL")
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*HANA)
-
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test-test",
- RoleName: "test-test",
- }
-
- // Test with no configured Creation Statememt
- _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Hour))
- if err == nil {
- t.Fatal("Expected error when no creation statement is provided")
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testHANARole,
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Hour))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-}
-
-func TestHANA_RevokeUser(t *testing.T) {
- if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
- t.SkipNow()
- }
- connURL := os.Getenv("HANA_URL")
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*HANA)
-
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testHANARole,
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test-test",
- RoleName: "test-test",
- }
-
- // Test default revoke statememts
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Hour))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- err = db.RevokeUser(statements, username)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if err := testCredsExist(t, connURL, username, password); err == nil {
- t.Fatal("Credentials were not revoked")
- }
-
- // Test custom revoke statememt
- username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Hour))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- statements.RevocationStatements = testHANADrop
- err = db.RevokeUser(statements, username)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if err := testCredsExist(t, connURL, username, password); err == nil {
- t.Fatal("Credentials were not revoked")
- }
-}
-
-func testCredsExist(t testing.TB, connURL, username, password string) error {
- // Log in with the new creds
- parts := strings.Split(connURL, "@")
- connURL = fmt.Sprintf("hdb://%s:%s@%s", username, password, parts[1])
- db, err := sql.Open("hdb", connURL)
- if err != nil {
- return err
- }
- defer db.Close()
- return db.Ping()
-}
-
-const testHANARole = `
-CREATE USER {{name}} PASSWORD {{password}} VALID UNTIL '{{expiration}}';`
-
-const testHANADrop = `
-DROP USER {{name}} CASCADE;`
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/connection_producer.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/connection_producer.go
deleted file mode 100644
index f802dc3..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/connection_producer.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package mongodb
-
-import (
- "crypto/tls"
- "errors"
- "fmt"
- "net"
- "net/url"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
- "github.com/mitchellh/mapstructure"
-
- "gopkg.in/mgo.v2"
-)
-
-// mongoDBConnectionProducer implements ConnectionProducer and provides an
-// interface for databases to make connections.
-type mongoDBConnectionProducer struct {
- ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"`
-
- Initialized bool
- Type string
- session *mgo.Session
- sync.Mutex
-}
-
-// Initialize parses connection configuration.
-func (c *mongoDBConnectionProducer) Initialize(conf map[string]interface{}, verifyConnection bool) error {
- c.Lock()
- defer c.Unlock()
-
- err := mapstructure.WeakDecode(conf, c)
- if err != nil {
- return err
- }
-
- if len(c.ConnectionURL) == 0 {
- return fmt.Errorf("connection_url cannot be empty")
- }
-
- // Set initialized to true at this point since all fields are set,
- // and the connection can be established at a later time.
- c.Initialized = true
-
- if verifyConnection {
- if _, err := c.Connection(); err != nil {
- return fmt.Errorf("error verifying connection: %s", err)
- }
-
- if err := c.session.Ping(); err != nil {
- return fmt.Errorf("error verifying connection: %s", err)
- }
- }
-
- return nil
-}
-
-// Connection creates a database connection.
-func (c *mongoDBConnectionProducer) Connection() (interface{}, error) {
- if !c.Initialized {
- return nil, connutil.ErrNotInitialized
- }
-
- if c.session != nil {
- return c.session, nil
- }
-
- dialInfo, err := parseMongoURL(c.ConnectionURL)
- if err != nil {
- return nil, err
- }
-
- c.session, err = mgo.DialWithInfo(dialInfo)
- if err != nil {
- return nil, err
- }
- c.session.SetSyncTimeout(1 * time.Minute)
- c.session.SetSocketTimeout(1 * time.Minute)
-
- return nil, nil
-}
-
-// Close terminates the database connection.
-func (c *mongoDBConnectionProducer) Close() error {
- c.Lock()
- defer c.Unlock()
-
- if c.session != nil {
- c.session.Close()
- }
-
- c.session = nil
-
- return nil
-}
-
-func parseMongoURL(rawURL string) (*mgo.DialInfo, error) {
- url, err := url.Parse(rawURL)
- if err != nil {
- return nil, err
- }
-
- info := mgo.DialInfo{
- Addrs: strings.Split(url.Host, ","),
- Database: strings.TrimPrefix(url.Path, "/"),
- Timeout: 10 * time.Second,
- }
-
- if url.User != nil {
- info.Username = url.User.Username()
- info.Password, _ = url.User.Password()
- }
-
- query := url.Query()
- for key, values := range query {
- var value string
- if len(values) > 0 {
- value = values[0]
- }
-
- switch key {
- case "authSource":
- info.Source = value
- case "authMechanism":
- info.Mechanism = value
- case "gssapiServiceName":
- info.Service = value
- case "replicaSet":
- info.ReplicaSetName = value
- case "maxPoolSize":
- poolLimit, err := strconv.Atoi(value)
- if err != nil {
- return nil, errors.New("bad value for maxPoolSize: " + value)
- }
- info.PoolLimit = poolLimit
- case "ssl":
- // Unfortunately, mgo doesn't support the ssl parameter in its MongoDB URI parsing logic, so we have to handle that
- // ourselves. See https://github.com/go-mgo/mgo/issues/84
- ssl, err := strconv.ParseBool(value)
- if err != nil {
- return nil, errors.New("bad value for ssl: " + value)
- }
- if ssl {
- info.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
- return tls.Dial("tcp", addr.String(), &tls.Config{})
- }
- }
- case "connect":
- if value == "direct" {
- info.Direct = true
- break
- }
- if value == "replicaSet" {
- break
- }
- fallthrough
- default:
- return nil, errors.New("unsupported connection URL option: " + key + "=" + value)
- }
- }
-
- return &info, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb-database-plugin/main.go
deleted file mode 100644
index eedb0d0..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb-database-plugin/main.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package main
-
-import (
- "log"
- "os"
-
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/plugins/database/mongodb"
-)
-
-func main() {
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(os.Args[1:])
-
- err := mongodb.Run(apiClientMeta.GetTLSConfig())
- if err != nil {
- log.Println(err)
- os.Exit(1)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb.go
deleted file mode 100644
index 52671da..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package mongodb
-
-import (
- "io"
- "strings"
- "time"
-
- "encoding/json"
-
- "fmt"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/plugins"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
- "github.com/hashicorp/vault/plugins/helper/database/credsutil"
- "github.com/hashicorp/vault/plugins/helper/database/dbutil"
- "gopkg.in/mgo.v2"
-)
-
-const mongoDBTypeName = "mongodb"
-
-// MongoDB is an implementation of Database interface
-type MongoDB struct {
- connutil.ConnectionProducer
- credsutil.CredentialsProducer
-}
-
-// New returns a new MongoDB instance
-func New() (interface{}, error) {
- connProducer := &mongoDBConnectionProducer{}
- connProducer.Type = mongoDBTypeName
-
- credsProducer := &credsutil.SQLCredentialsProducer{
- DisplayNameLen: 15,
- RoleNameLen: 15,
- UsernameLen: 100,
- Separator: "-",
- }
-
- dbType := &MongoDB{
- ConnectionProducer: connProducer,
- CredentialsProducer: credsProducer,
- }
- return dbType, nil
-}
-
-// Run instantiates a MongoDB object, and runs the RPC server for the plugin
-func Run(apiTLSConfig *api.TLSConfig) error {
- dbType, err := New()
- if err != nil {
- return err
- }
-
- plugins.Serve(dbType.(*MongoDB), apiTLSConfig)
-
- return nil
-}
-
-// Type returns the TypeName for this backend
-func (m *MongoDB) Type() (string, error) {
- return mongoDBTypeName, nil
-}
-
-func (m *MongoDB) getConnection() (*mgo.Session, error) {
- session, err := m.Connection()
- if err != nil {
- return nil, err
- }
-
- return session.(*mgo.Session), nil
-}
-
-// CreateUser generates the username/password on the underlying secret backend as instructed by
-// the CreationStatement provided. The creation statement is a JSON blob that has a db value,
-// and an array of roles that accepts a role, and an optional db value pair. This array will
-// be normalized the format specified in the mongoDB docs:
-// https://docs.mongodb.com/manual/reference/command/createUser/#dbcmd.createUser
-//
-// JSON Example:
-// { "db": "admin", "roles": [{ "role": "readWrite" }, {"role": "read", "db": "foo"}] }
-func (m *MongoDB) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
- // Grab the lock
- m.Lock()
- defer m.Unlock()
-
- if statements.CreationStatements == "" {
- return "", "", dbutil.ErrEmptyCreationStatement
- }
-
- session, err := m.getConnection()
- if err != nil {
- return "", "", err
- }
-
- username, err = m.GenerateUsername(usernameConfig)
- if err != nil {
- return "", "", err
- }
-
- password, err = m.GeneratePassword()
- if err != nil {
- return "", "", err
- }
-
- // Unmarshal statements.CreationStatements into mongodbRoles
- var mongoCS mongoDBStatement
- err = json.Unmarshal([]byte(statements.CreationStatements), &mongoCS)
- if err != nil {
- return "", "", err
- }
-
- // Default to "admin" if no db provided
- if mongoCS.DB == "" {
- mongoCS.DB = "admin"
- }
-
- if len(mongoCS.Roles) == 0 {
- return "", "", fmt.Errorf("roles array is required in creation statement")
- }
-
- createUserCmd := createUserCommand{
- Username: username,
- Password: password,
- Roles: mongoCS.Roles.toStandardRolesArray(),
- }
-
- err = session.DB(mongoCS.DB).Run(createUserCmd, nil)
- switch {
- case err == nil:
- case err == io.EOF, strings.Contains(err.Error(), "EOF"):
- if err := m.ConnectionProducer.Close(); err != nil {
- return "", "", errwrap.Wrapf("error closing EOF'd mongo connection: {{err}}", err)
- }
- session, err := m.getConnection()
- if err != nil {
- return "", "", err
- }
- err = session.DB(mongoCS.DB).Run(createUserCmd, nil)
- if err != nil {
- return "", "", err
- }
- default:
- return "", "", err
- }
-
- return username, password, nil
-}
-
-// RenewUser is not supported on MongoDB, so this is a no-op.
-func (m *MongoDB) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
- // NOOP
- return nil
-}
-
-// RevokeUser drops the specified user from the authentication databse. If none is provided
-// in the revocation statement, the default "admin" authentication database will be assumed.
-func (m *MongoDB) RevokeUser(statements dbplugin.Statements, username string) error {
- session, err := m.getConnection()
- if err != nil {
- return err
- }
-
- // If no revocation statements provided, pass in empty JSON
- revocationStatement := statements.RevocationStatements
- if revocationStatement == "" {
- revocationStatement = `{}`
- }
-
- // Unmarshal revocation statements into mongodbRoles
- var mongoCS mongoDBStatement
- err = json.Unmarshal([]byte(revocationStatement), &mongoCS)
- if err != nil {
- return err
- }
-
- db := mongoCS.DB
- // If db is not specified, use the default authenticationDatabase "admin"
- if db == "" {
- db = "admin"
- }
-
- err = session.DB(db).RemoveUser(username)
- switch {
- case err == nil, err == mgo.ErrNotFound:
- case err == io.EOF, strings.Contains(err.Error(), "EOF"):
- if err := m.ConnectionProducer.Close(); err != nil {
- return errwrap.Wrapf("error closing EOF'd mongo connection: {{err}}", err)
- }
- session, err := m.getConnection()
- if err != nil {
- return err
- }
- err = session.DB(db).RemoveUser(username)
- if err != nil {
- return err
- }
- default:
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb_test.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb_test.go
deleted file mode 100644
index 95f6e90..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb_test.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package mongodb
-
-import (
- "fmt"
- "os"
- "testing"
- "time"
-
- mgo "gopkg.in/mgo.v2"
-
- "strings"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- dockertest "gopkg.in/ory-am/dockertest.v3"
-)
-
-const testMongoDBRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }`
-
-func prepareMongoDBTestContainer(t *testing.T) (cleanup func(), retURL string) {
- if os.Getenv("MONGODB_URL") != "" {
- return func() {}, os.Getenv("MONGODB_URL")
- }
-
- pool, err := dockertest.NewPool("")
- if err != nil {
- t.Fatalf("Failed to connect to docker: %s", err)
- }
-
- resource, err := pool.Run("mongo", "latest", []string{})
- if err != nil {
- t.Fatalf("Could not start local mongo docker container: %s", err)
- }
-
- cleanup = func() {
- err := pool.Purge(resource)
- if err != nil {
- t.Fatalf("Failed to cleanup local container: %s", err)
- }
- }
-
- retURL = fmt.Sprintf("mongodb://localhost:%s", resource.GetPort("27017/tcp"))
-
- // exponential backoff-retry
- if err = pool.Retry(func() error {
- var err error
- dialInfo, err := parseMongoURL(retURL)
- if err != nil {
- return err
- }
-
- session, err := mgo.DialWithInfo(dialInfo)
- if err != nil {
- return err
- }
- session.SetSyncTimeout(1 * time.Minute)
- session.SetSocketTimeout(1 * time.Minute)
- return session.Ping()
- }); err != nil {
- t.Fatalf("Could not connect to mongo docker container: %s", err)
- }
-
- return
-}
-
-func TestMongoDB_Initialize(t *testing.T) {
- cleanup, connURL := prepareMongoDBTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, err := New()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- db := dbRaw.(*MongoDB)
- connProducer := db.ConnectionProducer.(*mongoDBConnectionProducer)
-
- err = db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if !connProducer.Initialized {
- t.Fatal("Database should be initialized")
- }
-
- err = db.Close()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestMongoDB_CreateUser(t *testing.T) {
- cleanup, connURL := prepareMongoDBTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, err := New()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- db := dbRaw.(*MongoDB)
- err = db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testMongoDBRole,
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-}
-
-func TestMongoDB_RevokeUser(t *testing.T) {
- cleanup, connURL := prepareMongoDBTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, err := New()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- db := dbRaw.(*MongoDB)
- err = db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testMongoDBRole,
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- // Test default revocation statememt
- err = db.RevokeUser(statements, username)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, connURL, username, password); err == nil {
- t.Fatal("Credentials were not revoked")
- }
-}
-
-func testCredsExist(t testing.TB, connURL, username, password string) error {
- connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
- dialInfo, err := parseMongoURL(connURL)
- if err != nil {
- return err
- }
-
- session, err := mgo.DialWithInfo(dialInfo)
- if err != nil {
- return err
- }
- session.SetSyncTimeout(1 * time.Minute)
- session.SetSocketTimeout(1 * time.Minute)
- return session.Ping()
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/util.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/util.go
deleted file mode 100644
index 9004a3c..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/util.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package mongodb
-
-type createUserCommand struct {
- Username string `bson:"createUser"`
- Password string `bson:"pwd"`
- Roles []interface{} `bson:"roles"`
-}
-type mongodbRole struct {
- Role string `json:"role" bson:"role"`
- DB string `json:"db" bson:"db"`
-}
-
-type mongodbRoles []mongodbRole
-
-type mongoDBStatement struct {
- DB string `json:"db"`
- Roles mongodbRoles `json:"roles"`
-}
-
-// Convert array of role documents like:
-//
-// [ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ]
-//
-// into a "standard" MongoDB roles array containing both strings and role documents:
-//
-// [ "readWrite", { "role": "readWrite", "db": "test" } ]
-//
-// MongoDB's createUser command accepts the latter.
-func (roles mongodbRoles) toStandardRolesArray() []interface{} {
- var standardRolesArray []interface{}
- for _, role := range roles {
- if role.DB == "" {
- standardRolesArray = append(standardRolesArray, role.Role)
- } else {
- standardRolesArray = append(standardRolesArray, role)
- }
- }
- return standardRolesArray
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql-database-plugin/main.go
deleted file mode 100644
index 9201b48..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql-database-plugin/main.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package main
-
-import (
- "log"
- "os"
-
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/plugins/database/mssql"
-)
-
-func main() {
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(os.Args[1:])
-
- err := mssql.Run(apiClientMeta.GetTLSConfig())
- if err != nil {
- log.Println(err)
- os.Exit(1)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql.go b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql.go
deleted file mode 100644
index 7b920c8..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql.go
+++ /dev/null
@@ -1,321 +0,0 @@
-package mssql
-
-import (
- "database/sql"
- "fmt"
- "strings"
- "time"
-
- _ "github.com/denisenkom/go-mssqldb"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/plugins"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
- "github.com/hashicorp/vault/plugins/helper/database/credsutil"
- "github.com/hashicorp/vault/plugins/helper/database/dbutil"
-)
-
-const msSQLTypeName = "mssql"
-
-// MSSQL is an implementation of Database interface
-type MSSQL struct {
- connutil.ConnectionProducer
- credsutil.CredentialsProducer
-}
-
-func New() (interface{}, error) {
- connProducer := &connutil.SQLConnectionProducer{}
- connProducer.Type = msSQLTypeName
-
- credsProducer := &credsutil.SQLCredentialsProducer{
- DisplayNameLen: 20,
- RoleNameLen: 20,
- UsernameLen: 128,
- Separator: "-",
- }
-
- dbType := &MSSQL{
- ConnectionProducer: connProducer,
- CredentialsProducer: credsProducer,
- }
-
- return dbType, nil
-}
-
-// Run instantiates a MSSQL object, and runs the RPC server for the plugin
-func Run(apiTLSConfig *api.TLSConfig) error {
- dbType, err := New()
- if err != nil {
- return err
- }
-
- plugins.Serve(dbType.(*MSSQL), apiTLSConfig)
-
- return nil
-}
-
-// Type returns the TypeName for this backend
-func (m *MSSQL) Type() (string, error) {
- return msSQLTypeName, nil
-}
-
-func (m *MSSQL) getConnection() (*sql.DB, error) {
- db, err := m.Connection()
- if err != nil {
- return nil, err
- }
-
- return db.(*sql.DB), nil
-}
-
-// CreateUser generates the username/password on the underlying MSSQL secret backend as instructed by
-// the CreationStatement provided.
-func (m *MSSQL) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
- // Grab the lock
- m.Lock()
- defer m.Unlock()
-
- // Get the connection
- db, err := m.getConnection()
- if err != nil {
- return "", "", err
- }
-
- if statements.CreationStatements == "" {
- return "", "", dbutil.ErrEmptyCreationStatement
- }
-
- username, err = m.GenerateUsername(usernameConfig)
- if err != nil {
- return "", "", err
- }
-
- password, err = m.GeneratePassword()
- if err != nil {
- return "", "", err
- }
-
- expirationStr, err := m.GenerateExpiration(expiration)
- if err != nil {
- return "", "", err
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return "", "", err
- }
- defer tx.Rollback()
-
- // Execute each query
- for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
- "name": username,
- "password": password,
- "expiration": expirationStr,
- }))
- if err != nil {
- return "", "", err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return "", "", err
- }
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return "", "", err
- }
-
- return username, password, nil
-}
-
-// RenewUser is not supported on MSSQL, so this is a no-op.
-func (m *MSSQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
- // NOOP
- return nil
-}
-
-// RevokeUser attempts to drop the specified user. It will first attempt to disable login,
-// then kill pending connections from that user, and finally drop the user and login from the
-// database instance.
-func (m *MSSQL) RevokeUser(statements dbplugin.Statements, username string) error {
- if statements.RevocationStatements == "" {
- return m.revokeUserDefault(username)
- }
-
- // Get connection
- db, err := m.getConnection()
- if err != nil {
- return err
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- // Execute each query
- for _, query := range strutil.ParseArbitraryStringSlice(statements.RevocationStatements, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
- "name": username,
- }))
- if err != nil {
- return err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return err
- }
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *MSSQL) revokeUserDefault(username string) error {
- // Get connection
- db, err := m.getConnection()
- if err != nil {
- return err
- }
-
- // First disable server login
- disableStmt, err := db.Prepare(fmt.Sprintf("ALTER LOGIN [%s] DISABLE;", username))
- if err != nil {
- return err
- }
- defer disableStmt.Close()
- if _, err := disableStmt.Exec(); err != nil {
- return err
- }
-
- // Query for sessions for the login so that we can kill any outstanding
- // sessions. There cannot be any active sessions before we drop the logins
- // This isn't done in a transaction because even if we fail along the way,
- // we want to remove as much access as possible
- sessionStmt, err := db.Prepare(fmt.Sprintf(
- "SELECT session_id FROM sys.dm_exec_sessions WHERE login_name = '%s';", username))
- if err != nil {
- return err
- }
- defer sessionStmt.Close()
-
- sessionRows, err := sessionStmt.Query()
- if err != nil {
- return err
- }
- defer sessionRows.Close()
-
- var revokeStmts []string
- for sessionRows.Next() {
- var sessionID int
- err = sessionRows.Scan(&sessionID)
- if err != nil {
- return err
- }
- revokeStmts = append(revokeStmts, fmt.Sprintf("KILL %d;", sessionID))
- }
-
- // Query for database users using undocumented stored procedure for now since
- // it is the easiest way to get this information;
- // we need to drop the database users before we can drop the login and the role
- // This isn't done in a transaction because even if we fail along the way,
- // we want to remove as much access as possible
- stmt, err := db.Prepare(fmt.Sprintf("EXEC master.dbo.sp_msloginmappings '%s';", username))
- if err != nil {
- return err
- }
- defer stmt.Close()
-
- rows, err := stmt.Query()
- if err != nil {
- return err
- }
- defer rows.Close()
-
- for rows.Next() {
- var loginName, dbName, qUsername string
- var aliasName sql.NullString
- err = rows.Scan(&loginName, &dbName, &qUsername, &aliasName)
- if err != nil {
- return err
- }
- revokeStmts = append(revokeStmts, fmt.Sprintf(dropUserSQL, dbName, username, username))
- }
-
- // we do not stop on error, as we want to remove as
- // many permissions as possible right now
- var lastStmtError error
- for _, query := range revokeStmts {
- stmt, err := db.Prepare(query)
- if err != nil {
- lastStmtError = err
- continue
- }
- defer stmt.Close()
- _, err = stmt.Exec()
- if err != nil {
- lastStmtError = err
- }
- }
-
- // can't drop if not all database users are dropped
- if rows.Err() != nil {
- return fmt.Errorf("cound not generate sql statements for all rows: %s", rows.Err())
- }
- if lastStmtError != nil {
- return fmt.Errorf("could not perform all sql statements: %s", lastStmtError)
- }
-
- // Drop this login
- stmt, err = db.Prepare(fmt.Sprintf(dropLoginSQL, username, username))
- if err != nil {
- return err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return err
- }
-
- return nil
-}
-
-const dropUserSQL = `
-USE [%s]
-IF EXISTS
- (SELECT name
- FROM sys.database_principals
- WHERE name = N'%s')
-BEGIN
- DROP USER [%s]
-END
-`
-
-const dropLoginSQL = `
-IF EXISTS
- (SELECT name
- FROM master.sys.server_principals
- WHERE name = N'%s')
-BEGIN
- DROP LOGIN [%s]
-END
-`
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql_test.go b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql_test.go
deleted file mode 100644
index 5a00890..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql_test.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package mssql
-
-import (
- "database/sql"
- "fmt"
- "os"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
-)
-
-var (
- testMSQLImagePull sync.Once
-)
-
-func TestMSSQL_Initialize(t *testing.T) {
- if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
- return
- }
- connURL := os.Getenv("MSSQL_URL")
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*MSSQL)
-
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
- if !connProducer.Initialized {
- t.Fatal("Database should be initalized")
- }
-
- err = db.Close()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Test decoding a string value for max_open_connections
- connectionDetails = map[string]interface{}{
- "connection_url": connURL,
- "max_open_connections": "5",
- }
-
- err = db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestMSSQL_CreateUser(t *testing.T) {
- if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
- return
- }
- connURL := os.Getenv("MSSQL_URL")
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*MSSQL)
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- // Test with no configured Creation Statememt
- _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute))
- if err == nil {
- t.Fatal("Expected error when no creation statement is provided")
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testMSSQLRole,
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-}
-
-func TestMSSQL_RevokeUser(t *testing.T) {
- if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
- return
- }
- connURL := os.Getenv("MSSQL_URL")
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*MSSQL)
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testMSSQLRole,
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- // Test default revoke statememts
- err = db.RevokeUser(statements, username)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err == nil {
- t.Fatal("Credentials were not revoked")
- }
-
- username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- // Test custom revoke statememt
- statements.RevocationStatements = testMSSQLDrop
- err = db.RevokeUser(statements, username)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err == nil {
- t.Fatal("Credentials were not revoked")
- }
-}
-
-func testCredsExist(t testing.TB, connURL, username, password string) error {
- // Log in with the new creds
- parts := strings.Split(connURL, "@")
- connURL = fmt.Sprintf("sqlserver://%s:%s@%s", username, password, parts[1])
- db, err := sql.Open("mssql", connURL)
- if err != nil {
- return err
- }
- defer db.Close()
- return db.Ping()
-}
-
-const testMSSQLRole = `
-CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';
-CREATE USER [{{name}}] FOR LOGIN [{{name}}];
-GRANT SELECT, INSERT, UPDATE, DELETE ON SCHEMA::dbo TO [{{name}}];`
-
-const testMSSQLDrop = `
-DROP USER [{{name}}];
-DROP LOGIN [{{name}}];
-`
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-database-plugin/main.go
deleted file mode 100644
index 917f1b3..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-database-plugin/main.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package main
-
-import (
- "log"
- "os"
-
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/plugins/database/mysql"
-)
-
-func main() {
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(os.Args[1:])
-
- err := mysql.Run(apiClientMeta.GetTLSConfig())
- if err != nil {
- log.Println(err)
- os.Exit(1)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-legacy-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-legacy-database-plugin/main.go
deleted file mode 100644
index 2b950e0..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-legacy-database-plugin/main.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package main
-
-import (
- "log"
- "os"
-
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/plugins/database/mysql"
-)
-
-func main() {
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(os.Args[1:])
-
- err := mysql.RunLegacy(apiClientMeta.GetTLSConfig())
- if err != nil {
- log.Println(err)
- os.Exit(1)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go
deleted file mode 100644
index 297941c..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go
+++ /dev/null
@@ -1,218 +0,0 @@
-package mysql
-
-import (
- "database/sql"
- "strings"
- "time"
-
- _ "github.com/go-sql-driver/mysql"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/plugins"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
- "github.com/hashicorp/vault/plugins/helper/database/credsutil"
- "github.com/hashicorp/vault/plugins/helper/database/dbutil"
-)
-
-const (
- defaultMysqlRevocationStmts = `
- REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%';
- DROP USER '{{name}}'@'%'
- `
- mySQLTypeName = "mysql"
-)
-
-var (
- MetadataLen int = 10
- LegacyMetadataLen int = 4
- UsernameLen int = 32
- LegacyUsernameLen int = 16
-)
-
-type MySQL struct {
- connutil.ConnectionProducer
- credsutil.CredentialsProducer
-}
-
-// New implements builtinplugins.BuiltinFactory
-func New(displayNameLen, roleNameLen, usernameLen int) func() (interface{}, error) {
- return func() (interface{}, error) {
- connProducer := &connutil.SQLConnectionProducer{}
- connProducer.Type = mySQLTypeName
-
- credsProducer := &credsutil.SQLCredentialsProducer{
- DisplayNameLen: displayNameLen,
- RoleNameLen: roleNameLen,
- UsernameLen: usernameLen,
- Separator: "-",
- }
-
- dbType := &MySQL{
- ConnectionProducer: connProducer,
- CredentialsProducer: credsProducer,
- }
-
- return dbType, nil
- }
-}
-
-// Run instantiates a MySQL object, and runs the RPC server for the plugin
-func Run(apiTLSConfig *api.TLSConfig) error {
- return runCommon(false, apiTLSConfig)
-}
-
-// Run instantiates a MySQL object, and runs the RPC server for the plugin
-func RunLegacy(apiTLSConfig *api.TLSConfig) error {
- return runCommon(true, apiTLSConfig)
-}
-
-func runCommon(legacy bool, apiTLSConfig *api.TLSConfig) error {
- var f func() (interface{}, error)
- if legacy {
- f = New(credsutil.NoneLength, LegacyMetadataLen, LegacyUsernameLen)
- } else {
- f = New(MetadataLen, MetadataLen, UsernameLen)
- }
- dbType, err := f()
- if err != nil {
- return err
- }
-
- plugins.Serve(dbType.(*MySQL), apiTLSConfig)
-
- return nil
-}
-
-func (m *MySQL) Type() (string, error) {
- return mySQLTypeName, nil
-}
-
-func (m *MySQL) getConnection() (*sql.DB, error) {
- db, err := m.Connection()
- if err != nil {
- return nil, err
- }
-
- return db.(*sql.DB), nil
-}
-
-func (m *MySQL) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
- // Grab the lock
- m.Lock()
- defer m.Unlock()
-
- // Get the connection
- db, err := m.getConnection()
- if err != nil {
- return "", "", err
- }
-
- if statements.CreationStatements == "" {
- return "", "", dbutil.ErrEmptyCreationStatement
- }
-
- username, err = m.GenerateUsername(usernameConfig)
- if err != nil {
- return "", "", err
- }
-
- password, err = m.GeneratePassword()
- if err != nil {
- return "", "", err
- }
-
- expirationStr, err := m.GenerateExpiration(expiration)
- if err != nil {
- return "", "", err
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return "", "", err
- }
- defer tx.Rollback()
-
- // Execute each query
- for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
- "name": username,
- "password": password,
- "expiration": expirationStr,
- }))
- if err != nil {
- return "", "", err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return "", "", err
- }
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return "", "", err
- }
-
- return username, password, nil
-}
-
-// NOOP
-func (m *MySQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
- return nil
-}
-
-func (m *MySQL) RevokeUser(statements dbplugin.Statements, username string) error {
- // Grab the read lock
- m.Lock()
- defer m.Unlock()
-
- // Get the connection
- db, err := m.getConnection()
- if err != nil {
- return err
- }
-
- revocationStmts := statements.RevocationStatements
- // Use a default SQL statement for revocation if one cannot be fetched from the role
- if revocationStmts == "" {
- revocationStmts = defaultMysqlRevocationStmts
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- for _, query := range strutil.ParseArbitraryStringSlice(revocationStmts, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- // This is not a prepared statement because not all commands are supported
- // 1295: This command is not supported in the prepared statement protocol yet
- // Reference https://mariadb.com/kb/en/mariadb/prepare-statement/
- query = strings.Replace(query, "{{name}}", username, -1)
- _, err = tx.Exec(query)
- if err != nil {
- return err
- }
-
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql_test.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql_test.go
deleted file mode 100644
index 851bd02..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql_test.go
+++ /dev/null
@@ -1,326 +0,0 @@
-package mysql
-
-import (
- "database/sql"
- "fmt"
- "os"
- "strings"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
- "github.com/hashicorp/vault/plugins/helper/database/credsutil"
- dockertest "gopkg.in/ory-am/dockertest.v3"
-)
-
-func prepareMySQLTestContainer(t *testing.T) (cleanup func(), retURL string) {
- if os.Getenv("MYSQL_URL") != "" {
- return func() {}, os.Getenv("MYSQL_URL")
- }
-
- pool, err := dockertest.NewPool("")
- if err != nil {
- t.Fatalf("Failed to connect to docker: %s", err)
- }
-
- resource, err := pool.Run("mysql", "latest", []string{"MYSQL_ROOT_PASSWORD=secret"})
- if err != nil {
- t.Fatalf("Could not start local MySQL docker container: %s", err)
- }
-
- cleanup = func() {
- err := pool.Purge(resource)
- if err != nil {
- t.Fatalf("Failed to cleanup local container: %s", err)
- }
- }
-
- retURL = fmt.Sprintf("root:secret@(localhost:%s)/mysql?parseTime=true", resource.GetPort("3306/tcp"))
-
- // exponential backoff-retry
- if err = pool.Retry(func() error {
- var err error
- var db *sql.DB
- db, err = sql.Open("mysql", retURL)
- if err != nil {
- return err
- }
- return db.Ping()
- }); err != nil {
- t.Fatalf("Could not connect to MySQL docker container: %s", err)
- }
-
- return
-}
-
-func prepareMySQLLegacyTestContainer(t *testing.T) (cleanup func(), retURL string) {
- if os.Getenv("MYSQL_URL") != "" {
- return func() {}, os.Getenv("MYSQL_URL")
- }
-
- pool, err := dockertest.NewPool("")
- if err != nil {
- t.Fatalf("Failed to connect to docker: %s", err)
- }
-
- // Mysql 5.6 is the last MySQL version to limit usernames to 16 characters.
- resource, err := pool.Run("mysql", "5.6", []string{"MYSQL_ROOT_PASSWORD=secret"})
- if err != nil {
- t.Fatalf("Could not start local MySQL docker container: %s", err)
- }
-
- cleanup = func() {
- err := pool.Purge(resource)
- if err != nil {
- t.Fatalf("Failed to cleanup local container: %s", err)
- }
- }
-
- retURL = fmt.Sprintf("root:secret@(localhost:%s)/mysql?parseTime=true", resource.GetPort("3306/tcp"))
-
- // exponential backoff-retry
- if err = pool.Retry(func() error {
- var err error
- var db *sql.DB
- db, err = sql.Open("mysql", retURL)
- if err != nil {
- return err
- }
- return db.Ping()
- }); err != nil {
- t.Fatalf("Could not connect to MySQL docker container: %s", err)
- }
-
- return
-}
-
-func TestMySQL_Initialize(t *testing.T) {
- cleanup, connURL := prepareMySQLTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- f := New(MetadataLen, MetadataLen, UsernameLen)
- dbRaw, _ := f()
- db := dbRaw.(*MySQL)
- connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
-
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if !connProducer.Initialized {
- t.Fatal("Database should be initalized")
- }
-
- err = db.Close()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Test decoding a string value for max_open_connections
- connectionDetails = map[string]interface{}{
- "connection_url": connURL,
- "max_open_connections": "5",
- }
-
- err = db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestMySQL_CreateUser(t *testing.T) {
- cleanup, connURL := prepareMySQLTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- f := New(MetadataLen, MetadataLen, UsernameLen)
- dbRaw, _ := f()
- db := dbRaw.(*MySQL)
-
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test-long-displayname",
- RoleName: "test-long-rolename",
- }
-
- // Test with no configured Creation Statememt
- _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute))
- if err == nil {
- t.Fatal("Expected error when no creation statement is provided")
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testMySQLRoleWildCard,
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- // Test a second time to make sure usernames don't collide
- username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-}
-
-func TestMySQL_CreateUser_Legacy(t *testing.T) {
- cleanup, connURL := prepareMySQLLegacyTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- f := New(credsutil.NoneLength, LegacyMetadataLen, LegacyUsernameLen)
- dbRaw, _ := f()
- db := dbRaw.(*MySQL)
-
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test-long-displayname",
- RoleName: "test-long-rolename",
- }
-
- // Test with no configured Creation Statememt
- _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute))
- if err == nil {
- t.Fatal("Expected error when no creation statement is provided")
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testMySQLRoleWildCard,
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- // Test a second time to make sure usernames don't collide
- username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-}
-
-func TestMySQL_RevokeUser(t *testing.T) {
- cleanup, connURL := prepareMySQLTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- f := New(MetadataLen, MetadataLen, UsernameLen)
- dbRaw, _ := f()
- db := dbRaw.(*MySQL)
-
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testMySQLRoleWildCard,
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- // Test default revoke statememts
- err = db.RevokeUser(statements, username)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err == nil {
- t.Fatal("Credentials were not revoked")
- }
-
- statements.CreationStatements = testMySQLRoleWildCard
- username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- // Test custom revoke statements
- statements.RevocationStatements = testMySQLRevocationSQL
- err = db.RevokeUser(statements, username)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err == nil {
- t.Fatal("Credentials were not revoked")
- }
-}
-
-func testCredsExist(t testing.TB, connURL, username, password string) error {
- // Log in with the new creds
- connURL = strings.Replace(connURL, "root:secret", fmt.Sprintf("%s:%s", username, password), 1)
- db, err := sql.Open("mysql", connURL)
- if err != nil {
- return err
- }
- defer db.Close()
- return db.Ping()
-}
-
-const testMySQLRoleWildCard = `
-CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
-GRANT SELECT ON *.* TO '{{name}}'@'%';
-`
-const testMySQLRevocationSQL = `
-REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%';
-DROP USER '{{name}}'@'%';
-`
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql-database-plugin/main.go
deleted file mode 100644
index a3b1789..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql-database-plugin/main.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package main
-
-import (
- "log"
- "os"
-
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/plugins/database/postgresql"
-)
-
-func main() {
- apiClientMeta := &pluginutil.APIClientMeta{}
- flags := apiClientMeta.FlagSet()
- flags.Parse(os.Args[1:])
-
- err := postgresql.Run(apiClientMeta.GetTLSConfig())
- if err != nil {
- log.Println(err)
- os.Exit(1)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go
deleted file mode 100644
index 93fa8a8..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package postgresql
-
-import (
- "database/sql"
- "fmt"
- "strings"
- "time"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/plugins"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
- "github.com/hashicorp/vault/plugins/helper/database/credsutil"
- "github.com/hashicorp/vault/plugins/helper/database/dbutil"
- "github.com/lib/pq"
- _ "github.com/lib/pq"
-)
-
-const (
- postgreSQLTypeName string = "postgres"
- defaultPostgresRenewSQL = `
-ALTER ROLE "{{name}}" VALID UNTIL '{{expiration}}';
-`
-)
-
-// New implements builtinplugins.BuiltinFactory
-func New() (interface{}, error) {
- connProducer := &connutil.SQLConnectionProducer{}
- connProducer.Type = postgreSQLTypeName
-
- credsProducer := &credsutil.SQLCredentialsProducer{
- DisplayNameLen: 8,
- RoleNameLen: 8,
- UsernameLen: 63,
- Separator: "-",
- }
-
- dbType := &PostgreSQL{
- ConnectionProducer: connProducer,
- CredentialsProducer: credsProducer,
- }
-
- return dbType, nil
-}
-
-// Run instantiates a PostgreSQL object, and runs the RPC server for the plugin
-func Run(apiTLSConfig *api.TLSConfig) error {
- dbType, err := New()
- if err != nil {
- return err
- }
-
- plugins.Serve(dbType.(*PostgreSQL), apiTLSConfig)
-
- return nil
-}
-
-type PostgreSQL struct {
- connutil.ConnectionProducer
- credsutil.CredentialsProducer
-}
-
-func (p *PostgreSQL) Type() (string, error) {
- return postgreSQLTypeName, nil
-}
-
-func (p *PostgreSQL) getConnection() (*sql.DB, error) {
- db, err := p.Connection()
- if err != nil {
- return nil, err
- }
-
- return db.(*sql.DB), nil
-}
-
-func (p *PostgreSQL) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
- if statements.CreationStatements == "" {
- return "", "", dbutil.ErrEmptyCreationStatement
- }
-
- // Grab the lock
- p.Lock()
- defer p.Unlock()
-
- username, err = p.GenerateUsername(usernameConfig)
- if err != nil {
- return "", "", err
- }
-
- password, err = p.GeneratePassword()
- if err != nil {
- return "", "", err
- }
-
- expirationStr, err := p.GenerateExpiration(expiration)
- if err != nil {
- return "", "", err
- }
-
- // Get the connection
- db, err := p.getConnection()
- if err != nil {
- return "", "", err
-
- }
-
- // Start a transaction
- tx, err := db.Begin()
- if err != nil {
- return "", "", err
-
- }
- defer func() {
- tx.Rollback()
- }()
- // Return the secret
-
- // Execute each query
- for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
- "name": username,
- "password": password,
- "expiration": expirationStr,
- }))
- if err != nil {
- return "", "", err
-
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return "", "", err
-
- }
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return "", "", err
-
- }
-
- return username, password, nil
-}
-
-func (p *PostgreSQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
- p.Lock()
- defer p.Unlock()
-
- renewStmts := statements.RenewStatements
- if renewStmts == "" {
- renewStmts = defaultPostgresRenewSQL
- }
-
- db, err := p.getConnection()
- if err != nil {
- return err
- }
-
- tx, err := db.Begin()
- if err != nil {
- return err
- }
- defer func() {
- tx.Rollback()
- }()
-
- expirationStr, err := p.GenerateExpiration(expiration)
- if err != nil {
- return err
- }
-
- for _, query := range strutil.ParseArbitraryStringSlice(renewStmts, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
- stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
- "name": username,
- "expiration": expirationStr,
- }))
- if err != nil {
- return err
- }
-
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return err
- }
- }
-
- if err := tx.Commit(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (p *PostgreSQL) RevokeUser(statements dbplugin.Statements, username string) error {
- // Grab the lock
- p.Lock()
- defer p.Unlock()
-
- if statements.RevocationStatements == "" {
- return p.defaultRevokeUser(username)
- }
-
- return p.customRevokeUser(username, statements.RevocationStatements)
-}
-
-func (p *PostgreSQL) customRevokeUser(username, revocationStmts string) error {
- db, err := p.getConnection()
- if err != nil {
- return err
- }
-
- tx, err := db.Begin()
- if err != nil {
- return err
- }
- defer func() {
- tx.Rollback()
- }()
-
- for _, query := range strutil.ParseArbitraryStringSlice(revocationStmts, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
- "name": username,
- }))
- if err != nil {
- return err
- }
- defer stmt.Close()
-
- if _, err := stmt.Exec(); err != nil {
- return err
- }
- }
-
- if err := tx.Commit(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (p *PostgreSQL) defaultRevokeUser(username string) error {
- db, err := p.getConnection()
- if err != nil {
- return err
- }
-
- // Check if the role exists
- var exists bool
- err = db.QueryRow("SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists)
- if err != nil && err != sql.ErrNoRows {
- return err
- }
-
- if exists == false {
- return nil
- }
-
- // Query for permissions; we need to revoke permissions before we can drop
- // the role
- // This isn't done in a transaction because even if we fail along the way,
- // we want to remove as much access as possible
- stmt, err := db.Prepare("SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;")
- if err != nil {
- return err
- }
- defer stmt.Close()
-
- rows, err := stmt.Query(username)
- if err != nil {
- return err
- }
- defer rows.Close()
-
- const initialNumRevocations = 16
- revocationStmts := make([]string, 0, initialNumRevocations)
- for rows.Next() {
- var schema string
- err = rows.Scan(&schema)
- if err != nil {
- // keep going; remove as many permissions as possible right now
- continue
- }
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`,
- pq.QuoteIdentifier(schema),
- pq.QuoteIdentifier(username)))
-
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE USAGE ON SCHEMA %s FROM %s;`,
- pq.QuoteIdentifier(schema),
- pq.QuoteIdentifier(username)))
- }
-
- // for good measure, revoke all privileges and usage on schema public
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`,
- pq.QuoteIdentifier(username)))
-
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- "REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM %s;",
- pq.QuoteIdentifier(username)))
-
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- "REVOKE USAGE ON SCHEMA public FROM %s;",
- pq.QuoteIdentifier(username)))
-
- // get the current database name so we can issue a REVOKE CONNECT for
- // this username
- var dbname sql.NullString
- if err := db.QueryRow("SELECT current_database();").Scan(&dbname); err != nil {
- return err
- }
-
- if dbname.Valid {
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE CONNECT ON DATABASE %s FROM %s;`,
- pq.QuoteIdentifier(dbname.String),
- pq.QuoteIdentifier(username)))
- }
-
- // again, here, we do not stop on error, as we want to remove as
- // many permissions as possible right now
- var lastStmtError error
- for _, query := range revocationStmts {
- stmt, err := db.Prepare(query)
- if err != nil {
- lastStmtError = err
- continue
- }
- defer stmt.Close()
- _, err = stmt.Exec()
- if err != nil {
- lastStmtError = err
- }
- }
-
- // can't drop if not all privileges are revoked
- if rows.Err() != nil {
- return fmt.Errorf("could not generate revocation statements for all rows: %s", rows.Err())
- }
- if lastStmtError != nil {
- return fmt.Errorf("could not perform all revocation statements: %s", lastStmtError)
- }
-
- // Drop this user
- stmt, err = db.Prepare(fmt.Sprintf(
- `DROP ROLE IF EXISTS %s;`, pq.QuoteIdentifier(username)))
- if err != nil {
- return err
- }
- defer stmt.Close()
- if _, err := stmt.Exec(); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql_test.go b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql_test.go
deleted file mode 100644
index a74abb4..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql_test.go
+++ /dev/null
@@ -1,363 +0,0 @@
-package postgresql
-
-import (
- "database/sql"
- "fmt"
- "os"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
- dockertest "gopkg.in/ory-am/dockertest.v3"
-)
-
-var (
- testPostgresImagePull sync.Once
-)
-
-func preparePostgresTestContainer(t *testing.T) (cleanup func(), retURL string) {
- if os.Getenv("PG_URL") != "" {
- return func() {}, os.Getenv("PG_URL")
- }
-
- pool, err := dockertest.NewPool("")
- if err != nil {
- t.Fatalf("Failed to connect to docker: %s", err)
- }
-
- resource, err := pool.Run("postgres", "latest", []string{"POSTGRES_PASSWORD=secret", "POSTGRES_DB=database"})
- if err != nil {
- t.Fatalf("Could not start local PostgreSQL docker container: %s", err)
- }
-
- cleanup = func() {
- err := pool.Purge(resource)
- if err != nil {
- t.Fatalf("Failed to cleanup local container: %s", err)
- }
- }
-
- retURL = fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp"))
-
- // exponential backoff-retry
- if err = pool.Retry(func() error {
- var err error
- var db *sql.DB
- db, err = sql.Open("postgres", retURL)
- if err != nil {
- return err
- }
- return db.Ping()
- }); err != nil {
- t.Fatalf("Could not connect to PostgreSQL docker container: %s", err)
- }
-
- return
-}
-
-func TestPostgreSQL_Initialize(t *testing.T) {
- cleanup, connURL := preparePostgresTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- "max_open_connections": 5,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*PostgreSQL)
-
- connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
-
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if !connProducer.Initialized {
- t.Fatal("Database should be initalized")
- }
-
- err = db.Close()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Test decoding a string value for max_open_connections
- connectionDetails = map[string]interface{}{
- "connection_url": connURL,
- "max_open_connections": "5",
- }
-
- err = db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
-}
-
-func TestPostgreSQL_CreateUser(t *testing.T) {
- cleanup, connURL := preparePostgresTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*PostgreSQL)
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- // Test with no configured Creation Statememt
- _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute))
- if err == nil {
- t.Fatal("Expected error when no creation statement is provided")
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testPostgresRole,
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- statements.CreationStatements = testPostgresReadOnlyRole
- username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-}
-
-func TestPostgreSQL_RenewUser(t *testing.T) {
- cleanup, connURL := preparePostgresTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*PostgreSQL)
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testPostgresRole,
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- err = db.RenewUser(statements, username, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Sleep longer than the inital expiration time
- time.Sleep(2 * time.Second)
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
- statements.RenewStatements = defaultPostgresRenewSQL
- username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- err = db.RenewUser(statements, username, time.Now().Add(time.Minute))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Sleep longer than the inital expiration time
- time.Sleep(2 * time.Second)
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
-}
-
-func TestPostgreSQL_RevokeUser(t *testing.T) {
- cleanup, connURL := preparePostgresTestContainer(t)
- defer cleanup()
-
- connectionDetails := map[string]interface{}{
- "connection_url": connURL,
- }
-
- dbRaw, _ := New()
- db := dbRaw.(*PostgreSQL)
- err := db.Initialize(connectionDetails, true)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- statements := dbplugin.Statements{
- CreationStatements: testPostgresRole,
- }
-
- usernameConfig := dbplugin.UsernameConfig{
- DisplayName: "test",
- RoleName: "test",
- }
-
- username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- // Test default revoke statememts
- err = db.RevokeUser(statements, username)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err == nil {
- t.Fatal("Credentials were not revoked")
- }
-
- username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err = testCredsExist(t, connURL, username, password); err != nil {
- t.Fatalf("Could not connect with new credentials: %s", err)
- }
-
- // Test custom revoke statements
- statements.RevocationStatements = defaultPostgresRevocationSQL
- err = db.RevokeUser(statements, username)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- if err := testCredsExist(t, connURL, username, password); err == nil {
- t.Fatal("Credentials were not revoked")
- }
-}
-
-func testCredsExist(t testing.TB, connURL, username, password string) error {
- // Log in with the new creds
- connURL = strings.Replace(connURL, "postgres:secret", fmt.Sprintf("%s:%s", username, password), 1)
- db, err := sql.Open("postgres", connURL)
- if err != nil {
- return err
- }
- defer db.Close()
- return db.Ping()
-}
-
-const testPostgresRole = `
-CREATE ROLE "{{name}}" WITH
- LOGIN
- PASSWORD '{{password}}'
- VALID UNTIL '{{expiration}}';
-GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
-`
-
-const testPostgresReadOnlyRole = `
-CREATE ROLE "{{name}}" WITH
- LOGIN
- PASSWORD '{{password}}'
- VALID UNTIL '{{expiration}}';
-GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";
-GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}";
-`
-
-const testPostgresBlockStatementRole = `
-DO $$
-BEGIN
- IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
- CREATE ROLE "foo-role";
- CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
- ALTER ROLE "foo-role" SET search_path = foo;
- GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
- GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
- END IF;
-END
-$$
-
-CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';
-GRANT "foo-role" TO "{{name}}";
-ALTER ROLE "{{name}}" SET search_path = foo;
-GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";
-`
-
-var testPostgresBlockStatementRoleSlice = []string{
- `
-DO $$
-BEGIN
- IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
- CREATE ROLE "foo-role";
- CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
- ALTER ROLE "foo-role" SET search_path = foo;
- GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
- GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
- GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
- END IF;
-END
-$$
-`,
- `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`,
- `GRANT "foo-role" TO "{{name}}";`,
- `ALTER ROLE "{{name}}" SET search_path = foo;`,
- `GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`,
-}
-
-const defaultPostgresRevocationSQL = `
-REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{name}}";
-REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{name}}";
-REVOKE USAGE ON SCHEMA public FROM "{{name}}";
-
-DROP ROLE IF EXISTS "{{name}}";
-`
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go
deleted file mode 100644
index d36d571..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package connutil
-
-import (
- "errors"
- "sync"
-)
-
-var (
- ErrNotInitialized = errors.New("connection has not been initalized")
-)
-
-// ConnectionProducer can be used as an embeded interface in the Database
-// definition. It implements the methods dealing with individual database
-// connections and is used in all the builtin database types.
-type ConnectionProducer interface {
- Close() error
- Initialize(map[string]interface{}, bool) error
- Connection() (interface{}, error)
-
- sync.Locker
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go
deleted file mode 100644
index c325cbc..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package connutil
-
-import (
- "database/sql"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/mitchellh/mapstructure"
-)
-
-// SQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases
-type SQLConnectionProducer struct {
- ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"`
- MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"`
- MaxIdleConnections int `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"`
- MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" structs:"max_connection_lifetime" mapstructure:"max_connection_lifetime"`
-
- Type string
- maxConnectionLifetime time.Duration
- Initialized bool
- db *sql.DB
- sync.Mutex
-}
-
-func (c *SQLConnectionProducer) Initialize(conf map[string]interface{}, verifyConnection bool) error {
- c.Lock()
- defer c.Unlock()
-
- err := mapstructure.WeakDecode(conf, c)
- if err != nil {
- return err
- }
-
- if len(c.ConnectionURL) == 0 {
- return fmt.Errorf("connection_url cannot be empty")
- }
-
- if c.MaxOpenConnections == 0 {
- c.MaxOpenConnections = 2
- }
-
- if c.MaxIdleConnections == 0 {
- c.MaxIdleConnections = c.MaxOpenConnections
- }
- if c.MaxIdleConnections > c.MaxOpenConnections {
- c.MaxIdleConnections = c.MaxOpenConnections
- }
- if c.MaxConnectionLifetimeRaw == nil {
- c.MaxConnectionLifetimeRaw = "0s"
- }
-
- c.maxConnectionLifetime, err = parseutil.ParseDurationSecond(c.MaxConnectionLifetimeRaw)
- if err != nil {
- return fmt.Errorf("invalid max_connection_lifetime: %s", err)
- }
-
- // Set initialized to true at this point since all fields are set,
- // and the connection can be established at a later time.
- c.Initialized = true
-
- if verifyConnection {
- if _, err := c.Connection(); err != nil {
- return fmt.Errorf("error verifying connection: %s", err)
- }
-
- if err := c.db.Ping(); err != nil {
- return fmt.Errorf("error verifying connection: %s", err)
- }
- }
-
- return nil
-}
-
-func (c *SQLConnectionProducer) Connection() (interface{}, error) {
- if !c.Initialized {
- return nil, ErrNotInitialized
- }
-
- // If we already have a DB, test it and return
- if c.db != nil {
- if err := c.db.Ping(); err == nil {
- return c.db, nil
- }
- // If the ping was unsuccessful, close it and ignore errors as we'll be
- // reestablishing anyways
- c.db.Close()
- }
-
- // For mssql backend, switch to sqlserver instead
- dbType := c.Type
- if c.Type == "mssql" {
- dbType = "sqlserver"
- }
-
- // Otherwise, attempt to make connection
- conn := c.ConnectionURL
-
- // Ensure timezone is set to UTC for all the conenctions
- if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") {
- if strings.Contains(conn, "?") {
- conn += "&timezone=utc"
- } else {
- conn += "?timezone=utc"
- }
- }
-
- var err error
- c.db, err = sql.Open(dbType, conn)
- if err != nil {
- return nil, err
- }
-
- // Set some connection pool settings. We don't need much of this,
- // since the request rate shouldn't be high.
- c.db.SetMaxOpenConns(c.MaxOpenConnections)
- c.db.SetMaxIdleConns(c.MaxIdleConnections)
- c.db.SetConnMaxLifetime(c.maxConnectionLifetime)
-
- return c.db, nil
-}
-
-// Close attempts to close the connection
-func (c *SQLConnectionProducer) Close() error {
- // Grab the write lock
- c.Lock()
- defer c.Unlock()
-
- if c.db != nil {
- c.db.Close()
- }
-
- c.db = nil
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go
deleted file mode 100644
index 8ce3b5e..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package credsutil
-
-import (
- "crypto/rand"
- "time"
-
- "fmt"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
-)
-
-// CredentialsProducer can be used as an embeded interface in the Database
-// definition. It implements the methods for generating user information for a
-// particular database type and is used in all the builtin database types.
-type CredentialsProducer interface {
- GenerateUsername(usernameConfig dbplugin.UsernameConfig) (string, error)
- GeneratePassword() (string, error)
- GenerateExpiration(ttl time.Time) (string, error)
-}
-
-const (
- reqStr = `A1a-`
- minStrLen = 10
-)
-
-// RandomAlphaNumeric returns a random string of characters [A-Za-z0-9-]
-// of the provided length. The string generated takes up to 4 characters
-// of space that are predefined and prepended to ensure password
-// character requirements. It also requires a min length of 10 characters.
-func RandomAlphaNumeric(length int, prependA1a bool) (string, error) {
- if length < minStrLen {
- return "", fmt.Errorf("minimum length of %d is required", minStrLen)
- }
-
- var size int
- var retBytes []byte
- if prependA1a {
- size = len(reqStr)
- retBytes = make([]byte, length-size)
- // Enforce alphanumeric requirements
- retBytes = append([]byte(reqStr), retBytes...)
- } else {
- retBytes = make([]byte, length)
- }
-
- for size < length {
- // Extend the len of the random byte slice to lower odds of having to
- // re-roll.
- c := length + len(reqStr)
- bArr := make([]byte, c)
- _, err := rand.Read(bArr)
- if err != nil {
- return "", err
- }
-
- for _, b := range bArr {
- if size == length {
- break
- }
-
- /**
- * Each byte will be in [0, 256), but we only care about:
- *
- * [48, 57] 0-9
- * [65, 90] A-Z
- * [97, 122] a-z
- *
- * Which means that the highest bit will always be zero, since the last byte with high bit
- * zero is 01111111 = 127 which is higher than 122. Lower our odds of having to re-roll a byte by
- * dividing by two (right bit shift of 1).
- */
-
- b = b >> 1
- // Bitwise OR to set min to 48, further reduces re-roll
- b |= 0x30
-
- // The byte is any of 0-9 A-Z a-z
- byteIsAllowable := (b >= 48 && b <= 57) || (b >= 65 && b <= 90) || (b >= 97 && b <= 122)
- if byteIsAllowable {
- retBytes[size] = b
- size++
- }
- }
- }
-
- return string(retBytes), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil_test.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil_test.go
deleted file mode 100644
index e094719..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package credsutil
-
-import (
- "strings"
- "testing"
-)
-
-func TestRandomAlphaNumeric(t *testing.T) {
- s, err := RandomAlphaNumeric(10, true)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- if len(s) != 10 {
- t.Fatalf("Unexpected length of string, expected 10, got string: %s", s)
- }
-
- s, err = RandomAlphaNumeric(20, true)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- if len(s) != 20 {
- t.Fatalf("Unexpected length of string, expected 20, got string: %s", s)
- }
-
- if !strings.Contains(s, reqStr) {
- t.Fatalf("Expected %s to contain %s", s, reqStr)
- }
-
- s, err = RandomAlphaNumeric(20, false)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- if len(s) != 20 {
- t.Fatalf("Unexpected length of string, expected 20, got string: %s", s)
- }
-
- if strings.Contains(s, reqStr) {
- t.Fatalf("Expected %s not to contain %s", s, reqStr)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go
deleted file mode 100644
index af9a746..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package credsutil
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
-)
-
-const (
- NoneLength int = -1
-)
-
-// SQLCredentialsProducer implements CredentialsProducer and provides a generic credentials producer for most sql database types.
-type SQLCredentialsProducer struct {
- DisplayNameLen int
- RoleNameLen int
- UsernameLen int
- Separator string
-}
-
-func (scp *SQLCredentialsProducer) GenerateUsername(config dbplugin.UsernameConfig) (string, error) {
- username := "v"
-
- displayName := config.DisplayName
- if scp.DisplayNameLen > 0 && len(displayName) > scp.DisplayNameLen {
- displayName = displayName[:scp.DisplayNameLen]
- } else if scp.DisplayNameLen == NoneLength {
- displayName = ""
- }
-
- if len(displayName) > 0 {
- username = fmt.Sprintf("%s%s%s", username, scp.Separator, displayName)
- }
-
- roleName := config.RoleName
- if scp.RoleNameLen > 0 && len(roleName) > scp.RoleNameLen {
- roleName = roleName[:scp.RoleNameLen]
- } else if scp.RoleNameLen == NoneLength {
- roleName = ""
- }
-
- if len(roleName) > 0 {
- username = fmt.Sprintf("%s%s%s", username, scp.Separator, roleName)
- }
-
- userUUID, err := RandomAlphaNumeric(20, false)
- if err != nil {
- return "", err
- }
-
- username = fmt.Sprintf("%s%s%s", username, scp.Separator, userUUID)
- username = fmt.Sprintf("%s%s%s", username, scp.Separator, fmt.Sprint(time.Now().UTC().Unix()))
- if scp.UsernameLen > 0 && len(username) > scp.UsernameLen {
- username = username[:scp.UsernameLen]
- }
-
- return username, nil
-}
-
-func (scp *SQLCredentialsProducer) GeneratePassword() (string, error) {
- password, err := RandomAlphaNumeric(20, true)
- if err != nil {
- return "", err
- }
-
- return password, nil
-}
-
-func (scp *SQLCredentialsProducer) GenerateExpiration(ttl time.Time) (string, error) {
- return ttl.Format("2006-01-02 15:04:05-0700"), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go
deleted file mode 100644
index e80273b..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package dbutil
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-var (
- ErrEmptyCreationStatement = errors.New("empty creation statements")
-)
-
-// Query templates a query for us.
-func QueryHelper(tpl string, data map[string]string) string {
- for k, v := range data {
- tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
- }
-
- return tpl
-}
diff --git a/vendor/github.com/hashicorp/vault/plugins/serve.go b/vendor/github.com/hashicorp/vault/plugins/serve.go
deleted file mode 100644
index a40fc5b..0000000
--- a/vendor/github.com/hashicorp/vault/plugins/serve.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package plugins
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/pluginutil"
-)
-
-// Serve is used to start a plugin's RPC server. It takes an interface that must
-// implement a known plugin interface to vault and an optional api.TLSConfig for
-// use during the inital unwrap request to vault. The api config is particulary
-// useful when vault is setup to require client cert checking.
-func Serve(plugin interface{}, tlsConfig *api.TLSConfig) {
- tlsProvider := pluginutil.VaultPluginTLSProvider(tlsConfig)
-
- err := pluginutil.OptionallyEnableMlock()
- if err != nil {
- fmt.Println(err)
- return
- }
-
- switch p := plugin.(type) {
- case dbplugin.Database:
- dbplugin.Serve(p, tlsProvider)
- default:
- fmt.Println("Unsupported plugin type")
- }
-
-}
diff --git a/vendor/github.com/hashicorp/vault/scripts/build.sh b/vendor/github.com/hashicorp/vault/scripts/build.sh
deleted file mode 100755
index 6a1cb51..0000000
--- a/vendor/github.com/hashicorp/vault/scripts/build.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env bash
-#
-# This script builds the application from source for multiple platforms.
-set -e
-
-# Get the parent directory of where this script is.
-SOURCE="${BASH_SOURCE[0]}"
-while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
-DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
-
-# Change into that directory
-cd "$DIR"
-
-# Set build tags
-BUILD_TAGS="${BUILD_TAGS:-"vault"}"
-
-# Get the git commit
-GIT_COMMIT="$(git rev-parse HEAD)"
-GIT_DIRTY="$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)"
-
-# Determine the arch/os combos we're building for
-XC_ARCH=${XC_ARCH:-"386 amd64"}
-XC_OS=${XC_OS:-linux darwin windows freebsd openbsd netbsd solaris}
-XC_OSARCH=${XC_OSARCH:-"linux/386 linux/amd64 linux/arm linux/arm64 darwin/386 darwin/amd64 windows/386 windows/amd64 freebsd/386 freebsd/amd64 freebsd/arm openbsd/386 openbsd/amd64 openbsd/arm netbsd/386 netbsd/amd64 netbsd/arm solaris/amd64"}
-
-GOPATH=${GOPATH:-$(go env GOPATH)}
-case $(uname) in
- CYGWIN*)
- GOPATH="$(cygpath $GOPATH)"
- ;;
-esac
-
-# Delete the old dir
-echo "==> Removing old directory..."
-rm -f bin/*
-rm -rf pkg/*
-mkdir -p bin/
-
-# If its dev mode, only build for ourself
-if [ "${VAULT_DEV_BUILD}x" != "x" ]; then
- XC_OS=$(go env GOOS)
- XC_ARCH=$(go env GOARCH)
- XC_OSARCH=$(go env GOOS)/$(go env GOARCH)
-fi
-
-# Build!
-echo "==> Building..."
-gox \
- -osarch="${XC_OSARCH}" \
- -ldflags "-X github.com/hashicorp/vault/version.GitCommit='${GIT_COMMIT}${GIT_DIRTY}'" \
- -output "pkg/{{.OS}}_{{.Arch}}/vault" \
- -tags="${BUILD_TAGS}" \
- .
-
-# Move all the compiled things to the $GOPATH/bin
-OLDIFS=$IFS
-IFS=: MAIN_GOPATH=($GOPATH)
-IFS=$OLDIFS
-
-# Copy our OS/Arch to the bin/ directory
-DEV_PLATFORM="./pkg/$(go env GOOS)_$(go env GOARCH)"
-for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f); do
- cp ${F} bin/
- cp ${F} ${MAIN_GOPATH}/bin/
-done
-
-if [ "${VAULT_DEV_BUILD}x" = "x" ]; then
- # Zip and copy to the dist dir
- echo "==> Packaging..."
- for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do
- OSARCH=$(basename ${PLATFORM})
- echo "--> ${OSARCH}"
-
- pushd $PLATFORM >/dev/null 2>&1
- zip ../${OSARCH}.zip ./*
- popd >/dev/null 2>&1
- done
-fi
-
-# Done!
-echo
-echo "==> Results:"
-ls -hl bin/
diff --git a/vendor/github.com/hashicorp/vault/scripts/coverage.sh b/vendor/github.com/hashicorp/vault/scripts/coverage.sh
deleted file mode 100755
index ad80496..0000000
--- a/vendor/github.com/hashicorp/vault/scripts/coverage.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/sh
-# Generate test coverage statistics for Go packages.
-#
-# Works around the fact that `go test -coverprofile` currently does not work
-# with multiple packages, see https://code.google.com/p/go/issues/detail?id=6909
-#
-# Usage: script/coverage [--html|--coveralls]
-#
-# --html Additionally create HTML report and open it in browser
-# --coveralls Push coverage statistics to coveralls.io
-#
-
-set -e
-
-workdir=.cover
-profile="$workdir/cover.out"
-mode=count
-
-generate_cover_data() {
- rm -rf "$workdir"
- mkdir "$workdir"
-
- for pkg in "$@"; do
- f="$workdir/$(echo $pkg | tr / -).cover"
- go test -covermode="$mode" -coverprofile="$f" "$pkg"
- done
-
- echo "mode: $mode" >"$profile"
- grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
-}
-
-show_cover_report() {
- go tool cover -${1}="$profile"
-}
-
-push_to_coveralls() {
- echo "Pushing coverage statistics to coveralls.io"
- goveralls -coverprofile="$profile"
-}
-
-generate_cover_data $(go list ./... | grep -v /vendor/)
-show_cover_report func
-case "$1" in
-"")
- ;;
---html)
- show_cover_report html ;;
---coveralls)
- push_to_coveralls ;;
-*)
- echo >&2 "error: invalid option: $1"; exit 1 ;;
-esac
diff --git a/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile b/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile
deleted file mode 100644
index c5104f3..0000000
--- a/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile
+++ /dev/null
@@ -1,26 +0,0 @@
-# Adapted from tcnksm/dockerfile-gox -- thanks!
-
-FROM debian:stable
-
-RUN apt-get update -y && apt-get install --no-install-recommends -y -q \
- curl \
- zip \
- build-essential \
- ca-certificates \
- git mercurial bzr \
- && rm -rf /var/lib/apt/lists/*
-
-ENV GOVERSION 1.9
-RUN mkdir /goroot && mkdir /gopath
-RUN curl https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz \
- | tar xvzf - -C /goroot --strip-components=1
-
-ENV GOPATH /gopath
-ENV GOROOT /goroot
-ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH
-
-RUN go get github.com/mitchellh/gox
-
-RUN mkdir -p /gopath/src/github.com/hashicorp/vault
-WORKDIR /gopath/src/github.com/hashicorp/vault
-CMD make bin
diff --git a/vendor/github.com/hashicorp/vault/scripts/dist.sh b/vendor/github.com/hashicorp/vault/scripts/dist.sh
deleted file mode 100755
index 1e1b2f7..0000000
--- a/vendor/github.com/hashicorp/vault/scripts/dist.sh
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# Get the version from the command line
-VERSION=$1
-if [ -z $VERSION ]; then
- echo "Please specify a version."
- exit 1
-fi
-
-# Make sure we have AWS API keys
-if ([ -z $AWS_ACCESS_KEY_ID ] || [ -z $AWS_SECRET_ACCESS_KEY ]) && [ ! -z $HC_RELEASE ]; then
- echo "Please set your AWS access key information in the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env vars."
- exit 1
-fi
-
-if [ -z $NOBUILD ] && [ -z $DOCKER_CROSS_IMAGE ]; then
- echo "Please set the Docker cross-compile image in DOCKER_CROSS_IMAGE"
- exit 1
-fi
-
-# Get the parent directory of where this script is.
-SOURCE="${BASH_SOURCE[0]}"
-while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
-DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
-
-# Change into that dir because we expect that
-cd $DIR
-
-if [ -z $RELBRANCH ]; then
- RELBRANCH=master
-fi
-
-# Tag, unless told not to
-if [ -z $NOTAG ]; then
- echo "==> Tagging..."
- git commit --allow-empty --gpg-sign=348FFC4C -m "Cut version $VERSION"
- git tag -a -m "Version $VERSION" -s -u 348FFC4C "v${VERSION}" $RELBRANCH
-fi
-
-# Build the packages
-if [ -z $NOBUILD ]; then
- # This should be a local build of the Dockerfile in the cross dir
- docker run --rm -v "$(pwd)":/gopath/src/github.com/hashicorp/vault -w /gopath/src/github.com/hashicorp/vault ${DOCKER_CROSS_IMAGE}
-fi
-
-# Zip all the files
-rm -rf ./pkg/dist
-mkdir -p ./pkg/dist
-for FILENAME in $(find ./pkg -mindepth 1 -maxdepth 1 -type f); do
- FILENAME=$(basename $FILENAME)
- cp ./pkg/${FILENAME} ./pkg/dist/vault_${VERSION}_${FILENAME}
-done
-
-if [ -z $NOSIGN ]; then
- echo "==> Signing..."
- pushd ./pkg/dist
- rm -f ./vault_${VERSION}_SHA256SUMS*
- shasum -a256 * > ./vault_${VERSION}_SHA256SUMS
- gpg --default-key 348FFC4C --detach-sig ./vault_${VERSION}_SHA256SUMS
- popd
-fi
-
-# Upload
-if [ ! -z $HC_RELEASE ]; then
- hc-releases upload $DIR/pkg/dist
- hc-releases publish
-
- curl -X PURGE https://releases.hashicorp.com/vault/${VERSION}
- for FILENAME in $(find $DIR/pkg/dist -type f); do
- FILENAME=$(basename $FILENAME)
- curl -X PURGE https://releases.hashicorp.com/vault/${VERSION}/${FILENAME}
- done
-fi
-
-exit 0
diff --git a/vendor/github.com/hashicorp/vault/scripts/gofmtcheck.sh b/vendor/github.com/hashicorp/vault/scripts/gofmtcheck.sh
deleted file mode 100755
index 574f4d7..0000000
--- a/vendor/github.com/hashicorp/vault/scripts/gofmtcheck.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env bash
-
-echo "==> Checking that code complies with gofmt requirements..."
-
-gofmt_files=$(gofmt -l `find . -name '*.go' | grep -v vendor`)
-if [[ -n ${gofmt_files} ]]; then
- echo 'gofmt needs running on the following files:'
- echo "${gofmt_files}"
- echo "You can use the command: \`make fmt\` to reformat code."
- exit 1
-fi
diff --git a/vendor/github.com/hashicorp/vault/scripts/update_deps.sh b/vendor/github.com/hashicorp/vault/scripts/update_deps.sh
deleted file mode 100755
index 82b85b0..0000000
--- a/vendor/github.com/hashicorp/vault/scripts/update_deps.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh
-
-set -e
-
-TOOL=vault
-
-## Make a temp dir
-tempdir=$(mktemp -d update-${TOOL}-deps.XXXXXX)
-
-## Set paths
-export GOPATH="$(pwd)/${tempdir}"
-export PATH="${GOPATH}/bin:${PATH}"
-cd $tempdir
-
-## Get Vault
-mkdir -p src/github.com/hashicorp
-cd src/github.com/hashicorp
-echo "Fetching ${TOOL}..."
-git clone https://github.com/hashicorp/${TOOL}
-cd ${TOOL}
-
-## Clean out earlier vendoring
-rm -rf Godeps vendor
-
-## Get govendor
-go get github.com/kardianos/govendor
-
-## Init
-govendor init
-
-## Fetch deps
-echo "Fetching deps, will take some time..."
-govendor fetch +missing
-
-govendor remove github.com/Sirupsen/logrus
-cd vendor
-find -type f | grep '.go' | xargs sed -i -e 's/Sirupsen/sirupsen/'
-
-echo "Done; to commit run \n\ncd ${GOPATH}/src/github.com/hashicorp/${TOOL}\n"
diff --git a/vendor/github.com/hashicorp/vault/scripts/windows/build.bat b/vendor/github.com/hashicorp/vault/scripts/windows/build.bat
deleted file mode 100644
index feace8f..0000000
--- a/vendor/github.com/hashicorp/vault/scripts/windows/build.bat
+++ /dev/null
@@ -1,101 +0,0 @@
-@echo off
-setlocal
-
-set _EXITCODE=0
-set _DEV_BUILD=0
-
-if not exist %1 exit /b 1
-if x%2 == xVAULT_DEV set _DEV_BUILD=1
-
-cd %1
-md bin 2>nul
-
-:: Get the git commit
-set _GIT_COMMIT_FILE=%TEMP%\vault-git_commit.txt
-set _GIT_DIRTY_FILE=%TEMP%\vault-git_dirty.txt
-
-set _NUL_CMP_FILE=%TEMP%\vault-nul_cmp.txt
-type nul >%_NUL_CMP_FILE%
-
-git rev-parse HEAD >"%_GIT_COMMIT_FILE%"
-set /p _GIT_COMMIT=<"%_GIT_COMMIT_FILE%"
-del /f "%_GIT_COMMIT_FILE%" 2>nul
-
-set _GIT_DIRTY=
-git status --porcelain >"%_GIT_DIRTY_FILE%"
-fc "%_GIT_DIRTY_FILE%" "%_NUL_CMP_FILE%" >nul
-if errorlevel 1 set _GIT_DIRTY=+CHANGES
-del /f "%_GIT_DIRTY_FILE%" 2>nul
-del /f "%_NUL_CMP_FILE%" 2>nul
-
-REM Determine the arch/os combos we're building for
-set _XC_ARCH=386 amd64 arm
-set _XC_OS=linux darwin windows freebsd openbsd
-
-REM Install dependencies
-echo ==^> Installing dependencies...
-go get ./...
-
-REM Clean up the old binaries and packages.
-echo ==^> Cleaning old builds...
-rd /s /q bin pkg 2>nul
-md bin 2>nul
-
-REM If its dev mode, only build for ourself
-if not %_DEV_BUILD% equ 1 goto build
-
-:devbuild
-echo ==^> Preparing for development build...
-set _GO_ENV_TMP_FILE=%TEMP%\vault-go-env.txt
-go env GOARCH >"%_GO_ENV_TMP_FILE%"
-set /p _XC_ARCH=<"%_GO_ENV_TMP_FILE%"
-del /f "%_GO_ENV_TMP_FILE%" 2>nul
-go env GOOS >"%_GO_ENV_TMP_FILE%"
-set /p _XC_OS=<"%_GO_ENV_TMP_FILE%"
-del /f "%_GO_ENV_TMP_FILE%" 2>nul
-
-:build
-REM Build!
-echo ==^> Building...
-gox^
- -os="%_XC_OS%"^
- -arch="%_XC_ARCH%"^
- -ldflags "-X github.com/hashicorp/vault/version.GitCommit %_GIT_COMMIT%%_GIT_DIRTY%"^
- -output "pkg/{{.OS}}_{{.Arch}}/vault"^
- .
-
-if %ERRORLEVEL% equ 1 set %_EXITCODE%=1
-
-if %_EXITCODE% equ 1 exit /b %_EXITCODE%
-
-set _GO_ENV_TMP_FILE=%TEMP%\vault-go-env.txt
-
-go env GOPATH >"%_GO_ENV_TMP_FILE%"
-set /p _GOPATH=<"%_GO_ENV_TMP_FILE%"
-del /f "%_GO_ENV_TMP_FILE%" 2>nul
-
-go env GOARCH >"%_GO_ENV_TMP_FILE%"
-set /p _GOARCH=<"%_GO_ENV_TMP_FILE%"
-del /f "%_GO_ENV_TMP_FILE%" 2>nul
-
-go env GOOS >"%_GO_ENV_TMP_FILE%"
-set /p _GOOS=<"%_GO_ENV_TMP_FILE%"
-del /f "%_GO_ENV_TMP_FILE%" 2>nul
-
-REM Copy our OS/Arch to the bin/ directory
-set _DEV_PLATFORM=pkg\%_GOOS%_%_GOARCH%
-
-for /r %%f in (%_DEV_PLATFORM%) do (
- copy /b /y %%f bin\ >nul
- copy /b /y %%f %_GOPATH%\bin\ >nul
-)
-
-REM TODO(ceh): package dist
-
-REM Done!
-echo.
-echo ==^> Results:
-echo.
-for %%A in ("bin\*") do echo %%~fA
-
-exit /b %_EXITCODE%
diff --git a/vendor/github.com/hashicorp/vault/shamir/shamir.go b/vendor/github.com/hashicorp/vault/shamir/shamir.go
deleted file mode 100644
index d6f5137..0000000
--- a/vendor/github.com/hashicorp/vault/shamir/shamir.go
+++ /dev/null
@@ -1,260 +0,0 @@
-package shamir
-
-import (
- "crypto/rand"
- "crypto/subtle"
- "fmt"
- mathrand "math/rand"
- "time"
-)
-
-const (
- // ShareOverhead is the byte size overhead of each share
- // when using Split on a secret. This is caused by appending
- // a one byte tag to the share.
- ShareOverhead = 1
-)
-
-// polynomial represents a polynomial of arbitrary degree
-type polynomial struct {
- coefficients []uint8
-}
-
-// makePolynomial constructs a random polynomial of the given
-// degree but with the provided intercept value.
-func makePolynomial(intercept, degree uint8) (polynomial, error) {
- // Create a wrapper
- p := polynomial{
- coefficients: make([]byte, degree+1),
- }
-
- // Ensure the intercept is set
- p.coefficients[0] = intercept
-
- // Assign random co-efficients to the polynomial
- if _, err := rand.Read(p.coefficients[1:]); err != nil {
- return p, err
- }
-
- return p, nil
-}
-
-// evaluate returns the value of the polynomial for the given x
-func (p *polynomial) evaluate(x uint8) uint8 {
- // Special case the origin
- if x == 0 {
- return p.coefficients[0]
- }
-
- // Compute the polynomial value using Horner's method.
- degree := len(p.coefficients) - 1
- out := p.coefficients[degree]
- for i := degree - 1; i >= 0; i-- {
- coeff := p.coefficients[i]
- out = add(mult(out, x), coeff)
- }
- return out
-}
-
-// interpolatePolynomial takes N sample points and returns
-// the value at a given x using a lagrange interpolation.
-func interpolatePolynomial(x_samples, y_samples []uint8, x uint8) uint8 {
- limit := len(x_samples)
- var result, basis uint8
- for i := 0; i < limit; i++ {
- basis = 1
- for j := 0; j < limit; j++ {
- if i == j {
- continue
- }
- num := add(x, x_samples[j])
- denom := add(x_samples[i], x_samples[j])
- term := div(num, denom)
- basis = mult(basis, term)
- }
- group := mult(y_samples[i], basis)
- result = add(result, group)
- }
- return result
-}
-
-// div divides two numbers in GF(2^8)
-func div(a, b uint8) uint8 {
- if b == 0 {
- // leaks some timing information but we don't care anyways as this
- // should never happen, hence the panic
- panic("divide by zero")
- }
-
- var goodVal, zero uint8
- log_a := logTable[a]
- log_b := logTable[b]
- diff := (int(log_a) - int(log_b)) % 255
- if diff < 0 {
- diff += 255
- }
-
- ret := expTable[diff]
-
- // Ensure we return zero if a is zero but aren't subject to timing attacks
- goodVal = ret
-
- if subtle.ConstantTimeByteEq(a, 0) == 1 {
- ret = zero
- } else {
- ret = goodVal
- }
-
- return ret
-}
-
-// mult multiplies two numbers in GF(2^8)
-func mult(a, b uint8) (out uint8) {
- var goodVal, zero uint8
- log_a := logTable[a]
- log_b := logTable[b]
- sum := (int(log_a) + int(log_b)) % 255
-
- ret := expTable[sum]
-
- // Ensure we return zero if either a or be are zero but aren't subject to
- // timing attacks
- goodVal = ret
-
- if subtle.ConstantTimeByteEq(a, 0) == 1 {
- ret = zero
- } else {
- ret = goodVal
- }
-
- if subtle.ConstantTimeByteEq(b, 0) == 1 {
- ret = zero
- } else {
- // This operation does not do anything logically useful. It
- // only ensures a constant number of assignments to thwart
- // timing attacks.
- goodVal = zero
- }
-
- return ret
-}
-
-// add combines two numbers in GF(2^8)
-// This can also be used for subtraction since it is symmetric.
-func add(a, b uint8) uint8 {
- return a ^ b
-}
-
-// Split takes an arbitrarily long secret and generates a `parts`
-// number of shares, `threshold` of which are required to reconstruct
-// the secret. The parts and threshold must be at least 2, and less
-// than 256. The returned shares are each one byte longer than the secret
-// as they attach a tag used to reconstruct the secret.
-func Split(secret []byte, parts, threshold int) ([][]byte, error) {
- // Sanity check the input
- if parts < threshold {
- return nil, fmt.Errorf("parts cannot be less than threshold")
- }
- if parts > 255 {
- return nil, fmt.Errorf("parts cannot exceed 255")
- }
- if threshold < 2 {
- return nil, fmt.Errorf("threshold must be at least 2")
- }
- if threshold > 255 {
- return nil, fmt.Errorf("threshold cannot exceed 255")
- }
- if len(secret) == 0 {
- return nil, fmt.Errorf("cannot split an empty secret")
- }
-
- // Generate random list of x coordinates
- mathrand.Seed(time.Now().UnixNano())
- xCoordinates := mathrand.Perm(255)
-
- // Allocate the output array, initialize the final byte
- // of the output with the offset. The representation of each
- // output is {y1, y2, .., yN, x}.
- out := make([][]byte, parts)
- for idx := range out {
- out[idx] = make([]byte, len(secret)+1)
- out[idx][len(secret)] = uint8(xCoordinates[idx]) + 1
- }
-
- // Construct a random polynomial for each byte of the secret.
- // Because we are using a field of size 256, we can only represent
- // a single byte as the intercept of the polynomial, so we must
- // use a new polynomial for each byte.
- for idx, val := range secret {
- p, err := makePolynomial(val, uint8(threshold-1))
- if err != nil {
- return nil, fmt.Errorf("failed to generate polynomial: %v", err)
- }
-
- // Generate a `parts` number of (x,y) pairs
- // We cheat by encoding the x value once as the final index,
- // so that it only needs to be stored once.
- for i := 0; i < parts; i++ {
- x := uint8(xCoordinates[i]) + 1
- y := p.evaluate(x)
- out[i][idx] = y
- }
- }
-
- // Return the encoded secrets
- return out, nil
-}
-
-// Combine is used to reverse a Split and reconstruct a secret
-// once a `threshold` number of parts are available.
-func Combine(parts [][]byte) ([]byte, error) {
- // Verify enough parts provided
- if len(parts) < 2 {
- return nil, fmt.Errorf("less than two parts cannot be used to reconstruct the secret")
- }
-
- // Verify the parts are all the same length
- firstPartLen := len(parts[0])
- if firstPartLen < 2 {
- return nil, fmt.Errorf("parts must be at least two bytes")
- }
- for i := 1; i < len(parts); i++ {
- if len(parts[i]) != firstPartLen {
- return nil, fmt.Errorf("all parts must be the same length")
- }
- }
-
- // Create a buffer to store the reconstructed secret
- secret := make([]byte, firstPartLen-1)
-
- // Buffer to store the samples
- x_samples := make([]uint8, len(parts))
- y_samples := make([]uint8, len(parts))
-
- // Set the x value for each sample and ensure no x_sample values are the same,
- // otherwise div() can be unhappy
- checkMap := map[byte]bool{}
- for i, part := range parts {
- samp := part[firstPartLen-1]
- if exists := checkMap[samp]; exists {
- return nil, fmt.Errorf("duplicate part detected")
- }
- checkMap[samp] = true
- x_samples[i] = samp
- }
-
- // Reconstruct each byte
- for idx := range secret {
- // Set the y value for each sample
- for i, part := range parts {
- y_samples[i] = part[idx]
- }
-
- // Interpolte the polynomial and compute the value at 0
- val := interpolatePolynomial(x_samples, y_samples, 0)
-
- // Evaluate the 0th value to get the intercept
- secret[idx] = val
- }
- return secret, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/shamir/shamir_test.go b/vendor/github.com/hashicorp/vault/shamir/shamir_test.go
deleted file mode 100644
index 09f90d5..0000000
--- a/vendor/github.com/hashicorp/vault/shamir/shamir_test.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package shamir
-
-import (
- "bytes"
- "testing"
-)
-
-func TestSplit_invalid(t *testing.T) {
- secret := []byte("test")
-
- if _, err := Split(secret, 0, 0); err == nil {
- t.Fatalf("expect error")
- }
-
- if _, err := Split(secret, 2, 3); err == nil {
- t.Fatalf("expect error")
- }
-
- if _, err := Split(secret, 1000, 3); err == nil {
- t.Fatalf("expect error")
- }
-
- if _, err := Split(secret, 10, 1); err == nil {
- t.Fatalf("expect error")
- }
-
- if _, err := Split(nil, 3, 2); err == nil {
- t.Fatalf("expect error")
- }
-}
-
-func TestSplit(t *testing.T) {
- secret := []byte("test")
-
- out, err := Split(secret, 5, 3)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if len(out) != 5 {
- t.Fatalf("bad: %v", out)
- }
-
- for _, share := range out {
- if len(share) != len(secret)+1 {
- t.Fatalf("bad: %v", out)
- }
- }
-}
-
-func TestCombine_invalid(t *testing.T) {
- // Not enough parts
- if _, err := Combine(nil); err == nil {
- t.Fatalf("should err")
- }
-
- // Mis-match in length
- parts := [][]byte{
- []byte("foo"),
- []byte("ba"),
- }
- if _, err := Combine(parts); err == nil {
- t.Fatalf("should err")
- }
-
- //Too short
- parts = [][]byte{
- []byte("f"),
- []byte("b"),
- }
- if _, err := Combine(parts); err == nil {
- t.Fatalf("should err")
- }
-
- parts = [][]byte{
- []byte("foo"),
- []byte("foo"),
- }
- if _, err := Combine(parts); err == nil {
- t.Fatalf("should err")
- }
-}
-
-func TestCombine(t *testing.T) {
- secret := []byte("test")
-
- out, err := Split(secret, 5, 3)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // There is 5*4*3 possible choices,
- // we will just brute force try them all
- for i := 0; i < 5; i++ {
- for j := 0; j < 5; j++ {
- if j == i {
- continue
- }
- for k := 0; k < 5; k++ {
- if k == i || k == j {
- continue
- }
- parts := [][]byte{out[i], out[j], out[k]}
- recomb, err := Combine(parts)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if !bytes.Equal(recomb, secret) {
- t.Errorf("parts: (i:%d, j:%d, k:%d) %v", i, j, k, parts)
- t.Fatalf("bad: %v %v", recomb, secret)
- }
- }
- }
- }
-}
-
-func TestField_Add(t *testing.T) {
- if out := add(16, 16); out != 0 {
- t.Fatalf("Bad: %v 16", out)
- }
-
- if out := add(3, 4); out != 7 {
- t.Fatalf("Bad: %v 7", out)
- }
-}
-
-func TestField_Mult(t *testing.T) {
- if out := mult(3, 7); out != 9 {
- t.Fatalf("Bad: %v 9", out)
- }
-
- if out := mult(3, 0); out != 0 {
- t.Fatalf("Bad: %v 0", out)
- }
-
- if out := mult(0, 3); out != 0 {
- t.Fatalf("Bad: %v 0", out)
- }
-}
-
-func TestField_Divide(t *testing.T) {
- if out := div(0, 7); out != 0 {
- t.Fatalf("Bad: %v 0", out)
- }
-
- if out := div(3, 3); out != 1 {
- t.Fatalf("Bad: %v 1", out)
- }
-
- if out := div(6, 3); out != 2 {
- t.Fatalf("Bad: %v 2", out)
- }
-}
-
-func TestPolynomial_Random(t *testing.T) {
- p, err := makePolynomial(42, 2)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if p.coefficients[0] != 42 {
- t.Fatalf("bad: %v", p.coefficients)
- }
-}
-
-func TestPolynomial_Eval(t *testing.T) {
- p, err := makePolynomial(42, 1)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if out := p.evaluate(0); out != 42 {
- t.Fatalf("bad: %v", out)
- }
-
- out := p.evaluate(1)
- exp := add(42, mult(1, p.coefficients[1]))
- if out != exp {
- t.Fatalf("bad: %v %v %v", out, exp, p.coefficients)
- }
-}
-
-func TestInterpolate_Rand(t *testing.T) {
- for i := 0; i < 256; i++ {
- p, err := makePolynomial(uint8(i), 2)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- x_vals := []uint8{1, 2, 3}
- y_vals := []uint8{p.evaluate(1), p.evaluate(2), p.evaluate(3)}
- out := interpolatePolynomial(x_vals, y_vals, 0)
- if out != uint8(i) {
- t.Fatalf("Bad: %v %d", out, i)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/shamir/tables.go b/vendor/github.com/hashicorp/vault/shamir/tables.go
deleted file mode 100644
index 76c245e..0000000
--- a/vendor/github.com/hashicorp/vault/shamir/tables.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package shamir
-
-// Tables taken from http://www.samiam.org/galois.html
-// They use 0xe5 (229) as the generator
-
-var (
- // logTable provides the log(X)/log(g) at each index X
- logTable = [256]uint8{
- 0x00, 0xff, 0xc8, 0x08, 0x91, 0x10, 0xd0, 0x36,
- 0x5a, 0x3e, 0xd8, 0x43, 0x99, 0x77, 0xfe, 0x18,
- 0x23, 0x20, 0x07, 0x70, 0xa1, 0x6c, 0x0c, 0x7f,
- 0x62, 0x8b, 0x40, 0x46, 0xc7, 0x4b, 0xe0, 0x0e,
- 0xeb, 0x16, 0xe8, 0xad, 0xcf, 0xcd, 0x39, 0x53,
- 0x6a, 0x27, 0x35, 0x93, 0xd4, 0x4e, 0x48, 0xc3,
- 0x2b, 0x79, 0x54, 0x28, 0x09, 0x78, 0x0f, 0x21,
- 0x90, 0x87, 0x14, 0x2a, 0xa9, 0x9c, 0xd6, 0x74,
- 0xb4, 0x7c, 0xde, 0xed, 0xb1, 0x86, 0x76, 0xa4,
- 0x98, 0xe2, 0x96, 0x8f, 0x02, 0x32, 0x1c, 0xc1,
- 0x33, 0xee, 0xef, 0x81, 0xfd, 0x30, 0x5c, 0x13,
- 0x9d, 0x29, 0x17, 0xc4, 0x11, 0x44, 0x8c, 0x80,
- 0xf3, 0x73, 0x42, 0x1e, 0x1d, 0xb5, 0xf0, 0x12,
- 0xd1, 0x5b, 0x41, 0xa2, 0xd7, 0x2c, 0xe9, 0xd5,
- 0x59, 0xcb, 0x50, 0xa8, 0xdc, 0xfc, 0xf2, 0x56,
- 0x72, 0xa6, 0x65, 0x2f, 0x9f, 0x9b, 0x3d, 0xba,
- 0x7d, 0xc2, 0x45, 0x82, 0xa7, 0x57, 0xb6, 0xa3,
- 0x7a, 0x75, 0x4f, 0xae, 0x3f, 0x37, 0x6d, 0x47,
- 0x61, 0xbe, 0xab, 0xd3, 0x5f, 0xb0, 0x58, 0xaf,
- 0xca, 0x5e, 0xfa, 0x85, 0xe4, 0x4d, 0x8a, 0x05,
- 0xfb, 0x60, 0xb7, 0x7b, 0xb8, 0x26, 0x4a, 0x67,
- 0xc6, 0x1a, 0xf8, 0x69, 0x25, 0xb3, 0xdb, 0xbd,
- 0x66, 0xdd, 0xf1, 0xd2, 0xdf, 0x03, 0x8d, 0x34,
- 0xd9, 0x92, 0x0d, 0x63, 0x55, 0xaa, 0x49, 0xec,
- 0xbc, 0x95, 0x3c, 0x84, 0x0b, 0xf5, 0xe6, 0xe7,
- 0xe5, 0xac, 0x7e, 0x6e, 0xb9, 0xf9, 0xda, 0x8e,
- 0x9a, 0xc9, 0x24, 0xe1, 0x0a, 0x15, 0x6b, 0x3a,
- 0xa0, 0x51, 0xf4, 0xea, 0xb2, 0x97, 0x9e, 0x5d,
- 0x22, 0x88, 0x94, 0xce, 0x19, 0x01, 0x71, 0x4c,
- 0xa5, 0xe3, 0xc5, 0x31, 0xbb, 0xcc, 0x1f, 0x2d,
- 0x3b, 0x52, 0x6f, 0xf6, 0x2e, 0x89, 0xf7, 0xc0,
- 0x68, 0x1b, 0x64, 0x04, 0x06, 0xbf, 0x83, 0x38}
-
- // expTable provides the anti-log or exponentiation value
- // for the equivalent index
- expTable = [256]uint8{
- 0x01, 0xe5, 0x4c, 0xb5, 0xfb, 0x9f, 0xfc, 0x12,
- 0x03, 0x34, 0xd4, 0xc4, 0x16, 0xba, 0x1f, 0x36,
- 0x05, 0x5c, 0x67, 0x57, 0x3a, 0xd5, 0x21, 0x5a,
- 0x0f, 0xe4, 0xa9, 0xf9, 0x4e, 0x64, 0x63, 0xee,
- 0x11, 0x37, 0xe0, 0x10, 0xd2, 0xac, 0xa5, 0x29,
- 0x33, 0x59, 0x3b, 0x30, 0x6d, 0xef, 0xf4, 0x7b,
- 0x55, 0xeb, 0x4d, 0x50, 0xb7, 0x2a, 0x07, 0x8d,
- 0xff, 0x26, 0xd7, 0xf0, 0xc2, 0x7e, 0x09, 0x8c,
- 0x1a, 0x6a, 0x62, 0x0b, 0x5d, 0x82, 0x1b, 0x8f,
- 0x2e, 0xbe, 0xa6, 0x1d, 0xe7, 0x9d, 0x2d, 0x8a,
- 0x72, 0xd9, 0xf1, 0x27, 0x32, 0xbc, 0x77, 0x85,
- 0x96, 0x70, 0x08, 0x69, 0x56, 0xdf, 0x99, 0x94,
- 0xa1, 0x90, 0x18, 0xbb, 0xfa, 0x7a, 0xb0, 0xa7,
- 0xf8, 0xab, 0x28, 0xd6, 0x15, 0x8e, 0xcb, 0xf2,
- 0x13, 0xe6, 0x78, 0x61, 0x3f, 0x89, 0x46, 0x0d,
- 0x35, 0x31, 0x88, 0xa3, 0x41, 0x80, 0xca, 0x17,
- 0x5f, 0x53, 0x83, 0xfe, 0xc3, 0x9b, 0x45, 0x39,
- 0xe1, 0xf5, 0x9e, 0x19, 0x5e, 0xb6, 0xcf, 0x4b,
- 0x38, 0x04, 0xb9, 0x2b, 0xe2, 0xc1, 0x4a, 0xdd,
- 0x48, 0x0c, 0xd0, 0x7d, 0x3d, 0x58, 0xde, 0x7c,
- 0xd8, 0x14, 0x6b, 0x87, 0x47, 0xe8, 0x79, 0x84,
- 0x73, 0x3c, 0xbd, 0x92, 0xc9, 0x23, 0x8b, 0x97,
- 0x95, 0x44, 0xdc, 0xad, 0x40, 0x65, 0x86, 0xa2,
- 0xa4, 0xcc, 0x7f, 0xec, 0xc0, 0xaf, 0x91, 0xfd,
- 0xf7, 0x4f, 0x81, 0x2f, 0x5b, 0xea, 0xa8, 0x1c,
- 0x02, 0xd1, 0x98, 0x71, 0xed, 0x25, 0xe3, 0x24,
- 0x06, 0x68, 0xb3, 0x93, 0x2c, 0x6f, 0x3e, 0x6c,
- 0x0a, 0xb8, 0xce, 0xae, 0x74, 0xb1, 0x42, 0xb4,
- 0x1e, 0xd3, 0x49, 0xe9, 0x9c, 0xc8, 0xc6, 0xc7,
- 0x22, 0x6e, 0xdb, 0x20, 0xbf, 0x43, 0x51, 0x52,
- 0x66, 0xb2, 0x76, 0x60, 0xda, 0xc5, 0xf3, 0xf6,
- 0xaa, 0xcd, 0x9a, 0xa0, 0x75, 0x54, 0x0e, 0x01}
-)
diff --git a/vendor/github.com/hashicorp/vault/shamir/tables_test.go b/vendor/github.com/hashicorp/vault/shamir/tables_test.go
deleted file mode 100644
index 81aa983..0000000
--- a/vendor/github.com/hashicorp/vault/shamir/tables_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package shamir
-
-import "testing"
-
-func TestTables(t *testing.T) {
- for i := 1; i < 256; i++ {
- logV := logTable[i]
- expV := expTable[logV]
- if expV != uint8(i) {
- t.Fatalf("bad: %d log: %d exp: %d", i, logV, expV)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/README.md b/vendor/github.com/hashicorp/vault/terraform/aws/README.md
deleted file mode 100644
index cd9d8f4..0000000
--- a/vendor/github.com/hashicorp/vault/terraform/aws/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Deploy Vault to AWS
-
-This folder contains a Terraform module for deploying Vault to AWS
-(within a VPC). It can be used as-is or can be modified to work in your
-scenario, but should serve as a strong starting point for deploying Vault.
-
-See `variables.tf` for a full reference to the parameters that this module
-takes and their descriptions.
diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/main.tf b/vendor/github.com/hashicorp/vault/terraform/aws/main.tf
deleted file mode 100644
index 279b0bf..0000000
--- a/vendor/github.com/hashicorp/vault/terraform/aws/main.tf
+++ /dev/null
@@ -1,140 +0,0 @@
-resource "template_file" "install" {
- template = "${file("${path.module}/scripts/install.sh.tpl")}"
-
- vars {
- download_url = "${var.download-url}"
- config = "${var.config}"
- extra-install = "${var.extra-install}"
- }
-}
-
-// We launch Vault into an ASG so that it can properly bring them up for us.
-resource "aws_autoscaling_group" "vault" {
- name = "vault - ${aws_launch_configuration.vault.name}"
- launch_configuration = "${aws_launch_configuration.vault.name}"
- availability_zones = ["${split(",", var.availability-zones)}"]
- min_size = "${var.nodes}"
- max_size = "${var.nodes}"
- desired_capacity = "${var.nodes}"
- health_check_grace_period = 15
- health_check_type = "EC2"
- vpc_zone_identifier = ["${split(",", var.subnets)}"]
- load_balancers = ["${aws_elb.vault.id}"]
-
- tag {
- key = "Name"
- value = "vault"
- propagate_at_launch = true
- }
-}
-
-resource "aws_launch_configuration" "vault" {
- image_id = "${var.ami}"
- instance_type = "${var.instance_type}"
- key_name = "${var.key-name}"
- security_groups = ["${aws_security_group.vault.id}"]
- user_data = "${template_file.install.rendered}"
-}
-
-// Security group for Vault allows SSH and HTTP access (via "tcp" in
-// case TLS is used)
-resource "aws_security_group" "vault" {
- name = "vault"
- description = "Vault servers"
- vpc_id = "${var.vpc-id}"
-}
-
-resource "aws_security_group_rule" "vault-ssh" {
- security_group_id = "${aws_security_group.vault.id}"
- type = "ingress"
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = ["0.0.0.0/0"]
-}
-
-// This rule allows Vault HTTP API access to individual nodes, since each will
-// need to be addressed individually for unsealing.
-resource "aws_security_group_rule" "vault-http-api" {
- security_group_id = "${aws_security_group.vault.id}"
- type = "ingress"
- from_port = 8200
- to_port = 8200
- protocol = "tcp"
- cidr_blocks = ["0.0.0.0/0"]
-}
-
-resource "aws_security_group_rule" "vault-egress" {
- security_group_id = "${aws_security_group.vault.id}"
- type = "egress"
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
-}
-
-// Launch the ELB that is serving Vault. This has proper health checks
-// to only serve healthy, unsealed Vaults.
-resource "aws_elb" "vault" {
- name = "vault"
- connection_draining = true
- connection_draining_timeout = 400
- internal = true
- subnets = ["${split(",", var.subnets)}"]
- security_groups = ["${aws_security_group.elb.id}"]
-
- listener {
- instance_port = 8200
- instance_protocol = "tcp"
- lb_port = 80
- lb_protocol = "tcp"
- }
-
- listener {
- instance_port = 8200
- instance_protocol = "tcp"
- lb_port = 443
- lb_protocol = "tcp"
- }
-
- health_check {
- healthy_threshold = 2
- unhealthy_threshold = 3
- timeout = 5
- target = "${var.elb-health-check}"
- interval = 15
- }
-}
-
-resource "aws_security_group" "elb" {
- name = "vault-elb"
- description = "Vault ELB"
- vpc_id = "${var.vpc-id}"
-}
-
-resource "aws_security_group_rule" "vault-elb-http" {
- security_group_id = "${aws_security_group.elb.id}"
- type = "ingress"
- from_port = 80
- to_port = 80
- protocol = "tcp"
- cidr_blocks = ["0.0.0.0/0"]
-}
-
-resource "aws_security_group_rule" "vault-elb-https" {
- security_group_id = "${aws_security_group.elb.id}"
- type = "ingress"
- from_port = 443
- to_port = 443
- protocol = "tcp"
- cidr_blocks = ["0.0.0.0/0"]
-}
-
-resource "aws_security_group_rule" "vault-elb-egress" {
- security_group_id = "${aws_security_group.elb.id}"
- type = "egress"
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
-}
diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/outputs.tf b/vendor/github.com/hashicorp/vault/terraform/aws/outputs.tf
deleted file mode 100644
index 392d7af..0000000
--- a/vendor/github.com/hashicorp/vault/terraform/aws/outputs.tf
+++ /dev/null
@@ -1,13 +0,0 @@
-output "address" {
- value = "${aws_elb.vault.dns_name}"
-}
-
-// Can be used to add additional SG rules to Vault instances.
-output "vault_security_group" {
- value = "${aws_security_group.vault.id}"
-}
-
-// Can be used to add additional SG rules to the Vault ELB.
-output "elb_security_group" {
- value = "${aws_security_group.elb.id}"
-}
diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/scripts/install.sh.tpl b/vendor/github.com/hashicorp/vault/terraform/aws/scripts/install.sh.tpl
deleted file mode 100644
index 03296b4..0000000
--- a/vendor/github.com/hashicorp/vault/terraform/aws/scripts/install.sh.tpl
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# Install packages
-sudo apt-get update -y
-sudo apt-get install -y curl unzip
-
-# Download Vault into some temporary directory
-curl -L "${download_url}" > /tmp/vault.zip
-
-# Unzip it
-cd /tmp
-sudo unzip vault.zip
-sudo mv vault /usr/local/bin
-sudo chmod 0755 /usr/local/bin/vault
-sudo chown root:root /usr/local/bin/vault
-
-# Setup the configuration
-cat </tmp/vault-config
-${config}
-EOF
-sudo mv /tmp/vault-config /usr/local/etc/vault-config.json
-
-# Setup the init script
-cat </tmp/upstart
-description "Vault server"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-respawn
-
-script
- if [ -f "/etc/service/vault" ]; then
- . /etc/service/vault
- fi
-
- # Make sure to use all our CPUs, because Vault can block a scheduler thread
- export GOMAXPROCS=`nproc`
-
- exec /usr/local/bin/vault server \
- -config="/usr/local/etc/vault-config.json" \
- \$${VAULT_FLAGS} \
- >>/var/log/vault.log 2>&1
-end script
-EOF
-sudo mv /tmp/upstart /etc/init/vault.conf
-
-# Extra install steps (if any)
-${extra-install}
-
-# Start Vault
-sudo start vault
diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf b/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf
deleted file mode 100644
index cef4002..0000000
--- a/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf
+++ /dev/null
@@ -1,59 +0,0 @@
-//-------------------------------------------------------------------
-// Vault settings
-//-------------------------------------------------------------------
-
-variable "download-url" {
- default = "https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_linux_amd64.zip"
- description = "URL to download Vault"
-}
-
-variable "config" {
- description = "Configuration (text) for Vault"
-}
-
-variable "extra-install" {
- default = ""
- description = "Extra commands to run in the install script"
-}
-
-//-------------------------------------------------------------------
-// AWS settings
-//-------------------------------------------------------------------
-
-variable "ami" {
- default = "ami-7eb2a716"
- description = "AMI for Vault instances"
-}
-
-variable "availability-zones" {
- default = "us-east-1a,us-east-1b"
- description = "Availability zones for launching the Vault instances"
-}
-
-variable "elb-health-check" {
- default = "HTTP:8200/v1/sys/health"
- description = "Health check for Vault servers"
-}
-
-variable "instance_type" {
- default = "m3.medium"
- description = "Instance type for Vault instances"
-}
-
-variable "key-name" {
- default = "default"
- description = "SSH key name for Vault instances"
-}
-
-variable "nodes" {
- default = "2"
- description = "number of Vault instances"
-}
-
-variable "subnets" {
- description = "list of subnets to launch Vault within"
-}
-
-variable "vpc-id" {
- description = "VPC ID"
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/acl.go b/vendor/github.com/hashicorp/vault/vault/acl.go
deleted file mode 100644
index 7360178..0000000
--- a/vendor/github.com/hashicorp/vault/vault/acl.go
+++ /dev/null
@@ -1,369 +0,0 @@
-package vault
-
-import (
- "reflect"
- "strings"
-
- "github.com/armon/go-radix"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// ACL is used to wrap a set of policies to provide
-// an efficient interface for access control.
-type ACL struct {
- // exactRules contains the path policies that are exact
- exactRules *radix.Tree
-
- // globRules contains the path policies that glob
- globRules *radix.Tree
-
- // root is enabled if the "root" named policy is present.
- root bool
-}
-
-// New is used to construct a policy based ACL from a set of policies.
-func NewACL(policies []*Policy) (*ACL, error) {
- // Initialize
- a := &ACL{
- exactRules: radix.New(),
- globRules: radix.New(),
- root: false,
- }
-
- // Inject each policy
- for _, policy := range policies {
- // Ignore a nil policy object
- if policy == nil {
- continue
- }
- // Check if this is root
- if policy.Name == "root" {
- a.root = true
- }
- for _, pc := range policy.Paths {
- // Check which tree to use
- tree := a.exactRules
- if pc.Glob {
- tree = a.globRules
- }
-
- // Check for an existing policy
- raw, ok := tree.Get(pc.Prefix)
- if !ok {
- clonedPerms, err := pc.Permissions.Clone()
- if err != nil {
- return nil, errwrap.Wrapf("error cloning ACL permissions: {{err}}", err)
- }
- tree.Insert(pc.Prefix, clonedPerms)
- continue
- }
-
- // these are the ones already in the tree
- existingPerms := raw.(*Permissions)
-
- switch {
- case existingPerms.CapabilitiesBitmap&DenyCapabilityInt > 0:
- // If we are explicitly denied in the existing capability set,
- // don't save anything else
- continue
-
- case pc.Permissions.CapabilitiesBitmap&DenyCapabilityInt > 0:
- // If this new policy explicitly denies, only save the deny value
- existingPerms.CapabilitiesBitmap = DenyCapabilityInt
- existingPerms.AllowedParameters = nil
- existingPerms.DeniedParameters = nil
- goto INSERT
-
- default:
- // Insert the capabilities in this new policy into the existing
- // value
- existingPerms.CapabilitiesBitmap = existingPerms.CapabilitiesBitmap | pc.Permissions.CapabilitiesBitmap
- }
-
- // Note: In these stanzas, we're preferring minimum lifetimes. So
- // we take the lesser of two specified max values, or we take the
- // lesser of two specified min values, the idea being, allowing
- // token lifetime to be minimum possible.
- //
- // If we have an existing max, and we either don't have a current
- // max, or the current is greater than the previous, use the
- // existing.
- if pc.Permissions.MaxWrappingTTL > 0 &&
- (existingPerms.MaxWrappingTTL == 0 ||
- pc.Permissions.MaxWrappingTTL < existingPerms.MaxWrappingTTL) {
- existingPerms.MaxWrappingTTL = pc.Permissions.MaxWrappingTTL
- }
- // If we have an existing min, and we either don't have a current
- // min, or the current is greater than the previous, use the
- // existing
- if pc.Permissions.MinWrappingTTL > 0 &&
- (existingPerms.MinWrappingTTL == 0 ||
- pc.Permissions.MinWrappingTTL < existingPerms.MinWrappingTTL) {
- existingPerms.MinWrappingTTL = pc.Permissions.MinWrappingTTL
- }
-
- if len(pc.Permissions.AllowedParameters) > 0 {
- if existingPerms.AllowedParameters == nil {
- existingPerms.AllowedParameters = pc.Permissions.AllowedParameters
- } else {
- for key, value := range pc.Permissions.AllowedParameters {
- pcValue, ok := existingPerms.AllowedParameters[key]
- // If an empty array exist it should overwrite any other
- // value.
- if len(value) == 0 || (ok && len(pcValue) == 0) {
- existingPerms.AllowedParameters[key] = []interface{}{}
- } else {
- // Merge the two maps, appending values on key conflict.
- existingPerms.AllowedParameters[key] = append(value, existingPerms.AllowedParameters[key]...)
- }
- }
- }
- }
-
- if len(pc.Permissions.DeniedParameters) > 0 {
- if existingPerms.DeniedParameters == nil {
- existingPerms.DeniedParameters = pc.Permissions.DeniedParameters
- } else {
- for key, value := range pc.Permissions.DeniedParameters {
- pcValue, ok := existingPerms.DeniedParameters[key]
- // If an empty array exist it should overwrite any other
- // value.
- if len(value) == 0 || (ok && len(pcValue) == 0) {
- existingPerms.DeniedParameters[key] = []interface{}{}
- } else {
- // Merge the two maps, appending values on key conflict.
- existingPerms.DeniedParameters[key] = append(value, existingPerms.DeniedParameters[key]...)
- }
- }
- }
- }
-
- INSERT:
- tree.Insert(pc.Prefix, existingPerms)
-
- }
- }
- return a, nil
-}
-
-func (a *ACL) Capabilities(path string) (pathCapabilities []string) {
- // Fast-path root
- if a.root {
- return []string{RootCapability}
- }
-
- // Find an exact matching rule, look for glob if no match
- var capabilities uint32
- raw, ok := a.exactRules.Get(path)
-
- if ok {
- perm := raw.(*Permissions)
- capabilities = perm.CapabilitiesBitmap
- goto CHECK
- }
-
- // Find a glob rule, default deny if no match
- _, raw, ok = a.globRules.LongestPrefix(path)
- if !ok {
- return []string{DenyCapability}
- } else {
- perm := raw.(*Permissions)
- capabilities = perm.CapabilitiesBitmap
- }
-
-CHECK:
- if capabilities&SudoCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, SudoCapability)
- }
- if capabilities&ReadCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, ReadCapability)
- }
- if capabilities&ListCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, ListCapability)
- }
- if capabilities&UpdateCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, UpdateCapability)
- }
- if capabilities&DeleteCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, DeleteCapability)
- }
- if capabilities&CreateCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, CreateCapability)
- }
-
- // If "deny" is explicitly set or if the path has no capabilities at all,
- // set the path capabilities to "deny"
- if capabilities&DenyCapabilityInt > 0 || len(pathCapabilities) == 0 {
- pathCapabilities = []string{DenyCapability}
- }
- return
-}
-
-// AllowOperation is used to check if the given operation is permitted. The
-// first bool indicates if an op is allowed, the second whether sudo priviliges
-// exist for that op and path.
-func (a *ACL) AllowOperation(req *logical.Request) (bool, bool) {
- // Fast-path root
- if a.root {
- return true, true
- }
- op := req.Operation
- path := req.Path
-
- // Help is always allowed
- if op == logical.HelpOperation {
- return true, false
- }
-
- var permissions *Permissions
-
- // Find an exact matching rule, look for glob if no match
- var capabilities uint32
- raw, ok := a.exactRules.Get(path)
- if ok {
- permissions = raw.(*Permissions)
- capabilities = permissions.CapabilitiesBitmap
- goto CHECK
- }
-
- // Find a glob rule, default deny if no match
- _, raw, ok = a.globRules.LongestPrefix(path)
- if !ok {
- return false, false
- } else {
- permissions = raw.(*Permissions)
- capabilities = permissions.CapabilitiesBitmap
- }
-
-CHECK:
- // Check if the minimum permissions are met
- // If "deny" has been explicitly set, only deny will be in the map, so we
- // only need to check for the existence of other values
- sudo := capabilities&SudoCapabilityInt > 0
- operationAllowed := false
- switch op {
- case logical.ReadOperation:
- operationAllowed = capabilities&ReadCapabilityInt > 0
- case logical.ListOperation:
- operationAllowed = capabilities&ListCapabilityInt > 0
- case logical.UpdateOperation:
- operationAllowed = capabilities&UpdateCapabilityInt > 0
- case logical.DeleteOperation:
- operationAllowed = capabilities&DeleteCapabilityInt > 0
- case logical.CreateOperation:
- operationAllowed = capabilities&CreateCapabilityInt > 0
-
- // These three re-use UpdateCapabilityInt since that's the most appropriate
- // capability/operation mapping
- case logical.RevokeOperation, logical.RenewOperation, logical.RollbackOperation:
- operationAllowed = capabilities&UpdateCapabilityInt > 0
-
- default:
- return false, false
- }
-
- if !operationAllowed {
- return false, sudo
- }
-
- if permissions.MaxWrappingTTL > 0 {
- if req.WrapInfo == nil || req.WrapInfo.TTL > permissions.MaxWrappingTTL {
- return false, sudo
- }
- }
- if permissions.MinWrappingTTL > 0 {
- if req.WrapInfo == nil || req.WrapInfo.TTL < permissions.MinWrappingTTL {
- return false, sudo
- }
- }
- // This situation can happen because of merging, even though in a single
- // path statement we check on ingress
- if permissions.MinWrappingTTL != 0 &&
- permissions.MaxWrappingTTL != 0 &&
- permissions.MaxWrappingTTL < permissions.MinWrappingTTL {
- return false, sudo
- }
-
- // Only check parameter permissions for operations that can modify
- // parameters.
- if op == logical.UpdateOperation || op == logical.CreateOperation {
- // If there are no data fields, allow
- if len(req.Data) == 0 {
- return true, sudo
- }
-
- if len(permissions.DeniedParameters) == 0 {
- goto ALLOWED_PARAMETERS
- }
-
- // Check if all parameters have been denied
- if _, ok := permissions.DeniedParameters["*"]; ok {
- return false, sudo
- }
-
- for parameter, value := range req.Data {
- // Check if parameter has been explictly denied
- if valueSlice, ok := permissions.DeniedParameters[strings.ToLower(parameter)]; ok {
- // If the value exists in denied values slice, deny
- if valueInParameterList(value, valueSlice) {
- return false, sudo
- }
- }
- }
-
- ALLOWED_PARAMETERS:
- // If we don't have any allowed parameters set, allow
- if len(permissions.AllowedParameters) == 0 {
- return true, sudo
- }
-
- _, allowedAll := permissions.AllowedParameters["*"]
- if len(permissions.AllowedParameters) == 1 && allowedAll {
- return true, sudo
- }
-
- for parameter, value := range req.Data {
- valueSlice, ok := permissions.AllowedParameters[strings.ToLower(parameter)]
- // Requested parameter is not in allowed list
- if !ok && !allowedAll {
- return false, sudo
- }
-
- // If the value doesn't exists in the allowed values slice,
- // deny
- if ok && !valueInParameterList(value, valueSlice) {
- return false, sudo
- }
- }
- }
-
- return true, sudo
-}
-
-func valueInParameterList(v interface{}, list []interface{}) bool {
- // Empty list is equivalent to the item always existing in the list
- if len(list) == 0 {
- return true
- }
-
- return valueInSlice(v, list)
-}
-
-func valueInSlice(v interface{}, list []interface{}) bool {
- for _, el := range list {
- if reflect.TypeOf(el).String() == "string" && reflect.TypeOf(v).String() == "string" {
- item := el.(string)
- val := v.(string)
-
- if strutil.GlobbedStringsMatch(item, val) {
- return true
- }
- } else if reflect.DeepEqual(el, v) {
- return true
- }
- }
-
- return false
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/acl_test.go b/vendor/github.com/hashicorp/vault/vault/acl_test.go
deleted file mode 100644
index 638fed6..0000000
--- a/vendor/github.com/hashicorp/vault/vault/acl_test.go
+++ /dev/null
@@ -1,765 +0,0 @@
-package vault
-
-import (
- "reflect"
- "sync"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestACL_Capabilities(t *testing.T) {
- // Create the root policy ACL
- policy := []*Policy{&Policy{Name: "root"}}
- acl, err := NewACL(policy)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- actual := acl.Capabilities("any/path")
- expected := []string{"root"}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
- }
-
- policies, err := Parse(aclPolicy)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- acl, err = NewACL([]*Policy{policies})
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- actual = acl.Capabilities("dev")
- expected = []string{"deny"}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: path:%s\ngot\n%#v\nexpected\n%#v\n", "deny", actual, expected)
- }
-
- actual = acl.Capabilities("dev/")
- expected = []string{"sudo", "read", "list", "update", "delete", "create"}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: path:%s\ngot\n%#v\nexpected\n%#v\n", "dev/", actual, expected)
- }
-
- actual = acl.Capabilities("stage/aws/test")
- expected = []string{"sudo", "read", "list", "update"}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: path:%s\ngot\n%#v\nexpected\n%#v\n", "stage/aws/test", actual, expected)
- }
-
-}
-
-func TestACL_Root(t *testing.T) {
- // Create the root policy ACL
- policy := []*Policy{&Policy{Name: "root"}}
- acl, err := NewACL(policy)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- request := new(logical.Request)
- request.Operation = logical.UpdateOperation
- request.Path = "sys/mount/foo"
- allowed, rootPrivs := acl.AllowOperation(request)
- if !rootPrivs {
- t.Fatalf("expected root")
- }
- if !allowed {
- t.Fatalf("expected permissions")
- }
-}
-
-func TestACL_Single(t *testing.T) {
- policy, err := Parse(aclPolicy)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- acl, err := NewACL([]*Policy{policy})
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Type of operation is not important here as we only care about checking
- // sudo/root
- request := new(logical.Request)
- request.Operation = logical.ReadOperation
- request.Path = "sys/mount/foo"
- _, rootPrivs := acl.AllowOperation(request)
- if rootPrivs {
- t.Fatalf("unexpected root")
- }
-
- type tcase struct {
- op logical.Operation
- path string
- allowed bool
- rootPrivs bool
- }
- tcases := []tcase{
- {logical.ReadOperation, "root", false, false},
- {logical.HelpOperation, "root", true, false},
-
- {logical.ReadOperation, "dev/foo", true, true},
- {logical.UpdateOperation, "dev/foo", true, true},
-
- {logical.DeleteOperation, "stage/foo", true, false},
- {logical.ListOperation, "stage/aws/foo", true, true},
- {logical.UpdateOperation, "stage/aws/foo", true, true},
- {logical.UpdateOperation, "stage/aws/policy/foo", true, true},
-
- {logical.DeleteOperation, "prod/foo", false, false},
- {logical.UpdateOperation, "prod/foo", false, false},
- {logical.ReadOperation, "prod/foo", true, false},
- {logical.ListOperation, "prod/foo", true, false},
- {logical.ReadOperation, "prod/aws/foo", false, false},
-
- {logical.ReadOperation, "foo/bar", true, true},
- {logical.ListOperation, "foo/bar", false, true},
- {logical.UpdateOperation, "foo/bar", false, true},
- {logical.CreateOperation, "foo/bar", true, true},
- }
-
- for _, tc := range tcases {
- request := new(logical.Request)
- request.Operation = tc.op
- request.Path = tc.path
- allowed, rootPrivs := acl.AllowOperation(request)
- if allowed != tc.allowed {
- t.Fatalf("bad: case %#v: %v, %v", tc, allowed, rootPrivs)
- }
- if rootPrivs != tc.rootPrivs {
- t.Fatalf("bad: case %#v: %v, %v", tc, allowed, rootPrivs)
- }
- }
-}
-
-func TestACL_Layered(t *testing.T) {
- policy1, err := Parse(aclPolicy)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- policy2, err := Parse(aclPolicy2)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- acl, err := NewACL([]*Policy{policy1, policy2})
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- testLayeredACL(t, acl)
-}
-
-func testLayeredACL(t *testing.T, acl *ACL) {
- // Type of operation is not important here as we only care about checking
- // sudo/root
- request := new(logical.Request)
- request.Operation = logical.ReadOperation
- request.Path = "sys/mount/foo"
- _, rootPrivs := acl.AllowOperation(request)
- if rootPrivs {
- t.Fatalf("unexpected root")
- }
-
- type tcase struct {
- op logical.Operation
- path string
- allowed bool
- rootPrivs bool
- }
- tcases := []tcase{
- {logical.ReadOperation, "root", false, false},
- {logical.HelpOperation, "root", true, false},
-
- {logical.ReadOperation, "dev/foo", true, true},
- {logical.UpdateOperation, "dev/foo", true, true},
- {logical.ReadOperation, "dev/hide/foo", false, false},
- {logical.UpdateOperation, "dev/hide/foo", false, false},
-
- {logical.DeleteOperation, "stage/foo", true, false},
- {logical.ListOperation, "stage/aws/foo", true, true},
- {logical.UpdateOperation, "stage/aws/foo", true, true},
- {logical.UpdateOperation, "stage/aws/policy/foo", false, false},
-
- {logical.DeleteOperation, "prod/foo", true, false},
- {logical.UpdateOperation, "prod/foo", true, false},
- {logical.ReadOperation, "prod/foo", true, false},
- {logical.ListOperation, "prod/foo", true, false},
- {logical.ReadOperation, "prod/aws/foo", false, false},
-
- {logical.ReadOperation, "sys/status", false, false},
- {logical.UpdateOperation, "sys/seal", true, true},
-
- {logical.ReadOperation, "foo/bar", false, false},
- {logical.ListOperation, "foo/bar", false, false},
- {logical.UpdateOperation, "foo/bar", false, false},
- {logical.CreateOperation, "foo/bar", false, false},
- }
-
- for _, tc := range tcases {
- request := new(logical.Request)
- request.Operation = tc.op
- request.Path = tc.path
- allowed, rootPrivs := acl.AllowOperation(request)
- if allowed != tc.allowed {
- t.Fatalf("bad: case %#v: %v, %v", tc, allowed, rootPrivs)
- }
- if rootPrivs != tc.rootPrivs {
- t.Fatalf("bad: case %#v: %v, %v", tc, allowed, rootPrivs)
- }
- }
-}
-
-func TestACL_PolicyMerge(t *testing.T) {
- policy, err := Parse(mergingPolicies)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- acl, err := NewACL([]*Policy{policy})
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- type tcase struct {
- path string
- minWrappingTTL *time.Duration
- maxWrappingTTL *time.Duration
- allowed map[string][]interface{}
- denied map[string][]interface{}
- }
-
- createDuration := func(seconds int) *time.Duration {
- ret := time.Duration(seconds) * time.Second
- return &ret
- }
-
- tcases := []tcase{
- {"foo/bar", nil, nil, nil, map[string][]interface{}{"zip": []interface{}{}, "baz": []interface{}{}}},
- {"hello/universe", createDuration(50), createDuration(200), map[string][]interface{}{"foo": []interface{}{}, "bar": []interface{}{}}, nil},
- {"allow/all", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil},
- {"allow/all1", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil},
- {"deny/all", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}},
- {"deny/all1", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}},
- {"value/merge", nil, nil, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}},
- {"value/empty", nil, nil, map[string][]interface{}{"empty": []interface{}{}}, map[string][]interface{}{"empty": []interface{}{}}},
- }
-
- for _, tc := range tcases {
- raw, ok := acl.exactRules.Get(tc.path)
- if !ok {
- t.Fatalf("Could not find acl entry for path %s", tc.path)
- }
-
- p := raw.(*Permissions)
- if !reflect.DeepEqual(tc.allowed, p.AllowedParameters) {
- t.Fatalf("Allowed paramaters did not match, Expected: %#v, Got: %#v", tc.allowed, p.AllowedParameters)
- }
- if !reflect.DeepEqual(tc.denied, p.DeniedParameters) {
- t.Fatalf("Denied paramaters did not match, Expected: %#v, Got: %#v", tc.denied, p.DeniedParameters)
- }
- if tc.minWrappingTTL != nil && *tc.minWrappingTTL != p.MinWrappingTTL {
- t.Fatalf("Min wrapping TTL did not match, Expected: %#v, Got: %#v", tc.minWrappingTTL, p.MinWrappingTTL)
- }
- if tc.minWrappingTTL != nil && *tc.maxWrappingTTL != p.MaxWrappingTTL {
- t.Fatalf("Max wrapping TTL did not match, Expected: %#v, Got: %#v", tc.maxWrappingTTL, p.MaxWrappingTTL)
- }
- }
-}
-
-func TestACL_AllowOperation(t *testing.T) {
- policy, err := Parse(permissionsPolicy)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- acl, err := NewACL([]*Policy{policy})
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- toperations := []logical.Operation{
- logical.UpdateOperation,
- logical.CreateOperation,
- }
- type tcase struct {
- path string
- wrappingTTL *time.Duration
- parameters []string
- allowed bool
- }
-
- createDuration := func(seconds int) *time.Duration {
- ret := time.Duration(seconds) * time.Second
- return &ret
- }
-
- tcases := []tcase{
- {"dev/ops", nil, []string{"zip"}, true},
- {"foo/bar", nil, []string{"zap"}, false},
- {"foo/bar", nil, []string{"zip"}, false},
- {"foo/bar", createDuration(50), []string{"zip"}, false},
- {"foo/bar", createDuration(450), []string{"zip"}, false},
- {"foo/bar", createDuration(350), []string{"zip"}, true},
- {"foo/baz", nil, []string{"hello"}, false},
- {"foo/baz", createDuration(50), []string{"hello"}, false},
- {"foo/baz", createDuration(450), []string{"hello"}, true},
- {"foo/baz", nil, []string{"zap"}, false},
- {"broken/phone", nil, []string{"steve"}, false},
- {"working/phone", nil, []string{""}, false},
- {"working/phone", createDuration(450), []string{""}, false},
- {"working/phone", createDuration(350), []string{""}, true},
- {"hello/world", nil, []string{"one"}, false},
- {"tree/fort", nil, []string{"one"}, true},
- {"tree/fort", nil, []string{"foo"}, false},
- {"fruit/apple", nil, []string{"pear"}, false},
- {"fruit/apple", nil, []string{"one"}, false},
- {"cold/weather", nil, []string{"four"}, true},
- {"var/aws", nil, []string{"cold", "warm", "kitty"}, false},
- }
-
- for _, tc := range tcases {
- request := logical.Request{Path: tc.path, Data: make(map[string]interface{})}
- for _, parameter := range tc.parameters {
- request.Data[parameter] = ""
- }
- if tc.wrappingTTL != nil {
- request.WrapInfo = &logical.RequestWrapInfo{
- TTL: *tc.wrappingTTL,
- }
- }
- for _, op := range toperations {
- request.Operation = op
- allowed, _ := acl.AllowOperation(&request)
- if allowed != tc.allowed {
- t.Fatalf("bad: case %#v: %v", tc, allowed)
- }
- }
- }
-}
-
-func TestACL_ValuePermissions(t *testing.T) {
- policy, err := Parse(valuePermissionsPolicy)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- acl, err := NewACL([]*Policy{policy})
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- toperations := []logical.Operation{
- logical.UpdateOperation,
- logical.CreateOperation,
- }
- type tcase struct {
- path string
- parameters []string
- values []interface{}
- allowed bool
- }
-
- tcases := []tcase{
- {"dev/ops", []string{"allow"}, []interface{}{"good"}, true},
- {"dev/ops", []string{"allow"}, []interface{}{"bad"}, false},
- {"foo/bar", []string{"deny"}, []interface{}{"bad"}, false},
- {"foo/bar", []string{"deny"}, []interface{}{"bad glob"}, false},
- {"foo/bar", []string{"deny"}, []interface{}{"good"}, true},
- {"foo/bar", []string{"allow"}, []interface{}{"good"}, true},
- {"foo/baz", []string{"aLLow"}, []interface{}{"good"}, true},
- {"foo/baz", []string{"deny"}, []interface{}{"bad"}, false},
- {"foo/baz", []string{"deny"}, []interface{}{"good"}, false},
- {"foo/baz", []string{"allow", "deny"}, []interface{}{"good", "bad"}, false},
- {"foo/baz", []string{"deny", "allow"}, []interface{}{"good", "bad"}, false},
- {"foo/baz", []string{"deNy", "allow"}, []interface{}{"bad", "good"}, false},
- {"foo/baz", []string{"aLLow"}, []interface{}{"bad"}, false},
- {"foo/baz", []string{"Neither"}, []interface{}{"bad"}, false},
- {"fizz/buzz", []string{"allow_multi"}, []interface{}{"good"}, true},
- {"fizz/buzz", []string{"allow_multi"}, []interface{}{"good1"}, true},
- {"fizz/buzz", []string{"allow_multi"}, []interface{}{"good2"}, true},
- {"fizz/buzz", []string{"allow_multi"}, []interface{}{"glob good2"}, false},
- {"fizz/buzz", []string{"allow_multi"}, []interface{}{"glob good3"}, true},
- {"fizz/buzz", []string{"allow_multi"}, []interface{}{"bad"}, false},
- {"fizz/buzz", []string{"allow_multi"}, []interface{}{"bad"}, false},
- {"fizz/buzz", []string{"allow_multi", "allow"}, []interface{}{"good1", "good"}, true},
- {"fizz/buzz", []string{"deny_multi"}, []interface{}{"bad2"}, false},
- {"fizz/buzz", []string{"deny_multi", "allow_multi"}, []interface{}{"good", "good2"}, false},
- // {"test/types", []string{"array"}, []interface{}{[1]string{"good"}}, true},
- {"test/types", []string{"map"}, []interface{}{map[string]interface{}{"good": "one"}}, true},
- {"test/types", []string{"map"}, []interface{}{map[string]interface{}{"bad": "one"}}, false},
- {"test/types", []string{"int"}, []interface{}{1}, true},
- {"test/types", []string{"int"}, []interface{}{3}, false},
- {"test/types", []string{"bool"}, []interface{}{false}, true},
- {"test/types", []string{"bool"}, []interface{}{true}, false},
- {"test/star", []string{"anything"}, []interface{}{true}, true},
- {"test/star", []string{"foo"}, []interface{}{true}, true},
- {"test/star", []string{"bar"}, []interface{}{false}, true},
- {"test/star", []string{"bar"}, []interface{}{true}, false},
- }
-
- for _, tc := range tcases {
- request := logical.Request{Path: tc.path, Data: make(map[string]interface{})}
- for i, parameter := range tc.parameters {
- request.Data[parameter] = tc.values[i]
- }
- for _, op := range toperations {
- request.Operation = op
- allowed, _ := acl.AllowOperation(&request)
- if allowed != tc.allowed {
- t.Fatalf("bad: case %#v: %v", tc, allowed)
- }
- }
- }
-}
-
-// NOTE: this test doesn't catch any races ATM
-func TestACL_CreationRace(t *testing.T) {
- policy, err := Parse(valuePermissionsPolicy)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- var wg sync.WaitGroup
- stopTime := time.Now().Add(20 * time.Second)
-
- for i := 0; i < 50; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- for {
- if time.Now().After(stopTime) {
- return
- }
- _, err := NewACL([]*Policy{policy})
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- }
- }()
- }
-
- wg.Wait()
-}
-
-var tokenCreationPolicy = `
-name = "tokenCreation"
-path "auth/token/create*" {
- capabilities = ["update", "create", "sudo"]
-}
-`
-
-var aclPolicy = `
-name = "DeV"
-path "dev/*" {
- policy = "sudo"
-}
-path "stage/*" {
- policy = "write"
-}
-path "stage/aws/*" {
- policy = "read"
- capabilities = ["update", "sudo"]
-}
-path "stage/aws/policy/*" {
- policy = "sudo"
-}
-path "prod/*" {
- policy = "read"
-}
-path "prod/aws/*" {
- policy = "deny"
-}
-path "sys/*" {
- policy = "deny"
-}
-path "foo/bar" {
- capabilities = ["read", "create", "sudo"]
-}
-`
-
-var aclPolicy2 = `
-name = "OpS"
-path "dev/hide/*" {
- policy = "deny"
-}
-path "stage/aws/policy/*" {
- policy = "deny"
- # This should have no effect
- capabilities = ["read", "update", "sudo"]
-}
-path "prod/*" {
- policy = "write"
-}
-path "sys/seal" {
- policy = "sudo"
-}
-path "foo/bar" {
- capabilities = ["deny"]
-}
-`
-
-//test merging
-var mergingPolicies = `
-name = "ops"
-path "foo/bar" {
- policy = "write"
- denied_parameters = {
- "baz" = []
- }
-}
-path "foo/bar" {
- policy = "write"
- denied_parameters = {
- "zip" = []
- }
-}
-path "hello/universe" {
- policy = "write"
- allowed_parameters = {
- "foo" = []
- }
- max_wrapping_ttl = 300
- min_wrapping_ttl = 100
-}
-path "hello/universe" {
- policy = "write"
- allowed_parameters = {
- "bar" = []
- }
- max_wrapping_ttl = 200
- min_wrapping_ttl = 50
-}
-path "allow/all" {
- policy = "write"
- allowed_parameters = {
- "test" = []
- "test1" = ["foo"]
- }
-}
-path "allow/all" {
- policy = "write"
- allowed_parameters = {
- "*" = []
- }
-}
-path "allow/all1" {
- policy = "write"
- allowed_parameters = {
- "*" = []
- }
-}
-path "allow/all1" {
- policy = "write"
- allowed_parameters = {
- "test" = []
- "test1" = ["foo"]
- }
-}
-path "deny/all" {
- policy = "write"
- denied_parameters = {
- "test" = []
- }
-}
-path "deny/all" {
- policy = "write"
- denied_parameters = {
- "*" = []
- }
-}
-path "deny/all1" {
- policy = "write"
- denied_parameters = {
- "*" = []
- }
-}
-path "deny/all1" {
- policy = "write"
- denied_parameters = {
- "test" = []
- }
-}
-path "value/merge" {
- policy = "write"
- allowed_parameters = {
- "test" = [1, 2]
- }
- denied_parameters = {
- "test" = [1, 2]
- }
-}
-path "value/merge" {
- policy = "write"
- allowed_parameters = {
- "test" = [3, 4]
- }
- denied_parameters = {
- "test" = [3, 4]
- }
-}
-path "value/empty" {
- policy = "write"
- allowed_parameters = {
- "empty" = []
- }
- denied_parameters = {
- "empty" = [1]
- }
-}
-path "value/empty" {
- policy = "write"
- allowed_parameters = {
- "empty" = [1]
- }
- denied_parameters = {
- "empty" = []
- }
-}
-`
-
-//allow operation testing
-var permissionsPolicy = `
-name = "dev"
-path "dev/*" {
- policy = "write"
-
- allowed_parameters = {
- "zip" = []
- }
-}
-path "foo/bar" {
- policy = "write"
- denied_parameters = {
- "zap" = []
- }
- min_wrapping_ttl = 300
- max_wrapping_ttl = 400
-}
-path "foo/baz" {
- policy = "write"
- allowed_parameters = {
- "hello" = []
- }
- denied_parameters = {
- "zap" = []
- }
- min_wrapping_ttl = 300
-}
-path "working/phone" {
- policy = "write"
- max_wrapping_ttl = 400
-}
-path "broken/phone" {
- policy = "write"
- allowed_parameters = {
- "steve" = []
- }
- denied_parameters = {
- "steve" = []
- }
-}
-path "hello/world" {
- policy = "write"
- allowed_parameters = {
- "*" = []
- }
- denied_parameters = {
- "*" = []
- }
-}
-path "tree/fort" {
- policy = "write"
- allowed_parameters = {
- "*" = []
- }
- denied_parameters = {
- "foo" = []
- }
-}
-path "fruit/apple" {
- policy = "write"
- allowed_parameters = {
- "pear" = []
- }
- denied_parameters = {
- "*" = []
- }
-}
-path "cold/weather" {
- policy = "write"
- allowed_parameters = {}
- denied_parameters = {}
-}
-path "var/aws" {
- policy = "write"
- allowed_parameters = {
- "*" = []
- }
- denied_parameters = {
- "soft" = []
- "warm" = []
- "kitty" = []
- }
-}
-`
-
-//allow operation testing
-var valuePermissionsPolicy = `
-name = "op"
-path "dev/*" {
- policy = "write"
-
- allowed_parameters = {
- "allow" = ["good"]
- }
-}
-path "foo/bar" {
- policy = "write"
- denied_parameters = {
- "deny" = ["bad*"]
- }
-}
-path "foo/baz" {
- policy = "write"
- allowed_parameters = {
- "ALLOW" = ["good"]
- }
- denied_parameters = {
- "dEny" = ["bad"]
- }
-}
-path "fizz/buzz" {
- policy = "write"
- allowed_parameters = {
- "allow_multi" = ["good", "good1", "good2", "*good3"]
- "allow" = ["good"]
- }
- denied_parameters = {
- "deny_multi" = ["bad", "bad1", "bad2"]
- }
-}
-path "test/types" {
- policy = "write"
- allowed_parameters = {
- "map" = [{"good" = "one"}]
- "int" = [1, 2]
- "bool" = [false]
- }
- denied_parameters = {
- }
-}
-path "test/star" {
- policy = "write"
- allowed_parameters = {
- "*" = []
- "foo" = []
- "bar" = [false]
- }
- denied_parameters = {
- }
-}
-`
diff --git a/vendor/github.com/hashicorp/vault/vault/audit.go b/vendor/github.com/hashicorp/vault/vault/audit.go
deleted file mode 100644
index fccf9aa..0000000
--- a/vendor/github.com/hashicorp/vault/vault/audit.go
+++ /dev/null
@@ -1,618 +0,0 @@
-package vault
-
-import (
- "crypto/sha256"
- "errors"
- "fmt"
- "strings"
- "sync"
- "time"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- // coreAuditConfigPath is used to store the audit configuration.
- // Audit configuration is protected within the Vault itself, which means it
- // can only be viewed or modified after an unseal.
- coreAuditConfigPath = "core/audit"
-
- // coreLocalAuditConfigPath is used to store audit information for local
- // (non-replicated) mounts
- coreLocalAuditConfigPath = "core/local-audit"
-
- // auditBarrierPrefix is the prefix to the UUID used in the
- // barrier view for the audit backends.
- auditBarrierPrefix = "audit/"
-
- // auditTableType is the value we expect to find for the audit table and
- // corresponding entries
- auditTableType = "audit"
-)
-
-var (
- // loadAuditFailed if loading audit tables encounters an error
- errLoadAuditFailed = errors.New("failed to setup audit table")
-)
-
-// enableAudit is used to enable a new audit backend
-func (c *Core) enableAudit(entry *MountEntry) error {
- // Ensure we end the path in a slash
- if !strings.HasSuffix(entry.Path, "/") {
- entry.Path += "/"
- }
-
- // Ensure there is a name
- if entry.Path == "/" {
- return fmt.Errorf("backend path must be specified")
- }
-
- // Update the audit table
- c.auditLock.Lock()
- defer c.auditLock.Unlock()
-
- // Look for matching name
- for _, ent := range c.audit.Entries {
- switch {
- // Existing is sql/mysql/ new is sql/ or
- // existing is sql/ and new is sql/mysql/
- case strings.HasPrefix(ent.Path, entry.Path):
- fallthrough
- case strings.HasPrefix(entry.Path, ent.Path):
- return fmt.Errorf("path already in use")
- }
- }
-
- // Generate a new UUID and view
- if entry.UUID == "" {
- entryUUID, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- entry.UUID = entryUUID
- }
- if entry.Accessor == "" {
- accessor, err := c.generateMountAccessor("audit_" + entry.Type)
- if err != nil {
- return err
- }
- entry.Accessor = accessor
- }
- viewPath := auditBarrierPrefix + entry.UUID + "/"
- view := NewBarrierView(c.barrier, viewPath)
-
- // Lookup the new backend
- backend, err := c.newAuditBackend(entry, view, entry.Options)
- if err != nil {
- return err
- }
- if backend == nil {
- return fmt.Errorf("nil audit backend of type %q returned from factory", entry.Type)
- }
-
- newTable := c.audit.shallowClone()
- newTable.Entries = append(newTable.Entries, entry)
- if err := c.persistAudit(newTable, entry.Local); err != nil {
- return errors.New("failed to update audit table")
- }
-
- c.audit = newTable
-
- // Register the backend
- c.auditBroker.Register(entry.Path, backend, view)
- if c.logger.IsInfo() {
- c.logger.Info("core: enabled audit backend", "path", entry.Path, "type", entry.Type)
- }
- return nil
-}
-
-// disableAudit is used to disable an existing audit backend
-func (c *Core) disableAudit(path string) (bool, error) {
- // Ensure we end the path in a slash
- if !strings.HasSuffix(path, "/") {
- path += "/"
- }
-
- // Remove the entry from the mount table
- c.auditLock.Lock()
- defer c.auditLock.Unlock()
-
- newTable := c.audit.shallowClone()
- entry := newTable.remove(path)
-
- // Ensure there was a match
- if entry == nil {
- return false, fmt.Errorf("no matching backend")
- }
-
- c.removeAuditReloadFunc(entry)
-
- // When unmounting all entries the JSON code will load back up from storage
- // as a nil slice, which kills tests...just set it nil explicitly
- if len(newTable.Entries) == 0 {
- newTable.Entries = nil
- }
-
- // Update the audit table
- if err := c.persistAudit(newTable, entry.Local); err != nil {
- return true, errors.New("failed to update audit table")
- }
-
- c.audit = newTable
-
- // Unmount the backend
- c.auditBroker.Deregister(path)
- if c.logger.IsInfo() {
- c.logger.Info("core: disabled audit backend", "path", path)
- }
-
- return true, nil
-}
-
-// loadAudits is invoked as part of postUnseal to load the audit table
-func (c *Core) loadAudits() error {
- auditTable := &MountTable{}
- localAuditTable := &MountTable{}
-
- // Load the existing audit table
- raw, err := c.barrier.Get(coreAuditConfigPath)
- if err != nil {
- c.logger.Error("core: failed to read audit table", "error", err)
- return errLoadAuditFailed
- }
- rawLocal, err := c.barrier.Get(coreLocalAuditConfigPath)
- if err != nil {
- c.logger.Error("core: failed to read local audit table", "error", err)
- return errLoadAuditFailed
- }
-
- c.auditLock.Lock()
- defer c.auditLock.Unlock()
-
- if raw != nil {
- if err := jsonutil.DecodeJSON(raw.Value, auditTable); err != nil {
- c.logger.Error("core: failed to decode audit table", "error", err)
- return errLoadAuditFailed
- }
- c.audit = auditTable
- }
- if rawLocal != nil {
- if err := jsonutil.DecodeJSON(rawLocal.Value, localAuditTable); err != nil {
- c.logger.Error("core: failed to decode local audit table", "error", err)
- return errLoadAuditFailed
- }
- c.audit.Entries = append(c.audit.Entries, localAuditTable.Entries...)
- }
-
- // Done if we have restored the audit table
- if c.audit != nil {
- needPersist := false
-
- // Upgrade to typed auth table
- if c.audit.Type == "" {
- c.audit.Type = auditTableType
- needPersist = true
- }
-
- // Upgrade to table-scoped entries
- for _, entry := range c.audit.Entries {
- if entry.Table == "" {
- entry.Table = c.audit.Type
- needPersist = true
- }
- if entry.Accessor == "" {
- accessor, err := c.generateMountAccessor("audit_" + entry.Type)
- if err != nil {
- return err
- }
- entry.Accessor = accessor
- needPersist = true
- }
- }
-
- if !needPersist {
- return nil
- }
- } else {
- c.audit = defaultAuditTable()
- }
-
- if err := c.persistAudit(c.audit, false); err != nil {
- return errLoadAuditFailed
- }
- return nil
-}
-
-// persistAudit is used to persist the audit table after modification
-func (c *Core) persistAudit(table *MountTable, localOnly bool) error {
- if table.Type != auditTableType {
- c.logger.Error("core: given table to persist has wrong type", "actual_type", table.Type, "expected_type", auditTableType)
- return fmt.Errorf("invalid table type given, not persisting")
- }
-
- for _, entry := range table.Entries {
- if entry.Table != table.Type {
- c.logger.Error("core: given entry to persist in audit table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
- return fmt.Errorf("invalid audit entry found, not persisting")
- }
- }
-
- nonLocalAudit := &MountTable{
- Type: auditTableType,
- }
-
- localAudit := &MountTable{
- Type: auditTableType,
- }
-
- for _, entry := range table.Entries {
- if entry.Local {
- localAudit.Entries = append(localAudit.Entries, entry)
- } else {
- nonLocalAudit.Entries = append(nonLocalAudit.Entries, entry)
- }
- }
-
- if !localOnly {
- // Marshal the table
- compressedBytes, err := jsonutil.EncodeJSONAndCompress(nonLocalAudit, nil)
- if err != nil {
- c.logger.Error("core: failed to encode and/or compress audit table", "error", err)
- return err
- }
-
- // Create an entry
- entry := &Entry{
- Key: coreAuditConfigPath,
- Value: compressedBytes,
- }
-
- // Write to the physical backend
- if err := c.barrier.Put(entry); err != nil {
- c.logger.Error("core: failed to persist audit table", "error", err)
- return err
- }
- }
-
- // Repeat with local audit
- compressedBytes, err := jsonutil.EncodeJSONAndCompress(localAudit, nil)
- if err != nil {
- c.logger.Error("core: failed to encode and/or compress local audit table", "error", err)
- return err
- }
-
- entry := &Entry{
- Key: coreLocalAuditConfigPath,
- Value: compressedBytes,
- }
-
- if err := c.barrier.Put(entry); err != nil {
- c.logger.Error("core: failed to persist local audit table", "error", err)
- return err
- }
-
- return nil
-}
-
-// setupAudit is invoked after we've loaded the audit able to
-// initialize the audit backends
-func (c *Core) setupAudits() error {
- broker := NewAuditBroker(c.logger)
-
- c.auditLock.Lock()
- defer c.auditLock.Unlock()
-
- var successCount int
-
- for _, entry := range c.audit.Entries {
- // Create a barrier view using the UUID
- viewPath := auditBarrierPrefix + entry.UUID + "/"
- view := NewBarrierView(c.barrier, viewPath)
-
- // Initialize the backend
- backend, err := c.newAuditBackend(entry, view, entry.Options)
- if err != nil {
- c.logger.Error("core: failed to create audit entry", "path", entry.Path, "error", err)
- continue
- }
- if backend == nil {
- c.logger.Error("core: created audit entry was nil", "path", entry.Path, "type", entry.Type)
- continue
- }
-
- // Mount the backend
- broker.Register(entry.Path, backend, view)
-
- successCount += 1
- }
-
- if len(c.audit.Entries) > 0 && successCount == 0 {
- return errLoadAuditFailed
- }
-
- c.auditBroker = broker
- return nil
-}
-
-// teardownAudit is used before we seal the vault to reset the audit
-// backends to their unloaded state. This is reversed by loadAudits.
-func (c *Core) teardownAudits() error {
- c.auditLock.Lock()
- defer c.auditLock.Unlock()
-
- if c.audit != nil {
- for _, entry := range c.audit.Entries {
- c.removeAuditReloadFunc(entry)
- }
- }
-
- c.audit = nil
- c.auditBroker = nil
- return nil
-}
-
-// removeAuditReloadFunc removes the reload func from the working set. The
-// audit lock needs to be held before calling this.
-func (c *Core) removeAuditReloadFunc(entry *MountEntry) {
- switch entry.Type {
- case "file":
- key := "audit_file|" + entry.Path
- c.reloadFuncsLock.Lock()
-
- if c.logger.IsDebug() {
- c.logger.Debug("audit: removing reload function", "path", entry.Path)
- }
-
- delete(c.reloadFuncs, key)
-
- c.reloadFuncsLock.Unlock()
- }
-}
-
-// newAuditBackend is used to create and configure a new audit backend by name
-func (c *Core) newAuditBackend(entry *MountEntry, view logical.Storage, conf map[string]string) (audit.Backend, error) {
- f, ok := c.auditBackends[entry.Type]
- if !ok {
- return nil, fmt.Errorf("unknown backend type: %s", entry.Type)
- }
- saltConfig := &salt.Config{
- HMAC: sha256.New,
- HMACType: "hmac-sha256",
- Location: salt.DefaultLocation,
- }
-
- be, err := f(&audit.BackendConfig{
- SaltView: view,
- SaltConfig: saltConfig,
- Config: conf,
- })
- if err != nil {
- return nil, err
- }
- if be == nil {
- return nil, fmt.Errorf("nil backend returned from %q factory function", entry.Type)
- }
-
- switch entry.Type {
- case "file":
- key := "audit_file|" + entry.Path
-
- c.reloadFuncsLock.Lock()
-
- if c.logger.IsDebug() {
- c.logger.Debug("audit: adding reload function", "path", entry.Path)
- }
-
- c.reloadFuncs[key] = append(c.reloadFuncs[key], func(map[string]interface{}) error {
- if c.logger.IsInfo() {
- c.logger.Info("audit: reloading file audit backend", "path", entry.Path)
- }
- return be.Reload()
- })
-
- c.reloadFuncsLock.Unlock()
- }
-
- return be, err
-}
-
-// defaultAuditTable creates a default audit table
-func defaultAuditTable() *MountTable {
- table := &MountTable{
- Type: auditTableType,
- }
- return table
-}
-
-type backendEntry struct {
- backend audit.Backend
- view *BarrierView
-}
-
-// AuditBroker is used to provide a single ingest interface to auditable
-// events given that multiple backends may be configured.
-type AuditBroker struct {
- sync.RWMutex
- backends map[string]backendEntry
- logger log.Logger
-}
-
-// NewAuditBroker creates a new audit broker
-func NewAuditBroker(log log.Logger) *AuditBroker {
- b := &AuditBroker{
- backends: make(map[string]backendEntry),
- logger: log,
- }
- return b
-}
-
-// Register is used to add new audit backend to the broker
-func (a *AuditBroker) Register(name string, b audit.Backend, v *BarrierView) {
- a.Lock()
- defer a.Unlock()
- a.backends[name] = backendEntry{
- backend: b,
- view: v,
- }
-}
-
-// Deregister is used to remove an audit backend from the broker
-func (a *AuditBroker) Deregister(name string) {
- a.Lock()
- defer a.Unlock()
- delete(a.backends, name)
-}
-
-// IsRegistered is used to check if a given audit backend is registered
-func (a *AuditBroker) IsRegistered(name string) bool {
- a.RLock()
- defer a.RUnlock()
- _, ok := a.backends[name]
- return ok
-}
-
-// GetHash returns a hash using the salt of the given backend
-func (a *AuditBroker) GetHash(name string, input string) (string, error) {
- a.RLock()
- defer a.RUnlock()
- be, ok := a.backends[name]
- if !ok {
- return "", fmt.Errorf("unknown audit backend %s", name)
- }
-
- return be.backend.GetHash(input)
-}
-
-// LogRequest is used to ensure all the audit backends have an opportunity to
-// log the given request and that *at least one* succeeds.
-func (a *AuditBroker) LogRequest(auth *logical.Auth, req *logical.Request, headersConfig *AuditedHeadersConfig, outerErr error) (ret error) {
- defer metrics.MeasureSince([]string{"audit", "log_request"}, time.Now())
- a.RLock()
- defer a.RUnlock()
-
- var retErr *multierror.Error
-
- defer func() {
- if r := recover(); r != nil {
- a.logger.Error("audit: panic during logging", "request_path", req.Path, "error", r)
- retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log"))
- }
-
- ret = retErr.ErrorOrNil()
-
- if ret != nil {
- metrics.IncrCounter([]string{"audit", "log_request_failure"}, 1.0)
- }
- }()
-
- // All logged requests must have an identifier
- //if req.ID == "" {
- // a.logger.Error("audit: missing identifier in request object", "request_path", req.Path)
- // retErr = multierror.Append(retErr, fmt.Errorf("missing identifier in request object: %s", req.Path))
- // return
- //}
-
- headers := req.Headers
- defer func() {
- req.Headers = headers
- }()
-
- // Ensure at least one backend logs
- anyLogged := false
- for name, be := range a.backends {
- req.Headers = nil
- transHeaders, thErr := headersConfig.ApplyConfig(headers, be.backend.GetHash)
- if thErr != nil {
- a.logger.Error("audit: backend failed to include headers", "backend", name, "error", thErr)
- continue
- }
- req.Headers = transHeaders
-
- start := time.Now()
- lrErr := be.backend.LogRequest(auth, req, outerErr)
- metrics.MeasureSince([]string{"audit", name, "log_request"}, start)
- if lrErr != nil {
- a.logger.Error("audit: backend failed to log request", "backend", name, "error", lrErr)
- } else {
- anyLogged = true
- }
- }
- if !anyLogged && len(a.backends) > 0 {
- retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the request"))
- }
-
- return retErr.ErrorOrNil()
-}
-
-// LogResponse is used to ensure all the audit backends have an opportunity to
-// log the given response and that *at least one* succeeds.
-func (a *AuditBroker) LogResponse(auth *logical.Auth, req *logical.Request,
- resp *logical.Response, headersConfig *AuditedHeadersConfig, err error) (ret error) {
- defer metrics.MeasureSince([]string{"audit", "log_response"}, time.Now())
- a.RLock()
- defer a.RUnlock()
-
- var retErr *multierror.Error
-
- defer func() {
- if r := recover(); r != nil {
- a.logger.Error("audit: panic during logging", "request_path", req.Path, "error", r)
- retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log"))
- }
-
- ret = retErr.ErrorOrNil()
-
- if ret != nil {
- metrics.IncrCounter([]string{"audit", "log_response_failure"}, 1.0)
- }
- }()
-
- headers := req.Headers
- defer func() {
- req.Headers = headers
- }()
-
- // Ensure at least one backend logs
- anyLogged := false
- for name, be := range a.backends {
- req.Headers = nil
- transHeaders, thErr := headersConfig.ApplyConfig(headers, be.backend.GetHash)
- if thErr != nil {
- a.logger.Error("audit: backend failed to include headers", "backend", name, "error", thErr)
- continue
- }
- req.Headers = transHeaders
-
- start := time.Now()
- lrErr := be.backend.LogResponse(auth, req, resp, err)
- metrics.MeasureSince([]string{"audit", name, "log_response"}, start)
- if lrErr != nil {
- a.logger.Error("audit: backend failed to log response", "backend", name, "error", lrErr)
- } else {
- anyLogged = true
- }
- }
- if !anyLogged && len(a.backends) > 0 {
- retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the response"))
- }
-
- return retErr.ErrorOrNil()
-}
-
-func (a *AuditBroker) Invalidate(key string) {
- // For now we ignore the key as this would only apply to salts. We just
- // sort of brute force it on each one.
- a.Lock()
- defer a.Unlock()
- for _, be := range a.backends {
- be.backend.Invalidate()
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/audit_test.go b/vendor/github.com/hashicorp/vault/vault/audit_test.go
deleted file mode 100644
index a91298d..0000000
--- a/vendor/github.com/hashicorp/vault/vault/audit_test.go
+++ /dev/null
@@ -1,639 +0,0 @@
-package vault
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "testing"
- "time"
-
- "errors"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
- log "github.com/mgutz/logxi/v1"
- "github.com/mitchellh/copystructure"
-)
-
-type NoopAudit struct {
- Config *audit.BackendConfig
- ReqErr error
- ReqAuth []*logical.Auth
- Req []*logical.Request
- ReqHeaders []map[string][]string
- ReqErrs []error
-
- RespErr error
- RespAuth []*logical.Auth
- RespReq []*logical.Request
- Resp []*logical.Response
- RespErrs []error
-
- salt *salt.Salt
- saltMutex sync.RWMutex
-}
-
-func (n *NoopAudit) LogRequest(a *logical.Auth, r *logical.Request, err error) error {
- n.ReqAuth = append(n.ReqAuth, a)
- n.Req = append(n.Req, r)
- n.ReqHeaders = append(n.ReqHeaders, r.Headers)
- n.ReqErrs = append(n.ReqErrs, err)
- return n.ReqErr
-}
-
-func (n *NoopAudit) LogResponse(a *logical.Auth, r *logical.Request, re *logical.Response, err error) error {
- n.RespAuth = append(n.RespAuth, a)
- n.RespReq = append(n.RespReq, r)
- n.Resp = append(n.Resp, re)
- n.RespErrs = append(n.RespErrs, err)
- return n.RespErr
-}
-
-func (n *NoopAudit) Salt() (*salt.Salt, error) {
- n.saltMutex.RLock()
- if n.salt != nil {
- defer n.saltMutex.RUnlock()
- return n.salt, nil
- }
- n.saltMutex.RUnlock()
- n.saltMutex.Lock()
- defer n.saltMutex.Unlock()
- if n.salt != nil {
- return n.salt, nil
- }
- salt, err := salt.NewSalt(n.Config.SaltView, n.Config.SaltConfig)
- if err != nil {
- return nil, err
- }
- n.salt = salt
- return salt, nil
-}
-
-func (n *NoopAudit) GetHash(data string) (string, error) {
- salt, err := n.Salt()
- if err != nil {
- return "", err
- }
- return salt.GetIdentifiedHMAC(data), nil
-}
-
-func (n *NoopAudit) Reload() error {
- return nil
-}
-
-func (n *NoopAudit) Invalidate() {
- n.saltMutex.Lock()
- defer n.saltMutex.Unlock()
- n.salt = nil
-}
-
-func TestCore_EnableAudit(t *testing.T) {
- c, keys, _ := TestCoreUnsealed(t)
- c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
- return &NoopAudit{
- Config: config,
- }, nil
- }
-
- me := &MountEntry{
- Table: auditTableType,
- Path: "foo",
- Type: "noop",
- }
- err := c.enableAudit(me)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if !c.auditBroker.IsRegistered("foo/") {
- t.Fatalf("missing audit backend")
- }
-
- conf := &CoreConfig{
- Physical: c.physical,
- AuditBackends: make(map[string]audit.Factory),
- DisableMlock: true,
- }
- conf.AuditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
- return &NoopAudit{
- Config: config,
- }, nil
- }
- c2, err := NewCore(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- for i, key := range keys {
- unseal, err := TestCoreUnseal(c2, key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if i+1 == len(keys) && !unseal {
- t.Fatalf("should be unsealed")
- }
- }
-
- // Verify matching audit tables
- if !reflect.DeepEqual(c.audit, c2.audit) {
- t.Fatalf("mismatch: %v %v", c.audit, c2.audit)
- }
-
- // Check for registration
- if !c2.auditBroker.IsRegistered("foo/") {
- t.Fatalf("missing audit backend")
- }
-}
-
-func TestCore_EnableAudit_MixedFailures(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
- c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
- return &NoopAudit{
- Config: config,
- }, nil
- }
-
- c.auditBackends["fail"] = func(config *audit.BackendConfig) (audit.Backend, error) {
- return nil, fmt.Errorf("failing enabling")
- }
-
- c.audit = &MountTable{
- Type: auditTableType,
- Entries: []*MountEntry{
- &MountEntry{
- Table: auditTableType,
- Path: "noop/",
- Type: "noop",
- UUID: "abcd",
- },
- &MountEntry{
- Table: auditTableType,
- Path: "noop2/",
- Type: "noop",
- UUID: "bcde",
- },
- },
- }
-
- // Both should set up successfully
- err := c.setupAudits()
- if err != nil {
- t.Fatal(err)
- }
-
- // We expect this to work because the other entry is still valid
- c.audit.Entries[0].Type = "fail"
- err = c.setupAudits()
- if err != nil {
- t.Fatal(err)
- }
-
- // No audit backend set up successfully, so expect error
- c.audit.Entries[1].Type = "fail"
- err = c.setupAudits()
- if err == nil {
- t.Fatal("expected error")
- }
-}
-
-// Test that the local table actually gets populated as expected with local
-// entries, and that upon reading the entries from both are recombined
-// correctly
-func TestCore_EnableAudit_Local(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
- c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
- return &NoopAudit{
- Config: config,
- }, nil
- }
-
- c.auditBackends["fail"] = func(config *audit.BackendConfig) (audit.Backend, error) {
- return nil, fmt.Errorf("failing enabling")
- }
-
- c.audit = &MountTable{
- Type: auditTableType,
- Entries: []*MountEntry{
- &MountEntry{
- Table: auditTableType,
- Path: "noop/",
- Type: "noop",
- UUID: "abcd",
- Accessor: "noop-abcd",
- },
- &MountEntry{
- Table: auditTableType,
- Path: "noop2/",
- Type: "noop",
- UUID: "bcde",
- Accessor: "noop-bcde",
- },
- },
- }
-
- // Both should set up successfully
- err := c.setupAudits()
- if err != nil {
- t.Fatal(err)
- }
-
- rawLocal, err := c.barrier.Get(coreLocalAuditConfigPath)
- if err != nil {
- t.Fatal(err)
- }
- if rawLocal == nil {
- t.Fatal("expected non-nil local audit")
- }
- localAuditTable := &MountTable{}
- if err := jsonutil.DecodeJSON(rawLocal.Value, localAuditTable); err != nil {
- t.Fatal(err)
- }
- if len(localAuditTable.Entries) > 0 {
- t.Fatalf("expected no entries in local audit table, got %#v", localAuditTable)
- }
-
- c.audit.Entries[1].Local = true
- if err := c.persistAudit(c.audit, false); err != nil {
- t.Fatal(err)
- }
-
- rawLocal, err = c.barrier.Get(coreLocalAuditConfigPath)
- if err != nil {
- t.Fatal(err)
- }
- if rawLocal == nil {
- t.Fatal("expected non-nil local audit")
- }
- localAuditTable = &MountTable{}
- if err := jsonutil.DecodeJSON(rawLocal.Value, localAuditTable); err != nil {
- t.Fatal(err)
- }
- if len(localAuditTable.Entries) != 1 {
- t.Fatalf("expected one entry in local audit table, got %#v", localAuditTable)
- }
-
- oldAudit := c.audit
- if err := c.loadAudits(); err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(oldAudit, c.audit) {
- t.Fatalf("expected\n%#v\ngot\n%#v\n", oldAudit, c.audit)
- }
-
- if len(c.audit.Entries) != 2 {
- t.Fatalf("expected two audit entries, got %#v", localAuditTable)
- }
-}
-
-func TestCore_DisableAudit(t *testing.T) {
- c, keys, _ := TestCoreUnsealed(t)
- c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
- return &NoopAudit{
- Config: config,
- }, nil
- }
-
- existed, err := c.disableAudit("foo")
- if existed && err != nil {
- t.Fatalf("existed: %v; err: %v", existed, err)
- }
-
- me := &MountEntry{
- Table: auditTableType,
- Path: "foo",
- Type: "noop",
- }
- err = c.enableAudit(me)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- existed, err = c.disableAudit("foo")
- if !existed || err != nil {
- t.Fatalf("existed: %v; err: %v", existed, err)
- }
-
- // Check for registration
- if c.auditBroker.IsRegistered("foo") {
- t.Fatalf("audit backend present")
- }
-
- conf := &CoreConfig{
- Physical: c.physical,
- DisableMlock: true,
- }
- c2, err := NewCore(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- for i, key := range keys {
- unseal, err := TestCoreUnseal(c2, key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if i+1 == len(keys) && !unseal {
- t.Fatalf("should be unsealed")
- }
- }
-
- // Verify matching mount tables
- if !reflect.DeepEqual(c.audit, c2.audit) {
- t.Fatalf("mismatch:\n%#v\n%#v", c.audit, c2.audit)
- }
-}
-
-func TestCore_DefaultAuditTable(t *testing.T) {
- c, keys, _ := TestCoreUnsealed(t)
- verifyDefaultAuditTable(t, c.audit)
-
- // Verify we have an audit broker
- if c.auditBroker == nil {
- t.Fatalf("missing audit broker")
- }
-
- // Start a second core with same physical
- conf := &CoreConfig{
- Physical: c.physical,
- DisableMlock: true,
- }
- c2, err := NewCore(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- for i, key := range keys {
- unseal, err := TestCoreUnseal(c2, key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if i+1 == len(keys) && !unseal {
- t.Fatalf("should be unsealed")
- }
- }
-
- // Verify matching mount tables
- if !reflect.DeepEqual(c.audit, c2.audit) {
- t.Fatalf("mismatch: %v %v", c.audit, c2.audit)
- }
-}
-
-func TestDefaultAuditTable(t *testing.T) {
- table := defaultAuditTable()
- verifyDefaultAuditTable(t, table)
-}
-
-func verifyDefaultAuditTable(t *testing.T, table *MountTable) {
- if len(table.Entries) != 0 {
- t.Fatalf("bad: %v", table.Entries)
- }
- if table.Type != auditTableType {
- t.Fatalf("bad: %v", *table)
- }
-}
-
-func TestAuditBroker_LogRequest(t *testing.T) {
- l := logformat.NewVaultLogger(log.LevelTrace)
- b := NewAuditBroker(l)
- a1 := &NoopAudit{}
- a2 := &NoopAudit{}
- b.Register("foo", a1, nil)
- b.Register("bar", a2, nil)
-
- auth := &logical.Auth{
- ClientToken: "foo",
- Policies: []string{"dev", "ops"},
- Metadata: map[string]string{
- "user": "armon",
- "source": "github",
- },
- }
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "sys/mounts",
- }
-
- // Copy so we can verify nothing canged
- authCopyRaw, err := copystructure.Copy(auth)
- if err != nil {
- t.Fatal(err)
- }
- authCopy := authCopyRaw.(*logical.Auth)
-
- reqCopyRaw, err := copystructure.Copy(req)
- if err != nil {
- t.Fatal(err)
- }
- reqCopy := reqCopyRaw.(*logical.Request)
-
- // Create an identifier for the request to verify against
- req.ID, err = uuid.GenerateUUID()
- if err != nil {
- t.Fatalf("failed to generate identifier for the request: path%s err: %v", req.Path, err)
- }
- reqCopy.ID = req.ID
-
- reqErrs := errors.New("errs")
-
- headersConf := &AuditedHeadersConfig{
- Headers: make(map[string]*auditedHeaderSettings),
- }
-
- err = b.LogRequest(authCopy, reqCopy, headersConf, reqErrs)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- for _, a := range []*NoopAudit{a1, a2} {
- if !reflect.DeepEqual(a.ReqAuth[0], auth) {
- t.Fatalf("Bad: %#v", a.ReqAuth[0])
- }
- if !reflect.DeepEqual(a.Req[0], req) {
- t.Fatalf("Bad: %#v\n wanted %#v", a.Req[0], req)
- }
- if !reflect.DeepEqual(a.ReqErrs[0], reqErrs) {
- t.Fatalf("Bad: %#v", a.ReqErrs[0])
- }
- }
-
- // Should still work with one failing backend
- a1.ReqErr = fmt.Errorf("failed")
- if err := b.LogRequest(auth, req, headersConf, nil); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should FAIL work with both failing backends
- a2.ReqErr = fmt.Errorf("failed")
- if err := b.LogRequest(auth, req, headersConf, nil); !errwrap.Contains(err, "no audit backend succeeded in logging the request") {
- t.Fatalf("err: %v", err)
- }
-}
-
-func TestAuditBroker_LogResponse(t *testing.T) {
- l := logformat.NewVaultLogger(log.LevelTrace)
- b := NewAuditBroker(l)
- a1 := &NoopAudit{}
- a2 := &NoopAudit{}
- b.Register("foo", a1, nil)
- b.Register("bar", a2, nil)
-
- auth := &logical.Auth{
- NumUses: 10,
- ClientToken: "foo",
- Policies: []string{"dev", "ops"},
- Metadata: map[string]string{
- "user": "armon",
- "source": "github",
- },
- }
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "sys/mounts",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 1 * time.Hour,
- },
- },
- Data: map[string]interface{}{
- "user": "root",
- "password": "password",
- },
- }
- respErr := fmt.Errorf("permission denied")
-
- // Copy so we can verify nothing canged
- authCopyRaw, err := copystructure.Copy(auth)
- if err != nil {
- t.Fatal(err)
- }
- authCopy := authCopyRaw.(*logical.Auth)
-
- reqCopyRaw, err := copystructure.Copy(req)
- if err != nil {
- t.Fatal(err)
- }
- reqCopy := reqCopyRaw.(*logical.Request)
-
- respCopyRaw, err := copystructure.Copy(resp)
- if err != nil {
- t.Fatal(err)
- }
- respCopy := respCopyRaw.(*logical.Response)
-
- headersConf := &AuditedHeadersConfig{
- Headers: make(map[string]*auditedHeaderSettings),
- }
-
- err = b.LogResponse(authCopy, reqCopy, respCopy, headersConf, respErr)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- for _, a := range []*NoopAudit{a1, a2} {
- if !reflect.DeepEqual(a.RespAuth[0], auth) {
- t.Fatalf("Bad: %#v", a.ReqAuth[0])
- }
- if !reflect.DeepEqual(a.RespReq[0], req) {
- t.Fatalf("Bad: %#v", a.Req[0])
- }
- if !reflect.DeepEqual(a.Resp[0], resp) {
- t.Fatalf("Bad: %#v", a.Resp[0])
- }
- if !reflect.DeepEqual(a.RespErrs[0], respErr) {
- t.Fatalf("Expected\n%v\nGot\n%#v", respErr, a.RespErrs[0])
- }
- }
-
- // Should still work with one failing backend
- a1.RespErr = fmt.Errorf("failed")
- err = b.LogResponse(auth, req, resp, headersConf, respErr)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should FAIL work with both failing backends
- a2.RespErr = fmt.Errorf("failed")
- err = b.LogResponse(auth, req, resp, headersConf, respErr)
- if !strings.Contains(err.Error(), "no audit backend succeeded in logging the response") {
- t.Fatalf("err: %v", err)
- }
-}
-
-func TestAuditBroker_AuditHeaders(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
- b := NewAuditBroker(logger)
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "headers/")
- a1 := &NoopAudit{}
- a2 := &NoopAudit{}
- b.Register("foo", a1, nil)
- b.Register("bar", a2, nil)
-
- auth := &logical.Auth{
- ClientToken: "foo",
- Policies: []string{"dev", "ops"},
- Metadata: map[string]string{
- "user": "armon",
- "source": "github",
- },
- }
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "sys/mounts",
- Headers: map[string][]string{
- "X-Test-Header": []string{"foo"},
- "X-Vault-Header": []string{"bar"},
- "Content-Type": []string{"baz"},
- },
- }
- respErr := fmt.Errorf("permission denied")
-
- // Copy so we can verify nothing canged
- reqCopyRaw, err := copystructure.Copy(req)
- if err != nil {
- t.Fatal(err)
- }
- reqCopy := reqCopyRaw.(*logical.Request)
-
- headersConf := &AuditedHeadersConfig{
- view: view,
- }
- headersConf.add("X-Test-Header", false)
- headersConf.add("X-Vault-Header", false)
-
- err = b.LogRequest(auth, reqCopy, headersConf, respErr)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- expected := map[string][]string{
- "x-test-header": []string{"foo"},
- "x-vault-header": []string{"bar"},
- }
-
- for _, a := range []*NoopAudit{a1, a2} {
- if !reflect.DeepEqual(a.ReqHeaders[0], expected) {
- t.Fatalf("Bad audited headers: %#v", a.Req[0].Headers)
- }
- }
-
- // Should still work with one failing backend
- a1.ReqErr = fmt.Errorf("failed")
- err = b.LogRequest(auth, req, headersConf, respErr)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should FAIL work with both failing backends
- a2.ReqErr = fmt.Errorf("failed")
- err = b.LogRequest(auth, req, headersConf, respErr)
- if !errwrap.Contains(err, "no audit backend succeeded in logging the request") {
- t.Fatalf("err: %v", err)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/audited_headers.go b/vendor/github.com/hashicorp/vault/vault/audited_headers.go
deleted file mode 100644
index 1e1a11b..0000000
--- a/vendor/github.com/hashicorp/vault/vault/audited_headers.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package vault
-
-import (
- "fmt"
- "strings"
- "sync"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// N.B.: While we could use textproto to get the canonical mime header, HTTP/2
-// requires all headers to be converted to lower case, so we just do that.
-
-const (
- // Key used in the BarrierView to store and retrieve the header config
- auditedHeadersEntry = "audited-headers"
- // Path used to create a sub view off of BarrierView
- auditedHeadersSubPath = "audited-headers-config/"
-)
-
-type auditedHeaderSettings struct {
- HMAC bool `json:"hmac"`
-}
-
-// AuditedHeadersConfig is used by the Audit Broker to write only approved
-// headers to the audit logs. It uses a BarrierView to persist the settings.
-type AuditedHeadersConfig struct {
- Headers map[string]*auditedHeaderSettings
-
- view *BarrierView
- sync.RWMutex
-}
-
-// add adds or overwrites a header in the config and updates the barrier view
-func (a *AuditedHeadersConfig) add(header string, hmac bool) error {
- if header == "" {
- return fmt.Errorf("header value cannot be empty")
- }
-
- // Grab a write lock
- a.Lock()
- defer a.Unlock()
-
- if a.Headers == nil {
- a.Headers = make(map[string]*auditedHeaderSettings, 1)
- }
-
- a.Headers[strings.ToLower(header)] = &auditedHeaderSettings{hmac}
- entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.Headers)
- if err != nil {
- return fmt.Errorf("failed to persist audited headers config: %v", err)
- }
-
- if err := a.view.Put(entry); err != nil {
- return fmt.Errorf("failed to persist audited headers config: %v", err)
- }
-
- return nil
-}
-
-// remove deletes a header out of the header config and updates the barrier view
-func (a *AuditedHeadersConfig) remove(header string) error {
- if header == "" {
- return fmt.Errorf("header value cannot be empty")
- }
-
- // Grab a write lock
- a.Lock()
- defer a.Unlock()
-
- // Nothing to delete
- if len(a.Headers) == 0 {
- return nil
- }
-
- delete(a.Headers, strings.ToLower(header))
- entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.Headers)
- if err != nil {
- return fmt.Errorf("failed to persist audited headers config: %v", err)
- }
-
- if err := a.view.Put(entry); err != nil {
- return fmt.Errorf("failed to persist audited headers config: %v", err)
- }
-
- return nil
-}
-
-// ApplyConfig returns a map of approved headers and their values, either
-// hmac'ed or plaintext
-func (a *AuditedHeadersConfig) ApplyConfig(headers map[string][]string, hashFunc func(string) (string, error)) (result map[string][]string, retErr error) {
- // Grab a read lock
- a.RLock()
- defer a.RUnlock()
-
- // Make a copy of the incoming headers with everything lower so we can
- // case-insensitively compare
- lowerHeaders := make(map[string][]string, len(headers))
- for k, v := range headers {
- lowerHeaders[strings.ToLower(k)] = v
- }
-
- result = make(map[string][]string, len(a.Headers))
- for key, settings := range a.Headers {
- if val, ok := lowerHeaders[key]; ok {
- // copy the header values so we don't overwrite them
- hVals := make([]string, len(val))
- copy(hVals, val)
-
- // Optionally hmac the values
- if settings.HMAC {
- for i, el := range hVals {
- hVal, err := hashFunc(el)
- if err != nil {
- return nil, err
- }
- hVals[i] = hVal
- }
- }
-
- result[key] = hVals
- }
- }
-
- return result, nil
-}
-
-// Initalize the headers config by loading from the barrier view
-func (c *Core) setupAuditedHeadersConfig() error {
- // Create a sub-view
- view := c.systemBarrierView.SubView(auditedHeadersSubPath)
-
- // Create the config
- out, err := view.Get(auditedHeadersEntry)
- if err != nil {
- return fmt.Errorf("failed to read config: %v", err)
- }
-
- headers := make(map[string]*auditedHeaderSettings)
- if out != nil {
- err = out.DecodeJSON(&headers)
- if err != nil {
- return err
- }
- }
-
- // Ensure that we are able to case-sensitively access the headers;
- // necessary for the upgrade case
- lowerHeaders := make(map[string]*auditedHeaderSettings, len(headers))
- for k, v := range headers {
- lowerHeaders[strings.ToLower(k)] = v
- }
-
- c.auditedHeaders = &AuditedHeadersConfig{
- Headers: lowerHeaders,
- view: view,
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go b/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go
deleted file mode 100644
index 93225cf..0000000
--- a/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package vault
-
-import (
- "reflect"
- "testing"
-
- "github.com/hashicorp/vault/helper/salt"
-)
-
-func mockAuditedHeadersConfig(t *testing.T) *AuditedHeadersConfig {
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "foo/")
- return &AuditedHeadersConfig{
- Headers: make(map[string]*auditedHeaderSettings),
- view: view,
- }
-}
-
-func TestAuditedHeadersConfig_CRUD(t *testing.T) {
- conf := mockAuditedHeadersConfig(t)
-
- testAuditedHeadersConfig_Add(t, conf)
- testAuditedHeadersConfig_Remove(t, conf)
-}
-
-func testAuditedHeadersConfig_Add(t *testing.T, conf *AuditedHeadersConfig) {
- err := conf.add("X-Test-Header", false)
- if err != nil {
- t.Fatalf("Error when adding header to config: %s", err)
- }
-
- settings, ok := conf.Headers["x-test-header"]
- if !ok {
- t.Fatal("Expected header to be found in config")
- }
-
- if settings.HMAC {
- t.Fatal("Expected HMAC to be set to false, got true")
- }
-
- out, err := conf.view.Get(auditedHeadersEntry)
- if err != nil {
- t.Fatalf("Could not retrieve headers entry from config: %s", err)
- }
-
- headers := make(map[string]*auditedHeaderSettings)
- err = out.DecodeJSON(&headers)
- if err != nil {
- t.Fatalf("Error decoding header view: %s", err)
- }
-
- expected := map[string]*auditedHeaderSettings{
- "x-test-header": &auditedHeaderSettings{
- HMAC: false,
- },
- }
-
- if !reflect.DeepEqual(headers, expected) {
- t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers)
- }
-
- err = conf.add("X-Vault-Header", true)
- if err != nil {
- t.Fatalf("Error when adding header to config: %s", err)
- }
-
- settings, ok = conf.Headers["x-vault-header"]
- if !ok {
- t.Fatal("Expected header to be found in config")
- }
-
- if !settings.HMAC {
- t.Fatal("Expected HMAC to be set to true, got false")
- }
-
- out, err = conf.view.Get(auditedHeadersEntry)
- if err != nil {
- t.Fatalf("Could not retrieve headers entry from config: %s", err)
- }
-
- headers = make(map[string]*auditedHeaderSettings)
- err = out.DecodeJSON(&headers)
- if err != nil {
- t.Fatalf("Error decoding header view: %s", err)
- }
-
- expected["x-vault-header"] = &auditedHeaderSettings{
- HMAC: true,
- }
-
- if !reflect.DeepEqual(headers, expected) {
- t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers)
- }
-
-}
-
-func testAuditedHeadersConfig_Remove(t *testing.T, conf *AuditedHeadersConfig) {
- err := conf.remove("X-Test-Header")
- if err != nil {
- t.Fatalf("Error when adding header to config: %s", err)
- }
-
- _, ok := conf.Headers["x-Test-HeAder"]
- if ok {
- t.Fatal("Expected header to not be found in config")
- }
-
- out, err := conf.view.Get(auditedHeadersEntry)
- if err != nil {
- t.Fatalf("Could not retrieve headers entry from config: %s", err)
- }
-
- headers := make(map[string]*auditedHeaderSettings)
- err = out.DecodeJSON(&headers)
- if err != nil {
- t.Fatalf("Error decoding header view: %s", err)
- }
-
- expected := map[string]*auditedHeaderSettings{
- "x-vault-header": &auditedHeaderSettings{
- HMAC: true,
- },
- }
-
- if !reflect.DeepEqual(headers, expected) {
- t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers)
- }
-
- err = conf.remove("x-VaulT-Header")
- if err != nil {
- t.Fatalf("Error when adding header to config: %s", err)
- }
-
- _, ok = conf.Headers["x-vault-header"]
- if ok {
- t.Fatal("Expected header to not be found in config")
- }
-
- out, err = conf.view.Get(auditedHeadersEntry)
- if err != nil {
- t.Fatalf("Could not retrieve headers entry from config: %s", err)
- }
-
- headers = make(map[string]*auditedHeaderSettings)
- err = out.DecodeJSON(&headers)
- if err != nil {
- t.Fatalf("Error decoding header view: %s", err)
- }
-
- expected = make(map[string]*auditedHeaderSettings)
-
- if !reflect.DeepEqual(headers, expected) {
- t.Fatalf("Expected config didn't match actual. Expected: %#v, Got: %#v", expected, headers)
- }
-}
-
-func TestAuditedHeadersConfig_ApplyConfig(t *testing.T) {
- conf := mockAuditedHeadersConfig(t)
-
- conf.add("X-TesT-Header", false)
- conf.add("X-Vault-HeAdEr", true)
-
- reqHeaders := map[string][]string{
- "X-Test-Header": []string{"foo"},
- "X-Vault-Header": []string{"bar", "bar"},
- "Content-Type": []string{"json"},
- }
-
- hashFunc := func(s string) (string, error) { return "hashed", nil }
-
- result, err := conf.ApplyConfig(reqHeaders, hashFunc)
- if err != nil {
- t.Fatal(err)
- }
-
- expected := map[string][]string{
- "x-test-header": []string{"foo"},
- "x-vault-header": []string{"hashed", "hashed"},
- }
-
- if !reflect.DeepEqual(result, expected) {
- t.Fatalf("Expected headers did not match actual: Expected %#v\n Got %#v\n", expected, result)
- }
-
- //Make sure we didn't edit the reqHeaders map
- reqHeadersCopy := map[string][]string{
- "X-Test-Header": []string{"foo"},
- "X-Vault-Header": []string{"bar", "bar"},
- "Content-Type": []string{"json"},
- }
-
- if !reflect.DeepEqual(reqHeaders, reqHeadersCopy) {
- t.Fatalf("Req headers were changed, expected %#v\n got %#v", reqHeadersCopy, reqHeaders)
- }
-
-}
-
-func BenchmarkAuditedHeaderConfig_ApplyConfig(b *testing.B) {
- conf := &AuditedHeadersConfig{
- Headers: make(map[string]*auditedHeaderSettings),
- view: nil,
- }
-
- conf.Headers = map[string]*auditedHeaderSettings{
- "X-Test-Header": &auditedHeaderSettings{false},
- "X-Vault-Header": &auditedHeaderSettings{true},
- }
-
- reqHeaders := map[string][]string{
- "X-Test-Header": []string{"foo"},
- "X-Vault-Header": []string{"bar", "bar"},
- "Content-Type": []string{"json"},
- }
-
- salter, err := salt.NewSalt(nil, nil)
- if err != nil {
- b.Fatal(err)
- }
-
- hashFunc := func(s string) (string, error) { return salter.GetIdentifiedHMAC(s), nil }
-
- // Reset the timer since we did a lot above
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- conf.ApplyConfig(reqHeaders, hashFunc)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/auth.go b/vendor/github.com/hashicorp/vault/vault/auth.go
deleted file mode 100644
index 5900449..0000000
--- a/vendor/github.com/hashicorp/vault/vault/auth.go
+++ /dev/null
@@ -1,550 +0,0 @@
-package vault
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- // coreAuthConfigPath is used to store the auth configuration.
- // Auth configuration is protected within the Vault itself, which means it
- // can only be viewed or modified after an unseal.
- coreAuthConfigPath = "core/auth"
-
- // coreLocalAuthConfigPath is used to store credential configuration for
- // local (non-replicated) mounts
- coreLocalAuthConfigPath = "core/local-auth"
-
- // credentialBarrierPrefix is the prefix to the UUID used in the
- // barrier view for the credential backends.
- credentialBarrierPrefix = "auth/"
-
- // credentialRoutePrefix is the mount prefix used for the router
- credentialRoutePrefix = "auth/"
-
- // credentialTableType is the value we expect to find for the credential
- // table and corresponding entries
- credentialTableType = "auth"
-)
-
-var (
- // errLoadAuthFailed if loadCredentials encounters an error
- errLoadAuthFailed = errors.New("failed to setup auth table")
-
- // credentialAliases maps old backend names to new backend names, allowing us
- // to move/rename backends but maintain backwards compatibility
- credentialAliases = map[string]string{"aws-ec2": "aws"}
-)
-
-// enableCredential is used to enable a new credential backend
-func (c *Core) enableCredential(entry *MountEntry) error {
- // Ensure we end the path in a slash
- if !strings.HasSuffix(entry.Path, "/") {
- entry.Path += "/"
- }
-
- // Ensure there is a name
- if entry.Path == "/" {
- return fmt.Errorf("backend path must be specified")
- }
-
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- // Look for matching name
- for _, ent := range c.auth.Entries {
- switch {
- // Existing is oauth/github/ new is oauth/ or
- // existing is oauth/ and new is oauth/github/
- case strings.HasPrefix(ent.Path, entry.Path):
- fallthrough
- case strings.HasPrefix(entry.Path, ent.Path):
- return logical.CodedError(409, "path is already in use")
- }
- }
-
- // Ensure the token backend is a singleton
- if entry.Type == "token" {
- return fmt.Errorf("token credential backend cannot be instantiated")
- }
-
- if match := c.router.MatchingMount(credentialRoutePrefix + entry.Path); match != "" {
- return logical.CodedError(409, fmt.Sprintf("existing mount at %s", match))
- }
-
- // Generate a new UUID and view
- if entry.UUID == "" {
- entryUUID, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- entry.UUID = entryUUID
- }
- if entry.Accessor == "" {
- accessor, err := c.generateMountAccessor("auth_" + entry.Type)
- if err != nil {
- return err
- }
- entry.Accessor = accessor
- }
- viewPath := credentialBarrierPrefix + entry.UUID + "/"
- view := NewBarrierView(c.barrier, viewPath)
- sysView := c.mountEntrySysView(entry)
- conf := make(map[string]string)
- if entry.Config.PluginName != "" {
- conf["plugin_name"] = entry.Config.PluginName
- }
-
- // Create the new backend
- backend, err := c.newCredentialBackend(entry.Type, sysView, view, conf)
- if err != nil {
- return err
- }
- if backend == nil {
- return fmt.Errorf("nil backend returned from %q factory", entry.Type)
- }
-
- // Check for the correct backend type
- backendType := backend.Type()
- if entry.Type == "plugin" && backendType != logical.TypeCredential {
- return fmt.Errorf("cannot mount '%s' of type '%s' as an auth backend", entry.Config.PluginName, backendType)
- }
-
- if err := backend.Initialize(); err != nil {
- return err
- }
-
- // Update the auth table
- newTable := c.auth.shallowClone()
- newTable.Entries = append(newTable.Entries, entry)
- if err := c.persistAuth(newTable, entry.Local); err != nil {
- return errors.New("failed to update auth table")
- }
-
- c.auth = newTable
-
- path := credentialRoutePrefix + entry.Path
- if err := c.router.Mount(backend, path, entry, view); err != nil {
- return err
- }
-
- if c.logger.IsInfo() {
- c.logger.Info("core: enabled credential backend", "path", entry.Path, "type", entry.Type)
- }
- return nil
-}
-
-// disableCredential is used to disable an existing credential backend; the
-// boolean indicates if it existed
-func (c *Core) disableCredential(path string) error {
- // Ensure we end the path in a slash
- if !strings.HasSuffix(path, "/") {
- path += "/"
- }
-
- // Ensure the token backend is not affected
- if path == "token/" {
- return fmt.Errorf("token credential backend cannot be disabled")
- }
-
- // Store the view for this backend
- fullPath := credentialRoutePrefix + path
- view := c.router.MatchingStorageView(fullPath)
- if view == nil {
- return fmt.Errorf("no matching backend %s", fullPath)
- }
-
- // Mark the entry as tainted
- if err := c.taintCredEntry(path); err != nil {
- return err
- }
-
- // Taint the router path to prevent routing
- if err := c.router.Taint(fullPath); err != nil {
- return err
- }
-
- // Revoke credentials from this path
- if err := c.expiration.RevokePrefix(fullPath); err != nil {
- return err
- }
-
- // Call cleanup function if it exists
- backend := c.router.MatchingBackend(fullPath)
- if backend != nil {
- backend.Cleanup()
- }
-
- // Unmount the backend
- if err := c.router.Unmount(fullPath); err != nil {
- return err
- }
-
- // Clear the data in the view
- if view != nil {
- if err := logical.ClearView(view); err != nil {
- return err
- }
- }
-
- // Remove the mount table entry
- if err := c.removeCredEntry(path); err != nil {
- return err
- }
- if c.logger.IsInfo() {
- c.logger.Info("core: disabled credential backend", "path", path)
- }
- return nil
-}
-
-// removeCredEntry is used to remove an entry in the auth table
-func (c *Core) removeCredEntry(path string) error {
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- // Taint the entry from the auth table
- newTable := c.auth.shallowClone()
- entry := newTable.remove(path)
- if entry == nil {
- c.logger.Error("core: nil entry found removing entry in auth table", "path", path)
- return logical.CodedError(500, "failed to remove entry in auth table")
- }
-
- // Update the auth table
- if err := c.persistAuth(newTable, entry.Local); err != nil {
- return errors.New("failed to update auth table")
- }
-
- c.auth = newTable
-
- return nil
-}
-
-// taintCredEntry is used to mark an entry in the auth table as tainted
-func (c *Core) taintCredEntry(path string) error {
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- // Taint the entry from the auth table
- // We do this on the original since setting the taint operates
- // on the entries which a shallow clone shares anyways
- entry := c.auth.setTaint(path, true)
-
- // Ensure there was a match
- if entry == nil {
- return fmt.Errorf("no matching backend")
- }
-
- // Update the auth table
- if err := c.persistAuth(c.auth, entry.Local); err != nil {
- return errors.New("failed to update auth table")
- }
-
- return nil
-}
-
-// loadCredentials is invoked as part of postUnseal to load the auth table
-func (c *Core) loadCredentials() error {
- authTable := &MountTable{}
- localAuthTable := &MountTable{}
-
- // Load the existing mount table
- raw, err := c.barrier.Get(coreAuthConfigPath)
- if err != nil {
- c.logger.Error("core: failed to read auth table", "error", err)
- return errLoadAuthFailed
- }
- rawLocal, err := c.barrier.Get(coreLocalAuthConfigPath)
- if err != nil {
- c.logger.Error("core: failed to read local auth table", "error", err)
- return errLoadAuthFailed
- }
-
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- if raw != nil {
- if err := jsonutil.DecodeJSON(raw.Value, authTable); err != nil {
- c.logger.Error("core: failed to decode auth table", "error", err)
- return errLoadAuthFailed
- }
- c.auth = authTable
- }
- if rawLocal != nil {
- if err := jsonutil.DecodeJSON(rawLocal.Value, localAuthTable); err != nil {
- c.logger.Error("core: failed to decode local auth table", "error", err)
- return errLoadAuthFailed
- }
- c.auth.Entries = append(c.auth.Entries, localAuthTable.Entries...)
- }
-
- // Done if we have restored the auth table
- if c.auth != nil {
- needPersist := false
-
- // Upgrade to typed auth table
- if c.auth.Type == "" {
- c.auth.Type = credentialTableType
- needPersist = true
- }
-
- // Upgrade to table-scoped entries
- for _, entry := range c.auth.Entries {
- if entry.Table == "" {
- entry.Table = c.auth.Type
- needPersist = true
- }
- if entry.Accessor == "" {
- accessor, err := c.generateMountAccessor("auth_" + entry.Type)
- if err != nil {
- return err
- }
- entry.Accessor = accessor
- needPersist = true
- }
- }
-
- if !needPersist {
- return nil
- }
- } else {
- c.auth = c.defaultAuthTable()
- }
-
- if err := c.persistAuth(c.auth, false); err != nil {
- c.logger.Error("core: failed to persist auth table", "error", err)
- return errLoadAuthFailed
- }
- return nil
-}
-
-// persistAuth is used to persist the auth table after modification
-func (c *Core) persistAuth(table *MountTable, localOnly bool) error {
- if table.Type != credentialTableType {
- c.logger.Error("core: given table to persist has wrong type", "actual_type", table.Type, "expected_type", credentialTableType)
- return fmt.Errorf("invalid table type given, not persisting")
- }
-
- for _, entry := range table.Entries {
- if entry.Table != table.Type {
- c.logger.Error("core: given entry to persist in auth table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
- return fmt.Errorf("invalid auth entry found, not persisting")
- }
- }
-
- nonLocalAuth := &MountTable{
- Type: credentialTableType,
- }
-
- localAuth := &MountTable{
- Type: credentialTableType,
- }
-
- for _, entry := range table.Entries {
- if entry.Local {
- localAuth.Entries = append(localAuth.Entries, entry)
- } else {
- nonLocalAuth.Entries = append(nonLocalAuth.Entries, entry)
- }
- }
-
- if !localOnly {
- // Marshal the table
- compressedBytes, err := jsonutil.EncodeJSONAndCompress(nonLocalAuth, nil)
- if err != nil {
- c.logger.Error("core: failed to encode and/or compress auth table", "error", err)
- return err
- }
-
- // Create an entry
- entry := &Entry{
- Key: coreAuthConfigPath,
- Value: compressedBytes,
- }
-
- // Write to the physical backend
- if err := c.barrier.Put(entry); err != nil {
- c.logger.Error("core: failed to persist auth table", "error", err)
- return err
- }
- }
-
- // Repeat with local auth
- compressedBytes, err := jsonutil.EncodeJSONAndCompress(localAuth, nil)
- if err != nil {
- c.logger.Error("core: failed to encode and/or compress local auth table", "error", err)
- return err
- }
-
- entry := &Entry{
- Key: coreLocalAuthConfigPath,
- Value: compressedBytes,
- }
-
- if err := c.barrier.Put(entry); err != nil {
- c.logger.Error("core: failed to persist local auth table", "error", err)
- return err
- }
-
- return nil
-}
-
-// setupCredentials is invoked after we've loaded the auth table to
-// initialize the credential backends and setup the router
-func (c *Core) setupCredentials() error {
- var view *BarrierView
- var err error
- var persistNeeded bool
-
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- for _, entry := range c.auth.Entries {
- var backend logical.Backend
- // Work around some problematic code that existed in master for a while
- if strings.HasPrefix(entry.Path, credentialRoutePrefix) {
- entry.Path = strings.TrimPrefix(entry.Path, credentialRoutePrefix)
- persistNeeded = true
- }
-
- // Create a barrier view using the UUID
- viewPath := credentialBarrierPrefix + entry.UUID + "/"
- view = NewBarrierView(c.barrier, viewPath)
- sysView := c.mountEntrySysView(entry)
- conf := make(map[string]string)
- if entry.Config.PluginName != "" {
- conf["plugin_name"] = entry.Config.PluginName
- }
-
- // Initialize the backend
- backend, err = c.newCredentialBackend(entry.Type, sysView, view, conf)
- if err != nil {
- c.logger.Error("core: failed to create credential entry", "path", entry.Path, "error", err)
- if errwrap.Contains(err, ErrPluginNotFound.Error()) && entry.Type == "plugin" {
- // If we encounter an error instantiating the backend due to it being missing from the catalog,
- // skip backend initialization but register the entry to the mount table to preserve storage
- // and path.
- goto ROUTER_MOUNT
- }
- return errLoadAuthFailed
- }
- if backend == nil {
- return fmt.Errorf("nil backend returned from %q factory", entry.Type)
- }
-
- // Check for the correct backend type
- if entry.Type == "plugin" && backend.Type() != logical.TypeCredential {
- return fmt.Errorf("cannot mount '%s' of type '%s' as an auth backend", entry.Config.PluginName, backend.Type())
- }
-
- if err := backend.Initialize(); err != nil {
- return err
- }
- ROUTER_MOUNT:
- // Mount the backend
- path := credentialRoutePrefix + entry.Path
- err = c.router.Mount(backend, path, entry, view)
- if err != nil {
- c.logger.Error("core: failed to mount auth entry", "path", entry.Path, "error", err)
- return errLoadAuthFailed
- }
-
- // Ensure the path is tainted if set in the mount table
- if entry.Tainted {
- c.router.Taint(path)
- }
-
- // Check if this is the token store
- if entry.Type == "token" {
- c.tokenStore = backend.(*TokenStore)
-
- // this is loaded *after* the normal mounts, including cubbyhole
- c.router.tokenStoreSaltFunc = c.tokenStore.Salt
- c.tokenStore.cubbyholeBackend = c.router.MatchingBackend("cubbyhole/").(*CubbyholeBackend)
- }
- }
-
- if persistNeeded {
- return c.persistAuth(c.auth, false)
- }
-
- return nil
-}
-
-// teardownCredentials is used before we seal the vault to reset the credential
-// backends to their unloaded state. This is reversed by loadCredentials.
-func (c *Core) teardownCredentials() error {
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- if c.auth != nil {
- authTable := c.auth.shallowClone()
- for _, e := range authTable.Entries {
- backend := c.router.MatchingBackend(credentialRoutePrefix + e.Path)
- if backend != nil {
- backend.Cleanup()
- }
- }
- }
-
- c.auth = nil
- c.tokenStore = nil
- return nil
-}
-
-// newCredentialBackend is used to create and configure a new credential backend by name
-func (c *Core) newCredentialBackend(
- t string, sysView logical.SystemView, view logical.Storage, conf map[string]string) (logical.Backend, error) {
- if alias, ok := credentialAliases[t]; ok {
- t = alias
- }
- f, ok := c.credentialBackends[t]
- if !ok {
- return nil, fmt.Errorf("unknown backend type: %s", t)
- }
-
- config := &logical.BackendConfig{
- StorageView: view,
- Logger: c.logger,
- Config: conf,
- System: sysView,
- }
-
- b, err := f(config)
- if err != nil {
- return nil, err
- }
-
- return b, nil
-}
-
-// defaultAuthTable creates a default auth table
-func (c *Core) defaultAuthTable() *MountTable {
- table := &MountTable{
- Type: credentialTableType,
- }
- tokenUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not generate UUID for default auth table token entry: %v", err))
- }
- tokenAccessor, err := c.generateMountAccessor("auth_token")
- if err != nil {
- panic(fmt.Sprintf("could not generate accessor for default auth table token entry: %v", err))
- }
- tokenAuth := &MountEntry{
- Table: credentialTableType,
- Path: "token/",
- Type: "token",
- Description: "token based credentials",
- UUID: tokenUUID,
- Accessor: tokenAccessor,
- }
- table.Entries = append(table.Entries, tokenAuth)
- return table
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/auth_test.go b/vendor/github.com/hashicorp/vault/vault/auth_test.go
deleted file mode 100644
index c81b264..0000000
--- a/vendor/github.com/hashicorp/vault/vault/auth_test.go
+++ /dev/null
@@ -1,381 +0,0 @@
-package vault
-
-import (
- "reflect"
- "strings"
- "testing"
-
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
-)
-
-func TestCore_DefaultAuthTable(t *testing.T) {
- c, keys, _ := TestCoreUnsealed(t)
- verifyDefaultAuthTable(t, c.auth)
-
- // Start a second core with same physical
- conf := &CoreConfig{
- Physical: c.physical,
- DisableMlock: true,
- }
- c2, err := NewCore(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- for i, key := range keys {
- unseal, err := TestCoreUnseal(c2, key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if i+1 == len(keys) && !unseal {
- t.Fatalf("should be unsealed")
- }
- }
-
- // Verify matching mount tables
- if !reflect.DeepEqual(c.auth, c2.auth) {
- t.Fatalf("mismatch: %v %v", c.auth, c2.auth)
- }
-}
-
-func TestCore_EnableCredential(t *testing.T) {
- c, keys, _ := TestCoreUnsealed(t)
- c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return &NoopBackend{}, nil
- }
-
- me := &MountEntry{
- Table: credentialTableType,
- Path: "foo",
- Type: "noop",
- }
- err := c.enableCredential(me)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- match := c.router.MatchingMount("auth/foo/bar")
- if match != "auth/foo/" {
- t.Fatalf("missing mount")
- }
-
- conf := &CoreConfig{
- Physical: c.physical,
- DisableMlock: true,
- }
- c2, err := NewCore(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- c2.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return &NoopBackend{}, nil
- }
- for i, key := range keys {
- unseal, err := TestCoreUnseal(c2, key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if i+1 == len(keys) && !unseal {
- t.Fatalf("should be unsealed")
- }
- }
-
- // Verify matching auth tables
- if !reflect.DeepEqual(c.auth, c2.auth) {
- t.Fatalf("mismatch: %v %v", c.auth, c2.auth)
- }
-}
-
-// Test that the local table actually gets populated as expected with local
-// entries, and that upon reading the entries from both are recombined
-// correctly
-func TestCore_EnableCredential_Local(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
- c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return &NoopBackend{}, nil
- }
-
- c.auth = &MountTable{
- Type: credentialTableType,
- Entries: []*MountEntry{
- &MountEntry{
- Table: credentialTableType,
- Path: "noop/",
- Type: "noop",
- UUID: "abcd",
- Accessor: "noop-abcd",
- },
- &MountEntry{
- Table: credentialTableType,
- Path: "noop2/",
- Type: "noop",
- UUID: "bcde",
- Accessor: "noop-bcde",
- },
- },
- }
-
- // Both should set up successfully
- err := c.setupCredentials()
- if err != nil {
- t.Fatal(err)
- }
-
- rawLocal, err := c.barrier.Get(coreLocalAuthConfigPath)
- if err != nil {
- t.Fatal(err)
- }
- if rawLocal == nil {
- t.Fatal("expected non-nil local credential")
- }
- localCredentialTable := &MountTable{}
- if err := jsonutil.DecodeJSON(rawLocal.Value, localCredentialTable); err != nil {
- t.Fatal(err)
- }
- if len(localCredentialTable.Entries) > 0 {
- t.Fatalf("expected no entries in local credential table, got %#v", localCredentialTable)
- }
-
- c.auth.Entries[1].Local = true
- if err := c.persistAuth(c.auth, false); err != nil {
- t.Fatal(err)
- }
-
- rawLocal, err = c.barrier.Get(coreLocalAuthConfigPath)
- if err != nil {
- t.Fatal(err)
- }
- if rawLocal == nil {
- t.Fatal("expected non-nil local credential")
- }
- localCredentialTable = &MountTable{}
- if err := jsonutil.DecodeJSON(rawLocal.Value, localCredentialTable); err != nil {
- t.Fatal(err)
- }
- if len(localCredentialTable.Entries) != 1 {
- t.Fatalf("expected one entry in local credential table, got %#v", localCredentialTable)
- }
-
- oldCredential := c.auth
- if err := c.loadCredentials(); err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(oldCredential, c.auth) {
- t.Fatalf("expected\n%#v\ngot\n%#v\n", oldCredential, c.auth)
- }
-
- if len(c.auth.Entries) != 2 {
- t.Fatalf("expected two credential entries, got %#v", localCredentialTable)
- }
-}
-
-func TestCore_EnableCredential_twice_409(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
- c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return &NoopBackend{}, nil
- }
-
- me := &MountEntry{
- Table: credentialTableType,
- Path: "foo",
- Type: "noop",
- }
- err := c.enableCredential(me)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // 2nd should be a 409 error
- err2 := c.enableCredential(me)
- switch err2.(type) {
- case logical.HTTPCodedError:
- if err2.(logical.HTTPCodedError).Code() != 409 {
- t.Fatalf("invalid code given")
- }
- default:
- t.Fatalf("expected a different error type")
- }
-}
-
-func TestCore_EnableCredential_Token(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
- me := &MountEntry{
- Table: credentialTableType,
- Path: "foo",
- Type: "token",
- }
- err := c.enableCredential(me)
- if err.Error() != "token credential backend cannot be instantiated" {
- t.Fatalf("err: %v", err)
- }
-}
-
-func TestCore_DisableCredential(t *testing.T) {
- c, keys, _ := TestCoreUnsealed(t)
- c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return &NoopBackend{}, nil
- }
-
- err := c.disableCredential("foo")
- if err != nil && !strings.HasPrefix(err.Error(), "no matching backend") {
- t.Fatalf("err: %v", err)
- }
-
- me := &MountEntry{
- Table: credentialTableType,
- Path: "foo",
- Type: "noop",
- }
- err = c.enableCredential(me)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- err = c.disableCredential("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- match := c.router.MatchingMount("auth/foo/bar")
- if match != "" {
- t.Fatalf("backend present")
- }
-
- conf := &CoreConfig{
- Physical: c.physical,
- DisableMlock: true,
- }
- c2, err := NewCore(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- for i, key := range keys {
- unseal, err := TestCoreUnseal(c2, key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if i+1 == len(keys) && !unseal {
- t.Fatalf("should be unsealed")
- }
- }
-
- // Verify matching mount tables
- if !reflect.DeepEqual(c.auth, c2.auth) {
- t.Fatalf("mismatch: %v %v", c.auth, c2.auth)
- }
-}
-
-func TestCore_DisableCredential_Protected(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
- err := c.disableCredential("token")
- if err.Error() != "token credential backend cannot be disabled" {
- t.Fatalf("err: %v", err)
- }
-}
-
-func TestCore_DisableCredential_Cleanup(t *testing.T) {
- noop := &NoopBackend{
- Login: []string{"login"},
- }
- c, _, _ := TestCoreUnsealed(t)
- c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return noop, nil
- }
-
- me := &MountEntry{
- Table: credentialTableType,
- Path: "foo",
- Type: "noop",
- }
- err := c.enableCredential(me)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Store the view
- view := c.router.MatchingStorageView("auth/foo/")
-
- // Inject data
- se := &logical.StorageEntry{
- Key: "plstodelete",
- Value: []byte("test"),
- }
- if err := view.Put(se); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Generate a new token auth
- noop.Response = &logical.Response{
- Auth: &logical.Auth{
- Policies: []string{"foo"},
- },
- }
- r := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "auth/foo/login",
- }
- resp, err := c.HandleRequest(r)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp.Auth.ClientToken == "" {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Disable should cleanup
- err = c.disableCredential("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Token should be revoked
- te, err := c.tokenStore.Lookup(resp.Auth.ClientToken)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if te != nil {
- t.Fatalf("bad: %#v", te)
- }
-
- // View should be empty
- out, err := logical.CollectKeys(view)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(out) != 0 {
- t.Fatalf("bad: %#v", out)
- }
-}
-
-func TestDefaultAuthTable(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
- table := c.defaultAuthTable()
- verifyDefaultAuthTable(t, table)
-}
-
-func verifyDefaultAuthTable(t *testing.T, table *MountTable) {
- if len(table.Entries) != 1 {
- t.Fatalf("bad: %v", table.Entries)
- }
- if table.Type != credentialTableType {
- t.Fatalf("bad: %v", *table)
- }
- for idx, entry := range table.Entries {
- switch idx {
- case 0:
- if entry.Path != "token/" {
- t.Fatalf("bad: %v", entry)
- }
- if entry.Type != "token" {
- t.Fatalf("bad: %v", entry)
- }
- }
- if entry.Description == "" {
- t.Fatalf("bad: %v", entry)
- }
- if entry.UUID == "" {
- t.Fatalf("bad: %v", entry)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier.go b/vendor/github.com/hashicorp/vault/vault/barrier.go
deleted file mode 100644
index 7c9acc0..0000000
--- a/vendor/github.com/hashicorp/vault/vault/barrier.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package vault
-
-import (
- "errors"
- "time"
-
- "github.com/hashicorp/vault/logical"
-)
-
-var (
- // ErrBarrierSealed is returned if an operation is performed on
- // a sealed barrier. No operation is expected to succeed before unsealing
- ErrBarrierSealed = errors.New("Vault is sealed")
-
- // ErrBarrierAlreadyInit is returned if the barrier is already
- // initialized. This prevents a re-initialization.
- ErrBarrierAlreadyInit = errors.New("Vault is already initialized")
-
- // ErrBarrierNotInit is returned if a non-initialized barrier
- // is attempted to be unsealed.
- ErrBarrierNotInit = errors.New("Vault is not initialized")
-
- // ErrBarrierInvalidKey is returned if the Unseal key is invalid
- ErrBarrierInvalidKey = errors.New("Unseal failed, invalid key")
-)
-
-const (
- // barrierInitPath is the path used to store our init sentinel file
- barrierInitPath = "barrier/init"
-
- // keyringPath is the location of the keyring data. This is encrypted
- // by the master key.
- keyringPath = "core/keyring"
-
- // keyringUpgradePrefix is the path used to store keyring update entries.
- // When running in HA mode, the active instance will install the new key
- // and re-write the keyring. For standby instances, they need an upgrade
- // path from key N to N+1. They cannot just use the master key because
- // in the event of a rekey, that master key can no longer decrypt the keyring.
- // When key N+1 is installed, we create an entry at "prefix/N" which uses
- // encryption key N to provide the N+1 key. The standby instances scan
- // for this periodically and refresh their keyring. The upgrade keys
- // are deleted after a few minutes, but this provides enough time for the
- // standby instances to upgrade without causing any disruption.
- keyringUpgradePrefix = "core/upgrade/"
-
- // masterKeyPath is the location of the master key. This is encrypted
- // by the latest key in the keyring. This is only used by standby instances
- // to handle the case of a rekey. If the active instance does a rekey,
- // the standby instances can no longer reload the keyring since they
- // have the old master key. This key can be decrypted if you have the
- // keyring to discover the new master key. The new master key is then
- // used to reload the keyring itself.
- masterKeyPath = "core/master"
-)
-
-// SecurityBarrier is a critical component of Vault. It is used to wrap
-// an untrusted physical backend and provide a single point of encryption,
-// decryption and checksum verification. The goal is to ensure that any
-// data written to the barrier is confidential and that integrity is preserved.
-// As a real-world analogy, this is the steel and concrete wrapper around
-// a Vault. The barrier should only be Unlockable given its key.
-type SecurityBarrier interface {
- // Initialized checks if the barrier has been initialized
- // and has a master key set.
- Initialized() (bool, error)
-
- // Initialize works only if the barrier has not been initialized
- // and makes use of the given master key.
- Initialize([]byte) error
-
- // GenerateKey is used to generate a new key
- GenerateKey() ([]byte, error)
-
- // KeyLength is used to sanity check a key
- KeyLength() (int, int)
-
- // Sealed checks if the barrier has been unlocked yet. The Barrier
- // is not expected to be able to perform any CRUD until it is unsealed.
- Sealed() (bool, error)
-
- // Unseal is used to provide the master key which permits the barrier
- // to be unsealed. If the key is not correct, the barrier remains sealed.
- Unseal(key []byte) error
-
- // VerifyMaster is used to check if the given key matches the master key
- VerifyMaster(key []byte) error
-
- // SetMasterKey is used to directly set a new master key. This is used in
- // repliated scenarios due to the chicken and egg problem of reloading the
- // keyring from disk before we have the master key to decrypt it.
- SetMasterKey(key []byte) error
-
- // ReloadKeyring is used to re-read the underlying keyring.
- // This is used for HA deployments to ensure the latest keyring
- // is present in the leader.
- ReloadKeyring() error
-
- // ReloadMasterKey is used to re-read the underlying masterkey.
- // This is used for HA deployments to ensure the latest master key
- // is available for keyring reloading.
- ReloadMasterKey() error
-
- // Seal is used to re-seal the barrier. This requires the barrier to
- // be unsealed again to perform any further operations.
- Seal() error
-
- // Rotate is used to create a new encryption key. All future writes
- // should use the new key, while old values should still be decryptable.
- Rotate() (uint32, error)
-
- // CreateUpgrade creates an upgrade path key to the given term from the previous term
- CreateUpgrade(term uint32) error
-
- // DestroyUpgrade destroys the upgrade path key to the given term
- DestroyUpgrade(term uint32) error
-
- // CheckUpgrade looks for an upgrade to the current term and installs it
- CheckUpgrade() (bool, uint32, error)
-
- // ActiveKeyInfo is used to inform details about the active key
- ActiveKeyInfo() (*KeyInfo, error)
-
- // Rekey is used to change the master key used to protect the keyring
- Rekey([]byte) error
-
- // For replication we must send over the keyring, so this must be available
- Keyring() (*Keyring, error)
-
- // SecurityBarrier must provide the storage APIs
- BarrierStorage
-
- // SecurityBarrier must provide the encryption APIs
- BarrierEncryptor
-}
-
-// BarrierStorage is the storage only interface required for a Barrier.
-type BarrierStorage interface {
- // Put is used to insert or update an entry
- Put(entry *Entry) error
-
- // Get is used to fetch an entry
- Get(key string) (*Entry, error)
-
- // Delete is used to permanently delete an entry
- Delete(key string) error
-
- // List is used ot list all the keys under a given
- // prefix, up to the next prefix.
- List(prefix string) ([]string, error)
-}
-
-// BarrierEncryptor is the in memory only interface that does not actually
-// use the underlying barrier. It is used for lower level modules like the
-// Write-Ahead-Log and Merkle index to allow them to use the barrier.
-type BarrierEncryptor interface {
- Encrypt(key string, plaintext []byte) ([]byte, error)
- Decrypt(key string, ciphertext []byte) ([]byte, error)
-}
-
-// Entry is used to represent data stored by the security barrier
-type Entry struct {
- Key string
- Value []byte
-}
-
-// Logical turns the Entry into a logical storage entry.
-func (e *Entry) Logical() *logical.StorageEntry {
- return &logical.StorageEntry{
- Key: e.Key,
- Value: e.Value,
- }
-}
-
-// KeyInfo is used to convey information about the encryption key
-type KeyInfo struct {
- Term int
- InstallTime time.Time
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go
deleted file mode 100644
index 37c191b..0000000
--- a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go
+++ /dev/null
@@ -1,886 +0,0 @@
-package vault
-
-import (
- "crypto/aes"
- "crypto/cipher"
- "crypto/rand"
- "crypto/subtle"
- "encoding/binary"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/physical"
-)
-
-const (
- // initialKeyTerm is the hard coded initial key term. This is
- // used only for values that are not encrypted with the keyring.
- initialKeyTerm = 1
-
- // termSize the number of bytes used for the key term.
- termSize = 4
-)
-
-// Versions of the AESGCM storage methodology
-const (
- AESGCMVersion1 = 0x1
- AESGCMVersion2 = 0x2
-)
-
-// barrierInit is the JSON encoded value stored
-type barrierInit struct {
- Version int // Version is the current format version
- Key []byte // Key is the primary encryption key
-}
-
-// AESGCMBarrier is a SecurityBarrier implementation that uses the AES
-// cipher core and the Galois Counter Mode block mode. It defaults to
-// the golang NONCE default value of 12 and a key size of 256
-// bit. AES-GCM is high performance, and provides both confidentiality
-// and integrity.
-type AESGCMBarrier struct {
- backend physical.Backend
-
- l sync.RWMutex
- sealed bool
-
- // keyring is used to maintain all of the encryption keys, including
- // the active key used for encryption, but also prior keys to allow
- // decryption of keys encrypted under previous terms.
- keyring *Keyring
-
- // cache is used to reduce the number of AEAD constructions we do
- cache map[uint32]cipher.AEAD
- cacheLock sync.RWMutex
-
- // currentAESGCMVersionByte is prefixed to a message to allow for
- // future versioning of barrier implementations. It's var instead
- // of const to allow for testing
- currentAESGCMVersionByte byte
-}
-
-// NewAESGCMBarrier is used to construct a new barrier that uses
-// the provided physical backend for storage.
-func NewAESGCMBarrier(physical physical.Backend) (*AESGCMBarrier, error) {
- b := &AESGCMBarrier{
- backend: physical,
- sealed: true,
- cache: make(map[uint32]cipher.AEAD),
- currentAESGCMVersionByte: byte(AESGCMVersion2),
- }
- return b, nil
-}
-
-// Initialized checks if the barrier has been initialized
-// and has a master key set.
-func (b *AESGCMBarrier) Initialized() (bool, error) {
- // Read the keyring file
- out, err := b.backend.Get(keyringPath)
- if err != nil {
- return false, fmt.Errorf("failed to check for initialization: %v", err)
- }
- if out != nil {
- return true, nil
- }
-
- // Fallback, check for the old sentinel file
- out, err = b.backend.Get(barrierInitPath)
- if err != nil {
- return false, fmt.Errorf("failed to check for initialization: %v", err)
- }
- return out != nil, nil
-}
-
-// Initialize works only if the barrier has not been initialized
-// and makes use of the given master key.
-func (b *AESGCMBarrier) Initialize(key []byte) error {
- // Verify the key size
- min, max := b.KeyLength()
- if len(key) < min || len(key) > max {
- return fmt.Errorf("Key size must be %d or %d", min, max)
- }
-
- // Check if already initialized
- if alreadyInit, err := b.Initialized(); err != nil {
- return err
- } else if alreadyInit {
- return ErrBarrierAlreadyInit
- }
-
- // Generate encryption key
- encrypt, err := b.GenerateKey()
- if err != nil {
- return fmt.Errorf("failed to generate encryption key: %v", err)
- }
-
- // Create a new keyring, install the keys
- keyring := NewKeyring()
- keyring = keyring.SetMasterKey(key)
- keyring, err = keyring.AddKey(&Key{
- Term: 1,
- Version: 1,
- Value: encrypt,
- })
- if err != nil {
- return fmt.Errorf("failed to create keyring: %v", err)
- }
- return b.persistKeyring(keyring)
-}
-
-// persistKeyring is used to write out the keyring using the
-// master key to encrypt it.
-func (b *AESGCMBarrier) persistKeyring(keyring *Keyring) error {
- // Create the keyring entry
- keyringBuf, err := keyring.Serialize()
- defer memzero(keyringBuf)
- if err != nil {
- return fmt.Errorf("failed to serialize keyring: %v", err)
- }
-
- // Create the AES-GCM
- gcm, err := b.aeadFromKey(keyring.MasterKey())
- if err != nil {
- return err
- }
-
- // Encrypt the barrier init value
- value := b.encrypt(keyringPath, initialKeyTerm, gcm, keyringBuf)
-
- // Create the keyring physical entry
- pe := &physical.Entry{
- Key: keyringPath,
- Value: value,
- }
- if err := b.backend.Put(pe); err != nil {
- return fmt.Errorf("failed to persist keyring: %v", err)
- }
-
- // Serialize the master key value
- key := &Key{
- Term: 1,
- Version: 1,
- Value: keyring.MasterKey(),
- }
- keyBuf, err := key.Serialize()
- defer memzero(keyBuf)
- if err != nil {
- return fmt.Errorf("failed to serialize master key: %v", err)
- }
-
- // Encrypt the master key
- activeKey := keyring.ActiveKey()
- aead, err := b.aeadFromKey(activeKey.Value)
- if err != nil {
- return err
- }
- value = b.encrypt(masterKeyPath, activeKey.Term, aead, keyBuf)
-
- // Update the masterKeyPath for standby instances
- pe = &physical.Entry{
- Key: masterKeyPath,
- Value: value,
- }
- if err := b.backend.Put(pe); err != nil {
- return fmt.Errorf("failed to persist master key: %v", err)
- }
- return nil
-}
-
-// GenerateKey is used to generate a new key
-func (b *AESGCMBarrier) GenerateKey() ([]byte, error) {
- // Generate a 256bit key
- buf := make([]byte, 2*aes.BlockSize)
- _, err := rand.Read(buf)
- return buf, err
-}
-
-// KeyLength is used to sanity check a key
-func (b *AESGCMBarrier) KeyLength() (int, int) {
- return aes.BlockSize, 2 * aes.BlockSize
-}
-
-// Sealed checks if the barrier has been unlocked yet. The Barrier
-// is not expected to be able to perform any CRUD until it is unsealed.
-func (b *AESGCMBarrier) Sealed() (bool, error) {
- b.l.RLock()
- defer b.l.RUnlock()
- return b.sealed, nil
-}
-
-// VerifyMaster is used to check if the given key matches the master key
-func (b *AESGCMBarrier) VerifyMaster(key []byte) error {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return ErrBarrierSealed
- }
- if subtle.ConstantTimeCompare(key, b.keyring.MasterKey()) != 1 {
- return ErrBarrierInvalidKey
- }
- return nil
-}
-
-// ReloadKeyring is used to re-read the underlying keyring.
-// This is used for HA deployments to ensure the latest keyring
-// is present in the leader.
-func (b *AESGCMBarrier) ReloadKeyring() error {
- b.l.Lock()
- defer b.l.Unlock()
-
- // Create the AES-GCM
- gcm, err := b.aeadFromKey(b.keyring.MasterKey())
- if err != nil {
- return err
- }
-
- // Read in the keyring
- out, err := b.backend.Get(keyringPath)
- if err != nil {
- return fmt.Errorf("failed to check for keyring: %v", err)
- }
-
- // Ensure that the keyring exists. This should never happen,
- // and indicates something really bad has happened.
- if out == nil {
- return fmt.Errorf("keyring unexpectedly missing")
- }
-
- // Decrypt the barrier init key
- plain, err := b.decrypt(keyringPath, gcm, out.Value)
- defer memzero(plain)
- if err != nil {
- if strings.Contains(err.Error(), "message authentication failed") {
- return ErrBarrierInvalidKey
- }
- return err
- }
-
- // Recover the keyring
- keyring, err := DeserializeKeyring(plain)
- if err != nil {
- return fmt.Errorf("keyring deserialization failed: %v", err)
- }
-
- // Setup the keyring and finish
- b.keyring = keyring
- return nil
-}
-
-// ReloadMasterKey is used to re-read the underlying masterkey.
-// This is used for HA deployments to ensure the latest master key
-// is available for keyring reloading.
-func (b *AESGCMBarrier) ReloadMasterKey() error {
- // Read the masterKeyPath upgrade
- out, err := b.Get(masterKeyPath)
- if err != nil {
- return fmt.Errorf("failed to read master key path: %v", err)
- }
-
- // The masterKeyPath could be missing (backwards incompatible),
- // we can ignore this and attempt to make progress with the current
- // master key.
- if out == nil {
- return nil
- }
-
- defer memzero(out.Value)
-
- // Deserialize the master key
- key, err := DeserializeKey(out.Value)
- if err != nil {
- return fmt.Errorf("failed to deserialize key: %v", err)
- }
-
- b.l.Lock()
- defer b.l.Unlock()
-
- // Check if the master key is the same
- if subtle.ConstantTimeCompare(b.keyring.MasterKey(), key.Value) == 1 {
- return nil
- }
-
- // Update the master key
- oldKeyring := b.keyring
- b.keyring = b.keyring.SetMasterKey(key.Value)
- oldKeyring.Zeroize(false)
- return nil
-}
-
-// Unseal is used to provide the master key which permits the barrier
-// to be unsealed. If the key is not correct, the barrier remains sealed.
-func (b *AESGCMBarrier) Unseal(key []byte) error {
- b.l.Lock()
- defer b.l.Unlock()
-
- // Do nothing if already unsealed
- if !b.sealed {
- return nil
- }
-
- // Create the AES-GCM
- gcm, err := b.aeadFromKey(key)
- if err != nil {
- return err
- }
-
- // Read in the keyring
- out, err := b.backend.Get(keyringPath)
- if err != nil {
- return fmt.Errorf("failed to check for keyring: %v", err)
- }
- if out != nil {
- // Decrypt the barrier init key
- plain, err := b.decrypt(keyringPath, gcm, out.Value)
- defer memzero(plain)
- if err != nil {
- if strings.Contains(err.Error(), "message authentication failed") {
- return ErrBarrierInvalidKey
- }
- return err
- }
-
- // Recover the keyring
- keyring, err := DeserializeKeyring(plain)
- if err != nil {
- return fmt.Errorf("keyring deserialization failed: %v", err)
- }
-
- // Setup the keyring and finish
- b.keyring = keyring
- b.sealed = false
- return nil
- }
-
- // Read the barrier initialization key
- out, err = b.backend.Get(barrierInitPath)
- if err != nil {
- return fmt.Errorf("failed to check for initialization: %v", err)
- }
- if out == nil {
- return ErrBarrierNotInit
- }
-
- // Decrypt the barrier init key
- plain, err := b.decrypt(barrierInitPath, gcm, out.Value)
- if err != nil {
- if strings.Contains(err.Error(), "message authentication failed") {
- return ErrBarrierInvalidKey
- }
- return err
- }
- defer memzero(plain)
-
- // Unmarshal the barrier init
- var init barrierInit
- if err := jsonutil.DecodeJSON(plain, &init); err != nil {
- return fmt.Errorf("failed to unmarshal barrier init file")
- }
-
- // Setup a new keyring, this is for backwards compatibility
- keyringNew := NewKeyring()
- keyring := keyringNew.SetMasterKey(key)
-
- // AddKey reuses the master, so we are only zeroizing after this call
- defer keyringNew.Zeroize(false)
-
- keyring, err = keyring.AddKey(&Key{
- Term: 1,
- Version: 1,
- Value: init.Key,
- })
- if err != nil {
- return fmt.Errorf("failed to create keyring: %v", err)
- }
- if err := b.persistKeyring(keyring); err != nil {
- return err
- }
-
- // Delete the old barrier entry
- if err := b.backend.Delete(barrierInitPath); err != nil {
- return fmt.Errorf("failed to delete barrier init file: %v", err)
- }
-
- // Set the vault as unsealed
- b.keyring = keyring
- b.sealed = false
- return nil
-}
-
-// Seal is used to re-seal the barrier. This requires the barrier to
-// be unsealed again to perform any further operations.
-func (b *AESGCMBarrier) Seal() error {
- b.l.Lock()
- defer b.l.Unlock()
-
- // Remove the primary key, and seal the vault
- b.cache = make(map[uint32]cipher.AEAD)
- b.keyring.Zeroize(true)
- b.keyring = nil
- b.sealed = true
- return nil
-}
-
-// Rotate is used to create a new encryption key. All future writes
-// should use the new key, while old values should still be decryptable.
-func (b *AESGCMBarrier) Rotate() (uint32, error) {
- b.l.Lock()
- defer b.l.Unlock()
- if b.sealed {
- return 0, ErrBarrierSealed
- }
-
- // Generate a new key
- encrypt, err := b.GenerateKey()
- if err != nil {
- return 0, fmt.Errorf("failed to generate encryption key: %v", err)
- }
-
- // Get the next term
- term := b.keyring.ActiveTerm()
- newTerm := term + 1
-
- // Add a new encryption key
- newKeyring, err := b.keyring.AddKey(&Key{
- Term: newTerm,
- Version: 1,
- Value: encrypt,
- })
- if err != nil {
- return 0, fmt.Errorf("failed to add new encryption key: %v", err)
- }
-
- // Persist the new keyring
- if err := b.persistKeyring(newKeyring); err != nil {
- return 0, err
- }
-
- // Swap the keyrings
- b.keyring = newKeyring
- return newTerm, nil
-}
-
-// CreateUpgrade creates an upgrade path key to the given term from the previous term
-func (b *AESGCMBarrier) CreateUpgrade(term uint32) error {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return ErrBarrierSealed
- }
-
- // Get the key for this term
- termKey := b.keyring.TermKey(term)
- buf, err := termKey.Serialize()
- defer memzero(buf)
- if err != nil {
- return err
- }
-
- // Get the AEAD for the previous term
- prevTerm := term - 1
- primary, err := b.aeadForTerm(prevTerm)
- if err != nil {
- return err
- }
-
- key := fmt.Sprintf("%s%d", keyringUpgradePrefix, prevTerm)
- value := b.encrypt(key, prevTerm, primary, buf)
- // Create upgrade key
- pe := &physical.Entry{
- Key: key,
- Value: value,
- }
- return b.backend.Put(pe)
-}
-
-// DestroyUpgrade destroys the upgrade path key to the given term
-func (b *AESGCMBarrier) DestroyUpgrade(term uint32) error {
- path := fmt.Sprintf("%s%d", keyringUpgradePrefix, term-1)
- return b.Delete(path)
-}
-
-// CheckUpgrade looks for an upgrade to the current term and installs it
-func (b *AESGCMBarrier) CheckUpgrade() (bool, uint32, error) {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return false, 0, ErrBarrierSealed
- }
-
- // Get the current term
- activeTerm := b.keyring.ActiveTerm()
-
- // Check for an upgrade key
- upgrade := fmt.Sprintf("%s%d", keyringUpgradePrefix, activeTerm)
- entry, err := b.Get(upgrade)
- if err != nil {
- return false, 0, err
- }
-
- // Nothing to do if no upgrade
- if entry == nil {
- return false, 0, nil
- }
-
- defer memzero(entry.Value)
-
- // Deserialize the key
- key, err := DeserializeKey(entry.Value)
- if err != nil {
- return false, 0, err
- }
-
- // Upgrade from read lock to write lock
- b.l.RUnlock()
- defer b.l.RLock()
- b.l.Lock()
- defer b.l.Unlock()
-
- // Update the keyring
- newKeyring, err := b.keyring.AddKey(key)
- if err != nil {
- return false, 0, fmt.Errorf("failed to add new encryption key: %v", err)
- }
- b.keyring = newKeyring
-
- // Done!
- return true, key.Term, nil
-}
-
-// ActiveKeyInfo is used to inform details about the active key
-func (b *AESGCMBarrier) ActiveKeyInfo() (*KeyInfo, error) {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return nil, ErrBarrierSealed
- }
-
- // Determine the key install time
- term := b.keyring.ActiveTerm()
- key := b.keyring.TermKey(term)
-
- // Return the key info
- info := &KeyInfo{
- Term: int(term),
- InstallTime: key.InstallTime,
- }
- return info, nil
-}
-
-// Rekey is used to change the master key used to protect the keyring
-func (b *AESGCMBarrier) Rekey(key []byte) error {
- b.l.Lock()
- defer b.l.Unlock()
-
- newKeyring, err := b.updateMasterKeyCommon(key)
- if err != nil {
- return err
- }
-
- // Persist the new keyring
- if err := b.persistKeyring(newKeyring); err != nil {
- return err
- }
-
- // Swap the keyrings
- oldKeyring := b.keyring
- b.keyring = newKeyring
- oldKeyring.Zeroize(false)
- return nil
-}
-
-// SetMasterKey updates the keyring's in-memory master key but does not persist
-// anything to storage
-func (b *AESGCMBarrier) SetMasterKey(key []byte) error {
- b.l.Lock()
- defer b.l.Unlock()
-
- newKeyring, err := b.updateMasterKeyCommon(key)
- if err != nil {
- return err
- }
-
- // Swap the keyrings
- oldKeyring := b.keyring
- b.keyring = newKeyring
- oldKeyring.Zeroize(false)
- return nil
-}
-
-// Performs common tasks related to updating the master key; note that the lock
-// must be held before calling this function
-func (b *AESGCMBarrier) updateMasterKeyCommon(key []byte) (*Keyring, error) {
- if b.sealed {
- return nil, ErrBarrierSealed
- }
-
- // Verify the key size
- min, max := b.KeyLength()
- if len(key) < min || len(key) > max {
- return nil, fmt.Errorf("Key size must be %d or %d", min, max)
- }
-
- return b.keyring.SetMasterKey(key), nil
-}
-
-// Put is used to insert or update an entry
-func (b *AESGCMBarrier) Put(entry *Entry) error {
- defer metrics.MeasureSince([]string{"barrier", "put"}, time.Now())
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return ErrBarrierSealed
- }
-
- term := b.keyring.ActiveTerm()
- primary, err := b.aeadForTerm(term)
- if err != nil {
- return err
- }
-
- pe := &physical.Entry{
- Key: entry.Key,
- Value: b.encrypt(entry.Key, term, primary, entry.Value),
- }
- return b.backend.Put(pe)
-}
-
-// Get is used to fetch an entry
-func (b *AESGCMBarrier) Get(key string) (*Entry, error) {
- defer metrics.MeasureSince([]string{"barrier", "get"}, time.Now())
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return nil, ErrBarrierSealed
- }
-
- // Read the key from the backend
- pe, err := b.backend.Get(key)
- if err != nil {
- return nil, err
- } else if pe == nil {
- return nil, nil
- }
-
- // Decrypt the ciphertext
- plain, err := b.decryptKeyring(key, pe.Value)
- if err != nil {
- return nil, fmt.Errorf("decryption failed: %v", err)
- }
-
- // Wrap in a logical entry
- entry := &Entry{
- Key: key,
- Value: plain,
- }
- return entry, nil
-}
-
-// Delete is used to permanently delete an entry
-func (b *AESGCMBarrier) Delete(key string) error {
- defer metrics.MeasureSince([]string{"barrier", "delete"}, time.Now())
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return ErrBarrierSealed
- }
-
- return b.backend.Delete(key)
-}
-
-// List is used ot list all the keys under a given
-// prefix, up to the next prefix.
-func (b *AESGCMBarrier) List(prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"barrier", "list"}, time.Now())
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return nil, ErrBarrierSealed
- }
-
- return b.backend.List(prefix)
-}
-
-// aeadForTerm returns the AES-GCM AEAD for the given term
-func (b *AESGCMBarrier) aeadForTerm(term uint32) (cipher.AEAD, error) {
- // Check for the keyring
- keyring := b.keyring
- if keyring == nil {
- return nil, nil
- }
-
- // Check the cache for the aead
- b.cacheLock.RLock()
- aead, ok := b.cache[term]
- b.cacheLock.RUnlock()
- if ok {
- return aead, nil
- }
-
- // Read the underlying key
- key := keyring.TermKey(term)
- if key == nil {
- return nil, nil
- }
-
- // Create a new aead
- aead, err := b.aeadFromKey(key.Value)
- if err != nil {
- return nil, err
- }
-
- // Update the cache
- b.cacheLock.Lock()
- b.cache[term] = aead
- b.cacheLock.Unlock()
- return aead, nil
-}
-
-// aeadFromKey returns an AES-GCM AEAD using the given key.
-func (b *AESGCMBarrier) aeadFromKey(key []byte) (cipher.AEAD, error) {
- // Create the AES cipher
- aesCipher, err := aes.NewCipher(key)
- if err != nil {
- return nil, fmt.Errorf("failed to create cipher: %v", err)
- }
-
- // Create the GCM mode AEAD
- gcm, err := cipher.NewGCM(aesCipher)
- if err != nil {
- return nil, fmt.Errorf("failed to initialize GCM mode")
- }
- return gcm, nil
-}
-
-// encrypt is used to encrypt a value
-func (b *AESGCMBarrier) encrypt(path string, term uint32, gcm cipher.AEAD, plain []byte) []byte {
- // Allocate the output buffer with room for tern, version byte,
- // nonce, GCM tag and the plaintext
- capacity := termSize + 1 + gcm.NonceSize() + gcm.Overhead() + len(plain)
- size := termSize + 1 + gcm.NonceSize()
- out := make([]byte, size, capacity)
-
- // Set the key term
- binary.BigEndian.PutUint32(out[:4], term)
-
- // Set the version byte
- out[4] = b.currentAESGCMVersionByte
-
- // Generate a random nonce
- nonce := out[5 : 5+gcm.NonceSize()]
- rand.Read(nonce)
-
- // Seal the output
- switch b.currentAESGCMVersionByte {
- case AESGCMVersion1:
- out = gcm.Seal(out, nonce, plain, nil)
- case AESGCMVersion2:
- out = gcm.Seal(out, nonce, plain, []byte(path))
- default:
- panic("Unknown AESGCM version")
- }
-
- return out
-}
-
-// decrypt is used to decrypt a value
-func (b *AESGCMBarrier) decrypt(path string, gcm cipher.AEAD, cipher []byte) ([]byte, error) {
- // Verify the term is always just one
- term := binary.BigEndian.Uint32(cipher[:4])
- if term != initialKeyTerm {
- return nil, fmt.Errorf("term mis-match")
- }
-
- // Capture the parts
- nonce := cipher[5 : 5+gcm.NonceSize()]
- raw := cipher[5+gcm.NonceSize():]
- out := make([]byte, 0, len(raw)-gcm.NonceSize())
-
- // Verify the cipher byte and attempt to open
- switch cipher[4] {
- case AESGCMVersion1:
- return gcm.Open(out, nonce, raw, nil)
- case AESGCMVersion2:
- return gcm.Open(out, nonce, raw, []byte(path))
- default:
- return nil, fmt.Errorf("version bytes mis-match")
- }
-}
-
-// decryptKeyring is used to decrypt a value using the keyring
-func (b *AESGCMBarrier) decryptKeyring(path string, cipher []byte) ([]byte, error) {
- // Verify the term
- term := binary.BigEndian.Uint32(cipher[:4])
-
- // Get the GCM by term
- // It is expensive to do this first but it is not a
- // normal case that this won't match
- gcm, err := b.aeadForTerm(term)
- if err != nil {
- return nil, err
- }
- if gcm == nil {
- return nil, fmt.Errorf("no decryption key available for term %d", term)
- }
-
- nonce := cipher[5 : 5+gcm.NonceSize()]
- raw := cipher[5+gcm.NonceSize():]
- out := make([]byte, 0, len(raw)-gcm.NonceSize())
-
- // Attempt to open
- switch cipher[4] {
- case AESGCMVersion1:
- return gcm.Open(out, nonce, raw, nil)
- case AESGCMVersion2:
- return gcm.Open(out, nonce, raw, []byte(path))
- default:
- return nil, fmt.Errorf("version bytes mis-match")
- }
-}
-
-// Encrypt is used to encrypt in-memory for the BarrierEncryptor interface
-func (b *AESGCMBarrier) Encrypt(key string, plaintext []byte) ([]byte, error) {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return nil, ErrBarrierSealed
- }
-
- term := b.keyring.ActiveTerm()
- primary, err := b.aeadForTerm(term)
- if err != nil {
- return nil, err
- }
-
- ciphertext := b.encrypt(key, term, primary, plaintext)
- return ciphertext, nil
-}
-
-// Decrypt is used to decrypt in-memory for the BarrierEncryptor interface
-func (b *AESGCMBarrier) Decrypt(key string, ciphertext []byte) ([]byte, error) {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return nil, ErrBarrierSealed
- }
-
- // Decrypt the ciphertext
- plain, err := b.decryptKeyring(key, ciphertext)
- if err != nil {
- return nil, fmt.Errorf("decryption failed: %v", err)
- }
- return plain, nil
-}
-
-func (b *AESGCMBarrier) Keyring() (*Keyring, error) {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return nil, ErrBarrierSealed
- }
-
- return b.keyring.Clone(), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go
deleted file mode 100644
index ef0fe38..0000000
--- a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go
+++ /dev/null
@@ -1,503 +0,0 @@
-package vault
-
-import (
- "bytes"
- "encoding/json"
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/physical/inmem"
- log "github.com/mgutz/logxi/v1"
-)
-
-var (
- logger = logformat.NewVaultLogger(log.LevelTrace)
-)
-
-// mockBarrier returns a physical backend, security barrier, and master key
-func mockBarrier(t testing.TB) (physical.Backend, SecurityBarrier, []byte) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Initialize and unseal
- key, _ := b.GenerateKey()
- b.Initialize(key)
- b.Unseal(key)
- return inm, b, key
-}
-
-func TestAESGCMBarrier_Basic(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- testBarrier(t, b)
-}
-
-func TestAESGCMBarrier_Rotate(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- testBarrier_Rotate(t, b)
-}
-
-func TestAESGCMBarrier_Upgrade(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b1, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b2, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- testBarrier_Upgrade(t, b1, b2)
-}
-
-func TestAESGCMBarrier_Upgrade_Rekey(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b1, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b2, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- testBarrier_Upgrade_Rekey(t, b1, b2)
-}
-
-func TestAESGCMBarrier_Rekey(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- testBarrier_Rekey(t, b)
-}
-
-// Test an upgrade from the old (0.1) barrier/init to the new
-// core/keyring style
-func TestAESGCMBarrier_BackwardsCompatible(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Generate a barrier/init entry
- encrypt, _ := b.GenerateKey()
- init := &barrierInit{
- Version: 1,
- Key: encrypt,
- }
- buf, _ := json.Marshal(init)
-
- // Protect with master key
- master, _ := b.GenerateKey()
- gcm, _ := b.aeadFromKey(master)
- value := b.encrypt(barrierInitPath, initialKeyTerm, gcm, buf)
-
- // Write to the physical backend
- pe := &physical.Entry{
- Key: barrierInitPath,
- Value: value,
- }
- inm.Put(pe)
-
- // Create a fake key
- gcm, _ = b.aeadFromKey(encrypt)
- pe = &physical.Entry{
- Key: "test/foo",
- Value: b.encrypt("test/foo", initialKeyTerm, gcm, []byte("test")),
- }
- inm.Put(pe)
-
- // Should still be initialized
- isInit, err := b.Initialized()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !isInit {
- t.Fatalf("should be initialized")
- }
-
- // Unseal should work and migrate online
- err = b.Unseal(master)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check for migraiton
- out, err := inm.Get(barrierInitPath)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("should delete old barrier init")
- }
-
- // Should have keyring
- out, err = inm.Get(keyringPath)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("should have keyring file")
- }
-
- // Attempt to read encrypted key
- entry, err := b.Get("test/foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if string(entry.Value) != "test" {
- t.Fatalf("bad: %#v", entry)
- }
-}
-
-// Verify data sent through is encrypted
-func TestAESGCMBarrier_Confidential(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Initialize and unseal
- key, _ := b.GenerateKey()
- b.Initialize(key)
- b.Unseal(key)
-
- // Put a logical entry
- entry := &Entry{Key: "test", Value: []byte("test")}
- err = b.Put(entry)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check the physcial entry
- pe, err := inm.Get("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if pe == nil {
- t.Fatalf("missing physical entry")
- }
-
- if pe.Key != "test" {
- t.Fatalf("bad: %#v", pe)
- }
- if bytes.Equal(pe.Value, entry.Value) {
- t.Fatalf("bad: %#v", pe)
- }
-}
-
-// Verify data sent through cannot be tampered with
-func TestAESGCMBarrier_Integrity(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Initialize and unseal
- key, _ := b.GenerateKey()
- b.Initialize(key)
- b.Unseal(key)
-
- // Put a logical entry
- entry := &Entry{Key: "test", Value: []byte("test")}
- err = b.Put(entry)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Change a byte in the underlying physical entry
- pe, _ := inm.Get("test")
- pe.Value[15]++
- err = inm.Put(pe)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Read from the barrier
- _, err = b.Get("test")
- if err == nil {
- t.Fatalf("should fail!")
- }
-}
-
-// Verify data sent through cannot be moved
-func TestAESGCMBarrier_MoveIntegrityV1(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b.currentAESGCMVersionByte = AESGCMVersion1
-
- // Initialize and unseal
- key, _ := b.GenerateKey()
- err = b.Initialize(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- err = b.Unseal(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Put a logical entry
- entry := &Entry{Key: "test", Value: []byte("test")}
- err = b.Put(entry)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Change the location of the underlying physical entry
- pe, _ := inm.Get("test")
- pe.Key = "moved"
- err = inm.Put(pe)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Read from the barrier
- _, err = b.Get("moved")
- if err != nil {
- t.Fatalf("should succeed with version 1!")
- }
-}
-
-func TestAESGCMBarrier_MoveIntegrityV2(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b.currentAESGCMVersionByte = AESGCMVersion2
-
- // Initialize and unseal
- key, _ := b.GenerateKey()
- err = b.Initialize(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- err = b.Unseal(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Put a logical entry
- entry := &Entry{Key: "test", Value: []byte("test")}
- err = b.Put(entry)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Change the location of the underlying physical entry
- pe, _ := inm.Get("test")
- pe.Key = "moved"
- err = inm.Put(pe)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Read from the barrier
- _, err = b.Get("moved")
- if err == nil {
- t.Fatalf("should fail with version 2!")
- }
-}
-
-func TestAESGCMBarrier_UpgradeV1toV2(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b.currentAESGCMVersionByte = AESGCMVersion1
-
- // Initialize and unseal
- key, _ := b.GenerateKey()
- err = b.Initialize(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- err = b.Unseal(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Put a logical entry
- entry := &Entry{Key: "test", Value: []byte("test")}
- err = b.Put(entry)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Seal
- err = b.Seal()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Open again as version 2
- b, err = NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b.currentAESGCMVersionByte = AESGCMVersion2
-
- // Unseal
- err = b.Unseal(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check successful decryption
- _, err = b.Get("test")
- if err != nil {
- t.Fatalf("Upgrade unsuccessful")
- }
-}
-
-func TestEncrypt_Unique(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- key, _ := b.GenerateKey()
- b.Initialize(key)
- b.Unseal(key)
-
- if b.keyring == nil {
- t.Fatalf("barrier is sealed")
- }
-
- entry := &Entry{Key: "test", Value: []byte("test")}
- term := b.keyring.ActiveTerm()
- primary, _ := b.aeadForTerm(term)
-
- first := b.encrypt("test", term, primary, entry.Value)
- second := b.encrypt("test", term, primary, entry.Value)
-
- if bytes.Equal(first, second) == true {
- t.Fatalf("improper random seeding detected")
- }
-}
-
-func TestInitialize_KeyLength(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- long := []byte("ThisKeyDoesNotHaveTheRightLength!")
- middle := []byte("ThisIsASecretKeyAndMore")
- short := []byte("Key")
-
- err = b.Initialize(long)
-
- if err == nil {
- t.Fatalf("key length protection failed")
- }
-
- err = b.Initialize(middle)
-
- if err == nil {
- t.Fatalf("key length protection failed")
- }
-
- err = b.Initialize(short)
-
- if err == nil {
- t.Fatalf("key length protection failed")
- }
-}
-
-func TestEncrypt_BarrierEncryptor(t *testing.T) {
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- b, err := NewAESGCMBarrier(inm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Initialize and unseal
- key, _ := b.GenerateKey()
- b.Initialize(key)
- b.Unseal(key)
-
- cipher, err := b.Encrypt("foo", []byte("quick brown fox"))
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- plain, err := b.Decrypt("foo", cipher)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if string(plain) != "quick brown fox" {
- t.Fatalf("bad: %s", plain)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_test.go b/vendor/github.com/hashicorp/vault/vault/barrier_test.go
deleted file mode 100644
index e40c011..0000000
--- a/vendor/github.com/hashicorp/vault/vault/barrier_test.go
+++ /dev/null
@@ -1,531 +0,0 @@
-package vault
-
-import (
- "reflect"
- "testing"
- "time"
-)
-
-func testBarrier(t *testing.T, b SecurityBarrier) {
- // Should not be initialized
- init, err := b.Initialized()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if init {
- t.Fatalf("should not be initialized")
- }
-
- // Should start sealed
- sealed, err := b.Sealed()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !sealed {
- t.Fatalf("should be sealed")
- }
-
- // Sealing should be a no-op
- if err := b.Seal(); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // All operations should fail
- e := &Entry{Key: "test", Value: []byte("test")}
- if err := b.Put(e); err != ErrBarrierSealed {
- t.Fatalf("err: %v", err)
- }
- if _, err := b.Get("test"); err != ErrBarrierSealed {
- t.Fatalf("err: %v", err)
- }
- if err := b.Delete("test"); err != ErrBarrierSealed {
- t.Fatalf("err: %v", err)
- }
- if _, err := b.List(""); err != ErrBarrierSealed {
- t.Fatalf("err: %v", err)
- }
-
- // Get a new key
- key, err := b.GenerateKey()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Validate minimum key length
- min, max := b.KeyLength()
- if min < 16 {
- t.Fatalf("minimum key size too small: %d", min)
- }
- if max < min {
- t.Fatalf("maximum key size smaller than min")
- }
-
- // Unseal should not work
- if err := b.Unseal(key); err != ErrBarrierNotInit {
- t.Fatalf("err: %v", err)
- }
-
- // Initialize the vault
- if err := b.Initialize(key); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Double Initialize should fail
- if err := b.Initialize(key); err != ErrBarrierAlreadyInit {
- t.Fatalf("err: %v", err)
- }
-
- // Should be initialized
- init, err = b.Initialized()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !init {
- t.Fatalf("should be initialized")
- }
-
- // Should still be sealed
- sealed, err = b.Sealed()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !sealed {
- t.Fatalf("should sealed")
- }
-
- // Unseal should work
- if err := b.Unseal(key); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Unseal should no-op when done twice
- if err := b.Unseal(key); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should no longer be sealed
- sealed, err = b.Sealed()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if sealed {
- t.Fatalf("should be unsealed")
- }
-
- // Verify the master key
- if err := b.VerifyMaster(key); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Operations should work
- out, err := b.Get("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("bad: %v", out)
- }
-
- // List should have only "core/"
- keys, err := b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 1 || keys[0] != "core/" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Try to write
- if err := b.Put(e); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should be equal
- out, err = b.Get("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !reflect.DeepEqual(out, e) {
- t.Fatalf("bad: %v exp: %v", out, e)
- }
-
- // List should show the items
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 2 {
- t.Fatalf("bad: %v", keys)
- }
- if keys[0] != "core/" || keys[1] != "test" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Delete should clear
- err = b.Delete("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Double Delete is fine
- err = b.Delete("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should be nil
- out, err = b.Get("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("bad: %v", out)
- }
-
- // List should have nothing
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 1 || keys[0] != "core/" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Add the item back
- if err := b.Put(e); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Reseal should prevent any updates
- if err := b.Seal(); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // No access allowed
- if _, err := b.Get("test"); err != ErrBarrierSealed {
- t.Fatalf("err: %v", err)
- }
-
- // Unseal should work
- if err := b.Unseal(key); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should be equal
- out, err = b.Get("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !reflect.DeepEqual(out, e) {
- t.Fatalf("bad: %v exp: %v", out, e)
- }
-
- // Final cleanup
- err = b.Delete("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Reseal should prevent any updates
- if err := b.Seal(); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Modify the key
- key[0]++
-
- // Unseal should fail
- if err := b.Unseal(key); err != ErrBarrierInvalidKey {
- t.Fatalf("err: %v", err)
- }
-}
-
-func testBarrier_Rotate(t *testing.T, b SecurityBarrier) {
- // Initialize the barrier
- key, _ := b.GenerateKey()
- b.Initialize(key)
- err := b.Unseal(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check the key info
- info, err := b.ActiveKeyInfo()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if info.Term != 1 {
- t.Fatalf("Bad term: %d", info.Term)
- }
- if time.Since(info.InstallTime) > time.Second {
- t.Fatalf("Bad install: %v", info.InstallTime)
- }
- first := info.InstallTime
-
- // Write a key
- e1 := &Entry{Key: "test", Value: []byte("test")}
- if err := b.Put(e1); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Rotate the encryption key
- newTerm, err := b.Rotate()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if newTerm != 2 {
- t.Fatalf("bad: %v", newTerm)
- }
-
- // Check the key info
- info, err = b.ActiveKeyInfo()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if info.Term != 2 {
- t.Fatalf("Bad term: %d", info.Term)
- }
- if !info.InstallTime.After(first) {
- t.Fatalf("Bad install: %v", info.InstallTime)
- }
-
- // Write another key
- e2 := &Entry{Key: "foo", Value: []byte("test")}
- if err := b.Put(e2); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Reading both should work
- out, err := b.Get(e1.Key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("bad: %v", out)
- }
-
- out, err = b.Get(e2.Key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Seal and unseal
- err = b.Seal()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- err = b.Unseal(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Reading both should work
- out, err = b.Get(e1.Key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("bad: %v", out)
- }
-
- out, err = b.Get(e2.Key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Should be fine to reload keyring
- err = b.ReloadKeyring()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-}
-
-func testBarrier_Rekey(t *testing.T, b SecurityBarrier) {
- // Initialize the barrier
- key, _ := b.GenerateKey()
- b.Initialize(key)
- err := b.Unseal(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Write a key
- e1 := &Entry{Key: "test", Value: []byte("test")}
- if err := b.Put(e1); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Verify the master key
- if err := b.VerifyMaster(key); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Rekey to a new key
- newKey, _ := b.GenerateKey()
- err = b.Rekey(newKey)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Verify the old master key
- if err := b.VerifyMaster(key); err != ErrBarrierInvalidKey {
- t.Fatalf("err: %v", err)
- }
-
- // Verify the new master key
- if err := b.VerifyMaster(newKey); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Reading should work
- out, err := b.Get(e1.Key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Seal
- err = b.Seal()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Unseal with old key should fail
- err = b.Unseal(key)
- if err == nil {
- t.Fatalf("unseal should fail")
- }
-
- // Unseal with new keys should work
- err = b.Unseal(newKey)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Reading should work
- out, err = b.Get(e1.Key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Should be fine to reload keyring
- err = b.ReloadKeyring()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-}
-
-func testBarrier_Upgrade(t *testing.T, b1, b2 SecurityBarrier) {
- // Initialize the barrier
- key, _ := b1.GenerateKey()
- b1.Initialize(key)
- err := b1.Unseal(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- err = b2.Unseal(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Rotate the encryption key
- newTerm, err := b1.Rotate()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Create upgrade path
- err = b1.CreateUpgrade(newTerm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check for an upgrade
- did, updated, err := b2.CheckUpgrade()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !did || updated != newTerm {
- t.Fatalf("failed to upgrade")
- }
-
- // Should have no upgrades pending
- did, updated, err = b2.CheckUpgrade()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if did {
- t.Fatalf("should not have upgrade")
- }
-
- // Rotate the encryption key
- newTerm, err = b1.Rotate()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Create upgrade path
- err = b1.CreateUpgrade(newTerm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Destroy upgrade path
- err = b1.DestroyUpgrade(newTerm)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should have no upgrades pending
- did, updated, err = b2.CheckUpgrade()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if did {
- t.Fatalf("should not have upgrade")
- }
-}
-
-func testBarrier_Upgrade_Rekey(t *testing.T, b1, b2 SecurityBarrier) {
- // Initialize the barrier
- key, _ := b1.GenerateKey()
- b1.Initialize(key)
- err := b1.Unseal(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- err = b2.Unseal(key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Rekey to a new key
- newKey, _ := b1.GenerateKey()
- err = b1.Rekey(newKey)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Reload the master key
- err = b2.ReloadMasterKey()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Reload the keyring
- err = b2.ReloadKeyring()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_view.go b/vendor/github.com/hashicorp/vault/vault/barrier_view.go
deleted file mode 100644
index 3512aba..0000000
--- a/vendor/github.com/hashicorp/vault/vault/barrier_view.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package vault
-
-import (
- "errors"
- "strings"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// BarrierView wraps a SecurityBarrier and ensures all access is automatically
-// prefixed. This is used to prevent anyone with access to the view to access
-// any data in the durable storage outside of their prefix. Conceptually this
-// is like a "chroot" into the barrier.
-//
-// BarrierView implements logical.Storage so it can be passed in as the
-// durable storage mechanism for logical views.
-type BarrierView struct {
- barrier BarrierStorage
- prefix string
- readonly bool
-}
-
-var (
- ErrRelativePath = errors.New("relative paths not supported")
-)
-
-// NewBarrierView takes an underlying security barrier and returns
-// a view of it that can only operate with the given prefix.
-func NewBarrierView(barrier BarrierStorage, prefix string) *BarrierView {
- return &BarrierView{
- barrier: barrier,
- prefix: prefix,
- }
-}
-
-// sanityCheck is used to perform a sanity check on a key
-func (v *BarrierView) sanityCheck(key string) error {
- if strings.Contains(key, "..") {
- return ErrRelativePath
- }
- return nil
-}
-
-// logical.Storage impl.
-func (v *BarrierView) List(prefix string) ([]string, error) {
- if err := v.sanityCheck(prefix); err != nil {
- return nil, err
- }
- return v.barrier.List(v.expandKey(prefix))
-}
-
-// logical.Storage impl.
-func (v *BarrierView) Get(key string) (*logical.StorageEntry, error) {
- if err := v.sanityCheck(key); err != nil {
- return nil, err
- }
- entry, err := v.barrier.Get(v.expandKey(key))
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
- if entry != nil {
- entry.Key = v.truncateKey(entry.Key)
- }
-
- return &logical.StorageEntry{
- Key: entry.Key,
- Value: entry.Value,
- }, nil
-}
-
-// logical.Storage impl.
-func (v *BarrierView) Put(entry *logical.StorageEntry) error {
- if err := v.sanityCheck(entry.Key); err != nil {
- return err
- }
-
- expandedKey := v.expandKey(entry.Key)
-
- if v.readonly {
- return logical.ErrReadOnly
- }
-
- nested := &Entry{
- Key: expandedKey,
- Value: entry.Value,
- }
- return v.barrier.Put(nested)
-}
-
-// logical.Storage impl.
-func (v *BarrierView) Delete(key string) error {
- if err := v.sanityCheck(key); err != nil {
- return err
- }
-
- expandedKey := v.expandKey(key)
-
- if v.readonly {
- return logical.ErrReadOnly
- }
-
- return v.barrier.Delete(expandedKey)
-}
-
-// SubView constructs a nested sub-view using the given prefix
-func (v *BarrierView) SubView(prefix string) *BarrierView {
- sub := v.expandKey(prefix)
- return &BarrierView{barrier: v.barrier, prefix: sub, readonly: v.readonly}
-}
-
-// expandKey is used to expand to the full key path with the prefix
-func (v *BarrierView) expandKey(suffix string) string {
- return v.prefix + suffix
-}
-
-// truncateKey is used to remove the prefix of the key
-func (v *BarrierView) truncateKey(full string) string {
- return strings.TrimPrefix(full, v.prefix)
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_view_test.go b/vendor/github.com/hashicorp/vault/vault/barrier_view_test.go
deleted file mode 100644
index 59a0c38..0000000
--- a/vendor/github.com/hashicorp/vault/vault/barrier_view_test.go
+++ /dev/null
@@ -1,316 +0,0 @@
-package vault
-
-import (
- "reflect"
- "sort"
- "testing"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func TestBarrierView_impl(t *testing.T) {
- var _ logical.Storage = new(BarrierView)
-}
-
-func TestBarrierView_spec(t *testing.T) {
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "foo/")
- logical.TestStorage(t, view)
-}
-
-func TestBarrierView_BadKeysKeys(t *testing.T) {
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "foo/")
-
- _, err := view.List("../")
- if err == nil {
- t.Fatalf("expected error")
- }
-
- _, err = view.Get("../")
- if err == nil {
- t.Fatalf("expected error")
- }
-
- err = view.Delete("../foo")
- if err == nil {
- t.Fatalf("expected error")
- }
-
- le := &logical.StorageEntry{
- Key: "../foo",
- Value: []byte("test"),
- }
- err = view.Put(le)
- if err == nil {
- t.Fatalf("expected error")
- }
-}
-
-func TestBarrierView(t *testing.T) {
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "foo/")
-
- // Write a key outside of foo/
- entry := &Entry{Key: "test", Value: []byte("test")}
- if err := barrier.Put(entry); err != nil {
- t.Fatalf("bad: %v", err)
- }
-
- // List should have no visibility
- keys, err := view.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 0 {
- t.Fatalf("bad: %v", err)
- }
-
- // Get should have no visibility
- out, err := view.Get("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Try to put the same entry via the view
- if err := view.Put(entry.Logical()); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check it is nested
- entry, err = barrier.Get("foo/test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if entry == nil {
- t.Fatalf("missing nested foo/test")
- }
-
- // Delete nested
- if err := view.Delete("test"); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check the nested key
- entry, err = barrier.Get("foo/test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if entry != nil {
- t.Fatalf("nested foo/test should be gone")
- }
-
- // Check the non-nested key
- entry, err = barrier.Get("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if entry == nil {
- t.Fatalf("root test missing")
- }
-}
-
-func TestBarrierView_SubView(t *testing.T) {
- _, barrier, _ := mockBarrier(t)
- root := NewBarrierView(barrier, "foo/")
- view := root.SubView("bar/")
-
- // List should have no visibility
- keys, err := view.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 0 {
- t.Fatalf("bad: %v", err)
- }
-
- // Get should have no visibility
- out, err := view.Get("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Try to put the same entry via the view
- entry := &logical.StorageEntry{Key: "test", Value: []byte("test")}
- if err := view.Put(entry); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check it is nested
- bout, err := barrier.Get("foo/bar/test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if bout == nil {
- t.Fatalf("missing nested foo/bar/test")
- }
-
- // Check for visibility in root
- out, err = root.Get("bar/test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("missing nested bar/test")
- }
-
- // Delete nested
- if err := view.Delete("test"); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check the nested key
- bout, err = barrier.Get("foo/bar/test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if bout != nil {
- t.Fatalf("nested foo/bar/test should be gone")
- }
-}
-
-func TestBarrierView_Scan(t *testing.T) {
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "view/")
-
- expect := []string{}
- ent := []*logical.StorageEntry{
- &logical.StorageEntry{Key: "foo", Value: []byte("test")},
- &logical.StorageEntry{Key: "zip", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/bar", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/zap", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/bar/baz", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/bar/zoo", Value: []byte("test")},
- }
-
- for _, e := range ent {
- expect = append(expect, e.Key)
- if err := view.Put(e); err != nil {
- t.Fatalf("err: %v", err)
- }
- }
-
- var out []string
- cb := func(path string) {
- out = append(out, path)
- }
-
- // Collect the keys
- if err := logical.ScanView(view, cb); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- sort.Strings(out)
- sort.Strings(expect)
- if !reflect.DeepEqual(out, expect) {
- t.Fatalf("out: %v expect: %v", out, expect)
- }
-}
-
-func TestBarrierView_CollectKeys(t *testing.T) {
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "view/")
-
- expect := []string{}
- ent := []*logical.StorageEntry{
- &logical.StorageEntry{Key: "foo", Value: []byte("test")},
- &logical.StorageEntry{Key: "zip", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/bar", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/zap", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/bar/baz", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/bar/zoo", Value: []byte("test")},
- }
-
- for _, e := range ent {
- expect = append(expect, e.Key)
- if err := view.Put(e); err != nil {
- t.Fatalf("err: %v", err)
- }
- }
-
- // Collect the keys
- out, err := logical.CollectKeys(view)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- sort.Strings(out)
- sort.Strings(expect)
- if !reflect.DeepEqual(out, expect) {
- t.Fatalf("out: %v expect: %v", out, expect)
- }
-}
-
-func TestBarrierView_ClearView(t *testing.T) {
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "view/")
-
- expect := []string{}
- ent := []*logical.StorageEntry{
- &logical.StorageEntry{Key: "foo", Value: []byte("test")},
- &logical.StorageEntry{Key: "zip", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/bar", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/zap", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/bar/baz", Value: []byte("test")},
- &logical.StorageEntry{Key: "foo/bar/zoo", Value: []byte("test")},
- }
-
- for _, e := range ent {
- expect = append(expect, e.Key)
- if err := view.Put(e); err != nil {
- t.Fatalf("err: %v", err)
- }
- }
-
- // Clear the keys
- if err := logical.ClearView(view); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Collect the keys
- out, err := logical.CollectKeys(view)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(out) != 0 {
- t.Fatalf("have keys: %#v", out)
- }
-}
-func TestBarrierView_Readonly(t *testing.T) {
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "foo/")
-
- // Add a key before enabling read-only
- entry := &Entry{Key: "test", Value: []byte("test")}
- if err := view.Put(entry.Logical()); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Enable read only mode
- view.readonly = true
-
- // Put should fail in readonly mode
- if err := view.Put(entry.Logical()); err != logical.ErrReadOnly {
- t.Fatalf("err: %v", err)
- }
-
- // Delete nested
- if err := view.Delete("test"); err != logical.ErrReadOnly {
- t.Fatalf("err: %v", err)
- }
-
- // Check the non-nested key
- e, err := view.Get("test")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if e == nil {
- t.Fatalf("key test missing")
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/capabilities.go b/vendor/github.com/hashicorp/vault/vault/capabilities.go
deleted file mode 100644
index 6994e52..0000000
--- a/vendor/github.com/hashicorp/vault/vault/capabilities.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package vault
-
-import (
- "sort"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// Capabilities is used to fetch the capabilities of the given token on the given path
-func (c *Core) Capabilities(token, path string) ([]string, error) {
- if path == "" {
- return nil, &logical.StatusBadRequest{Err: "missing path"}
- }
-
- if token == "" {
- return nil, &logical.StatusBadRequest{Err: "missing token"}
- }
-
- te, err := c.tokenStore.Lookup(token)
- if err != nil {
- return nil, err
- }
- if te == nil {
- return nil, &logical.StatusBadRequest{Err: "invalid token"}
- }
-
- if te.Policies == nil {
- return []string{DenyCapability}, nil
- }
-
- var policies []*Policy
- for _, tePolicy := range te.Policies {
- policy, err := c.policyStore.GetPolicy(tePolicy)
- if err != nil {
- return nil, err
- }
- policies = append(policies, policy)
- }
-
- if len(policies) == 0 {
- return []string{DenyCapability}, nil
- }
-
- acl, err := NewACL(policies)
- if err != nil {
- return nil, err
- }
-
- capabilities := acl.Capabilities(path)
- sort.Strings(capabilities)
- return capabilities, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/capabilities_test.go b/vendor/github.com/hashicorp/vault/vault/capabilities_test.go
deleted file mode 100644
index 0db7eac..0000000
--- a/vendor/github.com/hashicorp/vault/vault/capabilities_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package vault
-
-import (
- "reflect"
- "testing"
-)
-
-func TestCapabilities(t *testing.T) {
- c, _, token := TestCoreUnsealed(t)
-
- actual, err := c.Capabilities(token, "path")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- expected := []string{"root"}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
- }
-
- // Create a policy
- policy, _ := Parse(aclPolicy)
- err = c.policyStore.SetPolicy(policy)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Create a token for the policy
- ent := &TokenEntry{
- ID: "capabilitiestoken",
- Path: "testpath",
- Policies: []string{"dev"},
- }
- if err := c.tokenStore.create(ent); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- actual, err = c.Capabilities("capabilitiestoken", "foo/bar")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- expected = []string{"create", "read", "sudo"}
- if !reflect.DeepEqual(actual, expected) {
- t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/cluster.go b/vendor/github.com/hashicorp/vault/vault/cluster.go
deleted file mode 100644
index beca4b9..0000000
--- a/vendor/github.com/hashicorp/vault/vault/cluster.go
+++ /dev/null
@@ -1,469 +0,0 @@
-package vault
-
-import (
- "bytes"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/tls"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/json"
- "errors"
- "fmt"
- "math/big"
- mathrand "math/rand"
- "net"
- "net/http"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/jsonutil"
-)
-
-const (
- // Storage path where the local cluster name and identifier are stored
- coreLocalClusterInfoPath = "core/cluster/local/info"
-
- corePrivateKeyTypeP521 = "p521"
- corePrivateKeyTypeED25519 = "ed25519"
-
- // Internal so as not to log a trace message
- IntNoForwardingHeaderName = "X-Vault-Internal-No-Request-Forwarding"
-)
-
-var (
- ErrCannotForward = errors.New("cannot forward request; no connection or address not known")
-)
-
-// This can be one of a few key types so the different params may or may not be filled
-type clusterKeyParams struct {
- Type string `json:"type" structs:"type" mapstructure:"type"`
- X *big.Int `json:"x" structs:"x" mapstructure:"x"`
- Y *big.Int `json:"y" structs:"y" mapstructure:"y"`
- D *big.Int `json:"d" structs:"d" mapstructure:"d"`
-}
-
-// Structure representing the storage entry that holds cluster information
-type Cluster struct {
- // Name of the cluster
- Name string `json:"name" structs:"name" mapstructure:"name"`
-
- // Identifier of the cluster
- ID string `json:"id" structs:"id" mapstructure:"id"`
-}
-
-// Cluster fetches the details of the local cluster. This method errors out
-// when Vault is sealed.
-func (c *Core) Cluster() (*Cluster, error) {
- var cluster Cluster
-
- // Fetch the storage entry. This call fails when Vault is sealed.
- entry, err := c.barrier.Get(coreLocalClusterInfoPath)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return &cluster, nil
- }
-
- // Decode the cluster information
- if err = jsonutil.DecodeJSON(entry.Value, &cluster); err != nil {
- return nil, fmt.Errorf("failed to decode cluster details: %v", err)
- }
-
- // Set in config file
- if c.clusterName != "" {
- cluster.Name = c.clusterName
- }
-
- return &cluster, nil
-}
-
-// This sets our local cluster cert and private key based on the advertisement.
-// It also ensures the cert is in our local cluster cert pool.
-func (c *Core) loadLocalClusterTLS(adv activeAdvertisement) (retErr error) {
- defer func() {
- if retErr != nil {
- c.clusterParamsLock.Lock()
- c.localClusterCert = nil
- c.localClusterPrivateKey = nil
- c.localClusterParsedCert = nil
- c.clusterParamsLock.Unlock()
-
- c.requestForwardingConnectionLock.Lock()
- c.clearForwardingClients()
- c.requestForwardingConnectionLock.Unlock()
- }
- }()
-
- switch {
- case adv.ClusterAddr == "":
- // Clustering disabled on the server, don't try to look for params
- return nil
-
- case adv.ClusterKeyParams == nil:
- c.logger.Error("core: no key params found loading local cluster TLS information")
- return fmt.Errorf("no local cluster key params found")
-
- case adv.ClusterKeyParams.X == nil, adv.ClusterKeyParams.Y == nil, adv.ClusterKeyParams.D == nil:
- c.logger.Error("core: failed to parse local cluster key due to missing params")
- return fmt.Errorf("failed to parse local cluster key")
-
- case adv.ClusterKeyParams.Type != corePrivateKeyTypeP521:
- c.logger.Error("core: unknown local cluster key type", "key_type", adv.ClusterKeyParams.Type)
- return fmt.Errorf("failed to find valid local cluster key type")
-
- case adv.ClusterCert == nil || len(adv.ClusterCert) == 0:
- c.logger.Error("core: no local cluster cert found")
- return fmt.Errorf("no local cluster cert found")
-
- }
-
- // Prevent data races with the TLS parameters
- c.clusterParamsLock.Lock()
- defer c.clusterParamsLock.Unlock()
-
- c.localClusterPrivateKey = &ecdsa.PrivateKey{
- PublicKey: ecdsa.PublicKey{
- Curve: elliptic.P521(),
- X: adv.ClusterKeyParams.X,
- Y: adv.ClusterKeyParams.Y,
- },
- D: adv.ClusterKeyParams.D,
- }
-
- c.localClusterCert = adv.ClusterCert
-
- cert, err := x509.ParseCertificate(c.localClusterCert)
- if err != nil {
- c.logger.Error("core: failed parsing local cluster certificate", "error", err)
- return fmt.Errorf("error parsing local cluster certificate: %v", err)
- }
-
- c.localClusterParsedCert = cert
-
- return nil
-}
-
-// setupCluster creates storage entries for holding Vault cluster information.
-// Entries will be created only if they are not already present. If clusterName
-// is not supplied, this method will auto-generate it.
-func (c *Core) setupCluster() error {
- // Prevent data races with the TLS parameters
- c.clusterParamsLock.Lock()
- defer c.clusterParamsLock.Unlock()
-
- // Check if storage index is already present or not
- cluster, err := c.Cluster()
- if err != nil {
- c.logger.Error("core: failed to get cluster details", "error", err)
- return err
- }
-
- var modified bool
-
- if cluster == nil {
- cluster = &Cluster{}
- }
-
- if cluster.Name == "" {
- // If cluster name is not supplied, generate one
- if c.clusterName == "" {
- c.logger.Trace("core: cluster name not found/set, generating new")
- clusterNameBytes, err := uuid.GenerateRandomBytes(4)
- if err != nil {
- c.logger.Error("core: failed to generate cluster name", "error", err)
- return err
- }
-
- c.clusterName = fmt.Sprintf("vault-cluster-%08x", clusterNameBytes)
- }
-
- cluster.Name = c.clusterName
- if c.logger.IsDebug() {
- c.logger.Debug("core: cluster name set", "name", cluster.Name)
- }
- modified = true
- }
-
- if cluster.ID == "" {
- c.logger.Trace("core: cluster ID not found, generating new")
- // Generate a clusterID
- cluster.ID, err = uuid.GenerateUUID()
- if err != nil {
- c.logger.Error("core: failed to generate cluster identifier", "error", err)
- return err
- }
- if c.logger.IsDebug() {
- c.logger.Debug("core: cluster ID set", "id", cluster.ID)
- }
- modified = true
- }
-
- // If we're using HA, generate server-to-server parameters
- if c.ha != nil {
- // Create a private key
- if c.localClusterPrivateKey == nil {
- c.logger.Trace("core: generating cluster private key")
- key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
- if err != nil {
- c.logger.Error("core: failed to generate local cluster key", "error", err)
- return err
- }
-
- c.localClusterPrivateKey = key
- }
-
- // Create a certificate
- if c.localClusterCert == nil {
- c.logger.Trace("core: generating local cluster certificate")
-
- host, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- host = fmt.Sprintf("fw-%s", host)
- template := &x509.Certificate{
- Subject: pkix.Name{
- CommonName: host,
- },
- DNSNames: []string{host},
- ExtKeyUsage: []x509.ExtKeyUsage{
- x509.ExtKeyUsageServerAuth,
- x509.ExtKeyUsageClientAuth,
- },
- KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
- SerialNumber: big.NewInt(mathrand.Int63()),
- NotBefore: time.Now().Add(-30 * time.Second),
- // 30 years of single-active uptime ought to be enough for anybody
- NotAfter: time.Now().Add(262980 * time.Hour),
- BasicConstraintsValid: true,
- IsCA: true,
- }
-
- certBytes, err := x509.CreateCertificate(rand.Reader, template, template, c.localClusterPrivateKey.Public(), c.localClusterPrivateKey)
- if err != nil {
- c.logger.Error("core: error generating self-signed cert", "error", err)
- return errwrap.Wrapf("unable to generate local cluster certificate: {{err}}", err)
- }
-
- parsedCert, err := x509.ParseCertificate(certBytes)
- if err != nil {
- c.logger.Error("core: error parsing self-signed cert", "error", err)
- return errwrap.Wrapf("error parsing generated certificate: {{err}}", err)
- }
-
- c.localClusterCert = certBytes
- c.localClusterParsedCert = parsedCert
- }
- }
-
- if modified {
- // Encode the cluster information into as a JSON string
- rawCluster, err := json.Marshal(cluster)
- if err != nil {
- c.logger.Error("core: failed to encode cluster details", "error", err)
- return err
- }
-
- // Store it
- err = c.barrier.Put(&Entry{
- Key: coreLocalClusterInfoPath,
- Value: rawCluster,
- })
- if err != nil {
- c.logger.Error("core: failed to store cluster details", "error", err)
- return err
- }
- }
-
- return nil
-}
-
-// startClusterListener starts cluster request listeners during postunseal. It
-// is assumed that the state lock is held while this is run. Right now this
-// only starts forwarding listeners; it's TBD whether other request types will
-// be built in the same mechanism or started independently.
-func (c *Core) startClusterListener() error {
- if c.clusterAddr == "" {
- c.logger.Info("core: clustering disabled, not starting listeners")
- return nil
- }
-
- if c.clusterListenerAddrs == nil || len(c.clusterListenerAddrs) == 0 {
- c.logger.Warn("core: clustering not disabled but no addresses to listen on")
- return fmt.Errorf("cluster addresses not found")
- }
-
- c.logger.Trace("core: starting cluster listeners")
-
- err := c.startForwarding()
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// stopClusterListener stops any existing listeners during preseal. It is
-// assumed that the state lock is held while this is run.
-func (c *Core) stopClusterListener() {
- if c.clusterAddr == "" {
- c.logger.Trace("core: clustering disabled, not stopping listeners")
- return
- }
-
- if !c.clusterListenersRunning {
- c.logger.Info("core: cluster listeners not running")
- return
- }
- c.logger.Info("core: stopping cluster listeners")
-
- // Tell the goroutine managing the listeners to perform the shutdown
- // process
- c.clusterListenerShutdownCh <- struct{}{}
-
- // The reason for this loop-de-loop is that we may be unsealing again
- // quickly, and if the listeners are not yet closed, we will get socket
- // bind errors. This ensures proper ordering.
- c.logger.Trace("core: waiting for success notification while stopping cluster listeners")
- <-c.clusterListenerShutdownSuccessCh
- c.clusterListenersRunning = false
-
- c.logger.Info("core: cluster listeners successfully shut down")
-}
-
-// ClusterTLSConfig generates a TLS configuration based on the local/replicated
-// cluster key and cert.
-func (c *Core) ClusterTLSConfig() (*tls.Config, error) {
- // Using lookup functions allows just-in-time lookup of the current state
- // of clustering as connections come and go
-
- serverLookup := func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
- switch {
- default:
- var localCert bytes.Buffer
-
- c.clusterParamsLock.RLock()
- localCert.Write(c.localClusterCert)
- localSigner := c.localClusterPrivateKey
- parsedCert := c.localClusterParsedCert
- c.clusterParamsLock.RUnlock()
-
- if localCert.Len() == 0 {
- return nil, fmt.Errorf("got forwarding connection but no local cert")
- }
-
- //c.logger.Trace("core: performing cert name lookup", "hello_server_name", clientHello.ServerName, "local_cluster_cert_name", parsedCert.Subject.CommonName)
-
- return &tls.Certificate{
- Certificate: [][]byte{localCert.Bytes()},
- PrivateKey: localSigner,
- Leaf: parsedCert,
- }, nil
-
- }
-
- return nil, nil
- }
-
- clientLookup := func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- //c.logger.Trace("core: performing client cert lookup")
-
- if len(requestInfo.AcceptableCAs) != 1 {
- return nil, fmt.Errorf("expected only a single acceptable CA")
- }
- var localCert bytes.Buffer
-
- c.clusterParamsLock.RLock()
- localCert.Write(c.localClusterCert)
- localSigner := c.localClusterPrivateKey
- parsedCert := c.localClusterParsedCert
- c.clusterParamsLock.RUnlock()
-
- if localCert.Len() == 0 {
- return nil, fmt.Errorf("forwarding connection client but no local cert")
- }
-
- return &tls.Certificate{
- Certificate: [][]byte{localCert.Bytes()},
- PrivateKey: localSigner,
- Leaf: parsedCert,
- }, nil
- }
-
- serverConfigLookup := func(clientHello *tls.ClientHelloInfo) (*tls.Config, error) {
- //c.logger.Trace("core: performing server config lookup")
- for _, v := range clientHello.SupportedProtos {
- switch v {
- case "h2", requestForwardingALPN:
- default:
- return nil, fmt.Errorf("unknown ALPN proto %s", v)
- }
- }
-
- caPool := x509.NewCertPool()
-
- ret := &tls.Config{
- ClientAuth: tls.RequireAndVerifyClientCert,
- GetCertificate: serverLookup,
- GetClientCertificate: clientLookup,
- MinVersion: tls.VersionTLS12,
- RootCAs: caPool,
- ClientCAs: caPool,
- NextProtos: clientHello.SupportedProtos,
- CipherSuites: c.clusterCipherSuites,
- }
-
- switch {
- default:
- c.clusterParamsLock.RLock()
- parsedCert := c.localClusterParsedCert
- c.clusterParamsLock.RUnlock()
-
- if parsedCert == nil {
- return nil, fmt.Errorf("forwarding connection client but no local cert")
- }
-
- caPool.AddCert(parsedCert)
- }
-
- return ret, nil
- }
-
- tlsConfig := &tls.Config{
- ClientAuth: tls.RequireAndVerifyClientCert,
- GetCertificate: serverLookup,
- GetClientCertificate: clientLookup,
- GetConfigForClient: serverConfigLookup,
- MinVersion: tls.VersionTLS12,
- CipherSuites: c.clusterCipherSuites,
- }
-
- var localCert bytes.Buffer
- c.clusterParamsLock.RLock()
- localCert.Write(c.localClusterCert)
- parsedCert := c.localClusterParsedCert
- c.clusterParamsLock.RUnlock()
-
- if parsedCert != nil {
- tlsConfig.ServerName = parsedCert.Subject.CommonName
-
- pool := x509.NewCertPool()
- pool.AddCert(parsedCert)
- tlsConfig.RootCAs = pool
- tlsConfig.ClientCAs = pool
- }
-
- return tlsConfig, nil
-}
-
-func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) {
- c.clusterListenerAddrs = addrs
-}
-
-func (c *Core) SetClusterHandler(handler http.Handler) {
- c.clusterHandler = handler
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/cluster_test.go b/vendor/github.com/hashicorp/vault/vault/cluster_test.go
deleted file mode 100644
index 9bc5b69..0000000
--- a/vendor/github.com/hashicorp/vault/vault/cluster_test.go
+++ /dev/null
@@ -1,419 +0,0 @@
-package vault
-
-import (
- "bytes"
- "crypto/tls"
- "fmt"
- "net"
- "net/http"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/physical/inmem"
- log "github.com/mgutz/logxi/v1"
-)
-
-var (
- clusterTestPausePeriod = 2 * time.Second
-)
-
-func TestClusterFetching(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
-
- err := c.setupCluster()
- if err != nil {
- t.Fatal(err)
- }
-
- cluster, err := c.Cluster()
- if err != nil {
- t.Fatal(err)
- }
- // Test whether expected values are found
- if cluster == nil || cluster.Name == "" || cluster.ID == "" {
- t.Fatalf("cluster information missing: cluster: %#v", cluster)
- }
-}
-
-func TestClusterHAFetching(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- redirect := "http://127.0.0.1:8200"
-
- inm, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- inmha, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- c, err := NewCore(&CoreConfig{
- Physical: inm,
- HAPhysical: inmha.(physical.HABackend),
- RedirectAddr: redirect,
- DisableMlock: true,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- keys, _ := TestCoreInit(t, c)
- for _, key := range keys {
- if _, err := TestCoreUnseal(c, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Verify unsealed
- sealed, err := c.Sealed()
- if err != nil {
- t.Fatalf("err checking seal status: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
-
- // Wait for core to become active
- TestWaitActive(t, c)
-
- cluster, err := c.Cluster()
- if err != nil {
- t.Fatal(err)
- }
- // Test whether expected values are found
- if cluster == nil || cluster.Name == "" || cluster.ID == "" {
- t.Fatalf("cluster information missing: cluster:%#v", cluster)
- }
-}
-
-func TestCluster_ListenForRequests(t *testing.T) {
- // Make this nicer for tests
- manualStepDownSleepPeriod = 5 * time.Second
-
- cluster := NewTestCluster(t, nil, &TestClusterOptions{
- KeepStandbysSealed: true,
- })
- cluster.Start()
- defer cluster.Cleanup()
- cores := cluster.Cores
-
- // Wait for core to become active
- TestWaitActive(t, cores[0].Core)
-
- // Use this to have a valid config after sealing since ClusterTLSConfig returns nil
- var lastTLSConfig *tls.Config
- checkListenersFunc := func(expectFail bool) {
- tlsConfig, err := cores[0].ClusterTLSConfig()
- if err != nil {
- if err.Error() != consts.ErrSealed.Error() {
- t.Fatal(err)
- }
- tlsConfig = lastTLSConfig
- } else {
- tlsConfig.NextProtos = []string{"h2"}
- lastTLSConfig = tlsConfig
- }
-
- for _, ln := range cores[0].Listeners {
- tcpAddr, ok := ln.Addr().(*net.TCPAddr)
- if !ok {
- t.Fatalf("%s not a TCP port", tcpAddr.String())
- }
-
- conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", tcpAddr.IP.String(), tcpAddr.Port+105), tlsConfig)
- if err != nil {
- if expectFail {
- t.Logf("testing %s:%d unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105)
- continue
- }
- t.Fatalf("error: %v\nlisteners are\n%#v\n%#v\n", err, cores[0].Listeners[0], cores[0].Listeners[1])
- }
- if expectFail {
- t.Fatalf("testing %s:%d not unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105)
- }
- err = conn.Handshake()
- if err != nil {
- t.Fatal(err)
- }
- connState := conn.ConnectionState()
- switch {
- case connState.Version != tls.VersionTLS12:
- t.Fatal("version mismatch")
- case connState.NegotiatedProtocol != "h2" || !connState.NegotiatedProtocolIsMutual:
- t.Fatal("bad protocol negotiation")
- }
- t.Logf("testing %s:%d successful", tcpAddr.IP.String(), tcpAddr.Port+105)
- }
- }
-
- time.Sleep(clusterTestPausePeriod)
- checkListenersFunc(false)
-
- err := cores[0].StepDown(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/step-down",
- ClientToken: cluster.RootToken,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // StepDown doesn't wait during actual preSeal so give time for listeners
- // to close
- time.Sleep(clusterTestPausePeriod)
- checkListenersFunc(true)
-
- // After this period it should be active again
- time.Sleep(manualStepDownSleepPeriod)
- checkListenersFunc(false)
-
- err = cores[0].Seal(cluster.RootToken)
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(clusterTestPausePeriod)
- // After sealing it should be inactive again
- checkListenersFunc(true)
-}
-
-func TestCluster_ForwardRequests(t *testing.T) {
- // Make this nicer for tests
- manualStepDownSleepPeriod = 5 * time.Second
-
- testCluster_ForwardRequestsCommon(t)
-}
-
-func testCluster_ForwardRequestsCommon(t *testing.T) {
- cluster := NewTestCluster(t, nil, nil)
- cores := cluster.Cores
- cores[0].Handler.(*http.ServeMux).HandleFunc("/core1", func(w http.ResponseWriter, req *http.Request) {
- w.Header().Add("Content-Type", "application/json")
- w.WriteHeader(201)
- w.Write([]byte("core1"))
- })
- cores[1].Handler.(*http.ServeMux).HandleFunc("/core2", func(w http.ResponseWriter, req *http.Request) {
- w.Header().Add("Content-Type", "application/json")
- w.WriteHeader(202)
- w.Write([]byte("core2"))
- })
- cores[2].Handler.(*http.ServeMux).HandleFunc("/core3", func(w http.ResponseWriter, req *http.Request) {
- w.Header().Add("Content-Type", "application/json")
- w.WriteHeader(203)
- w.Write([]byte("core3"))
- })
- cluster.Start()
- defer cluster.Cleanup()
-
- root := cluster.RootToken
-
- // Wait for core to become active
- TestWaitActive(t, cores[0].Core)
-
- // Test forwarding a request. Since we're going directly from core to core
- // with no fallback we know that if it worked, request handling is working
- testCluster_ForwardRequests(t, cores[1], root, "core1")
- testCluster_ForwardRequests(t, cores[2], root, "core1")
-
- //
- // Now we do a bunch of round-robining. The point is to make sure that as
- // nodes come and go, we can always successfully forward to the active
- // node.
- //
-
- // Ensure active core is cores[1] and test
- err := cores[0].StepDown(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/step-down",
- ClientToken: root,
- })
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(clusterTestPausePeriod)
- _ = cores[2].StepDown(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/step-down",
- ClientToken: root,
- })
- time.Sleep(clusterTestPausePeriod)
- TestWaitActive(t, cores[1].Core)
- testCluster_ForwardRequests(t, cores[0], root, "core2")
- testCluster_ForwardRequests(t, cores[2], root, "core2")
-
- // Ensure active core is cores[2] and test
- err = cores[1].StepDown(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/step-down",
- ClientToken: root,
- })
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(clusterTestPausePeriod)
- _ = cores[0].StepDown(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/step-down",
- ClientToken: root,
- })
- time.Sleep(clusterTestPausePeriod)
- TestWaitActive(t, cores[2].Core)
- testCluster_ForwardRequests(t, cores[0], root, "core3")
- testCluster_ForwardRequests(t, cores[1], root, "core3")
-
- // Ensure active core is cores[0] and test
- err = cores[2].StepDown(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/step-down",
- ClientToken: root,
- })
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(clusterTestPausePeriod)
- _ = cores[1].StepDown(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/step-down",
- ClientToken: root,
- })
- time.Sleep(clusterTestPausePeriod)
- TestWaitActive(t, cores[0].Core)
- testCluster_ForwardRequests(t, cores[1], root, "core1")
- testCluster_ForwardRequests(t, cores[2], root, "core1")
-
- // Ensure active core is cores[1] and test
- err = cores[0].StepDown(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/step-down",
- ClientToken: root,
- })
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(clusterTestPausePeriod)
- _ = cores[2].StepDown(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/step-down",
- ClientToken: root,
- })
- time.Sleep(clusterTestPausePeriod)
- TestWaitActive(t, cores[1].Core)
- testCluster_ForwardRequests(t, cores[0], root, "core2")
- testCluster_ForwardRequests(t, cores[2], root, "core2")
-
- // Ensure active core is cores[2] and test
- err = cores[1].StepDown(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/step-down",
- ClientToken: root,
- })
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(clusterTestPausePeriod)
- _ = cores[0].StepDown(&logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/step-down",
- ClientToken: root,
- })
- time.Sleep(clusterTestPausePeriod)
- TestWaitActive(t, cores[2].Core)
- testCluster_ForwardRequests(t, cores[0], root, "core3")
- testCluster_ForwardRequests(t, cores[1], root, "core3")
-}
-
-func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, rootToken, remoteCoreID string) {
- standby, err := c.Standby()
- if err != nil {
- t.Fatal(err)
- }
- if !standby {
- t.Fatal("expected core to be standby")
- }
-
- // We need to call Leader as that refreshes the connection info
- isLeader, _, _, err := c.Leader()
- if err != nil {
- panic(err.Error())
- t.Fatal(err)
- }
- if isLeader {
- t.Fatal("core should not be leader")
- }
-
- bodBuf := bytes.NewReader([]byte(`{ "foo": "bar", "zip": "zap" }`))
- req, err := http.NewRequest("PUT", "https://pushit.real.good:9281/"+remoteCoreID, bodBuf)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Add("X-Vault-Token", rootToken)
-
- statusCode, header, respBytes, err := c.ForwardRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if header == nil {
- t.Fatal("err: expected at least a content-type header")
- }
- if header.Get("Content-Type") != "application/json" {
- t.Fatalf("bad content-type: %s", header.Get("Content-Type"))
- }
-
- body := string(respBytes)
-
- if body != remoteCoreID {
- t.Fatalf("expected %s, got %s", remoteCoreID, body)
- }
- switch body {
- case "core1":
- if statusCode != 201 {
- t.Fatal("bad response")
- }
- case "core2":
- if statusCode != 202 {
- t.Fatal("bad response")
- }
- case "core3":
- if statusCode != 203 {
- t.Fatal("bad response")
- }
- }
-}
-
-func TestCluster_CustomCipherSuites(t *testing.T) {
- cluster := NewTestCluster(t, &CoreConfig{
- ClusterCipherSuites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
- }, nil)
- cluster.Start()
- defer cluster.Cleanup()
- core := cluster.Cores[0]
-
- // Wait for core to become active
- TestWaitActive(t, core.Core)
-
- tlsConf, err := core.Core.ClusterTLSConfig()
- if err != nil {
- t.Fatal(err)
- }
-
- conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", core.Listeners[0].Address.IP.String(), core.Listeners[0].Address.Port+105), tlsConf)
- if err != nil {
- t.Fatal(err)
- }
- defer conn.Close()
- err = conn.Handshake()
- if err != nil {
- t.Fatal(err)
- }
- if conn.ConnectionState().CipherSuite != tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 {
- var availCiphers string
- for _, cipher := range core.clusterCipherSuites {
- availCiphers += fmt.Sprintf("%x ", cipher)
- }
- t.Fatalf("got bad negotiated cipher %x, core-set suites are %s", conn.ConnectionState().CipherSuite, availCiphers)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/core.go b/vendor/github.com/hashicorp/vault/vault/core.go
deleted file mode 100644
index 1259c03..0000000
--- a/vendor/github.com/hashicorp/vault/vault/core.go
+++ /dev/null
@@ -1,1928 +0,0 @@
-package vault
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/subtle"
- "crypto/x509"
- "errors"
- "fmt"
- "net"
- "net/http"
- "net/url"
- "path/filepath"
- "sync"
- "time"
-
- "github.com/armon/go-metrics"
- log "github.com/mgutz/logxi/v1"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/helper/mlock"
- "github.com/hashicorp/vault/helper/reload"
- "github.com/hashicorp/vault/helper/tlsutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/shamir"
- cache "github.com/patrickmn/go-cache"
-)
-
-const (
- // coreLockPath is the path used to acquire a coordinating lock
- // for a highly-available deploy.
- coreLockPath = "core/lock"
-
- // The poison pill is used as a check during certain scenarios to indicate
- // to standby nodes that they should seal
- poisonPillPath = "core/poison-pill"
-
- // coreLeaderPrefix is the prefix used for the UUID that contains
- // the currently elected leader.
- coreLeaderPrefix = "core/leader/"
-
- // lockRetryInterval is the interval we re-attempt to acquire the
- // HA lock if an error is encountered
- lockRetryInterval = 10 * time.Second
-
- // leaderCheckInterval is how often a standby checks for a new leader
- leaderCheckInterval = 2500 * time.Millisecond
-
- // keyRotateCheckInterval is how often a standby checks for a key
- // rotation taking place.
- keyRotateCheckInterval = 30 * time.Second
-
- // keyRotateGracePeriod is how long we allow an upgrade path
- // for standby instances before we delete the upgrade keys
- keyRotateGracePeriod = 2 * time.Minute
-
- // leaderPrefixCleanDelay is how long to wait between deletions
- // of orphaned leader keys, to prevent slamming the backend.
- leaderPrefixCleanDelay = 200 * time.Millisecond
-
- // coreKeyringCanaryPath is used as a canary to indicate to replicated
- // clusters that they need to perform a rekey operation synchronously; this
- // isn't keyring-canary to avoid ignoring it when ignoring core/keyring
- coreKeyringCanaryPath = "core/canary-keyring"
-)
-
-var (
- // ErrAlreadyInit is returned if the core is already
- // initialized. This prevents a re-initialization.
- ErrAlreadyInit = errors.New("Vault is already initialized")
-
- // ErrNotInit is returned if a non-initialized barrier
- // is attempted to be unsealed.
- ErrNotInit = errors.New("Vault is not initialized")
-
- // ErrInternalError is returned when we don't want to leak
- // any information about an internal error
- ErrInternalError = errors.New("internal error")
-
- // ErrHANotEnabled is returned if the operation only makes sense
- // in an HA setting
- ErrHANotEnabled = errors.New("Vault is not configured for highly-available mode")
-
- // manualStepDownSleepPeriod is how long to sleep after a user-initiated
- // step down of the active node, to prevent instantly regrabbing the lock.
- // It's var not const so that tests can manipulate it.
- manualStepDownSleepPeriod = 10 * time.Second
-
- // Functions only in the Enterprise version
- enterprisePostUnseal = enterprisePostUnsealImpl
- enterprisePreSeal = enterprisePreSealImpl
- startReplication = startReplicationImpl
- stopReplication = stopReplicationImpl
- LastRemoteWAL = lastRemoteWALImpl
-)
-
-// NonFatalError is an error that can be returned during NewCore that should be
-// displayed but not cause a program exit
-type NonFatalError struct {
- Err error
-}
-
-func (e *NonFatalError) WrappedErrors() []error {
- return []error{e.Err}
-}
-
-func (e *NonFatalError) Error() string {
- return e.Err.Error()
-}
-
-// ErrInvalidKey is returned if there is a user-based error with a provided
-// unseal key. This will be shown to the user, so should not contain
-// information that is sensitive.
-type ErrInvalidKey struct {
- Reason string
-}
-
-func (e *ErrInvalidKey) Error() string {
- return fmt.Sprintf("invalid key: %v", e.Reason)
-}
-
-type activeAdvertisement struct {
- RedirectAddr string `json:"redirect_addr"`
- ClusterAddr string `json:"cluster_addr,omitempty"`
- ClusterCert []byte `json:"cluster_cert,omitempty"`
- ClusterKeyParams *clusterKeyParams `json:"cluster_key_params,omitempty"`
-}
-
-type unlockInformation struct {
- Parts [][]byte
- Nonce string
-}
-
-// Core is used as the central manager of Vault activity. It is the primary point of
-// interface for API handlers and is responsible for managing the logical and physical
-// backends, router, security barrier, and audit trails.
-type Core struct {
- // N.B.: This is used to populate a dev token down replication, as
- // otherwise, after replication is started, a dev would have to go through
- // the generate-root process simply to talk to the new follower cluster.
- devToken string
-
- // HABackend may be available depending on the physical backend
- ha physical.HABackend
-
- // redirectAddr is the address we advertise as leader if held
- redirectAddr string
-
- // clusterAddr is the address we use for clustering
- clusterAddr string
-
- // physical backend is the un-trusted backend with durable data
- physical physical.Backend
-
- // Our Seal, for seal configuration information
- seal Seal
-
- // barrier is the security barrier wrapping the physical backend
- barrier SecurityBarrier
-
- // router is responsible for managing the mount points for logical backends.
- router *Router
-
- // logicalBackends is the mapping of backends to use for this core
- logicalBackends map[string]logical.Factory
-
- // credentialBackends is the mapping of backends to use for this core
- credentialBackends map[string]logical.Factory
-
- // auditBackends is the mapping of backends to use for this core
- auditBackends map[string]audit.Factory
-
- // stateLock protects mutable state
- stateLock sync.RWMutex
- sealed bool
-
- standby bool
- standbyDoneCh chan struct{}
- standbyStopCh chan struct{}
- manualStepDownCh chan struct{}
-
- // unlockInfo has the keys provided to Unseal until the threshold number of parts is available, as well as the operation nonce
- unlockInfo *unlockInformation
-
- // generateRootProgress holds the shares until we reach enough
- // to verify the master key
- generateRootConfig *GenerateRootConfig
- generateRootProgress [][]byte
- generateRootLock sync.Mutex
-
- // These variables holds the config and shares we have until we reach
- // enough to verify the appropriate master key. Note that the same lock is
- // used; this isn't time-critical so this shouldn't be a problem.
- barrierRekeyConfig *SealConfig
- barrierRekeyProgress [][]byte
- recoveryRekeyConfig *SealConfig
- recoveryRekeyProgress [][]byte
- rekeyLock sync.RWMutex
-
- // mounts is loaded after unseal since it is a protected
- // configuration
- mounts *MountTable
-
- // mountsLock is used to ensure that the mounts table does not
- // change underneath a calling function
- mountsLock sync.RWMutex
-
- // auth is loaded after unseal since it is a protected
- // configuration
- auth *MountTable
-
- // authLock is used to ensure that the auth table does not
- // change underneath a calling function
- authLock sync.RWMutex
-
- // audit is loaded after unseal since it is a protected
- // configuration
- audit *MountTable
-
- // auditLock is used to ensure that the audit table does not
- // change underneath a calling function
- auditLock sync.RWMutex
-
- // auditBroker is used to ingest the audit events and fan
- // out into the configured audit backends
- auditBroker *AuditBroker
-
- // auditedHeaders is used to configure which http headers
- // can be output in the audit logs
- auditedHeaders *AuditedHeadersConfig
-
- // systemBarrierView is the barrier view for the system backend
- systemBarrierView *BarrierView
-
- // expiration manager is used for managing LeaseIDs,
- // renewal, expiration and revocation
- expiration *ExpirationManager
-
- // rollback manager is used to run rollbacks periodically
- rollback *RollbackManager
-
- // policy store is used to manage named ACL policies
- policyStore *PolicyStore
-
- // token store is used to manage authentication tokens
- tokenStore *TokenStore
-
- // metricsCh is used to stop the metrics streaming
- metricsCh chan struct{}
-
- // metricsMutex is used to prevent a race condition between
- // metrics emission and sealing leading to a nil pointer
- metricsMutex sync.Mutex
-
- defaultLeaseTTL time.Duration
- maxLeaseTTL time.Duration
-
- logger log.Logger
-
- // cachingDisabled indicates whether caches are disabled
- cachingDisabled bool
-
- // reloadFuncs is a map containing reload functions
- reloadFuncs map[string][]reload.ReloadFunc
-
- // reloadFuncsLock controls access to the funcs
- reloadFuncsLock sync.RWMutex
-
- // wrappingJWTKey is the key used for generating JWTs containing response
- // wrapping information
- wrappingJWTKey *ecdsa.PrivateKey
-
- //
- // Cluster information
- //
- // Name
- clusterName string
- // Specific cipher suites to use for clustering, if any
- clusterCipherSuites []uint16
- // Used to modify cluster parameters
- clusterParamsLock sync.RWMutex
- // The private key stored in the barrier used for establishing
- // mutually-authenticated connections between Vault cluster members
- localClusterPrivateKey crypto.Signer
- // The local cluster cert
- localClusterCert []byte
- // The parsed form of the local cluster cert
- localClusterParsedCert *x509.Certificate
- // The TCP addresses we should use for clustering
- clusterListenerAddrs []*net.TCPAddr
- // The handler to use for request forwarding
- clusterHandler http.Handler
- // Tracks whether cluster listeners are running, e.g. it's safe to send a
- // shutdown down the channel
- clusterListenersRunning bool
- // Shutdown channel for the cluster listeners
- clusterListenerShutdownCh chan struct{}
- // Shutdown success channel. We need this to be done serially to ensure
- // that binds are removed before they might be reinstated.
- clusterListenerShutdownSuccessCh chan struct{}
- // Write lock used to ensure that we don't have multiple connections adjust
- // this value at the same time
- requestForwardingConnectionLock sync.RWMutex
- // Most recent leader UUID. Used to avoid repeatedly JSON parsing the same
- // values.
- clusterLeaderUUID string
- // Most recent leader redirect addr
- clusterLeaderRedirectAddr string
- // Most recent leader cluster addr
- clusterLeaderClusterAddr string
- // Lock for the cluster leader values
- clusterLeaderParamsLock sync.RWMutex
- // Info on cluster members
- clusterPeerClusterAddrsCache *cache.Cache
- // The grpc Server that handles server RPC calls
- rpcServer *grpc.Server
- // The context for the client
- rpcClientConnContext context.Context
- // The function for canceling the client connection
- rpcClientConnCancelFunc context.CancelFunc
- // The grpc ClientConn for RPC calls
- rpcClientConn *grpc.ClientConn
- // The grpc forwarding client
- rpcForwardingClient *forwardingClient
-
- // CORS Information
- corsConfig *CORSConfig
-
- // replicationState keeps the current replication state cached for quick
- // lookup
- replicationState consts.ReplicationState
-
- // uiEnabled indicates whether Vault Web UI is enabled or not
- uiEnabled bool
-
- // rawEnabled indicates whether the Raw endpoint is enabled
- rawEnabled bool
-
- // pluginDirectory is the location vault will look for plugin binaries
- pluginDirectory string
-
- // pluginCatalog is used to manage plugin configurations
- pluginCatalog *PluginCatalog
-
- enableMlock bool
-
- // This can be used to trigger operations to stop running when Vault is
- // going to be shut down, stepped down, or sealed
- requestContext context.Context
- requestContextCancelFunc context.CancelFunc
-}
-
-// CoreConfig is used to parameterize a core
-type CoreConfig struct {
- DevToken string `json:"dev_token" structs:"dev_token" mapstructure:"dev_token"`
-
- LogicalBackends map[string]logical.Factory `json:"logical_backends" structs:"logical_backends" mapstructure:"logical_backends"`
-
- CredentialBackends map[string]logical.Factory `json:"credential_backends" structs:"credential_backends" mapstructure:"credential_backends"`
-
- AuditBackends map[string]audit.Factory `json:"audit_backends" structs:"audit_backends" mapstructure:"audit_backends"`
-
- Physical physical.Backend `json:"physical" structs:"physical" mapstructure:"physical"`
-
- // May be nil, which disables HA operations
- HAPhysical physical.HABackend `json:"ha_physical" structs:"ha_physical" mapstructure:"ha_physical"`
-
- Seal Seal `json:"seal" structs:"seal" mapstructure:"seal"`
-
- Logger log.Logger `json:"logger" structs:"logger" mapstructure:"logger"`
-
- // Disables the LRU cache on the physical backend
- DisableCache bool `json:"disable_cache" structs:"disable_cache" mapstructure:"disable_cache"`
-
- // Disables mlock syscall
- DisableMlock bool `json:"disable_mlock" structs:"disable_mlock" mapstructure:"disable_mlock"`
-
- // Custom cache size for the LRU cache on the physical backend, or zero for default
- CacheSize int `json:"cache_size" structs:"cache_size" mapstructure:"cache_size"`
-
- // Set as the leader address for HA
- RedirectAddr string `json:"redirect_addr" structs:"redirect_addr" mapstructure:"redirect_addr"`
-
- // Set as the cluster address for HA
- ClusterAddr string `json:"cluster_addr" structs:"cluster_addr" mapstructure:"cluster_addr"`
-
- DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
-
- MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
-
- ClusterName string `json:"cluster_name" structs:"cluster_name" mapstructure:"cluster_name"`
-
- ClusterCipherSuites string `json:"cluster_cipher_suites" structs:"cluster_cipher_suites" mapstructure:"cluster_cipher_suites"`
-
- EnableUI bool `json:"ui" structs:"ui" mapstructure:"ui"`
-
- // Enable the raw endpoint
- EnableRaw bool `json:"enable_raw" structs:"enable_raw" mapstructure:"enable_raw"`
-
- PluginDirectory string `json:"plugin_directory" structs:"plugin_directory" mapstructure:"plugin_directory"`
-
- ReloadFuncs *map[string][]reload.ReloadFunc
- ReloadFuncsLock *sync.RWMutex
-}
-
-// NewCore is used to construct a new core
-func NewCore(conf *CoreConfig) (*Core, error) {
- if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
- if conf.RedirectAddr == "" {
- return nil, fmt.Errorf("missing redirect address")
- }
- }
-
- if conf.DefaultLeaseTTL == 0 {
- conf.DefaultLeaseTTL = defaultLeaseTTL
- }
- if conf.MaxLeaseTTL == 0 {
- conf.MaxLeaseTTL = maxLeaseTTL
- }
- if conf.DefaultLeaseTTL > conf.MaxLeaseTTL {
- return nil, fmt.Errorf("cannot have DefaultLeaseTTL larger than MaxLeaseTTL")
- }
-
- // Validate the advertise addr if its given to us
- if conf.RedirectAddr != "" {
- u, err := url.Parse(conf.RedirectAddr)
- if err != nil {
- return nil, fmt.Errorf("redirect address is not valid url: %s", err)
- }
-
- if u.Scheme == "" {
- return nil, fmt.Errorf("redirect address must include scheme (ex. 'http')")
- }
- }
-
- // Make a default logger if not provided
- if conf.Logger == nil {
- conf.Logger = logformat.NewVaultLogger(log.LevelTrace)
- }
-
- // Setup the core
- c := &Core{
- devToken: conf.DevToken,
- physical: conf.Physical,
- redirectAddr: conf.RedirectAddr,
- clusterAddr: conf.ClusterAddr,
- seal: conf.Seal,
- router: NewRouter(),
- sealed: true,
- standby: true,
- logger: conf.Logger,
- defaultLeaseTTL: conf.DefaultLeaseTTL,
- maxLeaseTTL: conf.MaxLeaseTTL,
- cachingDisabled: conf.DisableCache,
- clusterName: conf.ClusterName,
- clusterListenerShutdownCh: make(chan struct{}),
- clusterListenerShutdownSuccessCh: make(chan struct{}),
- clusterPeerClusterAddrsCache: cache.New(3*heartbeatInterval, time.Second),
- enableMlock: !conf.DisableMlock,
- rawEnabled: conf.EnableRaw,
- }
-
- if conf.ClusterCipherSuites != "" {
- suites, err := tlsutil.ParseCiphers(conf.ClusterCipherSuites)
- if err != nil {
- return nil, errwrap.Wrapf("error parsing cluster cipher suites: {{err}}", err)
- }
- c.clusterCipherSuites = suites
- }
-
- c.corsConfig = &CORSConfig{core: c}
- // Load CORS config and provide a value for the core field.
-
- _, txnOK := conf.Physical.(physical.Transactional)
- // Wrap the physical backend in a cache layer if enabled and not already wrapped
- if _, isCache := conf.Physical.(*physical.Cache); !conf.DisableCache && !isCache {
- if txnOK {
- c.physical = physical.NewTransactionalCache(conf.Physical, conf.CacheSize, conf.Logger)
- } else {
- c.physical = physical.NewCache(conf.Physical, conf.CacheSize, conf.Logger)
- }
- }
-
- if !conf.DisableMlock {
- // Ensure our memory usage is locked into physical RAM
- if err := mlock.LockMemory(); err != nil {
- return nil, fmt.Errorf(
- "Failed to lock memory: %v\n\n"+
- "This usually means that the mlock syscall is not available.\n"+
- "Vault uses mlock to prevent memory from being swapped to\n"+
- "disk. This requires root privileges as well as a machine\n"+
- "that supports mlock. Please enable mlock on your system or\n"+
- "disable Vault from using it. To disable Vault from using it,\n"+
- "set the `disable_mlock` configuration option in your configuration\n"+
- "file.",
- err)
- }
- }
-
- var err error
- if conf.PluginDirectory != "" {
- c.pluginDirectory, err = filepath.Abs(conf.PluginDirectory)
- if err != nil {
- return nil, fmt.Errorf("core setup failed, could not verify plugin directory: %v", err)
- }
- }
-
- // Construct a new AES-GCM barrier
- c.barrier, err = NewAESGCMBarrier(c.physical)
- if err != nil {
- return nil, fmt.Errorf("barrier setup failed: %v", err)
- }
-
- if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
- c.ha = conf.HAPhysical
- }
-
- // We create the funcs here, then populate the given config with it so that
- // the caller can share state
- conf.ReloadFuncsLock = &c.reloadFuncsLock
- c.reloadFuncsLock.Lock()
- c.reloadFuncs = make(map[string][]reload.ReloadFunc)
- c.reloadFuncsLock.Unlock()
- conf.ReloadFuncs = &c.reloadFuncs
-
- // Setup the backends
- logicalBackends := make(map[string]logical.Factory)
- for k, f := range conf.LogicalBackends {
- logicalBackends[k] = f
- }
- _, ok := logicalBackends["kv"]
- if !ok {
- logicalBackends["kv"] = PassthroughBackendFactory
- }
- logicalBackends["cubbyhole"] = CubbyholeBackendFactory
- logicalBackends["system"] = func(config *logical.BackendConfig) (logical.Backend, error) {
- b := NewSystemBackend(c)
- if err := b.Setup(config); err != nil {
- return nil, err
- }
- return b, nil
- }
- c.logicalBackends = logicalBackends
-
- credentialBackends := make(map[string]logical.Factory)
- for k, f := range conf.CredentialBackends {
- credentialBackends[k] = f
- }
- credentialBackends["token"] = func(config *logical.BackendConfig) (logical.Backend, error) {
- return NewTokenStore(c, config)
- }
- c.credentialBackends = credentialBackends
-
- auditBackends := make(map[string]audit.Factory)
- for k, f := range conf.AuditBackends {
- auditBackends[k] = f
- }
- c.auditBackends = auditBackends
-
- if c.seal == nil {
- c.seal = &DefaultSeal{}
- }
- c.seal.SetCore(c)
-
- // Attempt unsealing with stored keys; if there are no stored keys this
- // returns nil, otherwise returns nil or an error
- storedKeyErr := c.UnsealWithStoredKeys()
-
- return c, storedKeyErr
-}
-
-// Shutdown is invoked when the Vault instance is about to be terminated. It
-// should not be accessible as part of an API call as it will cause an availability
-// problem. It is only used to gracefully quit in the case of HA so that failover
-// happens as quickly as possible.
-func (c *Core) Shutdown() error {
- c.stateLock.RLock()
- // Tell any requests that know about this to stop
- if c.requestContextCancelFunc != nil {
- c.requestContextCancelFunc()
- }
- c.stateLock.RUnlock()
-
- // Seal the Vault, causes a leader stepdown
- retChan := make(chan error)
- go func() {
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
- retChan <- c.sealInternal()
- }()
-
- return <-retChan
-}
-
-// CORSConfig returns the current CORS configuration
-func (c *Core) CORSConfig() *CORSConfig {
- return c.corsConfig
-}
-
-// LookupToken returns the properties of the token from the token store. This
-// is particularly useful to fetch the accessor of the client token and get it
-// populated in the logical request along with the client token. The accessor
-// of the client token can get audit logged.
-func (c *Core) LookupToken(token string) (*TokenEntry, error) {
- if token == "" {
- return nil, fmt.Errorf("missing client token")
- }
-
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.sealed {
- return nil, consts.ErrSealed
- }
- if c.standby {
- return nil, consts.ErrStandby
- }
-
- // Many tests don't have a token store running
- if c.tokenStore == nil {
- return nil, nil
- }
-
- return c.tokenStore.Lookup(token)
-}
-
-func (c *Core) fetchACLandTokenEntry(req *logical.Request) (*ACL, *TokenEntry, error) {
- defer metrics.MeasureSince([]string{"core", "fetch_acl_and_token"}, time.Now())
-
- // Ensure there is a client token
- if req.ClientToken == "" {
- return nil, nil, fmt.Errorf("missing client token")
- }
-
- if c.tokenStore == nil {
- c.logger.Error("core: token store is unavailable")
- return nil, nil, ErrInternalError
- }
-
- // Resolve the token policy
- te, err := c.tokenStore.Lookup(req.ClientToken)
- if err != nil {
- c.logger.Error("core: failed to lookup token", "error", err)
- return nil, nil, ErrInternalError
- }
-
- // Ensure the token is valid
- if te == nil {
- return nil, nil, logical.ErrPermissionDenied
- }
-
- // Construct the corresponding ACL object
- acl, err := c.policyStore.ACL(te.Policies...)
- if err != nil {
- c.logger.Error("core: failed to construct ACL", "error", err)
- return nil, nil, ErrInternalError
- }
-
- return acl, te, nil
-}
-
-func (c *Core) checkToken(req *logical.Request) (*logical.Auth, *TokenEntry, error) {
- defer metrics.MeasureSince([]string{"core", "check_token"}, time.Now())
-
- acl, te, err := c.fetchACLandTokenEntry(req)
- if err != nil {
- return nil, te, err
- }
-
- // Check if this is a root protected path
- rootPath := c.router.RootPath(req.Path)
-
- // When we receive a write of either type, rather than require clients to
- // PUT/POST and trust the operation, we ask the backend to give us the real
- // skinny -- if the backend implements an existence check, it can tell us
- // whether a particular resource exists. Then we can mark it as an update
- // or creation as appropriate.
- if req.Operation == logical.CreateOperation || req.Operation == logical.UpdateOperation {
- checkExists, resourceExists, err := c.router.RouteExistenceCheck(req)
- switch err {
- case logical.ErrUnsupportedPath:
- // fail later via bad path to avoid confusing items in the log
- checkExists = false
- case nil:
- // Continue on
- default:
- c.logger.Error("core: failed to run existence check", "error", err)
- if _, ok := err.(errutil.UserError); ok {
- return nil, nil, err
- } else {
- return nil, nil, ErrInternalError
- }
- }
-
- switch {
- case checkExists == false:
- // No existence check, so always treate it as an update operation, which is how it is pre 0.5
- req.Operation = logical.UpdateOperation
- case resourceExists == true:
- // It exists, so force an update operation
- req.Operation = logical.UpdateOperation
- case resourceExists == false:
- // It doesn't exist, force a create operation
- req.Operation = logical.CreateOperation
- default:
- panic("unreachable code")
- }
- }
- // Create the auth response
- auth := &logical.Auth{
- ClientToken: req.ClientToken,
- Accessor: req.ClientTokenAccessor,
- Policies: te.Policies,
- Metadata: te.Meta,
- DisplayName: te.DisplayName,
- }
-
- // Check the standard non-root ACLs. Return the token entry if it's not
- // allowed so we can decrement the use count.
- allowed, rootPrivs := acl.AllowOperation(req)
- if !allowed {
- // Return auth for audit logging even if not allowed
- return auth, te, logical.ErrPermissionDenied
- }
- if rootPath && !rootPrivs {
- // Return auth for audit logging even if not allowed
- return auth, te, logical.ErrPermissionDenied
- }
-
- return auth, te, nil
-}
-
-// Sealed checks if the Vault is current sealed
-func (c *Core) Sealed() (bool, error) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- return c.sealed, nil
-}
-
-// Standby checks if the Vault is in standby mode
-func (c *Core) Standby() (bool, error) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- return c.standby, nil
-}
-
-// Leader is used to get the current active leader
-func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err error) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
-
- // Check if sealed
- if c.sealed {
- return false, "", "", consts.ErrSealed
- }
-
- // Check if HA enabled
- if c.ha == nil {
- return false, "", "", ErrHANotEnabled
- }
-
- // Check if we are the leader
- if !c.standby {
- return true, c.redirectAddr, c.clusterAddr, nil
- }
-
- // Initialize a lock
- lock, err := c.ha.LockWith(coreLockPath, "read")
- if err != nil {
- return false, "", "", err
- }
-
- // Read the value
- held, leaderUUID, err := lock.Value()
- if err != nil {
- return false, "", "", err
- }
- if !held {
- return false, "", "", nil
- }
-
- c.clusterLeaderParamsLock.RLock()
- localLeaderUUID := c.clusterLeaderUUID
- localRedirAddr := c.clusterLeaderRedirectAddr
- localClusterAddr := c.clusterLeaderClusterAddr
- c.clusterLeaderParamsLock.RUnlock()
-
- // If the leader hasn't changed, return the cached value; nothing changes
- // mid-leadership, and the barrier caches anyways
- if leaderUUID == localLeaderUUID && localRedirAddr != "" {
- return false, localRedirAddr, localClusterAddr, nil
- }
-
- c.logger.Trace("core: found new active node information, refreshing")
-
- c.clusterLeaderParamsLock.Lock()
- defer c.clusterLeaderParamsLock.Unlock()
-
- // Validate base conditions again
- if leaderUUID == c.clusterLeaderUUID && c.clusterLeaderRedirectAddr != "" {
- return false, localRedirAddr, localClusterAddr, nil
- }
-
- key := coreLeaderPrefix + leaderUUID
- entry, err := c.barrier.Get(key)
- if err != nil {
- return false, "", "", err
- }
- if entry == nil {
- return false, "", "", nil
- }
-
- var oldAdv bool
-
- var adv activeAdvertisement
- err = jsonutil.DecodeJSON(entry.Value, &adv)
- if err != nil {
- // Fall back to pre-struct handling
- adv.RedirectAddr = string(entry.Value)
- c.logger.Trace("core: parsed redirect addr for new active node", "redirect_addr", adv.RedirectAddr)
- oldAdv = true
- }
-
- if !oldAdv {
- c.logger.Trace("core: parsing information for new active node", "active_cluster_addr", adv.ClusterAddr, "active_redirect_addr", adv.RedirectAddr)
-
- // Ensure we are using current values
- err = c.loadLocalClusterTLS(adv)
- if err != nil {
- return false, "", "", err
- }
-
- // This will ensure that we both have a connection at the ready and that
- // the address is the current known value
- err = c.refreshRequestForwardingConnection(adv.ClusterAddr)
- if err != nil {
- return false, "", "", err
- }
- }
-
- // Don't set these until everything has been parsed successfully or we'll
- // never try again
- c.clusterLeaderRedirectAddr = adv.RedirectAddr
- c.clusterLeaderClusterAddr = adv.ClusterAddr
- c.clusterLeaderUUID = leaderUUID
-
- return false, adv.RedirectAddr, adv.ClusterAddr, nil
-}
-
-// SecretProgress returns the number of keys provided so far
-func (c *Core) SecretProgress() (int, string) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- switch c.unlockInfo {
- case nil:
- return 0, ""
- default:
- return len(c.unlockInfo.Parts), c.unlockInfo.Nonce
- }
-}
-
-// ResetUnsealProcess removes the current unlock parts from memory, to reset
-// the unsealing process
-func (c *Core) ResetUnsealProcess() {
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
- if !c.sealed {
- return
- }
- c.unlockInfo = nil
-}
-
-// Unseal is used to provide one of the key parts to unseal the Vault.
-//
-// They key given as a parameter will automatically be zerod after
-// this method is done with it. If you want to keep the key around, a copy
-// should be made.
-func (c *Core) Unseal(key []byte) (bool, error) {
- defer metrics.MeasureSince([]string{"core", "unseal"}, time.Now())
-
- // Verify the key length
- min, max := c.barrier.KeyLength()
- max += shamir.ShareOverhead
- if len(key) < min {
- return false, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
- }
- if len(key) > max {
- return false, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
- }
-
- // Get the seal configuration
- config, err := c.seal.BarrierConfig()
- if err != nil {
- return false, err
- }
-
- // Ensure the barrier is initialized
- if config == nil {
- return false, ErrNotInit
- }
-
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
-
- // Check if already unsealed
- if !c.sealed {
- return true, nil
- }
-
- masterKey, err := c.unsealPart(config, key)
- if err != nil {
- return false, err
- }
- if masterKey != nil {
- return c.unsealInternal(masterKey)
- }
-
- return false, nil
-}
-
-func (c *Core) unsealPart(config *SealConfig, key []byte) ([]byte, error) {
- // Check if we already have this piece
- if c.unlockInfo != nil {
- for _, existing := range c.unlockInfo.Parts {
- if subtle.ConstantTimeCompare(existing, key) == 1 {
- return nil, nil
- }
- }
- } else {
- uuid, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
- c.unlockInfo = &unlockInformation{
- Nonce: uuid,
- }
- }
-
- // Store this key
- c.unlockInfo.Parts = append(c.unlockInfo.Parts, key)
-
- // Check if we don't have enough keys to unlock
- if len(c.unlockInfo.Parts) < config.SecretThreshold {
- if c.logger.IsDebug() {
- c.logger.Debug("core: cannot unseal, not enough keys", "keys", len(c.unlockInfo.Parts), "threshold", config.SecretThreshold, "nonce", c.unlockInfo.Nonce)
- }
- return nil, nil
- }
-
- // Best-effort memzero of unlock parts once we're done with them
- defer func() {
- for i, _ := range c.unlockInfo.Parts {
- memzero(c.unlockInfo.Parts[i])
- }
- c.unlockInfo = nil
- }()
-
- // Recover the master key
- var masterKey []byte
- var err error
- if config.SecretThreshold == 1 {
- masterKey = make([]byte, len(c.unlockInfo.Parts[0]))
- copy(masterKey, c.unlockInfo.Parts[0])
- } else {
- masterKey, err = shamir.Combine(c.unlockInfo.Parts)
- if err != nil {
- return nil, fmt.Errorf("failed to compute master key: %v", err)
- }
- }
-
- return masterKey, nil
-}
-
-// This must be called with the state write lock held
-func (c *Core) unsealInternal(masterKey []byte) (bool, error) {
- defer memzero(masterKey)
-
- // Attempt to unlock
- if err := c.barrier.Unseal(masterKey); err != nil {
- return false, err
- }
- if c.logger.IsInfo() {
- c.logger.Info("core: vault is unsealed")
- }
-
- // Do post-unseal setup if HA is not enabled
- if c.ha == nil {
- // We still need to set up cluster info even if it's not part of a
- // cluster right now. This also populates the cached cluster object.
- if err := c.setupCluster(); err != nil {
- c.logger.Error("core: cluster setup failed", "error", err)
- c.barrier.Seal()
- c.logger.Warn("core: vault is sealed")
- return false, err
- }
-
- if err := c.postUnseal(); err != nil {
- c.logger.Error("core: post-unseal setup failed", "error", err)
- c.barrier.Seal()
- c.logger.Warn("core: vault is sealed")
- return false, err
- }
-
- c.standby = false
- } else {
- // Go to standby mode, wait until we are active to unseal
- c.standbyDoneCh = make(chan struct{})
- c.standbyStopCh = make(chan struct{})
- c.manualStepDownCh = make(chan struct{})
- go c.runStandby(c.standbyDoneCh, c.standbyStopCh, c.manualStepDownCh)
- }
-
- // Success!
- c.sealed = false
- if c.ha != nil {
- sd, ok := c.ha.(physical.ServiceDiscovery)
- if ok {
- if err := sd.NotifySealedStateChange(); err != nil {
- if c.logger.IsWarn() {
- c.logger.Warn("core: failed to notify unsealed status", "error", err)
- }
- }
- }
- }
- return true, nil
-}
-
-// SealWithRequest takes in a logical.Request, acquires the lock, and passes
-// through to sealInternal
-func (c *Core) SealWithRequest(req *logical.Request) error {
- defer metrics.MeasureSince([]string{"core", "seal-with-request"}, time.Now())
-
- c.stateLock.RLock()
-
- if c.sealed {
- c.stateLock.RUnlock()
- return nil
- }
-
- // This will unlock the read lock
- return c.sealInitCommon(req)
-}
-
-// Seal takes in a token and creates a logical.Request, acquires the lock, and
-// passes through to sealInternal
-func (c *Core) Seal(token string) error {
- defer metrics.MeasureSince([]string{"core", "seal"}, time.Now())
-
- c.stateLock.RLock()
-
- if c.sealed {
- c.stateLock.RUnlock()
- return nil
- }
-
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/seal",
- ClientToken: token,
- }
-
- // This will unlock the read lock
- return c.sealInitCommon(req)
-}
-
-// sealInitCommon is common logic for Seal and SealWithRequest and is used to
-// re-seal the Vault. This requires the Vault to be unsealed again to perform
-// any further operations. Note: this function will read-unlock the state lock.
-func (c *Core) sealInitCommon(req *logical.Request) (retErr error) {
- defer metrics.MeasureSince([]string{"core", "seal-internal"}, time.Now())
-
- if req == nil {
- retErr = multierror.Append(retErr, errors.New("nil request to seal"))
- c.stateLock.RUnlock()
- return retErr
- }
-
- // Validate the token is a root token
- acl, te, err := c.fetchACLandTokenEntry(req)
- if err != nil {
- // Since there is no token store in standby nodes, sealing cannot
- // be done. Ideally, the request has to be forwarded to leader node
- // for validation and the operation should be performed. But for now,
- // just returning with an error and recommending a vault restart, which
- // essentially does the same thing.
- if c.standby {
- c.logger.Error("core: vault cannot seal when in standby mode; please restart instead")
- retErr = multierror.Append(retErr, errors.New("vault cannot seal when in standby mode; please restart instead"))
- c.stateLock.RUnlock()
- return retErr
- }
- retErr = multierror.Append(retErr, err)
- c.stateLock.RUnlock()
- return retErr
- }
-
- // Audit-log the request before going any further
- auth := &logical.Auth{
- ClientToken: req.ClientToken,
- Policies: te.Policies,
- Metadata: te.Meta,
- DisplayName: te.DisplayName,
- }
-
- if err := c.auditBroker.LogRequest(auth, req, c.auditedHeaders, nil); err != nil {
- c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err)
- retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
- c.stateLock.RUnlock()
- return retErr
- }
-
- // Attempt to use the token (decrement num_uses)
- // On error bail out; if the token has been revoked, bail out too
- if te != nil {
- te, err = c.tokenStore.UseToken(te)
- if err != nil {
- c.logger.Error("core: failed to use token", "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- c.stateLock.RUnlock()
- return retErr
- }
- if te == nil {
- // Token is no longer valid
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- c.stateLock.RUnlock()
- return retErr
- }
- if te.NumUses == -1 {
- // Token needs to be revoked
- defer func(id string) {
- err = c.tokenStore.Revoke(id)
- if err != nil {
- c.logger.Error("core: token needed revocation after seal but failed to revoke", "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- }
- }(te.ID)
- }
- }
-
- // Verify that this operation is allowed
- allowed, rootPrivs := acl.AllowOperation(req)
- if !allowed {
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- c.stateLock.RUnlock()
- return retErr
- }
-
- // We always require root privileges for this operation
- if !rootPrivs {
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- c.stateLock.RUnlock()
- return retErr
- }
-
- // Tell any requests that know about this to stop
- if c.requestContextCancelFunc != nil {
- c.requestContextCancelFunc()
- }
-
- // Unlock from the request handling
- c.stateLock.RUnlock()
-
- //Seal the Vault
- retChan := make(chan error)
- go func() {
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
- retChan <- c.sealInternal()
- }()
-
- funcErr := <-retChan
- if funcErr != nil {
- retErr = multierror.Append(retErr, funcErr)
- }
-
- return retErr
-}
-
-// StepDown is used to step down from leadership
-func (c *Core) StepDown(req *logical.Request) (retErr error) {
- defer metrics.MeasureSince([]string{"core", "step_down"}, time.Now())
-
- if req == nil {
- retErr = multierror.Append(retErr, errors.New("nil request to step-down"))
- return retErr
- }
-
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.sealed {
- return nil
- }
- if c.ha == nil || c.standby {
- return nil
- }
-
- acl, te, err := c.fetchACLandTokenEntry(req)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- return retErr
- }
-
- // Audit-log the request before going any further
- auth := &logical.Auth{
- ClientToken: req.ClientToken,
- Policies: te.Policies,
- Metadata: te.Meta,
- DisplayName: te.DisplayName,
- }
-
- if err := c.auditBroker.LogRequest(auth, req, c.auditedHeaders, nil); err != nil {
- c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err)
- retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
- return retErr
- }
-
- // Attempt to use the token (decrement num_uses)
- if te != nil {
- te, err = c.tokenStore.UseToken(te)
- if err != nil {
- c.logger.Error("core: failed to use token", "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- return retErr
- }
- if te == nil {
- // Token has been revoked
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- return retErr
- }
- if te.NumUses == -1 {
- // Token needs to be revoked
- defer func(id string) {
- err = c.tokenStore.Revoke(id)
- if err != nil {
- c.logger.Error("core: token needed revocation after step-down but failed to revoke", "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- }
- }(te.ID)
- }
- }
-
- // Verify that this operation is allowed
- allowed, rootPrivs := acl.AllowOperation(req)
- if !allowed {
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- return retErr
- }
-
- // We always require root privileges for this operation
- if !rootPrivs {
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- return retErr
- }
-
- select {
- case c.manualStepDownCh <- struct{}{}:
- default:
- c.logger.Warn("core: manual step-down operation already queued")
- }
-
- return retErr
-}
-
-// sealInternal is an internal method used to seal the vault. It does not do
-// any authorization checking. The stateLock must be held prior to calling.
-func (c *Core) sealInternal() error {
- if c.sealed {
- return nil
- }
-
- // Enable that we are sealed to prevent further transactions
- c.sealed = true
-
- c.logger.Debug("core: marked as sealed")
-
- // Clear forwarding clients
- c.requestForwardingConnectionLock.Lock()
- c.clearForwardingClients()
- c.requestForwardingConnectionLock.Unlock()
-
- // Do pre-seal teardown if HA is not enabled
- if c.ha == nil {
- // Even in a non-HA context we key off of this for some things
- c.standby = true
- if err := c.preSeal(); err != nil {
- c.logger.Error("core: pre-seal teardown failed", "error", err)
- return fmt.Errorf("internal error")
- }
- } else {
- // Signal the standby goroutine to shutdown, wait for completion
- close(c.standbyStopCh)
-
- c.requestContext = nil
-
- // Release the lock while we wait to avoid deadlocking
- c.stateLock.Unlock()
- <-c.standbyDoneCh
- c.stateLock.Lock()
- }
-
- c.logger.Debug("core: sealing barrier")
- if err := c.barrier.Seal(); err != nil {
- c.logger.Error("core: error sealing barrier", "error", err)
- return err
- }
-
- if c.ha != nil {
- sd, ok := c.ha.(physical.ServiceDiscovery)
- if ok {
- if err := sd.NotifySealedStateChange(); err != nil {
- if c.logger.IsWarn() {
- c.logger.Warn("core: failed to notify sealed status", "error", err)
- }
- }
- }
- }
-
- c.logger.Info("core: vault is sealed")
-
- return nil
-}
-
-// postUnseal is invoked after the barrier is unsealed, but before
-// allowing any user operations. This allows us to setup any state that
-// requires the Vault to be unsealed such as mount tables, logical backends,
-// credential stores, etc.
-func (c *Core) postUnseal() (retErr error) {
- defer metrics.MeasureSince([]string{"core", "post_unseal"}, time.Now())
- defer func() {
- if retErr != nil {
- c.preSeal()
- } else {
- c.requestContext, c.requestContextCancelFunc = context.WithCancel(context.Background())
- }
- }()
- c.logger.Info("core: post-unseal setup starting")
-
- // Clear forwarding clients; we're active
- c.requestForwardingConnectionLock.Lock()
- c.clearForwardingClients()
- c.requestForwardingConnectionLock.Unlock()
-
- // Purge the backend if supported
- if purgable, ok := c.physical.(physical.Purgable); ok {
- purgable.Purge()
- }
-
- // Purge these for safety in case of a rekey
- c.seal.SetBarrierConfig(nil)
- if c.seal.RecoveryKeySupported() {
- c.seal.SetRecoveryConfig(nil)
- }
-
- if err := enterprisePostUnseal(c); err != nil {
- return err
- }
- if err := c.ensureWrappingKey(); err != nil {
- return err
- }
- if err := c.setupPluginCatalog(); err != nil {
- return err
- }
- if err := c.loadMounts(); err != nil {
- return err
- }
- if err := c.setupMounts(); err != nil {
- return err
- }
- if err := c.setupPolicyStore(); err != nil {
- return err
- }
- if err := c.loadCORSConfig(); err != nil {
- return err
- }
- if err := c.loadCredentials(); err != nil {
- return err
- }
- if err := c.setupCredentials(); err != nil {
- return err
- }
- if err := c.startRollback(); err != nil {
- return err
- }
- if err := c.setupExpiration(); err != nil {
- return err
- }
- if err := c.loadAudits(); err != nil {
- return err
- }
- if err := c.setupAudits(); err != nil {
- return err
- }
- if err := c.setupAuditedHeadersConfig(); err != nil {
- return err
- }
-
- if c.ha != nil {
- if err := c.startClusterListener(); err != nil {
- return err
- }
- }
- c.metricsCh = make(chan struct{})
- go c.emitMetrics(c.metricsCh)
- c.logger.Info("core: post-unseal setup complete")
- return nil
-}
-
-// preSeal is invoked before the barrier is sealed, allowing
-// for any state teardown required.
-func (c *Core) preSeal() error {
- defer metrics.MeasureSince([]string{"core", "pre_seal"}, time.Now())
- c.logger.Info("core: pre-seal teardown starting")
-
- // Clear any rekey progress
- c.barrierRekeyConfig = nil
- c.barrierRekeyProgress = nil
- c.recoveryRekeyConfig = nil
- c.recoveryRekeyProgress = nil
-
- if c.metricsCh != nil {
- close(c.metricsCh)
- c.metricsCh = nil
- }
- var result error
-
- c.stopClusterListener()
-
- if err := c.teardownAudits(); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error tearing down audits: {{err}}", err))
- }
- if err := c.stopExpiration(); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error stopping expiration: {{err}}", err))
- }
- if err := c.teardownCredentials(); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error tearing down credentials: {{err}}", err))
- }
- if err := c.teardownPolicyStore(); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error tearing down policy store: {{err}}", err))
- }
- if err := c.stopRollback(); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error stopping rollback: {{err}}", err))
- }
- if err := c.unloadMounts(); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error unloading mounts: {{err}}", err))
- }
- if err := enterprisePreSeal(c); err != nil {
- result = multierror.Append(result, err)
- }
-
- // Purge the backend if supported
- if purgable, ok := c.physical.(physical.Purgable); ok {
- purgable.Purge()
- }
- c.logger.Info("core: pre-seal teardown complete")
- return result
-}
-
-func enterprisePostUnsealImpl(c *Core) error {
- return nil
-}
-
-func enterprisePreSealImpl(c *Core) error {
- return nil
-}
-
-func startReplicationImpl(c *Core) error {
- return nil
-}
-
-func stopReplicationImpl(c *Core) error {
- return nil
-}
-
-// runStandby is a long running routine that is used when an HA backend
-// is enabled. It waits until we are leader and switches this Vault to
-// active.
-func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
- defer close(doneCh)
- defer close(manualStepDownCh)
- c.logger.Info("core: entering standby mode")
-
- // Monitor for key rotation
- keyRotateDone := make(chan struct{})
- keyRotateStop := make(chan struct{})
- go c.periodicCheckKeyUpgrade(keyRotateDone, keyRotateStop)
- // Monitor for new leadership
- checkLeaderDone := make(chan struct{})
- checkLeaderStop := make(chan struct{})
- go c.periodicLeaderRefresh(checkLeaderDone, checkLeaderStop)
- defer func() {
- close(keyRotateStop)
- <-keyRotateDone
- close(checkLeaderStop)
- <-checkLeaderDone
- }()
-
- for {
- // Check for a shutdown
- select {
- case <-stopCh:
- return
- default:
- }
-
- // Create a lock
- uuid, err := uuid.GenerateUUID()
- if err != nil {
- c.logger.Error("core: failed to generate uuid", "error", err)
- return
- }
- lock, err := c.ha.LockWith(coreLockPath, uuid)
- if err != nil {
- c.logger.Error("core: failed to create lock", "error", err)
- return
- }
-
- // Attempt the acquisition
- leaderLostCh := c.acquireLock(lock, stopCh)
-
- // Bail if we are being shutdown
- if leaderLostCh == nil {
- return
- }
- c.logger.Info("core: acquired lock, enabling active operation")
-
- // This is used later to log a metrics event; this can be helpful to
- // detect flapping
- activeTime := time.Now()
-
- // Grab the lock as we need it for cluster setup, which needs to happen
- // before advertising;
- c.stateLock.Lock()
-
- // This block is used to wipe barrier/seal state and verify that
- // everything is sane. If we have no sanity in the barrier, we actually
- // seal, as there's little we can do.
- {
- c.seal.SetBarrierConfig(nil)
- if c.seal.RecoveryKeySupported() {
- c.seal.SetRecoveryConfig(nil)
- }
-
- if err := c.performKeyUpgrades(); err != nil {
- // We call this in a goroutine so that we can give up the
- // statelock and have this shut us down; sealInternal has a
- // workflow where it watches for the stopCh to close so we want
- // to return from here
- go c.Shutdown()
- c.logger.Error("core: error performing key upgrades", "error", err)
- c.stateLock.Unlock()
- lock.Unlock()
- metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
- return
- }
- }
-
- // Clear previous local cluster cert info so we generate new. Since the
- // UUID will have changed, standbys will know to look for new info
- c.clusterParamsLock.Lock()
- c.localClusterCert = nil
- c.localClusterParsedCert = nil
- c.localClusterPrivateKey = nil
- c.clusterParamsLock.Unlock()
-
- if err := c.setupCluster(); err != nil {
- c.stateLock.Unlock()
- c.logger.Error("core: cluster setup failed", "error", err)
- lock.Unlock()
- metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
- continue
- }
-
- // Advertise as leader
- if err := c.advertiseLeader(uuid, leaderLostCh); err != nil {
- c.stateLock.Unlock()
- c.logger.Error("core: leader advertisement setup failed", "error", err)
- lock.Unlock()
- metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
- continue
- }
-
- // Attempt the post-unseal process
- err = c.postUnseal()
- if err == nil {
- c.standby = false
- }
- c.stateLock.Unlock()
-
- // Handle a failure to unseal
- if err != nil {
- c.logger.Error("core: post-unseal setup failed", "error", err)
- lock.Unlock()
- metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
- continue
- }
-
- // Monitor a loss of leadership
- var manualStepDown bool
- select {
- case <-leaderLostCh:
- c.logger.Warn("core: leadership lost, stopping active operation")
- case <-stopCh:
- c.logger.Warn("core: stopping active operation")
- case <-manualStepDownCh:
- c.logger.Warn("core: stepping down from active operation to standby")
- manualStepDown = true
- }
-
- metrics.MeasureSince([]string{"core", "leadership_lost"}, activeTime)
-
- // Clear ourself as leader
- if err := c.clearLeader(uuid); err != nil {
- c.logger.Error("core: clearing leader advertisement failed", "error", err)
- }
-
- // Tell any requests that know about this to stop
- if c.requestContextCancelFunc != nil {
- c.requestContextCancelFunc()
- }
-
- // Attempt the pre-seal process
- c.stateLock.Lock()
- c.standby = true
- preSealErr := c.preSeal()
- c.stateLock.Unlock()
-
- // Give up leadership
- lock.Unlock()
-
- // Check for a failure to prepare to seal
- if preSealErr != nil {
- c.logger.Error("core: pre-seal teardown failed", "error", err)
- }
-
- // If we've merely stepped down, we could instantly grab the lock
- // again. Give the other nodes a chance.
- if manualStepDown {
- time.Sleep(manualStepDownSleepPeriod)
- }
- }
-}
-
-// This checks the leader periodically to ensure that we switch RPC to a new
-// leader pretty quickly. There is logic in Leader() already to not make this
-// onerous and avoid more traffic than needed, so we just call that and ignore
-// the result.
-func (c *Core) periodicLeaderRefresh(doneCh, stopCh chan struct{}) {
- defer close(doneCh)
- for {
- select {
- case <-time.After(leaderCheckInterval):
- c.Leader()
- case <-stopCh:
- return
- }
- }
-}
-
-// periodicCheckKeyUpgrade is used to watch for key rotation events as a standby
-func (c *Core) periodicCheckKeyUpgrade(doneCh, stopCh chan struct{}) {
- defer close(doneCh)
- for {
- select {
- case <-time.After(keyRotateCheckInterval):
- // Only check if we are a standby
- c.stateLock.RLock()
- standby := c.standby
- c.stateLock.RUnlock()
- if !standby {
- continue
- }
-
- // Check for a poison pill. If we can read it, it means we have stale
- // keys (e.g. from replication being activated) and we need to seal to
- // be unsealed again.
- entry, _ := c.barrier.Get(poisonPillPath)
- if entry != nil && len(entry.Value) > 0 {
- c.logger.Warn("core: encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again")
- go c.Shutdown()
- continue
- }
-
- if err := c.checkKeyUpgrades(); err != nil {
- c.logger.Error("core: key rotation periodic upgrade check failed", "error", err)
- }
- case <-stopCh:
- return
- }
- }
-}
-
-// checkKeyUpgrades is used to check if there have been any key rotations
-// and if there is a chain of upgrades available
-func (c *Core) checkKeyUpgrades() error {
- for {
- // Check for an upgrade
- didUpgrade, newTerm, err := c.barrier.CheckUpgrade()
- if err != nil {
- return err
- }
-
- // Nothing to do if no upgrade
- if !didUpgrade {
- break
- }
- if c.logger.IsInfo() {
- c.logger.Info("core: upgraded to new key term", "term", newTerm)
- }
- }
- return nil
-}
-
-// scheduleUpgradeCleanup is used to ensure that all the upgrade paths
-// are cleaned up in a timely manner if a leader failover takes place
-func (c *Core) scheduleUpgradeCleanup() error {
- // List the upgrades
- upgrades, err := c.barrier.List(keyringUpgradePrefix)
- if err != nil {
- return fmt.Errorf("failed to list upgrades: %v", err)
- }
-
- // Nothing to do if no upgrades
- if len(upgrades) == 0 {
- return nil
- }
-
- // Schedule cleanup for all of them
- time.AfterFunc(keyRotateGracePeriod, func() {
- sealed, err := c.barrier.Sealed()
- if err != nil {
- c.logger.Warn("core: failed to check barrier status at upgrade cleanup time")
- return
- }
- if sealed {
- c.logger.Warn("core: barrier sealed at upgrade cleanup time")
- return
- }
- for _, upgrade := range upgrades {
- path := fmt.Sprintf("%s%s", keyringUpgradePrefix, upgrade)
- if err := c.barrier.Delete(path); err != nil {
- c.logger.Error("core: failed to cleanup upgrade", "path", path, "error", err)
- }
- }
- })
- return nil
-}
-
-func (c *Core) performKeyUpgrades() error {
- if err := c.checkKeyUpgrades(); err != nil {
- return errwrap.Wrapf("error checking for key upgrades: {{err}}", err)
- }
-
- if err := c.barrier.ReloadMasterKey(); err != nil {
- return errwrap.Wrapf("error reloading master key: {{err}}", err)
- }
-
- if err := c.barrier.ReloadKeyring(); err != nil {
- return errwrap.Wrapf("error reloading keyring: {{err}}", err)
- }
-
- if err := c.scheduleUpgradeCleanup(); err != nil {
- return errwrap.Wrapf("error scheduling upgrade cleanup: {{err}}", err)
- }
-
- return nil
-}
-
-// acquireLock blocks until the lock is acquired, returning the leaderLostCh
-func (c *Core) acquireLock(lock physical.Lock, stopCh <-chan struct{}) <-chan struct{} {
- for {
- // Attempt lock acquisition
- leaderLostCh, err := lock.Lock(stopCh)
- if err == nil {
- return leaderLostCh
- }
-
- // Retry the acquisition
- c.logger.Error("core: failed to acquire lock", "error", err)
- select {
- case <-time.After(lockRetryInterval):
- case <-stopCh:
- return nil
- }
- }
-}
-
-// advertiseLeader is used to advertise the current node as leader
-func (c *Core) advertiseLeader(uuid string, leaderLostCh <-chan struct{}) error {
- go c.cleanLeaderPrefix(uuid, leaderLostCh)
-
- var key *ecdsa.PrivateKey
- switch c.localClusterPrivateKey.(type) {
- case *ecdsa.PrivateKey:
- key = c.localClusterPrivateKey.(*ecdsa.PrivateKey)
- default:
- c.logger.Error("core: unknown cluster private key type", "key_type", fmt.Sprintf("%T", c.localClusterPrivateKey))
- return fmt.Errorf("unknown cluster private key type %T", c.localClusterPrivateKey)
- }
-
- keyParams := &clusterKeyParams{
- Type: corePrivateKeyTypeP521,
- X: key.X,
- Y: key.Y,
- D: key.D,
- }
-
- adv := &activeAdvertisement{
- RedirectAddr: c.redirectAddr,
- ClusterAddr: c.clusterAddr,
- ClusterCert: c.localClusterCert,
- ClusterKeyParams: keyParams,
- }
- val, err := jsonutil.EncodeJSON(adv)
- if err != nil {
- return err
- }
- ent := &Entry{
- Key: coreLeaderPrefix + uuid,
- Value: val,
- }
- err = c.barrier.Put(ent)
- if err != nil {
- return err
- }
-
- sd, ok := c.ha.(physical.ServiceDiscovery)
- if ok {
- if err := sd.NotifyActiveStateChange(); err != nil {
- if c.logger.IsWarn() {
- c.logger.Warn("core: failed to notify active status", "error", err)
- }
- }
- }
- return nil
-}
-
-func (c *Core) cleanLeaderPrefix(uuid string, leaderLostCh <-chan struct{}) {
- keys, err := c.barrier.List(coreLeaderPrefix)
- if err != nil {
- c.logger.Error("core: failed to list entries in core/leader", "error", err)
- return
- }
- for len(keys) > 0 {
- select {
- case <-time.After(leaderPrefixCleanDelay):
- if keys[0] != uuid {
- c.barrier.Delete(coreLeaderPrefix + keys[0])
- }
- keys = keys[1:]
- case <-leaderLostCh:
- return
- }
- }
-}
-
-// clearLeader is used to clear our leadership entry
-func (c *Core) clearLeader(uuid string) error {
- key := coreLeaderPrefix + uuid
- err := c.barrier.Delete(key)
-
- // Advertise ourselves as a standby
- sd, ok := c.ha.(physical.ServiceDiscovery)
- if ok {
- if err := sd.NotifyActiveStateChange(); err != nil {
- if c.logger.IsWarn() {
- c.logger.Warn("core: failed to notify standby status", "error", err)
- }
- }
- }
-
- return err
-}
-
-// emitMetrics is used to periodically expose metrics while runnig
-func (c *Core) emitMetrics(stopCh chan struct{}) {
- for {
- select {
- case <-time.After(time.Second):
- c.metricsMutex.Lock()
- if c.expiration != nil {
- c.expiration.emitMetrics()
- }
- c.metricsMutex.Unlock()
- case <-stopCh:
- return
- }
- }
-}
-
-func (c *Core) ReplicationState() consts.ReplicationState {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- return c.replicationState
-}
-
-func (c *Core) SealAccess() *SealAccess {
- sa := &SealAccess{}
- sa.SetSeal(c.seal)
- return sa
-}
-
-func (c *Core) Logger() log.Logger {
- return c.logger
-}
-
-func (c *Core) BarrierKeyLength() (min, max int) {
- min, max = c.barrier.KeyLength()
- max += shamir.ShareOverhead
- return
-}
-
-func (c *Core) AuditedHeadersConfig() *AuditedHeadersConfig {
- return c.auditedHeaders
-}
-
-func lastRemoteWALImpl(c *Core) uint64 {
- return 0
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/core_test.go b/vendor/github.com/hashicorp/vault/vault/core_test.go
deleted file mode 100644
index b940254..0000000
--- a/vendor/github.com/hashicorp/vault/vault/core_test.go
+++ /dev/null
@@ -1,2139 +0,0 @@
-package vault
-
-import (
- "reflect"
- "testing"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/physical/inmem"
- log "github.com/mgutz/logxi/v1"
-)
-
-var (
- // invalidKey is used to test Unseal
- invalidKey = []byte("abcdefghijklmnopqrstuvwxyz")[:17]
-)
-
-func TestNewCore_badRedirectAddr(t *testing.T) {
- logger = logformat.NewVaultLogger(log.LevelTrace)
-
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- conf := &CoreConfig{
- RedirectAddr: "127.0.0.1:8200",
- Physical: inm,
- DisableMlock: true,
- }
- _, err = NewCore(conf)
- if err == nil {
- t.Fatal("should error")
- }
-}
-
-func TestSealConfig_Invalid(t *testing.T) {
- s := &SealConfig{
- SecretShares: 2,
- SecretThreshold: 1,
- }
- err := s.Validate()
- if err == nil {
- t.Fatalf("expected err")
- }
-}
-
-func TestCore_Unseal_MultiShare(t *testing.T) {
- c := TestCore(t)
-
- _, err := TestCoreUnseal(c, invalidKey)
- if err != ErrNotInit {
- t.Fatalf("err: %v", err)
- }
-
- sealConf := &SealConfig{
- SecretShares: 5,
- SecretThreshold: 3,
- }
- res, err := c.Initialize(&InitParams{
- BarrierConfig: sealConf,
- RecoveryConfig: nil,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- sealed, err := c.Sealed()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !sealed {
- t.Fatalf("should be sealed")
- }
-
- if prog, _ := c.SecretProgress(); prog != 0 {
- t.Fatalf("bad progress: %d", prog)
- }
-
- for i := 0; i < 5; i++ {
- unseal, err := TestCoreUnseal(c, res.SecretShares[i])
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Ignore redundant
- _, err = TestCoreUnseal(c, res.SecretShares[i])
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if i >= 2 {
- if !unseal {
- t.Fatalf("should be unsealed")
- }
- if prog, _ := c.SecretProgress(); prog != 0 {
- t.Fatalf("bad progress: %d", prog)
- }
- } else {
- if unseal {
- t.Fatalf("should not be unsealed")
- }
- if prog, _ := c.SecretProgress(); prog != i+1 {
- t.Fatalf("bad progress: %d", prog)
- }
- }
- }
-
- sealed, err = c.Sealed()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if sealed {
- t.Fatalf("should not be sealed")
- }
-
- err = c.Seal(res.RootToken)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Ignore redundant
- err = c.Seal(res.RootToken)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- sealed, err = c.Sealed()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !sealed {
- t.Fatalf("should be sealed")
- }
-}
-
-func TestCore_Unseal_Single(t *testing.T) {
- c := TestCore(t)
-
- _, err := TestCoreUnseal(c, invalidKey)
- if err != ErrNotInit {
- t.Fatalf("err: %v", err)
- }
-
- sealConf := &SealConfig{
- SecretShares: 1,
- SecretThreshold: 1,
- }
- res, err := c.Initialize(&InitParams{
- BarrierConfig: sealConf,
- RecoveryConfig: nil,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- sealed, err := c.Sealed()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !sealed {
- t.Fatalf("should be sealed")
- }
-
- if prog, _ := c.SecretProgress(); prog != 0 {
- t.Fatalf("bad progress: %d", prog)
- }
-
- unseal, err := TestCoreUnseal(c, res.SecretShares[0])
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if !unseal {
- t.Fatalf("should be unsealed")
- }
- if prog, _ := c.SecretProgress(); prog != 0 {
- t.Fatalf("bad progress: %d", prog)
- }
-
- sealed, err = c.Sealed()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if sealed {
- t.Fatalf("should not be sealed")
- }
-}
-
-func TestCore_Route_Sealed(t *testing.T) {
- c := TestCore(t)
- sealConf := &SealConfig{
- SecretShares: 1,
- SecretThreshold: 1,
- }
-
- // Should not route anything
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "sys/mounts",
- }
- _, err := c.HandleRequest(req)
- if err != consts.ErrSealed {
- t.Fatalf("err: %v", err)
- }
-
- res, err := c.Initialize(&InitParams{
- BarrierConfig: sealConf,
- RecoveryConfig: nil,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- unseal, err := TestCoreUnseal(c, res.SecretShares[0])
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !unseal {
- t.Fatalf("should be unsealed")
- }
-
- // Should not error after unseal
- req.ClientToken = res.RootToken
- _, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-}
-
-// Attempt to unseal after doing a first seal
-func TestCore_SealUnseal(t *testing.T) {
- c, keys, root := TestCoreUnsealed(t)
- if err := c.Seal(root); err != nil {
- t.Fatalf("err: %v", err)
- }
- for i, key := range keys {
- unseal, err := TestCoreUnseal(c, key)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if i+1 == len(keys) && !unseal {
- t.Fatalf("err: should be unsealed")
- }
- }
-}
-
-// Attempt to shutdown after unseal
-func TestCore_Shutdown(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
- if err := c.Shutdown(); err != nil {
- t.Fatalf("err: %v", err)
- }
- if sealed, err := c.Sealed(); err != nil || !sealed {
- t.Fatalf("err: %v", err)
- }
-}
-
-// Attempt to seal bad token
-func TestCore_Seal_BadToken(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
- if err := c.Seal("foo"); err == nil {
- t.Fatalf("err: %v", err)
- }
- if sealed, err := c.Sealed(); err != nil || sealed {
- t.Fatalf("err: %v", err)
- }
-}
-
-// Ensure we get a LeaseID
-func TestCore_HandleRequest_Lease(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
-
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/test",
- Data: map[string]interface{}{
- "foo": "bar",
- "lease": "1h",
- },
- ClientToken: root,
- }
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Read the key
- req.Operation = logical.ReadOperation
- req.Data = nil
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp == nil || resp.Secret == nil || resp.Data == nil {
- t.Fatalf("bad: %#v", resp)
- }
- if resp.Secret.TTL != time.Hour {
- t.Fatalf("bad: %#v", resp.Secret)
- }
- if resp.Secret.LeaseID == "" {
- t.Fatalf("bad: %#v", resp.Secret)
- }
- if resp.Data["foo"] != "bar" {
- t.Fatalf("bad: %#v", resp.Data)
- }
-}
-
-func TestCore_HandleRequest_Lease_MaxLength(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
-
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/test",
- Data: map[string]interface{}{
- "foo": "bar",
- "lease": "1000h",
- },
- ClientToken: root,
- }
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Read the key
- req.Operation = logical.ReadOperation
- req.Data = nil
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp == nil || resp.Secret == nil || resp.Data == nil {
- t.Fatalf("bad: %#v", resp)
- }
- if resp.Secret.TTL != c.maxLeaseTTL {
- t.Fatalf("bad: %#v", resp.Secret)
- }
- if resp.Secret.LeaseID == "" {
- t.Fatalf("bad: %#v", resp.Secret)
- }
- if resp.Data["foo"] != "bar" {
- t.Fatalf("bad: %#v", resp.Data)
- }
-}
-
-func TestCore_HandleRequest_Lease_DefaultLength(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
-
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/test",
- Data: map[string]interface{}{
- "foo": "bar",
- "lease": "0h",
- },
- ClientToken: root,
- }
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Read the key
- req.Operation = logical.ReadOperation
- req.Data = nil
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp == nil || resp.Secret == nil || resp.Data == nil {
- t.Fatalf("bad: %#v", resp)
- }
- if resp.Secret.TTL != c.defaultLeaseTTL {
- t.Fatalf("bad: %#v", resp.Secret)
- }
- if resp.Secret.LeaseID == "" {
- t.Fatalf("bad: %#v", resp.Secret)
- }
- if resp.Data["foo"] != "bar" {
- t.Fatalf("bad: %#v", resp.Data)
- }
-}
-
-func TestCore_HandleRequest_MissingToken(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
-
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/test",
- Data: map[string]interface{}{
- "foo": "bar",
- "lease": "1h",
- },
- }
- resp, err := c.HandleRequest(req)
- if err == nil || !errwrap.Contains(err, logical.ErrInvalidRequest.Error()) {
- t.Fatalf("err: %v", err)
- }
- if resp.Data["error"] != "missing client token" {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestCore_HandleRequest_InvalidToken(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
-
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/test",
- Data: map[string]interface{}{
- "foo": "bar",
- "lease": "1h",
- },
- ClientToken: "foobarbaz",
- }
- resp, err := c.HandleRequest(req)
- if err == nil || !errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
- t.Fatalf("err: %v", err)
- }
- if resp.Data["error"] != "permission denied" {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-// Check that standard permissions work
-func TestCore_HandleRequest_NoSlash(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
-
- req := &logical.Request{
- Operation: logical.HelpOperation,
- Path: "secret",
- ClientToken: root,
- }
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v, resp: %v", err, resp)
- }
- if _, ok := resp.Data["help"]; !ok {
- t.Fatalf("resp: %v", resp)
- }
-}
-
-// Test a root path is denied if non-root
-func TestCore_HandleRequest_RootPath(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
- testCoreMakeToken(t, c, root, "child", "", []string{"test"})
-
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "sys/policy", // root protected!
- ClientToken: "child",
- }
- resp, err := c.HandleRequest(req)
- if err == nil || !errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
- t.Fatalf("err: %v, resp: %v", err, resp)
- }
-}
-
-// Test a root path is allowed if non-root but with sudo
-func TestCore_HandleRequest_RootPath_WithSudo(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
-
- // Set the 'test' policy object to permit access to sys/policy
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/policy/test", // root protected!
- Data: map[string]interface{}{
- "rules": `path "sys/policy" { policy = "sudo" }`,
- },
- ClientToken: root,
- }
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Child token (non-root) but with 'test' policy should have access
- testCoreMakeToken(t, c, root, "child", "", []string{"test"})
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "sys/policy", // root protected!
- ClientToken: "child",
- }
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp == nil {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-// Check that standard permissions work
-func TestCore_HandleRequest_PermissionDenied(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
- testCoreMakeToken(t, c, root, "child", "", []string{"test"})
-
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/test",
- Data: map[string]interface{}{
- "foo": "bar",
- "lease": "1h",
- },
- ClientToken: "child",
- }
- resp, err := c.HandleRequest(req)
- if err == nil || !errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
- t.Fatalf("err: %v, resp: %v", err, resp)
- }
-}
-
-// Check that standard permissions work
-func TestCore_HandleRequest_PermissionAllowed(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
- testCoreMakeToken(t, c, root, "child", "", []string{"test"})
-
- // Set the 'test' policy object to permit access to secret/
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/policy/test",
- Data: map[string]interface{}{
- "rules": `path "secret/*" { policy = "write" }`,
- },
- ClientToken: root,
- }
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Write should work now
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/test",
- Data: map[string]interface{}{
- "foo": "bar",
- "lease": "1h",
- },
- ClientToken: "child",
- }
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
-func TestCore_HandleRequest_NoClientToken(t *testing.T) {
- noop := &NoopBackend{
- Response: &logical.Response{},
- }
- c, _, root := TestCoreUnsealed(t)
- c.logicalBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return noop, nil
- }
-
- // Enable the logical backend
- req := logical.TestRequest(t, logical.UpdateOperation, "sys/mounts/foo")
- req.Data["type"] = "noop"
- req.Data["description"] = "foo"
- req.ClientToken = root
- _, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Attempt to request with connection data
- req = &logical.Request{
- Path: "foo/login",
- }
- req.ClientToken = root
- if _, err := c.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- ct := noop.Requests[0].ClientToken
- if ct == "" || ct == root {
- t.Fatalf("bad: %#v", noop.Requests)
- }
-}
-
-func TestCore_HandleRequest_ConnOnLogin(t *testing.T) {
- noop := &NoopBackend{
- Login: []string{"login"},
- Response: &logical.Response{},
- }
- c, _, root := TestCoreUnsealed(t)
- c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return noop, nil
- }
-
- // Enable the credential backend
- req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
- req.Data["type"] = "noop"
- req.ClientToken = root
- _, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Attempt to request with connection data
- req = &logical.Request{
- Path: "auth/foo/login",
- Connection: &logical.Connection{},
- }
- if _, err := c.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
- if noop.Requests[0].Connection == nil {
- t.Fatalf("bad: %#v", noop.Requests)
- }
-}
-
-// Ensure we get a client token
-func TestCore_HandleLogin_Token(t *testing.T) {
- noop := &NoopBackend{
- Login: []string{"login"},
- Response: &logical.Response{
- Auth: &logical.Auth{
- Policies: []string{"foo", "bar"},
- Metadata: map[string]string{
- "user": "armon",
- },
- DisplayName: "armon",
- },
- },
- }
- c, _, root := TestCoreUnsealed(t)
- c.credentialBackends["noop"] = func(conf *logical.BackendConfig) (logical.Backend, error) {
- return noop, nil
- }
-
- // Enable the credential backend
- req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
- req.Data["type"] = "noop"
- req.ClientToken = root
- _, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Attempt to login
- lreq := &logical.Request{
- Path: "auth/foo/login",
- }
- lresp, err := c.HandleRequest(lreq)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Ensure we got a client token back
- clientToken := lresp.Auth.ClientToken
- if clientToken == "" {
- t.Fatalf("bad: %#v", lresp)
- }
-
- // Check the policy and metadata
- te, err := c.tokenStore.Lookup(clientToken)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- expect := &TokenEntry{
- ID: clientToken,
- Accessor: te.Accessor,
- Parent: "",
- Policies: []string{"bar", "default", "foo"},
- Path: "auth/foo/login",
- Meta: map[string]string{
- "user": "armon",
- },
- DisplayName: "foo-armon",
- TTL: time.Hour * 24,
- CreationTime: te.CreationTime,
- }
-
- if !reflect.DeepEqual(te, expect) {
- t.Fatalf("Bad: %#v expect: %#v", te, expect)
- }
-
- // Check that we have a lease with default duration
- if lresp.Auth.TTL != noop.System().DefaultLeaseTTL() {
- t.Fatalf("bad: %#v, defaultLeaseTTL: %#v", lresp.Auth, c.defaultLeaseTTL)
- }
-}
-
-func TestCore_HandleRequest_AuditTrail(t *testing.T) {
- // Create a noop audit backend
- noop := &NoopAudit{}
- c, _, root := TestCoreUnsealed(t)
- c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
- noop = &NoopAudit{
- Config: config,
- }
- return noop, nil
- }
-
- // Enable the audit backend
- req := logical.TestRequest(t, logical.UpdateOperation, "sys/audit/noop")
- req.Data["type"] = "noop"
- req.ClientToken = root
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Make a request
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/test",
- Data: map[string]interface{}{
- "foo": "bar",
- "lease": "1h",
- },
- ClientToken: root,
- }
- req.ClientToken = root
- if _, err := c.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check the audit trail on request and response
- if len(noop.ReqAuth) != 1 {
- t.Fatalf("bad: %#v", noop)
- }
- auth := noop.ReqAuth[0]
- if auth.ClientToken != root {
- t.Fatalf("bad client token: %#v", auth)
- }
- if len(auth.Policies) != 1 || auth.Policies[0] != "root" {
- t.Fatalf("bad: %#v", auth)
- }
- if len(noop.Req) != 1 || !reflect.DeepEqual(noop.Req[0], req) {
- t.Fatalf("Bad: %#v", noop.Req[0])
- }
-
- if len(noop.RespAuth) != 2 {
- t.Fatalf("bad: %#v", noop)
- }
- if !reflect.DeepEqual(noop.RespAuth[1], auth) {
- t.Fatalf("bad: %#v", auth)
- }
- if len(noop.RespReq) != 2 || !reflect.DeepEqual(noop.RespReq[1], req) {
- t.Fatalf("Bad: %#v", noop.RespReq[1])
- }
- if len(noop.Resp) != 2 || !reflect.DeepEqual(noop.Resp[1], resp) {
- t.Fatalf("Bad: %#v", noop.Resp[1])
- }
-}
-
-// Ensure we get a client token
-func TestCore_HandleLogin_AuditTrail(t *testing.T) {
- // Create a badass credential backend that always logs in as armon
- noop := &NoopAudit{}
- noopBack := &NoopBackend{
- Login: []string{"login"},
- Response: &logical.Response{
- Auth: &logical.Auth{
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Hour,
- },
- Policies: []string{"foo", "bar"},
- Metadata: map[string]string{
- "user": "armon",
- },
- },
- },
- }
- c, _, root := TestCoreUnsealed(t)
- c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return noopBack, nil
- }
- c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
- noop = &NoopAudit{
- Config: config,
- }
- return noop, nil
- }
-
- // Enable the credential backend
- req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
- req.Data["type"] = "noop"
- req.ClientToken = root
- _, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Enable the audit backend
- req = logical.TestRequest(t, logical.UpdateOperation, "sys/audit/noop")
- req.Data["type"] = "noop"
- req.ClientToken = root
- _, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Attempt to login
- lreq := &logical.Request{
- Path: "auth/foo/login",
- }
- lresp, err := c.HandleRequest(lreq)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Ensure we got a client token back
- clientToken := lresp.Auth.ClientToken
- if clientToken == "" {
- t.Fatalf("bad: %#v", lresp)
- }
-
- // Check the audit trail on request and response
- if len(noop.ReqAuth) != 1 {
- t.Fatalf("bad: %#v", noop)
- }
- if len(noop.Req) != 1 || !reflect.DeepEqual(noop.Req[0], lreq) {
- t.Fatalf("Bad: %#v %#v", noop.Req[0], lreq)
- }
-
- if len(noop.RespAuth) != 2 {
- t.Fatalf("bad: %#v", noop)
- }
- auth := noop.RespAuth[1]
- if auth.ClientToken != clientToken {
- t.Fatalf("bad client token: %#v", auth)
- }
- if len(auth.Policies) != 3 || auth.Policies[0] != "bar" || auth.Policies[1] != "default" || auth.Policies[2] != "foo" {
- t.Fatalf("bad: %#v", auth)
- }
- if len(noop.RespReq) != 2 || !reflect.DeepEqual(noop.RespReq[1], lreq) {
- t.Fatalf("Bad: %#v", noop.RespReq[1])
- }
- if len(noop.Resp) != 2 || !reflect.DeepEqual(noop.Resp[1], lresp) {
- t.Fatalf("Bad: %#v %#v", noop.Resp[1], lresp)
- }
-}
-
-// Check that we register a lease for new tokens
-func TestCore_HandleRequest_CreateToken_Lease(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
-
- // Create a new credential
- req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/create")
- req.ClientToken = root
- req.Data["policies"] = []string{"foo"}
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Ensure we got a new client token back
- clientToken := resp.Auth.ClientToken
- if clientToken == "" {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Check the policy and metadata
- te, err := c.tokenStore.Lookup(clientToken)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- expect := &TokenEntry{
- ID: clientToken,
- Accessor: te.Accessor,
- Parent: root,
- Policies: []string{"default", "foo"},
- Path: "auth/token/create",
- DisplayName: "token",
- CreationTime: te.CreationTime,
- TTL: time.Hour * 24 * 32,
- }
- if !reflect.DeepEqual(te, expect) {
- t.Fatalf("Bad: %#v expect: %#v", te, expect)
- }
-
- // Check that we have a lease with default duration
- if resp.Auth.TTL != c.defaultLeaseTTL {
- t.Fatalf("bad: %#v", resp.Auth)
- }
-}
-
-// Check that we handle excluding the default policy
-func TestCore_HandleRequest_CreateToken_NoDefaultPolicy(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
-
- // Create a new credential
- req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/create")
- req.ClientToken = root
- req.Data["policies"] = []string{"foo"}
- req.Data["no_default_policy"] = true
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Ensure we got a new client token back
- clientToken := resp.Auth.ClientToken
- if clientToken == "" {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Check the policy and metadata
- te, err := c.tokenStore.Lookup(clientToken)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- expect := &TokenEntry{
- ID: clientToken,
- Accessor: te.Accessor,
- Parent: root,
- Policies: []string{"foo"},
- Path: "auth/token/create",
- DisplayName: "token",
- CreationTime: te.CreationTime,
- TTL: time.Hour * 24 * 32,
- }
- if !reflect.DeepEqual(te, expect) {
- t.Fatalf("Bad: %#v expect: %#v", te, expect)
- }
-}
-
-func TestCore_LimitedUseToken(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
-
- // Create a new credential
- req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/create")
- req.ClientToken = root
- req.Data["num_uses"] = "1"
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Put a secret
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/foo",
- Data: map[string]interface{}{
- "foo": "bar",
- },
- ClientToken: resp.Auth.ClientToken,
- }
- _, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Second operation should fail
- _, err = c.HandleRequest(req)
- if err == nil || !errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
- t.Fatalf("err: %v", err)
- }
-}
-
-func TestCore_Standby_Seal(t *testing.T) {
- // Create the first core and initialize it
- logger = logformat.NewVaultLogger(log.LevelTrace)
-
- inm, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- inmha, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- redirectOriginal := "http://127.0.0.1:8200"
- core, err := NewCore(&CoreConfig{
- Physical: inm,
- HAPhysical: inmha.(physical.HABackend),
- RedirectAddr: redirectOriginal,
- DisableMlock: true,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- keys, root := TestCoreInit(t, core)
- for _, key := range keys {
- if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Verify unsealed
- sealed, err := core.Sealed()
- if err != nil {
- t.Fatalf("err checking seal status: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
-
- // Wait for core to become active
- TestWaitActive(t, core)
-
- // Check the leader is local
- isLeader, advertise, _, err := core.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !isLeader {
- t.Fatalf("should be leader")
- }
- if advertise != redirectOriginal {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
- }
-
- // Create the second core and initialize it
- redirectOriginal2 := "http://127.0.0.1:8500"
- core2, err := NewCore(&CoreConfig{
- Physical: inm,
- HAPhysical: inmha.(physical.HABackend),
- RedirectAddr: redirectOriginal2,
- DisableMlock: true,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- for _, key := range keys {
- if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Verify unsealed
- sealed, err = core2.Sealed()
- if err != nil {
- t.Fatalf("err checking seal status: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
-
- // Core2 should be in standby
- standby, err := core2.Standby()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !standby {
- t.Fatalf("should be standby")
- }
-
- // Check the leader is not local
- isLeader, advertise, _, err = core2.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if isLeader {
- t.Fatalf("should not be leader")
- }
- if advertise != redirectOriginal {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
- }
-
- // Seal the standby core with the correct token. Shouldn't go down
- err = core2.Seal(root)
- if err == nil {
- t.Fatal("should not be sealed")
- }
-
- keyUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- // Seal the standby core with an invalid token. Shouldn't go down
- err = core2.Seal(keyUUID)
- if err == nil {
- t.Fatal("should not be sealed")
- }
-}
-
-func TestCore_StepDown(t *testing.T) {
- // Create the first core and initialize it
- logger = logformat.NewVaultLogger(log.LevelTrace)
-
- inm, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- inmha, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- redirectOriginal := "http://127.0.0.1:8200"
- core, err := NewCore(&CoreConfig{
- Physical: inm,
- HAPhysical: inmha.(physical.HABackend),
- RedirectAddr: redirectOriginal,
- DisableMlock: true,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- keys, root := TestCoreInit(t, core)
- for _, key := range keys {
- if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Verify unsealed
- sealed, err := core.Sealed()
- if err != nil {
- t.Fatalf("err checking seal status: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
-
- // Wait for core to become active
- TestWaitActive(t, core)
-
- // Check the leader is local
- isLeader, advertise, _, err := core.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !isLeader {
- t.Fatalf("should be leader")
- }
- if advertise != redirectOriginal {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
- }
-
- // Create the second core and initialize it
- redirectOriginal2 := "http://127.0.0.1:8500"
- core2, err := NewCore(&CoreConfig{
- Physical: inm,
- HAPhysical: inmha.(physical.HABackend),
- RedirectAddr: redirectOriginal2,
- DisableMlock: true,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- for _, key := range keys {
- if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Verify unsealed
- sealed, err = core2.Sealed()
- if err != nil {
- t.Fatalf("err checking seal status: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
-
- // Core2 should be in standby
- standby, err := core2.Standby()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !standby {
- t.Fatalf("should be standby")
- }
-
- // Check the leader is not local
- isLeader, advertise, _, err = core2.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if isLeader {
- t.Fatalf("should not be leader")
- }
- if advertise != redirectOriginal {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
- }
-
- req := &logical.Request{
- ClientToken: root,
- Path: "sys/step-down",
- }
-
- // Create an identifier for the request
- req.ID, err = uuid.GenerateUUID()
- if err != nil {
- t.Fatalf("failed to generate identifier for the request: path: %s err: %v", req.Path, err)
- }
-
- // Step down core
- err = core.StepDown(req)
- if err != nil {
- t.Fatal("error stepping down core 1")
- }
-
- // Give time to switch leaders
- time.Sleep(5 * time.Second)
-
- // Core1 should be in standby
- standby, err = core.Standby()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !standby {
- t.Fatalf("should be standby")
- }
-
- // Check the leader is core2
- isLeader, advertise, _, err = core2.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !isLeader {
- t.Fatalf("should be leader")
- }
- if advertise != redirectOriginal2 {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal2)
- }
-
- // Check the leader is not local
- isLeader, advertise, _, err = core.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if isLeader {
- t.Fatalf("should not be leader")
- }
- if advertise != redirectOriginal2 {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal2)
- }
-
- // Step down core2
- err = core2.StepDown(req)
- if err != nil {
- t.Fatal("error stepping down core 1")
- }
-
- // Give time to switch leaders -- core 1 will still be waiting on its
- // cooling off period so give it a full 10 seconds to recover
- time.Sleep(10 * time.Second)
-
- // Core2 should be in standby
- standby, err = core2.Standby()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !standby {
- t.Fatalf("should be standby")
- }
-
- // Check the leader is core1
- isLeader, advertise, _, err = core.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !isLeader {
- t.Fatalf("should be leader")
- }
- if advertise != redirectOriginal {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
- }
-
- // Check the leader is not local
- isLeader, advertise, _, err = core2.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if isLeader {
- t.Fatalf("should not be leader")
- }
- if advertise != redirectOriginal {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
- }
-}
-
-func TestCore_CleanLeaderPrefix(t *testing.T) {
- // Create the first core and initialize it
- logger = logformat.NewVaultLogger(log.LevelTrace)
-
- inm, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- inmha, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- redirectOriginal := "http://127.0.0.1:8200"
- core, err := NewCore(&CoreConfig{
- Physical: inm,
- HAPhysical: inmha.(physical.HABackend),
- RedirectAddr: redirectOriginal,
- DisableMlock: true,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- keys, root := TestCoreInit(t, core)
- for _, key := range keys {
- if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Verify unsealed
- sealed, err := core.Sealed()
- if err != nil {
- t.Fatalf("err checking seal status: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
-
- // Wait for core to become active
- TestWaitActive(t, core)
-
- // Ensure that the original clean function has stopped running
- time.Sleep(2 * time.Second)
-
- // Put several random entries
- for i := 0; i < 5; i++ {
- keyUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- valueUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- core.barrier.Put(&Entry{
- Key: coreLeaderPrefix + keyUUID,
- Value: []byte(valueUUID),
- })
- }
-
- entries, err := core.barrier.List(coreLeaderPrefix)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(entries) != 6 {
- t.Fatalf("wrong number of core leader prefix entries, got %d", len(entries))
- }
-
- // Check the leader is local
- isLeader, advertise, _, err := core.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !isLeader {
- t.Fatalf("should be leader")
- }
- if advertise != redirectOriginal {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
- }
-
- // Create a second core, attached to same in-memory store
- redirectOriginal2 := "http://127.0.0.1:8500"
- core2, err := NewCore(&CoreConfig{
- Physical: inm,
- HAPhysical: inmha.(physical.HABackend),
- RedirectAddr: redirectOriginal2,
- DisableMlock: true,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- for _, key := range keys {
- if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Verify unsealed
- sealed, err = core2.Sealed()
- if err != nil {
- t.Fatalf("err checking seal status: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
-
- // Core2 should be in standby
- standby, err := core2.Standby()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !standby {
- t.Fatalf("should be standby")
- }
-
- // Check the leader is not local
- isLeader, advertise, _, err = core2.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if isLeader {
- t.Fatalf("should not be leader")
- }
- if advertise != redirectOriginal {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
- }
-
- // Seal the first core, should step down
- err = core.Seal(root)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Core should be in standby
- standby, err = core.Standby()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !standby {
- t.Fatalf("should be standby")
- }
-
- // Wait for core2 to become active
- TestWaitActive(t, core2)
-
- // Check the leader is local
- isLeader, advertise, _, err = core2.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !isLeader {
- t.Fatalf("should be leader")
- }
- if advertise != redirectOriginal2 {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal2)
- }
-
- // Give time for the entries to clear out; it is conservative at 1/second
- time.Sleep(10 * leaderPrefixCleanDelay)
-
- entries, err = core2.barrier.List(coreLeaderPrefix)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(entries) != 1 {
- t.Fatalf("wrong number of core leader prefix entries, got %d", len(entries))
- }
-}
-
-func TestCore_Standby(t *testing.T) {
- logger = logformat.NewVaultLogger(log.LevelTrace)
-
- inmha, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- testCore_Standby_Common(t, inmha, inmha.(physical.HABackend))
-}
-
-func TestCore_Standby_SeparateHA(t *testing.T) {
- logger = logformat.NewVaultLogger(log.LevelTrace)
-
- inmha, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- inmha2, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- testCore_Standby_Common(t, inmha, inmha2.(physical.HABackend))
-}
-
-func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical.HABackend) {
- // Create the first core and initialize it
- redirectOriginal := "http://127.0.0.1:8200"
- core, err := NewCore(&CoreConfig{
- Physical: inm,
- HAPhysical: inmha,
- RedirectAddr: redirectOriginal,
- DisableMlock: true,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- keys, root := TestCoreInit(t, core)
- for _, key := range keys {
- if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Verify unsealed
- sealed, err := core.Sealed()
- if err != nil {
- t.Fatalf("err checking seal status: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
-
- // Wait for core to become active
- TestWaitActive(t, core)
-
- // Put a secret
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/foo",
- Data: map[string]interface{}{
- "foo": "bar",
- },
- ClientToken: root,
- }
- _, err = core.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check the leader is local
- isLeader, advertise, _, err := core.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !isLeader {
- t.Fatalf("should be leader")
- }
- if advertise != redirectOriginal {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
- }
-
- // Create a second core, attached to same in-memory store
- redirectOriginal2 := "http://127.0.0.1:8500"
- core2, err := NewCore(&CoreConfig{
- Physical: inm,
- HAPhysical: inmha,
- RedirectAddr: redirectOriginal2,
- DisableMlock: true,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- for _, key := range keys {
- if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Verify unsealed
- sealed, err = core2.Sealed()
- if err != nil {
- t.Fatalf("err checking seal status: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
-
- // Core2 should be in standby
- standby, err := core2.Standby()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !standby {
- t.Fatalf("should be standby")
- }
-
- // Request should fail in standby mode
- _, err = core2.HandleRequest(req)
- if err != consts.ErrStandby {
- t.Fatalf("err: %v", err)
- }
-
- // Check the leader is not local
- isLeader, advertise, _, err = core2.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if isLeader {
- t.Fatalf("should not be leader")
- }
- if advertise != redirectOriginal {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal)
- }
-
- // Seal the first core, should step down
- err = core.Seal(root)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Core should be in standby
- standby, err = core.Standby()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !standby {
- t.Fatalf("should be standby")
- }
-
- // Wait for core2 to become active
- TestWaitActive(t, core2)
-
- // Read the secret
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "secret/foo",
- ClientToken: root,
- }
- resp, err := core2.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Verify the response
- if resp.Data["foo"] != "bar" {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Check the leader is local
- isLeader, advertise, _, err = core2.Leader()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !isLeader {
- t.Fatalf("should be leader")
- }
- if advertise != redirectOriginal2 {
- t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal2)
- }
-
- if inm.(*inmem.InmemHABackend) == inmha.(*inmem.InmemHABackend) {
- lockSize := inm.(*inmem.InmemHABackend).LockMapSize()
- if lockSize == 0 {
- t.Fatalf("locks not used with only one HA backend")
- }
- } else {
- lockSize := inmha.(*inmem.InmemHABackend).LockMapSize()
- if lockSize == 0 {
- t.Fatalf("locks not used with expected HA backend")
- }
-
- lockSize = inm.(*inmem.InmemHABackend).LockMapSize()
- if lockSize != 0 {
- t.Fatalf("locks used with unexpected HA backend")
- }
- }
-}
-
-// Ensure that InternalData is never returned
-func TestCore_HandleRequest_Login_InternalData(t *testing.T) {
- noop := &NoopBackend{
- Login: []string{"login"},
- Response: &logical.Response{
- Auth: &logical.Auth{
- Policies: []string{"foo", "bar"},
- InternalData: map[string]interface{}{
- "foo": "bar",
- },
- },
- },
- }
-
- c, _, root := TestCoreUnsealed(t)
- c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return noop, nil
- }
-
- // Enable the credential backend
- req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
- req.Data["type"] = "noop"
- req.ClientToken = root
- _, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Attempt to login
- lreq := &logical.Request{
- Path: "auth/foo/login",
- }
- lresp, err := c.HandleRequest(lreq)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Ensure we do not get the internal data
- if lresp.Auth.InternalData != nil {
- t.Fatalf("bad: %#v", lresp)
- }
-}
-
-// Ensure that InternalData is never returned
-func TestCore_HandleRequest_InternalData(t *testing.T) {
- noop := &NoopBackend{
- Response: &logical.Response{
- Secret: &logical.Secret{
- InternalData: map[string]interface{}{
- "foo": "bar",
- },
- },
- Data: map[string]interface{}{
- "foo": "bar",
- },
- },
- }
-
- c, _, root := TestCoreUnsealed(t)
- c.logicalBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return noop, nil
- }
-
- // Enable the credential backend
- req := logical.TestRequest(t, logical.UpdateOperation, "sys/mounts/foo")
- req.Data["type"] = "noop"
- req.ClientToken = root
- _, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Attempt to read
- lreq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "foo/test",
- ClientToken: root,
- }
- lresp, err := c.HandleRequest(lreq)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Ensure we do not get the internal data
- if lresp.Secret.InternalData != nil {
- t.Fatalf("bad: %#v", lresp)
- }
-}
-
-// Ensure login does not return a secret
-func TestCore_HandleLogin_ReturnSecret(t *testing.T) {
- // Create a badass credential backend that always logs in as armon
- noopBack := &NoopBackend{
- Login: []string{"login"},
- Response: &logical.Response{
- Secret: &logical.Secret{},
- Auth: &logical.Auth{
- Policies: []string{"foo", "bar"},
- },
- },
- }
- c, _, root := TestCoreUnsealed(t)
- c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return noopBack, nil
- }
-
- // Enable the credential backend
- req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
- req.Data["type"] = "noop"
- req.ClientToken = root
- _, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Attempt to login
- lreq := &logical.Request{
- Path: "auth/foo/login",
- }
- _, err = c.HandleRequest(lreq)
- if err != ErrInternalError {
- t.Fatalf("err: %v", err)
- }
-}
-
-// Renew should return the same lease back
-func TestCore_RenewSameLease(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
-
- // Create a leasable secret
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/test",
- Data: map[string]interface{}{
- "foo": "bar",
- "lease": "1h",
- },
- ClientToken: root,
- }
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Read the key
- req.Operation = logical.ReadOperation
- req.Data = nil
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
- t.Fatalf("bad: %#v", resp.Secret)
- }
- original := resp.Secret.LeaseID
-
- // Renew the lease
- req = logical.TestRequest(t, logical.UpdateOperation, "sys/renew/"+resp.Secret.LeaseID)
- req.ClientToken = root
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Verify the lease did not change
- if resp.Secret.LeaseID != original {
- t.Fatalf("lease id changed: %s %s", original, resp.Secret.LeaseID)
- }
-
- // Renew the lease (alternate path)
- req = logical.TestRequest(t, logical.UpdateOperation, "sys/leases/renew/"+resp.Secret.LeaseID)
- req.ClientToken = root
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Verify the lease did not change
- if resp.Secret.LeaseID != original {
- t.Fatalf("lease id changed: %s %s", original, resp.Secret.LeaseID)
- }
-}
-
-// Renew of a token should not create a new lease
-func TestCore_RenewToken_SingleRegister(t *testing.T) {
- c, _, root := TestCoreUnsealed(t)
-
- // Create a new token
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "auth/token/create",
- Data: map[string]interface{}{
- "lease": "1h",
- },
- ClientToken: root,
- }
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- newClient := resp.Auth.ClientToken
-
- // Renew the token
- req = logical.TestRequest(t, logical.UpdateOperation, "auth/token/renew")
- req.ClientToken = newClient
- req.Data = map[string]interface{}{
- "token": newClient,
- }
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Revoke using the renew prefix
- req = logical.TestRequest(t, logical.UpdateOperation, "sys/revoke-prefix/auth/token/renew/")
- req.ClientToken = root
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Verify our token is still valid (e.g. we did not get invalided by the revoke)
- req = logical.TestRequest(t, logical.UpdateOperation, "auth/token/lookup")
- req.Data = map[string]interface{}{
- "token": newClient,
- }
- req.ClientToken = newClient
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Verify the token exists
- if resp.Data["id"] != newClient {
- t.Fatalf("bad: %#v", resp.Data)
- }
-}
-
-// Based on bug GH-203, attempt to disable a credential backend with leased secrets
-func TestCore_EnableDisableCred_WithLease(t *testing.T) {
- noopBack := &NoopBackend{
- Login: []string{"login"},
- Response: &logical.Response{
- Auth: &logical.Auth{
- Policies: []string{"root"},
- },
- },
- }
-
- c, _, root := TestCoreUnsealed(t)
- c.credentialBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return noopBack, nil
- }
-
- var secretWritingPolicy = `
-name = "admins"
-path "secret/*" {
- capabilities = ["update", "create", "read"]
-}
-`
-
- ps := c.policyStore
- policy, _ := Parse(secretWritingPolicy)
- if err := ps.SetPolicy(policy); err != nil {
- t.Fatal(err)
- }
-
- // Enable the credential backend
- req := logical.TestRequest(t, logical.UpdateOperation, "sys/auth/foo")
- req.Data["type"] = "noop"
- req.ClientToken = root
- _, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Attempt to login -- should fail because we don't allow root to be returned
- lreq := &logical.Request{
- Path: "auth/foo/login",
- }
- lresp, err := c.HandleRequest(lreq)
- if err == nil || lresp == nil || !lresp.IsError() {
- t.Fatalf("expected error trying to auth and receive root policy")
- }
-
- // Fix and try again
- noopBack.Response.Auth.Policies = []string{"admins"}
- lreq = &logical.Request{
- Path: "auth/foo/login",
- }
- lresp, err = c.HandleRequest(lreq)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Create a leasable secret
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "secret/test",
- Data: map[string]interface{}{
- "foo": "bar",
- "lease": "1h",
- },
- ClientToken: lresp.Auth.ClientToken,
- }
- resp, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %#v", resp)
- }
-
- // Read the key
- req.Operation = logical.ReadOperation
- req.Data = nil
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp == nil || resp.Secret == nil || resp.Secret.LeaseID == "" {
- t.Fatalf("bad: %#v", resp.Secret)
- }
-
- // Renew the lease
- req = logical.TestRequest(t, logical.UpdateOperation, "sys/leases/renew")
- req.Data = map[string]interface{}{
- "lease_id": resp.Secret.LeaseID,
- }
- req.ClientToken = lresp.Auth.ClientToken
- _, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Disable the credential backend
- req = logical.TestRequest(t, logical.DeleteOperation, "sys/auth/foo")
- req.ClientToken = root
- resp, err = c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v %#v", err, resp)
- }
-}
-
-func TestCore_HandleRequest_MountPointType(t *testing.T) {
- noop := &NoopBackend{
- Response: &logical.Response{},
- }
- c, _, root := TestCoreUnsealed(t)
- c.logicalBackends["noop"] = func(*logical.BackendConfig) (logical.Backend, error) {
- return noop, nil
- }
-
- // Enable the logical backend
- req := logical.TestRequest(t, logical.UpdateOperation, "sys/mounts/foo")
- req.Data["type"] = "noop"
- req.Data["description"] = "foo"
- req.ClientToken = root
- _, err := c.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Attempt to request
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "foo/test",
- Connection: &logical.Connection{},
- }
- req.ClientToken = root
- if _, err := c.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Verify Path, MountPoint, and MountType
- if noop.Requests[0].Path != "test" {
- t.Fatalf("bad: %#v", noop.Requests)
- }
- if noop.Requests[0].MountPoint != "foo/" {
- t.Fatalf("bad: %#v", noop.Requests)
- }
- if noop.Requests[0].MountType != "noop" {
- t.Fatalf("bad: %#v", noop.Requests)
- }
-}
-
-func TestCore_Standby_Rotate(t *testing.T) {
- // Create the first core and initialize it
- logger = logformat.NewVaultLogger(log.LevelTrace)
-
- inm, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- inmha, err := inmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- redirectOriginal := "http://127.0.0.1:8200"
- core, err := NewCore(&CoreConfig{
- Physical: inm,
- HAPhysical: inmha.(physical.HABackend),
- RedirectAddr: redirectOriginal,
- DisableMlock: true,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- keys, root := TestCoreInit(t, core)
- for _, key := range keys {
- if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Wait for core to become active
- TestWaitActive(t, core)
-
- // Create a second core, attached to same in-memory store
- redirectOriginal2 := "http://127.0.0.1:8500"
- core2, err := NewCore(&CoreConfig{
- Physical: inm,
- HAPhysical: inmha.(physical.HABackend),
- RedirectAddr: redirectOriginal2,
- DisableMlock: true,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- for _, key := range keys {
- if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- // Rotate the encryption key
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/rotate",
- ClientToken: root,
- }
- _, err = core.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Seal the first core, should step down
- err = core.Seal(root)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Wait for core2 to become active
- TestWaitActive(t, core2)
-
- // Read the key status
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "sys/key-status",
- ClientToken: root,
- }
- resp, err := core2.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Verify the response
- if resp.Data["term"] != 2 {
- t.Fatalf("bad: %#v", resp)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/cors.go b/vendor/github.com/hashicorp/vault/vault/cors.go
deleted file mode 100644
index f94f078..0000000
--- a/vendor/github.com/hashicorp/vault/vault/cors.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package vault
-
-import (
- "errors"
- "fmt"
- "sync"
- "sync/atomic"
-
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- CORSDisabled uint32 = iota
- CORSEnabled
-)
-
-var StdAllowedHeaders = []string{
- "Content-Type",
- "X-Requested-With",
- "X-Vault-AWS-IAM-Server-ID",
- "X-Vault-MFA",
- "X-Vault-No-Request-Forwarding",
- "X-Vault-Token",
- "X-Vault-Wrap-Format",
- "X-Vault-Wrap-TTL",
-}
-
-// CORSConfig stores the state of the CORS configuration.
-type CORSConfig struct {
- sync.RWMutex `json:"-"`
- core *Core
- Enabled uint32 `json:"enabled"`
- AllowedOrigins []string `json:"allowed_origins,omitempty"`
- AllowedHeaders []string `json:"allowed_headers,omitempty"`
-}
-
-func (c *Core) saveCORSConfig() error {
- view := c.systemBarrierView.SubView("config/")
-
- localConfig := &CORSConfig{
- Enabled: atomic.LoadUint32(&c.corsConfig.Enabled),
- }
- c.corsConfig.RLock()
- localConfig.AllowedOrigins = c.corsConfig.AllowedOrigins
- localConfig.AllowedHeaders = c.corsConfig.AllowedHeaders
- c.corsConfig.RUnlock()
-
- entry, err := logical.StorageEntryJSON("cors", localConfig)
- if err != nil {
- return fmt.Errorf("failed to create CORS config entry: %v", err)
- }
-
- if err := view.Put(entry); err != nil {
- return fmt.Errorf("failed to save CORS config: %v", err)
- }
-
- return nil
-}
-
-// This should only be called with the core state lock held for writing
-func (c *Core) loadCORSConfig() error {
- view := c.systemBarrierView.SubView("config/")
-
- // Load the config in
- out, err := view.Get("cors")
- if err != nil {
- return fmt.Errorf("failed to read CORS config: %v", err)
- }
- if out == nil {
- return nil
- }
-
- newConfig := new(CORSConfig)
- err = out.DecodeJSON(newConfig)
- if err != nil {
- return err
- }
- newConfig.core = c
-
- c.corsConfig = newConfig
-
- return nil
-}
-
-// Enable takes either a '*' or a comma-seprated list of URLs that can make
-// cross-origin requests to Vault.
-func (c *CORSConfig) Enable(urls []string, headers []string) error {
- if len(urls) == 0 {
- return errors.New("at least one origin or the wildcard must be provided.")
- }
-
- if strutil.StrListContains(urls, "*") && len(urls) > 1 {
- return errors.New("to allow all origins the '*' must be the only value for allowed_origins")
- }
-
- c.Lock()
- c.AllowedOrigins = urls
-
- // Start with the standard headers to Vault accepts.
- c.AllowedHeaders = append(c.AllowedHeaders, StdAllowedHeaders...)
-
- // Allow the user to add additional headers to the list of
- // headers allowed on cross-origin requests.
- if len(headers) > 0 {
- c.AllowedHeaders = append(c.AllowedHeaders, headers...)
- }
- c.Unlock()
-
- atomic.StoreUint32(&c.Enabled, CORSEnabled)
-
- return c.core.saveCORSConfig()
-}
-
-// IsEnabled returns the value of CORSConfig.isEnabled
-func (c *CORSConfig) IsEnabled() bool {
- return atomic.LoadUint32(&c.Enabled) == CORSEnabled
-}
-
-// Disable sets CORS to disabled and clears the allowed origins & headers.
-func (c *CORSConfig) Disable() error {
- atomic.StoreUint32(&c.Enabled, CORSDisabled)
- c.Lock()
-
- c.AllowedOrigins = nil
- c.AllowedHeaders = nil
-
- c.Unlock()
-
- return c.core.saveCORSConfig()
-}
-
-// IsValidOrigin determines if the origin of the request is allowed to make
-// cross-origin requests based on the CORSConfig.
-func (c *CORSConfig) IsValidOrigin(origin string) bool {
- // If we aren't enabling CORS then all origins are valid
- if !c.IsEnabled() {
- return true
- }
-
- c.RLock()
- defer c.RUnlock()
-
- if len(c.AllowedOrigins) == 0 {
- return false
- }
-
- if len(c.AllowedOrigins) == 1 && (c.AllowedOrigins)[0] == "*" {
- return true
- }
-
- return strutil.StrListContains(c.AllowedOrigins, origin)
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go b/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go
deleted file mode 100644
index b5e477a..0000000
--- a/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package vault
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/errwrap"
-
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
-)
-
-type dynamicSystemView struct {
- core *Core
- mountEntry *MountEntry
-}
-
-func (d dynamicSystemView) DefaultLeaseTTL() time.Duration {
- def, _ := d.fetchTTLs()
- return def
-}
-
-func (d dynamicSystemView) MaxLeaseTTL() time.Duration {
- _, max := d.fetchTTLs()
- return max
-}
-
-func (d dynamicSystemView) SudoPrivilege(path string, token string) bool {
- // Resolve the token policy
- te, err := d.core.tokenStore.Lookup(token)
- if err != nil {
- d.core.logger.Error("core: failed to lookup token", "error", err)
- return false
- }
-
- // Ensure the token is valid
- if te == nil {
- d.core.logger.Error("entry not found for given token")
- return false
- }
-
- // Construct the corresponding ACL object
- acl, err := d.core.policyStore.ACL(te.Policies...)
- if err != nil {
- d.core.logger.Error("failed to retrieve ACL for token's policies", "token_policies", te.Policies, "error", err)
- return false
- }
-
- // The operation type isn't important here as this is run from a path the
- // user has already been given access to; we only care about whether they
- // have sudo
- req := new(logical.Request)
- req.Operation = logical.ReadOperation
- req.Path = path
- _, rootPrivs := acl.AllowOperation(req)
- return rootPrivs
-}
-
-// TTLsByPath returns the default and max TTLs corresponding to a particular
-// mount point, or the system default
-func (d dynamicSystemView) fetchTTLs() (def, max time.Duration) {
- def = d.core.defaultLeaseTTL
- max = d.core.maxLeaseTTL
-
- if d.mountEntry.Config.DefaultLeaseTTL != 0 {
- def = d.mountEntry.Config.DefaultLeaseTTL
- }
- if d.mountEntry.Config.MaxLeaseTTL != 0 {
- max = d.mountEntry.Config.MaxLeaseTTL
- }
-
- return
-}
-
-// Tainted indicates that the mount is in the process of being removed
-func (d dynamicSystemView) Tainted() bool {
- return d.mountEntry.Tainted
-}
-
-// CachingDisabled indicates whether to use caching behavior
-func (d dynamicSystemView) CachingDisabled() bool {
- return d.core.cachingDisabled || (d.mountEntry != nil && d.mountEntry.Config.ForceNoCache)
-}
-
-// Checks if this is a primary Vault instance. Caller should hold the stateLock
-// in read mode.
-func (d dynamicSystemView) ReplicationState() consts.ReplicationState {
- return d.core.replicationState
-}
-
-// ResponseWrapData wraps the given data in a cubbyhole and returns the
-// token used to unwrap.
-func (d dynamicSystemView) ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
- req := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "sys/wrapping/wrap",
- }
-
- resp := &logical.Response{
- WrapInfo: &wrapping.ResponseWrapInfo{
- TTL: ttl,
- },
- Data: data,
- }
-
- if jwt {
- resp.WrapInfo.Format = "jwt"
- }
-
- _, err := d.core.wrapInCubbyhole(req, resp)
- if err != nil {
- return nil, err
- }
-
- return resp.WrapInfo, nil
-}
-
-// LookupPlugin looks for a plugin with the given name in the plugin catalog. It
-// returns a PluginRunner or an error if no plugin was found.
-func (d dynamicSystemView) LookupPlugin(name string) (*pluginutil.PluginRunner, error) {
- if d.core == nil {
- return nil, fmt.Errorf("system view core is nil")
- }
- if d.core.pluginCatalog == nil {
- return nil, fmt.Errorf("system view core plugin catalog is nil")
- }
- r, err := d.core.pluginCatalog.Get(name)
- if err != nil {
- return nil, err
- }
- if r == nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("{{err}}: %s", name), ErrPluginNotFound)
- }
-
- return r, nil
-}
-
-// MlockEnabled returns the configuration setting for enabling mlock on plugins.
-func (d dynamicSystemView) MlockEnabled() bool {
- return d.core.enableMlock
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/expiration.go b/vendor/github.com/hashicorp/vault/vault/expiration.go
deleted file mode 100644
index 628df8e..0000000
--- a/vendor/github.com/hashicorp/vault/vault/expiration.go
+++ /dev/null
@@ -1,1268 +0,0 @@
-package vault
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "path"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/armon/go-metrics"
- log "github.com/mgutz/logxi/v1"
-
- "github.com/hashicorp/errwrap"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/locksutil"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- // expirationSubPath is the sub-path used for the expiration manager
- // view. This is nested under the system view.
- expirationSubPath = "expire/"
-
- // leaseViewPrefix is the prefix used for the ID based lookup of leases.
- leaseViewPrefix = "id/"
-
- // tokenViewPrefix is the prefix used for the token based lookup of leases.
- tokenViewPrefix = "token/"
-
- // maxRevokeAttempts limits how many revoke attempts are made
- maxRevokeAttempts = 6
-
- // revokeRetryBase is a baseline retry time
- revokeRetryBase = 10 * time.Second
-
- // maxLeaseDuration is the default maximum lease duration
- maxLeaseTTL = 32 * 24 * time.Hour
-
- // defaultLeaseDuration is the default lease duration used when no lease is specified
- defaultLeaseTTL = maxLeaseTTL
-)
-
-// ExpirationManager is used by the Core to manage leases. Secrets
-// can provide a lease, meaning that they can be renewed or revoked.
-// If a secret is not renewed in timely manner, it may be expired, and
-// the ExpirationManager will handle doing automatic revocation.
-type ExpirationManager struct {
- router *Router
- idView *BarrierView
- tokenView *BarrierView
- tokenStore *TokenStore
- logger log.Logger
-
- pending map[string]*time.Timer
- pendingLock sync.RWMutex
-
- tidyLock int32
-
- restoreMode int32
- restoreModeLock sync.RWMutex
- restoreRequestLock sync.RWMutex
- restoreLocks []*locksutil.LockEntry
- restoreLoaded sync.Map
- quitCh chan struct{}
-}
-
-// NewExpirationManager creates a new ExpirationManager that is backed
-// using a given view, and uses the provided router for revocation.
-func NewExpirationManager(router *Router, view *BarrierView, ts *TokenStore, logger log.Logger) *ExpirationManager {
- if logger == nil {
- logger = log.New("expiration_manager")
- }
-
- exp := &ExpirationManager{
- router: router,
- idView: view.SubView(leaseViewPrefix),
- tokenView: view.SubView(tokenViewPrefix),
- tokenStore: ts,
- logger: logger,
- pending: make(map[string]*time.Timer),
-
- // new instances of the expiration manager will go immediately into
- // restore mode
- restoreMode: 1,
- restoreLocks: locksutil.CreateLocks(),
- quitCh: make(chan struct{}),
- }
- return exp
-}
-
-// setupExpiration is invoked after we've loaded the mount table to
-// initialize the expiration manager
-func (c *Core) setupExpiration() error {
- c.metricsMutex.Lock()
- defer c.metricsMutex.Unlock()
- // Create a sub-view
- view := c.systemBarrierView.SubView(expirationSubPath)
-
- // Create the manager
- mgr := NewExpirationManager(c.router, view, c.tokenStore, c.logger)
- c.expiration = mgr
-
- // Link the token store to this
- c.tokenStore.SetExpirationManager(mgr)
-
- // Restore the existing state
- c.logger.Info("expiration: restoring leases")
- errorFunc := func() {
- c.logger.Error("expiration: shutting down")
- if err := c.Shutdown(); err != nil {
- c.logger.Error("expiration: error shutting down core: %v", err)
- }
- }
- go c.expiration.Restore(errorFunc)
-
- return nil
-}
-
-// stopExpiration is used to stop the expiration manager before
-// sealing the Vault.
-func (c *Core) stopExpiration() error {
- if c.expiration != nil {
- if err := c.expiration.Stop(); err != nil {
- return err
- }
- c.metricsMutex.Lock()
- defer c.metricsMutex.Unlock()
- c.expiration = nil
- }
- return nil
-}
-
-// lockLease takes out a lock for a given lease ID
-func (m *ExpirationManager) lockLease(leaseID string) {
- locksutil.LockForKey(m.restoreLocks, leaseID).Lock()
-}
-
-// unlockLease unlocks a given lease ID
-func (m *ExpirationManager) unlockLease(leaseID string) {
- locksutil.LockForKey(m.restoreLocks, leaseID).Unlock()
-}
-
-// inRestoreMode returns if we are currently in restore mode
-func (m *ExpirationManager) inRestoreMode() bool {
- return atomic.LoadInt32(&m.restoreMode) == 1
-}
-
-// Tidy cleans up the dangling storage entries for leases. It scans the storage
-// view to find all the available leases, checks if the token embedded in it is
-// either empty or invalid and in both the cases, it revokes them. It also uses
-// a token cache to avoid multiple lookups of the same token ID. It is normally
-// not required to use the API that invokes this. This is only intended to
-// clean up the corrupt storage due to bugs.
-func (m *ExpirationManager) Tidy() error {
- if m.inRestoreMode() {
- return errors.New("cannot run tidy while restoring leases")
- }
-
- var tidyErrors *multierror.Error
-
- if !atomic.CompareAndSwapInt32(&m.tidyLock, 0, 1) {
- m.logger.Warn("expiration: tidy operation on leases is already in progress")
- return fmt.Errorf("tidy operation on leases is already in progress")
- }
-
- defer atomic.CompareAndSwapInt32(&m.tidyLock, 1, 0)
-
- m.logger.Info("expiration: beginning tidy operation on leases")
- defer m.logger.Info("expiration: finished tidy operation on leases")
-
- // Create a cache to keep track of looked up tokens
- tokenCache := make(map[string]bool)
- var countLease, revokedCount, deletedCountInvalidToken, deletedCountEmptyToken int64
-
- tidyFunc := func(leaseID string) {
- countLease++
- if countLease%500 == 0 {
- m.logger.Info("expiration: tidying leases", "progress", countLease)
- }
-
- le, err := m.loadEntry(leaseID)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to load the lease ID %q: %v", leaseID, err))
- return
- }
-
- if le == nil {
- tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("nil entry for lease ID %q: %v", leaseID, err))
- return
- }
-
- var isValid, ok bool
- revokeLease := false
- if le.ClientToken == "" {
- m.logger.Trace("expiration: revoking lease which has an empty token", "lease_id", leaseID)
- revokeLease = true
- deletedCountEmptyToken++
- goto REVOKE_CHECK
- }
-
- isValid, ok = tokenCache[le.ClientToken]
- if !ok {
- saltedID, err := m.tokenStore.SaltID(le.ClientToken)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to lookup salt id: %v", err))
- return
- }
- lock := locksutil.LockForKey(m.tokenStore.tokenLocks, le.ClientToken)
- lock.RLock()
- te, err := m.tokenStore.lookupSalted(saltedID, true)
- lock.RUnlock()
-
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to lookup token: %v", err))
- return
- }
-
- if te == nil {
- m.logger.Trace("expiration: revoking lease which holds an invalid token", "lease_id", leaseID)
- revokeLease = true
- deletedCountInvalidToken++
- tokenCache[le.ClientToken] = false
- } else {
- tokenCache[le.ClientToken] = true
- }
- goto REVOKE_CHECK
- } else {
- if isValid {
- return
- }
-
- m.logger.Trace("expiration: revoking lease which contains an invalid token", "lease_id", leaseID)
- revokeLease = true
- deletedCountInvalidToken++
- goto REVOKE_CHECK
- }
-
- REVOKE_CHECK:
- if revokeLease {
- // Force the revocation and skip going through the token store
- // again
- err = m.revokeCommon(leaseID, true, true)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to revoke an invalid lease with ID %q: %v", leaseID, err))
- return
- }
- revokedCount++
- }
- }
-
- if err := logical.ScanView(m.idView, tidyFunc); err != nil {
- return err
- }
-
- m.logger.Debug("expiration: number of leases scanned", "count", countLease)
- m.logger.Debug("expiration: number of leases which had empty tokens", "count", deletedCountEmptyToken)
- m.logger.Debug("expiration: number of leases which had invalid tokens", "count", deletedCountInvalidToken)
- m.logger.Debug("expiration: number of leases successfully revoked", "count", revokedCount)
-
- return tidyErrors.ErrorOrNil()
-}
-
-// Restore is used to recover the lease states when starting.
-// This is used after starting the vault.
-func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) {
- defer func() {
- // Turn off restore mode. We can do this safely without the lock because
- // if restore mode finished successfully, restore mode was already
- // disabled with the lock. In an error state, this will allow the
- // Stop() function to shut everything down.
- atomic.StoreInt32(&m.restoreMode, 0)
-
- switch {
- case retErr == nil:
- case errwrap.Contains(retErr, ErrBarrierSealed.Error()):
- // Don't run error func because we're likely already shutting down
- m.logger.Warn("expiration: barrier sealed while restoring leases, stopping lease loading")
- retErr = nil
- default:
- m.logger.Error("expiration: error restoring leases", "error", retErr)
- if errorFunc != nil {
- errorFunc()
- }
- }
- }()
-
- // Accumulate existing leases
- m.logger.Debug("expiration: collecting leases")
- existing, err := logical.CollectKeys(m.idView)
- if err != nil {
- return errwrap.Wrapf("failed to scan for leases: {{err}}", err)
- }
- m.logger.Debug("expiration: leases collected", "num_existing", len(existing))
-
- // Make the channels used for the worker pool
- broker := make(chan string)
- quit := make(chan bool)
- // Buffer these channels to prevent deadlocks
- errs := make(chan error, len(existing))
- result := make(chan struct{}, len(existing))
-
- // Use a wait group
- wg := &sync.WaitGroup{}
-
- // Create 64 workers to distribute work to
- for i := 0; i < consts.ExpirationRestoreWorkerCount; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- for {
- select {
- case leaseID, ok := <-broker:
- // broker has been closed, we are done
- if !ok {
- return
- }
-
- err := m.processRestore(leaseID)
- if err != nil {
- errs <- err
- continue
- }
-
- // Send message that lease is done
- result <- struct{}{}
-
- // quit early
- case <-quit:
- return
-
- case <-m.quitCh:
- return
- }
- }
- }()
- }
-
- // Distribute the collected keys to the workers in a go routine
- wg.Add(1)
- go func() {
- defer wg.Done()
- for i, leaseID := range existing {
- if i > 0 && i%500 == 0 {
- m.logger.Trace("expiration: leases loading", "progress", i)
- }
-
- select {
- case <-quit:
- return
-
- case <-m.quitCh:
- return
-
- default:
- broker <- leaseID
- }
- }
-
- // Close the broker, causing worker routines to exit
- close(broker)
- }()
-
- // Ensure all keys on the chan are processed
- for i := 0; i < len(existing); i++ {
- select {
- case err := <-errs:
- // Close all go routines
- close(quit)
- return err
-
- case <-m.quitCh:
- close(quit)
- return nil
-
- case <-result:
- }
- }
-
- // Let all go routines finish
- wg.Wait()
-
- m.restoreModeLock.Lock()
- m.restoreLoaded = sync.Map{}
- m.restoreLocks = nil
- atomic.StoreInt32(&m.restoreMode, 0)
- m.restoreModeLock.Unlock()
-
- m.logger.Info("expiration: lease restore complete")
- return nil
-}
-
-// processRestore takes a lease and restores it in the expiration manager if it has
-// not already been seen
-func (m *ExpirationManager) processRestore(leaseID string) error {
- m.restoreRequestLock.RLock()
- defer m.restoreRequestLock.RUnlock()
-
- // Check if the lease has been seen
- if _, ok := m.restoreLoaded.Load(leaseID); ok {
- return nil
- }
-
- m.lockLease(leaseID)
- defer m.unlockLease(leaseID)
-
- // Check again with the lease locked
- if _, ok := m.restoreLoaded.Load(leaseID); ok {
- return nil
- }
-
- // Load lease and restore expiration timer
- _, err := m.loadEntryInternal(leaseID, true, false)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Stop is used to prevent further automatic revocations.
-// This must be called before sealing the view.
-func (m *ExpirationManager) Stop() error {
- // Stop all the pending expiration timers
- m.logger.Debug("expiration: stop triggered")
- defer m.logger.Debug("expiration: finished stopping")
-
- m.pendingLock.Lock()
- for _, timer := range m.pending {
- timer.Stop()
- }
- m.pending = make(map[string]*time.Timer)
- m.pendingLock.Unlock()
-
- close(m.quitCh)
- if m.inRestoreMode() {
- for {
- if !m.inRestoreMode() {
- break
- }
- time.Sleep(10 * time.Millisecond)
- }
- }
-
- return nil
-}
-
-// Revoke is used to revoke a secret named by the given LeaseID
-func (m *ExpirationManager) Revoke(leaseID string) error {
- defer metrics.MeasureSince([]string{"expire", "revoke"}, time.Now())
-
- return m.revokeCommon(leaseID, false, false)
-}
-
-// revokeCommon does the heavy lifting. If force is true, we ignore a problem
-// during revocation and still remove entries/index/lease timers
-func (m *ExpirationManager) revokeCommon(leaseID string, force, skipToken bool) error {
- defer metrics.MeasureSince([]string{"expire", "revoke-common"}, time.Now())
-
- // Load the entry
- le, err := m.loadEntry(leaseID)
- if err != nil {
- return err
- }
-
- // If there is no entry, nothing to revoke
- if le == nil {
- return nil
- }
-
- // Revoke the entry
- if !skipToken || le.Auth == nil {
- if err := m.revokeEntry(le); err != nil {
- if !force {
- return err
- }
-
- if m.logger.IsWarn() {
- m.logger.Warn("revocation from the backend failed, but in force mode so ignoring", "error", err)
- }
- }
- }
-
- // Delete the entry
- if err := m.deleteEntry(leaseID); err != nil {
- return err
- }
-
- // Delete the secondary index, but only if it's a leased secret (not auth)
- if le.Secret != nil {
- if err := m.removeIndexByToken(le.ClientToken, le.LeaseID); err != nil {
- return err
- }
- }
-
- // Clear the expiration handler
- m.pendingLock.Lock()
- if timer, ok := m.pending[leaseID]; ok {
- timer.Stop()
- delete(m.pending, leaseID)
- }
- m.pendingLock.Unlock()
- return nil
-}
-
-// RevokeForce works similarly to RevokePrefix but continues in the case of a
-// revocation error; this is mostly meant for recovery operations
-func (m *ExpirationManager) RevokeForce(prefix string) error {
- defer metrics.MeasureSince([]string{"expire", "revoke-force"}, time.Now())
-
- return m.revokePrefixCommon(prefix, true)
-}
-
-// RevokePrefix is used to revoke all secrets with a given prefix.
-// The prefix maps to that of the mount table to make this simpler
-// to reason about.
-func (m *ExpirationManager) RevokePrefix(prefix string) error {
- defer metrics.MeasureSince([]string{"expire", "revoke-prefix"}, time.Now())
-
- return m.revokePrefixCommon(prefix, false)
-}
-
-// RevokeByToken is used to revoke all the secrets issued with a given token.
-// This is done by using the secondary index. It also removes the lease entry
-// for the token itself. As a result it should *ONLY* ever be called from the
-// token store's revokeSalted function.
-func (m *ExpirationManager) RevokeByToken(te *TokenEntry) error {
- defer metrics.MeasureSince([]string{"expire", "revoke-by-token"}, time.Now())
-
- // Lookup the leases
- existing, err := m.lookupByToken(te.ID)
- if err != nil {
- return fmt.Errorf("failed to scan for leases: %v", err)
- }
-
- // Revoke all the keys
- for idx, leaseID := range existing {
- if err := m.revokeCommon(leaseID, false, false); err != nil {
- return fmt.Errorf("failed to revoke '%s' (%d / %d): %v",
- leaseID, idx+1, len(existing), err)
- }
- }
-
- if te.Path != "" {
- saltedID, err := m.tokenStore.SaltID(te.ID)
- if err != nil {
- return err
- }
- tokenLeaseID := path.Join(te.Path, saltedID)
-
- // We want to skip the revokeEntry call as that will call back into
- // revocation logic in the token store, which is what is running this
- // function in the first place -- it'd be a deadlock loop. Since the only
- // place that this function is called is revokeSalted in the token store,
- // we're already revoking the token, so we just want to clean up the lease.
- // This avoids spurious revocations later in the log when the timer runs
- // out, and eases up resource usage.
- return m.revokeCommon(tokenLeaseID, false, true)
- }
-
- return nil
-}
-
-func (m *ExpirationManager) revokePrefixCommon(prefix string, force bool) error {
- if m.inRestoreMode() {
- m.restoreRequestLock.Lock()
- defer m.restoreRequestLock.Unlock()
- }
-
- // Ensure there is a trailing slash
- if !strings.HasSuffix(prefix, "/") {
- prefix = prefix + "/"
- }
-
- // Accumulate existing leases
- sub := m.idView.SubView(prefix)
- existing, err := logical.CollectKeys(sub)
- if err != nil {
- return fmt.Errorf("failed to scan for leases: %v", err)
- }
-
- // Revoke all the keys
- for idx, suffix := range existing {
- leaseID := prefix + suffix
- if err := m.revokeCommon(leaseID, force, false); err != nil {
- return fmt.Errorf("failed to revoke '%s' (%d / %d): %v",
- leaseID, idx+1, len(existing), err)
- }
- }
- return nil
-}
-
-// Renew is used to renew a secret using the given leaseID
-// and a renew interval. The increment may be ignored.
-func (m *ExpirationManager) Renew(leaseID string, increment time.Duration) (*logical.Response, error) {
- defer metrics.MeasureSince([]string{"expire", "renew"}, time.Now())
-
- // Load the entry
- le, err := m.loadEntry(leaseID)
- if err != nil {
- return nil, err
- }
-
- // Check if the lease is renewable
- if _, err := le.renewable(); err != nil {
- return nil, err
- }
-
- if le.Secret == nil {
- if le.Auth != nil {
- return logical.ErrorResponse("tokens cannot be renewed through this endpoint"), logical.ErrPermissionDenied
- }
- return logical.ErrorResponse("lease does not correspond to a secret"), nil
- }
-
- // Attempt to renew the entry
- resp, err := m.renewEntry(le, increment)
- if err != nil {
- return nil, err
- }
-
- // Fast-path if there is no lease
- if resp == nil || resp.Secret == nil || !resp.Secret.LeaseEnabled() {
- return resp, nil
- }
-
- // Validate the lease
- if err := resp.Secret.Validate(); err != nil {
- return nil, err
- }
-
- // Attach the LeaseID
- resp.Secret.LeaseID = leaseID
-
- // Update the lease entry
- le.Data = resp.Data
- le.Secret = resp.Secret
- le.ExpireTime = resp.Secret.ExpirationTime()
- le.LastRenewalTime = time.Now()
- if err := m.persistEntry(le); err != nil {
- return nil, err
- }
-
- // Update the expiration time
- m.updatePending(le, resp.Secret.LeaseTotal())
-
- // Return the response
- return resp, nil
-}
-
-// RestoreSaltedTokenCheck verifies that the token is not expired while running
-// in restore mode. If we are not in restore mode, the lease has already been
-// restored or the lease still has time left, it returns true.
-func (m *ExpirationManager) RestoreSaltedTokenCheck(source string, saltedID string) (bool, error) {
- defer metrics.MeasureSince([]string{"expire", "restore-token-check"}, time.Now())
-
- // Return immediately if we are not in restore mode, expiration manager is
- // already loaded
- if !m.inRestoreMode() {
- return true, nil
- }
-
- m.restoreModeLock.RLock()
- defer m.restoreModeLock.RUnlock()
-
- // Check again after we obtain the lock
- if !m.inRestoreMode() {
- return true, nil
- }
-
- leaseID := path.Join(source, saltedID)
-
- m.lockLease(leaseID)
- defer m.unlockLease(leaseID)
-
- le, err := m.loadEntryInternal(leaseID, true, true)
- if err != nil {
- return false, err
- }
- if le != nil && !le.ExpireTime.IsZero() {
- expires := le.ExpireTime.Sub(time.Now())
- if expires <= 0 {
- return false, nil
- }
- }
-
- return true, nil
-}
-
-// RenewToken is used to renew a token which does not need to
-// invoke a logical backend.
-func (m *ExpirationManager) RenewToken(req *logical.Request, source string, token string,
- increment time.Duration) (*logical.Response, error) {
- defer metrics.MeasureSince([]string{"expire", "renew-token"}, time.Now())
-
- // Compute the Lease ID
- saltedID, err := m.tokenStore.SaltID(token)
- if err != nil {
- return nil, err
- }
- leaseID := path.Join(source, saltedID)
-
- // Load the entry
- le, err := m.loadEntry(leaseID)
- if err != nil {
- return nil, err
- }
-
- // Check if the lease is renewable. Note that this also checks for a nil
- // lease and errors in that case as well.
- if _, err := le.renewable(); err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
-
- // Attempt to renew the auth entry
- resp, err := m.renewAuthEntry(req, le, increment)
- if err != nil {
- return nil, err
- }
-
- if resp == nil {
- return nil, nil
- }
-
- if resp.IsError() {
- return &logical.Response{
- Data: resp.Data,
- }, nil
- }
-
- if resp.Auth == nil || !resp.Auth.LeaseEnabled() {
- return &logical.Response{
- Auth: resp.Auth,
- }, nil
- }
-
- // Attach the ClientToken
- resp.Auth.ClientToken = token
- resp.Auth.Increment = 0
-
- // Update the lease entry
- le.Auth = resp.Auth
- le.ExpireTime = resp.Auth.ExpirationTime()
- le.LastRenewalTime = time.Now()
- if err := m.persistEntry(le); err != nil {
- return nil, err
- }
-
- // Update the expiration time
- m.updatePending(le, resp.Auth.LeaseTotal())
- return &logical.Response{
- Auth: resp.Auth,
- }, nil
-}
-
-// Register is used to take a request and response with an associated
-// lease. The secret gets assigned a LeaseID and the management of
-// of lease is assumed by the expiration manager.
-func (m *ExpirationManager) Register(req *logical.Request, resp *logical.Response) (id string, retErr error) {
- defer metrics.MeasureSince([]string{"expire", "register"}, time.Now())
-
- if req.ClientToken == "" {
- return "", fmt.Errorf("expiration: cannot register a lease with an empty client token")
- }
-
- // Ignore if there is no leased secret
- if resp == nil || resp.Secret == nil {
- return "", nil
- }
-
- // Validate the secret
- if err := resp.Secret.Validate(); err != nil {
- return "", err
- }
-
- // Create a lease entry
- leaseUUID, err := uuid.GenerateUUID()
- if err != nil {
- return "", err
- }
-
- leaseID := path.Join(req.Path, leaseUUID)
-
- defer func() {
- // If there is an error we want to rollback as much as possible (note
- // that errors here are ignored to do as much cleanup as we can). We
- // want to revoke a generated secret (since an error means we may not
- // be successfully tracking it), remove indexes, and delete the entry.
- if retErr != nil {
- revResp, err := m.router.Route(logical.RevokeRequest(req.Path, resp.Secret, resp.Data))
- if err != nil {
- retErr = multierror.Append(retErr, errwrap.Wrapf("an additional internal error was encountered revoking the newly-generated secret: {{err}}", err))
- } else if revResp != nil && revResp.IsError() {
- retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered revoking the newly-generated secret: {{err}}", revResp.Error()))
- }
-
- if err := m.deleteEntry(leaseID); err != nil {
- retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered deleting any lease associated with the newly-generated secret: {{err}}", err))
- }
-
- if err := m.removeIndexByToken(req.ClientToken, leaseID); err != nil {
- retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered removing lease indexes associated with the newly-generated secret: {{err}}", err))
- }
- }
- }()
-
- le := leaseEntry{
- LeaseID: leaseID,
- ClientToken: req.ClientToken,
- Path: req.Path,
- Data: resp.Data,
- Secret: resp.Secret,
- IssueTime: time.Now(),
- ExpireTime: resp.Secret.ExpirationTime(),
- }
-
- // Encode the entry
- if err := m.persistEntry(&le); err != nil {
- return "", err
- }
-
- // Maintain secondary index by token
- if err := m.createIndexByToken(le.ClientToken, le.LeaseID); err != nil {
- return "", err
- }
-
- // Setup revocation timer if there is a lease
- m.updatePending(&le, resp.Secret.LeaseTotal())
-
- // Done
- return le.LeaseID, nil
-}
-
-// RegisterAuth is used to take an Auth response with an associated lease.
-// The token does not get a LeaseID, but the lease management is handled by
-// the expiration manager.
-func (m *ExpirationManager) RegisterAuth(source string, auth *logical.Auth) error {
- defer metrics.MeasureSince([]string{"expire", "register-auth"}, time.Now())
-
- if auth.ClientToken == "" {
- return fmt.Errorf("expiration: cannot register an auth lease with an empty token")
- }
-
- if strings.Contains(source, "..") {
- return fmt.Errorf("expiration: %s", consts.ErrPathContainsParentReferences)
- }
-
- saltedID, err := m.tokenStore.SaltID(auth.ClientToken)
- if err != nil {
- return err
- }
-
- // Create a lease entry
- le := leaseEntry{
- LeaseID: path.Join(source, saltedID),
- ClientToken: auth.ClientToken,
- Auth: auth,
- Path: source,
- IssueTime: time.Now(),
- ExpireTime: auth.ExpirationTime(),
- }
-
- // Encode the entry
- if err := m.persistEntry(&le); err != nil {
- return err
- }
-
- // Setup revocation timer
- m.updatePending(&le, auth.LeaseTotal())
- return nil
-}
-
-// FetchLeaseTimesByToken is a helper function to use token values to compute
-// the leaseID, rather than pushing that logic back into the token store.
-func (m *ExpirationManager) FetchLeaseTimesByToken(source, token string) (*leaseEntry, error) {
- defer metrics.MeasureSince([]string{"expire", "fetch-lease-times-by-token"}, time.Now())
-
- // Compute the Lease ID
- saltedID, err := m.tokenStore.SaltID(token)
- if err != nil {
- return nil, err
- }
- leaseID := path.Join(source, saltedID)
- return m.FetchLeaseTimes(leaseID)
-}
-
-// FetchLeaseTimes is used to fetch the issue time, expiration time, and last
-// renewed time of a lease entry. It returns a leaseEntry itself, but with only
-// those values copied over.
-func (m *ExpirationManager) FetchLeaseTimes(leaseID string) (*leaseEntry, error) {
- defer metrics.MeasureSince([]string{"expire", "fetch-lease-times"}, time.Now())
-
- // Load the entry
- le, err := m.loadEntry(leaseID)
- if err != nil {
- return nil, err
- }
- if le == nil {
- return nil, nil
- }
-
- ret := &leaseEntry{
- IssueTime: le.IssueTime,
- ExpireTime: le.ExpireTime,
- LastRenewalTime: le.LastRenewalTime,
- }
- if le.Secret != nil {
- ret.Secret = &logical.Secret{}
- ret.Secret.Renewable = le.Secret.Renewable
- ret.Secret.TTL = le.Secret.TTL
- }
- if le.Auth != nil {
- ret.Auth = &logical.Auth{}
- ret.Auth.Renewable = le.Auth.Renewable
- ret.Auth.TTL = le.Auth.TTL
- }
-
- return ret, nil
-}
-
-// updatePending is used to update a pending invocation for a lease
-func (m *ExpirationManager) updatePending(le *leaseEntry, leaseTotal time.Duration) {
- m.pendingLock.Lock()
- defer m.pendingLock.Unlock()
-
- // Check for an existing timer
- timer, ok := m.pending[le.LeaseID]
-
- // If there is no expiry time, don't do anything
- if le.ExpireTime.IsZero() {
- // if the timer happened to exist, stop the time and delete it from the
- // pending timers.
- if ok {
- timer.Stop()
- delete(m.pending, le.LeaseID)
- }
- return
- }
-
- // Create entry if it does not exist
- if !ok {
- timer := time.AfterFunc(leaseTotal, func() {
- m.expireID(le.LeaseID)
- })
- m.pending[le.LeaseID] = timer
- return
- }
-
- // Extend the timer by the lease total
- timer.Reset(leaseTotal)
-}
-
-// expireID is invoked when a given ID is expired
-func (m *ExpirationManager) expireID(leaseID string) {
- // Clear from the pending expiration
- m.pendingLock.Lock()
- delete(m.pending, leaseID)
- m.pendingLock.Unlock()
-
- for attempt := uint(0); attempt < maxRevokeAttempts; attempt++ {
- select {
- case <-m.quitCh:
- m.logger.Error("expiration: shutting down, not attempting further revocation of lease", "lease_id", leaseID)
- return
- default:
- }
- err := m.Revoke(leaseID)
- if err == nil {
- if m.logger.IsInfo() {
- m.logger.Info("expiration: revoked lease", "lease_id", leaseID)
- }
- return
- }
- m.logger.Error("expiration: failed to revoke lease", "lease_id", leaseID, "error", err)
- time.Sleep((1 << attempt) * revokeRetryBase)
- }
- m.logger.Error("expiration: maximum revoke attempts reached", "lease_id", leaseID)
-}
-
-// revokeEntry is used to attempt revocation of an internal entry
-func (m *ExpirationManager) revokeEntry(le *leaseEntry) error {
- // Revocation of login tokens is special since we can by-pass the
- // backend and directly interact with the token store
- if le.Auth != nil {
- if err := m.tokenStore.RevokeTree(le.ClientToken); err != nil {
- return fmt.Errorf("failed to revoke token: %v", err)
- }
-
- return nil
- }
-
- // Handle standard revocation via backends
- resp, err := m.router.Route(logical.RevokeRequest(
- le.Path, le.Secret, le.Data))
- if err != nil || (resp != nil && resp.IsError()) {
- return fmt.Errorf("failed to revoke entry: resp:%#v err:%s", resp, err)
- }
- return nil
-}
-
-// renewEntry is used to attempt renew of an internal entry
-func (m *ExpirationManager) renewEntry(le *leaseEntry, increment time.Duration) (*logical.Response, error) {
- secret := *le.Secret
- secret.IssueTime = le.IssueTime
- secret.Increment = increment
- secret.LeaseID = ""
-
- req := logical.RenewRequest(le.Path, &secret, le.Data)
- resp, err := m.router.Route(req)
- if err != nil || (resp != nil && resp.IsError()) {
- return nil, fmt.Errorf("failed to renew entry: resp:%#v err:%s", resp, err)
- }
- return resp, nil
-}
-
-// renewAuthEntry is used to attempt renew of an auth entry. Only the token
-// store should get the actual token ID intact.
-func (m *ExpirationManager) renewAuthEntry(req *logical.Request, le *leaseEntry, increment time.Duration) (*logical.Response, error) {
- auth := *le.Auth
- auth.IssueTime = le.IssueTime
- auth.Increment = increment
- if strings.HasPrefix(le.Path, "auth/token/") {
- auth.ClientToken = le.ClientToken
- } else {
- auth.ClientToken = ""
- }
-
- authReq := logical.RenewAuthRequest(le.Path, &auth, nil)
- authReq.Connection = req.Connection
- resp, err := m.router.Route(authReq)
- if err != nil {
- return nil, fmt.Errorf("failed to renew entry: %v", err)
- }
- return resp, nil
-}
-
-// loadEntry is used to read a lease entry
-func (m *ExpirationManager) loadEntry(leaseID string) (*leaseEntry, error) {
- // Take out the lease locks after we ensure we are in restore mode
- restoreMode := m.inRestoreMode()
- if restoreMode {
- m.restoreModeLock.RLock()
- defer m.restoreModeLock.RUnlock()
-
- restoreMode = m.inRestoreMode()
- if restoreMode {
- m.lockLease(leaseID)
- defer m.unlockLease(leaseID)
- }
- }
- return m.loadEntryInternal(leaseID, restoreMode, true)
-}
-
-// loadEntryInternal is used when you need to load an entry but also need to
-// control the lifecycle of the restoreLock
-func (m *ExpirationManager) loadEntryInternal(leaseID string, restoreMode bool, checkRestored bool) (*leaseEntry, error) {
- out, err := m.idView.Get(leaseID)
- if err != nil {
- return nil, fmt.Errorf("failed to read lease entry: %v", err)
- }
- if out == nil {
- return nil, nil
- }
- le, err := decodeLeaseEntry(out.Value)
- if err != nil {
- return nil, fmt.Errorf("failed to decode lease entry: %v", err)
- }
-
- if restoreMode {
- if checkRestored {
- // If we have already loaded this lease, we don't need to update on
- // load. In the case of renewal and revocation, updatePending will be
- // done after making the appropriate modifications to the lease.
- if _, ok := m.restoreLoaded.Load(leaseID); ok {
- return le, nil
- }
- }
-
- // Update the cache of restored leases, either synchronously or through
- // the lazy loaded restore process
- m.restoreLoaded.Store(le.LeaseID, struct{}{})
-
- // Setup revocation timer
- m.updatePending(le, le.ExpireTime.Sub(time.Now()))
- }
- return le, nil
-}
-
-// persistEntry is used to persist a lease entry
-func (m *ExpirationManager) persistEntry(le *leaseEntry) error {
- // Encode the entry
- buf, err := le.encode()
- if err != nil {
- return fmt.Errorf("failed to encode lease entry: %v", err)
- }
-
- // Write out to the view
- ent := logical.StorageEntry{
- Key: le.LeaseID,
- Value: buf,
- }
- if err := m.idView.Put(&ent); err != nil {
- return fmt.Errorf("failed to persist lease entry: %v", err)
- }
- return nil
-}
-
-// deleteEntry is used to delete a lease entry
-func (m *ExpirationManager) deleteEntry(leaseID string) error {
- if err := m.idView.Delete(leaseID); err != nil {
- return fmt.Errorf("failed to delete lease entry: %v", err)
- }
- return nil
-}
-
-// createIndexByToken creates a secondary index from the token to a lease entry
-func (m *ExpirationManager) createIndexByToken(token, leaseID string) error {
- saltedID, err := m.tokenStore.SaltID(token)
- if err != nil {
- return err
- }
-
- leaseSaltedID, err := m.tokenStore.SaltID(leaseID)
- if err != nil {
- return err
- }
-
- ent := logical.StorageEntry{
- Key: saltedID + "/" + leaseSaltedID,
- Value: []byte(leaseID),
- }
- if err := m.tokenView.Put(&ent); err != nil {
- return fmt.Errorf("failed to persist lease index entry: %v", err)
- }
- return nil
-}
-
-// indexByToken looks up the secondary index from the token to a lease entry
-func (m *ExpirationManager) indexByToken(token, leaseID string) (*logical.StorageEntry, error) {
- saltedID, err := m.tokenStore.SaltID(token)
- if err != nil {
- return nil, err
- }
-
- leaseSaltedID, err := m.tokenStore.SaltID(leaseID)
- if err != nil {
- return nil, err
- }
-
- key := saltedID + "/" + leaseSaltedID
- entry, err := m.tokenView.Get(key)
- if err != nil {
- return nil, fmt.Errorf("failed to look up secondary index entry")
- }
- return entry, nil
-}
-
-// removeIndexByToken removes the secondary index from the token to a lease entry
-func (m *ExpirationManager) removeIndexByToken(token, leaseID string) error {
- saltedID, err := m.tokenStore.SaltID(token)
- if err != nil {
- return err
- }
-
- leaseSaltedID, err := m.tokenStore.SaltID(leaseID)
- if err != nil {
- return err
- }
-
- key := saltedID + "/" + leaseSaltedID
- if err := m.tokenView.Delete(key); err != nil {
- return fmt.Errorf("failed to delete lease index entry: %v", err)
- }
- return nil
-}
-
-// lookupByToken is used to lookup all the leaseID's via the
-func (m *ExpirationManager) lookupByToken(token string) ([]string, error) {
- saltedID, err := m.tokenStore.SaltID(token)
- if err != nil {
- return nil, err
- }
-
- // Scan via the index for sub-leases
- prefix := saltedID + "/"
- subKeys, err := m.tokenView.List(prefix)
- if err != nil {
- return nil, fmt.Errorf("failed to list leases: %v", err)
- }
-
- // Read each index entry
- leaseIDs := make([]string, 0, len(subKeys))
- for _, sub := range subKeys {
- out, err := m.tokenView.Get(prefix + sub)
- if err != nil {
- return nil, fmt.Errorf("failed to read lease index: %v", err)
- }
- if out == nil {
- continue
- }
- leaseIDs = append(leaseIDs, string(out.Value))
- }
- return leaseIDs, nil
-}
-
-// emitMetrics is invoked periodically to emit statistics
-func (m *ExpirationManager) emitMetrics() {
- m.pendingLock.RLock()
- num := len(m.pending)
- m.pendingLock.RUnlock()
- metrics.SetGauge([]string{"expire", "num_leases"}, float32(num))
-}
-
-// leaseEntry is used to structure the values the expiration
-// manager stores. This is used to handle renew and revocation.
-type leaseEntry struct {
- LeaseID string `json:"lease_id"`
- ClientToken string `json:"client_token"`
- Path string `json:"path"`
- Data map[string]interface{} `json:"data"`
- Secret *logical.Secret `json:"secret"`
- Auth *logical.Auth `json:"auth"`
- IssueTime time.Time `json:"issue_time"`
- ExpireTime time.Time `json:"expire_time"`
- LastRenewalTime time.Time `json:"last_renewal_time"`
-}
-
-// encode is used to JSON encode the lease entry
-func (le *leaseEntry) encode() ([]byte, error) {
- return json.Marshal(le)
-}
-
-func (le *leaseEntry) renewable() (bool, error) {
- var err error
- switch {
- // If there is no entry, cannot review
- case le == nil || le.ExpireTime.IsZero():
- err = fmt.Errorf("lease not found or lease is not renewable")
- // Determine if the lease is expired
- case le.ExpireTime.Before(time.Now()):
- err = fmt.Errorf("lease expired")
- // Determine if the lease is renewable
- case le.Secret != nil && !le.Secret.Renewable:
- err = fmt.Errorf("lease is not renewable")
- case le.Auth != nil && !le.Auth.Renewable:
- err = fmt.Errorf("lease is not renewable")
- }
-
- if err != nil {
- return false, err
- }
- return true, nil
-}
-
-func (le *leaseEntry) ttl() int64 {
- return int64(le.ExpireTime.Sub(time.Now().Round(time.Second)).Seconds())
-}
-
-// decodeLeaseEntry is used to reverse encode and return a new entry
-func decodeLeaseEntry(buf []byte) (*leaseEntry, error) {
- out := new(leaseEntry)
- return out, jsonutil.DecodeJSON(buf, out)
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/expiration_test.go b/vendor/github.com/hashicorp/vault/vault/expiration_test.go
deleted file mode 100644
index 144bd16..0000000
--- a/vendor/github.com/hashicorp/vault/vault/expiration_test.go
+++ /dev/null
@@ -1,1480 +0,0 @@
-package vault
-
-import (
- "fmt"
- "reflect"
- "sort"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/physical/inmem"
- log "github.com/mgutz/logxi/v1"
-)
-
-var (
- testImagePull sync.Once
-)
-
-// mockExpiration returns a mock expiration manager
-func mockExpiration(t testing.TB) *ExpirationManager {
- _, ts, _, _ := TestCoreWithTokenStore(t)
- return ts.expiration
-}
-
-func mockBackendExpiration(t testing.TB, backend physical.Backend) (*Core, *ExpirationManager) {
- c, ts, _, _ := TestCoreWithBackendTokenStore(t, backend)
- return c, ts.expiration
-}
-
-func TestExpiration_Tidy(t *testing.T) {
- var err error
-
- exp := mockExpiration(t)
- if err := exp.Restore(nil); err != nil {
- t.Fatal(err)
- }
-
- // Set up a count function to calculate number of leases
- count := 0
- countFunc := func(leaseID string) {
- count++
- }
-
- // Scan the storage with the count func set
- if err = logical.ScanView(exp.idView, countFunc); err != nil {
- t.Fatal(err)
- }
-
- // Check that there are no leases to begin with
- if count != 0 {
- t.Fatalf("bad: lease count; expected:0 actual:%d", count)
- }
-
- // Create a lease entry without a client token in it
- le := &leaseEntry{
- LeaseID: "lease/with/no/client/token",
- Path: "foo/bar",
- }
-
- // Persist the invalid lease entry
- if err = exp.persistEntry(le); err != nil {
- t.Fatalf("error persisting entry: %v", err)
- }
-
- count = 0
- if err = logical.ScanView(exp.idView, countFunc); err != nil {
- t.Fatal(err)
- }
-
- // Check that the storage was successful and that the count of leases is
- // now 1
- if count != 1 {
- t.Fatalf("bad: lease count; expected:1 actual:%d", count)
- }
-
- // Run the tidy operation
- err = exp.Tidy()
- if err != nil {
- t.Fatal(err)
- }
-
- count = 0
- if err := logical.ScanView(exp.idView, countFunc); err != nil {
- t.Fatal(err)
- }
-
- // Post the tidy operation, the invalid lease entry should have been gone
- if count != 0 {
- t.Fatalf("bad: lease count; expected:0 actual:%d", count)
- }
-
- // Set a revoked/invalid token in the lease entry
- le.ClientToken = "invalidtoken"
-
- // Persist the invalid lease entry
- if err = exp.persistEntry(le); err != nil {
- t.Fatalf("error persisting entry: %v", err)
- }
-
- count = 0
- if err = logical.ScanView(exp.idView, countFunc); err != nil {
- t.Fatal(err)
- }
-
- // Check that the storage was successful and that the count of leases is
- // now 1
- if count != 1 {
- t.Fatalf("bad: lease count; expected:1 actual:%d", count)
- }
-
- // Run the tidy operation
- err = exp.Tidy()
- if err != nil {
- t.Fatal(err)
- }
-
- count = 0
- if err = logical.ScanView(exp.idView, countFunc); err != nil {
- t.Fatal(err)
- }
-
- // Post the tidy operation, the invalid lease entry should have been gone
- if count != 0 {
- t.Fatalf("bad: lease count; expected:0 actual:%d", count)
- }
-
- // Attach an invalid token with 2 leases
- if err = exp.persistEntry(le); err != nil {
- t.Fatalf("error persisting entry: %v", err)
- }
-
- le.LeaseID = "another/invalid/lease"
- if err = exp.persistEntry(le); err != nil {
- t.Fatalf("error persisting entry: %v", err)
- }
-
- // Run the tidy operation
- err = exp.Tidy()
- if err != nil {
- t.Fatal(err)
- }
-
- count = 0
- if err = logical.ScanView(exp.idView, countFunc); err != nil {
- t.Fatal(err)
- }
-
- // Post the tidy operation, the invalid lease entry should have been gone
- if count != 0 {
- t.Fatalf("bad: lease count; expected:0 actual:%d", count)
- }
-
- for i := 0; i < 1000; i++ {
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "invalid/lease/" + fmt.Sprintf("%d", i+1),
- ClientToken: "invalidtoken",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 100 * time.Millisecond,
- },
- },
- Data: map[string]interface{}{
- "test_key": "test_value",
- },
- }
- _, err := exp.Register(req, resp)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- }
-
- count = 0
- if err = logical.ScanView(exp.idView, countFunc); err != nil {
- t.Fatal(err)
- }
-
- // Check that there are 1000 leases now
- if count != 1000 {
- t.Fatalf("bad: lease count; expected:1000 actual:%d", count)
- }
-
- errCh1 := make(chan error)
- errCh2 := make(chan error)
-
- // Initiate tidy of the above 1000 invalid leases in quick succession. Only
- // one tidy operation can be in flight at any time. One of these requests
- // should error out.
- go func() {
- errCh1 <- exp.Tidy()
- }()
-
- go func() {
- errCh2 <- exp.Tidy()
- }()
-
- var err1, err2 error
-
- for i := 0; i < 2; i++ {
- select {
- case err1 = <-errCh1:
- case err2 = <-errCh2:
- }
- }
-
- if !(err1 != nil && err1.Error() == "tidy operation on leases is already in progress") &&
- !(err2 != nil && err2.Error() == "tidy operation on leases is already in progress") {
- t.Fatalf("expected at least one of err1 or err2 to be set; err1: %#v\n err2:%#v\n", err1, err2)
- }
-
- root, err := exp.tokenStore.rootToken()
- if err != nil {
- t.Fatal(err)
- }
- le.ClientToken = root.ID
-
- // Attach a valid token with the leases
- if err = exp.persistEntry(le); err != nil {
- t.Fatalf("error persisting entry: %v", err)
- }
-
- // Run the tidy operation
- err = exp.Tidy()
- if err != nil {
- t.Fatal(err)
- }
-
- count = 0
- if err = logical.ScanView(exp.idView, countFunc); err != nil {
- t.Fatal(err)
- }
-
- // Post the tidy operation, the valid lease entry should not get affected
- if count != 1 {
- t.Fatalf("bad: lease count; expected:1 actual:%d", count)
- }
-}
-
-// To avoid pulling in deps for all users of the package, don't leave these
-// uncommented in the public tree
-/*
-func BenchmarkExpiration_Restore_Etcd(b *testing.B) {
- addr := os.Getenv("PHYSICAL_BACKEND_BENCHMARK_ADDR")
- randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
- physicalBackend, err := physEtcd.NewEtcdBackend(map[string]string{
- "address": addr,
- "path": randPath,
- "max_parallel": "256",
- }, logger)
- if err != nil {
- b.Fatalf("err: %s", err)
- }
-
- benchmarkExpirationBackend(b, physicalBackend, 10000) // 10,000 leases
-}
-
-func BenchmarkExpiration_Restore_Consul(b *testing.B) {
- addr := os.Getenv("PHYSICAL_BACKEND_BENCHMARK_ADDR")
- randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
-
- logger := logformat.NewVaultLogger(log.LevelTrace)
- physicalBackend, err := physConsul.NewConsulBackend(map[string]string{
- "address": addr,
- "path": randPath,
- "max_parallel": "256",
- }, logger)
- if err != nil {
- b.Fatalf("err: %s", err)
- }
-
- benchmarkExpirationBackend(b, physicalBackend, 10000) // 10,000 leases
-}
-*/
-
-func BenchmarkExpiration_Restore_InMem(b *testing.B) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- b.Fatal(err)
- }
- benchmarkExpirationBackend(b, inm, 100000) // 100,000 Leases
-}
-
-func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend, numLeases int) {
- c, exp := mockBackendExpiration(b, physicalBackend)
- noop := &NoopBackend{}
- view := NewBarrierView(c.barrier, "logical/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- b.Fatal(err)
- }
- err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- b.Fatal(err)
- }
-
- // Register fake leases
- for i := 0; i < numLeases; i++ {
- pathUUID, err := uuid.GenerateUUID()
- if err != nil {
- b.Fatal(err)
- }
-
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/" + pathUUID,
- ClientToken: "root",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 400 * time.Second,
- },
- },
- Data: map[string]interface{}{
- "access_key": "xyz",
- "secret_key": "abcd",
- },
- }
- _, err = exp.Register(req, resp)
- if err != nil {
- b.Fatalf("err: %v", err)
- }
- }
-
- // Stop everything
- err = exp.Stop()
- if err != nil {
- b.Fatalf("err: %v", err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- err = exp.Restore(nil)
- // Restore
- if err != nil {
- b.Fatalf("err: %v", err)
- }
- }
- b.StopTimer()
-}
-
-func TestExpiration_Restore(t *testing.T) {
- exp := mockExpiration(t)
- noop := &NoopBackend{}
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "logical/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- t.Fatal(err)
- }
-
- paths := []string{
- "prod/aws/foo",
- "prod/aws/sub/bar",
- "prod/aws/zip",
- }
- for _, path := range paths {
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: path,
- ClientToken: "foobar",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 20 * time.Millisecond,
- },
- },
- Data: map[string]interface{}{
- "access_key": "xyz",
- "secret_key": "abcd",
- },
- }
- _, err := exp.Register(req, resp)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- }
-
- // Stop everything
- err = exp.Stop()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Restore
- err = exp.Restore(nil)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Ensure all are reaped
- start := time.Now()
- for time.Now().Sub(start) < time.Second {
- noop.Lock()
- less := len(noop.Requests) < 3
- noop.Unlock()
-
- if less {
- time.Sleep(5 * time.Millisecond)
- continue
- }
- break
- }
- for _, req := range noop.Requests {
- if req.Operation != logical.RevokeOperation {
- t.Fatalf("Bad: %v", req)
- }
- }
-}
-
-func TestExpiration_Register(t *testing.T) {
- exp := mockExpiration(t)
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
- ClientToken: "foobar",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Hour,
- },
- },
- Data: map[string]interface{}{
- "access_key": "xyz",
- "secret_key": "abcd",
- },
- }
-
- id, err := exp.Register(req, resp)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if !strings.HasPrefix(id, req.Path) {
- t.Fatalf("bad: %s", id)
- }
-
- if len(id) <= len(req.Path) {
- t.Fatalf("bad: %s", id)
- }
-}
-
-func TestExpiration_RegisterAuth(t *testing.T) {
- exp := mockExpiration(t)
- root, err := exp.tokenStore.rootToken()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- auth := &logical.Auth{
- ClientToken: root.ID,
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Hour,
- },
- }
-
- err = exp.RegisterAuth("auth/github/login", auth)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- err = exp.RegisterAuth("auth/github/../login", auth)
- if err == nil {
- t.Fatal("expected error")
- }
-}
-
-func TestExpiration_RegisterAuth_NoLease(t *testing.T) {
- exp := mockExpiration(t)
- root, err := exp.tokenStore.rootToken()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- auth := &logical.Auth{
- ClientToken: root.ID,
- }
-
- err = exp.RegisterAuth("auth/github/login", auth)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should not be able to renew, no expiration
- resp, err := exp.RenewToken(&logical.Request{}, "auth/github/login", root.ID, 0)
- if err != nil && (err != logical.ErrInvalidRequest || (resp != nil && resp.IsError() && resp.Error().Error() != "lease not found or lease is not renewable")) {
- t.Fatalf("bad: err:%v resp:%#v", err, resp)
- }
- if resp == nil {
- t.Fatal("expected a response")
- }
-
- // Wait and check token is not invalidated
- time.Sleep(20 * time.Millisecond)
-
- // Verify token does not get revoked
- out, err := exp.tokenStore.Lookup(root.ID)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("missing token")
- }
-}
-
-func TestExpiration_Revoke(t *testing.T) {
- exp := mockExpiration(t)
- noop := &NoopBackend{}
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "logical/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- t.Fatal(err)
- }
-
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
- ClientToken: "foobar",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Hour,
- },
- },
- Data: map[string]interface{}{
- "access_key": "xyz",
- "secret_key": "abcd",
- },
- }
-
- id, err := exp.Register(req, resp)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if err := exp.Revoke(id); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- req = noop.Requests[0]
- if req.Operation != logical.RevokeOperation {
- t.Fatalf("Bad: %v", req)
- }
-}
-
-func TestExpiration_RevokeOnExpire(t *testing.T) {
- exp := mockExpiration(t)
- noop := &NoopBackend{}
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "logical/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- t.Fatal(err)
- }
-
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
- ClientToken: "foobar",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 20 * time.Millisecond,
- },
- },
- Data: map[string]interface{}{
- "access_key": "xyz",
- "secret_key": "abcd",
- },
- }
-
- _, err = exp.Register(req, resp)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- start := time.Now()
- for time.Now().Sub(start) < time.Second {
- req = nil
-
- noop.Lock()
- if len(noop.Requests) > 0 {
- req = noop.Requests[0]
- }
- noop.Unlock()
- if req == nil {
- time.Sleep(5 * time.Millisecond)
- continue
- }
- if req.Operation != logical.RevokeOperation {
- t.Fatalf("Bad: %v", req)
- }
-
- break
- }
-}
-
-func TestExpiration_RevokePrefix(t *testing.T) {
- exp := mockExpiration(t)
- noop := &NoopBackend{}
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "logical/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- t.Fatal(err)
- }
-
- paths := []string{
- "prod/aws/foo",
- "prod/aws/sub/bar",
- "prod/aws/zip",
- }
- for _, path := range paths {
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: path,
- ClientToken: "foobar",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 20 * time.Millisecond,
- },
- },
- Data: map[string]interface{}{
- "access_key": "xyz",
- "secret_key": "abcd",
- },
- }
- _, err := exp.Register(req, resp)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- }
-
- // Should nuke all the keys
- if err := exp.RevokePrefix("prod/aws/"); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if len(noop.Requests) != 3 {
- t.Fatalf("Bad: %v", noop.Requests)
- }
- for _, req := range noop.Requests {
- if req.Operation != logical.RevokeOperation {
- t.Fatalf("Bad: %v", req)
- }
- }
-
- expect := []string{
- "foo",
- "sub/bar",
- "zip",
- }
- sort.Strings(noop.Paths)
- sort.Strings(expect)
- if !reflect.DeepEqual(noop.Paths, expect) {
- t.Fatalf("bad: %v", noop.Paths)
- }
-}
-
-func TestExpiration_RevokeByToken(t *testing.T) {
- exp := mockExpiration(t)
- noop := &NoopBackend{}
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "logical/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- t.Fatal(err)
- }
-
- paths := []string{
- "prod/aws/foo",
- "prod/aws/sub/bar",
- "prod/aws/zip",
- }
- for _, path := range paths {
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: path,
- ClientToken: "foobarbaz",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 20 * time.Millisecond,
- },
- },
- Data: map[string]interface{}{
- "access_key": "xyz",
- "secret_key": "abcd",
- },
- }
- _, err := exp.Register(req, resp)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- }
-
- // Should nuke all the keys
- te := &TokenEntry{
- ID: "foobarbaz",
- }
- if err := exp.RevokeByToken(te); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if len(noop.Requests) != 3 {
- t.Fatalf("Bad: %v", noop.Requests)
- }
- for _, req := range noop.Requests {
- if req.Operation != logical.RevokeOperation {
- t.Fatalf("Bad: %v", req)
- }
- }
-
- expect := []string{
- "foo",
- "sub/bar",
- "zip",
- }
- sort.Strings(noop.Paths)
- sort.Strings(expect)
- if !reflect.DeepEqual(noop.Paths, expect) {
- t.Fatalf("bad: %v", noop.Paths)
- }
-}
-
-func TestExpiration_RenewToken(t *testing.T) {
- exp := mockExpiration(t)
- root, err := exp.tokenStore.rootToken()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Register a token
- auth := &logical.Auth{
- ClientToken: root.ID,
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Hour,
- Renewable: true,
- },
- }
- err = exp.RegisterAuth("auth/token/login", auth)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Renew the token
- out, err := exp.RenewToken(&logical.Request{}, "auth/token/login", root.ID, 0)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if auth.ClientToken != out.Auth.ClientToken {
- t.Fatalf("Bad: %#v", out)
- }
-}
-
-func TestExpiration_RenewToken_NotRenewable(t *testing.T) {
- exp := mockExpiration(t)
- root, err := exp.tokenStore.rootToken()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Register a token
- auth := &logical.Auth{
- ClientToken: root.ID,
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Hour,
- Renewable: false,
- },
- }
- err = exp.RegisterAuth("auth/github/login", auth)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Attempt to renew the token
- resp, err := exp.RenewToken(&logical.Request{}, "auth/github/login", root.ID, 0)
- if err != nil && (err != logical.ErrInvalidRequest || (resp != nil && resp.IsError() && resp.Error().Error() != "lease is not renewable")) {
- t.Fatalf("bad: err:%v resp:%#v", err, resp)
- }
- if resp == nil {
- t.Fatal("expected a response")
- }
-
-}
-
-func TestExpiration_Renew(t *testing.T) {
- exp := mockExpiration(t)
- noop := &NoopBackend{}
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "logical/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- t.Fatal(err)
- }
-
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
- ClientToken: "foobar",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 20 * time.Millisecond,
- Renewable: true,
- },
- },
- Data: map[string]interface{}{
- "access_key": "xyz",
- "secret_key": "abcd",
- },
- }
-
- id, err := exp.Register(req, resp)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- noop.Response = &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 20 * time.Millisecond,
- },
- },
- Data: map[string]interface{}{
- "access_key": "123",
- "secret_key": "abcd",
- },
- }
-
- out, err := exp.Renew(id, 0)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- noop.Lock()
- defer noop.Unlock()
-
- if !reflect.DeepEqual(out, noop.Response) {
- t.Fatalf("Bad: %#v", out)
- }
-
- if len(noop.Requests) != 1 {
- t.Fatalf("Bad: %#v", noop.Requests)
- }
- req = noop.Requests[0]
- if req.Operation != logical.RenewOperation {
- t.Fatalf("Bad: %v", req)
- }
-}
-
-func TestExpiration_Renew_NotRenewable(t *testing.T) {
- exp := mockExpiration(t)
- noop := &NoopBackend{}
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "logical/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- t.Fatal(err)
- }
-
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
- ClientToken: "foobar",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 20 * time.Millisecond,
- Renewable: false,
- },
- },
- Data: map[string]interface{}{
- "access_key": "xyz",
- "secret_key": "abcd",
- },
- }
-
- id, err := exp.Register(req, resp)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- _, err = exp.Renew(id, 0)
- if err.Error() != "lease is not renewable" {
- t.Fatalf("err: %v", err)
- }
-
- noop.Lock()
- defer noop.Unlock()
-
- if len(noop.Requests) != 0 {
- t.Fatalf("Bad: %#v", noop.Requests)
- }
-}
-
-func TestExpiration_Renew_RevokeOnExpire(t *testing.T) {
- exp := mockExpiration(t)
- noop := &NoopBackend{}
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "logical/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- t.Fatal(err)
- }
-
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
- ClientToken: "foobar",
- }
- resp := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 20 * time.Millisecond,
- Renewable: true,
- },
- },
- Data: map[string]interface{}{
- "access_key": "xyz",
- "secret_key": "abcd",
- },
- }
-
- id, err := exp.Register(req, resp)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- noop.Response = &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: 20 * time.Millisecond,
- },
- },
- Data: map[string]interface{}{
- "access_key": "123",
- "secret_key": "abcd",
- },
- }
-
- _, err = exp.Renew(id, 0)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- start := time.Now()
- for time.Now().Sub(start) < time.Second {
- req = nil
-
- noop.Lock()
- if len(noop.Requests) >= 2 {
- req = noop.Requests[1]
- }
- noop.Unlock()
-
- if req == nil {
- time.Sleep(5 * time.Millisecond)
- continue
- }
- if req.Operation != logical.RevokeOperation {
- t.Fatalf("Bad: %v", req)
- }
- break
- }
-}
-
-func TestExpiration_revokeEntry(t *testing.T) {
- exp := mockExpiration(t)
-
- noop := &NoopBackend{}
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "logical/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- err = exp.router.Mount(noop, "foo/bar/", &MountEntry{Path: "foo/bar/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- t.Fatal(err)
- }
-
- le := &leaseEntry{
- LeaseID: "foo/bar/1234",
- Path: "foo/bar",
- Data: map[string]interface{}{
- "testing": true,
- },
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Minute,
- },
- },
- IssueTime: time.Now(),
- ExpireTime: time.Now(),
- }
-
- err = exp.revokeEntry(le)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- noop.Lock()
- defer noop.Unlock()
-
- req := noop.Requests[0]
- if req.Operation != logical.RevokeOperation {
- t.Fatalf("bad: operation; req: %#v", req)
- }
- if !reflect.DeepEqual(req.Data, le.Data) {
- t.Fatalf("bad: data; req: %#v\n le: %#v\n", req, le)
- }
-}
-
-func TestExpiration_revokeEntry_token(t *testing.T) {
- exp := mockExpiration(t)
- root, err := exp.tokenStore.rootToken()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // N.B.: Vault doesn't allow both a secret and auth to be returned, but the
- // reason for both is that auth needs to be included in order to use the
- // token store as it's the only mounted backend, *but* RegisterAuth doesn't
- // actually create the index by token, only Register (for a Secret) does.
- // So without the Secret we don't do anything when removing the index which
- // (at the time of writing) now fails because a bug causing every token
- // expiration to do an extra delete to a non-existent key has been fixed,
- // and this test relies on this nonstandard behavior.
- le := &leaseEntry{
- LeaseID: "foo/bar/1234",
- Auth: &logical.Auth{
- ClientToken: root.ID,
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Minute,
- },
- },
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Minute,
- },
- },
- ClientToken: root.ID,
- Path: "foo/bar",
- IssueTime: time.Now(),
- ExpireTime: time.Now(),
- }
-
- if err := exp.persistEntry(le); err != nil {
- t.Fatalf("error persisting entry: %v", err)
- }
- if err := exp.createIndexByToken(le.ClientToken, le.LeaseID); err != nil {
- t.Fatalf("error creating secondary index: %v", err)
- }
- exp.updatePending(le, le.Secret.LeaseTotal())
-
- indexEntry, err := exp.indexByToken(le.ClientToken, le.LeaseID)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if indexEntry == nil {
- t.Fatalf("err: should have found a secondary index entry")
- }
-
- err = exp.revokeEntry(le)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- out, err := exp.tokenStore.Lookup(le.ClientToken)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("bad: %v", out)
- }
-
- indexEntry, err = exp.indexByToken(le.ClientToken, le.LeaseID)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if indexEntry != nil {
- t.Fatalf("err: should not have found a secondary index entry")
- }
-}
-
-func TestExpiration_renewEntry(t *testing.T) {
- exp := mockExpiration(t)
-
- noop := &NoopBackend{
- Response: &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- TTL: time.Hour,
- },
- },
- Data: map[string]interface{}{
- "testing": false,
- },
- },
- }
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "logical/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- err = exp.router.Mount(noop, "foo/bar/", &MountEntry{Path: "foo/bar/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- t.Fatal(err)
- }
-
- le := &leaseEntry{
- LeaseID: "foo/bar/1234",
- Path: "foo/bar",
- Data: map[string]interface{}{
- "testing": true,
- },
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Minute,
- },
- },
- IssueTime: time.Now(),
- ExpireTime: time.Now(),
- }
-
- resp, err := exp.renewEntry(le, time.Second)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- noop.Lock()
- defer noop.Unlock()
-
- if !reflect.DeepEqual(resp, noop.Response) {
- t.Fatalf("bad: %#v", resp)
- }
-
- req := noop.Requests[0]
- if req.Operation != logical.RenewOperation {
- t.Fatalf("Bad: %v", req)
- }
- if !reflect.DeepEqual(req.Data, le.Data) {
- t.Fatalf("Bad: %v", req)
- }
- if req.Secret.Increment != time.Second {
- t.Fatalf("Bad: %v", req)
- }
- if req.Secret.IssueTime.IsZero() {
- t.Fatalf("Bad: %v", req)
- }
-}
-
-func TestExpiration_renewAuthEntry(t *testing.T) {
- exp := mockExpiration(t)
-
- noop := &NoopBackend{
- Response: &logical.Response{
- Auth: &logical.Auth{
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- TTL: time.Hour,
- },
- },
- },
- }
- _, barrier, _ := mockBarrier(t)
- view := NewBarrierView(barrier, "auth/foo/")
- meUUID, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- err = exp.router.Mount(noop, "auth/foo/", &MountEntry{Path: "auth/foo/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
- if err != nil {
- t.Fatal(err)
- }
-
- le := &leaseEntry{
- LeaseID: "auth/foo/1234",
- Path: "auth/foo/login",
- Auth: &logical.Auth{
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- TTL: time.Minute,
- },
- InternalData: map[string]interface{}{
- "MySecret": "secret",
- },
- },
- IssueTime: time.Now(),
- ExpireTime: time.Now().Add(time.Minute),
- }
-
- resp, err := exp.renewAuthEntry(&logical.Request{}, le, time.Second)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- noop.Lock()
- defer noop.Unlock()
-
- if !reflect.DeepEqual(resp, noop.Response) {
- t.Fatalf("bad: %#v", resp)
- }
-
- req := noop.Requests[0]
- if req.Operation != logical.RenewOperation {
- t.Fatalf("Bad: %v", req)
- }
- if req.Path != "login" {
- t.Fatalf("Bad: %v", req)
- }
- if req.Auth.Increment != time.Second {
- t.Fatalf("Bad: %v", req)
- }
- if req.Auth.IssueTime.IsZero() {
- t.Fatalf("Bad: %v", req)
- }
- if req.Auth.InternalData["MySecret"] != "secret" {
- t.Fatalf("Bad: %v", req)
- }
-}
-
-func TestExpiration_PersistLoadDelete(t *testing.T) {
- exp := mockExpiration(t)
- lastTime := time.Now()
- le := &leaseEntry{
- LeaseID: "foo/bar/1234",
- Path: "foo/bar",
- Data: map[string]interface{}{
- "testing": true,
- },
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Minute,
- },
- },
- IssueTime: lastTime,
- ExpireTime: lastTime,
- LastRenewalTime: lastTime,
- }
- if err := exp.persistEntry(le); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- out, err := exp.loadEntry("foo/bar/1234")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !le.LastRenewalTime.Equal(out.LastRenewalTime) ||
- !le.IssueTime.Equal(out.IssueTime) ||
- !le.ExpireTime.Equal(out.ExpireTime) {
- t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", le, out)
- }
- le.LastRenewalTime = out.LastRenewalTime
- le.IssueTime = out.IssueTime
- le.ExpireTime = out.ExpireTime
- if !reflect.DeepEqual(out, le) {
- t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", le, out)
- }
-
- err = exp.deleteEntry("foo/bar/1234")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- out, err = exp.loadEntry("foo/bar/1234")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("out: %#v", out)
- }
-}
-
-func TestLeaseEntry(t *testing.T) {
- le := &leaseEntry{
- LeaseID: "foo/bar/1234",
- Path: "foo/bar",
- Data: map[string]interface{}{
- "testing": true,
- },
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Minute,
- Renewable: true,
- },
- },
- IssueTime: time.Now(),
- ExpireTime: time.Now(),
- }
-
- enc, err := le.encode()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- out, err := decodeLeaseEntry(enc)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if !reflect.DeepEqual(out.Data, le.Data) {
- t.Fatalf("got: %#v, expect %#v", out, le)
- }
-
- // Test renewability
- le.ExpireTime = time.Time{}
- if r, _ := le.renewable(); r {
- t.Fatal("lease with zero expire time is not renewable")
- }
- le.ExpireTime = time.Now().Add(-1 * time.Hour)
- if r, _ := le.renewable(); r {
- t.Fatal("lease with expire time in the past is not renewable")
- }
- le.ExpireTime = time.Now().Add(1 * time.Hour)
- if r, err := le.renewable(); !r {
- t.Fatalf("lease with future expire time is renewable, err: %v", err)
- }
- le.Secret.LeaseOptions.Renewable = false
- if r, _ := le.renewable(); r {
- t.Fatal("secret is set to not be renewable but returns as renewable")
- }
- le.Secret = nil
- le.Auth = &logical.Auth{
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- },
- }
- if r, err := le.renewable(); !r {
- t.Fatalf("auth is renewable but is set to not be, err: %v", err)
- }
- le.Auth.LeaseOptions.Renewable = false
- if r, _ := le.renewable(); r {
- t.Fatal("auth is set to not be renewable but returns as renewable")
- }
-}
-
-func TestExpiration_RevokeForce(t *testing.T) {
- core, _, _, root := TestCoreWithTokenStore(t)
-
- core.logicalBackends["badrenew"] = badRenewFactory
- me := &MountEntry{
- Table: mountTableType,
- Path: "badrenew/",
- Type: "badrenew",
- Accessor: "badrenewaccessor",
- }
-
- err := core.mount(me)
- if err != nil {
- t.Fatal(err)
- }
-
- req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "badrenew/creds",
- ClientToken: root,
- }
-
- resp, err := core.HandleRequest(req)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("response was nil")
- }
- if resp.Secret == nil {
- t.Fatalf("response secret was nil, response was %#v", *resp)
- }
-
- req.Operation = logical.UpdateOperation
- req.Path = "sys/revoke-prefix/badrenew/creds"
-
- resp, err = core.HandleRequest(req)
- if err == nil {
- t.Fatal("expected error")
- }
-
- req.Path = "sys/revoke-force/badrenew/creds"
- resp, err = core.HandleRequest(req)
- if err != nil {
- t.Fatalf("got error: %s", err)
- }
-}
-
-func badRenewFactory(conf *logical.BackendConfig) (logical.Backend, error) {
- be := &framework.Backend{
- Paths: []*framework.Path{
- &framework.Path{
- Pattern: "creds",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: func(*logical.Request, *framework.FieldData) (*logical.Response, error) {
- resp := &logical.Response{
- Secret: &logical.Secret{
- InternalData: map[string]interface{}{
- "secret_type": "badRenewBackend",
- },
- },
- }
- resp.Secret.TTL = time.Second * 30
- return resp, nil
- },
- },
- },
- },
-
- Secrets: []*framework.Secret{
- &framework.Secret{
- Type: "badRenewBackend",
- Revoke: func(*logical.Request, *framework.FieldData) (*logical.Response, error) {
- return nil, fmt.Errorf("always errors")
- },
- },
- },
- }
-
- err := be.Setup(conf)
- if err != nil {
- return nil, err
- }
-
- return be, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/generate_root.go b/vendor/github.com/hashicorp/vault/vault/generate_root.go
deleted file mode 100644
index 4278b02..0000000
--- a/vendor/github.com/hashicorp/vault/vault/generate_root.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package vault
-
-import (
- "bytes"
- "encoding/base64"
- "fmt"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/helper/xor"
- "github.com/hashicorp/vault/shamir"
-)
-
-// GenerateRootConfig holds the configuration for a root generation
-// command.
-type GenerateRootConfig struct {
- Nonce string
- PGPKey string
- PGPFingerprint string
- OTP string
-}
-
-// GenerateRootResult holds the result of a root generation update
-// command
-type GenerateRootResult struct {
- Progress int
- Required int
- EncodedRootToken string
- PGPFingerprint string
-}
-
-// GenerateRoot is used to return the root generation progress (num shares)
-func (c *Core) GenerateRootProgress() (int, error) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.sealed {
- return 0, consts.ErrSealed
- }
- if c.standby {
- return 0, consts.ErrStandby
- }
-
- c.generateRootLock.Lock()
- defer c.generateRootLock.Unlock()
-
- return len(c.generateRootProgress), nil
-}
-
-// GenerateRootConfig is used to read the root generation configuration
-// It stubbornly refuses to return the OTP if one is there.
-func (c *Core) GenerateRootConfiguration() (*GenerateRootConfig, error) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.sealed {
- return nil, consts.ErrSealed
- }
- if c.standby {
- return nil, consts.ErrStandby
- }
-
- c.generateRootLock.Lock()
- defer c.generateRootLock.Unlock()
-
- // Copy the config if any
- var conf *GenerateRootConfig
- if c.generateRootConfig != nil {
- conf = new(GenerateRootConfig)
- *conf = *c.generateRootConfig
- conf.OTP = ""
- }
- return conf, nil
-}
-
-// GenerateRootInit is used to initialize the root generation settings
-func (c *Core) GenerateRootInit(otp, pgpKey string) error {
- var fingerprint string
- switch {
- case len(otp) > 0:
- otpBytes, err := base64.StdEncoding.DecodeString(otp)
- if err != nil {
- return fmt.Errorf("error decoding base64 OTP value: %s", err)
- }
- if otpBytes == nil || len(otpBytes) != 16 {
- return fmt.Errorf("decoded OTP value is invalid or wrong length")
- }
-
- case len(pgpKey) > 0:
- fingerprints, err := pgpkeys.GetFingerprints([]string{pgpKey}, nil)
- if err != nil {
- return fmt.Errorf("error parsing PGP key: %s", err)
- }
- if len(fingerprints) != 1 || fingerprints[0] == "" {
- return fmt.Errorf("could not acquire PGP key entity")
- }
- fingerprint = fingerprints[0]
-
- default:
- return fmt.Errorf("unreachable condition")
- }
-
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.sealed {
- return consts.ErrSealed
- }
- if c.standby {
- return consts.ErrStandby
- }
-
- c.generateRootLock.Lock()
- defer c.generateRootLock.Unlock()
-
- // Prevent multiple concurrent root generations
- if c.generateRootConfig != nil {
- return fmt.Errorf("root generation already in progress")
- }
-
- // Copy the configuration
- generationNonce, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
-
- c.generateRootConfig = &GenerateRootConfig{
- Nonce: generationNonce,
- OTP: otp,
- PGPKey: pgpKey,
- PGPFingerprint: fingerprint,
- }
-
- if c.logger.IsInfo() {
- c.logger.Info("core: root generation initialized", "nonce", c.generateRootConfig.Nonce)
- }
- return nil
-}
-
-// GenerateRootUpdate is used to provide a new key part
-func (c *Core) GenerateRootUpdate(key []byte, nonce string) (*GenerateRootResult, error) {
- // Verify the key length
- min, max := c.barrier.KeyLength()
- max += shamir.ShareOverhead
- if len(key) < min {
- return nil, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
- }
- if len(key) > max {
- return nil, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
- }
-
- // Get the seal configuration
- var config *SealConfig
- var err error
- if c.seal.RecoveryKeySupported() {
- config, err = c.seal.RecoveryConfig()
- if err != nil {
- return nil, err
- }
- } else {
- config, err = c.seal.BarrierConfig()
- if err != nil {
- return nil, err
- }
- }
-
- // Ensure the barrier is initialized
- if config == nil {
- return nil, ErrNotInit
- }
-
- // Ensure we are already unsealed
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.sealed {
- return nil, consts.ErrSealed
- }
- if c.standby {
- return nil, consts.ErrStandby
- }
-
- c.generateRootLock.Lock()
- defer c.generateRootLock.Unlock()
-
- // Ensure a generateRoot is in progress
- if c.generateRootConfig == nil {
- return nil, fmt.Errorf("no root generation in progress")
- }
-
- if nonce != c.generateRootConfig.Nonce {
- return nil, fmt.Errorf("incorrect nonce supplied; nonce for this root generation operation is %s", c.generateRootConfig.Nonce)
- }
-
- // Check if we already have this piece
- for _, existing := range c.generateRootProgress {
- if bytes.Equal(existing, key) {
- return nil, fmt.Errorf("given key has already been provided during this generation operation")
- }
- }
-
- // Store this key
- c.generateRootProgress = append(c.generateRootProgress, key)
- progress := len(c.generateRootProgress)
-
- // Check if we don't have enough keys to unlock
- if len(c.generateRootProgress) < config.SecretThreshold {
- if c.logger.IsDebug() {
- c.logger.Debug("core: cannot generate root, not enough keys", "keys", progress, "threshold", config.SecretThreshold)
- }
- return &GenerateRootResult{
- Progress: progress,
- Required: config.SecretThreshold,
- PGPFingerprint: c.generateRootConfig.PGPFingerprint,
- }, nil
- }
-
- // Recover the master key
- var masterKey []byte
- if config.SecretThreshold == 1 {
- masterKey = c.generateRootProgress[0]
- c.generateRootProgress = nil
- } else {
- masterKey, err = shamir.Combine(c.generateRootProgress)
- c.generateRootProgress = nil
- if err != nil {
- return nil, fmt.Errorf("failed to compute master key: %v", err)
- }
- }
-
- // Verify the master key
- if c.seal.RecoveryKeySupported() {
- if err := c.seal.VerifyRecoveryKey(masterKey); err != nil {
- c.logger.Error("core: root generation aborted, recovery key verification failed", "error", err)
- return nil, err
- }
- } else {
- if err := c.barrier.VerifyMaster(masterKey); err != nil {
- c.logger.Error("core: root generation aborted, master key verification failed", "error", err)
- return nil, err
- }
- }
-
- te, err := c.tokenStore.rootToken()
- if err != nil {
- c.logger.Error("core: root token generation failed", "error", err)
- return nil, err
- }
- if te == nil {
- c.logger.Error("core: got nil token entry back from root generation")
- return nil, fmt.Errorf("got nil token entry back from root generation")
- }
-
- uuidBytes, err := uuid.ParseUUID(te.ID)
- if err != nil {
- c.tokenStore.Revoke(te.ID)
- c.logger.Error("core: error getting generated token bytes", "error", err)
- return nil, err
- }
- if uuidBytes == nil {
- c.tokenStore.Revoke(te.ID)
- c.logger.Error("core: got nil parsed UUID bytes")
- return nil, fmt.Errorf("got nil parsed UUID bytes")
- }
-
- var tokenBytes []byte
- // Get the encoded value first so that if there is an error we don't create
- // the root token.
- switch {
- case len(c.generateRootConfig.OTP) > 0:
- // This function performs decoding checks so rather than decode the OTP,
- // just encode the value we're passing in.
- tokenBytes, err = xor.XORBase64(c.generateRootConfig.OTP, base64.StdEncoding.EncodeToString(uuidBytes))
- if err != nil {
- c.tokenStore.Revoke(te.ID)
- c.logger.Error("core: xor of root token failed", "error", err)
- return nil, err
- }
-
- case len(c.generateRootConfig.PGPKey) > 0:
- _, tokenBytesArr, err := pgpkeys.EncryptShares([][]byte{[]byte(te.ID)}, []string{c.generateRootConfig.PGPKey})
- if err != nil {
- c.tokenStore.Revoke(te.ID)
- c.logger.Error("core: error encrypting new root token", "error", err)
- return nil, err
- }
- tokenBytes = tokenBytesArr[0]
-
- default:
- c.tokenStore.Revoke(te.ID)
- return nil, fmt.Errorf("unreachable condition")
- }
-
- results := &GenerateRootResult{
- Progress: progress,
- Required: config.SecretThreshold,
- EncodedRootToken: base64.StdEncoding.EncodeToString(tokenBytes),
- PGPFingerprint: c.generateRootConfig.PGPFingerprint,
- }
-
- if c.logger.IsInfo() {
- c.logger.Info("core: root generation finished", "nonce", c.generateRootConfig.Nonce)
- }
-
- c.generateRootProgress = nil
- c.generateRootConfig = nil
- return results, nil
-}
-
-// GenerateRootCancel is used to cancel an in-progress root generation
-func (c *Core) GenerateRootCancel() error {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.sealed {
- return consts.ErrSealed
- }
- if c.standby {
- return consts.ErrStandby
- }
-
- c.generateRootLock.Lock()
- defer c.generateRootLock.Unlock()
-
- // Clear any progress or config
- c.generateRootConfig = nil
- c.generateRootProgress = nil
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/generate_root_test.go b/vendor/github.com/hashicorp/vault/vault/generate_root_test.go
deleted file mode 100644
index 3def4d9..0000000
--- a/vendor/github.com/hashicorp/vault/vault/generate_root_test.go
+++ /dev/null
@@ -1,341 +0,0 @@
-package vault
-
-import (
- "encoding/base64"
- "testing"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/helper/xor"
-)
-
-func TestCore_GenerateRoot_Lifecycle(t *testing.T) {
- bc, rc := TestSealDefConfigs()
- c, masterKeys, _, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
- c.seal.(*TestSeal).recoveryKeysDisabled = true
- testCore_GenerateRoot_Lifecycle_Common(t, c, masterKeys)
-
- bc, rc = TestSealDefConfigs()
- c, _, recoveryKeys, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
- testCore_GenerateRoot_Lifecycle_Common(t, c, recoveryKeys)
-}
-
-func testCore_GenerateRoot_Lifecycle_Common(t *testing.T, c *Core, keys [][]byte) {
- // Verify update not allowed
- if _, err := c.GenerateRootUpdate(keys[0], ""); err == nil {
- t.Fatalf("no root generation in progress")
- }
-
- // Should be no progress
- num, err := c.GenerateRootProgress()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if num != 0 {
- t.Fatalf("bad: %d", num)
- }
-
- // Should be no config
- conf, err := c.GenerateRootConfiguration()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if conf != nil {
- t.Fatalf("bad: %v", conf)
- }
-
- // Cancel should be idempotent
- err = c.GenerateRootCancel()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- otpBytes, err := GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
-
- // Start a root generation
- err = c.GenerateRootInit(base64.StdEncoding.EncodeToString(otpBytes), "")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should get config
- conf, err = c.GenerateRootConfiguration()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Cancel should be clear
- err = c.GenerateRootCancel()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should be no config
- conf, err = c.GenerateRootConfiguration()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if conf != nil {
- t.Fatalf("bad: %v", conf)
- }
-}
-
-func TestCore_GenerateRoot_Init(t *testing.T) {
- c, _, _ := TestCoreUnsealed(t)
- testCore_GenerateRoot_Init_Common(t, c)
-
- bc, rc := TestSealDefConfigs()
- c, _, _, _ = TestCoreUnsealedWithConfigs(t, bc, rc)
- testCore_GenerateRoot_Init_Common(t, c)
-}
-
-func testCore_GenerateRoot_Init_Common(t *testing.T, c *Core) {
- otpBytes, err := GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
-
- err = c.GenerateRootInit(base64.StdEncoding.EncodeToString(otpBytes), "")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Second should fail
- err = c.GenerateRootInit("", pgpkeys.TestPubKey1)
- if err == nil {
- t.Fatalf("should fail")
- }
-}
-
-func TestCore_GenerateRoot_InvalidMasterNonce(t *testing.T) {
- bc, rc := TestSealDefConfigs()
- bc.SecretShares = 1
- bc.SecretThreshold = 1
- bc.StoredShares = 0
- c, masterKeys, _, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
- c.seal.(*TestSeal).recoveryKeysDisabled = true
- // Make the master invalid
- masterKeys[0][0]++
- testCore_GenerateRoot_InvalidMasterNonce_Common(t, c, masterKeys)
-
- bc, rc = TestSealDefConfigs()
- // For ease of use let's make the threshold the same as the shares and also
- // no stored shares so we get an error after the full set
- bc.StoredShares = 0
- bc.SecretShares = 5
- bc.SecretThreshold = 5
- rc.SecretShares = 5
- rc.SecretThreshold = 5
- // In this case, pass in master keys instead as they'll be invalid
- c, masterKeys, _, _ = TestCoreUnsealedWithConfigs(t, bc, rc)
- testCore_GenerateRoot_InvalidMasterNonce_Common(t, c, masterKeys)
-}
-
-func testCore_GenerateRoot_InvalidMasterNonce_Common(t *testing.T, c *Core, keys [][]byte) {
- otpBytes, err := GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
-
- err = c.GenerateRootInit(base64.StdEncoding.EncodeToString(otpBytes), "")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Fetch new config with generated nonce
- rgconf, err := c.GenerateRootConfiguration()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if rgconf == nil {
- t.Fatalf("bad: no rekey config received")
- }
-
- // Provide the nonce (invalid)
- _, err = c.GenerateRootUpdate(keys[0], "abcd")
- if err == nil {
- t.Fatalf("expected error")
- }
-
- // Provide the master (invalid)
- for _, key := range keys {
- _, err = c.GenerateRootUpdate(key, rgconf.Nonce)
- }
- if err == nil {
- t.Fatalf("expected error")
- }
-}
-
-func TestCore_GenerateRoot_Update_OTP(t *testing.T) {
- bc, rc := TestSealDefConfigs()
- c, masterKeys, _, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
- c.seal.(*TestSeal).recoveryKeysDisabled = true
- testCore_GenerateRoot_Update_OTP_Common(t, c, masterKeys[0:bc.SecretThreshold])
-
- bc, rc = TestSealDefConfigs()
- c, _, recoveryKeys, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
- testCore_GenerateRoot_Update_OTP_Common(t, c, recoveryKeys[0:rc.SecretThreshold])
-}
-
-func testCore_GenerateRoot_Update_OTP_Common(t *testing.T, c *Core, keys [][]byte) {
- otpBytes, err := GenerateRandBytes(16)
- if err != nil {
- t.Fatal(err)
- }
-
- otp := base64.StdEncoding.EncodeToString(otpBytes)
- // Start a root generation
- err = c.GenerateRootInit(otp, "")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Fetch new config with generated nonce
- rkconf, err := c.GenerateRootConfiguration()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if rkconf == nil {
- t.Fatalf("bad: no root generation config received")
- }
-
- // Provide the keys
- var result *GenerateRootResult
- for _, key := range keys {
- result, err = c.GenerateRootUpdate(key, rkconf.Nonce)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- }
- if result == nil {
- t.Fatalf("Bad, result is nil")
- }
-
- encodedRootToken := result.EncodedRootToken
-
- // Should be no progress
- num, err := c.GenerateRootProgress()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if num != 0 {
- t.Fatalf("bad: %d", num)
- }
-
- // Should be no config
- conf, err := c.GenerateRootConfiguration()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if conf != nil {
- t.Fatalf("bad: %v", conf)
- }
-
- tokenBytes, err := xor.XORBase64(encodedRootToken, otp)
- if err != nil {
- t.Fatal(err)
- }
- token, err := uuid.FormatUUID(tokenBytes)
- if err != nil {
- t.Fatal(err)
- }
-
- // Ensure that the token is a root token
- te, err := c.tokenStore.Lookup(token)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if te == nil {
- t.Fatalf("token was nil")
- }
- if te.ID != token || te.Parent != "" ||
- len(te.Policies) != 1 || te.Policies[0] != "root" {
- t.Fatalf("bad: %#v", *te)
- }
-}
-
-func TestCore_GenerateRoot_Update_PGP(t *testing.T) {
- bc, rc := TestSealDefConfigs()
- c, masterKeys, _, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
- c.seal.(*TestSeal).recoveryKeysDisabled = true
- testCore_GenerateRoot_Update_PGP_Common(t, c, masterKeys[0:bc.SecretThreshold])
-
- bc, rc = TestSealDefConfigs()
- c, _, recoveryKeys, _ := TestCoreUnsealedWithConfigs(t, bc, rc)
- testCore_GenerateRoot_Update_PGP_Common(t, c, recoveryKeys[0:rc.SecretThreshold])
-}
-
-func testCore_GenerateRoot_Update_PGP_Common(t *testing.T, c *Core, keys [][]byte) {
- // Start a root generation
- err := c.GenerateRootInit("", pgpkeys.TestPubKey1)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Fetch new config with generated nonce
- rkconf, err := c.GenerateRootConfiguration()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if rkconf == nil {
- t.Fatalf("bad: no root generation config received")
- }
-
- // Provide the keys
- var result *GenerateRootResult
- for _, key := range keys {
- result, err = c.GenerateRootUpdate(key, rkconf.Nonce)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- }
- if result == nil {
- t.Fatalf("Bad, result is nil")
- }
-
- encodedRootToken := result.EncodedRootToken
-
- // Should be no progress
- num, err := c.GenerateRootProgress()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if num != 0 {
- t.Fatalf("bad: %d", num)
- }
-
- // Should be no config
- conf, err := c.GenerateRootConfiguration()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if conf != nil {
- t.Fatalf("bad: %v", conf)
- }
-
- ptBuf, err := pgpkeys.DecryptBytes(encodedRootToken, pgpkeys.TestPrivKey1)
- if err != nil {
- t.Fatal(err)
- }
- if ptBuf == nil {
- t.Fatal("Got nil plaintext key")
- }
-
- token := ptBuf.String()
-
- // Ensure that the token is a root token
- te, err := c.tokenStore.Lookup(token)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if te == nil {
- t.Fatalf("token was nil")
- }
- if te.ID != token || te.Parent != "" ||
- len(te.Policies) != 1 || te.Policies[0] != "root" {
- t.Fatalf("bad: %#v", *te)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/init.go b/vendor/github.com/hashicorp/vault/vault/init.go
deleted file mode 100644
index 3e267fd..0000000
--- a/vendor/github.com/hashicorp/vault/vault/init.go
+++ /dev/null
@@ -1,302 +0,0 @@
-package vault
-
-import (
- "encoding/base64"
- "encoding/hex"
- "fmt"
-
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/shamir"
-)
-
-// InitParams keeps the init function from being littered with too many
-// params, that's it!
-type InitParams struct {
- BarrierConfig *SealConfig
- RecoveryConfig *SealConfig
- RootTokenPGPKey string
-}
-
-// InitResult is used to provide the key parts back after
-// they are generated as part of the initialization.
-type InitResult struct {
- SecretShares [][]byte
- RecoveryShares [][]byte
- RootToken string
-}
-
-// Initialized checks if the Vault is already initialized
-func (c *Core) Initialized() (bool, error) {
- // Check the barrier first
- init, err := c.barrier.Initialized()
- if err != nil {
- c.logger.Error("core: barrier init check failed", "error", err)
- return false, err
- }
- if !init {
- c.logger.Info("core: security barrier not initialized")
- return false, nil
- }
-
- // Verify the seal configuration
- sealConf, err := c.seal.BarrierConfig()
- if err != nil {
- return false, err
- }
- if sealConf == nil {
- return false, fmt.Errorf("core: barrier reports initialized but no seal configuration found")
- }
-
- return true, nil
-}
-
-func (c *Core) generateShares(sc *SealConfig) ([]byte, [][]byte, error) {
- // Generate a master key
- masterKey, err := c.barrier.GenerateKey()
- if err != nil {
- return nil, nil, fmt.Errorf("key generation failed: %v", err)
- }
-
- // Return the master key if only a single key part is used
- var unsealKeys [][]byte
- if sc.SecretShares == 1 {
- unsealKeys = append(unsealKeys, masterKey)
- } else {
- // Split the master key using the Shamir algorithm
- shares, err := shamir.Split(masterKey, sc.SecretShares, sc.SecretThreshold)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to generate barrier shares: %v", err)
- }
- unsealKeys = shares
- }
-
- // If we have PGP keys, perform the encryption
- if len(sc.PGPKeys) > 0 {
- hexEncodedShares := make([][]byte, len(unsealKeys))
- for i, _ := range unsealKeys {
- hexEncodedShares[i] = []byte(hex.EncodeToString(unsealKeys[i]))
- }
- _, encryptedShares, err := pgpkeys.EncryptShares(hexEncodedShares, sc.PGPKeys)
- if err != nil {
- return nil, nil, err
- }
- unsealKeys = encryptedShares
- }
-
- return masterKey, unsealKeys, nil
-}
-
-// Initialize is used to initialize the Vault with the given
-// configurations.
-func (c *Core) Initialize(initParams *InitParams) (*InitResult, error) {
- barrierConfig := initParams.BarrierConfig
- recoveryConfig := initParams.RecoveryConfig
-
- if c.seal.RecoveryKeySupported() {
- if recoveryConfig == nil {
- return nil, fmt.Errorf("recovery configuration must be supplied")
- }
-
- if recoveryConfig.SecretShares < 1 {
- return nil, fmt.Errorf("recovery configuration must specify a positive number of shares")
- }
-
- // Check if the seal configuration is valid
- if err := recoveryConfig.Validate(); err != nil {
- c.logger.Error("core: invalid recovery configuration", "error", err)
- return nil, fmt.Errorf("invalid recovery configuration: %v", err)
- }
- }
-
- // Check if the seal configuration is valid
- if err := barrierConfig.Validate(); err != nil {
- c.logger.Error("core: invalid seal configuration", "error", err)
- return nil, fmt.Errorf("invalid seal configuration: %v", err)
- }
-
- // Avoid an initialization race
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
-
- // Check if we are initialized
- init, err := c.Initialized()
- if err != nil {
- return nil, err
- }
- if init {
- return nil, ErrAlreadyInit
- }
-
- err = c.seal.Init()
- if err != nil {
- c.logger.Error("core: failed to initialize seal", "error", err)
- return nil, fmt.Errorf("error initializing seal: %v", err)
- }
-
- barrierKey, barrierUnsealKeys, err := c.generateShares(barrierConfig)
- if err != nil {
- c.logger.Error("core: error generating shares", "error", err)
- return nil, err
- }
-
- // Initialize the barrier
- if err := c.barrier.Initialize(barrierKey); err != nil {
- c.logger.Error("core: failed to initialize barrier", "error", err)
- return nil, fmt.Errorf("failed to initialize barrier: %v", err)
- }
- if c.logger.IsInfo() {
- c.logger.Info("core: security barrier initialized", "shares", barrierConfig.SecretShares, "threshold", barrierConfig.SecretThreshold)
- }
-
- // Unseal the barrier
- if err := c.barrier.Unseal(barrierKey); err != nil {
- c.logger.Error("core: failed to unseal barrier", "error", err)
- return nil, fmt.Errorf("failed to unseal barrier: %v", err)
- }
-
- // Ensure the barrier is re-sealed
- defer func() {
- // Defers are LIFO so we need to run this here too to ensure the stop
- // happens before sealing. preSeal also stops, so we just make the
- // stopping safe against multiple calls.
- if err := c.barrier.Seal(); err != nil {
- c.logger.Error("core: failed to seal barrier", "error", err)
- }
- }()
-
- err = c.seal.SetBarrierConfig(barrierConfig)
- if err != nil {
- c.logger.Error("core: failed to save barrier configuration", "error", err)
- return nil, fmt.Errorf("barrier configuration saving failed: %v", err)
- }
-
- // If we are storing shares, pop them out of the returned results and push
- // them through the seal
- if barrierConfig.StoredShares > 0 {
- var keysToStore [][]byte
- for i := 0; i < barrierConfig.StoredShares; i++ {
- keysToStore = append(keysToStore, barrierUnsealKeys[0])
- barrierUnsealKeys = barrierUnsealKeys[1:]
- }
- if err := c.seal.SetStoredKeys(keysToStore); err != nil {
- c.logger.Error("core: failed to store keys", "error", err)
- return nil, fmt.Errorf("failed to store keys: %v", err)
- }
- }
-
- results := &InitResult{
- SecretShares: barrierUnsealKeys,
- }
-
- // Perform initial setup
- if err := c.setupCluster(); err != nil {
- c.logger.Error("core: cluster setup failed during init", "error", err)
- return nil, err
- }
- if err := c.postUnseal(); err != nil {
- c.logger.Error("core: post-unseal setup failed during init", "error", err)
- return nil, err
- }
-
- // Save the configuration regardless, but only generate a key if it's not
- // disabled. When using recovery keys they are stored in the barrier, so
- // this must happen post-unseal.
- if c.seal.RecoveryKeySupported() {
- err = c.seal.SetRecoveryConfig(recoveryConfig)
- if err != nil {
- c.logger.Error("core: failed to save recovery configuration", "error", err)
- return nil, fmt.Errorf("recovery configuration saving failed: %v", err)
- }
-
- if recoveryConfig.SecretShares > 0 {
- recoveryKey, recoveryUnsealKeys, err := c.generateShares(recoveryConfig)
- if err != nil {
- c.logger.Error("core: failed to generate recovery shares", "error", err)
- return nil, err
- }
-
- err = c.seal.SetRecoveryKey(recoveryKey)
- if err != nil {
- return nil, err
- }
-
- results.RecoveryShares = recoveryUnsealKeys
- }
- }
-
- // Generate a new root token
- rootToken, err := c.tokenStore.rootToken()
- if err != nil {
- c.logger.Error("core: root token generation failed", "error", err)
- return nil, err
- }
- results.RootToken = rootToken.ID
- c.logger.Info("core: root token generated")
-
- if initParams.RootTokenPGPKey != "" {
- _, encryptedVals, err := pgpkeys.EncryptShares([][]byte{[]byte(results.RootToken)}, []string{initParams.RootTokenPGPKey})
- if err != nil {
- c.logger.Error("core: root token encryption failed", "error", err)
- return nil, err
- }
- results.RootToken = base64.StdEncoding.EncodeToString(encryptedVals[0])
- }
-
- // Prepare to re-seal
- if err := c.preSeal(); err != nil {
- c.logger.Error("core: pre-seal teardown failed", "error", err)
- return nil, err
- }
-
- return results, nil
-}
-
-func (c *Core) UnsealWithStoredKeys() error {
- if !c.seal.StoredKeysSupported() {
- return nil
- }
-
- sealed, err := c.Sealed()
- if err != nil {
- c.logger.Error("core: error checking sealed status in auto-unseal", "error", err)
- return fmt.Errorf("error checking sealed status in auto-unseal: %s", err)
- }
- if !sealed {
- return nil
- }
-
- c.logger.Info("core: stored unseal keys supported, attempting fetch")
- keys, err := c.seal.GetStoredKeys()
- if err != nil {
- c.logger.Error("core: fetching stored unseal keys failed", "error", err)
- return &NonFatalError{Err: fmt.Errorf("fetching stored unseal keys failed: %v", err)}
- }
- if len(keys) == 0 {
- c.logger.Warn("core: stored unseal key(s) supported but none found")
- } else {
- unsealed := false
- keysUsed := 0
- for _, key := range keys {
- unsealed, err = c.Unseal(key)
- if err != nil {
- c.logger.Error("core: unseal with stored unseal key failed", "error", err)
- return &NonFatalError{Err: fmt.Errorf("unseal with stored key failed: %v", err)}
- }
- keysUsed += 1
- if unsealed {
- break
- }
- }
- if !unsealed {
- if c.logger.IsWarn() {
- c.logger.Warn("core: stored unseal key(s) used but Vault not unsealed yet", "stored_keys_used", keysUsed)
- }
- } else {
- if c.logger.IsInfo() {
- c.logger.Info("core: successfully unsealed with stored key(s)", "stored_keys_used", keysUsed)
- }
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/init_test.go b/vendor/github.com/hashicorp/vault/vault/init_test.go
deleted file mode 100644
index 48581f7..0000000
--- a/vendor/github.com/hashicorp/vault/vault/init_test.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package vault
-
-import (
- "reflect"
- "testing"
-
- log "github.com/mgutz/logxi/v1"
-
- "github.com/hashicorp/vault/helper/logformat"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical/inmem"
-)
-
-func TestCore_Init(t *testing.T) {
- c, conf := testCore_NewTestCore(t, nil)
- testCore_Init_Common(t, c, conf, &SealConfig{SecretShares: 5, SecretThreshold: 3}, nil)
-
- c, conf = testCore_NewTestCore(t, newTestSeal(t))
- bc, rc := TestSealDefConfigs()
- rc.SecretShares = 4
- rc.SecretThreshold = 2
- testCore_Init_Common(t, c, conf, bc, rc)
-}
-
-func testCore_NewTestCore(t *testing.T, seal Seal) (*Core, *CoreConfig) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- inm, err := inmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- conf := &CoreConfig{
- Physical: inm,
- DisableMlock: true,
- LogicalBackends: map[string]logical.Factory{
- "kv": LeasedPassthroughBackendFactory,
- },
- Seal: seal,
- }
- c, err := NewCore(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- return c, conf
-}
-
-func testCore_Init_Common(t *testing.T, c *Core, conf *CoreConfig, barrierConf, recoveryConf *SealConfig) {
- init, err := c.Initialized()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if init {
- t.Fatalf("should not be init")
- }
-
- // Check the seal configuration
- outConf, err := c.seal.BarrierConfig()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if outConf != nil {
- t.Fatalf("bad: %v", outConf)
- }
- if recoveryConf != nil {
- outConf, err := c.seal.RecoveryConfig()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if outConf != nil {
- t.Fatalf("bad: %v", outConf)
- }
- }
-
- res, err := c.Initialize(&InitParams{
- BarrierConfig: barrierConf,
- RecoveryConfig: recoveryConf,
- })
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if len(res.SecretShares) != (barrierConf.SecretShares - barrierConf.StoredShares) {
- t.Fatalf("Bad: got\n%#v\nexpected conf matching\n%#v\n", *res, *barrierConf)
- }
- if recoveryConf != nil {
- if len(res.RecoveryShares) != recoveryConf.SecretShares {
- t.Fatalf("Bad: got\n%#v\nexpected conf matching\n%#v\n", *res, *recoveryConf)
- }
- }
-
- if res.RootToken == "" {
- t.Fatalf("Bad: %#v", res)
- }
-
- _, err = c.Initialize(&InitParams{
- BarrierConfig: barrierConf,
- RecoveryConfig: recoveryConf,
- })
- if err != ErrAlreadyInit {
- t.Fatalf("err: %v", err)
- }
-
- init, err = c.Initialized()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if !init {
- t.Fatalf("should be init")
- }
-
- // Check the seal configuration
- outConf, err = c.seal.BarrierConfig()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !reflect.DeepEqual(outConf, barrierConf) {
- t.Fatalf("bad: %v expect: %v", outConf, barrierConf)
- }
- if recoveryConf != nil {
- outConf, err = c.seal.RecoveryConfig()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !reflect.DeepEqual(outConf, recoveryConf) {
- t.Fatalf("bad: %v expect: %v", outConf, recoveryConf)
- }
- }
-
- // New Core, same backend
- c2, err := NewCore(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- _, err = c2.Initialize(&InitParams{
- BarrierConfig: barrierConf,
- RecoveryConfig: recoveryConf,
- })
- if err != ErrAlreadyInit {
- t.Fatalf("err: %v", err)
- }
-
- init, err = c2.Initialized()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if !init {
- t.Fatalf("should be init")
- }
-
- // Check the seal configuration
- outConf, err = c2.seal.BarrierConfig()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !reflect.DeepEqual(outConf, barrierConf) {
- t.Fatalf("bad: %v expect: %v", outConf, barrierConf)
- }
- if recoveryConf != nil {
- outConf, err = c2.seal.RecoveryConfig()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !reflect.DeepEqual(outConf, recoveryConf) {
- t.Fatalf("bad: %v expect: %v", outConf, recoveryConf)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/keyring.go b/vendor/github.com/hashicorp/vault/vault/keyring.go
deleted file mode 100644
index 2cd4871..0000000
--- a/vendor/github.com/hashicorp/vault/vault/keyring.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package vault
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/helper/jsonutil"
-)
-
-// Keyring is used to manage multiple encryption keys used by
-// the barrier. New keys can be installed and each has a sequential term.
-// The term used to encrypt a key is prefixed to the key written out.
-// All data is encrypted with the latest key, but storing the old keys
-// allows for decryption of keys written previously. Along with the encryption
-// keys, the keyring also tracks the master key. This is necessary so that
-// when a new key is added to the keyring, we can encrypt with the master key
-// and write out the new keyring.
-type Keyring struct {
- masterKey []byte
- keys map[uint32]*Key
- activeTerm uint32
-}
-
-// EncodedKeyring is used for serialization of the keyring
-type EncodedKeyring struct {
- MasterKey []byte
- Keys []*Key
-}
-
-// Key represents a single term, along with the key used.
-type Key struct {
- Term uint32
- Version int
- Value []byte
- InstallTime time.Time
-}
-
-// Serialize is used to create a byte encoded key
-func (k *Key) Serialize() ([]byte, error) {
- return json.Marshal(k)
-}
-
-// DeserializeKey is used to deserialize and return a new key
-func DeserializeKey(buf []byte) (*Key, error) {
- k := new(Key)
- if err := jsonutil.DecodeJSON(buf, k); err != nil {
- return nil, fmt.Errorf("deserialization failed: %v", err)
- }
- return k, nil
-}
-
-// NewKeyring creates a new keyring
-func NewKeyring() *Keyring {
- k := &Keyring{
- keys: make(map[uint32]*Key),
- activeTerm: 0,
- }
- return k
-}
-
-// Clone returns a new copy of the keyring
-func (k *Keyring) Clone() *Keyring {
- clone := &Keyring{
- masterKey: k.masterKey,
- keys: make(map[uint32]*Key, len(k.keys)),
- activeTerm: k.activeTerm,
- }
- for idx, key := range k.keys {
- clone.keys[idx] = key
- }
- return clone
-}
-
-// AddKey adds a new key to the keyring
-func (k *Keyring) AddKey(key *Key) (*Keyring, error) {
- // Ensure there is no conflict
- if exist, ok := k.keys[key.Term]; ok {
- if !bytes.Equal(key.Value, exist.Value) {
- return nil, fmt.Errorf("Conflicting key for term %d already installed", key.Term)
- }
- return k, nil
- }
-
- // Add a time if none
- if key.InstallTime.IsZero() {
- key.InstallTime = time.Now()
- }
-
- // Make a new keyring
- clone := k.Clone()
-
- // Install the new key
- clone.keys[key.Term] = key
-
- // Update the active term if newer
- if key.Term > clone.activeTerm {
- clone.activeTerm = key.Term
- }
- return clone, nil
-}
-
-// RemoveKey removes a key from the keyring
-func (k *Keyring) RemoveKey(term uint32) (*Keyring, error) {
- // Ensure this is not the active key
- if term == k.activeTerm {
- return nil, fmt.Errorf("Cannot remove active key")
- }
-
- // Check if this term does not exist
- if _, ok := k.keys[term]; !ok {
- return k, nil
- }
-
- // Delete the key
- clone := k.Clone()
- delete(clone.keys, term)
- return clone, nil
-}
-
-// ActiveTerm returns the currently active term
-func (k *Keyring) ActiveTerm() uint32 {
- return k.activeTerm
-}
-
-// ActiveKey returns the active encryption key, or nil
-func (k *Keyring) ActiveKey() *Key {
- return k.keys[k.activeTerm]
-}
-
-// TermKey returns the key for the given term, or nil
-func (k *Keyring) TermKey(term uint32) *Key {
- return k.keys[term]
-}
-
-// SetMasterKey is used to update the master key
-func (k *Keyring) SetMasterKey(val []byte) *Keyring {
- valCopy := make([]byte, len(val))
- copy(valCopy, val)
- clone := k.Clone()
- clone.masterKey = valCopy
- return clone
-}
-
-// MasterKey returns the master key
-func (k *Keyring) MasterKey() []byte {
- return k.masterKey
-}
-
-// Serialize is used to create a byte encoded keyring
-func (k *Keyring) Serialize() ([]byte, error) {
- // Create the encoded entry
- enc := EncodedKeyring{
- MasterKey: k.masterKey,
- }
- for _, key := range k.keys {
- enc.Keys = append(enc.Keys, key)
- }
-
- // JSON encode the keyring
- buf, err := json.Marshal(enc)
- return buf, err
-}
-
-// DeserializeKeyring is used to deserialize and return a new keyring
-func DeserializeKeyring(buf []byte) (*Keyring, error) {
- // Deserialize the keyring
- var enc EncodedKeyring
- if err := jsonutil.DecodeJSON(buf, &enc); err != nil {
- return nil, fmt.Errorf("deserialization failed: %v", err)
- }
-
- // Create a new keyring
- k := NewKeyring()
- k.masterKey = enc.MasterKey
- for _, key := range enc.Keys {
- k.keys[key.Term] = key
- if key.Term > k.activeTerm {
- k.activeTerm = key.Term
- }
- }
- return k, nil
-}
-
-// N.B.:
-// Since Go 1.5 these are not reliable; see the documentation around the memzero
-// function. These are best-effort.
-func (k *Keyring) Zeroize(keysToo bool) {
- if k == nil {
- return
- }
- if k.masterKey != nil {
- memzero(k.masterKey)
- }
- if !keysToo || k.keys == nil {
- return
- }
- for _, key := range k.keys {
- memzero(key.Value)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/keyring_test.go b/vendor/github.com/hashicorp/vault/vault/keyring_test.go
deleted file mode 100644
index 60e3925..0000000
--- a/vendor/github.com/hashicorp/vault/vault/keyring_test.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package vault
-
-import (
- "bytes"
- "reflect"
- "testing"
- "time"
-)
-
-func TestKeyring(t *testing.T) {
- k := NewKeyring()
-
- // Term should be 0
- if term := k.ActiveTerm(); term != 0 {
- t.Fatalf("bad: %d", term)
- }
-
- // Should have no key
- if key := k.ActiveKey(); key != nil {
- t.Fatalf("bad: %v", key)
- }
-
- // Add a key
- testKey := []byte("testing")
- key1 := &Key{Term: 1, Version: 1, Value: testKey, InstallTime: time.Now()}
- k, err := k.AddKey(key1)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Term should be 1
- if term := k.ActiveTerm(); term != 1 {
- t.Fatalf("bad: %d", term)
- }
-
- // Should have key
- key := k.ActiveKey()
- if key == nil {
- t.Fatalf("bad: %v", key)
- }
- if !bytes.Equal(key.Value, testKey) {
- t.Fatalf("bad: %v", key)
- }
- if tKey := k.TermKey(1); tKey != key {
- t.Fatalf("bad: %v", tKey)
- }
-
- // Should handle idempotent set
- k, err = k.AddKey(key1)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should not allow conficting set
- testConflict := []byte("nope")
- key1Conf := &Key{Term: 1, Version: 1, Value: testConflict, InstallTime: time.Now()}
- _, err = k.AddKey(key1Conf)
- if err == nil {
- t.Fatalf("err: %v", err)
- }
-
- // Add a new key
- testSecond := []byte("second")
- key2 := &Key{Term: 2, Version: 1, Value: testSecond, InstallTime: time.Now()}
- k, err = k.AddKey(key2)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Term should be 2
- if term := k.ActiveTerm(); term != 2 {
- t.Fatalf("bad: %d", term)
- }
-
- // Should have key
- newKey := k.ActiveKey()
- if newKey == nil {
- t.Fatalf("bad: %v", key)
- }
- if !bytes.Equal(newKey.Value, testSecond) {
- t.Fatalf("bad: %v", key)
- }
- if tKey := k.TermKey(2); tKey != newKey {
- t.Fatalf("bad: %v", tKey)
- }
-
- // Read of old key should work
- if tKey := k.TermKey(1); tKey != key {
- t.Fatalf("bad: %v", tKey)
- }
-
- // Remove the old key
- k, err = k.RemoveKey(1)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Read of old key should not work
- if tKey := k.TermKey(1); tKey != nil {
- t.Fatalf("bad: %v", tKey)
- }
-
- // Remove the active key should fail
- k, err = k.RemoveKey(2)
- if err == nil {
- t.Fatalf("err: %v", err)
- }
-}
-
-func TestKeyring_MasterKey(t *testing.T) {
- k := NewKeyring()
- master := []byte("test")
- master2 := []byte("test2")
-
- // Check no master
- out := k.MasterKey()
- if out != nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Set master
- k = k.SetMasterKey(master)
- out = k.MasterKey()
- if !bytes.Equal(out, master) {
- t.Fatalf("bad: %v", out)
- }
-
- // Update master
- k = k.SetMasterKey(master2)
- out = k.MasterKey()
- if !bytes.Equal(out, master2) {
- t.Fatalf("bad: %v", out)
- }
-}
-
-func TestKeyring_Serialize(t *testing.T) {
- k := NewKeyring()
- master := []byte("test")
- k = k.SetMasterKey(master)
-
- now := time.Now()
- testKey := []byte("testing")
- testSecond := []byte("second")
- k, _ = k.AddKey(&Key{Term: 1, Version: 1, Value: testKey, InstallTime: now})
- k, _ = k.AddKey(&Key{Term: 2, Version: 1, Value: testSecond, InstallTime: now})
-
- buf, err := k.Serialize()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- k2, err := DeserializeKeyring(buf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- out := k2.MasterKey()
- if !bytes.Equal(out, master) {
- t.Fatalf("bad: %v", out)
- }
-
- if k2.ActiveTerm() != k.ActiveTerm() {
- t.Fatalf("Term mismatch")
- }
-
- var i uint32
- for i = 1; i < k.ActiveTerm(); i++ {
- key1 := k2.TermKey(i)
- key2 := k.TermKey(i)
- // Work around timezone bug due to DeepEqual using == for comparison
- if !key1.InstallTime.Equal(key2.InstallTime) {
- t.Fatalf("bad: key 1:\n%#v\nkey 2:\n%#v", key1, key2)
- }
- key1.InstallTime = key2.InstallTime
- if !reflect.DeepEqual(key1, key2) {
- t.Fatalf("bad: key 1:\n%#v\nkey 2:\n%#v", key1, key2)
- }
- }
-}
-
-func TestKey_Serialize(t *testing.T) {
- k := &Key{
- Term: 10,
- Version: 1,
- Value: []byte("foobarbaz"),
- InstallTime: time.Now(),
- }
-
- buf, err := k.Serialize()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- out, err := DeserializeKey(buf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Work around timezone bug due to DeepEqual using == for comparison
- if !k.InstallTime.Equal(out.InstallTime) {
- t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", k, out)
- }
- k.InstallTime = out.InstallTime
-
- if !reflect.DeepEqual(k, out) {
- t.Fatalf("bad: %#v", out)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go
deleted file mode 100644
index cedb241..0000000
--- a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package vault
-
-import (
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// CubbyholeBackendFactory constructs a new cubbyhole backend
-func CubbyholeBackendFactory(conf *logical.BackendConfig) (logical.Backend, error) {
- var b CubbyholeBackend
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(cubbyholeHelp),
-
- Paths: []*framework.Path{
- &framework.Path{
- Pattern: ".*",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleRead,
- logical.CreateOperation: b.handleWrite,
- logical.UpdateOperation: b.handleWrite,
- logical.DeleteOperation: b.handleDelete,
- logical.ListOperation: b.handleList,
- },
-
- ExistenceCheck: b.handleExistenceCheck,
-
- HelpSynopsis: strings.TrimSpace(cubbyholeHelpSynopsis),
- HelpDescription: strings.TrimSpace(cubbyholeHelpDescription),
- },
- },
- }
-
- if conf == nil {
- return nil, fmt.Errorf("Configuation passed into backend is nil")
- }
- b.Backend.Setup(conf)
-
- return &b, nil
-}
-
-// CubbyholeBackend is used for storing secrets directly into the physical
-// backend. The secrets are encrypted in the durable storage.
-// This differs from kv in that every token has its own private
-// storage view. The view is removed when the token expires.
-type CubbyholeBackend struct {
- *framework.Backend
-
- saltUUID string
- storageView logical.Storage
-}
-
-func (b *CubbyholeBackend) revoke(saltedToken string) error {
- if saltedToken == "" {
- return fmt.Errorf("cubbyhole: client token empty during revocation")
- }
-
- if err := logical.ClearView(b.storageView.(*BarrierView).SubView(saltedToken + "/")); err != nil {
- return err
- }
-
- return nil
-}
-
-func (b *CubbyholeBackend) handleExistenceCheck(
- req *logical.Request, data *framework.FieldData) (bool, error) {
- out, err := req.Storage.Get(req.ClientToken + "/" + req.Path)
- if err != nil {
- return false, fmt.Errorf("existence check failed: %v", err)
- }
-
- return out != nil, nil
-}
-
-func (b *CubbyholeBackend) handleRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if req.ClientToken == "" {
- return nil, fmt.Errorf("cubbyhole read: client token empty")
- }
-
- // Read the path
- out, err := req.Storage.Get(req.ClientToken + "/" + req.Path)
- if err != nil {
- return nil, fmt.Errorf("read failed: %v", err)
- }
-
- // Fast-path the no data case
- if out == nil {
- return nil, nil
- }
-
- // Decode the data
- var rawData map[string]interface{}
- if err := jsonutil.DecodeJSON(out.Value, &rawData); err != nil {
- return nil, fmt.Errorf("json decoding failed: %v", err)
- }
-
- // Generate the response
- resp := &logical.Response{
- Data: rawData,
- }
-
- return resp, nil
-}
-
-func (b *CubbyholeBackend) handleWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if req.ClientToken == "" {
- return nil, fmt.Errorf("cubbyhole write: client token empty")
- }
- // Check that some fields are given
- if len(req.Data) == 0 {
- return nil, fmt.Errorf("missing data fields")
- }
-
- // JSON encode the data
- buf, err := json.Marshal(req.Data)
- if err != nil {
- return nil, fmt.Errorf("json encoding failed: %v", err)
- }
-
- // Write out a new key
- entry := &logical.StorageEntry{
- Key: req.ClientToken + "/" + req.Path,
- Value: buf,
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, fmt.Errorf("failed to write: %v", err)
- }
-
- return nil, nil
-}
-
-func (b *CubbyholeBackend) handleDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if req.ClientToken == "" {
- return nil, fmt.Errorf("cubbyhole delete: client token empty")
- }
- // Delete the key at the request path
- if err := req.Storage.Delete(req.ClientToken + "/" + req.Path); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *CubbyholeBackend) handleList(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if req.ClientToken == "" {
- return nil, fmt.Errorf("cubbyhole list: client token empty")
- }
-
- // Right now we only handle directories, so ensure it ends with / We also
- // check if it's empty so we don't end up doing a listing on '//'
- path := req.Path
- if path != "" && !strings.HasSuffix(path, "/") {
- path = path + "/"
- }
-
- // List the keys at the prefix given by the request
- keys, err := req.Storage.List(req.ClientToken + "/" + path)
- if err != nil {
- return nil, err
- }
-
- // Strip the token
- strippedKeys := make([]string, len(keys))
- for i, key := range keys {
- strippedKeys[i] = strings.TrimPrefix(key, req.ClientToken+"/")
- }
-
- // Generate the response
- return logical.ListResponse(strippedKeys), nil
-}
-
-const cubbyholeHelp = `
-The cubbyhole backend reads and writes arbitrary secrets to the backend.
-The secrets are encrypted/decrypted by Vault: they are never stored
-unencrypted in the backend and the backend never has an opportunity to
-see the unencrypted value.
-
-This backend differs from the 'kv' backend in that it is namespaced
-per-token. Tokens can only read and write their own values, with no
-sharing possible (per-token cubbyholes). This can be useful for implementing
-certain authentication workflows, as well as "scratch" areas for individual
-clients. When the token is revoked, the entire set of stored values for that
-token is also removed.
-`
-
-const cubbyholeHelpSynopsis = `
-Pass-through secret storage to a token-specific cubbyhole in the storage
-backend, allowing you to read/write arbitrary data into secret storage.
-`
-
-const cubbyholeHelpDescription = `
-The cubbyhole backend reads and writes arbitrary data into secret storage,
-encrypting it along the way.
-
-The view into the cubbyhole storage space is different for each token; it is
-a per-token cubbyhole. When the token is revoked all values are removed.
-`
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole_test.go b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole_test.go
deleted file mode 100644
index 8800115..0000000
--- a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole_test.go
+++ /dev/null
@@ -1,266 +0,0 @@
-package vault
-
-import (
- "reflect"
- "sort"
- "testing"
- "time"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/logical"
-)
-
-func TestCubbyholeBackend_Write(t *testing.T) {
- b := testCubbyholeBackend()
- req := logical.TestRequest(t, logical.UpdateOperation, "foo")
- clientToken, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- req.ClientToken = clientToken
- storage := req.Storage
- req.Data["raw"] = "test"
-
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %v", resp)
- }
-
- req = logical.TestRequest(t, logical.ReadOperation, "foo")
- req.Storage = storage
- req.ClientToken = clientToken
- _, err = b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-}
-
-func TestCubbyholeBackend_Read(t *testing.T) {
- b := testCubbyholeBackend()
- req := logical.TestRequest(t, logical.UpdateOperation, "foo")
- req.Data["raw"] = "test"
- storage := req.Storage
- clientToken, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- req.ClientToken = clientToken
-
- if _, err := b.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- req = logical.TestRequest(t, logical.ReadOperation, "foo")
- req.Storage = storage
- req.ClientToken = clientToken
-
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- expected := &logical.Response{
- Data: map[string]interface{}{
- "raw": "test",
- },
- }
-
- if !reflect.DeepEqual(resp, expected) {
- t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expected, resp)
- }
-}
-
-func TestCubbyholeBackend_Delete(t *testing.T) {
- b := testCubbyholeBackend()
- req := logical.TestRequest(t, logical.UpdateOperation, "foo")
- req.Data["raw"] = "test"
- storage := req.Storage
- clientToken, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- req.ClientToken = clientToken
-
- if _, err := b.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- req = logical.TestRequest(t, logical.DeleteOperation, "foo")
- req.Storage = storage
- req.ClientToken = clientToken
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %v", resp)
- }
-
- req = logical.TestRequest(t, logical.ReadOperation, "foo")
- req.Storage = storage
- req.ClientToken = clientToken
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %v", resp)
- }
-}
-
-func TestCubbyholeBackend_List(t *testing.T) {
- b := testCubbyholeBackend()
- req := logical.TestRequest(t, logical.UpdateOperation, "foo")
- clientToken, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- req.Data["raw"] = "test"
- req.ClientToken = clientToken
- storage := req.Storage
-
- if _, err := b.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- req = logical.TestRequest(t, logical.UpdateOperation, "bar")
- req.Data["raw"] = "baz"
- req.ClientToken = clientToken
- req.Storage = storage
-
- if _, err := b.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- req = logical.TestRequest(t, logical.ListOperation, "")
- req.Storage = storage
- req.ClientToken = clientToken
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- expKeys := []string{"foo", "bar"}
- respKeys := resp.Data["keys"].([]string)
- sort.Strings(expKeys)
- sort.Strings(respKeys)
- if !reflect.DeepEqual(respKeys, expKeys) {
- t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expKeys, respKeys)
- }
-}
-
-func TestCubbyholeIsolation(t *testing.T) {
- b := testCubbyholeBackend()
-
- clientTokenA, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- clientTokenB, err := uuid.GenerateUUID()
- if err != nil {
- t.Fatal(err)
- }
- var storageA logical.Storage
- var storageB logical.Storage
-
- // Populate and test A entries
- req := logical.TestRequest(t, logical.UpdateOperation, "foo")
- req.ClientToken = clientTokenA
- storageA = req.Storage
- req.Data["raw"] = "test"
-
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %v", resp)
- }
-
- req = logical.TestRequest(t, logical.ReadOperation, "foo")
- req.Storage = storageA
- req.ClientToken = clientTokenA
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- expected := &logical.Response{
- Data: map[string]interface{}{
- "raw": "test",
- },
- }
-
- if !reflect.DeepEqual(resp, expected) {
- t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expected, resp)
- }
-
- // Populate and test B entries
- req = logical.TestRequest(t, logical.UpdateOperation, "bar")
- req.ClientToken = clientTokenB
- storageB = req.Storage
- req.Data["raw"] = "baz"
-
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %v", resp)
- }
-
- req = logical.TestRequest(t, logical.ReadOperation, "bar")
- req.Storage = storageB
- req.ClientToken = clientTokenB
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- expected = &logical.Response{
- Data: map[string]interface{}{
- "raw": "baz",
- },
- }
-
- if !reflect.DeepEqual(resp, expected) {
- t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expected, resp)
- }
-
- // We shouldn't be able to read A from B and vice versa
- req = logical.TestRequest(t, logical.ReadOperation, "foo")
- req.Storage = storageB
- req.ClientToken = clientTokenB
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("err: was able to read from other user's cubbyhole")
- }
-
- req = logical.TestRequest(t, logical.ReadOperation, "bar")
- req.Storage = storageA
- req.ClientToken = clientTokenA
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("err: was able to read from other user's cubbyhole")
- }
-}
-
-func testCubbyholeBackend() logical.Backend {
- b, _ := CubbyholeBackendFactory(&logical.BackendConfig{
- Logger: nil,
- System: logical.StaticSystemView{
- DefaultLeaseTTLVal: time.Hour * 24,
- MaxLeaseTTLVal: time.Hour * 24 * 32,
- },
- })
- return b
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go b/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go
deleted file mode 100644
index 5fc013e..0000000
--- a/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package vault
-
-import (
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// PassthroughBackendFactory returns a PassthroughBackend
-// with leases switched off
-func PassthroughBackendFactory(conf *logical.BackendConfig) (logical.Backend, error) {
- return LeaseSwitchedPassthroughBackend(conf, false)
-}
-
-// LeasedPassthroughBackendFactory returns a PassthroughBackend
-// with leases switched on
-func LeasedPassthroughBackendFactory(conf *logical.BackendConfig) (logical.Backend, error) {
- return LeaseSwitchedPassthroughBackend(conf, true)
-}
-
-// LeaseSwitchedPassthroughBackend returns a PassthroughBackend
-// with leases switched on or off
-func LeaseSwitchedPassthroughBackend(conf *logical.BackendConfig, leases bool) (logical.Backend, error) {
- var b PassthroughBackend
- b.generateLeases = leases
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(passthroughHelp),
-
- Paths: []*framework.Path{
- &framework.Path{
- Pattern: ".*",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleRead,
- logical.CreateOperation: b.handleWrite,
- logical.UpdateOperation: b.handleWrite,
- logical.DeleteOperation: b.handleDelete,
- logical.ListOperation: b.handleList,
- },
-
- ExistenceCheck: b.handleExistenceCheck,
-
- HelpSynopsis: strings.TrimSpace(passthroughHelpSynopsis),
- HelpDescription: strings.TrimSpace(passthroughHelpDescription),
- },
- },
- }
-
- b.Backend.Secrets = []*framework.Secret{
- &framework.Secret{
- Type: "kv",
-
- Renew: b.handleRead,
- Revoke: b.handleRevoke,
- },
- }
-
- if conf == nil {
- return nil, fmt.Errorf("Configuation passed into backend is nil")
- }
- b.Backend.Setup(conf)
-
- return &b, nil
-}
-
-// PassthroughBackend is used storing secrets directly into the physical
-// backend. The secrets are encrypted in the durable storage and custom TTL
-// information can be specified, but otherwise this backend doesn't do anything
-// fancy.
-type PassthroughBackend struct {
- *framework.Backend
- generateLeases bool
-}
-
-func (b *PassthroughBackend) handleRevoke(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // This is a no-op
- return nil, nil
-}
-
-func (b *PassthroughBackend) handleExistenceCheck(
- req *logical.Request, data *framework.FieldData) (bool, error) {
- out, err := req.Storage.Get(req.Path)
- if err != nil {
- return false, fmt.Errorf("existence check failed: %v", err)
- }
-
- return out != nil, nil
-}
-
-func (b *PassthroughBackend) handleRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Read the path
- out, err := req.Storage.Get(req.Path)
- if err != nil {
- return nil, fmt.Errorf("read failed: %v", err)
- }
-
- // Fast-path the no data case
- if out == nil {
- return nil, nil
- }
-
- // Decode the data
- var rawData map[string]interface{}
-
- if err := jsonutil.DecodeJSON(out.Value, &rawData); err != nil {
- return nil, fmt.Errorf("json decoding failed: %v", err)
- }
-
- var resp *logical.Response
- if b.generateLeases {
- // Generate the response
- resp = b.Secret("kv").Response(rawData, nil)
- resp.Secret.Renewable = false
- } else {
- resp = &logical.Response{
- Secret: &logical.Secret{},
- Data: rawData,
- }
- }
-
- // Check if there is a ttl key
- ttlDuration := b.System().DefaultLeaseTTL()
- ttlRaw, ok := rawData["ttl"]
- if !ok {
- ttlRaw, ok = rawData["lease"]
- }
- if ok {
- dur, err := parseutil.ParseDurationSecond(ttlRaw)
- if err == nil {
- ttlDuration = dur
- }
-
- if b.generateLeases {
- resp.Secret.Renewable = true
- }
- }
-
- resp.Secret.TTL = ttlDuration
-
- return resp, nil
-}
-
-func (b *PassthroughBackend) GeneratesLeases() bool {
- return b.generateLeases
-}
-
-func (b *PassthroughBackend) handleWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Check that some fields are given
- if len(req.Data) == 0 {
- return logical.ErrorResponse("missing data fields"), nil
- }
-
- // JSON encode the data
- buf, err := json.Marshal(req.Data)
- if err != nil {
- return nil, fmt.Errorf("json encoding failed: %v", err)
- }
-
- // Write out a new key
- entry := &logical.StorageEntry{
- Key: req.Path,
- Value: buf,
- }
- if err := req.Storage.Put(entry); err != nil {
- return nil, fmt.Errorf("failed to write: %v", err)
- }
-
- return nil, nil
-}
-
-func (b *PassthroughBackend) handleDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Delete the key at the request path
- if err := req.Storage.Delete(req.Path); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *PassthroughBackend) handleList(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Right now we only handle directories, so ensure it ends with /; however,
- // some physical backends may not handle the "/" case properly, so only add
- // it if we're not listing the root
- path := req.Path
- if path != "" && !strings.HasSuffix(path, "/") {
- path = path + "/"
- }
-
- // List the keys at the prefix given by the request
- keys, err := req.Storage.List(path)
- if err != nil {
- return nil, err
- }
-
- // Generate the response
- return logical.ListResponse(keys), nil
-}
-
-const passthroughHelp = `
-The kv backend reads and writes arbitrary secrets to the backend.
-The secrets are encrypted/decrypted by Vault: they are never stored
-unencrypted in the backend and the backend never has an opportunity to
-see the unencrypted value.
-
-TTLs can be set on a per-secret basis. These TTLs will be sent down
-when that secret is read, and it is assumed that some outside process will
-revoke and/or replace the secret at that path.
-`
-
-const passthroughHelpSynopsis = `
-Pass-through secret storage to the storage backend, allowing you to
-read/write arbitrary data into secret storage.
-`
-
-const passthroughHelpDescription = `
-The pass-through backend reads and writes arbitrary data into secret storage,
-encrypting it along the way.
-
-A TTL can be specified when writing with the "ttl" field. If given, the
-duration of leases returned by this backend will be set to this value. This
-can be used as a hint from the writer of a secret to the consumer of a secret
-that the consumer should re-read the value before the TTL has expired.
-However, any revocation must be handled by the user of this backend; the lease
-duration does not affect the provided data in any way.
-`
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go b/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go
deleted file mode 100644
index 1ccda69..0000000
--- a/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package vault
-
-import (
- "encoding/json"
- "reflect"
- "testing"
- "time"
-
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/logical"
-)
-
-func TestPassthroughBackend_RootPaths(t *testing.T) {
- b := testPassthroughBackend()
- test := func(b logical.Backend) {
- root := b.SpecialPaths()
- if root != nil {
- t.Fatalf("unexpected: %v", root)
- }
- }
- test(b)
- b = testPassthroughLeasedBackend()
- test(b)
-}
-
-func TestPassthroughBackend_Write(t *testing.T) {
- test := func(b logical.Backend) {
- req := logical.TestRequest(t, logical.UpdateOperation, "foo")
- req.Data["raw"] = "test"
-
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %v", resp)
- }
-
- out, err := req.Storage.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out == nil {
- t.Fatalf("failed to write to view")
- }
- }
- b := testPassthroughBackend()
- test(b)
- b = testPassthroughLeasedBackend()
- test(b)
-}
-
-func TestPassthroughBackend_Read(t *testing.T) {
- test := func(b logical.Backend, ttlType string, ttl interface{}, leased bool) {
- req := logical.TestRequest(t, logical.UpdateOperation, "foo")
- req.Data["raw"] = "test"
- var reqTTL interface{}
- switch ttl.(type) {
- case int64:
- reqTTL = ttl.(int64)
- case string:
- reqTTL = ttl.(string)
- default:
- t.Fatal("unknown ttl type")
- }
- req.Data[ttlType] = reqTTL
- storage := req.Storage
-
- if _, err := b.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- req = logical.TestRequest(t, logical.ReadOperation, "foo")
- req.Storage = storage
-
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- expectedTTL, err := parseutil.ParseDurationSecond(ttl)
- if err != nil {
- t.Fatal(err)
- }
-
- // What comes back if an int is passed in is a json.Number which is
- // actually aliased as a string so to make the deep equal happy if it's
- // actually a number we set it to an int64
- var respTTL interface{} = resp.Data[ttlType]
- _, ok := respTTL.(json.Number)
- if ok {
- respTTL, err = respTTL.(json.Number).Int64()
- if err != nil {
- t.Fatal(err)
- }
- resp.Data[ttlType] = respTTL
- }
-
- expected := &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- Renewable: true,
- TTL: expectedTTL,
- },
- },
- Data: map[string]interface{}{
- "raw": "test",
- ttlType: reqTTL,
- },
- }
-
- if !leased {
- expected.Secret.Renewable = false
- }
- resp.Secret.InternalData = nil
- resp.Secret.LeaseID = ""
- if !reflect.DeepEqual(resp, expected) {
- t.Fatalf("bad response.\n\nexpected:\n%#v\n\nGot:\n%#v", expected, resp)
- }
- }
- b := testPassthroughLeasedBackend()
- test(b, "lease", "1h", true)
- test(b, "ttl", "5", true)
- b = testPassthroughBackend()
- test(b, "lease", int64(10), false)
- test(b, "ttl", "40s", false)
-}
-
-func TestPassthroughBackend_Delete(t *testing.T) {
- test := func(b logical.Backend) {
- req := logical.TestRequest(t, logical.UpdateOperation, "foo")
- req.Data["raw"] = "test"
- storage := req.Storage
-
- if _, err := b.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- req = logical.TestRequest(t, logical.DeleteOperation, "foo")
- req.Storage = storage
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %v", resp)
- }
-
- req = logical.TestRequest(t, logical.ReadOperation, "foo")
- req.Storage = storage
- resp, err = b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp != nil {
- t.Fatalf("bad: %v", resp)
- }
- }
- b := testPassthroughBackend()
- test(b)
- b = testPassthroughLeasedBackend()
- test(b)
-}
-
-func TestPassthroughBackend_List(t *testing.T) {
- test := func(b logical.Backend) {
- req := logical.TestRequest(t, logical.UpdateOperation, "foo")
- req.Data["raw"] = "test"
- storage := req.Storage
-
- if _, err := b.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
-
- req = logical.TestRequest(t, logical.ListOperation, "")
- req.Storage = storage
- resp, err := b.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- expected := &logical.Response{
- Data: map[string]interface{}{
- "keys": []string{"foo"},
- },
- }
-
- if !reflect.DeepEqual(resp, expected) {
- t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expected, resp)
- }
- }
- b := testPassthroughBackend()
- test(b)
- b = testPassthroughLeasedBackend()
- test(b)
-}
-
-func TestPassthroughBackend_Revoke(t *testing.T) {
- test := func(b logical.Backend) {
- req := logical.TestRequest(t, logical.RevokeOperation, "kv")
- req.Secret = &logical.Secret{
- InternalData: map[string]interface{}{
- "secret_type": "kv",
- },
- }
-
- if _, err := b.HandleRequest(req); err != nil {
- t.Fatalf("err: %v", err)
- }
- }
- b := testPassthroughBackend()
- test(b)
- b = testPassthroughLeasedBackend()
- test(b)
-}
-
-func testPassthroughBackend() logical.Backend {
- b, _ := PassthroughBackendFactory(&logical.BackendConfig{
- Logger: nil,
- System: logical.StaticSystemView{
- DefaultLeaseTTLVal: time.Hour * 24,
- MaxLeaseTTLVal: time.Hour * 24 * 32,
- },
- })
- return b
-}
-
-func testPassthroughLeasedBackend() logical.Backend {
- b, _ := LeasedPassthroughBackendFactory(&logical.BackendConfig{
- Logger: nil,
- System: logical.StaticSystemView{
- DefaultLeaseTTLVal: time.Hour * 24,
- MaxLeaseTTLVal: time.Hour * 24 * 32,
- },
- })
- return b
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system.go b/vendor/github.com/hashicorp/vault/vault/logical_system.go
deleted file mode 100644
index 1593a1f..0000000
--- a/vendor/github.com/hashicorp/vault/vault/logical_system.go
+++ /dev/null
@@ -1,3034 +0,0 @@
-package vault
-
-import (
- "encoding/base64"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/fatih/structs"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/mitchellh/mapstructure"
-)
-
-var (
- // protectedPaths cannot be accessed via the raw APIs.
- // This is both for security and to prevent disrupting Vault.
- protectedPaths = []string{
- keyringPath,
- }
-
- replicationPaths = func(b *SystemBackend) []*framework.Path {
- return []*framework.Path{
- &framework.Path{
- Pattern: "replication/status",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- var state consts.ReplicationState
- resp := &logical.Response{
- Data: map[string]interface{}{
- "mode": state.String(),
- },
- }
- return resp, nil
- },
- },
- },
- }
- }
-)
-
-func NewSystemBackend(core *Core) *SystemBackend {
- b := &SystemBackend{
- Core: core,
- }
-
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(sysHelpRoot),
-
- PathsSpecial: &logical.Paths{
- Root: []string{
- "auth/*",
- "remount",
- "audit",
- "audit/*",
- "raw",
- "raw/*",
- "replication/primary/secondary-token",
- "replication/reindex",
- "rotate",
- "config/cors",
- "config/auditing/*",
- "plugins/catalog/*",
- "revoke-prefix/*",
- "revoke-force/*",
- "leases/revoke-prefix/*",
- "leases/revoke-force/*",
- "leases/lookup/*",
- },
-
- Unauthenticated: []string{
- "wrapping/lookup",
- "wrapping/pubkey",
- "replication/status",
- },
- },
-
- Paths: []*framework.Path{
- &framework.Path{
- Pattern: "capabilities-accessor$",
-
- Fields: map[string]*framework.FieldSchema{
- "accessor": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Accessor of the token for which capabilities are being queried.",
- },
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Path on which capabilities are being queried.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleCapabilitiesAccessor,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["capabilities_accessor"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["capabilities_accessor"][1]),
- },
-
- &framework.Path{
- Pattern: "config/cors$",
-
- Fields: map[string]*framework.FieldSchema{
- "enable": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Enables or disables CORS headers on requests.",
- },
- "allowed_origins": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "A comma-separated string or array of strings indicating origins that may make cross-origin requests.",
- },
- "allowed_headers": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "A comma-separated string or array of strings indicating headers that are allowed on cross-origin requests.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleCORSRead,
- logical.UpdateOperation: b.handleCORSUpdate,
- logical.DeleteOperation: b.handleCORSDelete,
- },
-
- HelpDescription: strings.TrimSpace(sysHelp["config/cors"][0]),
- HelpSynopsis: strings.TrimSpace(sysHelp["config/cors"][1]),
- },
-
- &framework.Path{
- Pattern: "capabilities$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token for which capabilities are being queried.",
- },
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Path on which capabilities are being queried.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleCapabilities,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["capabilities"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["capabilities"][1]),
- },
-
- &framework.Path{
- Pattern: "capabilities-self$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token for which capabilities are being queried.",
- },
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Path on which capabilities are being queried.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleCapabilities,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["capabilities_self"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["capabilities_self"][1]),
- },
-
- &framework.Path{
- Pattern: "generate-root(/attempt)?$",
- HelpSynopsis: strings.TrimSpace(sysHelp["generate-root"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["generate-root"][1]),
- },
-
- &framework.Path{
- Pattern: "init$",
- HelpSynopsis: strings.TrimSpace(sysHelp["init"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["init"][1]),
- },
-
- &framework.Path{
- Pattern: "rekey/backup$",
-
- Fields: map[string]*framework.FieldSchema{},
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleRekeyRetrieveBarrier,
- logical.DeleteOperation: b.handleRekeyDeleteBarrier,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["rekey_backup"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["rekey_backup"][0]),
- },
-
- &framework.Path{
- Pattern: "rekey/recovery-key-backup$",
-
- Fields: map[string]*framework.FieldSchema{},
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleRekeyRetrieveRecovery,
- logical.DeleteOperation: b.handleRekeyDeleteRecovery,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["rekey_backup"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["rekey_backup"][0]),
- },
-
- &framework.Path{
- Pattern: "auth/(?P.+?)/tune$",
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_tune"][0]),
- },
- "default_lease_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["tune_default_lease_ttl"][0]),
- },
- "max_lease_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]),
- },
- "description": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_desc"][0]),
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleAuthTuneRead,
- logical.UpdateOperation: b.handleAuthTuneWrite,
- },
- HelpSynopsis: strings.TrimSpace(sysHelp["auth_tune"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["auth_tune"][1]),
- },
-
- &framework.Path{
- Pattern: "mounts/(?P.+?)/tune$",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["mount_path"][0]),
- },
- "default_lease_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["tune_default_lease_ttl"][0]),
- },
- "max_lease_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]),
- },
- "description": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_desc"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleMountTuneRead,
- logical.UpdateOperation: b.handleMountTuneWrite,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["mount_tune"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["mount_tune"][1]),
- },
-
- &framework.Path{
- Pattern: "mounts/(?P.+?)",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["mount_path"][0]),
- },
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["mount_type"][0]),
- },
- "description": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["mount_desc"][0]),
- },
- "config": &framework.FieldSchema{
- Type: framework.TypeMap,
- Description: strings.TrimSpace(sysHelp["mount_config"][0]),
- },
- "local": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: strings.TrimSpace(sysHelp["mount_local"][0]),
- },
- "plugin_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["mount_plugin_name"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleMount,
- logical.DeleteOperation: b.handleUnmount,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["mount"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["mount"][1]),
- },
-
- &framework.Path{
- Pattern: "mounts$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleMountTable,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["mounts"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["mounts"][1]),
- },
-
- &framework.Path{
- Pattern: "remount",
-
- Fields: map[string]*framework.FieldSchema{
- "from": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The previous mount point.",
- },
- "to": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The new mount point.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleRemount,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["remount"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["remount"][1]),
- },
-
- &framework.Path{
- Pattern: "leases/lookup/(?P.+?)?",
-
- Fields: map[string]*framework.FieldSchema{
- "prefix": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["leases-list-prefix"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.handleLeaseLookupList,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["leases"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["leases"][1]),
- },
-
- &framework.Path{
- Pattern: "leases/lookup",
-
- Fields: map[string]*framework.FieldSchema{
- "lease_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["lease_id"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleLeaseLookup,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["leases"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["leases"][1]),
- },
-
- &framework.Path{
- Pattern: "(leases/)?renew" + framework.OptionalParamRegex("url_lease_id"),
-
- Fields: map[string]*framework.FieldSchema{
- "url_lease_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["lease_id"][0]),
- },
- "lease_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["lease_id"][0]),
- },
- "increment": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: strings.TrimSpace(sysHelp["increment"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleRenew,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["renew"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["renew"][1]),
- },
-
- &framework.Path{
- Pattern: "(leases/)?revoke" + framework.OptionalParamRegex("url_lease_id"),
-
- Fields: map[string]*framework.FieldSchema{
- "url_lease_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["lease_id"][0]),
- },
- "lease_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["lease_id"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleRevoke,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["revoke"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["revoke"][1]),
- },
-
- &framework.Path{
- Pattern: "(leases/)?revoke-force/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "prefix": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["revoke-force-path"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleRevokeForce,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["revoke-force"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["revoke-force"][1]),
- },
-
- &framework.Path{
- Pattern: "(leases/)?revoke-prefix/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "prefix": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["revoke-prefix-path"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleRevokePrefix,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["revoke-prefix"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["revoke-prefix"][1]),
- },
-
- &framework.Path{
- Pattern: "leases/tidy$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleTidyLeases,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["tidy_leases"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["tidy_leases"][1]),
- },
-
- &framework.Path{
- Pattern: "auth$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleAuthTable,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["auth-table"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["auth-table"][1]),
- },
-
- &framework.Path{
- Pattern: "auth/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_path"][0]),
- },
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_type"][0]),
- },
- "description": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_desc"][0]),
- },
- "config": &framework.FieldSchema{
- Type: framework.TypeMap,
- Description: strings.TrimSpace(sysHelp["auth_config"][0]),
- },
- "local": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: strings.TrimSpace(sysHelp["mount_local"][0]),
- },
- "plugin_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_plugin"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleEnableAuth,
- logical.DeleteOperation: b.handleDisableAuth,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["auth"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["auth"][1]),
- },
-
- &framework.Path{
- Pattern: "policy$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handlePolicyList,
- logical.ListOperation: b.handlePolicyList,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["policy-list"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["policy-list"][1]),
- },
-
- &framework.Path{
- Pattern: "policy/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["policy-name"][0]),
- },
- "rules": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["policy-rules"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handlePolicyRead,
- logical.UpdateOperation: b.handlePolicySet,
- logical.DeleteOperation: b.handlePolicyDelete,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["policy"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["policy"][1]),
- },
-
- &framework.Path{
- Pattern: "seal-status$",
- HelpSynopsis: strings.TrimSpace(sysHelp["seal-status"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["seal-status"][1]),
- },
-
- &framework.Path{
- Pattern: "seal$",
- HelpSynopsis: strings.TrimSpace(sysHelp["seal"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["seal"][1]),
- },
-
- &framework.Path{
- Pattern: "unseal$",
- HelpSynopsis: strings.TrimSpace(sysHelp["unseal"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["unseal"][1]),
- },
-
- &framework.Path{
- Pattern: "audit-hash/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["audit_path"][0]),
- },
-
- "input": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleAuditHash,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["audit-hash"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["audit-hash"][1]),
- },
-
- &framework.Path{
- Pattern: "audit$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleAuditTable,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["audit-table"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["audit-table"][1]),
- },
-
- &framework.Path{
- Pattern: "audit/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["audit_path"][0]),
- },
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["audit_type"][0]),
- },
- "description": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["audit_desc"][0]),
- },
- "options": &framework.FieldSchema{
- Type: framework.TypeMap,
- Description: strings.TrimSpace(sysHelp["audit_opts"][0]),
- },
- "local": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: strings.TrimSpace(sysHelp["mount_local"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleEnableAudit,
- logical.DeleteOperation: b.handleDisableAudit,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["audit"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["audit"][1]),
- },
-
- &framework.Path{
- Pattern: "key-status$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleKeyStatus,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["key-status"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["key-status"][1]),
- },
-
- &framework.Path{
- Pattern: "rotate$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleRotate,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["rotate"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["rotate"][1]),
- },
-
- /*
- // Disabled for the moment as we don't support this externally
- &framework.Path{
- Pattern: "wrapping/pubkey$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleWrappingPubkey,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["wrappubkey"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["wrappubkey"][1]),
- },
- */
-
- &framework.Path{
- Pattern: "wrapping/wrap$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleWrappingWrap,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["wrap"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["wrap"][1]),
- },
-
- &framework.Path{
- Pattern: "wrapping/unwrap$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleWrappingUnwrap,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["unwrap"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["unwrap"][1]),
- },
-
- &framework.Path{
- Pattern: "wrapping/lookup$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleWrappingLookup,
- logical.ReadOperation: b.handleWrappingLookup,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["wraplookup"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["wraplookup"][1]),
- },
-
- &framework.Path{
- Pattern: "wrapping/rewrap$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleWrappingRewrap,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["rewrap"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["rewrap"][1]),
- },
-
- &framework.Path{
- Pattern: "config/auditing/request-headers/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "header": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- "hmac": &framework.FieldSchema{
- Type: framework.TypeBool,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleAuditedHeaderUpdate,
- logical.DeleteOperation: b.handleAuditedHeaderDelete,
- logical.ReadOperation: b.handleAuditedHeaderRead,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["audited-headers-name"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["audited-headers-name"][1]),
- },
-
- &framework.Path{
- Pattern: "config/auditing/request-headers$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleAuditedHeadersRead,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["audited-headers"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["audited-headers"][1]),
- },
- &framework.Path{
- Pattern: "plugins/catalog/?$",
-
- Fields: map[string]*framework.FieldSchema{},
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.handlePluginCatalogList,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]),
- },
- &framework.Path{
- Pattern: "plugins/catalog/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_name"][0]),
- },
- "sha256": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_sha-256"][0]),
- },
- "sha_256": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_sha-256"][0]),
- },
- "command": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_command"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handlePluginCatalogUpdate,
- logical.DeleteOperation: b.handlePluginCatalogDelete,
- logical.ReadOperation: b.handlePluginCatalogRead,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]),
- },
- &framework.Path{
- Pattern: "plugins/reload/backend$",
-
- Fields: map[string]*framework.FieldSchema{
- "plugin": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-backend-reload-plugin"][0]),
- },
- "mounts": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: strings.TrimSpace(sysHelp["plugin-backend-reload-mounts"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handlePluginReloadUpdate,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["plugin-reload"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["plugin-reload"][1]),
- },
- },
- }
-
- b.Backend.Paths = append(b.Backend.Paths, replicationPaths(b)...)
-
- if core.rawEnabled {
- b.Backend.Paths = append(b.Backend.Paths, &framework.Path{
- Pattern: "(raw/?$|raw/(?P.+))",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- "value": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleRawRead,
- logical.UpdateOperation: b.handleRawWrite,
- logical.DeleteOperation: b.handleRawDelete,
- logical.ListOperation: b.handleRawList,
- },
- })
- }
-
- b.Backend.Invalidate = b.invalidate
-
- return b
-}
-
-// SystemBackend implements logical.Backend and is used to interact with
-// the core of the system. This backend is hardcoded to exist at the "sys"
-// prefix. Conceptually it is similar to procfs on Linux.
-type SystemBackend struct {
- *framework.Backend
- Core *Core
-}
-
-// handleCORSRead returns the current CORS configuration
-func (b *SystemBackend) handleCORSRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- corsConf := b.Core.corsConfig
-
- enabled := corsConf.IsEnabled()
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "enabled": enabled,
- },
- }
-
- if enabled {
- corsConf.RLock()
- resp.Data["allowed_origins"] = corsConf.AllowedOrigins
- resp.Data["allowed_headers"] = corsConf.AllowedHeaders
- corsConf.RUnlock()
- }
-
- return resp, nil
-}
-
-// handleCORSUpdate sets the list of origins that are allowed to make
-// cross-origin requests and sets the CORS enabled flag to true
-func (b *SystemBackend) handleCORSUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- origins := d.Get("allowed_origins").([]string)
- headers := d.Get("allowed_headers").([]string)
-
- return nil, b.Core.corsConfig.Enable(origins, headers)
-}
-
-// handleCORSDelete sets the CORS enabled flag to false and clears the list of
-// allowed origins & headers.
-func (b *SystemBackend) handleCORSDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return nil, b.Core.corsConfig.Disable()
-}
-
-func (b *SystemBackend) handleTidyLeases(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- err := b.Core.expiration.Tidy()
- if err != nil {
- b.Backend.Logger().Error("sys: failed to tidy leases", "error", err)
- return handleError(err)
- }
- return nil, err
-}
-
-func (b *SystemBackend) invalidate(key string) {
- if b.Core.logger.IsTrace() {
- b.Core.logger.Trace("sys: invalidating key", "key", key)
- }
- switch {
- case strings.HasPrefix(key, policySubPath):
- b.Core.stateLock.RLock()
- defer b.Core.stateLock.RUnlock()
- if b.Core.policyStore != nil {
- b.Core.policyStore.invalidate(strings.TrimPrefix(key, policySubPath))
- }
- }
-}
-
-func (b *SystemBackend) handlePluginCatalogList(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- plugins, err := b.Core.pluginCatalog.List()
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(plugins), nil
-}
-
-func (b *SystemBackend) handlePluginCatalogUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- pluginName := d.Get("name").(string)
- if pluginName == "" {
- return logical.ErrorResponse("missing plugin name"), nil
- }
-
- sha256 := d.Get("sha256").(string)
- if sha256 == "" {
- sha256 = d.Get("sha_256").(string)
- if sha256 == "" {
- return logical.ErrorResponse("missing SHA-256 value"), nil
- }
- }
-
- command := d.Get("command").(string)
- if command == "" {
- return logical.ErrorResponse("missing command value"), nil
- }
-
- sha256Bytes, err := hex.DecodeString(sha256)
- if err != nil {
- return logical.ErrorResponse("Could not decode SHA-256 value from Hex"), err
- }
-
- err = b.Core.pluginCatalog.Set(pluginName, command, sha256Bytes)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *SystemBackend) handlePluginCatalogRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- pluginName := d.Get("name").(string)
- if pluginName == "" {
- return logical.ErrorResponse("missing plugin name"), nil
- }
- plugin, err := b.Core.pluginCatalog.Get(pluginName)
- if err != nil {
- return nil, err
- }
- if plugin == nil {
- return nil, nil
- }
-
- // Create a map of data to be returned and remove sensitive information from it
- data := structs.New(plugin).Map()
-
- return &logical.Response{
- Data: data,
- }, nil
-}
-
-func (b *SystemBackend) handlePluginCatalogDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- pluginName := d.Get("name").(string)
- if pluginName == "" {
- return logical.ErrorResponse("missing plugin name"), nil
- }
- err := b.Core.pluginCatalog.Delete(pluginName)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *SystemBackend) handlePluginReloadUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- pluginName := d.Get("plugin").(string)
- pluginMounts := d.Get("mounts").([]string)
-
- if pluginName != "" && len(pluginMounts) > 0 {
- return logical.ErrorResponse("plugin and mounts cannot be set at the same time"), nil
- }
- if pluginName == "" && len(pluginMounts) == 0 {
- return logical.ErrorResponse("plugin or mounts must be provided"), nil
- }
-
- if pluginName != "" {
- err := b.Core.reloadMatchingPlugin(pluginName)
- if err != nil {
- return nil, err
- }
- } else if len(pluginMounts) > 0 {
- err := b.Core.reloadMatchingPluginMounts(pluginMounts)
- if err != nil {
- return nil, err
- }
- }
-
- return nil, nil
-}
-
-// handleAuditedHeaderUpdate creates or overwrites a header entry
-func (b *SystemBackend) handleAuditedHeaderUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- header := d.Get("header").(string)
- hmac := d.Get("hmac").(bool)
- if header == "" {
- return logical.ErrorResponse("missing header name"), nil
- }
-
- headerConfig := b.Core.AuditedHeadersConfig()
- err := headerConfig.add(header, hmac)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// handleAudtedHeaderDelete deletes the header with the given name
-func (b *SystemBackend) handleAuditedHeaderDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- header := d.Get("header").(string)
- if header == "" {
- return logical.ErrorResponse("missing header name"), nil
- }
-
- headerConfig := b.Core.AuditedHeadersConfig()
- err := headerConfig.remove(header)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// handleAuditedHeaderRead returns the header configuration for the given header name
-func (b *SystemBackend) handleAuditedHeaderRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- header := d.Get("header").(string)
- if header == "" {
- return logical.ErrorResponse("missing header name"), nil
- }
-
- headerConfig := b.Core.AuditedHeadersConfig()
- settings, ok := headerConfig.Headers[header]
- if !ok {
- return logical.ErrorResponse("Could not find header in config"), nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- header: settings,
- },
- }, nil
-}
-
-// handleAuditedHeadersRead returns the whole audited headers config
-func (b *SystemBackend) handleAuditedHeadersRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- headerConfig := b.Core.AuditedHeadersConfig()
-
- return &logical.Response{
- Data: map[string]interface{}{
- "headers": headerConfig.Headers,
- },
- }, nil
-}
-
-// handleCapabilities returns the ACL capabilities of the token for a given path
-func (b *SystemBackend) handleCapabilities(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- token := d.Get("token").(string)
- if token == "" {
- token = req.ClientToken
- }
- capabilities, err := b.Core.Capabilities(token, d.Get("path").(string))
- if err != nil {
- return nil, err
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "capabilities": capabilities,
- },
- }, nil
-}
-
-// handleCapabilitiesAccessor returns the ACL capabilities of the
-// token associted with the given accessor for a given path.
-func (b *SystemBackend) handleCapabilitiesAccessor(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- accessor := d.Get("accessor").(string)
- if accessor == "" {
- return logical.ErrorResponse("missing accessor"), nil
- }
-
- aEntry, err := b.Core.tokenStore.lookupByAccessor(accessor, false)
- if err != nil {
- return nil, err
- }
-
- capabilities, err := b.Core.Capabilities(aEntry.TokenID, d.Get("path").(string))
- if err != nil {
- return nil, err
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "capabilities": capabilities,
- },
- }, nil
-}
-
-// handleRekeyRetrieve returns backed-up, PGP-encrypted unseal keys from a
-// rekey operation
-func (b *SystemBackend) handleRekeyRetrieve(
- req *logical.Request,
- data *framework.FieldData,
- recovery bool) (*logical.Response, error) {
- backup, err := b.Core.RekeyRetrieveBackup(recovery)
- if err != nil {
- return nil, fmt.Errorf("unable to look up backed-up keys: %v", err)
- }
- if backup == nil {
- return logical.ErrorResponse("no backed-up keys found"), nil
- }
-
- keysB64 := map[string][]string{}
- for k, v := range backup.Keys {
- for _, j := range v {
- currB64Keys := keysB64[k]
- if currB64Keys == nil {
- currB64Keys = []string{}
- }
- key, err := hex.DecodeString(j)
- if err != nil {
- return nil, fmt.Errorf("error decoding hex-encoded backup key: %v", err)
- }
- currB64Keys = append(currB64Keys, base64.StdEncoding.EncodeToString(key))
- keysB64[k] = currB64Keys
- }
- }
-
- // Format the status
- resp := &logical.Response{
- Data: map[string]interface{}{
- "nonce": backup.Nonce,
- "keys": backup.Keys,
- "keys_base64": keysB64,
- },
- }
-
- return resp, nil
-}
-
-func (b *SystemBackend) handleRekeyRetrieveBarrier(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRekeyRetrieve(req, data, false)
-}
-
-func (b *SystemBackend) handleRekeyRetrieveRecovery(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRekeyRetrieve(req, data, true)
-}
-
-// handleRekeyDelete deletes backed-up, PGP-encrypted unseal keys from a rekey
-// operation
-func (b *SystemBackend) handleRekeyDelete(
- req *logical.Request,
- data *framework.FieldData,
- recovery bool) (*logical.Response, error) {
- err := b.Core.RekeyDeleteBackup(recovery)
- if err != nil {
- return nil, fmt.Errorf("error during deletion of backed-up keys: %v", err)
- }
-
- return nil, nil
-}
-
-func (b *SystemBackend) handleRekeyDeleteBarrier(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRekeyDelete(req, data, false)
-}
-
-func (b *SystemBackend) handleRekeyDeleteRecovery(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRekeyDelete(req, data, true)
-}
-
-// handleMountTable handles the "mounts" endpoint to provide the mount table
-func (b *SystemBackend) handleMountTable(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.Core.mountsLock.RLock()
- defer b.Core.mountsLock.RUnlock()
-
- resp := &logical.Response{
- Data: make(map[string]interface{}),
- }
-
- for _, entry := range b.Core.mounts.Entries {
- // Populate mount info
- structConfig := structs.New(entry.Config).Map()
- structConfig["default_lease_ttl"] = int64(structConfig["default_lease_ttl"].(time.Duration).Seconds())
- structConfig["max_lease_ttl"] = int64(structConfig["max_lease_ttl"].(time.Duration).Seconds())
- info := map[string]interface{}{
- "type": entry.Type,
- "description": entry.Description,
- "accessor": entry.Accessor,
- "config": structConfig,
- "local": entry.Local,
- }
- resp.Data[entry.Path] = info
- }
-
- return resp, nil
-}
-
-// handleMount is used to mount a new path
-func (b *SystemBackend) handleMount(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.replicationState
-
- local := data.Get("local").(bool)
- if !local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
- }
-
- // Get all the options
- path := data.Get("path").(string)
- logicalType := data.Get("type").(string)
- description := data.Get("description").(string)
- pluginName := data.Get("plugin_name").(string)
-
- path = sanitizeMountPath(path)
-
- var config MountConfig
- var apiConfig APIMountConfig
-
- configMap := data.Get("config").(map[string]interface{})
- if configMap != nil && len(configMap) != 0 {
- err := mapstructure.Decode(configMap, &apiConfig)
- if err != nil {
- return logical.ErrorResponse(
- "unable to convert given mount config information"),
- logical.ErrInvalidRequest
- }
- }
-
- switch apiConfig.DefaultLeaseTTL {
- case "":
- case "system":
- default:
- tmpDef, err := parseutil.ParseDurationSecond(apiConfig.DefaultLeaseTTL)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "unable to parse default TTL of %s: %s", apiConfig.DefaultLeaseTTL, err)),
- logical.ErrInvalidRequest
- }
- config.DefaultLeaseTTL = tmpDef
- }
-
- switch apiConfig.MaxLeaseTTL {
- case "":
- case "system":
- default:
- tmpMax, err := parseutil.ParseDurationSecond(apiConfig.MaxLeaseTTL)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "unable to parse max TTL of %s: %s", apiConfig.MaxLeaseTTL, err)),
- logical.ErrInvalidRequest
- }
- config.MaxLeaseTTL = tmpMax
- }
-
- if config.MaxLeaseTTL != 0 && config.DefaultLeaseTTL > config.MaxLeaseTTL {
- return logical.ErrorResponse(
- "given default lease TTL greater than given max lease TTL"),
- logical.ErrInvalidRequest
- }
-
- if config.DefaultLeaseTTL > b.Core.maxLeaseTTL {
- return logical.ErrorResponse(fmt.Sprintf(
- "given default lease TTL greater than system max lease TTL of %d", int(b.Core.maxLeaseTTL.Seconds()))),
- logical.ErrInvalidRequest
- }
-
- // Only set plugin-name if mount is of type plugin, with apiConfig.PluginName
- // option taking precedence.
- if logicalType == "plugin" {
- switch {
- case apiConfig.PluginName != "":
- config.PluginName = apiConfig.PluginName
- case pluginName != "":
- config.PluginName = pluginName
- default:
- return logical.ErrorResponse(
- "plugin_name must be provided for plugin backend"),
- logical.ErrInvalidRequest
- }
- }
-
- // Copy over the force no cache if set
- if apiConfig.ForceNoCache {
- config.ForceNoCache = true
- }
-
- if logicalType == "" {
- return logical.ErrorResponse(
- "backend type must be specified as a string"),
- logical.ErrInvalidRequest
- }
-
- // Create the mount entry
- me := &MountEntry{
- Table: mountTableType,
- Path: path,
- Type: logicalType,
- Description: description,
- Config: config,
- Local: local,
- }
-
- // Attempt mount
- if err := b.Core.mount(me); err != nil {
- b.Backend.Logger().Error("sys: mount failed", "path", me.Path, "error", err)
- return handleError(err)
- }
-
- return nil, nil
-}
-
-// used to intercept an HTTPCodedError so it goes back to callee
-func handleError(
- err error) (*logical.Response, error) {
- switch err.(type) {
- case logical.HTTPCodedError:
- return logical.ErrorResponse(err.Error()), err
- default:
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
-}
-
-// handleUnmount is used to unmount a path
-func (b *SystemBackend) handleUnmount(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- path = sanitizeMountPath(path)
-
- repState := b.Core.replicationState
- entry := b.Core.router.MatchingMountEntry(path)
- if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot unmount a non-local mount on a replication secondary"), nil
- }
-
- // We return success when the mount does not exists to not expose if the
- // mount existed or not
- match := b.Core.router.MatchingMount(path)
- if match == "" || path != match {
- return nil, nil
- }
-
- // Attempt unmount
- if err := b.Core.unmount(path); err != nil {
- b.Backend.Logger().Error("sys: unmount failed", "path", path, "error", err)
- return handleError(err)
- }
-
- return nil, nil
-}
-
-// handleRemount is used to remount a path
-func (b *SystemBackend) handleRemount(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.replicationState
-
- // Get the paths
- fromPath := data.Get("from").(string)
- toPath := data.Get("to").(string)
- if fromPath == "" || toPath == "" {
- return logical.ErrorResponse(
- "both 'from' and 'to' path must be specified as a string"),
- logical.ErrInvalidRequest
- }
-
- fromPath = sanitizeMountPath(fromPath)
- toPath = sanitizeMountPath(toPath)
-
- entry := b.Core.router.MatchingMountEntry(fromPath)
- if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot remount a non-local mount on a replication secondary"), nil
- }
-
- // Attempt remount
- if err := b.Core.remount(fromPath, toPath); err != nil {
- b.Backend.Logger().Error("sys: remount failed", "from_path", fromPath, "to_path", toPath, "error", err)
- return handleError(err)
- }
-
- return nil, nil
-}
-
-// handleAuthTuneRead is used to get config settings on a auth path
-func (b *SystemBackend) handleAuthTuneRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- if path == "" {
- return logical.ErrorResponse(
- "path must be specified as a string"),
- logical.ErrInvalidRequest
- }
- return b.handleTuneReadCommon("auth/" + path)
-}
-
-// handleMountTuneRead is used to get config settings on a backend
-func (b *SystemBackend) handleMountTuneRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- if path == "" {
- return logical.ErrorResponse(
- "path must be specified as a string"),
- logical.ErrInvalidRequest
- }
-
- // This call will read both logical backend's configuration as well as auth backends'.
- // Retaining this behavior for backward compatibility. If this behavior is not desired,
- // an error can be returned if path has a prefix of "auth/".
- return b.handleTuneReadCommon(path)
-}
-
-// handleTuneReadCommon returns the config settings of a path
-func (b *SystemBackend) handleTuneReadCommon(path string) (*logical.Response, error) {
- path = sanitizeMountPath(path)
-
- sysView := b.Core.router.MatchingSystemView(path)
- if sysView == nil {
- b.Backend.Logger().Error("sys: cannot fetch sysview", "path", path)
- return handleError(fmt.Errorf("sys: cannot fetch sysview for path %s", path))
- }
-
- mountEntry := b.Core.router.MatchingMountEntry(path)
- if mountEntry == nil {
- b.Backend.Logger().Error("sys: cannot fetch mount entry", "path", path)
- return handleError(fmt.Errorf("sys: cannot fetch mount entry for path %s", path))
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "default_lease_ttl": int(sysView.DefaultLeaseTTL().Seconds()),
- "max_lease_ttl": int(sysView.MaxLeaseTTL().Seconds()),
- "force_no_cache": mountEntry.Config.ForceNoCache,
- },
- }
-
- return resp, nil
-}
-
-// handleAuthTuneWrite is used to set config settings on an auth path
-func (b *SystemBackend) handleAuthTuneWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- if path == "" {
- return logical.ErrorResponse("path must be specified as a string"),
- logical.ErrInvalidRequest
- }
- return b.handleTuneWriteCommon("auth/"+path, data)
-}
-
-// handleMountTuneWrite is used to set config settings on a backend
-func (b *SystemBackend) handleMountTuneWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- if path == "" {
- return logical.ErrorResponse("path must be specified as a string"),
- logical.ErrInvalidRequest
- }
- // This call will write both logical backend's configuration as well as auth backends'.
- // Retaining this behavior for backward compatibility. If this behavior is not desired,
- // an error can be returned if path has a prefix of "auth/".
- return b.handleTuneWriteCommon(path, data)
-}
-
-// handleTuneWriteCommon is used to set config settings on a path
-func (b *SystemBackend) handleTuneWriteCommon(
- path string, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.replicationState
-
- path = sanitizeMountPath(path)
-
- // Prevent protected paths from being changed
- for _, p := range untunableMounts {
- if strings.HasPrefix(path, p) {
- b.Backend.Logger().Error("sys: cannot tune this mount", "path", path)
- return handleError(fmt.Errorf("sys: cannot tune '%s'", path))
- }
- }
-
- mountEntry := b.Core.router.MatchingMountEntry(path)
- if mountEntry == nil {
- b.Backend.Logger().Error("sys: tune failed: no mount entry found", "path", path)
- return handleError(fmt.Errorf("sys: tune of path '%s' failed: no mount entry found", path))
- }
- if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot tune a non-local mount on a replication secondary"), nil
- }
-
- var lock *sync.RWMutex
- switch {
- case strings.HasPrefix(path, "auth/"):
- lock = &b.Core.authLock
- default:
- lock = &b.Core.mountsLock
- }
-
- lock.Lock()
- defer lock.Unlock()
-
- // Check again after grabbing the lock
- mountEntry = b.Core.router.MatchingMountEntry(path)
- if mountEntry == nil {
- b.Backend.Logger().Error("sys: tune failed: no mount entry found", "path", path)
- return handleError(fmt.Errorf("sys: tune of path '%s' failed: no mount entry found", path))
- }
- if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot tune a non-local mount on a replication secondary"), nil
- }
-
- // Timing configuration parameters
- {
- var newDefault, newMax time.Duration
- defTTL := data.Get("default_lease_ttl").(string)
- switch defTTL {
- case "":
- newDefault = mountEntry.Config.DefaultLeaseTTL
- case "system":
- newDefault = time.Duration(0)
- default:
- tmpDef, err := parseutil.ParseDurationSecond(defTTL)
- if err != nil {
- return handleError(err)
- }
- newDefault = tmpDef
- }
-
- maxTTL := data.Get("max_lease_ttl").(string)
- switch maxTTL {
- case "":
- newMax = mountEntry.Config.MaxLeaseTTL
- case "system":
- newMax = time.Duration(0)
- default:
- tmpMax, err := parseutil.ParseDurationSecond(maxTTL)
- if err != nil {
- return handleError(err)
- }
- newMax = tmpMax
- }
-
- if newDefault != mountEntry.Config.DefaultLeaseTTL ||
- newMax != mountEntry.Config.MaxLeaseTTL {
-
- if err := b.tuneMountTTLs(path, mountEntry, newDefault, newMax); err != nil {
- b.Backend.Logger().Error("sys: tuning failed", "path", path, "error", err)
- return handleError(err)
- }
- }
- }
-
- description := data.Get("description").(string)
- if description != "" {
- oldDesc := mountEntry.Description
- mountEntry.Description = description
-
- // Update the mount table
- var err error
- switch {
- case strings.HasPrefix(path, "auth/"):
- err = b.Core.persistAuth(b.Core.auth, mountEntry.Local)
- default:
- err = b.Core.persistMounts(b.Core.mounts, mountEntry.Local)
- }
- if err != nil {
- mountEntry.Description = oldDesc
- return handleError(err)
- }
- if b.Core.logger.IsInfo() {
- b.Core.logger.Info("core: mount tuning of description successful", "path", path)
- }
- }
-
- return nil, nil
-}
-
-// handleLease is use to view the metadata for a given LeaseID
-func (b *SystemBackend) handleLeaseLookup(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- leaseID := data.Get("lease_id").(string)
- if leaseID == "" {
- return logical.ErrorResponse("lease_id must be specified"),
- logical.ErrInvalidRequest
- }
-
- leaseTimes, err := b.Core.expiration.FetchLeaseTimes(leaseID)
- if err != nil {
- b.Backend.Logger().Error("sys: error retrieving lease", "lease_id", leaseID, "error", err)
- return handleError(err)
- }
- if leaseTimes == nil {
- return logical.ErrorResponse("invalid lease"), logical.ErrInvalidRequest
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "id": leaseID,
- "issue_time": leaseTimes.IssueTime,
- "expire_time": nil,
- "last_renewal": nil,
- "ttl": int64(0),
- },
- }
- renewable, _ := leaseTimes.renewable()
- resp.Data["renewable"] = renewable
-
- if !leaseTimes.LastRenewalTime.IsZero() {
- resp.Data["last_renewal"] = leaseTimes.LastRenewalTime
- }
- if !leaseTimes.ExpireTime.IsZero() {
- resp.Data["expire_time"] = leaseTimes.ExpireTime
- resp.Data["ttl"] = leaseTimes.ttl()
- }
- return resp, nil
-}
-
-func (b *SystemBackend) handleLeaseLookupList(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- prefix := data.Get("prefix").(string)
- if prefix != "" && !strings.HasSuffix(prefix, "/") {
- prefix = prefix + "/"
- }
-
- keys, err := b.Core.expiration.idView.List(prefix)
- if err != nil {
- b.Backend.Logger().Error("sys: error listing leases", "prefix", prefix, "error", err)
- return handleError(err)
- }
- return logical.ListResponse(keys), nil
-}
-
-// handleRenew is used to renew a lease with a given LeaseID
-func (b *SystemBackend) handleRenew(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Get all the options
- leaseID := data.Get("lease_id").(string)
- if leaseID == "" {
- leaseID = data.Get("url_lease_id").(string)
- }
- if leaseID == "" {
- return logical.ErrorResponse("lease_id must be specified"),
- logical.ErrInvalidRequest
- }
- incrementRaw := data.Get("increment").(int)
-
- // Convert the increment
- increment := time.Duration(incrementRaw) * time.Second
-
- // Invoke the expiration manager directly
- resp, err := b.Core.expiration.Renew(leaseID, increment)
- if err != nil {
- b.Backend.Logger().Error("sys: lease renewal failed", "lease_id", leaseID, "error", err)
- return handleError(err)
- }
- return resp, err
-}
-
-// handleRevoke is used to revoke a given LeaseID
-func (b *SystemBackend) handleRevoke(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Get all the options
- leaseID := data.Get("lease_id").(string)
- if leaseID == "" {
- leaseID = data.Get("url_lease_id").(string)
- }
- if leaseID == "" {
- return logical.ErrorResponse("lease_id must be specified"),
- logical.ErrInvalidRequest
- }
-
- // Invoke the expiration manager directly
- if err := b.Core.expiration.Revoke(leaseID); err != nil {
- b.Backend.Logger().Error("sys: lease revocation failed", "lease_id", leaseID, "error", err)
- return handleError(err)
- }
- return nil, nil
-}
-
-// handleRevokePrefix is used to revoke a prefix with many LeaseIDs
-func (b *SystemBackend) handleRevokePrefix(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRevokePrefixCommon(req, data, false)
-}
-
-// handleRevokeForce is used to revoke a prefix with many LeaseIDs, ignoring errors
-func (b *SystemBackend) handleRevokeForce(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRevokePrefixCommon(req, data, true)
-}
-
-// handleRevokePrefixCommon is used to revoke a prefix with many LeaseIDs
-func (b *SystemBackend) handleRevokePrefixCommon(
- req *logical.Request, data *framework.FieldData, force bool) (*logical.Response, error) {
- // Get all the options
- prefix := data.Get("prefix").(string)
-
- // Invoke the expiration manager directly
- var err error
- if force {
- err = b.Core.expiration.RevokeForce(prefix)
- } else {
- err = b.Core.expiration.RevokePrefix(prefix)
- }
- if err != nil {
- b.Backend.Logger().Error("sys: revoke prefix failed", "prefix", prefix, "error", err)
- return handleError(err)
- }
- return nil, nil
-}
-
-// handleAuthTable handles the "auth" endpoint to provide the auth table
-func (b *SystemBackend) handleAuthTable(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.Core.authLock.RLock()
- defer b.Core.authLock.RUnlock()
-
- resp := &logical.Response{
- Data: make(map[string]interface{}),
- }
- for _, entry := range b.Core.auth.Entries {
- info := map[string]interface{}{
- "type": entry.Type,
- "description": entry.Description,
- "accessor": entry.Accessor,
- "config": map[string]interface{}{
- "default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()),
- "max_lease_ttl": int64(entry.Config.MaxLeaseTTL.Seconds()),
- },
- "local": entry.Local,
- }
- resp.Data[entry.Path] = info
- }
- return resp, nil
-}
-
-// handleEnableAuth is used to enable a new credential backend
-func (b *SystemBackend) handleEnableAuth(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.replicationState
-
- local := data.Get("local").(bool)
- if !local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
- }
-
- // Get all the options
- path := data.Get("path").(string)
- logicalType := data.Get("type").(string)
- description := data.Get("description").(string)
- pluginName := data.Get("plugin_name").(string)
-
- var config MountConfig
- var apiConfig APIMountConfig
-
- configMap := data.Get("config").(map[string]interface{})
- if configMap != nil && len(configMap) != 0 {
- err := mapstructure.Decode(configMap, &apiConfig)
- if err != nil {
- return logical.ErrorResponse(
- "unable to convert given auth config information"),
- logical.ErrInvalidRequest
- }
- }
-
- // Only set plugin name if mount is of type plugin, with apiConfig.PluginName
- // option taking precedence.
- if logicalType == "plugin" {
- switch {
- case apiConfig.PluginName != "":
- config.PluginName = apiConfig.PluginName
- case pluginName != "":
- config.PluginName = pluginName
- default:
- return logical.ErrorResponse(
- "plugin_name must be provided for plugin backend"),
- logical.ErrInvalidRequest
- }
- }
-
- if logicalType == "" {
- return logical.ErrorResponse(
- "backend type must be specified as a string"),
- logical.ErrInvalidRequest
- }
-
- path = sanitizeMountPath(path)
-
- // Create the mount entry
- me := &MountEntry{
- Table: credentialTableType,
- Path: path,
- Type: logicalType,
- Description: description,
- Config: config,
- Local: local,
- }
-
- // Attempt enabling
- if err := b.Core.enableCredential(me); err != nil {
- b.Backend.Logger().Error("sys: enable auth mount failed", "path", me.Path, "error", err)
- return handleError(err)
- }
- return nil, nil
-}
-
-// handleDisableAuth is used to disable a credential backend
-func (b *SystemBackend) handleDisableAuth(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- path = sanitizeMountPath(path)
- fullPath := credentialRoutePrefix + path
-
- repState := b.Core.replicationState
- entry := b.Core.router.MatchingMountEntry(fullPath)
- if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot unmount a non-local mount on a replication secondary"), nil
- }
-
- // We return success when the mount does not exists to not expose if the
- // mount existed or not
- match := b.Core.router.MatchingMount(fullPath)
- if match == "" || fullPath != match {
- return nil, nil
- }
-
- // Attempt disable
- if err := b.Core.disableCredential(path); err != nil {
- b.Backend.Logger().Error("sys: disable auth mount failed", "path", path, "error", err)
- return handleError(err)
- }
- return nil, nil
-}
-
-// handlePolicyList handles the "policy" endpoint to provide the enabled policies
-func (b *SystemBackend) handlePolicyList(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Get all the configured policies
- policies, err := b.Core.policyStore.ListPolicies()
-
- // Add the special "root" policy
- policies = append(policies, "root")
- resp := logical.ListResponse(policies)
-
- // Backwords compatibility
- resp.Data["policies"] = resp.Data["keys"]
-
- return resp, err
-}
-
-// handlePolicyRead handles the "policy/" endpoint to read a policy
-func (b *SystemBackend) handlePolicyRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- policy, err := b.Core.policyStore.GetPolicy(name)
- if err != nil {
- return handleError(err)
- }
-
- if policy == nil {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "name": policy.Name,
- "rules": policy.Raw,
- },
- }, nil
-}
-
-// handlePolicySet handles the "policy/" endpoint to set a policy
-func (b *SystemBackend) handlePolicySet(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- rulesRaw, ok := data.GetOk("rules")
- if !ok {
- return logical.ErrorResponse("'rules' parameter not supplied"), nil
- }
-
- rules := rulesRaw.(string)
- if rules == "" {
- return logical.ErrorResponse("'rules' parameter empty"), nil
- }
-
- // Validate the rules parse
- parse, err := Parse(rules)
- if err != nil {
- return handleError(err)
- }
-
- if name != "" {
- parse.Name = name
- }
-
- // Update the policy
- if err := b.Core.policyStore.SetPolicy(parse); err != nil {
- return handleError(err)
- }
- return nil, nil
-}
-
-// handlePolicyDelete handles the "policy/" endpoint to delete a policy
-func (b *SystemBackend) handlePolicyDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- if err := b.Core.policyStore.DeletePolicy(name); err != nil {
- return handleError(err)
- }
- return nil, nil
-}
-
-// handleAuditTable handles the "audit" endpoint to provide the audit table
-func (b *SystemBackend) handleAuditTable(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.Core.auditLock.RLock()
- defer b.Core.auditLock.RUnlock()
-
- resp := &logical.Response{
- Data: make(map[string]interface{}),
- }
- for _, entry := range b.Core.audit.Entries {
- info := map[string]interface{}{
- "path": entry.Path,
- "type": entry.Type,
- "description": entry.Description,
- "options": entry.Options,
- "local": entry.Local,
- }
- resp.Data[entry.Path] = info
- }
- return resp, nil
-}
-
-// handleAuditHash is used to fetch the hash of the given input data with the
-// specified audit backend's salt
-func (b *SystemBackend) handleAuditHash(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- input := data.Get("input").(string)
- if input == "" {
- return logical.ErrorResponse("the \"input\" parameter is empty"), nil
- }
-
- path = sanitizeMountPath(path)
-
- hash, err := b.Core.auditBroker.GetHash(path, input)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "hash": hash,
- },
- }, nil
-}
-
-// handleEnableAudit is used to enable a new audit backend
-func (b *SystemBackend) handleEnableAudit(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.replicationState
-
- local := data.Get("local").(bool)
- if !local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
- }
-
- // Get all the options
- path := data.Get("path").(string)
- backendType := data.Get("type").(string)
- description := data.Get("description").(string)
- options := data.Get("options").(map[string]interface{})
-
- optionMap := make(map[string]string)
- for k, v := range options {
- vStr, ok := v.(string)
- if !ok {
- return logical.ErrorResponse("options must be string valued"),
- logical.ErrInvalidRequest
- }
- optionMap[k] = vStr
- }
-
- // Create the mount entry
- me := &MountEntry{
- Table: auditTableType,
- Path: path,
- Type: backendType,
- Description: description,
- Options: optionMap,
- Local: local,
- }
-
- // Attempt enabling
- if err := b.Core.enableAudit(me); err != nil {
- b.Backend.Logger().Error("sys: enable audit mount failed", "path", me.Path, "error", err)
- return handleError(err)
- }
- return nil, nil
-}
-
-// handleDisableAudit is used to disable an audit backend
-func (b *SystemBackend) handleDisableAudit(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
-
- // Attempt disable
- if existed, err := b.Core.disableAudit(path); existed && err != nil {
- b.Backend.Logger().Error("sys: disable audit mount failed", "path", path, "error", err)
- return handleError(err)
- }
- return nil, nil
-}
-
-// handleRawRead is used to read directly from the barrier
-func (b *SystemBackend) handleRawRead(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
-
- // Prevent access of protected paths
- for _, p := range protectedPaths {
- if strings.HasPrefix(path, p) {
- err := fmt.Sprintf("cannot read '%s'", path)
- return logical.ErrorResponse(err), logical.ErrInvalidRequest
- }
- }
-
- entry, err := b.Core.barrier.Get(path)
- if err != nil {
- return handleError(err)
- }
- if entry == nil {
- return nil, nil
- }
- resp := &logical.Response{
- Data: map[string]interface{}{
- "value": string(entry.Value),
- },
- }
- return resp, nil
-}
-
-// handleRawWrite is used to write directly to the barrier
-func (b *SystemBackend) handleRawWrite(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
-
- // Prevent access of protected paths
- for _, p := range protectedPaths {
- if strings.HasPrefix(path, p) {
- err := fmt.Sprintf("cannot write '%s'", path)
- return logical.ErrorResponse(err), logical.ErrInvalidRequest
- }
- }
-
- value := data.Get("value").(string)
- entry := &Entry{
- Key: path,
- Value: []byte(value),
- }
- if err := b.Core.barrier.Put(entry); err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
- return nil, nil
-}
-
-// handleRawDelete is used to delete directly from the barrier
-func (b *SystemBackend) handleRawDelete(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
-
- // Prevent access of protected paths
- for _, p := range protectedPaths {
- if strings.HasPrefix(path, p) {
- err := fmt.Sprintf("cannot delete '%s'", path)
- return logical.ErrorResponse(err), logical.ErrInvalidRequest
- }
- }
-
- if err := b.Core.barrier.Delete(path); err != nil {
- return handleError(err)
- }
- return nil, nil
-}
-
-// handleRawList is used to list directly from the barrier
-func (b *SystemBackend) handleRawList(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- if path != "" && !strings.HasSuffix(path, "/") {
- path = path + "/"
- }
-
- // Prevent access of protected paths
- for _, p := range protectedPaths {
- if strings.HasPrefix(path, p) {
- err := fmt.Sprintf("cannot list '%s'", path)
- return logical.ErrorResponse(err), logical.ErrInvalidRequest
- }
- }
-
- keys, err := b.Core.barrier.List(path)
- if err != nil {
- return handleError(err)
- }
- return logical.ListResponse(keys), nil
-}
-
-// handleKeyStatus returns status information about the backend key
-func (b *SystemBackend) handleKeyStatus(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Get the key info
- info, err := b.Core.barrier.ActiveKeyInfo()
- if err != nil {
- return nil, err
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "term": info.Term,
- "install_time": info.InstallTime.Format(time.RFC3339Nano),
- },
- }
- return resp, nil
-}
-
-// handleRotate is used to trigger a key rotation
-func (b *SystemBackend) handleRotate(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.replicationState
- if repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot rotate on a replication secondary"), nil
- }
-
- // Rotate to the new term
- newTerm, err := b.Core.barrier.Rotate()
- if err != nil {
- b.Backend.Logger().Error("sys: failed to create new encryption key", "error", err)
- return handleError(err)
- }
- b.Backend.Logger().Info("sys: installed new encryption key")
-
- // In HA mode, we need to an upgrade path for the standby instances
- if b.Core.ha != nil {
- // Create the upgrade path to the new term
- if err := b.Core.barrier.CreateUpgrade(newTerm); err != nil {
- b.Backend.Logger().Error("sys: failed to create new upgrade", "term", newTerm, "error", err)
- }
-
- // Schedule the destroy of the upgrade path
- time.AfterFunc(keyRotateGracePeriod, func() {
- if err := b.Core.barrier.DestroyUpgrade(newTerm); err != nil {
- b.Backend.Logger().Error("sys: failed to destroy upgrade", "term", newTerm, "error", err)
- }
- })
- }
-
- // Write to the canary path, which will force a synchronous truing during
- // replication
- if err := b.Core.barrier.Put(&Entry{
- Key: coreKeyringCanaryPath,
- Value: []byte(fmt.Sprintf("new-rotation-term-%d", newTerm)),
- }); err != nil {
- b.Core.logger.Error("core: error saving keyring canary", "error", err)
- return nil, fmt.Errorf("failed to save keyring canary: %v", err)
- }
-
- return nil, nil
-}
-
-func (b *SystemBackend) handleWrappingPubkey(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- x, _ := b.Core.wrappingJWTKey.X.MarshalText()
- y, _ := b.Core.wrappingJWTKey.Y.MarshalText()
- return &logical.Response{
- Data: map[string]interface{}{
- "jwt_x": string(x),
- "jwt_y": string(y),
- "jwt_curve": corePrivateKeyTypeP521,
- },
- }, nil
-}
-
-func (b *SystemBackend) handleWrappingWrap(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if req.WrapInfo == nil || req.WrapInfo.TTL == 0 {
- return logical.ErrorResponse("endpoint requires response wrapping to be used"), logical.ErrInvalidRequest
- }
-
- // N.B.: Do *NOT* allow JWT wrapping tokens to be created through this
- // endpoint. JWTs are signed so if we don't allow users to create wrapping
- // tokens using them we can ensure that an operator can't spoof a legit JWT
- // wrapped token, which makes certain init/rekey/generate-root cases have
- // better properties.
- req.WrapInfo.Format = "uuid"
-
- return &logical.Response{
- Data: data.Raw,
- }, nil
-}
-
-func (b *SystemBackend) handleWrappingUnwrap(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // If a third party is unwrapping (rather than the calling token being the
- // wrapping token) we detect this so that we can revoke the original
- // wrapping token after reading it
- var thirdParty bool
-
- token := data.Get("token").(string)
- if token != "" {
- thirdParty = true
- } else {
- token = req.ClientToken
- }
-
- if thirdParty {
- // Use the token to decrement the use count to avoid a second operation on the token.
- _, err := b.Core.tokenStore.UseTokenByID(token)
- if err != nil {
- return nil, fmt.Errorf("error decrementing wrapping token's use-count: %v", err)
- }
-
- defer b.Core.tokenStore.Revoke(token)
- }
-
- cubbyReq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "cubbyhole/response",
- ClientToken: token,
- }
- cubbyResp, err := b.Core.router.Route(cubbyReq)
- if err != nil {
- return nil, fmt.Errorf("error looking up wrapping information: %v", err)
- }
- if cubbyResp == nil {
- return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
- }
- if cubbyResp != nil && cubbyResp.IsError() {
- return cubbyResp, nil
- }
- if cubbyResp.Data == nil {
- return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
- }
-
- responseRaw := cubbyResp.Data["response"]
- if responseRaw == nil {
- return nil, fmt.Errorf("no response found inside the cubbyhole")
- }
- response, ok := responseRaw.(string)
- if !ok {
- return nil, fmt.Errorf("could not decode response inside the cubbyhole")
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{},
- }
- if len(response) == 0 {
- resp.Data[logical.HTTPStatusCode] = 204
- } else {
- resp.Data[logical.HTTPStatusCode] = 200
- resp.Data[logical.HTTPRawBody] = []byte(response)
- resp.Data[logical.HTTPContentType] = "application/json"
- }
-
- return resp, nil
-}
-
-func (b *SystemBackend) handleWrappingLookup(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // This ordering of lookups has been validated already in the wrapping
- // validation func, we're just doing this for a safety check
- token := data.Get("token").(string)
- if token == "" {
- token = req.ClientToken
- if token == "" {
- return logical.ErrorResponse("missing \"token\" value in input"), logical.ErrInvalidRequest
- }
- }
-
- cubbyReq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "cubbyhole/wrapinfo",
- ClientToken: token,
- }
- cubbyResp, err := b.Core.router.Route(cubbyReq)
- if err != nil {
- return nil, fmt.Errorf("error looking up wrapping information: %v", err)
- }
- if cubbyResp == nil {
- return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
- }
- if cubbyResp != nil && cubbyResp.IsError() {
- return cubbyResp, nil
- }
- if cubbyResp.Data == nil {
- return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
- }
-
- creationTTLRaw := cubbyResp.Data["creation_ttl"]
- creationTime := cubbyResp.Data["creation_time"]
- creationPath := cubbyResp.Data["creation_path"]
-
- resp := &logical.Response{
- Data: map[string]interface{}{},
- }
- if creationTTLRaw != nil {
- creationTTL, err := creationTTLRaw.(json.Number).Int64()
- if err != nil {
- return nil, fmt.Errorf("error reading creation_ttl value from wrapping information: %v", err)
- }
- resp.Data["creation_ttl"] = time.Duration(creationTTL).Seconds()
- }
- if creationTime != nil {
- // This was JSON marshaled so it's already a string in RFC3339 format
- resp.Data["creation_time"] = cubbyResp.Data["creation_time"]
- }
- if creationPath != nil {
- resp.Data["creation_path"] = cubbyResp.Data["creation_path"]
- }
-
- return resp, nil
-}
-
-func (b *SystemBackend) handleWrappingRewrap(
- req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // If a third party is rewrapping (rather than the calling token being the
- // wrapping token) we detect this so that we can revoke the original
- // wrapping token after reading it. Right now wrapped tokens can't unwrap
- // themselves, but in case we change it, this will be ready to do the right
- // thing.
- var thirdParty bool
-
- token := data.Get("token").(string)
- if token != "" {
- thirdParty = true
- } else {
- token = req.ClientToken
- }
-
- if thirdParty {
- // Use the token to decrement the use count to avoid a second operation on the token.
- _, err := b.Core.tokenStore.UseTokenByID(token)
- if err != nil {
- return nil, fmt.Errorf("error decrementing wrapping token's use-count: %v", err)
- }
- defer b.Core.tokenStore.Revoke(token)
- }
-
- // Fetch the original TTL
- cubbyReq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "cubbyhole/wrapinfo",
- ClientToken: token,
- }
- cubbyResp, err := b.Core.router.Route(cubbyReq)
- if err != nil {
- return nil, fmt.Errorf("error looking up wrapping information: %v", err)
- }
- if cubbyResp == nil {
- return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
- }
- if cubbyResp != nil && cubbyResp.IsError() {
- return cubbyResp, nil
- }
- if cubbyResp.Data == nil {
- return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
- }
-
- // Set the creation TTL on the request
- creationTTLRaw := cubbyResp.Data["creation_ttl"]
- if creationTTLRaw == nil {
- return nil, fmt.Errorf("creation_ttl value in wrapping information was nil")
- }
- creationTTL, err := cubbyResp.Data["creation_ttl"].(json.Number).Int64()
- if err != nil {
- return nil, fmt.Errorf("error reading creation_ttl value from wrapping information: %v", err)
- }
-
- // Get creation_path to return as the response later
- creationPathRaw := cubbyResp.Data["creation_path"]
- if creationPathRaw == nil {
- return nil, fmt.Errorf("creation_path value in wrapping information was nil")
- }
- creationPath := creationPathRaw.(string)
-
- // Fetch the original response and return it as the data for the new response
- cubbyReq = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "cubbyhole/response",
- ClientToken: token,
- }
- cubbyResp, err = b.Core.router.Route(cubbyReq)
- if err != nil {
- return nil, fmt.Errorf("error looking up response: %v", err)
- }
- if cubbyResp == nil {
- return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
- }
- if cubbyResp != nil && cubbyResp.IsError() {
- return cubbyResp, nil
- }
- if cubbyResp.Data == nil {
- return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
- }
-
- response := cubbyResp.Data["response"]
- if response == nil {
- return nil, fmt.Errorf("no response found inside the cubbyhole")
- }
-
- // Return response in "response"; wrapping code will detect the rewrap and
- // slot in instead of nesting
- return &logical.Response{
- Data: map[string]interface{}{
- "response": response,
- },
- WrapInfo: &wrapping.ResponseWrapInfo{
- TTL: time.Duration(creationTTL),
- CreationPath: creationPath,
- },
- }, nil
-}
-
-func sanitizeMountPath(path string) string {
- if !strings.HasSuffix(path, "/") {
- path += "/"
- }
-
- if strings.HasPrefix(path, "/") {
- path = path[1:]
- }
-
- return path
-}
-
-const sysHelpRoot = `
-The system backend is built-in to Vault and cannot be remounted or
-unmounted. It contains the paths that are used to configure Vault itself
-as well as perform core operations.
-`
-
-// sysHelp is all the help text for the sys backend.
-var sysHelp = map[string][2]string{
- "config/cors": {
- "Configures or returns the current configuration of CORS settings.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- Returns the configuration of the CORS setting.
-
- POST /
- Sets the comma-separated list of origins that can make cross-origin requests.
-
- DELETE /
- Clears the CORS configuration and disables acceptance of CORS requests.
- `,
- },
- "init": {
- "Initializes or returns the initialization status of the Vault.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- Returns the initialization status of the Vault.
-
- POST /
- Initializes a new vault.
- `,
- },
- "generate-root": {
- "Reads, generates, or deletes a root token regeneration process.",
- `
-This path responds to multiple HTTP methods which change the behavior. Those
-HTTP methods are listed below.
-
- GET /attempt
- Reads the configuration and progress of the current root generation
- attempt.
-
- POST /attempt
- Initializes a new root generation attempt. Only a single root generation
- attempt can take place at a time. One (and only one) of otp or pgp_key
- are required.
-
- DELETE /attempt
- Cancels any in-progress root generation attempt. This clears any
- progress made. This must be called to change the OTP or PGP key being
- used.
- `,
- },
- "seal-status": {
- "Returns the seal status of the Vault.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- Returns the seal status of the Vault. This is an unauthenticated
- endpoint.
- `,
- },
- "seal": {
- "Seals the Vault.",
- `
-This path responds to the following HTTP methods.
-
- PUT /
- Seals the Vault.
- `,
- },
- "unseal": {
- "Unseals the Vault.",
- `
-This path responds to the following HTTP methods.
-
- PUT /
- Unseals the Vault.
- `,
- },
- "mounts": {
- "List the currently mounted backends.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- Lists all the mounted secret backends.
-
- GET /
- Get information about the mount at the specified path.
-
- POST /
- Mount a new secret backend to the mount point in the URL.
-
- POST //tune
- Tune configuration parameters for the given mount point.
-
- DELETE /
- Unmount the specified mount point.
- `,
- },
-
- "mount": {
- `Mount a new backend at a new path.`,
- `
-Mount a backend at a new path. A backend can be mounted multiple times at
-multiple paths in order to configure multiple separately configured backends.
-Example: you might have an AWS backend for the east coast, and one for the
-west coast.
- `,
- },
-
- "mount_path": {
- `The path to mount to. Example: "aws/east"`,
- "",
- },
-
- "mount_type": {
- `The type of the backend. Example: "passthrough"`,
- "",
- },
-
- "mount_desc": {
- `User-friendly description for this mount.`,
- "",
- },
-
- "mount_config": {
- `Configuration for this mount, such as default_lease_ttl
-and max_lease_ttl.`,
- },
-
- "mount_local": {
- `Mark the mount as a local mount, which is not replicated
-and is unaffected by replication.`,
- },
-
- "mount_plugin_name": {
- `Name of the plugin to mount based from the name registered
-in the plugin catalog.`,
- },
-
- "tune_default_lease_ttl": {
- `The default lease TTL for this mount.`,
- },
-
- "tune_max_lease_ttl": {
- `The max lease TTL for this mount.`,
- },
-
- "remount": {
- "Move the mount point of an already-mounted backend.",
- `
-This path responds to the following HTTP methods.
-
- POST /sys/remount
- Changes the mount point of an already-mounted backend.
- `,
- },
-
- "auth_tune": {
- "Tune the configuration parameters for an auth path.",
- `Read and write the 'default-lease-ttl' and 'max-lease-ttl' values of
-the auth path.`,
- },
-
- "mount_tune": {
- "Tune backend configuration parameters for this mount.",
- `Read and write the 'default-lease-ttl' and 'max-lease-ttl' values of
-the mount.`,
- },
-
- "renew": {
- "Renew a lease on a secret",
- `
-When a secret is read, it may optionally include a lease interval
-and a boolean indicating if renew is possible. For secrets that support
-lease renewal, this endpoint is used to extend the validity of the
-lease and to prevent an automatic revocation.
- `,
- },
-
- "lease_id": {
- "The lease identifier to renew. This is included with a lease.",
- "",
- },
-
- "increment": {
- "The desired increment in seconds to the lease",
- "",
- },
-
- "revoke": {
- "Revoke a leased secret immediately",
- `
-When a secret is generated with a lease, it is automatically revoked
-at the end of the lease period if not renewed. However, in some cases
-you may want to force an immediate revocation. This endpoint can be
-used to revoke the secret with the given Lease ID.
- `,
- },
-
- "revoke-prefix": {
- "Revoke all secrets generated in a given prefix",
- `
-Revokes all the secrets generated under a given mount prefix. As
-an example, "prod/aws/" might be the AWS logical backend, and due to
-a change in the "ops" policy, we may want to invalidate all the secrets
-generated. We can do a revoke prefix at "prod/aws/ops" to revoke all
-the ops secrets. This does a prefix match on the Lease IDs and revokes
-all matching leases.
- `,
- },
-
- "revoke-prefix-path": {
- `The path to revoke keys under. Example: "prod/aws/ops"`,
- "",
- },
-
- "revoke-force": {
- "Revoke all secrets generated in a given prefix, ignoring errors.",
- `
-See the path help for 'revoke-prefix'; this behaves the same, except that it
-ignores errors encountered during revocation. This can be used in certain
-recovery situations; for instance, when you want to unmount a backend, but it
-is impossible to fix revocation errors and these errors prevent the unmount
-from proceeding. This is a DANGEROUS operation as it removes Vault's oversight
-of external secrets. Access to this prefix should be tightly controlled.
- `,
- },
-
- "revoke-force-path": {
- `The path to revoke keys under. Example: "prod/aws/ops"`,
- "",
- },
-
- "auth-table": {
- "List the currently enabled credential backends.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- List the currently enabled credential backends: the name, the type of
- the backend, and a user friendly description of the purpose for the
- credential backend.
-
- POST /
- Enable a new auth backend.
-
- DELETE /
- Disable the auth backend at the given mount point.
- `,
- },
-
- "auth": {
- `Enable a new credential backend with a name.`,
- `
-Enable a credential mechanism at a new path. A backend can be mounted multiple times at
-multiple paths in order to configure multiple separately configured backends.
-Example: you might have an OAuth backend for GitHub, and one for Google Apps.
- `,
- },
-
- "auth_path": {
- `The path to mount to. Cannot be delimited. Example: "user"`,
- "",
- },
-
- "auth_type": {
- `The type of the backend. Example: "userpass"`,
- "",
- },
-
- "auth_desc": {
- `User-friendly description for this crential backend.`,
- "",
- },
-
- "auth_config": {
- `Configuration for this mount, such as plugin_name.`,
- },
-
- "auth_plugin": {
- `Name of the auth plugin to use based from the name in the plugin catalog.`,
- "",
- },
-
- "policy-list": {
- `List the configured access control policies.`,
- `
-This path responds to the following HTTP methods.
-
- GET /
- List the names of the configured access control policies.
-
- GET /
- Retrieve the rules for the named policy.
-
- PUT /
- Add or update a policy.
-
- DELETE /
- Delete the policy with the given name.
- `,
- },
-
- "policy": {
- `Read, Modify, or Delete an access control policy.`,
- `
-Read the rules of an existing policy, create or update the rules of a policy,
-or delete a policy.
- `,
- },
-
- "policy-name": {
- `The name of the policy. Example: "ops"`,
- "",
- },
-
- "policy-rules": {
- `The rules of the policy. Either given in HCL or JSON format.`,
- "",
- },
-
- "audit-hash": {
- "The hash of the given string via the given audit backend",
- "",
- },
-
- "audit-table": {
- "List the currently enabled audit backends.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- List the currently enabled audit backends.
-
- PUT /
- Enable an audit backend at the given path.
-
- DELETE /
- Disable the given audit backend.
- `,
- },
-
- "audit_path": {
- `The name of the backend. Cannot be delimited. Example: "mysql"`,
- "",
- },
-
- "audit_type": {
- `The type of the backend. Example: "mysql"`,
- "",
- },
-
- "audit_desc": {
- `User-friendly description for this audit backend.`,
- "",
- },
-
- "audit_opts": {
- `Configuration options for the audit backend.`,
- "",
- },
-
- "audit": {
- `Enable or disable audit backends.`,
- `
-Enable a new audit backend or disable an existing backend.
- `,
- },
-
- "key-status": {
- "Provides information about the backend encryption key.",
- `
- Provides the current backend encryption key term and installation time.
- `,
- },
-
- "rotate": {
- "Rotates the backend encryption key used to persist data.",
- `
- Rotate generates a new encryption key which is used to encrypt all
- data going to the storage backend. The old encryption keys are kept so
- that data encrypted using those keys can still be decrypted.
- `,
- },
-
- "rekey_backup": {
- "Allows fetching or deleting the backup of the rotated unseal keys.",
- "",
- },
-
- "capabilities": {
- "Fetches the capabilities of the given token on the given path.",
- `Returns the capabilities of the given token on the path.
- The path will be searched for a path match in all the policies associated with the token.`,
- },
-
- "capabilities_self": {
- "Fetches the capabilities of the given token on the given path.",
- `Returns the capabilities of the client token on the path.
- The path will be searched for a path match in all the policies associated with the client token.`,
- },
-
- "capabilities_accessor": {
- "Fetches the capabilities of the token associated with the given token, on the given path.",
- `When there is no access to the token, token accessor can be used to fetch the token's capabilities
- on a given path.`,
- },
-
- "tidy_leases": {
- `This endpoint performs cleanup tasks that can be run if certain error
-conditions have occurred.`,
- `This endpoint performs cleanup tasks that can be run to clean up the
-lease entries after certain error conditions. Usually running this is not
-necessary, and is only required if upgrade notes or support personnel suggest
-it.`,
- },
-
- "wrap": {
- "Response-wraps an arbitrary JSON object.",
- `Round trips the given input data into a response-wrapped token.`,
- },
-
- "wrappubkey": {
- "Returns pubkeys used in some wrapping formats.",
- "Returns pubkeys used in some wrapping formats.",
- },
-
- "unwrap": {
- "Unwraps a response-wrapped token.",
- `Unwraps a response-wrapped token. Unlike simply reading from cubbyhole/response,
- this provides additional validation on the token, and rather than a JSON-escaped
- string, the returned response is the exact same as the contained wrapped response.`,
- },
-
- "wraplookup": {
- "Looks up the properties of a response-wrapped token.",
- `Returns the creation TTL and creation time of a response-wrapped token.`,
- },
-
- "rewrap": {
- "Rotates a response-wrapped token.",
- `Rotates a response-wrapped token; the output is a new token with the same
- response wrapped inside and the same creation TTL. The original token is revoked.`,
- },
- "audited-headers-name": {
- "Configures the headers sent to the audit logs.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- Returns the setting for the header with the given name.
-
- POST /
- Enable auditing of the given header.
-
- DELETE /