diff --git a/Gopkg.lock b/Gopkg.lock
index fd3ab1d..9f1d1c1 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -4,18 +4,26 @@
[[projects]]
name = "github.com/Luzifer/rconfig"
packages = ["."]
- revision = "c27bd3a64b5b19556914d9fec69922cf3852d585"
- version = "v1.1.0"
+ revision = "7aef1d393c1e2d0758901853b59981c7adc67c7e"
+ version = "v1.2.0"
[[projects]]
name = "github.com/Sirupsen/logrus"
packages = ["."]
- revision = "3ec0642a7fb6488f65b06f9040adc67e3990296a"
+ revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
+ version = "v1.0.3"
[[projects]]
name = "github.com/fatih/structs"
packages = ["."]
- revision = "3fe2facc32a7fbde4b29c0f85604dc1dd22836d2"
+ revision = "a720dfa8df582c51dee1b36feabb906bde1588bd"
+ version = "v1.0"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/golang/snappy"
+ packages = ["."]
+ revision = "553a641470496b2327abcac10b36396bd98e45c9"
[[projects]]
branch = "master"
@@ -24,14 +32,16 @@
revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55"
[[projects]]
+ branch = "master"
name = "github.com/hashicorp/go-cleanhttp"
packages = ["."]
- revision = "ad28ea4487f05916463e2423a55166280e8254b5"
+ revision = "3573b8b52aa7b37b9358d966a898feb387f62437"
[[projects]]
+ branch = "master"
name = "github.com/hashicorp/go-multierror"
packages = ["."]
- revision = "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
+ revision = "83588e72410abfbe4df460eeb6f30841ae47d4c4"
[[projects]]
branch = "master"
@@ -40,59 +50,88 @@
revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
[[projects]]
+ branch = "master"
name = "github.com/hashicorp/hcl"
packages = [".","hcl/ast","hcl/parser","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"]
- revision = "ef8133da8cda503718a74741312bf50821e6de79"
+ revision = "42e33e2d55a0ff1d6263f738896ea8c13571a8d0"
[[projects]]
name = "github.com/hashicorp/vault"
- packages = ["api","helper/certutil","helper/compressutil","helper/errutil","helper/jsonutil"]
- revision = "4490e93395fb70c3a25ade1fe88f363561a7d584"
+ packages = ["api","helper/certutil","helper/compressutil","helper/errutil","helper/jsonutil","helper/parseutil"]
+ revision = "6b29fb2b7f70ed538ee2b3c057335d706b6d4e36"
+ version = "v0.8.3"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
- revision = "14207d285c6c197daabb5c9793d63e7af9ab2d50"
+ revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
+ version = "v0.0.2"
[[projects]]
+ branch = "master"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
- revision = "981ab348d865cf048eb7d17e78ac7192632d8415"
+ revision = "b8bc1bf767474819792c23f32d8286a45736f1c6"
[[projects]]
+ branch = "master"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
- revision = "ca63d7c062ee3c9f34db231e352b60012b4fd0c1"
+ revision = "d0303fe809921458f417bcf828397a65db30a7e4"
[[projects]]
+ branch = "master"
name = "github.com/olekukonko/tablewriter"
packages = ["."]
- revision = "febf2d34b54a69ce7530036c7503b1c9fbfdf0bb"
+ revision = "a7a4c189eb47ed33ce7b35f2880070a0c82a67d4"
[[projects]]
+ branch = "master"
name = "github.com/sethgrid/pester"
packages = ["."]
- revision = "4f4c0a67b6496764028e1ab9fd8dfb630282ed2f"
+ revision = "0af5bab1e1ea2860c5aef8e77427bab011d774d8"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
- revision = "c7e63cf4530bcd3ba943729cee0efeff2ebea63f"
+ revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
+ version = "v1.0.0"
[[projects]]
+ branch = "master"
+ name = "golang.org/x/crypto"
+ packages = ["ssh/terminal"]
+ revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
+
+[[projects]]
+ branch = "master"
name = "golang.org/x/net"
packages = ["http2","http2/hpack","idna","lex/httplex"]
- revision = "f09c4662a0bd6bd8943ac7b4931e185df9471da4"
+ revision = "a04bdaca5b32abe1c069418fb7088ae607de5bd0"
[[projects]]
+ branch = "master"
name = "golang.org/x/sys"
- packages = ["unix"]
- revision = "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9"
+ packages = ["unix","windows"]
+ revision = "ebfc5b4631820b793c9010c87fd8fef0f39eb082"
[[projects]]
+ branch = "master"
+ name = "golang.org/x/text"
+ packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
+ revision = "825fc78a2fd6fa0a5447e300189e3219e05e1f25"
+
+[[projects]]
+ branch = "v2"
+ name = "gopkg.in/validator.v2"
+ packages = ["."]
+ revision = "460c83432a98c35224a6fe352acf8b23e067ad06"
+
+[[projects]]
+ branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
- revision = "31c299268d302dd0aa9a0dcf765a3d58971ac83f"
+ revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
[solve-meta]
analyzer-name = "dep"
diff --git a/vendor/github.com/Luzifer/rconfig/.travis.yml b/vendor/github.com/Luzifer/rconfig/.travis.yml
index 520bedf..b5c25ee 100644
--- a/vendor/github.com/Luzifer/rconfig/.travis.yml
+++ b/vendor/github.com/Luzifer/rconfig/.travis.yml
@@ -1,8 +1,8 @@
language: go
go:
- - 1.4
- - 1.5
+ - 1.6
+ - 1.7
- tip
script: go test -v -race -cover ./...
diff --git a/vendor/github.com/Luzifer/rconfig/History.md b/vendor/github.com/Luzifer/rconfig/History.md
index 8bc33a6..5adadd9 100644
--- a/vendor/github.com/Luzifer/rconfig/History.md
+++ b/vendor/github.com/Luzifer/rconfig/History.md
@@ -1,3 +1,7 @@
+# 1.2.0 / 2017-06-19
+
+ * Add ParseAndValidate method
+
# 1.1.0 / 2016-06-28
* Support time.Duration config parameters
diff --git a/vendor/github.com/Luzifer/rconfig/README.md b/vendor/github.com/Luzifer/rconfig/README.md
index 67fbf87..f42a664 100644
--- a/vendor/github.com/Luzifer/rconfig/README.md
+++ b/vendor/github.com/Luzifer/rconfig/README.md
@@ -29,34 +29,31 @@ go test -v -race -cover github.com/Luzifer/rconfig
## Usage
-As a first step define a struct holding your configuration:
+A very simple use case is to configure a struct in the vars section of your `main.go` and to parse the command-line flags from the `main()` function:
```go
-type config struct {
- Username string `default:"unknown" flag:"user" description:"Your name"`
- Details struct {
- Age int `default:"25" flag:"age" env:"age" description:"Your age"`
- }
-}
-```
+package main
-Next create an instance of that struct and let `rconfig` fill that config:
+import (
+ "fmt"
+ "github.com/Luzifer/rconfig"
+)
-```go
-var cfg config
-func init() {
- cfg = config{}
- rconfig.Parse(&cfg)
-}
-```
+var (
+ cfg = struct {
+ Username string `default:"unknown" flag:"user" description:"Your name"`
+ Details struct {
+ Age int `default:"25" flag:"age" env:"age" description:"Your age"`
+ }
+ }{}
+)
-You're ready to access your configuration:
-
-```go
func main() {
+ rconfig.Parse(&cfg)
+
fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
- cfg.Username,
- cfg.Details.Age)
+ cfg.Username,
+ cfg.Details.Age)
}
```
@@ -72,18 +69,14 @@ The order of the directives (lower number = higher precedence):
1. `default` tag in the struct
```go
-type config struct {
+var cfg = struct {
Username string `vardefault:"username" flag:"username" description:"Your username"`
}
-var cfg = config{}
-
-func init() {
+func main() {
rconfig.SetVariableDefaults(rconfig.VarDefaultsFromYAMLFile("~/.myapp.yml"))
rconfig.Parse(&cfg)
-}
-func main() {
fmt.Printf("Username = %s", cfg.Username)
// Output: Username = luzifer
}
diff --git a/vendor/github.com/Luzifer/rconfig/config.go b/vendor/github.com/Luzifer/rconfig/config.go
index dd37238..251909d 100644
--- a/vendor/github.com/Luzifer/rconfig/config.go
+++ b/vendor/github.com/Luzifer/rconfig/config.go
@@ -13,6 +13,7 @@ import (
"time"
"github.com/spf13/pflag"
+ validator "gopkg.in/validator.v2"
)
var (
@@ -45,6 +46,15 @@ func Parse(config interface{}) error {
return parse(config, nil)
}
+// ParseAndValidate works exactly like Parse but implements an additional run of
+// the go-validator package on the configuration struct. Therefore additional struct
+// tags are supported as described in the readme file of the go-validator package:
+//
+// https://github.com/go-validator/validator/tree/v2#usage
+func ParseAndValidate(config interface{}) error {
+ return parseAndValidate(config, nil)
+}
+
// Args returns the non-flag command-line arguments.
func Args() []string {
return fs.Args()
@@ -65,6 +75,14 @@ func SetVariableDefaults(defaults map[string]string) {
variableDefaults = defaults
}
+func parseAndValidate(in interface{}, args []string) error {
+ if err := parse(in, args); err != nil {
+ return err
+ }
+
+ return validator.Validate(in)
+}
+
func parse(in interface{}, args []string) error {
if args == nil {
args = os.Args
diff --git a/vendor/github.com/Luzifer/rconfig/general_test.go b/vendor/github.com/Luzifer/rconfig/general_test.go
index d9ff8fe..e7f29b7 100644
--- a/vendor/github.com/Luzifer/rconfig/general_test.go
+++ b/vendor/github.com/Luzifer/rconfig/general_test.go
@@ -15,6 +15,10 @@ var _ = Describe("Testing general parsing", func() {
SadFlag string
}
+ type tValidated struct {
+ Test string `flag:"test" default:"" validate:"nonzero"`
+ }
+
var (
err error
args []string
@@ -106,4 +110,19 @@ var _ = Describe("Testing general parsing", func() {
})
})
+ Context("making use of the validator package", func() {
+ var cfgValidated tValidated
+
+ BeforeEach(func() {
+ cfgValidated = tValidated{}
+ args = []string{}
+ })
+
+ JustBeforeEach(func() {
+ err = parseAndValidate(&cfgValidated, args)
+ })
+
+ It("should have errored", func() { Expect(err).To(HaveOccurred()) })
+ })
+
})
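
The rconfig bump's main user-visible addition is `ParseAndValidate`. Below is a minimal sketch of how a consumer might adopt it, assuming the `validate:"nonzero"` tag exercised in the test above; the example is illustrative and not part of this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Luzifer/rconfig"
)

var cfg = struct {
	// validate tags are enforced by ParseAndValidate via gopkg.in/validator.v2
	Username string `flag:"user" validate:"nonzero" description:"Your name"`
}{}

func main() {
	// Behaves like rconfig.Parse, then runs the validator over the struct,
	// so an empty --user now returns an error instead of passing silently.
	if err := rconfig.ParseAndValidate(&cfg); err != nil {
		log.Fatalf("config error: %s", err)
	}
	fmt.Printf("Hello %s\n", cfg.Username)
}
```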
diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml
index dee4eb2..a23296a 100644
--- a/vendor/github.com/Sirupsen/logrus/.travis.yml
+++ b/vendor/github.com/Sirupsen/logrus/.travis.yml
@@ -1,10 +1,15 @@
language: go
go:
- - 1.3
- - 1.4
- - 1.5
- - 1.6
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
- tip
+env:
+ - GOMAXPROCS=4 GORACE=halt_on_error=1
install:
- - go get -t ./...
-script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...
+ - go get github.com/stretchr/testify/assert
+ - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2
+ - go get golang.org/x/sys/unix
+ - go get golang.org/x/sys/windows
+script:
+ - go test -race -v ./...
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
index f2c2bc2..8236d8b 100644
--- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
+++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -1,3 +1,50 @@
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
# 0.10.0
* feature: Add a test hook (#180)
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
index ab48929..4f5ce57 100644
--- a/vendor/github.com/Sirupsen/logrus/README.md
+++ b/vendor/github.com/Sirupsen/logrus/README.md
@@ -1,11 +1,24 @@
-# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
+# Logrus [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
-the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
-yet stable (pre 1.0). Logrus itself is completely stable and has been used in
-many large deployments. The core API is unlikely to change much but please
-version control your Logrus to make sure you aren't fetching latest `master` on
-every build.**
+the standard library logger.
+
+**Seeing weird case-sensitive problems?** In the past it was possible to
+import Logrus as both upper- and lower-case. Due to the Go package environment,
+this caused issues in the community and a standard was needed. Some environments
+experienced problems with the upper-case variant, so the lower-case one was chosen.
+Everything using `logrus` must use the lower-case import path
+`github.com/sirupsen/logrus`. Any package that doesn't should be changed.
+
+To fix Glide, see [these
+comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+For an in-depth explanation of the casing issue, see [this
+comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+
+**Are you interested in assisting in maintaining Logrus?** Currently I have a
+lot of obligations, and I am unable to provide Logrus with the maintainership it
+needs. If you'd like to help, please reach out to me at `simon at author's
+username dot com`.
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
@@ -46,6 +59,12 @@ time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x20822
exit status 1
```
+#### Case-sensitivity
+
+The organization's name was changed to lower-case, and this will not be changed
+back. If you are getting import conflicts due to case sensitivity, please use
+the lower-case import: `github.com/sirupsen/logrus`.
+
#### Example
The simplest way to use Logrus is simply the package-level exported logger:
@@ -54,7 +73,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
- log "github.com/Sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
)
func main() {
@@ -65,7 +84,7 @@ func main() {
```
Note that it's completely api-compatible with the stdlib logger, so you can
-replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
@@ -74,15 +93,16 @@ package main
import (
"os"
- log "github.com/Sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
- // Output to stderr instead of stdout, could also be a file.
- log.SetOutput(os.Stderr)
+ // Output to stdout instead of the default stderr
+ // Can be any io.Writer, see below for File example
+ log.SetOutput(os.Stdout)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
@@ -123,7 +143,8 @@ application, you can also create an instance of the `logrus` Logger:
package main
import (
- "github.com/Sirupsen/logrus"
+ "os"
+ "github.com/sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
@@ -132,7 +153,15 @@ var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
- log.Out = os.Stderr
+ log.Out = os.Stdout
+
+ // You could set this to any `io.Writer` such as a file
+ // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+ // if err == nil {
+ // log.Out = file
+ // } else {
+ // log.Info("Failed to log to file, using default stderr")
+ // }
log.WithFields(logrus.Fields{
"animal": "walrus",
@@ -143,7 +172,7 @@ func main() {
#### Fields
-Logrus encourages careful, structured logging though logging fields instead of
+Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
@@ -165,6 +194,20 @@ In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field, however, you can still use the
`printf`-family functions with Logrus.
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
#### Hooks
You can add hooks for logging levels. For example to send errors to an exception
@@ -176,9 +219,9 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
```go
import (
- log "github.com/Sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
- logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
"log/syslog"
)
@@ -200,37 +243,52 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v
| Hook | Description |
| ----- | ----------- |
-| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
-| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
-| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
-| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
-| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
-| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
-| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
-| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
-| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
-| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
-| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
-| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
-| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
-| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
-| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
-| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
-| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
-| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) |
-| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
-| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
-| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
-| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
-| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
-| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
-| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
-| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
-
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to an ELK stack (through Redis) |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
+| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
#### Level logging
@@ -279,7 +337,7 @@ could do:
```go
import (
- log "github.com/Sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
)
init() {
@@ -306,11 +364,15 @@ The built-in logging formatters are:
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY set the
- `DisableColors` field to `true`
+ `DisableColors` field to `true`. For Windows, see
+ [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
Third party logging formatters:
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
@@ -356,6 +418,18 @@ srv := http.Server{
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
@@ -367,6 +441,7 @@ entries. It should not be a feature of the application-level logger.
| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps it with spf13/Viper to load configuration with fangs, and simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
#### Testing
@@ -376,15 +451,24 @@ Logrus has a built in facility for asserting the presence of log messages. This
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
```go
-logger, hook := NewNullLogger()
-logger.Error("Hello error")
+import(
+ "github.com/sirupsen/logrus"
+ "github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
-assert.Equal(1, len(hook.Entries))
-assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
-assert.Equal("Hello error", hook.LastEntry().Message)
+func TestSomething(t *testing.T) {
+ logger, hook := test.NewNullLogger()
+ logger.Error("Hello error")
-hook.Reset()
-assert.Nil(hook.LastEntry())
+ assert.Equal(t, 1, len(hook.Entries))
+ assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal(t, "Hello error", hook.LastEntry().Message)
+
+ hook.Reset()
+ assert.Nil(t, hook.LastEntry())
+}
```
#### Fatal handlers
@@ -403,7 +487,7 @@ logrus.RegisterExitHandler(handler)
...
```
-#### Thread safty
+#### Thread safety
By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go
index b4c9e84..8af9063 100644
--- a/vendor/github.com/Sirupsen/logrus/alt_exit.go
+++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -1,7 +1,7 @@
package logrus
// The following code was sourced and modified from the
-// https://bitbucket.org/tebeka/atexit package governed by the following license:
+// https://github.com/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka .
//
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit_test.go b/vendor/github.com/Sirupsen/logrus/alt_exit_test.go
index 022b778..a08b1a8 100644
--- a/vendor/github.com/Sirupsen/logrus/alt_exit_test.go
+++ b/vendor/github.com/Sirupsen/logrus/alt_exit_test.go
@@ -2,7 +2,10 @@ package logrus
import (
"io/ioutil"
+ "log"
+ "os"
"os/exec"
+ "path/filepath"
"testing"
"time"
)
@@ -11,30 +14,36 @@ func TestRegister(t *testing.T) {
current := len(handlers)
RegisterExitHandler(func() {})
if len(handlers) != current+1 {
- t.Fatalf("can't add handler")
+ t.Fatalf("expected %d handlers, got %d", current+1, len(handlers))
}
}
func TestHandler(t *testing.T) {
- gofile := "/tmp/testprog.go"
+ tempDir, err := ioutil.TempDir("", "test_handler")
+ if err != nil {
+ log.Fatalf("can't create temp dir. %q", err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ gofile := filepath.Join(tempDir, "gofile.go")
if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
- t.Fatalf("can't create go file")
+ t.Fatalf("can't create go file. %q", err)
}
- outfile := "/tmp/testprog.out"
+ outfile := filepath.Join(tempDir, "outfile.out")
arg := time.Now().UTC().String()
- err := exec.Command("go", "run", gofile, outfile, arg).Run()
+ err = exec.Command("go", "run", gofile, outfile, arg).Run()
if err == nil {
t.Fatalf("completed normally, should have failed")
}
data, err := ioutil.ReadFile(outfile)
if err != nil {
- t.Fatalf("can't read output file %s", outfile)
+ t.Fatalf("can't read output file %s. %q", outfile, err)
}
if string(data) != arg {
- t.Fatalf("bad data")
+ t.Fatalf("bad data. Expected %q, got %q", arg, data)
}
}
@@ -44,7 +53,7 @@ var testprog = []byte(`
package main
import (
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
"flag"
"fmt"
"io/ioutil"
diff --git a/vendor/github.com/Sirupsen/logrus/appveyor.yml b/vendor/github.com/Sirupsen/logrus/appveyor.yml
new file mode 100644
index 0000000..96c2ce1
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+ GOPATH: c:\gopath
+branches:
+ only:
+ - master
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - go version
+build_script:
+ - go get -t
+ - go test
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
index dddd5f8..da67aba 100644
--- a/vendor/github.com/Sirupsen/logrus/doc.go
+++ b/vendor/github.com/Sirupsen/logrus/doc.go
@@ -7,7 +7,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
- log "github.com/Sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
)
func main() {
@@ -21,6 +21,6 @@ The simplest way to use Logrus is simply the package-level exported logger:
Output:
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
-For a full guide visit https://github.com/Sirupsen/logrus
+For a full guide visit https://github.com/sirupsen/logrus
*/
package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
index 4edbe7a..5bf582e 100644
--- a/vendor/github.com/Sirupsen/logrus/entry.go
+++ b/vendor/github.com/Sirupsen/logrus/entry.go
@@ -35,6 +35,7 @@ type Entry struct {
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ // This field will be set on entry firing and the value will be equal to the one in Logger struct field.
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
@@ -126,7 +127,7 @@ func (entry Entry) log(level Level, msg string) {
}
func (entry *Entry) Debug(args ...interface{}) {
- if entry.Logger.Level >= DebugLevel {
+ if entry.Logger.level() >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
@@ -136,13 +137,13 @@ func (entry *Entry) Print(args ...interface{}) {
}
func (entry *Entry) Info(args ...interface{}) {
- if entry.Logger.Level >= InfoLevel {
+ if entry.Logger.level() >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
- if entry.Logger.Level >= WarnLevel {
+ if entry.Logger.level() >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
@@ -152,20 +153,20 @@ func (entry *Entry) Warning(args ...interface{}) {
}
func (entry *Entry) Error(args ...interface{}) {
- if entry.Logger.Level >= ErrorLevel {
+ if entry.Logger.level() >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
- if entry.Logger.Level >= FatalLevel {
+ if entry.Logger.level() >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
- if entry.Logger.Level >= PanicLevel {
+ if entry.Logger.level() >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
@@ -174,13 +175,13 @@ func (entry *Entry) Panic(args ...interface{}) {
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
- if entry.Logger.Level >= DebugLevel {
+ if entry.Logger.level() >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
- if entry.Logger.Level >= InfoLevel {
+ if entry.Logger.level() >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
@@ -190,7 +191,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
- if entry.Logger.Level >= WarnLevel {
+ if entry.Logger.level() >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
@@ -200,20 +201,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
- if entry.Logger.Level >= ErrorLevel {
+ if entry.Logger.level() >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
- if entry.Logger.Level >= FatalLevel {
+ if entry.Logger.level() >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
- if entry.Logger.Level >= PanicLevel {
+ if entry.Logger.level() >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
@@ -221,13 +222,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
- if entry.Logger.Level >= DebugLevel {
+ if entry.Logger.level() >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
- if entry.Logger.Level >= InfoLevel {
+ if entry.Logger.level() >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
@@ -237,7 +238,7 @@ func (entry *Entry) Println(args ...interface{}) {
}
func (entry *Entry) Warnln(args ...interface{}) {
- if entry.Logger.Level >= WarnLevel {
+ if entry.Logger.level() >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
@@ -247,20 +248,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
}
func (entry *Entry) Errorln(args ...interface{}) {
- if entry.Logger.Level >= ErrorLevel {
+ if entry.Logger.level() >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
- if entry.Logger.Level >= FatalLevel {
+ if entry.Logger.level() >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
- if entry.Logger.Level >= PanicLevel {
+ if entry.Logger.level() >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
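
These entry.go changes route every level check through `entry.Logger.level()` instead of reading `Logger.Level` directly; together with the `sync/atomic` import added to logger.go below, this is the fix for the level-access race noted in the 1.0.0 changelog (#512). A minimal sketch of the pattern this makes safe, assuming the public `(*Logger).SetLevel` from the 1.0.2 changelog entry above:

```go
package main

import (
	"io/ioutil"
	"sync"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	logger.Out = ioutil.Discard

	var wg sync.WaitGroup
	wg.Add(2)
	// One goroutine flips the level while another logs; with the atomic
	// level() accessor this no longer trips the race detector.
	go func() {
		defer wg.Done()
		for i := 0; i < 1000; i++ {
			logger.SetLevel(logrus.DebugLevel)
			logger.SetLevel(logrus.InfoLevel)
		}
	}()
	go func() {
		defer wg.Done()
		for i := 0; i < 1000; i++ {
			logger.Debug("probe")
		}
	}()
	wg.Wait()
}
```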
diff --git a/vendor/github.com/Sirupsen/logrus/example_basic_test.go b/vendor/github.com/Sirupsen/logrus/example_basic_test.go
new file mode 100644
index 0000000..a2acf55
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/example_basic_test.go
@@ -0,0 +1,69 @@
+package logrus_test
+
+import (
+ "github.com/sirupsen/logrus"
+ "os"
+)
+
+func Example_basic() {
+ var log = logrus.New()
+ log.Formatter = new(logrus.JSONFormatter)
+ log.Formatter = new(logrus.TextFormatter) //default
+ log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
+ log.Level = logrus.DebugLevel
+ log.Out = os.Stdout
+
+ // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+ // if err == nil {
+ // log.Out = file
+ // } else {
+ // log.Info("Failed to log to file, using default stderr")
+ // }
+
+ defer func() {
+ err := recover()
+ if err != nil {
+ entry := err.(*logrus.Entry)
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "err_animal": entry.Data["animal"],
+ "err_size": entry.Data["size"],
+ "err_level": entry.Level,
+ "err_message": entry.Message,
+ "number": 100,
+ }).Error("The ice breaks!") // or use Fatal() to force the process to exit with a nonzero code
+ }
+ }()
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "number": 8,
+ }).Debug("Started observing beach")
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "temperature": -4,
+ }).Debug("Temperature changes")
+
+ log.WithFields(logrus.Fields{
+ "animal": "orca",
+ "size": 9009,
+ }).Panic("It's over 9000!")
+
+ // Output:
+ // level=debug msg="Started observing beach" animal=walrus number=8
+ // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+ // level=warning msg="The group's number increased tremendously!" number=122 omg=true
+ // level=debug msg="Temperature changes" temperature=-4
+ // level=panic msg="It's over 9000!" animal=orca size=9009
+ // level=error msg="The ice breaks!" err_animal=orca err_level=panic err_message="It's over 9000!" err_size=9009 number=100 omg=true
+}
diff --git a/vendor/github.com/Sirupsen/logrus/example_hook_test.go b/vendor/github.com/Sirupsen/logrus/example_hook_test.go
new file mode 100644
index 0000000..d4ddffc
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/example_hook_test.go
@@ -0,0 +1,35 @@
+package logrus_test
+
+import (
+ "github.com/sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2"
+ "os"
+)
+
+func Example_hook() {
+ var log = logrus.New()
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
+ log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
+ log.Out = os.Stdout
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 100,
+ }).Error("The ice breaks!")
+
+ // Output:
+ // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+ // level=warning msg="The group's number increased tremendously!" number=122 omg=true
+ // level=error msg="The ice breaks!" number=100 omg=true
+}
diff --git a/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go b/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go
deleted file mode 100644
index a1623ec..0000000
--- a/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package main
-
-import (
- "github.com/Sirupsen/logrus"
-)
-
-var log = logrus.New()
-
-func init() {
- log.Formatter = new(logrus.JSONFormatter)
- log.Formatter = new(logrus.TextFormatter) // default
- log.Level = logrus.DebugLevel
-}
-
-func main() {
- defer func() {
- err := recover()
- if err != nil {
- log.WithFields(logrus.Fields{
- "omg": true,
- "err": err,
- "number": 100,
- }).Fatal("The ice breaks!")
- }
- }()
-
- log.WithFields(logrus.Fields{
- "animal": "walrus",
- "number": 8,
- }).Debug("Started observing beach")
-
- log.WithFields(logrus.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-
- log.WithFields(logrus.Fields{
- "omg": true,
- "number": 122,
- }).Warn("The group's number increased tremendously!")
-
- log.WithFields(logrus.Fields{
- "temperature": -4,
- }).Debug("Temperature changes")
-
- log.WithFields(logrus.Fields{
- "animal": "orca",
- "size": 9009,
- }).Panic("It's over 9000!")
-}
diff --git a/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go
deleted file mode 100644
index 3187f6d..0000000
--- a/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package main
-
-import (
- "github.com/Sirupsen/logrus"
- "gopkg.in/gemnasium/logrus-airbrake-hook.v2"
-)
-
-var log = logrus.New()
-
-func init() {
- log.Formatter = new(logrus.TextFormatter) // default
- log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
-}
-
-func main() {
- log.WithFields(logrus.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-
- log.WithFields(logrus.Fields{
- "omg": true,
- "number": 122,
- }).Warn("The group's number increased tremendously!")
-
- log.WithFields(logrus.Fields{
- "omg": true,
- "number": 100,
- }).Fatal("The ice breaks!")
-}
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
index 9a0120a..013183e 100644
--- a/vendor/github.com/Sirupsen/logrus/exported.go
+++ b/vendor/github.com/Sirupsen/logrus/exported.go
@@ -31,14 +31,14 @@ func SetFormatter(formatter Formatter) {
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
- std.Level = level
+ std.SetLevel(level)
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
- return std.Level
+ return std.level()
}
// AddHook adds a hook to the standard logger hooks.
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
index b5fbe93..b183ff5 100644
--- a/vendor/github.com/Sirupsen/logrus/formatter.go
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -2,7 +2,7 @@ package logrus
import "time"
-const DefaultTimestampFormat = time.RFC3339
+const defaultTimestampFormat = time.RFC3339
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
diff --git a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go
index c6d290c..d948158 100644
--- a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go
+++ b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go
@@ -80,11 +80,14 @@ func BenchmarkLargeJSONFormatter(b *testing.B) {
}
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
+ logger := New()
+
entry := &Entry{
Time: time.Time{},
Level: InfoLevel,
Message: "message",
Data: fields,
+ Logger: logger,
}
var d []byte
var err error
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md
index 066704b..1bbc0f7 100644
--- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md
+++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md
@@ -5,13 +5,13 @@
```go
import (
"log/syslog"
- "github.com/Sirupsen/logrus"
- logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ "github.com/sirupsen/logrus"
+ lSyslog "github.com/sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
- hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ hook, err := lSyslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
@@ -24,16 +24,16 @@ If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "
```go
import (
"log/syslog"
- "github.com/Sirupsen/logrus"
- logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ "github.com/sirupsen/logrus"
+ lSyslog "github.com/sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
- hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
+ hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
}
}
-```
\ No newline at end of file
+```
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
index a36e200..329ce0d 100644
--- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
+++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
@@ -1,12 +1,13 @@
// +build !windows,!nacl,!plan9
-package logrus_syslog
+package syslog
import (
"fmt"
- "github.com/Sirupsen/logrus"
"log/syslog"
"os"
+
+ "github.com/sirupsen/logrus"
)
// SyslogHook to send logs via syslog.
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
index 42762dc..5ec3a44 100644
--- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
+++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
@@ -1,9 +1,10 @@
-package logrus_syslog
+package syslog
import (
- "github.com/Sirupsen/logrus"
"log/syslog"
"testing"
+
+ "github.com/sirupsen/logrus"
)
func TestLocalhostAddAndPrint(t *testing.T) {
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go
index 0688125..62c4845 100644
--- a/vendor/github.com/Sirupsen/logrus/hooks/test/test.go
+++ b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go
@@ -1,17 +1,25 @@
+// Package test is used for testing logrus. It is here for backwards
+// compatibility from when logrus' organization was upper-case. Please use
+// lower-case logrus and the `null` package instead of this one.
package test
import (
"io/ioutil"
+ "sync"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
-// test.Hook is a hook designed for dealing with logs in test scenarios.
+// Hook is a hook designed for dealing with logs in test scenarios.
type Hook struct {
+ // Entries is an array of all entries that have been received by this hook.
+ // For safe access, use the AllEntries() method, rather than reading this
+ // value directly.
Entries []*logrus.Entry
+ mu sync.RWMutex
}
-// Installs a test hook for the global logger.
+// NewGlobal installs a test hook for the global logger.
func NewGlobal() *Hook {
hook := new(Hook)
@@ -21,7 +29,7 @@ func NewGlobal() *Hook {
}
-// Installs a test hook for a given local logger.
+// NewLocal installs a test hook for a given local logger.
func NewLocal(logger *logrus.Logger) *Hook {
hook := new(Hook)
@@ -31,7 +39,7 @@ func NewLocal(logger *logrus.Logger) *Hook {
}
-// Creates a discarding logger and installs the test hook.
+// NewNullLogger creates a discarding logger and installs the test hook.
func NewNullLogger() (*logrus.Logger, *Hook) {
logger := logrus.New()
@@ -42,6 +50,8 @@ func NewNullLogger() (*logrus.Logger, *Hook) {
}
func (t *Hook) Fire(e *logrus.Entry) error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
t.Entries = append(t.Entries, e)
return nil
}
@@ -51,17 +61,35 @@ func (t *Hook) Levels() []logrus.Level {
}
// LastEntry returns the last entry that was logged or nil.
-func (t *Hook) LastEntry() (l *logrus.Entry) {
-
- if i := len(t.Entries) - 1; i < 0 {
+func (t *Hook) LastEntry() *logrus.Entry {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ i := len(t.Entries) - 1
+ if i < 0 {
return nil
- } else {
- return t.Entries[i]
}
+ // Make a copy, for safety
+ e := *t.Entries[i]
+ return &e
+}
+// AllEntries returns all entries that were logged.
+func (t *Hook) AllEntries() []*logrus.Entry {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ // Make a copy so the returned value won't race with future log requests
+ entries := make([]*logrus.Entry, len(t.Entries))
+ for i, entry := range t.Entries {
+ // Make a copy, for safety
+ e := *entry
+ entries[i] = &e
+ }
+ return entries
}
// Reset removes all Entries from this test hook.
func (t *Hook) Reset() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
t.Entries = make([]*logrus.Entry, 0)
}
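
With the new mutex and the copying `LastEntry`/`AllEntries` accessors, the test hook can now be shared across goroutines. A minimal sketch of what that enables (illustrative, not part of the diff):

```go
package logrus_test

import (
	"testing"

	"github.com/sirupsen/logrus/hooks/test"
)

func TestConcurrentLogging(t *testing.T) {
	logger, hook := test.NewNullLogger()

	done := make(chan struct{})
	go func() {
		logger.Info("from another goroutine")
		close(done)
	}()
	logger.Info("from the test goroutine")
	<-done

	// AllEntries returns defensive copies, so inspecting them cannot
	// race with goroutines that are still logging.
	if got := len(hook.AllEntries()); got != 2 {
		t.Fatalf("expected 2 entries, got %d", got)
	}
}
```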
diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go
index d69455b..3f55cfe 100644
--- a/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go
+++ b/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go
@@ -3,7 +3,7 @@ package test
import (
"testing"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go
index 2ad6dc5..fb01c1b 100644
--- a/vendor/github.com/Sirupsen/logrus/json_formatter.go
+++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -5,18 +5,54 @@ import (
"fmt"
)
+type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
+
+// Default key names for the default fields
+const (
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+ if k, ok := f[key]; ok {
+ return k
+ }
+
+ return string(key)
+}
+
+// JSONFormatter formats logs into parsable json
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
+
+ // DisableTimestamp allows disabling automatic timestamps in output
+ DisableTimestamp bool
+
+ // FieldMap allows users to customize the names of keys for default fields.
+ // As an example:
+ // formatter := &JSONFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message",
+ // },
+ // }
+ FieldMap FieldMap
}
+// Format renders a single log entry
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
- // https://github.com/Sirupsen/logrus/issues/137
+ // https://github.com/sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
@@ -26,12 +62,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
- timestampFormat = DefaultTimestampFormat
+ timestampFormat = defaultTimestampFormat
}
- data["time"] = entry.Time.Format(timestampFormat)
- data["msg"] = entry.Message
- data["level"] = entry.Level.String()
+ if !f.DisableTimestamp {
+ data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+ }
+ data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+ data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
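
The new `FieldMap` support makes the default JSON key names configurable. A minimal sketch following the example given in the struct comment above, e.g. to emit Logstash-style `@`-prefixed keys:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.JSONFormatter{
		FieldMap: log.FieldMap{
			log.FieldKeyTime:  "@timestamp",
			log.FieldKeyLevel: "@level",
			log.FieldKeyMsg:   "@message",
		},
	})
	// {"@level":"info","@message":"A walrus appears","@timestamp":"...","animal":"walrus"}
	log.WithField("animal", "walrus").Info("A walrus appears")
}
```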
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go
index 1d70873..51093a7 100644
--- a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go
+++ b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go
@@ -3,7 +3,7 @@ package logrus
import (
"encoding/json"
"errors"
-
+ "strings"
"testing"
)
@@ -118,3 +118,82 @@ func TestJSONEntryEndsWithNewline(t *testing.T) {
t.Fatal("Expected JSON log entry to end with a newline")
}
}
+
+func TestJSONMessageKey(t *testing.T) {
+ formatter := &JSONFormatter{
+ FieldMap: FieldMap{
+ FieldKeyMsg: "message",
+ },
+ }
+
+ b, err := formatter.Format(&Entry{Message: "oh hai"})
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) {
+ t.Fatal("Expected JSON to format message key")
+ }
+}
+
+func TestJSONLevelKey(t *testing.T) {
+ formatter := &JSONFormatter{
+ FieldMap: FieldMap{
+ FieldKeyLevel: "somelevel",
+ },
+ }
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !strings.Contains(s, "somelevel") {
+ t.Fatal("Expected JSON to format level key")
+ }
+}
+
+func TestJSONTimeKey(t *testing.T) {
+ formatter := &JSONFormatter{
+ FieldMap: FieldMap{
+ FieldKeyTime: "timeywimey",
+ },
+ }
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !strings.Contains(s, "timeywimey") {
+ t.Fatal("Expected JSON to format time key")
+ }
+}
+
+func TestJSONDisableTimestamp(t *testing.T) {
+ formatter := &JSONFormatter{
+ DisableTimestamp: true,
+ }
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if strings.Contains(s, FieldKeyTime) {
+ t.Error("Did not prevent timestamp", s)
+ }
+}
+
+func TestJSONEnableTimestamp(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !strings.Contains(s, FieldKeyTime) {
+ t.Error("Timestamp not present", s)
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go
index b769f3d..2acab05 100644
--- a/vendor/github.com/Sirupsen/logrus/logger.go
+++ b/vendor/github.com/Sirupsen/logrus/logger.go
@@ -4,6 +4,7 @@ import (
"io"
"os"
"sync"
+ "sync/atomic"
)
type Logger struct {
@@ -24,7 +25,7 @@ type Logger struct {
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
- // logged. `logrus.Debug` is useful in
+ // logged.
Level Level
// Used to sync writing to the log. Locking is enabled by Default
mu MutexWrap
@@ -112,7 +113,7 @@ func (logger *Logger) WithError(err error) *Entry {
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
- if logger.Level >= DebugLevel {
+ if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
@@ -120,7 +121,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) {
}
func (logger *Logger) Infof(format string, args ...interface{}) {
- if logger.Level >= InfoLevel {
+ if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
@@ -134,7 +135,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
- if logger.Level >= WarnLevel {
+ if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
@@ -142,7 +143,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) {
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
- if logger.Level >= WarnLevel {
+ if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
@@ -150,7 +151,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) {
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
- if logger.Level >= ErrorLevel {
+ if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
@@ -158,7 +159,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) {
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
- if logger.Level >= FatalLevel {
+ if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
@@ -167,7 +168,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
- if logger.Level >= PanicLevel {
+ if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
@@ -175,7 +176,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) {
}
func (logger *Logger) Debug(args ...interface{}) {
- if logger.Level >= DebugLevel {
+ if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
@@ -183,7 +184,7 @@ func (logger *Logger) Debug(args ...interface{}) {
}
func (logger *Logger) Info(args ...interface{}) {
- if logger.Level >= InfoLevel {
+ if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
@@ -197,7 +198,7 @@ func (logger *Logger) Print(args ...interface{}) {
}
func (logger *Logger) Warn(args ...interface{}) {
- if logger.Level >= WarnLevel {
+ if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
@@ -205,7 +206,7 @@ func (logger *Logger) Warn(args ...interface{}) {
}
func (logger *Logger) Warning(args ...interface{}) {
- if logger.Level >= WarnLevel {
+ if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
@@ -213,7 +214,7 @@ func (logger *Logger) Warning(args ...interface{}) {
}
func (logger *Logger) Error(args ...interface{}) {
- if logger.Level >= ErrorLevel {
+ if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
@@ -221,7 +222,7 @@ func (logger *Logger) Error(args ...interface{}) {
}
func (logger *Logger) Fatal(args ...interface{}) {
- if logger.Level >= FatalLevel {
+ if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
@@ -230,7 +231,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
}
func (logger *Logger) Panic(args ...interface{}) {
- if logger.Level >= PanicLevel {
+ if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
@@ -238,7 +239,7 @@ func (logger *Logger) Panic(args ...interface{}) {
}
func (logger *Logger) Debugln(args ...interface{}) {
- if logger.Level >= DebugLevel {
+ if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
@@ -246,7 +247,7 @@ func (logger *Logger) Debugln(args ...interface{}) {
}
func (logger *Logger) Infoln(args ...interface{}) {
- if logger.Level >= InfoLevel {
+ if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
@@ -260,7 +261,7 @@ func (logger *Logger) Println(args ...interface{}) {
}
func (logger *Logger) Warnln(args ...interface{}) {
- if logger.Level >= WarnLevel {
+ if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
@@ -268,7 +269,7 @@ func (logger *Logger) Warnln(args ...interface{}) {
}
func (logger *Logger) Warningln(args ...interface{}) {
- if logger.Level >= WarnLevel {
+ if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
@@ -276,7 +277,7 @@ func (logger *Logger) Warningln(args ...interface{}) {
}
func (logger *Logger) Errorln(args ...interface{}) {
- if logger.Level >= ErrorLevel {
+ if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
@@ -284,7 +285,7 @@ func (logger *Logger) Errorln(args ...interface{}) {
}
func (logger *Logger) Fatalln(args ...interface{}) {
- if logger.Level >= FatalLevel {
+ if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
@@ -293,7 +294,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
}
func (logger *Logger) Panicln(args ...interface{}) {
- if logger.Level >= PanicLevel {
+ if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
@@ -306,3 +307,11 @@ func (logger *Logger) Panicln(args ...interface{}) {
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}
+
+func (logger *Logger) level() Level {
+ return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+func (logger *Logger) SetLevel(level Level) {
+ atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
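The point of routing every level check through level() is that SetLevel no longer races with concurrent logging; a minimal sketch of the intended usage (timings illustrative). Level is widened to uint32 in the next hunk because sync/atomic offers no 8-bit operations.

package main

import (
	"time"

	"github.com/Sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.SetLevel(logrus.InfoLevel) // atomic store

	go func() {
		for i := 0; i < 10; i++ {
			// Each call checks the level via an atomic load, so it is
			// safe to run concurrently with SetLevel below.
			log.Debug("only visible after the level is lowered")
			time.Sleep(10 * time.Millisecond)
		}
	}()

	time.Sleep(30 * time.Millisecond)
	log.SetLevel(logrus.DebugLevel) // safe while the goroutine is logging
	time.Sleep(50 * time.Millisecond)
}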
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go
index e596691..dd38999 100644
--- a/vendor/github.com/Sirupsen/logrus/logrus.go
+++ b/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -10,7 +10,7 @@ import (
type Fields map[string]interface{}
// Level type
-type Level uint8
+type Level uint32
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go
index bfc4780..78cbc28 100644
--- a/vendor/github.com/Sirupsen/logrus/logrus_test.go
+++ b/vendor/github.com/Sirupsen/logrus/logrus_test.go
@@ -359,3 +359,28 @@ func TestLogrusInterface(t *testing.T) {
e := logger.WithField("another", "value")
fn(e)
}
+
+// channelWriter implements io.Writer using a channel for synchronization, so
+// we can wait on the Entry.Writer goroutine to write in a non-racy way. This
+// assumes there is a single call to Logger.Out for each message.
+type channelWriter chan []byte
+
+func (cw channelWriter) Write(p []byte) (int, error) {
+ cw <- p
+ return len(p), nil
+}
+
+func TestEntryWriter(t *testing.T) {
+ cw := channelWriter(make(chan []byte, 1))
+ log := New()
+ log.Out = cw
+ log.Formatter = new(JSONFormatter)
+ log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n"))
+
+ bs := <-cw
+ var fields Fields
+ err := json.Unmarshal(bs, &fields)
+ assert.Nil(t, err)
+ assert.Equal(t, fields["foo"], "bar")
+ assert.Equal(t, fields["level"], "warning")
+}
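Beyond the test above, the new Entry.WriterLevel makes it possible to route the standard library logger into logrus with fields attached; a sketch, assuming a long-lived writer:

package main

import (
	"log"

	"github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	// Each line written to w becomes one warning-level entry that carries
	// the attached field.
	w := logger.WithField("component", "legacy").WriterLevel(logrus.WarnLevel)
	defer w.Close()

	log.SetOutput(w)
	log.SetFlags(0) // logrus adds its own timestamp
	log.Println("plain stdlib log line")
}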
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
deleted file mode 100644
index 1960169..0000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build appengine
-
-package logrus
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
- return true
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
index 5f6be4d..d7b3893 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
+++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -3,8 +3,8 @@
package logrus
-import "syscall"
+import "golang.org/x/sys/unix"
-const ioctlReadTermios = syscall.TIOCGETA
+const ioctlReadTermios = unix.TIOCGETA
-type Termios syscall.Termios
+type Termios unix.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
index 308160c..88d7298 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_linux.go
+++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -7,8 +7,8 @@
package logrus
-import "syscall"
+import "golang.org/x/sys/unix"
-const ioctlReadTermios = syscall.TCGETS
+const ioctlReadTermios = unix.TCGETS
-type Termios syscall.Termios
+type Termios unix.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
deleted file mode 100644
index 329038f..0000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import (
- "syscall"
- "unsafe"
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
- fd := syscall.Stderr
- var termios Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
deleted file mode 100644
index a3c6f6e..0000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build solaris,!appengine
-
-package logrus
-
-import (
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal() bool {
- _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
- return err == nil
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
deleted file mode 100644
index 3727e8a..0000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows,!appengine
-
-package logrus
-
-import (
- "syscall"
- "unsafe"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
- fd := syscall.Stderr
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
index cce61f2..be412aa 100644
--- a/vendor/github.com/Sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -3,10 +3,14 @@ package logrus
import (
"bytes"
"fmt"
- "runtime"
+ "io"
+ "os"
"sort"
"strings"
+ "sync"
"time"
+
+ "golang.org/x/crypto/ssh/terminal"
)
const (
@@ -14,24 +18,19 @@ const (
red = 31
green = 32
yellow = 33
- blue = 34
+ blue = 36
gray = 37
)
var (
baseTimestamp time.Time
- isTerminal bool
)
func init() {
baseTimestamp = time.Now()
- isTerminal = IsTerminal()
-}
-
-func miniTS() int {
- return int(time.Since(baseTimestamp) / time.Second)
}
+// TextFormatter formats logs into text
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
@@ -54,11 +53,35 @@ type TextFormatter struct {
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
+
+ // QuoteEmptyFields will wrap empty fields in quotes if true
+ QuoteEmptyFields bool
+
+ // Whether the logger's out is to a terminal
+ isTerminal bool
+
+ sync.Once
}
+func (f *TextFormatter) init(entry *Entry) {
+ if entry.Logger != nil {
+ f.isTerminal = f.checkIfTerminal(entry.Logger.Out)
+ }
+}
+
+func (f *TextFormatter) checkIfTerminal(w io.Writer) bool {
+ switch v := w.(type) {
+ case *os.File:
+ return terminal.IsTerminal(int(v.Fd()))
+ default:
+ return false
+ }
+}
+
+// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer
- var keys []string = make([]string, 0, len(entry.Data))
+ keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
@@ -74,12 +97,13 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
prefixFieldClashes(entry.Data)
- isColorTerminal := isTerminal && (runtime.GOOS != "windows")
- isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+ f.Do(func() { f.init(entry) })
+
+ isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
- timestampFormat = DefaultTimestampFormat
+ timestampFormat = defaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys, timestampFormat)
@@ -115,23 +139,29 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelText := strings.ToUpper(entry.Level.String())[0:4]
- if !f.FullTimestamp {
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ if f.DisableTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+ } else if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
- fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+ f.appendValue(b, v)
}
}
-func needsQuoting(text string) bool {
+func (f *TextFormatter) needsQuoting(text string) bool {
+ if f.QuoteEmptyFields && len(text) == 0 {
+ return true
+ }
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
- ch == '-' || ch == '.') {
+ ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
return true
}
}
@@ -139,27 +169,23 @@ func needsQuoting(text string) bool {
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
-
+ if b.Len() > 0 {
+ b.WriteByte(' ')
+ }
b.WriteString(key)
b.WriteByte('=')
+ f.appendValue(b, value)
+}
- switch value := value.(type) {
- case string:
- if !needsQuoting(value) {
- b.WriteString(value)
- } else {
- fmt.Fprintf(b, "%q", value)
- }
- case error:
- errmsg := value.Error()
- if !needsQuoting(errmsg) {
- b.WriteString(errmsg)
- } else {
- fmt.Fprintf(b, "%q", value)
- }
- default:
- fmt.Fprint(b, value)
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+ stringVal, ok := value.(string)
+ if !ok {
+ stringVal = fmt.Sprint(value)
}
- b.WriteByte(' ')
+ if !f.needsQuoting(stringVal) {
+ b.WriteString(stringVal)
+ } else {
+ b.WriteString(fmt.Sprintf("%q", stringVal))
+ }
}
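Taken together, the text formatter changes mean terminal detection now follows the logger's own output writer rather than process stderr, and quoting is tunable; a small illustrative sketch:

package main

import (
	"os"

	"github.com/Sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Out = os.Stdout // colors are now detected from this writer, not stderr
	log.Formatter = &logrus.TextFormatter{
		QuoteEmptyFields: true, // renders empty="" instead of a bare empty=
	}
	// "/foo/bar" is no longer quoted: '/' joined the unquoted character set.
	log.WithField("empty", "").WithField("path", "/foo/bar").Info("quoting demo")
}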
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go
index e25a44f..d93b931 100644
--- a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go
@@ -3,17 +3,38 @@ package logrus
import (
"bytes"
"errors"
+ "fmt"
+ "strings"
"testing"
"time"
)
+func TestFormatting(t *testing.T) {
+ tf := &TextFormatter{DisableColors: true}
+
+ testCases := []struct {
+ value string
+ expected string
+ }{
+ {`foo`, "time=\"0001-01-01T00:00:00Z\" level=panic test=foo\n"},
+ }
+
+ for _, tc := range testCases {
+ b, _ := tf.Format(WithField("test", tc.value))
+
+ if string(b) != tc.expected {
+ t.Errorf("formatting expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
+ }
+ }
+}
+
func TestQuoting(t *testing.T) {
tf := &TextFormatter{DisableColors: true}
checkQuoting := func(q bool, value interface{}) {
b, _ := tf.Format(WithField("test", value))
idx := bytes.Index(b, ([]byte)("test="))
- cont := bytes.Contains(b[idx+5:], []byte{'"'})
+ cont := bytes.Contains(b[idx+5:], []byte("\""))
if cont != q {
if q {
t.Errorf("quoting expected for: %#v", value)
@@ -23,14 +44,67 @@ func TestQuoting(t *testing.T) {
}
}
+ checkQuoting(false, "")
checkQuoting(false, "abcd")
checkQuoting(false, "v1.0")
checkQuoting(false, "1234567890")
- checkQuoting(true, "/foobar")
+ checkQuoting(false, "/foobar")
+ checkQuoting(false, "foo_bar")
+ checkQuoting(false, "foo@bar")
+ checkQuoting(false, "foobar^")
+ checkQuoting(false, "+/-_^@f.oobar")
+ checkQuoting(true, "foobar$")
+ checkQuoting(true, "&foobar")
checkQuoting(true, "x y")
checkQuoting(true, "x,y")
checkQuoting(false, errors.New("invalid"))
checkQuoting(true, errors.New("invalid argument"))
+
+ // Test for quoting empty fields.
+ tf.QuoteEmptyFields = true
+ checkQuoting(true, "")
+ checkQuoting(false, "abcd")
+ checkQuoting(true, errors.New("invalid argument"))
+}
+
+func TestEscaping(t *testing.T) {
+ tf := &TextFormatter{DisableColors: true}
+
+ testCases := []struct {
+ value string
+ expected string
+ }{
+ {`ba"r`, `ba\"r`},
+ {`ba'r`, `ba'r`},
+ }
+
+ for _, tc := range testCases {
+ b, _ := tf.Format(WithField("test", tc.value))
+ if !bytes.Contains(b, []byte(tc.expected)) {
+ t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
+ }
+ }
+}
+
+func TestEscaping_Interface(t *testing.T) {
+ tf := &TextFormatter{DisableColors: true}
+
+ ts := time.Now()
+
+ testCases := []struct {
+ value interface{}
+ expected string
+ }{
+ {ts, fmt.Sprintf("\"%s\"", ts.String())},
+ {errors.New("error: something went wrong"), "\"error: something went wrong\""},
+ }
+
+ for _, tc := range testCases {
+ b, _ := tf.Format(WithField("test", tc.value))
+ if !bytes.Contains(b, []byte(tc.expected)) {
+ t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
+ }
+ }
}
func TestTimestampFormat(t *testing.T) {
@@ -39,10 +113,7 @@ func TestTimestampFormat(t *testing.T) {
customStr, _ := customFormatter.Format(WithField("test", "test"))
timeStart := bytes.Index(customStr, ([]byte)("time="))
timeEnd := bytes.Index(customStr, ([]byte)("level="))
- timeStr := customStr[timeStart+5 : timeEnd-1]
- if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
- timeStr = timeStr[1 : len(timeStr)-1]
- }
+ timeStr := customStr[timeStart+5+len("\"") : timeEnd-1-len("\"")]
if format == "" {
format = time.RFC3339
}
@@ -57,5 +128,14 @@ func TestTimestampFormat(t *testing.T) {
checkTimeStr("")
}
+func TestDisableTimestampWithColoredOutput(t *testing.T) {
+ tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}
+
+ b, _ := tf.Format(WithField("test", "test"))
+ if strings.Contains(string(b), "[0000]") {
+ t.Error("timestamp not expected when DisableTimestamp is true")
+ }
+}
+
// TODO add tests for sorting etc., this requires a parser for the text
// formatter output.
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
index f74d2aa..7bdebed 100644
--- a/vendor/github.com/Sirupsen/logrus/writer.go
+++ b/vendor/github.com/Sirupsen/logrus/writer.go
@@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter {
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+ return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
+
switch level {
case DebugLevel:
- printFunc = logger.Debug
+ printFunc = entry.Debug
case InfoLevel:
- printFunc = logger.Info
+ printFunc = entry.Info
case WarnLevel:
- printFunc = logger.Warn
+ printFunc = entry.Warn
case ErrorLevel:
- printFunc = logger.Error
+ printFunc = entry.Error
case FatalLevel:
- printFunc = logger.Fatal
+ printFunc = entry.Fatal
case PanicLevel:
- printFunc = logger.Panic
+ printFunc = entry.Panic
default:
- printFunc = logger.Print
+ printFunc = entry.Print
}
- go logger.writerScanner(reader, printFunc)
+ go entry.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
-func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
- logger.Errorf("Error while reading from Writer: %s", err)
+ entry.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
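Moving writerScanner onto Entry means a pipe writer can carry fields; one sketch of what this enables is feeding a subprocess's stderr into the logger (command and field names are illustrative):

package main

import (
	"os/exec"

	"github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	cmd := exec.Command("ls", "-l")

	// Every stderr line becomes an error-level entry that keeps the "cmd"
	// field, which the old Logger-only WriterLevel could not express.
	stderr := logger.WithField("cmd", "ls").WriterLevel(logrus.ErrorLevel)
	defer stderr.Close()
	cmd.Stderr = stderr

	_ = cmd.Run()
}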
diff --git a/vendor/github.com/fatih/structs/.travis.yml b/vendor/github.com/fatih/structs/.travis.yml
index 845012b..cbf2ccc 100644
--- a/vendor/github.com/fatih/structs/.travis.yml
+++ b/vendor/github.com/fatih/structs/.travis.yml
@@ -1,6 +1,6 @@
language: go
go:
- - 1.6
+ - 1.7.x
- tip
sudo: false
before_install:
diff --git a/vendor/github.com/fatih/structs/field.go b/vendor/github.com/fatih/structs/field.go
index ad705f0..e697832 100644
--- a/vendor/github.com/fatih/structs/field.go
+++ b/vendor/github.com/fatih/structs/field.go
@@ -117,7 +117,16 @@ func (f *Field) Field(name string) *Field {
// FieldOk returns the field from a nested struct. The boolean returns whether
// the field was found (true) or not (false).
func (f *Field) FieldOk(name string) (*Field, bool) {
- v := strctVal(f.value.Interface())
+ value := &f.value
+ // The value must be settable, so make sure it holds the address of the
+ // variable rather than a copy: a copy obtained via Interface() is not
+ // assigned to any variable and hence not settable. Pass that pointer to
+ // strctVal instead.
+ // See "https://blog.golang.org/laws-of-reflection#TOC_8."
+ if f.value.Kind() != reflect.Ptr {
+ a := f.value.Addr()
+ value = &a
+ }
+ v := strctVal(value.Interface())
t := v.Type()
field, ok := t.FieldByName(name)
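A sketch of what the addressability fix buys (type names are illustrative): setting a field reached through FieldOk now works, provided the root struct was passed in by pointer.

package main

import (
	"fmt"

	"github.com/fatih/structs"
)

type Meta struct {
	Region string
}

type Server struct {
	Meta Meta
}

func main() {
	srv := &Server{}
	s := structs.New(srv) // pointer in, so nested values stay addressable

	f, ok := s.Field("Meta").FieldOk("Region")
	if !ok {
		panic("field not found")
	}
	if err := f.Set("eu-west-1"); err != nil { // errored before this fix
		panic(err)
	}
	fmt.Println(srv.Meta.Region) // eu-west-1
}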
diff --git a/vendor/github.com/fatih/structs/field_test.go b/vendor/github.com/fatih/structs/field_test.go
index b77e951..de9dc3b 100644
--- a/vendor/github.com/fatih/structs/field_test.go
+++ b/vendor/github.com/fatih/structs/field_test.go
@@ -133,6 +133,20 @@ func TestField_Set(t *testing.T) {
}
}
+func TestField_NotSettable(t *testing.T) {
+ a := map[int]Baz{
+ 4: Baz{
+ A: "value",
+ },
+ }
+
+ s := New(a[4])
+
+ if err := s.Field("A").Set("newValue"); err != errNotSettable {
+ t.Errorf("Trying to set non-settable field should error with %q. Got %q instead.", errNotSettable, err)
+ }
+}
+
func TestField_Zero(t *testing.T) {
s := newStruct()
diff --git a/vendor/github.com/fatih/structs/structs.go b/vendor/github.com/fatih/structs/structs.go
index 39eb083..be3816a 100644
--- a/vendor/github.com/fatih/structs/structs.go
+++ b/vendor/github.com/fatih/structs/structs.go
@@ -56,7 +56,7 @@ func New(s interface{}) *Struct {
// in the output map. Example:
//
// // The FieldStruct's fields will be flattened into the output map.
-// FieldStruct time.Time `structs:"flatten"`
+// FieldStruct time.Time `structs:",flatten"`
//
// A tag value with the option of "omitnested" stops iterating further if the type
// is a struct. Example:
@@ -115,17 +115,17 @@ func (s *Struct) FillMap(out map[string]interface{}) {
}
}
- if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
- // look out for embedded structs, and convert them to a
- // map[string]interface{} too
- n := New(val.Interface())
- n.TagName = s.TagName
- m := n.Map()
- isSubStruct = true
- if len(m) == 0 {
- finalVal = val.Interface()
- } else {
- finalVal = m
+ if !tagOpts.Has("omitnested") {
+ finalVal = s.nested(val)
+
+ v := reflect.ValueOf(val.Interface())
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Map, reflect.Struct:
+ isSubStruct = true
}
} else {
finalVal = val.Interface()
@@ -431,7 +431,7 @@ func strctVal(s interface{}) reflect.Value {
v := reflect.ValueOf(s)
// if pointer get the underlying element
- if v.Kind() == reflect.Ptr {
+ for v.Kind() == reflect.Ptr {
v = v.Elem()
}
@@ -505,3 +505,82 @@ func IsStruct(s interface{}) bool {
func Name(s interface{}) string {
return New(s).Name()
}
+
+// nested retrieves recursively all types for the given value and returns the
+// nested value.
+func (s *Struct) nested(val reflect.Value) interface{} {
+ var finalVal interface{}
+
+ v := reflect.ValueOf(val.Interface())
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := New(val.Interface())
+ n.TagName = s.TagName
+ m := n.Map()
+
+ // do not add the converted value if there are no exported fields, ie:
+ // time.Time
+ if len(m) == 0 {
+ finalVal = val.Interface()
+ } else {
+ finalVal = m
+ }
+ case reflect.Map:
+ // get the element type of the map
+ mapElem := val.Type()
+ switch val.Type().Kind() {
+ case reflect.Ptr, reflect.Array, reflect.Map,
+ reflect.Slice, reflect.Chan:
+ mapElem = val.Type().Elem()
+ if mapElem.Kind() == reflect.Ptr {
+ mapElem = mapElem.Elem()
+ }
+ }
+
+ // only iterate over struct types, ie: map[string]StructType,
+ // map[string][]StructType,
+ if mapElem.Kind() == reflect.Struct ||
+ (mapElem.Kind() == reflect.Slice &&
+ mapElem.Elem().Kind() == reflect.Struct) {
+ m := make(map[string]interface{}, val.Len())
+ for _, k := range val.MapKeys() {
+ m[k.String()] = s.nested(val.MapIndex(k))
+ }
+ finalVal = m
+ break
+ }
+
+ // TODO(arslan): should this be optional?
+ finalVal = val.Interface()
+ case reflect.Slice, reflect.Array:
+ if val.Type().Kind() == reflect.Interface {
+ finalVal = val.Interface()
+ break
+ }
+
+ // TODO(arslan): should this be optional?
+ // do not iterate over non-struct types, just pass the value through, e.g.
+ // []int, []string, etc. We only iterate further if the element is a struct,
+ // i.e. []foo or []*foo.
+ if val.Type().Elem().Kind() != reflect.Struct &&
+ !(val.Type().Elem().Kind() == reflect.Ptr &&
+ val.Type().Elem().Elem().Kind() == reflect.Struct) {
+ finalVal = val.Interface()
+ break
+ }
+
+ slices := make([]interface{}, val.Len(), val.Len())
+ for x := 0; x < val.Len(); x++ {
+ slices[x] = s.nested(val.Index(x))
+ }
+ finalVal = slices
+ default:
+ finalVal = val.Interface()
+ }
+
+ return finalVal
+}
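The effect of the new nested helper, sketched with illustrative types: struct values reached through maps and slices are now converted recursively, so their structs tags apply at any depth.

package main

import (
	"fmt"

	"github.com/fatih/structs"
)

type Address struct {
	Country string `structs:"country"`
}

type Person struct {
	Name      string             `structs:"name"`
	Addresses map[string]Address `structs:"addresses"`
}

func main() {
	p := Person{
		Name:      "test",
		Addresses: map[string]Address{"home": {Country: "Turkey"}},
	}
	m := structs.Map(p)

	// Previously the map value kept its concrete type; now struct values
	// inside maps are converted recursively, so the "country" tag applies.
	home := m["addresses"].(map[string]interface{})["home"].(map[string]interface{})
	fmt.Println(home["country"]) // Turkey
}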
diff --git a/vendor/github.com/fatih/structs/structs_example_test.go b/vendor/github.com/fatih/structs/structs_example_test.go
index 32bb829..329c130 100644
--- a/vendor/github.com/fatih/structs/structs_example_test.go
+++ b/vendor/github.com/fatih/structs/structs_example_test.go
@@ -81,7 +81,7 @@ func ExampleMap_tags() {
}
-func ExampleMap_nested() {
+func ExampleMap_omitNested() {
// By default field with struct types are processed too. We can stop
// processing them via "omitnested" tag option.
type Server struct {
diff --git a/vendor/github.com/fatih/structs/structs_test.go b/vendor/github.com/fatih/structs/structs_test.go
index b1b05a1..8a18a07 100644
--- a/vendor/github.com/fatih/structs/structs_test.go
+++ b/vendor/github.com/fatih/structs/structs_test.go
@@ -268,6 +268,268 @@ func TestMap_Nested(t *testing.T) {
}
}
+func TestMap_NestedMapWithStructValues(t *testing.T) {
+ type A struct {
+ Name string
+ }
+
+ type B struct {
+ A map[string]*A
+ }
+
+ a := &A{Name: "example"}
+
+ b := &B{
+ A: map[string]*A{
+ "example_key": a,
+ },
+ }
+
+ m := Map(b)
+
+ if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
+ t.Errorf("Map should return a map type, got: %v", typ)
+ }
+
+ in, ok := m["A"].(map[string]interface{})
+ if !ok {
+ t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["A"])
+ }
+
+ example := in["example_key"].(map[string]interface{})
+ if name := example["Name"].(string); name != "example" {
+ t.Errorf("Map nested struct's name field should give example, got: %s", name)
+ }
+}
+
+func TestMap_NestedMapWithStringValues(t *testing.T) {
+ type B struct {
+ Foo map[string]string
+ }
+
+ type A struct {
+ B *B
+ }
+
+ b := &B{
+ Foo: map[string]string{
+ "example_key": "example",
+ },
+ }
+
+ a := &A{B: b}
+
+ m := Map(a)
+
+ if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
+ t.Errorf("Map should return a map type, got: %v", typ)
+ }
+
+ in, ok := m["B"].(map[string]interface{})
+ if !ok {
+ t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
+ }
+
+ foo := in["Foo"].(map[string]string)
+ if name := foo["example_key"]; name != "example" {
+ t.Errorf("Map nested struct's name field should give example, got: %s", name)
+ }
+}
+
+func TestMap_NestedMapWithInterfaceValues(t *testing.T) {
+ type B struct {
+ Foo map[string]interface{}
+ }
+
+ type A struct {
+ B *B
+ }
+
+ b := &B{
+ Foo: map[string]interface{}{
+ "example_key": "example",
+ },
+ }
+
+ a := &A{B: b}
+
+ m := Map(a)
+
+ if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
+ t.Errorf("Map should return a map type, got: %v", typ)
+ }
+
+ in, ok := m["B"].(map[string]interface{})
+ if !ok {
+ t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
+ }
+
+ foo := in["Foo"].(map[string]interface{})
+ if name := foo["example_key"]; name != "example" {
+ t.Errorf("Map nested struct's name field should give example, got: %s", name)
+ }
+}
+
+func TestMap_NestedMapWithSliceIntValues(t *testing.T) {
+ type B struct {
+ Foo map[string][]int
+ }
+
+ type A struct {
+ B *B
+ }
+
+ b := &B{
+ Foo: map[string][]int{
+ "example_key": []int{80},
+ },
+ }
+
+ a := &A{B: b}
+
+ m := Map(a)
+
+ if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
+ t.Errorf("Map should return a map type, got: %v", typ)
+ }
+
+ in, ok := m["B"].(map[string]interface{})
+ if !ok {
+ t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
+ }
+
+ foo := in["Foo"].(map[string][]int)
+ if name := foo["example_key"]; name[0] != 80 {
+ t.Errorf("Map nested struct's name field should give example, got: %s", name)
+ }
+}
+
+func TestMap_NestedMapWithSliceStructValues(t *testing.T) {
+ type address struct {
+ Country string `structs:"country"`
+ }
+
+ type B struct {
+ Foo map[string][]address
+ }
+
+ type A struct {
+ B *B
+ }
+
+ b := &B{
+ Foo: map[string][]address{
+ "example_key": []address{
+ {Country: "Turkey"},
+ },
+ },
+ }
+
+ a := &A{B: b}
+ m := Map(a)
+
+ if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
+ t.Errorf("Map should return a map type, got: %v", typ)
+ }
+
+ in, ok := m["B"].(map[string]interface{})
+ if !ok {
+ t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
+ }
+
+ foo := in["Foo"].(map[string]interface{})
+
+ addresses := foo["example_key"].([]interface{})
+
+ addr, ok := addresses[0].(map[string]interface{})
+ if !ok {
+ t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
+ }
+
+ if _, exists := addr["country"]; !exists {
+ t.Errorf("Expecting country, but found Country")
+ }
+}
+
+func TestMap_NestedSliceWithStructValues(t *testing.T) {
+ type address struct {
+ Country string `structs:"customCountryName"`
+ }
+
+ type person struct {
+ Name string `structs:"name"`
+ Addresses []address `structs:"addresses"`
+ }
+
+ p := person{
+ Name: "test",
+ Addresses: []address{
+ address{Country: "England"},
+ address{Country: "Italy"},
+ },
+ }
+ mp := Map(p)
+
+ mpAddresses := mp["addresses"].([]interface{})
+ if _, exists := mpAddresses[0].(map[string]interface{})["Country"]; exists {
+ t.Errorf("Expecting customCountryName, but found Country")
+ }
+
+ if _, exists := mpAddresses[0].(map[string]interface{})["customCountryName"]; !exists {
+ t.Errorf("customCountryName key not found")
+ }
+}
+
+func TestMap_NestedSliceWithPointerOfStructValues(t *testing.T) {
+ type address struct {
+ Country string `structs:"customCountryName"`
+ }
+
+ type person struct {
+ Name string `structs:"name"`
+ Addresses []*address `structs:"addresses"`
+ }
+
+ p := person{
+ Name: "test",
+ Addresses: []*address{
+ &address{Country: "England"},
+ &address{Country: "Italy"},
+ },
+ }
+ mp := Map(p)
+
+ mpAddresses := mp["addresses"].([]interface{})
+ if _, exists := mpAddresses[0].(map[string]interface{})["Country"]; exists {
+ t.Errorf("Expecting customCountryName, but found Country")
+ }
+
+ if _, exists := mpAddresses[0].(map[string]interface{})["customCountryName"]; !exists {
+ t.Errorf("customCountryName key not found")
+ }
+}
+
+func TestMap_NestedSliceWithIntValues(t *testing.T) {
+ type person struct {
+ Name string `structs:"name"`
+ Ports []int `structs:"ports"`
+ }
+
+ p := person{
+ Name: "test",
+ Ports: []int{80},
+ }
+ m := Map(p)
+
+ ports, ok := m["ports"].([]int)
+ if !ok {
+ t.Errorf("Nested type of map should be of type []int, have %T", m["ports"])
+ }
+
+ if ports[0] != 80 {
+ t.Errorf("Map nested struct's ports field should give 80, got: %v", ports)
+ }
+}
+
func TestMap_Anonymous(t *testing.T) {
type A struct {
Name string
@@ -1022,6 +1284,28 @@ func TestNestedNilPointer(t *testing.T) {
_ = Map(personWithDogWithCollar) // Doesn't panic
}
+func TestSetValueOnNestedField(t *testing.T) {
+ type Base struct {
+ ID int
+ }
+
+ type User struct {
+ Base
+ Name string
+ }
+
+ u := User{}
+ s := New(&u)
+ f := s.Field("Base").Field("ID")
+ err := f.Set(10)
+ if err != nil {
+ t.Errorf("Error %v", err)
+ }
+ if f.Value().(int) != 10 {
+ t.Errorf("Value should be equal to 10, got %v", f.Value())
+ }
+}
+
type Person struct {
Name string
Age int
@@ -1107,3 +1391,63 @@ func TestNonStringerTagWithStringOption(t *testing.T) {
t.Errorf("Value for field Animal should not exist")
}
}
+
+func TestMap_InterfaceValue(t *testing.T) {
+ type TestStruct struct {
+ A interface{}
+ }
+
+ expected := []byte("test value")
+
+ a := TestStruct{A: expected}
+ s := Map(a)
+ if !reflect.DeepEqual(s["A"], expected) {
+ t.Errorf("Value does not match expected: %q != %q", s["A"], expected)
+ }
+}
+
+func TestPointer2Pointer(t *testing.T) {
+ defer func() {
+ err := recover()
+ if err != nil {
+ fmt.Printf("err %+v\n", err)
+ t.Error("Internal nil pointer should not panic")
+ }
+ }()
+ a := &Animal{
+ Name: "Fluff",
+ Age: 4,
+ }
+ _ = Map(&a)
+
+ b := &a
+ _ = Map(&b)
+
+ c := &b
+ _ = Map(&c)
+}
+
+func TestMap_InterfaceTypeWithMapValue(t *testing.T) {
+ type A struct {
+ Name string `structs:"name"`
+ Ip string `structs:"ip"`
+ Query string `structs:"query"`
+ Payload interface{} `structs:"payload"`
+ }
+
+ a := A{
+ Name: "test",
+ Ip: "127.0.0.1",
+ Query: "",
+ Payload: map[string]string{"test_param": "test_param"},
+ }
+
+ defer func() {
+ err := recover()
+ if err != nil {
+ t.Error("Converting Map with an interface{} type with map value should not panic")
+ }
+ }()
+
+ _ = Map(a)
+}
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 0000000..042091d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8 2.19GB/s ± 0% html
+_UFlat1-8 1.41GB/s ± 0% urls
+_UFlat2-8 23.5GB/s ± 2% jpg
+_UFlat3-8 1.91GB/s ± 0% jpg_200
+_UFlat4-8 14.0GB/s ± 1% pdf
+_UFlat5-8 1.97GB/s ± 0% html4
+_UFlat6-8 814MB/s ± 0% txt1
+_UFlat7-8 785MB/s ± 0% txt2
+_UFlat8-8 857MB/s ± 0% txt3
+_UFlat9-8 719MB/s ± 1% txt4
+_UFlat10-8 2.84GB/s ± 0% pb
+_UFlat11-8 1.05GB/s ± 0% gaviota
+
+_ZFlat0-8 1.04GB/s ± 0% html
+_ZFlat1-8 534MB/s ± 0% urls
+_ZFlat2-8 15.7GB/s ± 1% jpg
+_ZFlat3-8 740MB/s ± 3% jpg_200
+_ZFlat4-8 9.20GB/s ± 1% pdf
+_ZFlat5-8 991MB/s ± 0% html4
+_ZFlat6-8 379MB/s ± 0% txt1
+_ZFlat7-8 352MB/s ± 0% txt2
+_ZFlat8-8 396MB/s ± 1% txt3
+_ZFlat9-8 327MB/s ± 1% txt4
+_ZFlat10-8 1.33GB/s ± 1% pb
+_ZFlat11-8 605MB/s ± 1% gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8 621MB/s ± 2% html
+_UFlat1-8 494MB/s ± 1% urls
+_UFlat2-8 23.2GB/s ± 1% jpg
+_UFlat3-8 1.12GB/s ± 1% jpg_200
+_UFlat4-8 4.35GB/s ± 1% pdf
+_UFlat5-8 609MB/s ± 0% html4
+_UFlat6-8 296MB/s ± 0% txt1
+_UFlat7-8 288MB/s ± 0% txt2
+_UFlat8-8 309MB/s ± 1% txt3
+_UFlat9-8 280MB/s ± 1% txt4
+_UFlat10-8 753MB/s ± 0% pb
+_UFlat11-8 400MB/s ± 0% gaviota
+
+_ZFlat0-8 409MB/s ± 1% html
+_ZFlat1-8 250MB/s ± 1% urls
+_ZFlat2-8 12.3GB/s ± 1% jpg
+_ZFlat3-8 132MB/s ± 0% jpg_200
+_ZFlat4-8 2.92GB/s ± 0% pdf
+_ZFlat5-8 405MB/s ± 1% html4
+_ZFlat6-8 179MB/s ± 1% txt1
+_ZFlat7-8 170MB/s ± 1% txt2
+_ZFlat8-8 189MB/s ± 1% txt3
+_ZFlat9-8 164MB/s ± 1% txt4
+_ZFlat10-8 479MB/s ± 1% pb
+_ZFlat11-8 270MB/s ± 1% gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0 2.4GB/s html
+BM_UFlat/1 1.4GB/s urls
+BM_UFlat/2 21.8GB/s jpg
+BM_UFlat/3 1.5GB/s jpg_200
+BM_UFlat/4 13.3GB/s pdf
+BM_UFlat/5 2.1GB/s html4
+BM_UFlat/6 1.0GB/s txt1
+BM_UFlat/7 959.4MB/s txt2
+BM_UFlat/8 1.0GB/s txt3
+BM_UFlat/9 864.5MB/s txt4
+BM_UFlat/10 2.9GB/s pb
+BM_UFlat/11 1.2GB/s gaviota
+
+BM_ZFlat/0 944.3MB/s html (22.31 %)
+BM_ZFlat/1 501.6MB/s urls (47.78 %)
+BM_ZFlat/2 14.3GB/s jpg (99.95 %)
+BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
+BM_ZFlat/4 8.3GB/s pdf (83.30 %)
+BM_ZFlat/5 903.5MB/s html4 (22.52 %)
+BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
+BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
+BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
+BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
+BM_ZFlat/10 1.2GB/s pb (19.68 %)
+BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp b/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp
new file mode 100644
index 0000000..fc31f51
--- /dev/null
+++ b/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp
@@ -0,0 +1,77 @@
+/*
+To build the snappytool binary:
+g++ main.cpp /usr/lib/libsnappy.a -o snappytool
+or, if you have built the C++ snappy library from source:
+g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool
+after running "make" from your snappy checkout directory.
+*/
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "snappy.h"
+
+#define N 1000000
+
+char dst[N];
+char src[N];
+
+int main(int argc, char** argv) {
+ // Parse args.
+ if (argc != 2) {
+ fprintf(stderr, "exactly one of -d or -e must be given\n");
+ return 1;
+ }
+ bool decode = strcmp(argv[1], "-d") == 0;
+ bool encode = strcmp(argv[1], "-e") == 0;
+ if (decode == encode) {
+ fprintf(stderr, "exactly one of -d or -e must be given\n");
+ return 1;
+ }
+
+ // Read all of stdin into src[:s].
+ size_t s = 0;
+ while (1) {
+ if (s == N) {
+ fprintf(stderr, "input too large\n");
+ return 1;
+ }
+ ssize_t n = read(0, src+s, N-s);
+ if (n == 0) {
+ break;
+ }
+ if (n < 0) {
+ fprintf(stderr, "read error: %s\n", strerror(errno));
+ // TODO: handle EAGAIN, EINTR?
+ return 1;
+ }
+ s += n;
+ }
+
+ // Encode or decode src[:s] to dst[:d], and write to stdout.
+ size_t d = 0;
+ if (encode) {
+ if (N < snappy::MaxCompressedLength(s)) {
+ fprintf(stderr, "input too large after encoding\n");
+ return 1;
+ }
+ snappy::RawCompress(src, s, dst, &d);
+ } else {
+ if (!snappy::GetUncompressedLength(src, s, &d)) {
+ fprintf(stderr, "could not get uncompressed length\n");
+ return 1;
+ }
+ if (N < d) {
+ fprintf(stderr, "input too large after decoding\n");
+ return 1;
+ }
+ if (!snappy::RawUncompress(src, s, dst)) {
+ fprintf(stderr, "input was not valid Snappy-compressed data\n");
+ return 1;
+ }
+ }
+ write(1, dst, d);
+ return 0;
+}
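The same round trip that snappytool performs against the C++ library can be sketched in Go with the package's block-format API:

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("hello snappy hello snappy hello snappy")

	// Block format: Encode allocates a sufficiently large dst when nil is passed.
	enc := snappy.Encode(nil, src)

	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round trip ok: %v\n",
		len(src), len(enc), string(dec) == string(src))
}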
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+const (
+ decodeErrCodeCorrupt = 1
+ decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= len(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ switch decode(dst, src[s:]) {
+ case 0:
+ return dst, nil
+ case decodeErrCodeUnsupportedLiteralLength:
+ return nil, errUnsupportedLiteralLength
+ }
+ return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxBlockSize),
+ buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4], true) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return 0, r.err
+ }
+ }
+}
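A minimal sketch of the framing (stream) format that this Reader consumes, paired with the package's Writer:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	// Stream format: each Write emits a framed chunk, per the framing_format.txt
	// spec referenced above.
	w := snappy.NewWriter(&buf)
	if _, err := w.Write([]byte("framed payload")); err != nil {
		panic(err)
	}

	r := snappy.NewReader(&buf)
	out, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // framed payload
}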
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 0000000..e6179f6
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - AX scratch
+// - BX scratch
+// - CX length or x
+// - DX offset
+// - SI &src[s]
+// - DI &dst[d]
+// + R8 dst_base
+// + R9 dst_len
+// + R10 dst_base + dst_len
+// + R11 src_base
+// + R12 src_len
+// + R13 src_base + src_len
+// - R14 used by doCopy
+// - R15 used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+ // Initialize SI, DI and R8-R13.
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, DI
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, SI
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+loop:
+ // for s < len(src)
+ CMPQ SI, R13
+ JEQ end
+
+ // CX = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (SI), CX
+ MOVL CX, BX
+ ANDL $3, BX
+ CMPL BX, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, CX
+ CMPL CX, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ SI
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that CX == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // CX can hold 64 bits, so the increment cannot overflow.
+ INCQ CX
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // AX = len(dst) - d
+ // BX = len(src) - s
+ MOVQ R10, AX
+ SUBQ DI, AX
+ MOVQ R13, BX
+ SUBQ SI, BX
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ CX, $16
+ JGT callMemmove
+ CMPQ AX, $16
+ JLT callMemmove
+ CMPQ BX, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(SI), X0
+ MOVOU X0, 0(DI)
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ CX, AX
+ JGT errCorrupt
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // DI, SI and CX as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R8-R13.
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ CX, SI
+ SUBQ $58, SI
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL CX, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(SI), CX
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(SI), CX
+ JMP doLit
+
+tagLit62Plus:
+ CMPL CX, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVWLZX -3(SI), CX
+ MOVBLZX -1(SI), BX
+ SHLL $16, BX
+ ORL BX, CX
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(SI), CX
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(SI), DX
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(SI), DX
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - BX == src[s] & 0x03
+ // - CX == src[s]
+ CMPQ BX, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ MOVQ CX, DX
+ ANDQ $0xe0, DX
+ SHLQ $3, DX
+ MOVBQZX -1(SI), BX
+ ORQ BX, DX
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ SHRQ $2, CX
+ ANDQ $7, CX
+ ADDQ $4, CX
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ //    abxxxxxxxxxxxx
+ //    [------]           d-offset
+ //      [------]         d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ //    ababxxxxxxxxxx
+ //    [------]           d-offset
+ //        [------]       d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ CX, $0
+ JLE loop
+ MOVQ (R15), BX
+ MOVQ BX, (AX)
+ ADDQ $8, R15
+ ADDQ $8, AX
+ SUBQ $8, CX
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R15), BX
+ MOVB BX, (DI)
+ INCQ R15
+ INCQ DI
+ DECQ CX
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ DI, R10
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
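
The decode loop above derives a literal's length as x+1 and a tagCopy1's
fields as length = 4 + (tag>>2)&7 and offset = (tag&0xe0)<<3 | next byte. A
small hand-checked round trip of that tag layout, assuming the vendored
package imports as github.com/golang/snappy and using its exported Decode
(the block bytes are built by hand from the tag layout, not by Encode):

	package main

	import (
		"fmt"

		"github.com/golang/snappy"
	)

	func main() {
		block := []byte{
			11,              // uvarint(11): the decompressed length
			(6-1)<<2 | 0x00, // tagLiteral, length 6
			's', 'n', 'a', 'p', 'p', 'y',
			(5-4)<<2 | 0x01, // tagCopy1: length 5, offset high bits 0
			6,               // tagCopy1 offset low byte: 6 bytes back
		}
		got, err := snappy.Decode(nil, block)
		fmt.Printf("%q %v\n", got, err) // "snappysnapp" <nil>
	}
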
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 0000000..8c9f204
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+ var d, s, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ return decodeErrCodeUnsupportedLiteralLength
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ return decodeErrCodeCorrupt
+ }
+ // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+ // the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ for end := d + length; d != end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
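
The byte-by-byte forward copy above is what lets a copy tag reference bytes
it is still producing: with offset 1 it expands a single seed byte into a
run, which is exactly the repeating-pattern case the assembly's
slowForwardCopy commentary describes. A standalone sketch of that semantics
(plain Go, no snappy types assumed):

	package main

	import "fmt"

	func main() {
		// offset 1, length 5: each iteration reads a byte that the
		// previous iteration just wrote, so the seed byte repeats.
		dst := []byte{'a', 0, 0, 0, 0, 0}
		d, offset, length := 1, 1, 5
		for end := d + length; d != end; d++ {
			dst[d] = dst[d-offset]
		}
		fmt.Printf("%s\n", dst) // aaaaaa
	}
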
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..8d393e9
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
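+
+// (Numerically, minNonLiteralBlockSize is 1 + 1 + 15 = 17, so Encode above
+// emits any block shorter than 17 bytes as a single literal.)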
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if n > 0xffffffff {
+ return -1
+ }
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ n = 32 + n + n/6
+ if n > 0xffffffff {
+ return -1
+ }
+ return int(n)
+}
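+
+// As a worked instance of the bound above: for srcLen = 65536 (maxBlockSize),
+// MaxEncodedLen returns 32 + 65536 + 65536/6 = 32 + 65536 + 10922 = 76490.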
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ ibuf: make([]byte, 0, maxBlockSize),
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ //
+ // Its use is optional. For backwards compatibility, Writers created by the
+ // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+ // therefore do not need to be Flush'ed or Close'd.
+ ibuf []byte
+
+ // obuf is a buffer for the outgoing (compressed) bytes.
+ obuf []byte
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ if w.ibuf != nil {
+ w.ibuf = w.ibuf[:0]
+ }
+ w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if w.ibuf == nil {
+ // Do not buffer incoming bytes. This does not perform or compress well
+ // if the caller of Writer.Write writes many small slices. This
+ // behavior is therefore deprecated, but still supported for backwards
+ // compatibility with code that doesn't explicitly Flush or Close.
+ return w.write(p)
+ }
+
+ // The remainder of this method is based on bufio.Writer.Write from the
+ // standard library.
+
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.Flush()
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if w.err != nil {
+ return nRet, w.err
+ }
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for len(p) > 0 {
+ obufStart := len(magicChunk)
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ copy(w.obuf, magicChunk)
+ obufStart = 0
+ }
+
+ var uncompressed []byte
+ if len(p) > maxBlockSize {
+ uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkLen := 4 + len(compressed)
+ obufEnd := obufHeaderLen + len(compressed)
+ if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType = chunkTypeUncompressedData
+ chunkLen = 4 + len(uncompressed)
+ obufEnd = obufHeaderLen
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ w.obuf[len(magicChunk)+0] = chunkType
+ w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+ w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+ w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+ w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+ w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+ w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+ w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+ if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ if chunkType == chunkTypeUncompressedData {
+ if _, err := w.w.Write(uncompressed); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ }
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+ if w.err != nil {
+ return w.err
+ }
+ if len(w.ibuf) == 0 {
+ return nil
+ }
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+ w.Flush()
+ ret := w.err
+ if w.err == nil {
+ w.err = errClosed
+ }
+ return ret
+}
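
NewBufferedWriter, Flush and Close above implement the chunked framing
format; the matching Reader lives in this package's decode.go. A minimal
round trip, assuming the vendored import path github.com/golang/snappy and
its exported NewReader:

	package main

	import (
		"bytes"
		"fmt"
		"io/ioutil"

		"github.com/golang/snappy"
	)

	func main() {
		var buf bytes.Buffer
		w := snappy.NewBufferedWriter(&buf)
		w.Write([]byte("hello, snappy"))
		w.Close() // required: flushes the buffered chunk to buf

		got, err := ioutil.ReadAll(snappy.NewReader(&buf))
		fmt.Printf("%q %v\n", got, err) // "hello, snappy" <nil>
	}
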
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX len(lit)
+// - BX n
+// - DX return value
+// - DI &dst[i]
+// - R10 &lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ lit_base+24(FP), R10
+ MOVQ lit_len+32(FP), AX
+ MOVQ AX, DX
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT oneByte
+ CMPL BX, $256
+ JLT twoBytes
+
+threeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ ADDQ $3, DX
+ JMP memmove
+
+twoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ ADDQ $2, DX
+ JMP memmove
+
+oneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+ ADDQ $1, DX
+
+memmove:
+ MOVQ DX, ret+48(FP)
+
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ CALL runtime·memmove(SB)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX length
+// - SI &dst[0]
+// - DI &dst[i]
+// - R11 offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, SI
+ MOVQ offset+24(FP), R11
+ MOVQ length+32(FP), AX
+
+loop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT step1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP loop0
+
+step1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE step2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+step2:
+ // if length >= 12 || offset >= 2048 { goto step3 }
+ CMPL AX, $12
+ JGE step3
+ CMPL R11, $2048
+ JGE step3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+step3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+// - DX &src[0]
+// - SI &src[j]
+// - R13 &src[len(src) - 8]
+// - R14 &src[len(src)]
+// - R15 &src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+ MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), R14
+ MOVQ i+24(FP), R15
+ MOVQ j+32(FP), SI
+ ADDQ DX, R14
+ ADDQ DX, R15
+ ADDQ DX, SI
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+cmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA cmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE bsf
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+cmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE extendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE extendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP cmp1
+
+extendMatchEnd:
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+// - AX . .
+// - BX . .
+// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
+// - DX 64 &src[0], tableSize
+// - SI 72 &src[s]
+// - DI 80 &dst[d]
+// - R9 88 sLimit
+// - R10 . &src[nextEmit]
+// - R11 96 prevHash, currHash, nextHash, offset
+// - R12 104 &src[base], skip
+// - R13 . &src[nextS], &src[len(src) - 8]
+// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
+// - R15 112 candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R14
+
+ // shift, tableSize := uint32(32-8), 1<<8
+ MOVQ $24, CX
+ MOVQ $256, DX
+
+calcShift:
+ // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ // shift--
+ // }
+ CMPQ DX, $16384
+ JGE varTable
+ CMPQ DX, R14
+ JGE varTable
+ SUBQ $1, CX
+ SHLQ $1, DX
+ JMP calcShift
+
+varTable:
+ // var table [maxTableSize]uint16
+ //
+ // In the asm code, unlike the Go code, we can zero-initialize only the
+ // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+ // writes 16 bytes, so we can do only tableSize/8 writes instead of the
+ // 2048 writes that would zero-initialize all of table's 32768 bytes.
+ SHRQ $3, DX
+ LEAQ table-32768(SP), BX
+ PXOR X0, X0
+
+memclr:
+ MOVOU X0, 0(BX)
+ ADDQ $16, BX
+ SUBQ $1, DX
+ JNZ memclr
+
+ // !!! DX = &src[0]
+ MOVQ SI, DX
+
+ // sLimit := len(src) - inputMargin
+ MOVQ R14, R9
+ SUBQ $15, R9
+
+ // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+ // change for the rest of the function.
+ MOVQ CX, 56(SP)
+ MOVQ DX, 64(SP)
+ MOVQ R9, 88(SP)
+
+ // nextEmit := 0
+ MOVQ DX, R10
+
+ // s := 1
+ ADDQ $1, SI
+
+ // nextHash := hash(load32(src, s), shift)
+ MOVL 0(SI), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+outer:
+ // for { etc }
+
+ // skip := 32
+ MOVQ $32, R12
+
+ // nextS := s
+ MOVQ SI, R13
+
+ // candidate := 0
+ MOVQ $0, R15
+
+inner0:
+ // for { etc }
+
+ // s := nextS
+ MOVQ R13, SI
+
+ // bytesBetweenHashLookups := skip >> 5
+ MOVQ R12, R14
+ SHRQ $5, R14
+
+ // nextS = s + bytesBetweenHashLookups
+ ADDQ R14, R13
+
+ // skip += bytesBetweenHashLookups
+ ADDQ R14, R12
+
+ // if nextS > sLimit { goto emitRemainder }
+ MOVQ R13, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JA emitRemainder
+
+ // candidate = int(table[nextHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[nextHash] = uint16(s)
+ MOVQ SI, AX
+ SUBQ DX, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // nextHash = hash(load32(src, nextS), shift)
+ MOVL 0(R13), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // if load32(src, s) != load32(src, candidate) { continue } break
+ MOVL 0(SI), AX
+ MOVL (DX)(R15*1), BX
+ CMPL AX, BX
+ JNE inner0
+
+fourByteMatch:
+ // As per the encode_other.go code:
+ //
+ // A 4-byte match has been found. We'll later see etc.
+
+ // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+ // on inputMargin in encode.go.
+ MOVQ SI, AX
+ SUBQ R10, AX
+ CMPQ AX, $16
+ JLE emitLiteralFastPath
+
+ // ----------------------------------------
+ // Begin inline of the emitLiteral call.
+ //
+ // d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT inlineEmitLiteralOneByte
+ CMPL BX, $256
+ JLT inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+ // Spill local variables (registers) onto the stack; call; unspill.
+ //
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
+ MOVQ SI, 72(SP)
+ MOVQ DI, 80(SP)
+ MOVQ R15, 112(SP)
+ CALL runtime·memmove(SB)
+ MOVQ 56(SP), CX
+ MOVQ 64(SP), DX
+ MOVQ 72(SP), SI
+ MOVQ 80(SP), DI
+ MOVQ 88(SP), R9
+ MOVQ 112(SP), R15
+ JMP inner1
+
+inlineEmitLiteralEnd:
+ // End inline of the emitLiteral call.
+ // ----------------------------------------
+
+emitLiteralFastPath:
+ // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+ MOVB AX, BX
+ SUBB $1, BX
+ SHLB $2, BX
+ MOVB BX, (DI)
+ ADDQ $1, DI
+
+ // !!! Implement the copy from lit to dst as a 16-byte load and store.
+ // (Encode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only len(lit) bytes, but that's
+ // OK. Subsequent iterations will fix up the overrun.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R10), X0
+ MOVOU X0, 0(DI)
+ ADDQ AX, DI
+
+inner1:
+ // for { etc }
+
+ // base := s
+ MOVQ SI, R12
+
+ // !!! offset := base - candidate
+ MOVQ R12, R11
+ SUBQ R15, R11
+ SUBQ DX, R11
+
+ // ----------------------------------------
+ // Begin inline of the extendMatch call.
+ //
+ // s = extendMatch(src, candidate+4, s+4)
+
+ // !!! R14 = &src[len(src)]
+ MOVQ src_len+32(FP), R14
+ ADDQ DX, R14
+
+ // !!! R13 = &src[len(src) - 8]
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+ // !!! R15 = &src[candidate + 4]
+ ADDQ $4, R15
+ ADDQ DX, R15
+
+ // !!! s += 4
+ ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA inlineExtendMatchCmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE inlineExtendMatchBSF
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+ JMP inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE inlineExtendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE inlineExtendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+ // End inline of the extendMatch call.
+ // ----------------------------------------
+
+ // ----------------------------------------
+ // Begin inline of the emitCopy call.
+ //
+ // d += emitCopy(dst[d:], base-candidate, s-base)
+
+ // !!! length := s - base
+ MOVQ SI, AX
+ SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT inlineEmitCopyStep1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE inlineEmitCopyStep2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+inlineEmitCopyStep2:
+ // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+ CMPL AX, $12
+ JGE inlineEmitCopyStep3
+ CMPL R11, $2048
+ JGE inlineEmitCopyStep3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+ JMP inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+inlineEmitCopyEnd:
+ // End inline of the emitCopy call.
+ // ----------------------------------------
+
+ // nextEmit = s
+ MOVQ SI, R10
+
+ // if s >= sLimit { goto emitRemainder }
+ MOVQ SI, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JAE emitRemainder
+
+ // As per the encode_other.go code:
+ //
+ // We could immediately etc.
+
+ // x := load64(src, s-1)
+ MOVQ -1(SI), R14
+
+ // prevHash := hash(uint32(x>>0), shift)
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // table[prevHash] = uint16(s-1)
+ MOVQ SI, AX
+ SUBQ DX, AX
+ SUBQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // currHash := hash(uint32(x>>8), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // candidate = int(table[currHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[currHash] = uint16(s)
+ ADDQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // if uint32(x>>8) == load32(src, candidate) { continue }
+ MOVL (DX)(R15*1), BX
+ CMPL R14, BX
+ JEQ inner1
+
+ // nextHash = hash(uint32(x>>16), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // s++
+ ADDQ $1, SI
+
+ // break out of the inner1 for loop, i.e. continue the outer loop.
+ JMP outer
+
+emitRemainder:
+ // if nextEmit < len(src) { etc }
+ MOVQ src_len+32(FP), AX
+ ADDQ DX, AX
+ CMPQ R10, AX
+ JEQ encodeBlockEnd
+
+ // d += emitLiteral(dst[d:], src[nextEmit:])
+ //
+ // Push args.
+ MOVQ DI, 0(SP)
+ MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ R10, 24(SP)
+ SUBQ R10, AX
+ MOVQ AX, 32(SP)
+ MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
+
+ // Spill local variables (registers) onto the stack; call; unspill.
+ MOVQ DI, 80(SP)
+ CALL ·emitLiteral(SB)
+ MOVQ 80(SP), DI
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ ADDQ 48(SP), DI
+
+encodeBlockEnd:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, DI
+ MOVQ DI, d+48(FP)
+ RET
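
The BSF sequence in extendMatch above (XOR the two 8-byte words, find the
lowest set bit, shift right by 3) turns the first differing bit into a byte
index, relying on amd64 being little-endian. The same computation in plain
Go, using math/bits (added in Go 1.9, after this assembly was written):

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		x := uint64(0x1122334455667788) // in memory: 0x88, 0x77, 0x66, ...
		y := uint64(0x1122334455AA7788) // differs at byte index 2 (0x66 vs 0xAA)
		n := bits.TrailingZeros64(x^y) >> 3
		fmt.Println(n) // 2
	}
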
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 0000000..dbcae90
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+ // length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[i+0] = 63<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 64
+ }
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ dst[i+0] = 59<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+// 0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+ for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+ }
+ return j
+}
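+
+// For example, extendMatch([]byte("abcabcabc"), 0, 3) returns 9: the match
+// that starts at j == 3 runs to the end of src.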
+
+func hash(u, shift uint32) uint32 {
+ return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ // The table element type is uint16, as s < sLimit and sLimit < len(src)
+ // and len(src) <= maxBlockSize and maxBlockSize == 65536.
+ const (
+ maxTableSize = 1 << 14
+ // tableMask is redundant, but helps the compiler eliminate bounds
+ // checks.
+ tableMask = maxTableSize - 1
+ )
+ shift := uint32(32 - 8)
+ for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ shift--
+ }
+ // In Go, all array elements are zero-initialized, so there is no advantage
+ // to a smaller tableSize per se. However, it matches the C++ algorithm,
+ // and in the asm versions of this code, we can get away with zeroing only
+ // the first tableSize elements.
+ var table [maxTableSize]uint16
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ nextHash := hash(load32(src, s), shift)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := 32
+
+ nextS := s
+ candidate := 0
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = int(table[nextHash&tableMask])
+ table[nextHash&tableMask] = uint16(s)
+ nextHash = hash(load32(src, nextS), shift)
+ if load32(src, s) == load32(src, candidate) {
+ break
+ }
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+
+ // Extend the 4-byte match as long as possible.
+ //
+ // This is an inlined version of:
+ // s = extendMatch(src, candidate+4, s+4)
+ s += 4
+ for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+ }
+
+ d += emitCopy(dst[d:], base-candidate, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load64(src, s-1)
+ prevHash := hash(uint32(x>>0), shift)
+ table[prevHash&tableMask] = uint16(s - 1)
+ currHash := hash(uint32(x>>8), shift)
+ candidate = int(table[currHash&tableMask])
+ table[currHash&tableMask] = uint16(s)
+ if uint32(x>>8) != load32(src, candidate) {
+ nextHash = hash(uint32(x>>16), shift)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
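
emitCopy's 2-byte form above packs the three high offset bits, the length and
the tag into a single byte. Worked by hand for offset = 10 and length = 5
(inside the length < 12, offset < 2048 window), as a standalone check:

	package main

	import "fmt"

	func main() {
		offset, length := 10, 5
		// tagCopy1 is the 0x01 tag in this package; its 2-byte form is:
		b0 := uint8(offset>>8)<<5 | uint8(length-4)<<2 | 0x01
		b1 := uint8(offset)
		fmt.Println(b0, b1) // 5 10
	}
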
diff --git a/vendor/github.com/golang/snappy/golden_test.go b/vendor/github.com/golang/snappy/golden_test.go
new file mode 100644
index 0000000..e4496f9
--- /dev/null
+++ b/vendor/github.com/golang/snappy/golden_test.go
@@ -0,0 +1,1965 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+// extendMatchGoldenTestCases is the i and j arguments, and the returned value,
+// for every extendMatch call issued when encoding the
+// testdata/Mark.Twain-Tom.Sawyer.txt file. It is used to benchmark the
+// extendMatch implementation.
+//
+// It was generated manually by adding some print statements to the (pure Go)
+// extendMatch implementation:
+//
+// func extendMatch(src []byte, i, j int) int {
+// i0, j0 := i, j
+// for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+// }
+// println("{", i0, ",", j0, ",", j, "},")
+// return j
+// }
+//
+// and running "go test -test.run=EncodeGoldenInput -tags=noasm".
+var extendMatchGoldenTestCases = []struct {
+ i, j, want int
+}{
+ {11, 61, 62},
+ {80, 81, 82},
+ {86, 87, 101},
+ {85, 133, 149},
+ {152, 153, 162},
+ {133, 168, 193},
+ {168, 207, 225},
+ {81, 255, 275},
+ {278, 279, 283},
+ {306, 417, 417},
+ {373, 428, 430},
+ {389, 444, 447},
+ {474, 510, 512},
+ {465, 533, 533},
+ {47, 547, 547},
+ {307, 551, 554},
+ {420, 582, 587},
+ {309, 604, 604},
+ {604, 625, 625},
+ {538, 629, 629},
+ {328, 640, 640},
+ {573, 645, 645},
+ {319, 657, 657},
+ {30, 664, 664},
+ {45, 679, 680},
+ {621, 684, 684},
+ {376, 700, 700},
+ {33, 707, 708},
+ {601, 733, 733},
+ {334, 744, 745},
+ {625, 758, 759},
+ {382, 763, 763},
+ {550, 769, 771},
+ {533, 789, 789},
+ {804, 813, 813},
+ {342, 841, 842},
+ {742, 847, 847},
+ {74, 852, 852},
+ {810, 864, 864},
+ {758, 868, 869},
+ {714, 883, 883},
+ {582, 889, 891},
+ {61, 934, 935},
+ {894, 942, 942},
+ {939, 949, 949},
+ {785, 956, 957},
+ {886, 978, 978},
+ {792, 998, 998},
+ {998, 1005, 1005},
+ {572, 1032, 1032},
+ {698, 1051, 1053},
+ {599, 1067, 1069},
+ {1056, 1079, 1079},
+ {942, 1089, 1090},
+ {831, 1094, 1096},
+ {1088, 1100, 1103},
+ {732, 1113, 1114},
+ {1037, 1118, 1118},
+ {872, 1128, 1130},
+ {1079, 1140, 1142},
+ {332, 1162, 1162},
+ {207, 1168, 1186},
+ {1189, 1190, 1225},
+ {105, 1229, 1230},
+ {79, 1256, 1257},
+ {1190, 1261, 1283},
+ {255, 1306, 1306},
+ {1319, 1339, 1358},
+ {364, 1370, 1370},
+ {955, 1378, 1380},
+ {122, 1403, 1403},
+ {1325, 1407, 1419},
+ {664, 1423, 1424},
+ {941, 1461, 1463},
+ {867, 1477, 1478},
+ {757, 1488, 1489},
+ {1140, 1499, 1499},
+ {31, 1506, 1506},
+ {1487, 1510, 1512},
+ {1089, 1520, 1521},
+ {1467, 1525, 1529},
+ {1394, 1537, 1537},
+ {1499, 1541, 1541},
+ {367, 1558, 1558},
+ {1475, 1564, 1564},
+ {1525, 1568, 1571},
+ {1541, 1582, 1583},
+ {864, 1587, 1588},
+ {704, 1597, 1597},
+ {336, 1602, 1602},
+ {1383, 1613, 1613},
+ {1498, 1617, 1618},
+ {1051, 1623, 1625},
+ {401, 1643, 1645},
+ {1072, 1654, 1655},
+ {1067, 1667, 1669},
+ {699, 1673, 1674},
+ {1587, 1683, 1684},
+ {920, 1696, 1696},
+ {1505, 1710, 1710},
+ {1550, 1723, 1723},
+ {996, 1727, 1727},
+ {833, 1733, 1734},
+ {1638, 1739, 1740},
+ {1654, 1744, 1744},
+ {753, 1761, 1761},
+ {1548, 1773, 1773},
+ {1568, 1777, 1780},
+ {1683, 1793, 1794},
+ {948, 1801, 1801},
+ {1666, 1805, 1808},
+ {1502, 1814, 1814},
+ {1696, 1822, 1822},
+ {502, 1836, 1837},
+ {917, 1843, 1843},
+ {1733, 1854, 1855},
+ {970, 1859, 1859},
+ {310, 1863, 1863},
+ {657, 1872, 1872},
+ {1005, 1876, 1876},
+ {1662, 1880, 1880},
+ {904, 1892, 1892},
+ {1427, 1910, 1910},
+ {1772, 1929, 1930},
+ {1822, 1937, 1940},
+ {1858, 1949, 1950},
+ {1602, 1956, 1956},
+ {1150, 1962, 1962},
+ {1504, 1966, 1967},
+ {51, 1971, 1971},
+ {1605, 1979, 1979},
+ {1458, 1983, 1988},
+ {1536, 2001, 2006},
+ {1373, 2014, 2018},
+ {1494, 2025, 2025},
+ {1667, 2029, 2031},
+ {1592, 2035, 2035},
+ {330, 2045, 2045},
+ {1376, 2053, 2053},
+ {1991, 2058, 2059},
+ {1635, 2065, 2065},
+ {1992, 2073, 2074},
+ {2014, 2080, 2081},
+ {1546, 2085, 2087},
+ {59, 2099, 2099},
+ {1996, 2106, 2106},
+ {1836, 2110, 2110},
+ {2068, 2114, 2114},
+ {1338, 2122, 2122},
+ {1562, 2128, 2130},
+ {1934, 2134, 2134},
+ {2114, 2141, 2142},
+ {977, 2149, 2150},
+ {956, 2154, 2155},
+ {1407, 2162, 2162},
+ {1773, 2166, 2166},
+ {883, 2171, 2171},
+ {623, 2175, 2178},
+ {1520, 2191, 2192},
+ {1162, 2200, 2200},
+ {912, 2204, 2204},
+ {733, 2208, 2208},
+ {1777, 2212, 2215},
+ {1532, 2219, 2219},
+ {718, 2223, 2225},
+ {2069, 2229, 2229},
+ {2207, 2245, 2246},
+ {1139, 2264, 2264},
+ {677, 2274, 2274},
+ {2099, 2279, 2279},
+ {1863, 2283, 2283},
+ {1966, 2305, 2306},
+ {2279, 2313, 2313},
+ {1628, 2319, 2319},
+ {755, 2329, 2329},
+ {1461, 2334, 2334},
+ {2117, 2340, 2340},
+ {2313, 2349, 2349},
+ {1859, 2353, 2353},
+ {1048, 2362, 2362},
+ {895, 2366, 2366},
+ {2278, 2373, 2373},
+ {1884, 2377, 2377},
+ {1402, 2387, 2392},
+ {700, 2398, 2398},
+ {1971, 2402, 2402},
+ {2009, 2419, 2419},
+ {1441, 2426, 2428},
+ {2208, 2432, 2432},
+ {2038, 2436, 2436},
+ {932, 2443, 2443},
+ {1759, 2447, 2448},
+ {744, 2452, 2452},
+ {1875, 2458, 2458},
+ {2405, 2468, 2468},
+ {1596, 2472, 2473},
+ {1953, 2480, 2482},
+ {736, 2487, 2487},
+ {1913, 2493, 2493},
+ {774, 2497, 2497},
+ {1484, 2506, 2508},
+ {2432, 2512, 2512},
+ {752, 2519, 2519},
+ {2497, 2523, 2523},
+ {2409, 2528, 2529},
+ {2122, 2533, 2533},
+ {2396, 2537, 2538},
+ {2410, 2547, 2548},
+ {1093, 2555, 2560},
+ {551, 2564, 2565},
+ {2268, 2569, 2569},
+ {1362, 2580, 2580},
+ {1916, 2584, 2585},
+ {994, 2589, 2590},
+ {1979, 2596, 2596},
+ {1041, 2602, 2602},
+ {2104, 2614, 2616},
+ {2609, 2621, 2628},
+ {2329, 2638, 2638},
+ {2211, 2657, 2658},
+ {2638, 2662, 2667},
+ {2578, 2676, 2679},
+ {2153, 2685, 2686},
+ {2608, 2696, 2697},
+ {598, 2712, 2712},
+ {2620, 2719, 2720},
+ {1888, 2724, 2728},
+ {2709, 2732, 2732},
+ {1365, 2739, 2739},
+ {784, 2747, 2748},
+ {424, 2753, 2753},
+ {2204, 2759, 2759},
+ {812, 2768, 2769},
+ {2455, 2773, 2773},
+ {1722, 2781, 2781},
+ {1917, 2792, 2792},
+ {2705, 2799, 2799},
+ {2685, 2806, 2807},
+ {2742, 2811, 2811},
+ {1370, 2818, 2818},
+ {2641, 2830, 2830},
+ {2512, 2837, 2837},
+ {2457, 2841, 2841},
+ {2756, 2845, 2845},
+ {2719, 2855, 2855},
+ {1423, 2859, 2859},
+ {2849, 2863, 2865},
+ {1474, 2871, 2871},
+ {1161, 2875, 2876},
+ {2282, 2880, 2881},
+ {2746, 2888, 2888},
+ {1783, 2893, 2893},
+ {2401, 2899, 2900},
+ {2632, 2920, 2923},
+ {2422, 2928, 2930},
+ {2715, 2939, 2939},
+ {2162, 2943, 2943},
+ {2859, 2947, 2947},
+ {1910, 2951, 2951},
+ {1431, 2955, 2956},
+ {1439, 2964, 2964},
+ {2501, 2968, 2969},
+ {2029, 2973, 2976},
+ {689, 2983, 2984},
+ {1658, 2988, 2988},
+ {1031, 2996, 2996},
+ {2149, 3001, 3002},
+ {25, 3009, 3013},
+ {2964, 3023, 3023},
+ {953, 3027, 3028},
+ {2359, 3036, 3036},
+ {3023, 3049, 3049},
+ {2880, 3055, 3056},
+ {2973, 3076, 3077},
+ {2874, 3090, 3090},
+ {2871, 3094, 3094},
+ {2532, 3100, 3100},
+ {2938, 3107, 3108},
+ {350, 3115, 3115},
+ {2196, 3119, 3121},
+ {1133, 3127, 3129},
+ {1797, 3134, 3150},
+ {3032, 3158, 3158},
+ {3016, 3172, 3172},
+ {2533, 3179, 3179},
+ {3055, 3187, 3188},
+ {1384, 3192, 3193},
+ {2799, 3199, 3199},
+ {2126, 3203, 3207},
+ {2334, 3215, 3215},
+ {2105, 3220, 3221},
+ {3199, 3229, 3229},
+ {2891, 3233, 3233},
+ {855, 3240, 3240},
+ {1852, 3253, 3256},
+ {2140, 3263, 3263},
+ {1682, 3268, 3270},
+ {3243, 3274, 3274},
+ {924, 3279, 3279},
+ {2212, 3283, 3283},
+ {2596, 3287, 3287},
+ {2999, 3291, 3291},
+ {2353, 3295, 3295},
+ {2480, 3302, 3304},
+ {1959, 3308, 3311},
+ {3000, 3318, 3318},
+ {845, 3330, 3330},
+ {2283, 3334, 3334},
+ {2519, 3342, 3342},
+ {3325, 3346, 3348},
+ {2397, 3353, 3354},
+ {2763, 3358, 3358},
+ {3198, 3363, 3364},
+ {3211, 3368, 3372},
+ {2950, 3376, 3377},
+ {3245, 3388, 3391},
+ {2264, 3398, 3398},
+ {795, 3403, 3403},
+ {3287, 3407, 3407},
+ {3358, 3411, 3411},
+ {3317, 3415, 3415},
+ {3232, 3431, 3431},
+ {2128, 3435, 3437},
+ {3236, 3441, 3441},
+ {3398, 3445, 3446},
+ {2814, 3450, 3450},
+ {3394, 3466, 3466},
+ {2425, 3470, 3470},
+ {3330, 3476, 3476},
+ {1612, 3480, 3480},
+ {1004, 3485, 3486},
+ {2732, 3490, 3490},
+ {1117, 3494, 3495},
+ {629, 3501, 3501},
+ {3087, 3514, 3514},
+ {684, 3518, 3518},
+ {3489, 3522, 3524},
+ {1760, 3529, 3529},
+ {617, 3537, 3537},
+ {3431, 3541, 3541},
+ {997, 3547, 3547},
+ {882, 3552, 3553},
+ {2419, 3558, 3558},
+ {610, 3562, 3563},
+ {1903, 3567, 3569},
+ {3005, 3575, 3575},
+ {3076, 3585, 3586},
+ {3541, 3590, 3590},
+ {3490, 3594, 3594},
+ {1899, 3599, 3599},
+ {3545, 3606, 3606},
+ {3290, 3614, 3615},
+ {2056, 3619, 3620},
+ {3556, 3625, 3625},
+ {3294, 3632, 3633},
+ {637, 3643, 3644},
+ {3609, 3648, 3650},
+ {3175, 3658, 3658},
+ {3498, 3665, 3665},
+ {1597, 3669, 3669},
+ {1983, 3673, 3673},
+ {3215, 3682, 3682},
+ {3544, 3689, 3689},
+ {3694, 3698, 3698},
+ {3228, 3715, 3716},
+ {2594, 3720, 3722},
+ {3573, 3726, 3726},
+ {2479, 3732, 3735},
+ {3191, 3741, 3742},
+ {1113, 3746, 3747},
+ {2844, 3751, 3751},
+ {3445, 3756, 3757},
+ {3755, 3766, 3766},
+ {3421, 3775, 3780},
+ {3593, 3784, 3786},
+ {3263, 3796, 3796},
+ {3469, 3806, 3806},
+ {2602, 3815, 3815},
+ {723, 3819, 3821},
+ {1608, 3826, 3826},
+ {3334, 3830, 3830},
+ {2198, 3835, 3835},
+ {2635, 3840, 3840},
+ {3702, 3852, 3853},
+ {3406, 3858, 3859},
+ {3681, 3867, 3870},
+ {3407, 3880, 3880},
+ {340, 3889, 3889},
+ {3772, 3893, 3893},
+ {593, 3897, 3897},
+ {2563, 3914, 3916},
+ {2981, 3929, 3929},
+ {1835, 3933, 3934},
+ {3906, 3951, 3951},
+ {1459, 3958, 3958},
+ {3889, 3974, 3974},
+ {2188, 3982, 3982},
+ {3220, 3986, 3987},
+ {3585, 3991, 3993},
+ {3712, 3997, 4001},
+ {2805, 4007, 4007},
+ {1879, 4012, 4013},
+ {3618, 4018, 4018},
+ {1145, 4031, 4032},
+ {3901, 4037, 4037},
+ {2772, 4046, 4047},
+ {2802, 4053, 4054},
+ {3299, 4058, 4058},
+ {3725, 4066, 4066},
+ {2271, 4070, 4070},
+ {385, 4075, 4076},
+ {3624, 4089, 4090},
+ {3745, 4096, 4098},
+ {1563, 4102, 4102},
+ {4045, 4106, 4111},
+ {3696, 4115, 4119},
+ {3376, 4125, 4126},
+ {1880, 4130, 4130},
+ {2048, 4140, 4141},
+ {2724, 4149, 4149},
+ {1767, 4156, 4156},
+ {2601, 4164, 4164},
+ {2757, 4168, 4168},
+ {3974, 4172, 4172},
+ {3914, 4178, 4178},
+ {516, 4185, 4185},
+ {1032, 4189, 4190},
+ {3462, 4197, 4198},
+ {3805, 4202, 4203},
+ {3910, 4207, 4212},
+ {3075, 4221, 4221},
+ {3756, 4225, 4226},
+ {1872, 4236, 4237},
+ {3844, 4241, 4241},
+ {3991, 4245, 4249},
+ {2203, 4258, 4258},
+ {3903, 4267, 4268},
+ {705, 4272, 4272},
+ {1896, 4276, 4276},
+ {1955, 4285, 4288},
+ {3746, 4302, 4303},
+ {2672, 4311, 4311},
+ {3969, 4317, 4317},
+ {3883, 4322, 4322},
+ {1920, 4339, 4340},
+ {3527, 4344, 4346},
+ {1160, 4358, 4358},
+ {3648, 4364, 4366},
+ {2711, 4387, 4387},
+ {3619, 4391, 4392},
+ {1944, 4396, 4396},
+ {4369, 4400, 4400},
+ {2736, 4404, 4407},
+ {2546, 4411, 4412},
+ {4390, 4422, 4422},
+ {3610, 4426, 4427},
+ {4058, 4431, 4431},
+ {4374, 4435, 4435},
+ {3463, 4445, 4446},
+ {1813, 4452, 4452},
+ {3669, 4456, 4456},
+ {3830, 4460, 4460},
+ {421, 4464, 4465},
+ {1719, 4471, 4471},
+ {3880, 4475, 4475},
+ {1834, 4485, 4487},
+ {3590, 4491, 4491},
+ {442, 4496, 4497},
+ {4435, 4501, 4501},
+ {3814, 4509, 4509},
+ {987, 4513, 4513},
+ {4494, 4518, 4521},
+ {3218, 4526, 4529},
+ {4221, 4537, 4537},
+ {2778, 4543, 4545},
+ {4422, 4552, 4552},
+ {4031, 4558, 4559},
+ {4178, 4563, 4563},
+ {3726, 4567, 4574},
+ {4027, 4578, 4578},
+ {4339, 4585, 4587},
+ {3796, 4592, 4595},
+ {543, 4600, 4613},
+ {2855, 4620, 4621},
+ {2795, 4627, 4627},
+ {3440, 4631, 4632},
+ {4279, 4636, 4639},
+ {4245, 4643, 4645},
+ {4516, 4649, 4650},
+ {3133, 4654, 4654},
+ {4042, 4658, 4659},
+ {3422, 4663, 4663},
+ {4046, 4667, 4668},
+ {4267, 4672, 4672},
+ {4004, 4676, 4677},
+ {2490, 4682, 4682},
+ {2451, 4697, 4697},
+ {3027, 4705, 4705},
+ {4028, 4717, 4717},
+ {4460, 4721, 4721},
+ {2471, 4725, 4727},
+ {3090, 4735, 4735},
+ {3192, 4739, 4740},
+ {3835, 4760, 4760},
+ {4540, 4764, 4764},
+ {4007, 4772, 4774},
+ {619, 4784, 4784},
+ {3561, 4789, 4791},
+ {3367, 4805, 4805},
+ {4490, 4810, 4811},
+ {2402, 4815, 4815},
+ {3352, 4819, 4822},
+ {2773, 4828, 4828},
+ {4552, 4832, 4832},
+ {2522, 4840, 4841},
+ {316, 4847, 4852},
+ {4715, 4858, 4858},
+ {2959, 4862, 4862},
+ {4858, 4868, 4869},
+ {2134, 4873, 4873},
+ {578, 4878, 4878},
+ {4189, 4889, 4890},
+ {2229, 4894, 4894},
+ {4501, 4898, 4898},
+ {2297, 4903, 4903},
+ {2933, 4909, 4909},
+ {3008, 4913, 4913},
+ {3153, 4917, 4917},
+ {4819, 4921, 4921},
+ {4921, 4932, 4933},
+ {4920, 4944, 4945},
+ {4814, 4954, 4955},
+ {576, 4966, 4966},
+ {1854, 4970, 4971},
+ {1374, 4975, 4976},
+ {3307, 4980, 4980},
+ {974, 4984, 4988},
+ {4721, 4992, 4992},
+ {4898, 4996, 4996},
+ {4475, 5006, 5006},
+ {3819, 5012, 5012},
+ {1948, 5019, 5021},
+ {4954, 5027, 5029},
+ {3740, 5038, 5040},
+ {4763, 5044, 5045},
+ {1936, 5051, 5051},
+ {4844, 5055, 5060},
+ {4215, 5069, 5072},
+ {1146, 5076, 5076},
+ {3845, 5082, 5082},
+ {4865, 5090, 5090},
+ {4624, 5094, 5094},
+ {4815, 5098, 5098},
+ {5006, 5105, 5105},
+ {4980, 5109, 5109},
+ {4795, 5113, 5115},
+ {5043, 5119, 5121},
+ {4782, 5129, 5129},
+ {3826, 5139, 5139},
+ {3876, 5156, 5156},
+ {3111, 5167, 5171},
+ {1470, 5177, 5177},
+ {4431, 5181, 5181},
+ {546, 5189, 5189},
+ {4225, 5193, 5193},
+ {1672, 5199, 5201},
+ {4207, 5205, 5209},
+ {4220, 5216, 5217},
+ {4658, 5224, 5225},
+ {3295, 5235, 5235},
+ {2436, 5239, 5239},
+ {2349, 5246, 5246},
+ {2175, 5250, 5250},
+ {5180, 5257, 5258},
+ {3161, 5263, 5263},
+ {5105, 5272, 5272},
+ {3552, 5282, 5282},
+ {4944, 5299, 5300},
+ {4130, 5312, 5313},
+ {902, 5323, 5323},
+ {913, 5327, 5327},
+ {2987, 5333, 5334},
+ {5150, 5344, 5344},
+ {5249, 5348, 5348},
+ {1965, 5358, 5359},
+ {5330, 5364, 5364},
+ {2012, 5373, 5377},
+ {712, 5384, 5386},
+ {5235, 5390, 5390},
+ {5044, 5398, 5399},
+ {564, 5406, 5406},
+ {39, 5410, 5410},
+ {4642, 5422, 5425},
+ {4421, 5437, 5438},
+ {2347, 5449, 5449},
+ {5333, 5453, 5454},
+ {4136, 5458, 5459},
+ {3793, 5468, 5468},
+ {2243, 5480, 5480},
+ {4889, 5492, 5493},
+ {4295, 5504, 5504},
+ {2785, 5511, 5511},
+ {2377, 5518, 5518},
+ {3662, 5525, 5525},
+ {5097, 5529, 5530},
+ {4781, 5537, 5538},
+ {4697, 5547, 5548},
+ {436, 5552, 5553},
+ {5542, 5558, 5558},
+ {3692, 5562, 5562},
+ {2696, 5568, 5569},
+ {4620, 5578, 5578},
+ {2898, 5590, 5590},
+ {5557, 5596, 5618},
+ {2797, 5623, 5625},
+ {2792, 5629, 5629},
+ {5243, 5633, 5633},
+ {5348, 5637, 5637},
+ {5547, 5643, 5643},
+ {4296, 5654, 5655},
+ {5568, 5662, 5662},
+ {3001, 5670, 5671},
+ {3794, 5679, 5679},
+ {4006, 5685, 5686},
+ {4969, 5690, 5692},
+ {687, 5704, 5704},
+ {4563, 5708, 5708},
+ {1723, 5738, 5738},
+ {649, 5742, 5742},
+ {5163, 5748, 5755},
+ {3907, 5759, 5759},
+ {3074, 5764, 5764},
+ {5326, 5771, 5771},
+ {2951, 5776, 5776},
+ {5181, 5780, 5780},
+ {2614, 5785, 5788},
+ {4709, 5794, 5794},
+ {2784, 5799, 5799},
+ {5518, 5803, 5803},
+ {4155, 5812, 5815},
+ {921, 5819, 5819},
+ {5224, 5823, 5824},
+ {2853, 5830, 5836},
+ {5776, 5840, 5840},
+ {2955, 5844, 5845},
+ {5745, 5853, 5853},
+ {3291, 5857, 5857},
+ {2988, 5861, 5861},
+ {2647, 5865, 5865},
+ {5398, 5869, 5870},
+ {1085, 5874, 5875},
+ {4906, 5881, 5881},
+ {802, 5886, 5886},
+ {5119, 5890, 5893},
+ {5802, 5899, 5900},
+ {3415, 5904, 5904},
+ {5629, 5908, 5908},
+ {3714, 5912, 5914},
+ {5558, 5921, 5921},
+ {2710, 5927, 5928},
+ {1094, 5932, 5934},
+ {2653, 5940, 5941},
+ {4735, 5954, 5954},
+ {5861, 5958, 5958},
+ {1040, 5971, 5971},
+ {5514, 5977, 5977},
+ {5048, 5981, 5982},
+ {5953, 5992, 5993},
+ {3751, 5997, 5997},
+ {4991, 6001, 6002},
+ {5885, 6006, 6007},
+ {5529, 6011, 6012},
+ {4974, 6019, 6020},
+ {5857, 6024, 6024},
+ {3483, 6032, 6032},
+ {3594, 6036, 6036},
+ {1997, 6040, 6040},
+ {5997, 6044, 6047},
+ {5197, 6051, 6051},
+ {1764, 6055, 6055},
+ {6050, 6059, 6059},
+ {5239, 6063, 6063},
+ {5049, 6067, 6067},
+ {5957, 6073, 6074},
+ {1022, 6078, 6078},
+ {3414, 6083, 6084},
+ {3809, 6090, 6090},
+ {4562, 6095, 6096},
+ {5878, 6104, 6104},
+ {594, 6108, 6109},
+ {3353, 6115, 6116},
+ {4992, 6120, 6121},
+ {2424, 6125, 6125},
+ {4484, 6130, 6130},
+ {3900, 6134, 6135},
+ {5793, 6139, 6141},
+ {3562, 6145, 6145},
+ {1438, 6152, 6153},
+ {6058, 6157, 6158},
+ {4411, 6162, 6163},
+ {4590, 6167, 6171},
+ {4748, 6175, 6175},
+ {5517, 6183, 6184},
+ {6095, 6191, 6192},
+ {1471, 6203, 6203},
+ {2643, 6209, 6210},
+ {450, 6220, 6220},
+ {5266, 6226, 6226},
+ {2576, 6233, 6233},
+ {2607, 6239, 6240},
+ {5164, 6244, 6251},
+ {6054, 6255, 6255},
+ {1789, 6260, 6261},
+ {5250, 6265, 6265},
+ {6062, 6273, 6278},
+ {5990, 6282, 6282},
+ {3283, 6286, 6286},
+ {5436, 6290, 6290},
+ {6059, 6294, 6294},
+ {5668, 6298, 6300},
+ {3072, 6324, 6329},
+ {3132, 6338, 6339},
+ {3246, 6343, 6344},
+ {28, 6348, 6349},
+ {1503, 6353, 6355},
+ {6067, 6359, 6359},
+ {3384, 6364, 6364},
+ {545, 6375, 6376},
+ {5803, 6380, 6380},
+ {5522, 6384, 6385},
+ {5908, 6389, 6389},
+ {2796, 6393, 6396},
+ {4831, 6403, 6404},
+ {6388, 6412, 6412},
+ {6005, 6417, 6420},
+ {4450, 6430, 6430},
+ {4050, 6435, 6435},
+ {5372, 6441, 6441},
+ {4378, 6447, 6447},
+ {6199, 6452, 6452},
+ {3026, 6456, 6456},
+ {2642, 6460, 6462},
+ {6392, 6470, 6470},
+ {6459, 6474, 6474},
+ {2829, 6487, 6488},
+ {2942, 6499, 6504},
+ {5069, 6508, 6511},
+ {5341, 6515, 6516},
+ {5853, 6521, 6525},
+ {6104, 6531, 6531},
+ {5759, 6535, 6538},
+ {4672, 6542, 6543},
+ {2443, 6550, 6550},
+ {5109, 6554, 6554},
+ {6494, 6558, 6560},
+ {6006, 6570, 6572},
+ {6424, 6576, 6580},
+ {4693, 6591, 6592},
+ {6439, 6596, 6597},
+ {3179, 6601, 6601},
+ {5299, 6606, 6607},
+ {4148, 6612, 6613},
+ {3774, 6617, 6617},
+ {3537, 6623, 6624},
+ {4975, 6628, 6629},
+ {3848, 6636, 6636},
+ {856, 6640, 6640},
+ {5724, 6645, 6645},
+ {6632, 6651, 6651},
+ {4630, 6656, 6658},
+ {1440, 6662, 6662},
+ {4281, 6666, 6667},
+ {4302, 6671, 6672},
+ {2589, 6676, 6677},
+ {5647, 6681, 6687},
+ {6082, 6691, 6693},
+ {6144, 6698, 6698},
+ {6103, 6709, 6710},
+ {3710, 6714, 6714},
+ {4253, 6718, 6721},
+ {2467, 6730, 6730},
+ {4778, 6734, 6734},
+ {6528, 6738, 6738},
+ {4358, 6747, 6747},
+ {5889, 6753, 6753},
+ {5193, 6757, 6757},
+ {5797, 6761, 6761},
+ {3858, 6765, 6766},
+ {5951, 6776, 6776},
+ {6487, 6781, 6782},
+ {3282, 6786, 6787},
+ {4667, 6797, 6799},
+ {1927, 6803, 6806},
+ {6583, 6810, 6810},
+ {4937, 6814, 6814},
+ {6099, 6824, 6824},
+ {4415, 6835, 6836},
+ {6332, 6840, 6841},
+ {5160, 6850, 6850},
+ {4764, 6854, 6854},
+ {6814, 6858, 6859},
+ {3018, 6864, 6864},
+ {6293, 6868, 6869},
+ {6359, 6877, 6877},
+ {3047, 6884, 6886},
+ {5262, 6890, 6891},
+ {5471, 6900, 6900},
+ {3268, 6910, 6912},
+ {1047, 6916, 6916},
+ {5904, 6923, 6923},
+ {5798, 6933, 6938},
+ {4149, 6942, 6942},
+ {1821, 6946, 6946},
+ {3599, 6952, 6952},
+ {6470, 6957, 6957},
+ {5562, 6961, 6961},
+ {6268, 6965, 6967},
+ {6389, 6971, 6971},
+ {6596, 6975, 6976},
+ {6553, 6980, 6981},
+ {6576, 6985, 6989},
+ {1375, 6993, 6993},
+ {652, 6998, 6998},
+ {4876, 7002, 7003},
+ {5768, 7011, 7013},
+ {3973, 7017, 7017},
+ {6802, 7025, 7025},
+ {6955, 7034, 7036},
+ {6974, 7040, 7040},
+ {5944, 7044, 7044},
+ {6992, 7048, 7054},
+ {6872, 7059, 7059},
+ {2943, 7063, 7063},
+ {6923, 7067, 7067},
+ {5094, 7071, 7071},
+ {4873, 7075, 7075},
+ {5819, 7079, 7079},
+ {5945, 7085, 7085},
+ {1540, 7090, 7091},
+ {2090, 7095, 7095},
+ {5024, 7104, 7105},
+ {6900, 7109, 7109},
+ {6024, 7113, 7114},
+ {6000, 7118, 7120},
+ {2187, 7124, 7125},
+ {6760, 7129, 7130},
+ {5898, 7134, 7136},
+ {7032, 7144, 7144},
+ {4271, 7148, 7148},
+ {3706, 7152, 7152},
+ {6970, 7156, 7157},
+ {7088, 7161, 7163},
+ {2718, 7168, 7169},
+ {5674, 7175, 7175},
+ {4631, 7182, 7182},
+ {7070, 7188, 7189},
+ {6220, 7196, 7196},
+ {3458, 7201, 7202},
+ {2041, 7211, 7212},
+ {1454, 7216, 7216},
+ {5199, 7225, 7227},
+ {3529, 7234, 7234},
+ {6890, 7238, 7238},
+ {3815, 7242, 7243},
+ {5490, 7250, 7253},
+ {6554, 7257, 7263},
+ {5890, 7267, 7269},
+ {6877, 7273, 7273},
+ {4877, 7277, 7277},
+ {2502, 7285, 7285},
+ {1483, 7289, 7295},
+ {7210, 7304, 7308},
+ {6845, 7313, 7316},
+ {7219, 7320, 7320},
+ {7001, 7325, 7329},
+ {6853, 7333, 7334},
+ {6120, 7338, 7338},
+ {6606, 7342, 7343},
+ {7020, 7348, 7350},
+ {3509, 7354, 7354},
+ {7133, 7359, 7363},
+ {3434, 7371, 7374},
+ {2787, 7384, 7384},
+ {7044, 7388, 7388},
+ {6960, 7394, 7395},
+ {6676, 7399, 7400},
+ {7161, 7404, 7404},
+ {7285, 7417, 7418},
+ {4558, 7425, 7426},
+ {4828, 7430, 7430},
+ {6063, 7436, 7436},
+ {3597, 7442, 7442},
+ {914, 7446, 7446},
+ {7320, 7452, 7454},
+ {7267, 7458, 7460},
+ {5076, 7464, 7464},
+ {7430, 7468, 7469},
+ {6273, 7473, 7474},
+ {7440, 7478, 7487},
+ {7348, 7491, 7494},
+ {1021, 7510, 7510},
+ {7473, 7515, 7515},
+ {2823, 7519, 7519},
+ {6264, 7527, 7527},
+ {7302, 7531, 7531},
+ {7089, 7535, 7535},
+ {7342, 7540, 7541},
+ {3688, 7547, 7551},
+ {3054, 7558, 7560},
+ {4177, 7566, 7567},
+ {6691, 7574, 7575},
+ {7156, 7585, 7586},
+ {7147, 7590, 7592},
+ {7407, 7598, 7598},
+ {7403, 7602, 7603},
+ {6868, 7607, 7607},
+ {6636, 7611, 7611},
+ {4805, 7617, 7617},
+ {5779, 7623, 7623},
+ {7063, 7627, 7627},
+ {5079, 7632, 7632},
+ {7377, 7637, 7637},
+ {7337, 7641, 7642},
+ {6738, 7655, 7655},
+ {7338, 7659, 7659},
+ {6541, 7669, 7671},
+ {595, 7675, 7675},
+ {7658, 7679, 7680},
+ {7647, 7685, 7686},
+ {2477, 7690, 7690},
+ {5823, 7694, 7694},
+ {4156, 7699, 7699},
+ {5931, 7703, 7706},
+ {6854, 7712, 7712},
+ {4931, 7718, 7718},
+ {6979, 7722, 7722},
+ {5085, 7727, 7727},
+ {6965, 7732, 7732},
+ {7201, 7736, 7737},
+ {3639, 7741, 7743},
+ {7534, 7749, 7749},
+ {4292, 7753, 7753},
+ {3427, 7759, 7763},
+ {7273, 7767, 7767},
+ {940, 7778, 7778},
+ {4838, 7782, 7785},
+ {4216, 7790, 7792},
+ {922, 7800, 7801},
+ {7256, 7810, 7811},
+ {7789, 7815, 7819},
+ {7225, 7823, 7825},
+ {7531, 7829, 7829},
+ {6997, 7833, 7833},
+ {7757, 7837, 7838},
+ {4129, 7842, 7842},
+ {7333, 7848, 7849},
+ {6776, 7855, 7855},
+ {7527, 7859, 7859},
+ {4370, 7863, 7863},
+ {4512, 7868, 7868},
+ {5679, 7880, 7880},
+ {3162, 7884, 7885},
+ {3933, 7892, 7894},
+ {7804, 7899, 7902},
+ {6363, 7906, 7907},
+ {7848, 7911, 7912},
+ {5584, 7917, 7921},
+ {874, 7926, 7926},
+ {3342, 7930, 7930},
+ {4507, 7935, 7937},
+ {3672, 7943, 7944},
+ {7911, 7948, 7949},
+ {6402, 7956, 7956},
+ {7940, 7960, 7960},
+ {7113, 7964, 7964},
+ {1073, 7968, 7968},
+ {7740, 7974, 7974},
+ {7601, 7978, 7982},
+ {6797, 7987, 7988},
+ {3528, 7994, 7995},
+ {5483, 7999, 7999},
+ {5717, 8011, 8011},
+ {5480, 8017, 8017},
+ {7770, 8023, 8030},
+ {2452, 8034, 8034},
+ {5282, 8047, 8047},
+ {7967, 8051, 8051},
+ {1128, 8058, 8066},
+ {6348, 8070, 8070},
+ {8055, 8077, 8077},
+ {7925, 8081, 8086},
+ {6810, 8090, 8090},
+ {5051, 8101, 8101},
+ {4696, 8109, 8110},
+ {5129, 8119, 8119},
+ {4449, 8123, 8123},
+ {7222, 8127, 8127},
+ {4649, 8131, 8134},
+ {7994, 8138, 8138},
+ {5954, 8148, 8148},
+ {475, 8152, 8153},
+ {7906, 8157, 8157},
+ {7458, 8164, 8166},
+ {7632, 8171, 8173},
+ {3874, 8177, 8183},
+ {4391, 8187, 8187},
+ {561, 8191, 8191},
+ {2417, 8195, 8195},
+ {2357, 8204, 8204},
+ {2269, 8216, 8218},
+ {3968, 8222, 8222},
+ {2200, 8226, 8227},
+ {3453, 8247, 8247},
+ {2439, 8251, 8252},
+ {7175, 8257, 8257},
+ {976, 8262, 8264},
+ {4953, 8273, 8273},
+ {4219, 8278, 8278},
+ {6, 8285, 8291},
+ {5703, 8295, 8296},
+ {5272, 8300, 8300},
+ {8037, 8304, 8304},
+ {8186, 8314, 8314},
+ {8304, 8318, 8318},
+ {8051, 8326, 8326},
+ {8318, 8330, 8330},
+ {2671, 8334, 8335},
+ {2662, 8339, 8339},
+ {8081, 8349, 8350},
+ {3328, 8356, 8356},
+ {2879, 8360, 8362},
+ {8050, 8370, 8371},
+ {8330, 8375, 8376},
+ {8375, 8386, 8386},
+ {4961, 8390, 8390},
+ {1017, 8403, 8405},
+ {3533, 8416, 8416},
+ {4555, 8422, 8422},
+ {6445, 8426, 8426},
+ {8169, 8432, 8432},
+ {990, 8436, 8436},
+ {4102, 8440, 8440},
+ {7398, 8444, 8446},
+ {3480, 8450, 8450},
+ {6324, 8462, 8462},
+ {7948, 8466, 8467},
+ {5950, 8471, 8471},
+ {5189, 8476, 8476},
+ {4026, 8490, 8490},
+ {8374, 8494, 8495},
+ {4682, 8501, 8501},
+ {7387, 8506, 8506},
+ {8164, 8510, 8515},
+ {4079, 8524, 8524},
+ {8360, 8529, 8531},
+ {7446, 8540, 8543},
+ {7971, 8547, 8548},
+ {4311, 8552, 8552},
+ {5204, 8556, 8557},
+ {7968, 8562, 8562},
+ {7847, 8571, 8573},
+ {8547, 8577, 8577},
+ {5320, 8581, 8581},
+ {8556, 8585, 8586},
+ {8504, 8590, 8590},
+ {7669, 8602, 8604},
+ {5874, 8608, 8609},
+ {5828, 8613, 8613},
+ {7998, 8617, 8617},
+ {8519, 8625, 8625},
+ {7250, 8637, 8637},
+ {426, 8641, 8641},
+ {8436, 8645, 8645},
+ {5986, 8649, 8656},
+ {8157, 8660, 8660},
+ {7182, 8665, 8665},
+ {8421, 8675, 8675},
+ {8509, 8681, 8681},
+ {5137, 8688, 8689},
+ {8625, 8694, 8695},
+ {5228, 8701, 8702},
+ {6661, 8714, 8714},
+ {1010, 8719, 8719},
+ {6648, 8723, 8723},
+ {3500, 8728, 8728},
+ {2442, 8735, 8735},
+ {8494, 8740, 8741},
+ {8171, 8753, 8755},
+ {7242, 8763, 8764},
+ {4739, 8768, 8769},
+ {7079, 8773, 8773},
+ {8386, 8777, 8777},
+ {8624, 8781, 8787},
+ {661, 8791, 8794},
+ {8631, 8801, 8801},
+ {7753, 8805, 8805},
+ {4783, 8809, 8810},
+ {1673, 8814, 8815},
+ {6623, 8819, 8819},
+ {4404, 8823, 8823},
+ {8089, 8827, 8828},
+ {8773, 8832, 8832},
+ {5394, 8836, 8836},
+ {6231, 8841, 8843},
+ {1015, 8852, 8853},
+ {6873, 8857, 8857},
+ {6289, 8865, 8865},
+ {8577, 8869, 8869},
+ {8114, 8873, 8875},
+ {8534, 8883, 8883},
+ {3007, 8887, 8888},
+ {8827, 8892, 8893},
+ {4788, 8897, 8900},
+ {5698, 8906, 8907},
+ {7690, 8911, 8911},
+ {6643, 8919, 8919},
+ {7206, 8923, 8924},
+ {7866, 8929, 8931},
+ {8880, 8942, 8942},
+ {8630, 8951, 8952},
+ {6027, 8958, 8958},
+ {7749, 8966, 8967},
+ {4932, 8972, 8973},
+ {8892, 8980, 8981},
+ {634, 9003, 9003},
+ {8109, 9007, 9008},
+ {8777, 9012, 9012},
+ {3981, 9016, 9017},
+ {5723, 9025, 9025},
+ {7662, 9034, 9038},
+ {8955, 9042, 9042},
+ {8070, 9060, 9062},
+ {8910, 9066, 9066},
+ {5363, 9070, 9071},
+ {7699, 9075, 9076},
+ {8991, 9081, 9081},
+ {6850, 9085, 9085},
+ {5811, 9092, 9094},
+ {9079, 9098, 9102},
+ {6456, 9106, 9106},
+ {2259, 9111, 9111},
+ {4752, 9116, 9116},
+ {9060, 9120, 9123},
+ {8090, 9127, 9127},
+ {5305, 9131, 9132},
+ {8623, 9137, 9137},
+ {7417, 9141, 9141},
+ {6564, 9148, 9149},
+ {9126, 9157, 9158},
+ {4285, 9169, 9170},
+ {8698, 9174, 9174},
+ {8869, 9178, 9178},
+ {2572, 9182, 9183},
+ {6482, 9188, 9190},
+ {9181, 9201, 9201},
+ {2968, 9208, 9209},
+ {2506, 9213, 9215},
+ {9127, 9219, 9219},
+ {7910, 9225, 9227},
+ {5422, 9235, 9239},
+ {8813, 9244, 9246},
+ {9178, 9250, 9250},
+ {8748, 9255, 9255},
+ {7354, 9265, 9265},
+ {7767, 9269, 9269},
+ {7710, 9281, 9283},
+ {8826, 9288, 9290},
+ {861, 9295, 9295},
+ {4482, 9301, 9301},
+ {9264, 9305, 9306},
+ {8805, 9310, 9310},
+ {4995, 9314, 9314},
+ {6730, 9318, 9318},
+ {7457, 9328, 9328},
+ {2547, 9335, 9336},
+ {6298, 9340, 9343},
+ {9305, 9353, 9354},
+ {9269, 9358, 9358},
+ {6338, 9370, 9370},
+ {7289, 9376, 9379},
+ {5780, 9383, 9383},
+ {7607, 9387, 9387},
+ {2065, 9392, 9392},
+ {7238, 9396, 9396},
+ {8856, 9400, 9400},
+ {8069, 9412, 9413},
+ {611, 9420, 9420},
+ {7071, 9424, 9424},
+ {3089, 9430, 9431},
+ {7117, 9435, 9438},
+ {1976, 9445, 9445},
+ {6640, 9449, 9449},
+ {5488, 9453, 9453},
+ {8739, 9457, 9459},
+ {5958, 9466, 9466},
+ {7985, 9470, 9470},
+ {8735, 9475, 9475},
+ {5009, 9479, 9479},
+ {8073, 9483, 9484},
+ {2328, 9490, 9491},
+ {9250, 9495, 9495},
+ {4043, 9502, 9502},
+ {7712, 9506, 9506},
+ {9012, 9510, 9510},
+ {9028, 9514, 9515},
+ {2190, 9521, 9524},
+ {9029, 9528, 9528},
+ {9519, 9532, 9532},
+ {9495, 9536, 9536},
+ {8527, 9540, 9540},
+ {2137, 9550, 9550},
+ {8419, 9557, 9557},
+ {9383, 9561, 9562},
+ {8970, 9575, 9578},
+ {8911, 9582, 9582},
+ {7828, 9595, 9596},
+ {6180, 9600, 9600},
+ {8738, 9604, 9607},
+ {7540, 9611, 9612},
+ {9599, 9616, 9618},
+ {9187, 9623, 9623},
+ {9294, 9628, 9629},
+ {4536, 9639, 9639},
+ {3867, 9643, 9643},
+ {6305, 9648, 9648},
+ {1617, 9654, 9657},
+ {5762, 9666, 9666},
+ {8314, 9670, 9670},
+ {9666, 9674, 9675},
+ {9506, 9679, 9679},
+ {9669, 9685, 9686},
+ {9683, 9690, 9690},
+ {8763, 9697, 9698},
+ {7468, 9702, 9702},
+ {460, 9707, 9707},
+ {3115, 9712, 9712},
+ {9424, 9716, 9717},
+ {7359, 9721, 9724},
+ {7547, 9728, 9729},
+ {7151, 9733, 9738},
+ {7627, 9742, 9742},
+ {2822, 9747, 9747},
+ {8247, 9751, 9753},
+ {9550, 9758, 9758},
+ {7585, 9762, 9763},
+ {1002, 9767, 9767},
+ {7168, 9772, 9773},
+ {6941, 9777, 9780},
+ {9728, 9784, 9786},
+ {9770, 9792, 9796},
+ {6411, 9801, 9802},
+ {3689, 9806, 9808},
+ {9575, 9814, 9816},
+ {7025, 9820, 9821},
+ {2776, 9826, 9826},
+ {9806, 9830, 9830},
+ {9820, 9834, 9835},
+ {9800, 9839, 9847},
+ {9834, 9851, 9852},
+ {9829, 9856, 9862},
+ {1400, 9866, 9866},
+ {3197, 9870, 9871},
+ {9851, 9875, 9876},
+ {9742, 9883, 9884},
+ {3362, 9888, 9889},
+ {9883, 9893, 9893},
+ {5711, 9899, 9910},
+ {7806, 9915, 9915},
+ {9120, 9919, 9919},
+ {9715, 9925, 9934},
+ {2580, 9938, 9938},
+ {4907, 9942, 9944},
+ {6239, 9953, 9954},
+ {6961, 9963, 9963},
+ {5295, 9967, 9968},
+ {1915, 9972, 9973},
+ {3426, 9983, 9985},
+ {9875, 9994, 9995},
+ {6942, 9999, 9999},
+ {6621, 10005, 10005},
+ {7589, 10010, 10012},
+ {9286, 10020, 10020},
+ {838, 10024, 10024},
+ {9980, 10028, 10031},
+ {9994, 10035, 10041},
+ {2702, 10048, 10051},
+ {2621, 10059, 10059},
+ {10054, 10065, 10065},
+ {8612, 10073, 10074},
+ {7033, 10078, 10078},
+ {916, 10082, 10082},
+ {10035, 10086, 10087},
+ {8613, 10097, 10097},
+ {9919, 10107, 10108},
+ {6133, 10114, 10115},
+ {10059, 10119, 10119},
+ {10065, 10126, 10127},
+ {7732, 10131, 10131},
+ {7155, 10135, 10136},
+ {6728, 10140, 10140},
+ {6162, 10144, 10145},
+ {4724, 10150, 10150},
+ {1665, 10154, 10154},
+ {10126, 10163, 10163},
+ {9783, 10168, 10168},
+ {1715, 10172, 10173},
+ {7152, 10177, 10182},
+ {8760, 10187, 10187},
+ {7829, 10191, 10191},
+ {9679, 10196, 10196},
+ {9369, 10201, 10201},
+ {2928, 10206, 10208},
+ {6951, 10214, 10217},
+ {5633, 10221, 10221},
+ {7199, 10225, 10225},
+ {10118, 10230, 10231},
+ {9999, 10235, 10236},
+ {10045, 10240, 10249},
+ {5565, 10256, 10256},
+ {9866, 10261, 10261},
+ {10163, 10268, 10268},
+ {9869, 10272, 10272},
+ {9789, 10276, 10283},
+ {10235, 10287, 10288},
+ {10214, 10298, 10299},
+ {6971, 10303, 10303},
+ {3346, 10307, 10307},
+ {10185, 10311, 10312},
+ {9993, 10318, 10320},
+ {2779, 10332, 10334},
+ {1726, 10338, 10338},
+ {741, 10354, 10360},
+ {10230, 10372, 10373},
+ {10260, 10384, 10385},
+ {10131, 10389, 10398},
+ {6946, 10406, 10409},
+ {10158, 10413, 10420},
+ {10123, 10424, 10424},
+ {6157, 10428, 10429},
+ {4518, 10434, 10434},
+ {9893, 10438, 10438},
+ {9865, 10442, 10446},
+ {7558, 10454, 10454},
+ {10434, 10460, 10460},
+ {10064, 10466, 10468},
+ {2703, 10472, 10474},
+ {9751, 10478, 10479},
+ {6714, 10485, 10485},
+ {8020, 10490, 10490},
+ {10303, 10494, 10494},
+ {3521, 10499, 10500},
+ {9281, 10513, 10515},
+ {6028, 10519, 10523},
+ {9387, 10527, 10527},
+ {7614, 10531, 10531},
+ {3611, 10536, 10536},
+ {9162, 10540, 10540},
+ {10081, 10546, 10547},
+ {10034, 10560, 10562},
+ {6726, 10567, 10571},
+ {8237, 10575, 10575},
+ {10438, 10579, 10583},
+ {10140, 10587, 10587},
+ {5784, 10592, 10592},
+ {9819, 10597, 10600},
+ {10567, 10604, 10608},
+ {9335, 10613, 10613},
+ {8300, 10617, 10617},
+ {10575, 10621, 10621},
+ {9678, 10625, 10626},
+ {9962, 10632, 10633},
+ {10535, 10637, 10638},
+ {8199, 10642, 10642},
+ {10372, 10647, 10648},
+ {10637, 10656, 10657},
+ {10579, 10667, 10668},
+ {10465, 10677, 10680},
+ {6702, 10684, 10685},
+ {10073, 10691, 10692},
+ {4505, 10696, 10697},
+ {9042, 10701, 10701},
+ {6460, 10705, 10706},
+ {10010, 10714, 10716},
+ {10656, 10720, 10722},
+ {7282, 10727, 10729},
+ {2327, 10733, 10733},
+ {2491, 10740, 10741},
+ {10704, 10748, 10750},
+ {6465, 10754, 10754},
+ {10647, 10758, 10759},
+ {10424, 10763, 10763},
+ {10748, 10776, 10776},
+ {10546, 10780, 10781},
+ {10758, 10785, 10786},
+ {10287, 10790, 10797},
+ {10785, 10801, 10807},
+ {10240, 10811, 10826},
+ {9509, 10830, 10830},
+ {2579, 10836, 10838},
+ {9801, 10843, 10845},
+ {7555, 10849, 10850},
+ {10776, 10860, 10865},
+ {8023, 10869, 10869},
+ {10046, 10876, 10884},
+ {10253, 10888, 10892},
+ {9941, 10897, 10897},
+ {7898, 10901, 10905},
+ {6725, 10909, 10913},
+ {10757, 10921, 10923},
+ {10160, 10931, 10931},
+ {10916, 10935, 10942},
+ {10261, 10946, 10946},
+ {10318, 10952, 10954},
+ {5911, 10959, 10961},
+ {10801, 10965, 10966},
+ {10946, 10970, 10977},
+ {10592, 10982, 10984},
+ {9913, 10988, 10990},
+ {8510, 10994, 10996},
+ {9419, 11000, 11001},
+ {6765, 11006, 11007},
+ {10725, 11011, 11011},
+ {5537, 11017, 11019},
+ {9208, 11024, 11025},
+ {5850, 11030, 11030},
+ {9610, 11034, 11036},
+ {8846, 11041, 11047},
+ {9697, 11051, 11051},
+ {1622, 11055, 11058},
+ {2370, 11062, 11062},
+ {8393, 11067, 11067},
+ {9756, 11071, 11071},
+ {10172, 11076, 11076},
+ {27, 11081, 11081},
+ {7357, 11087, 11092},
+ {8151, 11104, 11106},
+ {6115, 11110, 11110},
+ {10667, 11114, 11115},
+ {11099, 11121, 11123},
+ {10705, 11127, 11127},
+ {8938, 11131, 11131},
+ {11114, 11135, 11136},
+ {1390, 11140, 11141},
+ {10964, 11146, 11148},
+ {11140, 11152, 11155},
+ {9813, 11159, 11166},
+ {624, 11171, 11172},
+ {3118, 11177, 11179},
+ {11029, 11184, 11186},
+ {10186, 11190, 11190},
+ {10306, 11196, 11196},
+ {8665, 11201, 11201},
+ {7382, 11205, 11205},
+ {1100, 11210, 11210},
+ {2337, 11216, 11217},
+ {1609, 11221, 11223},
+ {5763, 11228, 11229},
+ {5220, 11233, 11233},
+ {11061, 11241, 11241},
+ {10617, 11246, 11246},
+ {11190, 11250, 11251},
+ {10144, 11255, 11256},
+ {11232, 11260, 11260},
+ {857, 11264, 11265},
+ {10994, 11269, 11271},
+ {3879, 11280, 11281},
+ {11184, 11287, 11289},
+ {9611, 11293, 11295},
+ {11250, 11299, 11299},
+ {4495, 11304, 11304},
+ {7574, 11308, 11309},
+ {9814, 11315, 11317},
+ {1713, 11321, 11324},
+ {1905, 11328, 11328},
+ {8745, 11335, 11340},
+ {8883, 11351, 11351},
+ {8119, 11358, 11358},
+ {1842, 11363, 11364},
+ {11237, 11368, 11368},
+ {8814, 11373, 11374},
+ {5684, 11378, 11378},
+ {11011, 11382, 11382},
+ {6520, 11389, 11389},
+ {11183, 11393, 11396},
+ {1790, 11404, 11404},
+ {9536, 11408, 11408},
+ {11298, 11418, 11419},
+ {3929, 11425, 11425},
+ {5588, 11429, 11429},
+ {8476, 11436, 11436},
+ {4096, 11440, 11442},
+ {11084, 11446, 11454},
+ {10603, 11458, 11463},
+ {7332, 11472, 11474},
+ {7611, 11483, 11486},
+ {4836, 11490, 11491},
+ {10024, 11495, 11495},
+ {4917, 11501, 11506},
+ {6486, 11510, 11512},
+ {11269, 11516, 11518},
+ {3603, 11522, 11525},
+ {11126, 11535, 11535},
+ {11418, 11539, 11541},
+ {11408, 11545, 11545},
+ {9021, 11549, 11552},
+ {6745, 11557, 11557},
+ {5118, 11561, 11564},
+ {7590, 11568, 11569},
+ {4426, 11573, 11578},
+ {9790, 11582, 11583},
+ {6447, 11587, 11587},
+ {10229, 11591, 11594},
+ {10457, 11598, 11598},
+ {10168, 11604, 11604},
+ {10543, 11608, 11608},
+ {7404, 11612, 11612},
+ {11127, 11616, 11616},
+ {3337, 11620, 11620},
+ {11501, 11624, 11628},
+ {4543, 11633, 11635},
+ {8449, 11642, 11642},
+ {4943, 11646, 11648},
+ {10526, 11652, 11654},
+ {11620, 11659, 11659},
+ {8927, 11664, 11669},
+ {532, 11673, 11673},
+ {10513, 11677, 11679},
+ {10428, 11683, 11683},
+ {10999, 11689, 11690},
+ {9469, 11695, 11695},
+ {3606, 11699, 11699},
+ {9560, 11708, 11709},
+ {1564, 11714, 11714},
+ {10527, 11718, 11718},
+ {3071, 11723, 11726},
+ {11590, 11731, 11732},
+ {6605, 11737, 11737},
+ {11624, 11741, 11745},
+ {7822, 11749, 11752},
+ {5269, 11757, 11758},
+ {1339, 11767, 11767},
+ {1363, 11771, 11773},
+ {3704, 11777, 11777},
+ {10952, 11781, 11783},
+ {6764, 11793, 11795},
+ {8675, 11800, 11800},
+ {9963, 11804, 11804},
+ {11573, 11808, 11809},
+ {9548, 11813, 11813},
+ {11591, 11817, 11818},
+ {11446, 11822, 11822},
+ {9224, 11828, 11828},
+ {3158, 11836, 11836},
+ {10830, 11840, 11840},
+ {7234, 11846, 11846},
+ {11299, 11850, 11850},
+ {11544, 11854, 11855},
+ {11498, 11859, 11859},
+ {10993, 11865, 11868},
+ {9720, 11872, 11878},
+ {10489, 11882, 11890},
+ {11712, 11898, 11904},
+ {11516, 11908, 11910},
+ {11568, 11914, 11915},
+ {10177, 11919, 11924},
+ {11363, 11928, 11929},
+ {10494, 11933, 11933},
+ {9870, 11937, 11938},
+ {9427, 11942, 11942},
+ {11481, 11949, 11949},
+ {6030, 11955, 11957},
+ {11718, 11961, 11961},
+ {10531, 11965, 11983},
+ {5126, 11987, 11987},
+ {7515, 11991, 11991},
+ {10646, 11996, 11997},
+ {2947, 12001, 12001},
+ {9582, 12009, 12010},
+ {6202, 12017, 12018},
+ {11714, 12022, 12022},
+ {9235, 12033, 12037},
+ {9721, 12041, 12044},
+ {11932, 12051, 12052},
+ {12040, 12056, 12056},
+ {12051, 12060, 12060},
+ {11601, 12066, 12066},
+ {8426, 12070, 12070},
+ {4053, 12077, 12077},
+ {4262, 12081, 12081},
+ {9761, 12086, 12088},
+ {11582, 12092, 12093},
+ {10965, 12097, 12098},
+ {11803, 12103, 12104},
+ {11933, 12108, 12109},
+ {10688, 12117, 12117},
+ {12107, 12125, 12126},
+ {6774, 12130, 12132},
+ {6286, 12137, 12137},
+ {9543, 12141, 12141},
+ {12097, 12145, 12146},
+ {10790, 12150, 12150},
+ {10125, 12154, 12156},
+ {12125, 12164, 12164},
+ {12064, 12168, 12172},
+ {10811, 12178, 12188},
+ {12092, 12192, 12193},
+ {10058, 12197, 12198},
+ {11611, 12211, 12212},
+ {3459, 12216, 12216},
+ {10291, 12225, 12228},
+ {12191, 12232, 12234},
+ {12145, 12238, 12238},
+ {12001, 12242, 12250},
+ {3840, 12255, 12255},
+ {12216, 12259, 12259},
+ {674, 12272, 12272},
+ {12141, 12276, 12276},
+ {10766, 12280, 12280},
+ {11545, 12284, 12284},
+ {6496, 12290, 12290},
+ {11381, 12294, 12295},
+ {603, 12302, 12303},
+ {12276, 12308, 12308},
+ {11850, 12313, 12314},
+ {565, 12319, 12319},
+ {9351, 12324, 12324},
+ {11822, 12328, 12328},
+ {2691, 12333, 12334},
+ {11840, 12338, 12338},
+ {11070, 12343, 12343},
+ {9510, 12347, 12347},
+ {11024, 12352, 12353},
+ {7173, 12359, 12359},
+ {517, 12363, 12363},
+ {6311, 12367, 12368},
+ {11367, 12372, 12373},
+ {12008, 12377, 12377},
+ {11372, 12382, 12384},
+ {11358, 12391, 12392},
+ {11382, 12396, 12396},
+ {6882, 12400, 12401},
+ {11246, 12405, 12405},
+ {8359, 12409, 12412},
+ {10154, 12418, 12418},
+ {12016, 12425, 12426},
+ {8972, 12434, 12435},
+ {10478, 12439, 12440},
+ {12395, 12449, 12449},
+ {11612, 12454, 12454},
+ {12347, 12458, 12458},
+ {10700, 12466, 12467},
+ {3637, 12471, 12476},
+ {1042, 12480, 12481},
+ {6747, 12488, 12488},
+ {12396, 12492, 12493},
+ {9420, 12497, 12497},
+ {11285, 12501, 12510},
+ {4470, 12515, 12515},
+ {9374, 12519, 12519},
+ {11293, 12528, 12528},
+ {2058, 12534, 12535},
+ {6521, 12539, 12539},
+ {12492, 12543, 12543},
+ {3043, 12547, 12547},
+ {2982, 12551, 12553},
+ {11030, 12557, 12563},
+ {7636, 12568, 12568},
+ {9639, 12572, 12572},
+ {12543, 12576, 12576},
+ {5989, 12580, 12583},
+ {11051, 12587, 12587},
+ {1061, 12592, 12594},
+ {12313, 12599, 12601},
+ {11846, 12605, 12605},
+ {12576, 12609, 12609},
+ {11040, 12618, 12625},
+ {12479, 12629, 12629},
+ {6903, 12633, 12633},
+ {12322, 12639, 12639},
+ {12253, 12643, 12645},
+ {5594, 12651, 12651},
+ {12522, 12655, 12655},
+ {11703, 12659, 12659},
+ {1377, 12665, 12665},
+ {8022, 12669, 12669},
+ {12280, 12674, 12674},
+ {9023, 12680, 12681},
+ {12328, 12685, 12685},
+ {3085, 12689, 12693},
+ {4700, 12698, 12698},
+ {10224, 12702, 12702},
+ {8781, 12706, 12706},
+ {1651, 12710, 12710},
+ {12458, 12714, 12714},
+ {12005, 12718, 12721},
+ {11908, 12725, 12726},
+ {8202, 12733, 12733},
+ {11708, 12739, 12740},
+ {12599, 12744, 12745},
+ {12284, 12749, 12749},
+ {5285, 12756, 12756},
+ {12055, 12775, 12777},
+ {6919, 12782, 12782},
+ {12242, 12786, 12786},
+ {12009, 12790, 12790},
+ {9628, 12794, 12796},
+ {11354, 12801, 12802},
+ {10225, 12806, 12807},
+ {579, 12813, 12813},
+ {8935, 12817, 12822},
+ {8753, 12827, 12829},
+ {11006, 12835, 12835},
+ {858, 12841, 12845},
+ {476, 12849, 12849},
+ {7667, 12854, 12854},
+ {12760, 12860, 12871},
+ {11677, 12875, 12877},
+ {12714, 12881, 12881},
+ {12731, 12885, 12890},
+ {7108, 12894, 12896},
+ {1165, 12900, 12900},
+ {4021, 12906, 12906},
+ {10829, 12910, 12911},
+ {12331, 12915, 12915},
+ {8887, 12919, 12921},
+ {11639, 12925, 12925},
+ {7964, 12929, 12929},
+ {12528, 12937, 12937},
+ {8148, 12941, 12941},
+ {12770, 12948, 12950},
+ {12609, 12954, 12954},
+ {12685, 12958, 12958},
+ {2803, 12962, 12962},
+ {9561, 12966, 12966},
+ {6671, 12972, 12973},
+ {12056, 12977, 12977},
+ {6380, 12981, 12981},
+ {12048, 12985, 12985},
+ {11961, 12989, 12993},
+ {3368, 12997, 12999},
+ {6634, 13004, 13004},
+ {6775, 13009, 13010},
+ {12136, 13014, 13019},
+ {10341, 13023, 13023},
+ {13002, 13027, 13027},
+ {10587, 13031, 13031},
+ {10307, 13035, 13035},
+ {12736, 13039, 13039},
+ {12744, 13043, 13044},
+ {6175, 13048, 13048},
+ {9702, 13053, 13054},
+ {662, 13059, 13061},
+ {12718, 13065, 13068},
+ {12893, 13072, 13075},
+ {8299, 13086, 13091},
+ {12604, 13095, 13096},
+ {12848, 13100, 13101},
+ {12749, 13105, 13105},
+ {12526, 13109, 13114},
+ {9173, 13122, 13122},
+ {12769, 13128, 13128},
+ {13038, 13132, 13132},
+ {12725, 13136, 13137},
+ {12639, 13146, 13146},
+ {9711, 13150, 13151},
+ {12137, 13155, 13155},
+ {13039, 13159, 13159},
+ {4681, 13163, 13164},
+ {12954, 13168, 13168},
+ {13158, 13175, 13176},
+ {13105, 13180, 13180},
+ {10754, 13184, 13184},
+ {13167, 13188, 13188},
+ {12658, 13192, 13192},
+ {4294, 13199, 13200},
+ {11682, 13204, 13205},
+ {11695, 13209, 13209},
+ {11076, 13214, 13214},
+ {12232, 13218, 13218},
+ {9399, 13223, 13224},
+ {12880, 13228, 13229},
+ {13048, 13234, 13234},
+ {9701, 13238, 13239},
+ {13209, 13243, 13243},
+ {3658, 13248, 13248},
+ {3698, 13252, 13254},
+ {12237, 13260, 13260},
+ {8872, 13266, 13266},
+ {12957, 13272, 13273},
+ {1393, 13281, 13281},
+ {2013, 13285, 13288},
+ {4244, 13296, 13299},
+ {9428, 13303, 13303},
+ {12702, 13307, 13307},
+ {13078, 13311, 13311},
+ {6071, 13315, 13315},
+ {3061, 13319, 13319},
+ {2051, 13324, 13324},
+ {11560, 13328, 13331},
+ {6584, 13336, 13336},
+ {8482, 13340, 13340},
+ {5331, 13344, 13344},
+ {4171, 13348, 13348},
+ {8501, 13352, 13352},
+ {9219, 13356, 13356},
+ {9473, 13360, 13363},
+ {12881, 13367, 13367},
+ {13065, 13371, 13375},
+ {2979, 13379, 13384},
+ {1518, 13388, 13388},
+ {11177, 13392, 13392},
+ {9457, 13398, 13398},
+ {12293, 13407, 13410},
+ {3697, 13414, 13417},
+ {10338, 13425, 13425},
+ {13367, 13429, 13429},
+ {11074, 13433, 13437},
+ {4201, 13441, 13443},
+ {1812, 13447, 13448},
+ {13360, 13452, 13456},
+ {13188, 13463, 13463},
+ {9732, 13470, 13470},
+ {11332, 13477, 13477},
+ {9918, 13487, 13487},
+ {6337, 13497, 13497},
+ {13429, 13501, 13501},
+ {11413, 13505, 13505},
+ {4685, 13512, 13513},
+ {13136, 13517, 13519},
+ {7416, 13528, 13530},
+ {12929, 13534, 13534},
+ {11110, 13539, 13539},
+ {11521, 13543, 13543},
+ {12825, 13553, 13553},
+ {13447, 13557, 13558},
+ {12299, 13562, 13563},
+ {9003, 13570, 13570},
+ {12500, 13577, 13577},
+ {13501, 13581, 13581},
+ {9392, 13586, 13586},
+ {12454, 13590, 13590},
+ {6189, 13595, 13595},
+ {13053, 13599, 13599},
+ {11881, 13604, 13604},
+ {13159, 13608, 13608},
+ {4894, 13612, 13612},
+ {13221, 13621, 13621},
+ {8950, 13625, 13625},
+ {13533, 13629, 13629},
+ {9633, 13633, 13633},
+ {7892, 13637, 13639},
+ {13581, 13643, 13643},
+ {13616, 13647, 13649},
+ {12794, 13653, 13654},
+ {8919, 13659, 13659},
+ {9674, 13663, 13663},
+ {13577, 13668, 13668},
+ {12966, 13672, 13672},
+ {12659, 13676, 13683},
+ {6124, 13688, 13688},
+ {9225, 13693, 13695},
+ {11833, 13702, 13702},
+ {12904, 13709, 13717},
+ {13647, 13721, 13722},
+ {11687, 13726, 13727},
+ {12434, 13731, 13732},
+ {12689, 13736, 13742},
+ {13168, 13746, 13746},
+ {6151, 13751, 13752},
+ {11821, 13756, 13757},
+ {6467, 13764, 13764},
+ {5730, 13769, 13769},
+ {5136, 13780, 13780},
+ {724, 13784, 13785},
+ {13517, 13789, 13791},
+ {640, 13795, 13796},
+ {7721, 13800, 13802},
+ {11121, 13806, 13807},
+ {5791, 13811, 13815},
+ {12894, 13819, 13819},
+ {11100, 13824, 13824},
+ {7011, 13830, 13830},
+ {7129, 13834, 13837},
+ {13833, 13841, 13841},
+ {11276, 13847, 13847},
+ {13621, 13853, 13853},
+ {13589, 13862, 13863},
+ {12989, 13867, 13867},
+ {12789, 13871, 13871},
+ {1239, 13875, 13875},
+ {4675, 13879, 13881},
+ {4686, 13885, 13885},
+ {707, 13889, 13889},
+ {5449, 13897, 13898},
+ {13867, 13902, 13903},
+ {10613, 13908, 13908},
+ {13789, 13912, 13914},
+ {4451, 13918, 13919},
+ {9200, 13924, 13924},
+ {2011, 13930, 13930},
+ {11433, 13934, 13936},
+ {4695, 13942, 13943},
+ {9435, 13948, 13951},
+ {13688, 13955, 13957},
+ {11694, 13961, 13962},
+ {5712, 13966, 13966},
+ {5991, 13970, 13972},
+ {13477, 13976, 13976},
+ {10213, 13987, 13987},
+ {11839, 13991, 13993},
+ {12272, 13997, 13997},
+ {6206, 14001, 14001},
+ {13179, 14006, 14007},
+ {2939, 14011, 14011},
+ {12972, 14016, 14017},
+ {13918, 14021, 14022},
+ {7436, 14026, 14027},
+ {7678, 14032, 14034},
+ {13586, 14040, 14040},
+ {13347, 14044, 14044},
+ {13109, 14048, 14051},
+ {9244, 14055, 14057},
+ {13315, 14061, 14061},
+ {13276, 14067, 14067},
+ {11435, 14073, 14074},
+ {13853, 14078, 14078},
+ {13452, 14082, 14082},
+ {14044, 14087, 14087},
+ {4440, 14091, 14095},
+ {4479, 14100, 14103},
+ {9395, 14107, 14109},
+ {6834, 14119, 14119},
+ {10458, 14123, 14124},
+ {1429, 14129, 14129},
+ {8443, 14135, 14135},
+ {10365, 14140, 14140},
+ {5267, 14145, 14145},
+ {11834, 14151, 14153},
+}
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..0cf5e37
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the snappy block-based compression format.
+// It aims for very high speeds and reasonable compression.
+//
+// The C++ snappy implementation is at https://github.com/google/snappy
+package snappy // import "github.com/golang/snappy"
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
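+
+// A worked example (an editor's illustration derived from the description
+// above, not upstream text): the encoded block
+//
+//	"\x06" + "\x04ab" + "\x01\x02"
+//
+// decodes to "ababab". "\x06" is the varint-encoded decoded length (6).
+// "\x04" is a literal tag (l == 0, m == 1), so the next 1 + 1 == 2 bytes
+// ("ab") are literal. "\x01" is a tagCopy1 byte (l == 1, m == 0): the copy
+// length is 4 + (m & 7) == 4, the high 3 bits of m put zeroes in offset
+// bits 8-10, and the next byte "\x02" supplies offset bits 0-7, so 4 bytes
+// are copied from 2 bytes back, appending "abab".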
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+
+ // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
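+ //
+ // (Editor's note on the arithmetic, assuming the usual MaxEncodedLen
+ // bound of 32 + n + n/6: for n = maxBlockSize = 65536 this is
+ // 32 + 65536 + 10922 = 76490.)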
+ maxEncodedLenOfMaxBlockSize = 76490
+
+ obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+ obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
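+//
+// (Editor's gloss, not upstream text: the return statement applies the
+// spec's checksum masking, masked = ((x >> 15) | (x << 17)) + 0xa282ead8;
+// the framing format masks checksums because checksumming data that
+// contains embedded CRCs can be problematic.)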
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/golang/snappy/snappy_test.go b/vendor/github.com/golang/snappy/snappy_test.go
new file mode 100644
index 0000000..2712710
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy_test.go
@@ -0,0 +1,1353 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "bytes"
+ "encoding/binary"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+var (
+ download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
+ testdataDir = flag.String("testdataDir", "testdata", "Directory containing the test data")
+ benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data")
+)
+
+// goEncoderShouldMatchCppEncoder is whether to test that the algorithm used by
+// Go's encoder matches byte-for-byte what the C++ snappy encoder produces, on
+// this GOARCH. There is more than one valid encoding of any given input, and
+// there is more than one good algorithm along the frontier of trading off
+// throughput for output size. Nonetheless, we presume that the C++ encoder's
+// algorithm is a good one and has been tested on a wide range of inputs, so
+// matching that exactly should mean that the Go encoder's algorithm is also
+// good, without needing to gather our own corpus of test data.
+//
+// The exact algorithm used by the C++ code is potentially endian dependent, as
+// it puns a byte pointer to a uint32 pointer to load, hash and compare 4 bytes
+// at a time. The Go implementation is endian agnostic, in that its output is
+// the same (as little-endian C++ code), regardless of the CPU's endianness.
+//
+// Thus, when comparing Go's output to C++ output generated beforehand, such as
+// the "testdata/pi.txt.rawsnappy" file generated by C++ code on a little-
+// endian system, we can run that test regardless of the runtime.GOARCH value.
+//
+// When comparing Go's output to dynamically generated C++ output, i.e. the
+// result of fork/exec'ing a C++ program, we can run that test only on
+// little-endian systems, because the C++ output might be different on
+// big-endian systems. The runtime package doesn't export endianness per se,
+// but we can restrict this match-C++ test to common little-endian systems.
+const goEncoderShouldMatchCppEncoder = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm"
+
+func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) {
+ got := maxEncodedLenOfMaxBlockSize
+ want := MaxEncodedLen(maxBlockSize)
+ if got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+}
+
+func cmp(a, b []byte) error {
+ if bytes.Equal(a, b) {
+ return nil
+ }
+ if len(a) != len(b) {
+ return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
+ }
+ }
+ return nil
+}
+
+func roundtrip(b, ebuf, dbuf []byte) error {
+ d, err := Decode(dbuf, Encode(ebuf, b))
+ if err != nil {
+ return fmt.Errorf("decoding error: %v", err)
+ }
+ if err := cmp(d, b); err != nil {
+ return fmt.Errorf("roundtrip mismatch: %v", err)
+ }
+ return nil
+}
+
+func TestEmpty(t *testing.T) {
+ if err := roundtrip(nil, nil, nil); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSmallCopy(t *testing.T) {
+ for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
+ for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
+ for i := 0; i < 32; i++ {
+ s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
+ if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
+ t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
+ }
+ }
+ }
+ }
+}
+
+func TestSmallRand(t *testing.T) {
+ rng := rand.New(rand.NewSource(1))
+ for n := 1; n < 20000; n += 23 {
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = uint8(rng.Intn(256))
+ }
+ if err := roundtrip(b, nil, nil); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestSmallRegular(t *testing.T) {
+ for n := 1; n < 20000; n += 23 {
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = uint8(i%10 + 'a')
+ }
+ if err := roundtrip(b, nil, nil); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestInvalidVarint(t *testing.T) {
+ testCases := []struct {
+ desc string
+ input string
+ }{{
+ "invalid varint, final byte has continuation bit set",
+ "\xff",
+ }, {
+ "invalid varint, value overflows uint64",
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00",
+ }, {
+ // https://github.com/google/snappy/blob/master/format_description.txt
+ // says that "the stream starts with the uncompressed length [as a
+ // varint] (up to a maximum of 2^32 - 1)".
+ "valid varint (as uint64), but value overflows uint32",
+ "\x80\x80\x80\x80\x10",
+ }}
+
+ for _, tc := range testCases {
+ input := []byte(tc.input)
+ if _, err := DecodedLen(input); err != ErrCorrupt {
+ t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err)
+ }
+ if _, err := Decode(nil, input); err != ErrCorrupt {
+ t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err)
+ }
+ }
+}
+
+func TestDecode(t *testing.T) {
+ lit40Bytes := make([]byte, 40)
+ for i := range lit40Bytes {
+ lit40Bytes[i] = byte(i)
+ }
+ lit40 := string(lit40Bytes)
+
+ testCases := []struct {
+ desc string
+ input string
+ want string
+ wantErr error
+ }{{
+ `decodedLen=0; valid input`,
+ "\x00",
+ "",
+ nil,
+ }, {
+ `decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`,
+ "\x03" + "\x08\xff\xff\xff",
+ "\xff\xff\xff",
+ nil,
+ }, {
+ `decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`,
+ "\x02" + "\x08\xff\xff\xff",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`,
+ "\x03" + "\x08\xff\xff",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`,
+ "\x28" + "\x9c" + lit40,
+ lit40,
+ nil,
+ }, {
+ `decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`,
+ "\x01" + "\xf0",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`,
+ "\x03" + "\xf0\x02\xff\xff\xff",
+ "\xff\xff\xff",
+ nil,
+ }, {
+ `decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`,
+ "\x01" + "\xf4\x00",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=3; tagLiteral, 2-byte length; length=3; valid input`,
+ "\x03" + "\xf4\x02\x00\xff\xff\xff",
+ "\xff\xff\xff",
+ nil,
+ }, {
+ `decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`,
+ "\x01" + "\xf8\x00\x00",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`,
+ "\x03" + "\xf8\x02\x00\x00\xff\xff\xff",
+ "\xff\xff\xff",
+ nil,
+ }, {
+ `decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`,
+ "\x01" + "\xfc\x00\x00\x00",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`,
+ "\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`,
+ "\x04" + "\xfc\x02\x00\x00\x00\xff",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`,
+ "\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff",
+ "\xff\xff\xff",
+ nil,
+ }, {
+ `decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`,
+ "\x04" + "\x01",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`,
+ "\x04" + "\x02\x00",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`,
+ "\x04" + "\x03\x00\x00\x00",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`,
+ "\x04" + "\x0cabcd",
+ "abcd",
+ nil,
+ }, {
+ `decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`,
+ "\x0d" + "\x0cabcd" + "\x15\x04",
+ "abcdabcdabcda",
+ nil,
+ }, {
+ `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`,
+ "\x08" + "\x0cabcd" + "\x01\x04",
+ "abcdabcd",
+ nil,
+ }, {
+ `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`,
+ "\x08" + "\x0cabcd" + "\x01\x02",
+ "abcdcdcd",
+ nil,
+ }, {
+ `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; valid input`,
+ "\x08" + "\x0cabcd" + "\x01\x01",
+ "abcddddd",
+ nil,
+ }, {
+ `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`,
+ "\x08" + "\x0cabcd" + "\x01\x00",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`,
+ "\x09" + "\x0cabcd" + "\x01\x04",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`,
+ "\x08" + "\x0cabcd" + "\x01\x05",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`,
+ "\x07" + "\x0cabcd" + "\x01\x04",
+ "",
+ ErrCorrupt,
+ }, {
+ `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`,
+ "\x06" + "\x0cabcd" + "\x06\x03\x00",
+ "abcdbc",
+ nil,
+ }, {
+ `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`,
+ "\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00",
+ "abcdbc",
+ nil,
+ }}
+
+ const (
+ // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are
+ // not present in either the input or the output. It is written to dBuf
+ // to check that Decode does not write bytes past the end of
+ // dBuf[:dLen].
+ //
+ // The magic number 37 was chosen because it is prime. A more 'natural'
+ // number like 32 might lead to a false negative if, for example, a
+ // byte was incorrectly copied 4*8 bytes later.
+ notPresentBase = 0xa0
+ notPresentLen = 37
+ )
+
+ var dBuf [100]byte
+loop:
+ for i, tc := range testCases {
+ input := []byte(tc.input)
+ for _, x := range input {
+ if notPresentBase <= x && x < notPresentBase+notPresentLen {
+ t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input)
+ continue loop
+ }
+ }
+
+ dLen, n := binary.Uvarint(input)
+ if n <= 0 {
+ t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc)
+ continue
+ }
+ if dLen > uint64(len(dBuf)) {
+ t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen)
+ continue
+ }
+
+ for j := range dBuf {
+ dBuf[j] = byte(notPresentBase + j%notPresentLen)
+ }
+ g, gotErr := Decode(dBuf[:], input)
+ if got := string(g); got != tc.want || gotErr != tc.wantErr {
+ t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v",
+ i, tc.desc, got, gotErr, tc.want, tc.wantErr)
+ continue
+ }
+ for j, x := range dBuf {
+ if uint64(j) < dLen {
+ continue
+ }
+ if w := byte(notPresentBase + j%notPresentLen); x != w {
+ t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x",
+ i, tc.desc, j, x, w, dBuf)
+ continue loop
+ }
+ }
+ }
+}
+
+func TestDecodeCopy4(t *testing.T) {
+ dots := strings.Repeat(".", 65536)
+
+ input := strings.Join([]string{
+ "\x89\x80\x04", // decodedLen = 65545.
+ "\x0cpqrs", // 4-byte literal "pqrs".
+ "\xf4\xff\xff" + dots, // 65536-byte literal dots.
+ "\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540.
+ }, "")
+
+ gotBytes, err := Decode(nil, []byte(input))
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(gotBytes)
+ want := "pqrs" + dots + "pqrs."
+ if len(got) != len(want) {
+ t.Fatalf("got %d bytes, want %d", len(got), len(want))
+ }
+ if got != want {
+ for i := 0; i < len(got); i++ {
+ if g, w := got[i], want[i]; g != w {
+ t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w)
+ }
+ }
+ }
+}
+
+// TestDecodeLengthOffset tests decoding an encoding of the form literal +
+// copy-length-offset + literal. For example: "abcdefghijkl" + "efghij" + "AB".
+func TestDecodeLengthOffset(t *testing.T) {
+ const (
+ prefix = "abcdefghijklmnopqr"
+ suffix = "ABCDEFGHIJKLMNOPQR"
+
+ // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are
+ // not present in either the input or the output. It is written to
+ // gotBuf to check that Decode does not write bytes past the end of
+ // gotBuf[:totalLen].
+ //
+ // The magic number 37 was chosen because it is prime. A more 'natural'
+ // number like 32 might lead to a false negative if, for example, a
+ // byte was incorrectly copied 4*8 bytes later.
+ notPresentBase = 0xa0
+ notPresentLen = 37
+ )
+ var gotBuf, wantBuf, inputBuf [128]byte
+ for length := 1; length <= 18; length++ {
+ for offset := 1; offset <= 18; offset++ {
+ loop:
+ for suffixLen := 0; suffixLen <= 18; suffixLen++ {
+ totalLen := len(prefix) + length + suffixLen
+
+ inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen))
+ inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1)
+ inputLen++
+ inputLen += copy(inputBuf[inputLen:], prefix)
+ inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1)
+ inputBuf[inputLen+1] = byte(offset)
+ inputBuf[inputLen+2] = 0x00
+ inputLen += 3
+ if suffixLen > 0 {
+ inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1)
+ inputLen++
+ inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen])
+ }
+ input := inputBuf[:inputLen]
+
+ for i := range gotBuf {
+ gotBuf[i] = byte(notPresentBase + i%notPresentLen)
+ }
+ got, err := Decode(gotBuf[:], input)
+ if err != nil {
+ t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err)
+ continue
+ }
+
+ wantLen := 0
+ wantLen += copy(wantBuf[wantLen:], prefix)
+ for i := 0; i < length; i++ {
+ wantBuf[wantLen] = wantBuf[wantLen-offset]
+ wantLen++
+ }
+ wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen])
+ want := wantBuf[:wantLen]
+
+ for _, x := range input {
+ if notPresentBase <= x && x < notPresentBase+notPresentLen {
+ t.Errorf("length=%d, offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x",
+ length, offset, suffixLen, x, input)
+ continue loop
+ }
+ }
+ for i, x := range gotBuf {
+ if i < totalLen {
+ continue
+ }
+ if w := byte(notPresentBase + i%notPresentLen); x != w {
+ t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+
+ "Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x",
+ length, offset, suffixLen, totalLen, i, x, w, gotBuf)
+ continue loop
+ }
+ }
+ for _, x := range want {
+ if notPresentBase <= x && x < notPresentBase+notPresentLen {
+ t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x",
+ length, offset, suffixLen, x, want)
+ continue loop
+ }
+ }
+
+ if !bytes.Equal(got, want) {
+ t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x",
+ length, offset, suffixLen, input, got, want)
+ continue
+ }
+ }
+ }
+ }
+}
+
+const (
+ goldenText = "Mark.Twain-Tom.Sawyer.txt"
+ goldenCompressed = goldenText + ".rawsnappy"
+)
+
+func TestDecodeGoldenInput(t *testing.T) {
+ tDir := filepath.FromSlash(*testdataDir)
+ src, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed))
+ if err != nil {
+ t.Fatalf("ReadFile: %v", err)
+ }
+ got, err := Decode(nil, src)
+ if err != nil {
+ t.Fatalf("Decode: %v", err)
+ }
+ want, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
+ if err != nil {
+ t.Fatalf("ReadFile: %v", err)
+ }
+ if err := cmp(got, want); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestEncodeGoldenInput(t *testing.T) {
+ tDir := filepath.FromSlash(*testdataDir)
+ src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
+ if err != nil {
+ t.Fatalf("ReadFile: %v", err)
+ }
+ got := Encode(nil, src)
+ want, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed))
+ if err != nil {
+ t.Fatalf("ReadFile: %v", err)
+ }
+ if err := cmp(got, want); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestExtendMatchGoldenInput(t *testing.T) {
+ tDir := filepath.FromSlash(*testdataDir)
+ src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
+ if err != nil {
+ t.Fatalf("ReadFile: %v", err)
+ }
+ for i, tc := range extendMatchGoldenTestCases {
+ got := extendMatch(src, tc.i, tc.j)
+ if got != tc.want {
+ t.Errorf("test #%d: i, j = %5d, %5d: got %5d (= j + %6d), want %5d (= j + %6d)",
+ i, tc.i, tc.j, got, got-tc.j, tc.want, tc.want-tc.j)
+ }
+ }
+}
+
+func TestExtendMatch(t *testing.T) {
+ // ref is a simple, reference implementation of extendMatch.
+ ref := func(src []byte, i, j int) int {
+ for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+ }
+ return j
+ }
+
+ nums := []int{0, 1, 2, 7, 8, 9, 29, 30, 31, 32, 33, 34, 38, 39, 40}
+ for yIndex := 40; yIndex > 30; yIndex-- {
+ xxx := bytes.Repeat([]byte("x"), 40)
+ if yIndex < len(xxx) {
+ xxx[yIndex] = 'y'
+ }
+ for _, i := range nums {
+ for _, j := range nums {
+ if i >= j {
+ continue
+ }
+ got := extendMatch(xxx, i, j)
+ want := ref(xxx, i, j)
+ if got != want {
+ t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want)
+ }
+ }
+ }
+ }
+}
+
+const snappytoolCmdName = "cmd/snappytool/snappytool"
+
+func skipTestSameEncodingAsCpp() (msg string) {
+ if !goEncoderShouldMatchCppEncoder {
+ return fmt.Sprintf("skipping testing that the encoding is byte-for-byte identical to C++: GOARCH=%s", runtime.GOARCH)
+ }
+ if _, err := os.Stat(snappytoolCmdName); err != nil {
+ return fmt.Sprintf("could not find snappytool: %v", err)
+ }
+ return ""
+}
+
+func runTestSameEncodingAsCpp(src []byte) error {
+ got := Encode(nil, src)
+
+ cmd := exec.Command(snappytoolCmdName, "-e")
+ cmd.Stdin = bytes.NewReader(src)
+ want, err := cmd.Output()
+ if err != nil {
+ return fmt.Errorf("could not run snappytool: %v", err)
+ }
+ return cmp(got, want)
+}
+
+func TestSameEncodingAsCppShortCopies(t *testing.T) {
+ if msg := skipTestSameEncodingAsCpp(); msg != "" {
+ t.Skip(msg)
+ }
+ src := bytes.Repeat([]byte{'a'}, 20)
+ for i := 0; i <= len(src); i++ {
+ if err := runTestSameEncodingAsCpp(src[:i]); err != nil {
+ t.Errorf("i=%d: %v", i, err)
+ }
+ }
+}
+
+func TestSameEncodingAsCppLongFiles(t *testing.T) {
+ if msg := skipTestSameEncodingAsCpp(); msg != "" {
+ t.Skip(msg)
+ }
+ bDir := filepath.FromSlash(*benchdataDir)
+ failed := false
+ for i, tf := range testFiles {
+ if err := downloadBenchmarkFiles(t, tf.filename); err != nil {
+ t.Fatalf("failed to download testdata: %s", err)
+ }
+ data := readFile(t, filepath.Join(bDir, tf.filename))
+ if n := tf.sizeLimit; 0 < n && n < len(data) {
+ data = data[:n]
+ }
+ if err := runTestSameEncodingAsCpp(data); err != nil {
+ t.Errorf("i=%d: %v", i, err)
+ failed = true
+ }
+ }
+ if failed {
+ t.Errorf("was the snappytool program built against the C++ snappy library version " +
+ "d53de187 or later, commited on 2016-04-05? See " +
+ "https://github.com/google/snappy/commit/d53de18799418e113e44444252a39b12a0e4e0cc")
+ }
+}
+
+// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm
+// described in decode_amd64.s and its claim of a 10 byte overrun worst case.
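+//
+// A rough sketch of the idea, assuming the arithmetic below mirrors the
+// assembly: when a copy's offset o is less than 8, the pattern is doubled in
+// place (l -= o; d += o; o += o) until 8-byte stores are safe, and each
+// 8-byte store may write past the bytes it logically owns, so the high-water
+// mark can exceed the destination index d by a small, bounded amount.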
+func TestSlowForwardCopyOverrun(t *testing.T) {
+ const base = 100
+
+ for length := 1; length < 18; length++ {
+ for offset := 1; offset < 18; offset++ {
+ highWaterMark := base
+ d := base
+ l := length
+ o := offset
+
+ // makeOffsetAtLeast8
+ for o < 8 {
+ if end := d + 8; highWaterMark < end {
+ highWaterMark = end
+ }
+ l -= o
+ d += o
+ o += o
+ }
+
+ // fixUpSlowForwardCopy
+ a := d
+ d += l
+
+ // finishSlowForwardCopy
+ for l > 0 {
+ if end := a + 8; highWaterMark < end {
+ highWaterMark = end
+ }
+ a += 8
+ l -= 8
+ }
+
+ dWant := base + length
+ overrun := highWaterMark - dWant
+ if d != dWant || overrun < 0 || 10 < overrun {
+ t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])",
+ length, offset, d, overrun, dWant)
+ }
+ }
+ }
+}
+
+// TestEncodeNoiseThenRepeats encodes input for which the first half is very
+// incompressible and the second half is very compressible. The encoded form's
+// length should be closer to 50% of the original length than 100%.
+func TestEncodeNoiseThenRepeats(t *testing.T) {
+ for _, origLen := range []int{256 * 1024, 2048 * 1024} {
+ src := make([]byte, origLen)
+ rng := rand.New(rand.NewSource(1))
+ firstHalf, secondHalf := src[:origLen/2], src[origLen/2:]
+ for i := range firstHalf {
+ firstHalf[i] = uint8(rng.Intn(256))
+ }
+ for i := range secondHalf {
+ secondHalf[i] = uint8(i >> 8)
+ }
+ dst := Encode(nil, src)
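+ // The noise half should stay near 100% of its size and the repeated half
+ // should compress to almost nothing, so the total should approach 50%;
+ // origLen*3/4 is the midpoint between 50% and 100%, which leaves the
+ // encoder comfortable slack.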
+ if got, want := len(dst), origLen*3/4; got >= want {
+ t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want)
+ }
+ }
+}
+
+func TestFramingFormat(t *testing.T) {
+ // src consists of alternating 1e5-sized sequences of random
+ // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
+ // because it is larger than maxBlockSize (64k).
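+ // Each Write is split into blocks of at most maxBlockSize bytes, and each
+ // block is framed as a chunk with a one-byte type, a three-byte length and
+ // a four-byte checksum (see TestWriterGoldenOutput below), so reading the
+ // stream back through NewReader should reproduce src exactly.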
+ src := make([]byte, 1e6)
+ rng := rand.New(rand.NewSource(1))
+ for i := 0; i < 10; i++ {
+ if i%2 == 0 {
+ for j := 0; j < 1e5; j++ {
+ src[1e5*i+j] = uint8(rng.Intn(256))
+ }
+ } else {
+ for j := 0; j < 1e5; j++ {
+ src[1e5*i+j] = uint8(i)
+ }
+ }
+ }
+
+ buf := new(bytes.Buffer)
+ if _, err := NewWriter(buf).Write(src); err != nil {
+ t.Fatalf("Write: encoding: %v", err)
+ }
+ dst, err := ioutil.ReadAll(NewReader(buf))
+ if err != nil {
+ t.Fatalf("ReadAll: decoding: %v", err)
+ }
+ if err := cmp(dst, src); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestWriterGoldenOutput(t *testing.T) {
+ buf := new(bytes.Buffer)
+ w := NewBufferedWriter(buf)
+ defer w.Close()
+ w.Write([]byte("abcd")) // Not compressible.
+ w.Flush()
+ w.Write(bytes.Repeat([]byte{'A'}, 150)) // Compressible.
+ w.Flush()
+ // The next chunk is also compressible, but a naive, greedy encoding of the
+ // overall length 67 copy as a length 64 copy (the longest expressible as a
+ // tagCopy1 or tagCopy2) plus a length 3 remainder would be two 3-byte
+ // tagCopy2 tags (6 bytes), since the minimum length for a tagCopy1 is 4
+ // bytes. Instead, we could do it shorter, in 5 bytes: a 3-byte tagCopy2
+ // (of length 60) and a 2-byte tagCopy1 (of length 7).
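+ //
+ // Reading the "want" bytes at the end of this test against the block
+ // format: a tagCopy2 tag byte is (length-1)<<2 | 2 followed by a
+ // little-endian 2-byte offset, and a tagCopy1 tag byte is
+ // (offset>>8)<<5 | (length-4)<<2 | 1 followed by one offset byte, so the
+ // 5-byte encoding is "\xee\x01\x00" (length 60, offset 1) plus "\x0d\x01"
+ // (length 7, offset 1).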
+ w.Write(bytes.Repeat([]byte{'B'}, 68))
+ w.Write([]byte("efC")) // Not compressible.
+ w.Write(bytes.Repeat([]byte{'C'}, 20)) // Compressible.
+ w.Write(bytes.Repeat([]byte{'B'}, 20)) // Compressible.
+ w.Write([]byte("g")) // Not compressible.
+ w.Flush()
+
+ got := buf.String()
+ want := strings.Join([]string{
+ magicChunk,
+ "\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum).
+ "\x68\x10\xe6\xb6", // Checksum.
+ "\x61\x62\x63\x64", // Uncompressed payload: "abcd".
+ "\x00\x11\x00\x00", // Compressed chunk, 17 bytes long (including 4 byte checksum).
+ "\x5f\xeb\xf2\x10", // Checksum.
+ "\x96\x01", // Compressed payload: Uncompressed length (varint encoded): 150.
+ "\x00\x41", // Compressed payload: tagLiteral, length=1, "A".
+ "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1.
+ "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1.
+ "\x52\x01\x00", // Compressed payload: tagCopy2, length=21, offset=1.
+ "\x00\x18\x00\x00", // Compressed chunk, 24 bytes long (including 4 byte checksum).
+ "\x30\x85\x69\xeb", // Checksum.
+ "\x70", // Compressed payload: Uncompressed length (varint encoded): 112.
+ "\x00\x42", // Compressed payload: tagLiteral, length=1, "B".
+ "\xee\x01\x00", // Compressed payload: tagCopy2, length=60, offset=1.
+ "\x0d\x01", // Compressed payload: tagCopy1, length=7, offset=1.
+ "\x08\x65\x66\x43", // Compressed payload: tagLiteral, length=3, "efC".
+ "\x4e\x01\x00", // Compressed payload: tagCopy2, length=20, offset=1.
+ "\x4e\x5a\x00", // Compressed payload: tagCopy2, length=20, offset=90.
+ "\x00\x67", // Compressed payload: tagLiteral, length=1, "g".
+ }, "")
+ if got != want {
+ t.Fatalf("\ngot: % x\nwant: % x", got, want)
+ }
+}
+
+func TestEmitLiteral(t *testing.T) {
+ testCases := []struct {
+ length int
+ want string
+ }{
+ {1, "\x00"},
+ {2, "\x04"},
+ {59, "\xe8"},
+ {60, "\xec"},
+ {61, "\xf0\x3c"},
+ {62, "\xf0\x3d"},
+ {254, "\xf0\xfd"},
+ {255, "\xf0\xfe"},
+ {256, "\xf0\xff"},
+ {257, "\xf4\x00\x01"},
+ {65534, "\xf4\xfd\xff"},
+ {65535, "\xf4\xfe\xff"},
+ {65536, "\xf4\xff\xff"},
+ }
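+ // The cases above follow the tag-byte pattern of the block format:
+ // literal lengths up to 60 are stored directly in the tag byte as
+ // (length-1)<<2, lengths 61..256 use tag 0xf0 (60<<2) plus one byte
+ // holding length-1, and lengths 257..65536 use tag 0xf4 (61<<2) plus two
+ // little-endian bytes holding length-1.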
+
+ dst := make([]byte, 70000)
+ nines := bytes.Repeat([]byte{0x99}, 65536)
+ for _, tc := range testCases {
+ lit := nines[:tc.length]
+ n := emitLiteral(dst, lit)
+ if !bytes.HasSuffix(dst[:n], lit) {
+ t.Errorf("length=%d: did not end with that many literal bytes", tc.length)
+ continue
+ }
+ got := string(dst[:n-tc.length])
+ if got != tc.want {
+ t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want)
+ continue
+ }
+ }
+}
+
+func TestEmitCopy(t *testing.T) {
+ testCases := []struct {
+ offset int
+ length int
+ want string
+ }{
+ {8, 04, "\x01\x08"},
+ {8, 11, "\x1d\x08"},
+ {8, 12, "\x2e\x08\x00"},
+ {8, 13, "\x32\x08\x00"},
+ {8, 59, "\xea\x08\x00"},
+ {8, 60, "\xee\x08\x00"},
+ {8, 61, "\xf2\x08\x00"},
+ {8, 62, "\xf6\x08\x00"},
+ {8, 63, "\xfa\x08\x00"},
+ {8, 64, "\xfe\x08\x00"},
+ {8, 65, "\xee\x08\x00\x05\x08"},
+ {8, 66, "\xee\x08\x00\x09\x08"},
+ {8, 67, "\xee\x08\x00\x0d\x08"},
+ {8, 68, "\xfe\x08\x00\x01\x08"},
+ {8, 69, "\xfe\x08\x00\x05\x08"},
+ {8, 80, "\xfe\x08\x00\x3e\x08\x00"},
+
+ {256, 04, "\x21\x00"},
+ {256, 11, "\x3d\x00"},
+ {256, 12, "\x2e\x00\x01"},
+ {256, 13, "\x32\x00\x01"},
+ {256, 59, "\xea\x00\x01"},
+ {256, 60, "\xee\x00\x01"},
+ {256, 61, "\xf2\x00\x01"},
+ {256, 62, "\xf6\x00\x01"},
+ {256, 63, "\xfa\x00\x01"},
+ {256, 64, "\xfe\x00\x01"},
+ {256, 65, "\xee\x00\x01\x25\x00"},
+ {256, 66, "\xee\x00\x01\x29\x00"},
+ {256, 67, "\xee\x00\x01\x2d\x00"},
+ {256, 68, "\xfe\x00\x01\x21\x00"},
+ {256, 69, "\xfe\x00\x01\x25\x00"},
+ {256, 80, "\xfe\x00\x01\x3e\x00\x01"},
+
+ {2048, 04, "\x0e\x00\x08"},
+ {2048, 11, "\x2a\x00\x08"},
+ {2048, 12, "\x2e\x00\x08"},
+ {2048, 13, "\x32\x00\x08"},
+ {2048, 59, "\xea\x00\x08"},
+ {2048, 60, "\xee\x00\x08"},
+ {2048, 61, "\xf2\x00\x08"},
+ {2048, 62, "\xf6\x00\x08"},
+ {2048, 63, "\xfa\x00\x08"},
+ {2048, 64, "\xfe\x00\x08"},
+ {2048, 65, "\xee\x00\x08\x12\x00\x08"},
+ {2048, 66, "\xee\x00\x08\x16\x00\x08"},
+ {2048, 67, "\xee\x00\x08\x1a\x00\x08"},
+ {2048, 68, "\xfe\x00\x08\x0e\x00\x08"},
+ {2048, 69, "\xfe\x00\x08\x12\x00\x08"},
+ {2048, 80, "\xfe\x00\x08\x3e\x00\x08"},
+ }
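+ // Decoding the cases above: offsets at most 2047 with lengths 4..11 fit
+ // the 2-byte tagCopy1 form ((offset>>8)<<5 | (length-4)<<2 | 1 plus one
+ // offset byte), other combinations use the 3-byte tagCopy2 form
+ // ((length-1)<<2 | 2 plus a little-endian 2-byte offset), and lengths
+ // over 64 are split across multiple tags, e.g. {8, 67} is a length-60
+ // tagCopy2 followed by a length-7 tagCopy1.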
+
+ dst := make([]byte, 1024)
+ for _, tc := range testCases {
+ n := emitCopy(dst, tc.offset, tc.length)
+ got := string(dst[:n])
+ if got != tc.want {
+ t.Errorf("offset=%d, length=%d:\ngot % x\nwant % x", tc.offset, tc.length, got, tc.want)
+ }
+ }
+}
+
+func TestNewBufferedWriter(t *testing.T) {
+ // Test all 32 possible sub-sequences of these 5 input slices.
+ //
+ // Their lengths sum to 400,000, which is over 6 times the Writer ibuf
+ // capacity: 6 * maxBlockSize is 393,216.
+ inputs := [][]byte{
+ bytes.Repeat([]byte{'a'}, 40000),
+ bytes.Repeat([]byte{'b'}, 150000),
+ bytes.Repeat([]byte{'c'}, 60000),
+ bytes.Repeat([]byte{'d'}, 120000),
+ bytes.Repeat([]byte{'e'}, 30000),
+ }
+loop:
+ for i := 0; i < 1<<uint(len(inputs)); i++ {
+ var want []byte
+ buf := new(bytes.Buffer)
+ w := NewBufferedWriter(buf)
+ for j, input := range inputs {
+ if i&(1<<uint(j)) == 0 {
+ continue
+ }
+ if _, err := w.Write(input); err != nil {
+ t.Errorf("i=%#02x: j=%d: Write: %v", i, j, err)
+ continue loop
+ }
+ want = append(want, input...)
+ }
+ if err := w.Close(); err != nil {
+ t.Errorf("i=%#02x: Close: %v", i, err)
+ continue
+ }
+ got, err := ioutil.ReadAll(NewReader(buf))
+ if err != nil {
+ t.Errorf("i=%#02x: ReadAll: %v", i, err)
+ continue
+ }
+ if err := cmp(got, want); err != nil {
+ t.Errorf("i=%#02x: %v", i, err)
+ continue
+ }
+ }
+}
+
+// expand returns a slice of length n containing repeated copies of src.
+func expand(src []byte, n int) []byte {
+ dst := make([]byte, n)
+ for x := dst; len(x) > 0; {
+ i := copy(x, src)
+ x = x[i:]
+ }
+ return dst
+}
+
+func benchWords(b *testing.B, n int, decode bool) {
+ // Note: the file is OS-language dependent so the resulting values are not
+ // directly comparable for non-US-English OS installations.
+ data := expand(readFile(b, "/usr/share/dict/words"), n)
+ if decode {
+ benchDecode(b, data)
+ } else {
+ benchEncode(b, data)
+ }
+}
+
+func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) }
+func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) }
+func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
+func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
+func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
+func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
+func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) }
+func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) }
+func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
+func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
+func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
+func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
+
+func BenchmarkRandomEncode(b *testing.B) {
+ rng := rand.New(rand.NewSource(1))
+ data := make([]byte, 1<<20)
+ for i := range data {
+ data[i] = uint8(rng.Intn(256))
+ }
+ benchEncode(b, data)
+}
+
+// testFiles' values are copied directly from
+// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
+// The label field is unused in snappy-go.
+var testFiles = []struct {
+ label string
+ filename string
+ sizeLimit int
+}{
+ {"html", "html", 0},
+ {"urls", "urls.10K", 0},
+ {"jpg", "fireworks.jpeg", 0},
+ {"jpg_200", "fireworks.jpeg", 200},
+ {"pdf", "paper-100k.pdf", 0},
+ {"html4", "html_x_4", 0},
+ {"txt1", "alice29.txt", 0},
+ {"txt2", "asyoulik.txt", 0},
+ {"txt3", "lcet10.txt", 0},
+ {"txt4", "plrabn12.txt", 0},
+ {"pb", "geo.protodata", 0},
+ {"gaviota", "kppkn.gtb", 0},
+}
+
+const (
+ // The benchmark data files are at this canonical URL.
+ benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
+)
+
+func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) {
+ bDir := filepath.FromSlash(*benchdataDir)
+ filename := filepath.Join(bDir, basename)
+ if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
+ return nil
+ }
+
+ if !*download {
+ b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b))
+ }
+ // Download the official snappy C++ implementation reference test data
+ // files for benchmarking.
+ if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) {
+ return fmt.Errorf("failed to create %s: %s", bDir, err)
+ }
+
+ f, err := os.Create(filename)
+ if err != nil {
+ return fmt.Errorf("failed to create %s: %s", filename, err)
+ }
+ defer f.Close()
+ defer func() {
+ if errRet != nil {
+ os.Remove(filename)
+ }
+ }()
+ url := benchURL + basename
+ resp, err := http.Get(url)
+ if err != nil {
+ return fmt.Errorf("failed to download %s: %s", url, err)
+ }
+ defer resp.Body.Close()
+ if s := resp.StatusCode; s != http.StatusOK {
+ return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
+ }
+ _, err = io.Copy(f, resp.Body)
+ if err != nil {
+ return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
+ }
+ return nil
+}
+
+func benchFile(b *testing.B, i int, decode bool) {
+ if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil {
+ b.Fatalf("failed to download testdata: %s", err)
+ }
+ bDir := filepath.FromSlash(*benchdataDir)
+ data := readFile(b, filepath.Join(bDir, testFiles[i].filename))
+ if n := testFiles[i].sizeLimit; 0 < n && n < len(data) {
+ data = data[:n]
+ }
+ if decode {
+ benchDecode(b, data)
+ } else {
+ benchEncode(b, data)
+ }
+}
+
+// Naming convention is kept similar to what snappy's C++ implementation uses.
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
+func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
+func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
+func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
+func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
+func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
+func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
+func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
+func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
+func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
+func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
+func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
+func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
+func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
+func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
+func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
+func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
+func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
+func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
+func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
+func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
+func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
+func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
+func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
+
+func BenchmarkExtendMatch(b *testing.B) {
+ tDir := filepath.FromSlash(*testdataDir)
+ src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
+ if err != nil {
+ b.Fatalf("ReadFile: %v", err)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, tc := range extendMatchGoldenTestCases {
+ extendMatch(src, tc.i, tc.j)
+ }
+ }
+}
diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt
new file mode 100644
index 0000000..86a1875
--- /dev/null
+++ b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt
@@ -0,0 +1,396 @@
+Produced by David Widger. The previous edition was updated by Jose
+Menendez.
+
+
+
+
+
+ THE ADVENTURES OF TOM SAWYER
+ BY
+ MARK TWAIN
+ (Samuel Langhorne Clemens)
+
+
+
+
+ P R E F A C E
+
+MOST of the adventures recorded in this book really occurred; one or
+two were experiences of my own, the rest those of boys who were
+schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but
+not from an individual--he is a combination of the characteristics of
+three boys whom I knew, and therefore belongs to the composite order of
+architecture.
+
+The odd superstitions touched upon were all prevalent among children
+and slaves in the West at the period of this story--that is to say,
+thirty or forty years ago.
+
+Although my book is intended mainly for the entertainment of boys and
+girls, I hope it will not be shunned by men and women on that account,
+for part of my plan has been to try to pleasantly remind adults of what
+they once were themselves, and of how they felt and thought and talked,
+and what queer enterprises they sometimes engaged in.
+
+ THE AUTHOR.
+
+HARTFORD, 1876.
+
+
+
+ T O M S A W Y E R
+
+
+
+CHAPTER I
+
+"TOM!"
+
+No answer.
+
+"TOM!"
+
+No answer.
+
+"What's gone with that boy, I wonder? You TOM!"
+
+No answer.
+
+The old lady pulled her spectacles down and looked over them about the
+room; then she put them up and looked out under them. She seldom or
+never looked THROUGH them for so small a thing as a boy; they were her
+state pair, the pride of her heart, and were built for "style," not
+service--she could have seen through a pair of stove-lids just as well.
+She looked perplexed for a moment, and then said, not fiercely, but
+still loud enough for the furniture to hear:
+
+"Well, I lay if I get hold of you I'll--"
+
+She did not finish, for by this time she was bending down and punching
+under the bed with the broom, and so she needed breath to punctuate the
+punches with. She resurrected nothing but the cat.
+
+"I never did see the beat of that boy!"
+
+She went to the open door and stood in it and looked out among the
+tomato vines and "jimpson" weeds that constituted the garden. No Tom.
+So she lifted up her voice at an angle calculated for distance and
+shouted:
+
+"Y-o-u-u TOM!"
+
+There was a slight noise behind her and she turned just in time to
+seize a small boy by the slack of his roundabout and arrest his flight.
+
+"There! I might 'a' thought of that closet. What you been doing in
+there?"
+
+"Nothing."
+
+"Nothing! Look at your hands. And look at your mouth. What IS that
+truck?"
+
+"I don't know, aunt."
+
+"Well, I know. It's jam--that's what it is. Forty times I've said if
+you didn't let that jam alone I'd skin you. Hand me that switch."
+
+The switch hovered in the air--the peril was desperate--
+
+"My! Look behind you, aunt!"
+
+The old lady whirled round, and snatched her skirts out of danger. The
+lad fled on the instant, scrambled up the high board-fence, and
+disappeared over it.
+
+His aunt Polly stood surprised a moment, and then broke into a gentle
+laugh.
+
+"Hang the boy, can't I never learn anything? Ain't he played me tricks
+enough like that for me to be looking out for him by this time? But old
+fools is the biggest fools there is. Can't learn an old dog new tricks,
+as the saying is. But my goodness, he never plays them alike, two days,
+and how is a body to know what's coming? He 'pears to know just how
+long he can torment me before I get my dander up, and he knows if he
+can make out to put me off for a minute or make me laugh, it's all down
+again and I can't hit him a lick. I ain't doing my duty by that boy,
+and that's the Lord's truth, goodness knows. Spare the rod and spile
+the child, as the Good Book says. I'm a laying up sin and suffering for
+us both, I know. He's full of the Old Scratch, but laws-a-me! he's my
+own dead sister's boy, poor thing, and I ain't got the heart to lash
+him, somehow. Every time I let him off, my conscience does hurt me so,
+and every time I hit him my old heart most breaks. Well-a-well, man
+that is born of woman is of few days and full of trouble, as the
+Scripture says, and I reckon it's so. He'll play hookey this evening, *
+and [* Southwestern for "afternoon"] I'll just be obleeged to make him
+work, to-morrow, to punish him. It's mighty hard to make him work
+Saturdays, when all the boys is having holiday, but he hates work more
+than he hates anything else, and I've GOT to do some of my duty by him,
+or I'll be the ruination of the child."
+
+Tom did play hookey, and he had a very good time. He got back home
+barely in season to help Jim, the small colored boy, saw next-day's
+wood and split the kindlings before supper--at least he was there in
+time to tell his adventures to Jim while Jim did three-fourths of the
+work. Tom's younger brother (or rather half-brother) Sid was already
+through with his part of the work (picking up chips), for he was a
+quiet boy, and had no adventurous, troublesome ways.
+
+While Tom was eating his supper, and stealing sugar as opportunity
+offered, Aunt Polly asked him questions that were full of guile, and
+very deep--for she wanted to trap him into damaging revealments. Like
+many other simple-hearted souls, it was her pet vanity to believe she
+was endowed with a talent for dark and mysterious diplomacy, and she
+loved to contemplate her most transparent devices as marvels of low
+cunning. Said she:
+
+"Tom, it was middling warm in school, warn't it?"
+
+"Yes'm."
+
+"Powerful warm, warn't it?"
+
+"Yes'm."
+
+"Didn't you want to go in a-swimming, Tom?"
+
+A bit of a scare shot through Tom--a touch of uncomfortable suspicion.
+He searched Aunt Polly's face, but it told him nothing. So he said:
+
+"No'm--well, not very much."
+
+The old lady reached out her hand and felt Tom's shirt, and said:
+
+"But you ain't too warm now, though." And it flattered her to reflect
+that she had discovered that the shirt was dry without anybody knowing
+that that was what she had in her mind. But in spite of her, Tom knew
+where the wind lay, now. So he forestalled what might be the next move:
+
+"Some of us pumped on our heads--mine's damp yet. See?"
+
+Aunt Polly was vexed to think she had overlooked that bit of
+circumstantial evidence, and missed a trick. Then she had a new
+inspiration:
+
+"Tom, you didn't have to undo your shirt collar where I sewed it, to
+pump on your head, did you? Unbutton your jacket!"
+
+The trouble vanished out of Tom's face. He opened his jacket. His
+shirt collar was securely sewed.
+
+"Bother! Well, go 'long with you. I'd made sure you'd played hookey
+and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a
+singed cat, as the saying is--better'n you look. THIS time."
+
+She was half sorry her sagacity had miscarried, and half glad that Tom
+had stumbled into obedient conduct for once.
+
+But Sidney said:
+
+"Well, now, if I didn't think you sewed his collar with white thread,
+but it's black."
+
+"Why, I did sew it with white! Tom!"
+
+But Tom did not wait for the rest. As he went out at the door he said:
+
+"Siddy, I'll lick you for that."
+
+In a safe place Tom examined two large needles which were thrust into
+the lapels of his jacket, and had thread bound about them--one needle
+carried white thread and the other black. He said:
+
+"She'd never noticed if it hadn't been for Sid. Confound it! sometimes
+she sews it with white, and sometimes she sews it with black. I wish to
+geeminy she'd stick to one or t'other--I can't keep the run of 'em. But
+I bet you I'll lam Sid for that. I'll learn him!"
+
+He was not the Model Boy of the village. He knew the model boy very
+well though--and loathed him.
+
+Within two minutes, or even less, he had forgotten all his troubles.
+Not because his troubles were one whit less heavy and bitter to him
+than a man's are to a man, but because a new and powerful interest bore
+them down and drove them out of his mind for the time--just as men's
+misfortunes are forgotten in the excitement of new enterprises. This
+new interest was a valued novelty in whistling, which he had just
+acquired from a negro, and he was suffering to practise it undisturbed.
+It consisted in a peculiar bird-like turn, a sort of liquid warble,
+produced by touching the tongue to the roof of the mouth at short
+intervals in the midst of the music--the reader probably remembers how
+to do it, if he has ever been a boy. Diligence and attention soon gave
+him the knack of it, and he strode down the street with his mouth full
+of harmony and his soul full of gratitude. He felt much as an
+astronomer feels who has discovered a new planet--no doubt, as far as
+strong, deep, unalloyed pleasure is concerned, the advantage was with
+the boy, not the astronomer.
+
+The summer evenings were long. It was not dark, yet. Presently Tom
+checked his whistle. A stranger was before him--a boy a shade larger
+than himself. A new-comer of any age or either sex was an impressive
+curiosity in the poor little shabby village of St. Petersburg. This boy
+was well dressed, too--well dressed on a week-day. This was simply
+astounding. His cap was a dainty thing, his close-buttoned blue cloth
+roundabout was new and natty, and so were his pantaloons. He had shoes
+on--and it was only Friday. He even wore a necktie, a bright bit of
+ribbon. He had a citified air about him that ate into Tom's vitals. The
+more Tom stared at the splendid marvel, the higher he turned up his
+nose at his finery and the shabbier and shabbier his own outfit seemed
+to him to grow. Neither boy spoke. If one moved, the other moved--but
+only sidewise, in a circle; they kept face to face and eye to eye all
+the time. Finally Tom said:
+
+"I can lick you!"
+
+"I'd like to see you try it."
+
+"Well, I can do it."
+
+"No you can't, either."
+
+"Yes I can."
+
+"No you can't."
+
+"I can."
+
+"You can't."
+
+"Can!"
+
+"Can't!"
+
+An uncomfortable pause. Then Tom said:
+
+"What's your name?"
+
+"'Tisn't any of your business, maybe."
+
+"Well I 'low I'll MAKE it my business."
+
+"Well why don't you?"
+
+"If you say much, I will."
+
+"Much--much--MUCH. There now."
+
+"Oh, you think you're mighty smart, DON'T you? I could lick you with
+one hand tied behind me, if I wanted to."
+
+"Well why don't you DO it? You SAY you can do it."
+
+"Well I WILL, if you fool with me."
+
+"Oh yes--I've seen whole families in the same fix."
+
+"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!"
+
+"You can lump that hat if you don't like it. I dare you to knock it
+off--and anybody that'll take a dare will suck eggs."
+
+"You're a liar!"
+
+"You're another."
+
+"You're a fighting liar and dasn't take it up."
+
+"Aw--take a walk!"
+
+"Say--if you give me much more of your sass I'll take and bounce a
+rock off'n your head."
+
+"Oh, of COURSE you will."
+
+"Well I WILL."
+
+"Well why don't you DO it then? What do you keep SAYING you will for?
+Why don't you DO it? It's because you're afraid."
+
+"I AIN'T afraid."
+
+"You are."
+
+"I ain't."
+
+"You are."
+
+Another pause, and more eying and sidling around each other. Presently
+they were shoulder to shoulder. Tom said:
+
+"Get away from here!"
+
+"Go away yourself!"
+
+"I won't."
+
+"I won't either."
+
+So they stood, each with a foot placed at an angle as a brace, and
+both shoving with might and main, and glowering at each other with
+hate. But neither could get an advantage. After struggling till both
+were hot and flushed, each relaxed his strain with watchful caution,
+and Tom said:
+
+"You're a coward and a pup. I'll tell my big brother on you, and he
+can thrash you with his little finger, and I'll make him do it, too."
+
+"What do I care for your big brother? I've got a brother that's bigger
+than he is--and what's more, he can throw him over that fence, too."
+[Both brothers were imaginary.]
+
+"That's a lie."
+
+"YOUR saying so don't make it so."
+
+Tom drew a line in the dust with his big toe, and said:
+
+"I dare you to step over that, and I'll lick you till you can't stand
+up. Anybody that'll take a dare will steal sheep."
+
+The new boy stepped over promptly, and said:
+
+"Now you said you'd do it, now let's see you do it."
+
+"Don't you crowd me now; you better look out."
+
+"Well, you SAID you'd do it--why don't you do it?"
+
+"By jingo! for two cents I WILL do it."
+
+The new boy took two broad coppers out of his pocket and held them out
+with derision. Tom struck them to the ground. In an instant both boys
+were rolling and tumbling in the dirt, gripped together like cats; and
+for the space of a minute they tugged and tore at each other's hair and
+clothes, punched and scratched each other's nose, and covered
+themselves with dust and glory. Presently the confusion took form, and
+through the fog of battle Tom appeared, seated astride the new boy, and
+pounding him with his fists. "Holler 'nuff!" said he.
+
+The boy only struggled to free himself. He was crying--mainly from rage.
+
+"Holler 'nuff!"--and the pounding went on.
+
+At last the stranger got out a smothered "'Nuff!" and Tom let him up
+and said:
+
+"Now that'll learn you. Better look out who you're fooling with next
+time."
+
+The new boy went off brushing the dust from his clothes, sobbing,
+snuffling, and occasionally looking back and shaking his head and
+threatening what he would do to Tom the "next time he caught him out."
+To which Tom responded with jeers, and started off in high feather, and
+as soon as his back was turned the new boy snatched up a stone, threw
+it and hit him between the shoulders and then turned tail and ran like
+an antelope. Tom chased the traitor home, and thus found out where he
+lived. He then held a position at the gate for some time, daring the
+enemy to come outside, but the enemy only made faces at him through the
+window and declined. At last the enemy's mother appeared, and called
+Tom a bad, vicious, vulgar child, and ordered him away. So he went
+away; but he said he "'lowed" to "lay" for that boy.
+
+He got home pretty late that night, and when he climbed cautiously in
+at the window, he uncovered an ambuscade, in the person of his aunt;
+and when she saw the state his clothes were in her resolution to turn
+his Saturday holiday into captivity at hard labor became adamantine in
+its firmness.
diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy
new file mode 100644
index 0000000..9c56d98
Binary files /dev/null and b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy differ
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
index f4596d8..7d8a57c 100644
--- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
+++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
@@ -3,11 +3,12 @@ package cleanhttp
import (
"net"
"net/http"
+ "runtime"
"time"
)
-// DefaultTransport returns a new http.Transport with the same default values
-// as http.DefaultTransport, but with idle connections and keepalives disabled.
+// DefaultTransport returns a new http.Transport with similar default values to
+// http.DefaultTransport, but with idle connections and keepalives disabled.
func DefaultTransport() *http.Transport {
transport := DefaultPooledTransport()
transport.DisableKeepAlives = true
@@ -22,13 +23,15 @@ func DefaultTransport() *http.Transport {
func DefaultPooledTransport() *http.Transport {
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
+ DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 10 * time.Second,
- DisableKeepAlives: false,
- MaxIdleConnsPerHost: 1,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
}
return transport
}
@@ -42,10 +45,10 @@ func DefaultClient() *http.Client {
}
}
-// DefaultPooledClient returns a new http.Client with the same default values
-// as http.Client, but with a shared Transport. Do not use this function
-// for transient clients as it can leak file descriptors over time. Only use
-// this for clients that will be re-used for the same host(s).
+// DefaultPooledClient returns a new http.Client with similar default values to
+// http.Client, but with a shared Transport. Do not use this function for
+// transient clients as it can leak file descriptors over time. Only use this
+// for clients that will be re-used for the same host(s).
func DefaultPooledClient() *http.Client {
return &http.Client{
Transport: DefaultPooledTransport(),
diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml
new file mode 100644
index 0000000..4b865d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml
@@ -0,0 +1,12 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.6
+
+branches:
+ only:
+ - master
+
+script: make test testrace
diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile
new file mode 100644
index 0000000..b97cd6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/Makefile
@@ -0,0 +1,31 @@
+TEST?=./...
+
+default: test
+
+# test runs the test suite and vets the code.
+test: generate
+ @echo "==> Running tests..."
+ @go list $(TEST) \
+ | grep -v "/vendor/" \
+ | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}
+
+# testrace runs the race checker
+testrace: generate
+ @echo "==> Running tests (race)..."
+ @go list $(TEST) \
+ | grep -v "/vendor/" \
+ | xargs -n1 go test -timeout=60s -race ${TESTARGS}
+
+# updatedeps installs all the dependencies needed to run and build.
+updatedeps:
+ @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"
+
+# generate runs `go generate` to build the dynamically generated source files.
+generate:
+ @echo "==> Generating..."
+ @find . -type f -name '.DS_Store' -delete
+ @go list ./... \
+ | grep -v "/vendor/" \
+ | xargs -n1 go generate
+
+.PHONY: default test testrace updatedeps generate
diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md
index e81be50..ead5830 100644
--- a/vendor/github.com/hashicorp/go-multierror/README.md
+++ b/vendor/github.com/hashicorp/go-multierror/README.md
@@ -1,5 +1,11 @@
# go-multierror
+[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: https://travis-ci.org/hashicorp/go-multierror
+[godocs]: https://godoc.org/github.com/hashicorp/go-multierror
+
`go-multierror` is a package for Go that provides a mechanism for
representing a list of `error` values as a single `error`.
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
index 00afa9b..775b6e7 100644
--- a/vendor/github.com/hashicorp/go-multierror/append.go
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -18,9 +18,13 @@ func Append(err error, errs ...error) *Error {
for _, e := range errs {
switch e := e.(type) {
case *Error:
- err.Errors = append(err.Errors, e.Errors...)
+ if e != nil {
+ err.Errors = append(err.Errors, e.Errors...)
+ }
default:
- err.Errors = append(err.Errors, e)
+ if e != nil {
+ err.Errors = append(err.Errors, e)
+ }
}
}
diff --git a/vendor/github.com/hashicorp/go-multierror/append_test.go b/vendor/github.com/hashicorp/go-multierror/append_test.go
index dfa79e2..58ddafa 100644
--- a/vendor/github.com/hashicorp/go-multierror/append_test.go
+++ b/vendor/github.com/hashicorp/go-multierror/append_test.go
@@ -47,6 +47,24 @@ func TestAppend_NilError(t *testing.T) {
}
}
+func TestAppend_NilErrorArg(t *testing.T) {
+ var err error
+ var nilErr *Error
+ result := Append(err, nilErr)
+ if len(result.Errors) != 0 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+}
+
+func TestAppend_NilErrorIfaceArg(t *testing.T) {
+ var err error
+ var nilErr error
+ result := Append(err, nilErr)
+ if len(result.Errors) != 0 {
+ t.Fatalf("wrong len: %d", len(result.Errors))
+ }
+}
+
func TestAppend_NonError(t *testing.T) {
original := errors.New("foo")
result := Append(original, errors.New("bar"))
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten_test.go b/vendor/github.com/hashicorp/go-multierror/flatten_test.go
index 75218f1..9fbacad 100644
--- a/vendor/github.com/hashicorp/go-multierror/flatten_test.go
+++ b/vendor/github.com/hashicorp/go-multierror/flatten_test.go
@@ -26,7 +26,7 @@ func TestFlatten(t *testing.T) {
}
expected := strings.TrimSpace(`
-3 error(s) occurred:
+3 errors occurred:
* one
* two
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
index bb65a12..6c7a3cc 100644
--- a/vendor/github.com/hashicorp/go-multierror/format.go
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -12,12 +12,16 @@ type ErrorFormatFunc func([]error) string
// ListFormatFunc is a basic formatter that outputs the number of errors
// that occurred along with a bullet point list of the errors.
func ListFormatFunc(es []error) string {
+ if len(es) == 1 {
+ return fmt.Sprintf("1 error occurred:\n\n* %s", es[0])
+ }
+
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %s", err)
}
return fmt.Sprintf(
- "%d error(s) occurred:\n\n%s",
+ "%d errors occurred:\n\n%s",
len(es), strings.Join(points, "\n"))
}
diff --git a/vendor/github.com/hashicorp/go-multierror/format_test.go b/vendor/github.com/hashicorp/go-multierror/format_test.go
index d7cee5d..3359e02 100644
--- a/vendor/github.com/hashicorp/go-multierror/format_test.go
+++ b/vendor/github.com/hashicorp/go-multierror/format_test.go
@@ -5,8 +5,23 @@ import (
"testing"
)
-func TestListFormatFunc(t *testing.T) {
- expected := `2 error(s) occurred:
+func TestListFormatFuncSingle(t *testing.T) {
+ expected := `1 error occurred:
+
+* foo`
+
+ errors := []error{
+ errors.New("foo"),
+ }
+
+ actual := ListFormatFunc(errors)
+ if actual != expected {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+
+func TestListFormatFuncMultiple(t *testing.T) {
+ expected := `2 errors occurred:
* foo
* bar`
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
index 2ea0827..89b1422 100644
--- a/vendor/github.com/hashicorp/go-multierror/multierror.go
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -40,11 +40,11 @@ func (e *Error) GoString() string {
}
// WrappedErrors returns the list of errors that this Error is wrapping.
-// It is an implementatin of the errwrap.Wrapper interface so that
+// It is an implementation of the errwrap.Wrapper interface so that
// multierror.Error can be used with that library.
//
// This method is not safe to be called concurrently and is no different
-// than accessing the Errors field directly. It is implementd only to
+// than accessing the Errors field directly. It is implemented only to
// satisfy the errwrap.Wrapper interface.
func (e *Error) WrappedErrors() []error {
return e.Errors
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror_test.go b/vendor/github.com/hashicorp/go-multierror/multierror_test.go
index 3e78079..5567d1c 100644
--- a/vendor/github.com/hashicorp/go-multierror/multierror_test.go
+++ b/vendor/github.com/hashicorp/go-multierror/multierror_test.go
@@ -27,7 +27,7 @@ func TestErrorError_custom(t *testing.T) {
}
func TestErrorError_default(t *testing.T) {
- expected := `2 error(s) occurred:
+ expected := `2 errors occurred:
* foo
* bar`
diff --git a/vendor/github.com/hashicorp/go-multierror/scripts/deps.sh b/vendor/github.com/hashicorp/go-multierror/scripts/deps.sh
new file mode 100755
index 0000000..1d2fcf9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/scripts/deps.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+#
+# This script updates dependencies using a temporary directory. This is required
+# to avoid any auxiliary dependencies that sneak into GOPATH.
+set -e
+
+# Get the parent directory of where this script is.
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
+DIR="$(cd -P "$(dirname "$SOURCE")/.." && pwd)"
+
+# Change into that directory
+cd "$DIR"
+
+# Get the name from the directory
+NAME=${NAME:-"$(basename $(pwd))"}
+
+# Announce
+echo "==> Updating dependencies..."
+
+echo "--> Making tmpdir..."
+tmpdir=$(mktemp -d)
+function cleanup {
+ rm -rf "${tmpdir}"
+}
+trap cleanup EXIT
+
+export GOPATH="${tmpdir}"
+export PATH="${tmpdir}/bin:$PATH"
+
+mkdir -p "${tmpdir}/src/github.com/hashicorp"
+pushd "${tmpdir}/src/github.com/hashicorp" &>/dev/null
+
+echo "--> Copying ${NAME}..."
+cp -R "$DIR" "${tmpdir}/src/github.com/hashicorp/${NAME}"
+pushd "${tmpdir}/src/github.com/hashicorp/${NAME}" &>/dev/null
+rm -rf vendor/
+
+echo "--> Installing dependency manager..."
+go get -u github.com/kardianos/govendor
+govendor init
+
+echo "--> Installing all dependencies (may take some time)..."
+govendor fetch -v +outside
+
+echo "--> Vendoring..."
+govendor add +external
+
+echo "--> Moving into place..."
+vpath="${tmpdir}/src/github.com/hashicorp/${NAME}/vendor"
+popd &>/dev/null
+popd &>/dev/null
+rm -rf vendor/
+cp -R "${vpath}" .
diff --git a/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md b/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000..2d7fc4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,21 @@
+### HCL Template
+```hcl
+# Place your HCL configuration file here
+```
+
+### Expected behavior
+What should have happened?
+
+### Actual behavior
+What actually happened?
+
+### Steps to reproduce
+1.
+2.
+3.
+
+### References
+Are there any other GitHub issues (open or closed) that should
+be linked here? For example:
+- GH-1234
+- ...
diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml
index a785444..cb63a32 100644
--- a/vendor/github.com/hashicorp/hcl/.travis.yml
+++ b/vendor/github.com/hashicorp/hcl/.travis.yml
@@ -1,3 +1,13 @@
sudo: false
+
language: go
-go: 1.7
+
+go:
+ - 1.x
+ - tip
+
+branches:
+ only:
+ - master
+
+script: make test
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
index 3c8cdf8..4db0b71 100644
--- a/vendor/github.com/hashicorp/hcl/appveyor.yml
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
@@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\hashicorp\hcl
environment:
GOPATH: c:\gopath
init:
- - git config --global core.autocrlf true
+ - git config --global core.autocrlf false
install:
- cmd: >-
echo %Path%
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
index c8a077d..b88f322 100644
--- a/vendor/github.com/hashicorp/hcl/decoder.go
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -89,9 +89,9 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error
switch k.Kind() {
case reflect.Bool:
return d.decodeBool(name, node, result)
- case reflect.Float64:
+ case reflect.Float32, reflect.Float64:
return d.decodeFloat(name, node, result)
- case reflect.Int:
+ case reflect.Int, reflect.Int32, reflect.Int64:
return d.decodeInt(name, node, result)
case reflect.Interface:
// When we see an interface, we make our own thing
@@ -137,13 +137,13 @@ func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) e
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
- if n.Token.Type == token.FLOAT {
+ if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
v, err := strconv.ParseFloat(n.Token.Text, 64)
if err != nil {
return err
}
- result.Set(reflect.ValueOf(v))
+ result.Set(reflect.ValueOf(v).Convert(result.Type()))
return nil
}
}
@@ -164,7 +164,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er
return err
}
- result.Set(reflect.ValueOf(int(v)))
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
return nil
case token.STRING:
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
@@ -172,7 +176,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er
return err
}
- result.Set(reflect.ValueOf(int(v)))
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
return nil
}
}
diff --git a/vendor/github.com/hashicorp/hcl/decoder_test.go b/vendor/github.com/hashicorp/hcl/decoder_test.go
index 5a8404c..8682f47 100644
--- a/vendor/github.com/hashicorp/hcl/decoder_test.go
+++ b/vendor/github.com/hashicorp/hcl/decoder_test.go
@@ -5,10 +5,10 @@ import (
"path/filepath"
"reflect"
"testing"
+ "time"
"github.com/davecgh/go-spew/spew"
"github.com/hashicorp/hcl/hcl/ast"
- "github.com/hashicorp/hcl/testhelper"
)
func TestDecode_interface(t *testing.T) {
@@ -64,7 +64,7 @@ func TestDecode_interface(t *testing.T) {
"qux": "back\\slash",
"bar": "new\nline",
"qax": `slash\:colon`,
- "nested": `${HH\:mm\:ss}`,
+ "nested": `${HH\\:mm\\:ss}`,
"nestedquotes": `${"\"stringwrappedinquotes\""}`,
},
},
@@ -73,6 +73,7 @@ func TestDecode_interface(t *testing.T) {
false,
map[string]interface{}{
"a": 1.02,
+ "b": 2,
},
},
{
@@ -82,9 +83,13 @@ func TestDecode_interface(t *testing.T) {
},
{
"multiline_literal.hcl",
+ true,
+ nil,
+ },
+ {
+ "multiline_literal_with_hil.hcl",
false,
- map[string]interface{}{"multiline_literal": testhelper.Unix2dos(`hello
- world`)},
+ map[string]interface{}{"multiline_literal_with_hil": "${hello\n world}"},
},
{
"multiline_no_marker.hcl",
@@ -94,22 +99,22 @@ func TestDecode_interface(t *testing.T) {
{
"multiline.hcl",
false,
- map[string]interface{}{"foo": testhelper.Unix2dos("bar\nbaz\n")},
+ map[string]interface{}{"foo": "bar\nbaz\n"},
},
{
"multiline_indented.hcl",
false,
- map[string]interface{}{"foo": testhelper.Unix2dos(" bar\n baz\n")},
+ map[string]interface{}{"foo": " bar\n baz\n"},
},
{
"multiline_no_hanging_indent.hcl",
false,
- map[string]interface{}{"foo": testhelper.Unix2dos(" baz\n bar\n foo\n")},
+ map[string]interface{}{"foo": " baz\n bar\n foo\n"},
},
{
"multiline_no_eof.hcl",
false,
- map[string]interface{}{"foo": testhelper.Unix2dos("bar\nbaz\n"), "key": "value"},
+ map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"},
},
{
"multiline.json",
@@ -201,6 +206,16 @@ func TestDecode_interface(t *testing.T) {
},
},
},
+ {
+ "list_of_lists.hcl",
+ false,
+ map[string]interface{}{
+ "foo": []interface{}{
+ []interface{}{"foo"},
+ []interface{}{"bar"},
+ },
+ },
+ },
{
"list_of_maps.hcl",
false,
@@ -274,6 +289,14 @@ func TestDecode_interface(t *testing.T) {
},
},
+ {
+ "structure_list_empty.json",
+ false,
+ map[string]interface{}{
+ "foo": []interface{}{},
+ },
+ },
+
{
"nested_block_comment.hcl",
false,
@@ -357,34 +380,72 @@ func TestDecode_interface(t *testing.T) {
true,
nil,
},
+
+ {
+ "escape_backslash.hcl",
+ false,
+ map[string]interface{}{
+ "output": []map[string]interface{}{
+ map[string]interface{}{
+ "one": `${replace(var.sub_domain, ".", "\\.")}`,
+ "two": `${replace(var.sub_domain, ".", "\\\\.")}`,
+ "many": `${replace(var.sub_domain, ".", "\\\\\\\\.")}`,
+ },
+ },
+ },
+ },
+
+ {
+ "git_crypt.hcl",
+ true,
+ nil,
+ },
+
+ {
+ "object_with_bool.hcl",
+ false,
+ map[string]interface{}{
+ "path": []map[string]interface{}{
+ map[string]interface{}{
+ "policy": "write",
+ "permissions": []map[string]interface{}{
+ map[string]interface{}{
+ "bool": []interface{}{false},
+ },
+ },
+ },
+ },
+ },
+ },
}
for _, tc := range cases {
- t.Logf("Testing: %s", tc.File)
- d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
+ t.Run(tc.File, func(t *testing.T) {
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
- var out interface{}
- err = Decode(&out, string(d))
- if (err != nil) != tc.Err {
- t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
- }
+ var out interface{}
+ err = Decode(&out, string(d))
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
+ }
- if !reflect.DeepEqual(out, tc.Out) {
- t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
- }
+ if !reflect.DeepEqual(out, tc.Out) {
+ t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
+ }
- var v interface{}
- err = Unmarshal(d, &v)
- if (err != nil) != tc.Err {
- t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
- }
+ var v interface{}
+ err = Unmarshal(d, &v)
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
+ }
- if !reflect.DeepEqual(v, tc.Out) {
- t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
- }
+ if !reflect.DeepEqual(v, tc.Out) {
+ t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
+ }
+ })
}
}
@@ -748,6 +809,59 @@ func TestDecode_intString(t *testing.T) {
}
}
+func TestDecode_float32(t *testing.T) {
+ var value struct {
+ A float32 `hcl:"a"`
+ B float32 `hcl:"b"`
+ }
+
+ err := Decode(&value, testReadFile(t, "float.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if got, want := value.A, float32(1.02); got != want {
+ t.Fatalf("wrong result %#v; want %#v", got, want)
+ }
+ if got, want := value.B, float32(2); got != want {
+ t.Fatalf("wrong result %#v; want %#v", got, want)
+ }
+}
+
+func TestDecode_float64(t *testing.T) {
+ var value struct {
+ A float64 `hcl:"a"`
+ B float64 `hcl:"b"`
+ }
+
+ err := Decode(&value, testReadFile(t, "float.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if got, want := value.A, float64(1.02); got != want {
+ t.Fatalf("wrong result %#v; want %#v", got, want)
+ }
+ if got, want := value.B, float64(2); got != want {
+ t.Fatalf("wrong result %#v; want %#v", got, want)
+ }
+}
+
+func TestDecode_intStringAliased(t *testing.T) {
+ var value struct {
+ Count time.Duration
+ }
+
+ err := Decode(&value, testReadFile(t, "basic_int_string.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if value.Count != time.Duration(3) {
+ t.Fatalf("bad: %#v", value.Count)
+ }
+}
+
func TestDecode_Node(t *testing.T) {
// given
var value struct {
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
index ea3734f..6e5ef65 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -156,7 +156,8 @@ func (o *ObjectKey) Pos() token.Pos {
type LiteralType struct {
Token token.Token
- // associated line comment, only when used in a list
+ // comment types, only used when in a list
+ LeadComment *CommentGroup
LineComment *CommentGroup
}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
index 85e536d..2380d71 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
@@ -58,7 +58,7 @@ func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts
res, err := printer.Format(src)
if err != nil {
- return err
+ return fmt.Errorf("In %s: %s", filename, err)
}
if !bytes.Equal(src, res) {
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
index f46ed4c..098e1bc 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -3,6 +3,7 @@
package parser
import (
+ "bytes"
"errors"
"fmt"
"strings"
@@ -36,6 +37,11 @@ func newParser(src []byte) *Parser {
// Parse returns the fully parsed source and returns the abstract syntax tree.
func Parse(src []byte) (*ast.File, error) {
+ // normalize all line endings
+// since the scanner and output only work with "\n" line endings, we would
+// otherwise end up with dangling "\r" characters in the parsed data.
+ src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
p := newParser(src)
return p.Parse()
}
@@ -50,7 +56,7 @@ func (p *Parser) Parse() (*ast.File, error) {
scerr = &PosError{Pos: pos, Err: errors.New(msg)}
}
- f.Node, err = p.objectList()
+ f.Node, err = p.objectList(false)
if scerr != nil {
return nil, scerr
}
@@ -62,11 +68,23 @@ func (p *Parser) Parse() (*ast.File, error) {
return f, nil
}
-func (p *Parser) objectList() (*ast.ObjectList, error) {
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter" obj" tells this whether to we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
defer un(trace(p, "ParseObjectList"))
node := &ast.ObjectList{}
for {
+ if obj {
+ tok := p.scan()
+ p.unscan()
+ if tok.Type == token.RBRACE {
+ break
+ }
+ }
+
n, err := p.objectItem()
if err == errEofToken {
break // we are finished
@@ -179,9 +197,12 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
keyStr = append(keyStr, k.Token.Text)
}
- return nil, fmt.Errorf(
- "key '%s' expected start of object ('{') or assignment ('=')",
- strings.Join(keyStr, " "))
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf(
+ "key '%s' expected start of object ('{') or assignment ('=')",
+ strings.Join(keyStr, " ")),
+ }
}
// do a look-ahead for line comment
@@ -244,7 +265,10 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
keyCount++
keys = append(keys, &ast.ObjectKey{Token: p.tok})
case token.ILLEGAL:
- fmt.Println("illegal")
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("illegal character"),
+ }
default:
return keys, &PosError{
Pos: p.tok.Pos,
@@ -288,7 +312,7 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
Lbrace: p.tok.Pos,
}
- l, err := p.objectList()
+ l, err := p.objectList(true)
// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
 // not an RBRACE, it's a syntax error and we just return it.
@@ -296,9 +320,12 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
return nil, err
}
- // If there is no error, we should be at a RBRACE to end the object
- if p.tok.Type != token.RBRACE {
- return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type)
+ // No error, scan and expect the ending to be a brace
+ if tok := p.scan(); tok.Type != token.RBRACE {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+ }
}
o.List = l
@@ -331,12 +358,18 @@ func (p *Parser) listType() (*ast.ListType, error) {
}
}
switch tok.Type {
- case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+ case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
node, err := p.literalType()
if err != nil {
return nil, err
}
+ // If there is a lead comment, apply it
+ if p.leadComment != nil {
+ node.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
l.Add(node)
needComma = true
case token.COMMA:
@@ -367,12 +400,16 @@ func (p *Parser) listType() (*ast.ListType, error) {
}
l.Add(node)
needComma = true
- case token.BOOL:
- // TODO(arslan) should we support? not supported by HCL yet
case token.LBRACK:
- // TODO(arslan) should we support nested lists? Even though it's
- // written in README of HCL, it's not a part of the grammar
- // (not defined in parse.y)
+ node, err := p.listType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse list within list: %s", err),
+ }
+ }
+ l.Add(node)
case token.RBRACK:
// finished
l.Rbrack = p.tok.Pos
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go
index 2756d06..2702122 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go
@@ -59,12 +59,12 @@ func TestListType(t *testing.T) {
[]token.Type{token.NUMBER, token.STRING},
},
{
- `foo = []`,
- []token.Type{},
+ `foo = [false]`,
+ []token.Type{token.BOOL},
},
{
- `foo = ["123", 123]`,
- []token.Type{token.STRING, token.NUMBER},
+ `foo = []`,
+ []token.Type{},
},
{
`foo = [1,
@@ -152,6 +152,109 @@ func TestListOfMaps_requiresComma(t *testing.T) {
}
}
+func TestListType_leadComment(t *testing.T) {
+ var literals = []struct {
+ src string
+ comment []string
+ }{
+ {
+ `foo = [
+ 1,
+ # bar
+ 2,
+ 3,
+ ]`,
+ []string{"", "# bar", ""},
+ },
+ }
+
+ for _, l := range literals {
+ p := newParser([]byte(l.src))
+ item, err := p.objectItem()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ list, ok := item.Val.(*ast.ListType)
+ if !ok {
+ t.Fatalf("node should be of type LiteralType, got: %T", item.Val)
+ }
+
+ if len(list.List) != len(l.comment) {
+ t.Fatalf("bad: %d", len(list.List))
+ }
+
+ for i, li := range list.List {
+ lt := li.(*ast.LiteralType)
+ comment := l.comment[i]
+
+ if (lt.LeadComment == nil) != (comment == "") {
+ t.Fatalf("bad: %#v", lt)
+ }
+
+ if comment == "" {
+ continue
+ }
+
+ actual := lt.LeadComment.List[0].Text
+ if actual != comment {
+ t.Fatalf("bad: %q %q", actual, comment)
+ }
+ }
+ }
+}
+
+func TestListType_lineComment(t *testing.T) {
+ var literals = []struct {
+ src string
+ comment []string
+ }{
+ {
+ `foo = [
+ 1,
+ 2, # bar
+ 3,
+ ]`,
+ []string{"", "# bar", ""},
+ },
+ }
+
+ for _, l := range literals {
+ p := newParser([]byte(l.src))
+ item, err := p.objectItem()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ list, ok := item.Val.(*ast.ListType)
+ if !ok {
+ t.Fatalf("node should be of type LiteralType, got: %T", item.Val)
+ }
+
+ if len(list.List) != len(l.comment) {
+ t.Fatalf("bad: %d", len(list.List))
+ }
+
+ for i, li := range list.List {
+ lt := li.(*ast.LiteralType)
+ comment := l.comment[i]
+
+ if (lt.LineComment == nil) != (comment == "") {
+ t.Fatalf("bad: %s", lt)
+ }
+
+ if comment == "" {
+ continue
+ }
+
+ actual := lt.LineComment.List[0].Text
+ if actual != comment {
+ t.Fatalf("bad: %q %q", actual, comment)
+ }
+ }
+ }
+}
+
func TestObjectType(t *testing.T) {
var literals = []struct {
src string
@@ -204,6 +307,8 @@ func TestObjectType(t *testing.T) {
}
for _, l := range literals {
+ t.Logf("Source: %s", l.src)
+
p := newParser([]byte(l.src))
// p.enableTrace = true
item, err := p.objectItem()
@@ -282,6 +387,30 @@ func TestObjectKey(t *testing.T) {
}
}
+func TestCommentGroup(t *testing.T) {
+ var cases = []struct {
+ src string
+ groups int
+ }{
+ {"# Hello\n# World", 1},
+ {"# Hello\r\n# Windows", 1},
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.src, func(t *testing.T) {
+ p := newParser([]byte(tc.src))
+ file, err := p.Parse()
+ if err != nil {
+ t.Fatalf("parse error: %s", err)
+ }
+
+ if len(file.Comments) != tc.groups {
+ t.Fatalf("bad: %#v", file.Comments)
+ }
+ })
+ }
+}
+
// Official HCL tests
func TestParse(t *testing.T) {
cases := []struct {
@@ -296,6 +425,10 @@ func TestParse(t *testing.T) {
"comment.hcl",
false,
},
+ {
+ "comment_crlf.hcl",
+ false,
+ },
{
"comment_lastline.hcl",
false,
@@ -336,6 +469,10 @@ func TestParse(t *testing.T) {
"complex.hcl",
false,
},
+ {
+ "complex_crlf.hcl",
+ false,
+ },
{
"types.hcl",
false,
@@ -368,20 +505,38 @@ func TestParse(t *testing.T) {
"object_key_without_value.hcl",
true,
},
+ {
+ "object_key_assign_without_value.hcl",
+ true,
+ },
+ {
+ "object_key_assign_without_value2.hcl",
+ true,
+ },
+ {
+ "object_key_assign_without_value3.hcl",
+ true,
+ },
+ {
+ "git_crypt.hcl",
+ true,
+ },
}
const fixtureDir = "./test-fixtures"
for _, tc := range cases {
- d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
- if err != nil {
- t.Fatalf("err: %s", err)
- }
+ t.Run(tc.Name, func(t *testing.T) {
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
- _, err = Parse(d)
- if (err != nil) != tc.Err {
- t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
- }
+ v, err := Parse(d)
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s\n\nAST: %#v", tc.Name, err, v)
+ }
+ })
}
}
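
A sketch of the behavior the new TestCommentGroup subtests pin down: consecutive comment lines form a single ast.CommentGroup regardless of line-ending style (assumes the vendored parser package):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// Two comment lines separated by a CRLF line ending should still
	// be collected into one comment group.
	file, err := parser.Parse([]byte("# Hello\r\n# World"))
	if err != nil {
		panic(err)
	}
	fmt.Println("groups:", len(file.Comments)) // expected: 1
}
```

The switch to t.Run subtests also means a single fixture can be targeted from the command line, e.g. with `go test -run 'TestParse/comment_crlf.hcl'`.
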
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment.hcl
index 1ff7f29..e32be87 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment.hcl
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment.hcl
@@ -1,15 +1,15 @@
-// Foo
-
-/* Bar */
-
-/*
-/*
-Baz
-*/
-
-# Another
-
-# Multiple
-# Lines
-
-foo = "bar"
+// Foo
+
+/* Bar */
+
+/*
+/*
+Baz
+*/
+
+# Another
+
+# Multiple
+# Lines
+
+foo = "bar"
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment_crlf.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment_crlf.hcl
new file mode 100644
index 0000000..1ff7f29
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment_crlf.hcl
@@ -0,0 +1,15 @@
+// Foo
+
+/* Bar */
+
+/*
+/*
+Baz
+*/
+
+# Another
+
+# Multiple
+# Lines
+
+foo = "bar"
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/complex_crlf.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/complex_crlf.hcl
new file mode 100644
index 0000000..9b071d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/complex_crlf.hcl
@@ -0,0 +1,42 @@
+variable "foo" {
+ default = "bar"
+ description = "bar"
+}
+
+variable "groups" { }
+
+provider "aws" {
+ access_key = "foo"
+ secret_key = "bar"
+}
+
+provider "do" {
+ api_key = "${var.foo}"
+}
+
+resource "aws_security_group" "firewall" {
+ count = 5
+}
+
+resource aws_instance "web" {
+ ami = "${var.foo}"
+ security_groups = [
+ "foo",
+ "${aws_security_group.firewall.foo}",
+ "${element(split(\",\", var.groups)}",
+ ]
+ network_interface = {
+ device_index = 0
+ description = "Main network interface"
+ }
+}
+
+resource "aws_instance" "db" {
+ security_groups = "${aws_security_group.firewall.*.id}"
+ VPC = "foo"
+ depends_on = ["aws_instance.web"]
+}
+
+output "web_ip" {
+ value = "${aws_instance.web.private_ip}"
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/git_crypt.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/git_crypt.hcl
new file mode 100644
index 0000000..f691948
Binary files /dev/null and b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/git_crypt.hcl differ
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value.hcl
new file mode 100644
index 0000000..37a2c7a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value.hcl
@@ -0,0 +1,3 @@
+foo {
+ bar =
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value2.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value2.hcl
new file mode 100644
index 0000000..83ec5e6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value2.hcl
@@ -0,0 +1,4 @@
+foo {
+ baz = 7
+ bar =
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value3.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value3.hcl
new file mode 100644
index 0000000..21136d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value3.hcl
@@ -0,0 +1,4 @@
+foo {
+ bar =
+ baz = 7
+}
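
All three new fixtures exercise the same failure mode: a key and an assignment token with no value must be a parse error, whether the dangling assignment is alone, first, or last in the stanza. A minimal sketch (illustrative input, vendored parser package):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// `bar =` has no value, so parsing must fail.
	_, err := parser.Parse([]byte("foo {\n  bar =\n}\n"))
	fmt.Println(err) // expected: non-nil (a *parser.PosError)
}
```
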
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
index 218b56a..c896d58 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
@@ -62,6 +62,14 @@ func (p *printer) collectComments(node ast.Node) {
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
switch t := nn.(type) {
case *ast.LiteralType:
+ if t.LeadComment != nil {
+ for _, comment := range t.LeadComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+
if t.LineComment != nil {
for _, comment := range t.LineComment.List {
if _, ok := standaloneComments[comment.Pos()]; ok {
@@ -95,7 +103,6 @@ func (p *printer) collectComments(node ast.Node) {
}
sort.Sort(ByPosition(p.standaloneComments))
-
}
// output converts the given node into printable HCL output and returns it.
@@ -104,35 +111,60 @@ func (p *printer) output(n interface{}) []byte {
switch t := n.(type) {
case *ast.File:
+ // File doesn't trace so we add the tracing here
+ defer un(trace(p, "File"))
return p.output(t.Node)
case *ast.ObjectList:
- var index int
- var nextItem token.Pos
- var commented bool
- for {
- // TODO(arslan): refactor below comment printing, we have the same in objectType
- for _, c := range p.standaloneComments {
- for _, comment := range c.List {
- if index != len(t.Items) {
- nextItem = t.Items[index].Pos()
- } else {
- nextItem = token.Pos{Offset: infinity, Line: infinity}
- }
+ defer un(trace(p, "ObjectList"))
+ var index int
+ for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is at "infinity"
+ var nextItem token.Pos
+ if index != len(t.Items) {
+ nextItem = t.Items[index].Pos()
+ } else {
+ nextItem = token.Pos{Offset: infinity, Line: infinity}
+ }
+
+			// Go through the standalone comments in the file and print out
+			// the ones that belong before this object item.
+ for _, c := range p.standaloneComments {
+ // Go through all the comments in the group. The group
+ // should be printed together, not separated by double newlines.
+ printed := false
+ newlinePrinted := false
+ for _, comment := range c.List {
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
// if we hit the end add newlines so we can print the comment
- if index == len(t.Items) {
+					// We don't do this if prev is invalid, which means we're at
+					// the beginning of the file, since the first comment should
+					// be at the first line.
+ if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
buf.Write([]byte{newline, newline})
+ newlinePrinted = true
}
+ // Write the actual comment.
buf.WriteString(comment.Text)
-
buf.WriteByte(newline)
- if index != len(t.Items) {
- buf.WriteByte(newline)
- }
+
+ // Set printed to true to note that we printed something
+ printed = true
}
}
+
+ // If we're not at the last item, write a new line so
+ // that there is a newline separating this comment from
+ // the next object.
+ if printed && index != len(t.Items) {
+ buf.WriteByte(newline)
+ }
}
if index == len(t.Items) {
@@ -140,8 +172,29 @@ func (p *printer) output(n interface{}) []byte {
}
buf.Write(p.output(t.Items[index]))
- if !commented && index != len(t.Items)-1 {
- buf.Write([]byte{newline, newline})
+ if index != len(t.Items)-1 {
+ // Always write a newline to separate us from the next item
+ buf.WriteByte(newline)
+
+ // Need to determine if we're going to separate the next item
+ // with a blank line. The logic here is simple, though there
+ // are a few conditions:
+ //
+ // 1. The next object is more than one line away anyways,
+ // so we need an empty line.
+ //
+ // 2. The next object is not a "single line" object, so
+ // we need an empty line.
+ //
+ // 3. This current object is not a single line object,
+ // so we need an empty line.
+ current := t.Items[index]
+ next := t.Items[index+1]
+ if next.Pos().Line != t.Items[index].Pos().Line+1 ||
+ !p.isSingleLineObject(next) ||
+ !p.isSingleLineObject(current) {
+ buf.WriteByte(newline)
+ }
}
index++
}
@@ -165,7 +218,8 @@ func (p *printer) output(n interface{}) []byte {
func (p *printer) literalType(lit *ast.LiteralType) []byte {
result := []byte(lit.Token.Text)
- if lit.Token.Type == token.HEREDOC {
+ switch lit.Token.Type {
+ case token.HEREDOC:
// Clear the trailing newline from heredocs
if result[len(result)-1] == '\n' {
result = result[:len(result)-1]
@@ -173,6 +227,12 @@ func (p *printer) literalType(lit *ast.LiteralType) []byte {
// Poison lines 2+ so that we don't indent them
result = p.heredocIndent(result)
+ case token.STRING:
+ // If this is a multiline string, poison lines 2+ so we don't
+ // indent them.
+ if bytes.IndexRune(result, '\n') >= 0 {
+ result = p.heredocIndent(result)
+ }
}
return result
@@ -226,17 +286,24 @@ func (p *printer) objectType(o *ast.ObjectType) []byte {
var nextItem token.Pos
var commented, newlinePrinted bool
for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is the closing brace
+ if index != len(o.List.Items) {
+ nextItem = o.List.Items[index].Pos()
+ } else {
+ nextItem = o.Rbrace
+ }
- // Print stand alone comments
+		// Go through the standalone comments in the file and print out
+		// the ones that belong before this object item.
for _, c := range p.standaloneComments {
+ printed := false
+ var lastCommentPos token.Pos
for _, comment := range c.List {
- // if we hit the end, last item should be the brace
- if index != len(o.List.Items) {
- nextItem = o.List.Items[index].Pos()
- } else {
- nextItem = o.Rbrace
- }
-
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
// If there are standalone comments and the initial newline has not
// been printed yet, do it now.
@@ -251,11 +318,33 @@ func (p *printer) objectType(o *ast.ObjectType) []byte {
buf.WriteByte(newline)
}
- buf.Write(p.indent([]byte(comment.Text)))
+ // Store this position
+ lastCommentPos = comment.Pos()
+
+ // output the comment itself
+ buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
+
+ // Set printed to true to note that we printed something
+ printed = true
+
+ /*
+ if index != len(o.List.Items) {
+ buf.WriteByte(newline) // do not print on the end
+ }
+ */
+ }
+ }
+
+ // Stuff to do if we had comments
+ if printed {
+ // Always write a newline
+ buf.WriteByte(newline)
+
+ // If there is another item in the object and our comment
+ // didn't hug it directly, then make sure there is a blank
+ // line separating them.
+ if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
buf.WriteByte(newline)
- if index != len(o.List.Items) {
- buf.WriteByte(newline) // do not print on the end
- }
}
}
}
@@ -435,16 +524,54 @@ func (p *printer) list(l *ast.ListType) []byte {
}
insertSpaceBeforeItem := false
+ lastHadLeadComment := false
for i, item := range l.List {
+ // Keep track of whether this item is a heredoc since that has
+ // unique behavior.
+ heredoc := false
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ heredoc = true
+ }
+
if item.Pos().Line != l.Lbrack.Line {
// multiline list, add newline before we add each item
buf.WriteByte(newline)
insertSpaceBeforeItem = false
+
+ // If we have a lead comment, then we want to write that first
+ leadComment := false
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
+ leadComment = true
+
+ // If this isn't the first item and the previous element
+ // didn't have a lead comment, then we need to add an extra
+ // newline to properly space things out. If it did have a
+ // lead comment previously then this would be done
+ // automatically.
+ if i > 0 && !lastHadLeadComment {
+ buf.WriteByte(newline)
+ }
+
+ for _, comment := range lit.LeadComment.List {
+ buf.Write(p.indent([]byte(comment.Text)))
+ buf.WriteByte(newline)
+ }
+ }
+
// also indent each line
val := p.output(item)
curLen := len(val)
buf.Write(p.indent(val))
- buf.WriteString(",")
+
+ // if this item is a heredoc, then we output the comma on
+			// the next line. This is the only case where this happens.
+ comma := []byte{','}
+ if heredoc {
+ buf.WriteByte(newline)
+ comma = p.indent(comma)
+ }
+
+ buf.Write(comma)
if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
// if the next item doesn't have any comments, do not align
@@ -458,19 +585,51 @@ func (p *printer) list(l *ast.ListType) []byte {
}
}
- if i == len(l.List)-1 {
+ lastItem := i == len(l.List)-1
+ if lastItem {
buf.WriteByte(newline)
}
+
+ if leadComment && !lastItem {
+ buf.WriteByte(newline)
+ }
+
+ lastHadLeadComment = leadComment
} else {
if insertSpaceBeforeItem {
buf.WriteByte(blank)
insertSpaceBeforeItem = false
}
- buf.Write(p.output(item))
+
+ // Output the item itself
+ // also indent each line
+ val := p.output(item)
+ curLen := len(val)
+ buf.Write(val)
+
+ // If this is a heredoc item we always have to output a newline
+ // so that it parses properly.
+ if heredoc {
+ buf.WriteByte(newline)
+ }
+
+ // If this isn't the last element, write a comma.
if i != len(l.List)-1 {
buf.WriteString(",")
insertSpaceBeforeItem = true
}
+
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+ // if the next item doesn't have any comments, do not align
+ buf.WriteByte(blank) // align one space
+ for i := 0; i < longestLine-curLen; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range lit.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
}
}
@@ -547,6 +706,36 @@ func (p *printer) heredocIndent(buf []byte) []byte {
return res
}
+// isSingleLineObject tells whether the given object item is a single
+// line object such as "obj {}".
+//
+// A single line object:
+//
+//  * has no lead comments (lead comments force a multi-line form)
+// * has no assignment
+// * has no values in the stanza (within {})
+//
+func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
+ // If there is a lead comment, can't be one line
+ if val.LeadComment != nil {
+ return false
+ }
+
+ // If there is assignment, we always break by line
+ if val.Assign.IsValid() {
+ return false
+ }
+
+	// If it isn't an object type, then it's not a single line object
+ ot, ok := val.Val.(*ast.ObjectType)
+ if !ok {
+ return false
+ }
+
+ // If the object has no items, it is single line!
+ return len(ot.List.Items) == 0
+}
+
func lines(txt string) int {
endline := 1
for i := 0; i < len(txt); i++ {
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
index a296fc8..6617ab8 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
@@ -62,6 +62,5 @@ func Format(src []byte) ([]byte, error) {
// Add trailing newline to result
buf.WriteString("\n")
-
return buf.Bytes(), nil
}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
index abb22a3..5248259 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
@@ -1,8 +1,3 @@
-// +build !windows
-// TODO(jen20): These need fixing on Windows but printer is not used right now
-// and red CI is making it harder to process other bugs, so ignore until
-// we get around to fixing them.package printer
-
package printer
import (
@@ -31,18 +26,32 @@ type entry struct {
var data = []entry{
{"complexhcl.input", "complexhcl.golden"},
{"list.input", "list.golden"},
+ {"list_comment.input", "list_comment.golden"},
{"comment.input", "comment.golden"},
+ {"comment_crlf.input", "comment.golden"},
{"comment_aligned.input", "comment_aligned.golden"},
+ {"comment_array.input", "comment_array.golden"},
+ {"comment_end_file.input", "comment_end_file.golden"},
+ {"comment_multiline_indent.input", "comment_multiline_indent.golden"},
+ {"comment_multiline_no_stanza.input", "comment_multiline_no_stanza.golden"},
+ {"comment_multiline_stanza.input", "comment_multiline_stanza.golden"},
+ {"comment_newline.input", "comment_newline.golden"},
+ {"comment_object_multi.input", "comment_object_multi.golden"},
{"comment_standalone.input", "comment_standalone.golden"},
{"empty_block.input", "empty_block.golden"},
{"list_of_objects.input", "list_of_objects.golden"},
+ {"multiline_string.input", "multiline_string.golden"},
+ {"object_singleline.input", "object_singleline.golden"},
+ {"object_with_heredoc.input", "object_with_heredoc.golden"},
}
func TestFiles(t *testing.T) {
for _, e := range data {
source := filepath.Join(dataDir, e.source)
golden := filepath.Join(dataDir, e.golden)
- check(t, source, golden)
+ t.Run(e.source, func(t *testing.T) {
+ check(t, source, golden)
+ })
}
}
@@ -96,8 +105,8 @@ func diff(aname, bname string, a, b []byte) error {
for i := 0; i < len(a) && i < len(b); i++ {
ch := a[i]
if ch != b[i] {
- fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs))
- fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs))
+ fmt.Fprintf(&buf, "\n%s:%d:%d: %q", aname, line, i-offs+1, lineAt(a, offs))
+ fmt.Fprintf(&buf, "\n%s:%d:%d: %q", bname, line, i-offs+1, lineAt(b, offs))
fmt.Fprintf(&buf, "\n\n")
break
}
@@ -124,7 +133,7 @@ func format(src []byte) ([]byte, error) {
// make sure formatted output is syntactically correct
if _, err := parser.Parse(formatted); err != nil {
- return nil, fmt.Errorf("parse: %s\n%s", err, src)
+ return nil, fmt.Errorf("parse: %s\n%s", err, formatted)
}
return formatted, nil
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden
new file mode 100644
index 0000000..e778eaf
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden
@@ -0,0 +1,13 @@
+banana = [
+ # I really want to comment this item in the array.
+ "a",
+
+ # This as well
+ "b",
+
+ "c", # And C
+ "d",
+
+ # And another
+ "e",
+]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input
new file mode 100644
index 0000000..e778eaf
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input
@@ -0,0 +1,13 @@
+banana = [
+ # I really want to comment this item in the array.
+ "a",
+
+ # This as well
+ "b",
+
+ "c", # And C
+ "d",
+
+ # And another
+ "e",
+]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input
new file mode 100644
index 0000000..5d27206
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input
@@ -0,0 +1,37 @@
+// A standalone comment is a comment which is not attached to any kind of node
+
+ // This comes from Terraform, as a test
+variable "foo" {
+ # Standalone comment should be still here
+
+ default = "bar"
+ description = "bar" # yooo
+}
+
+/* This is a multi line standalone
+comment*/
+
+
+// fatih arslan
+/* This is a developer test
+account and a multine comment */
+developer = [ "fatih", "arslan"] // fatih arslan
+
+# One line here
+numbers = [1,2] // another line here
+
+ # Another comment
+variable = {
+ description = "bar" # another yooo
+ foo {
+ # Nested standalone
+
+ bar = "fatih"
+ }
+}
+
+ // lead comment
+foo {
+ bar = "fatih" // line comment 2
+} // line comment 3
+
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden
new file mode 100644
index 0000000..dbeae36
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden
@@ -0,0 +1,6 @@
+resource "blah" "blah" {}
+
+//
+//
+//
+
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input
new file mode 100644
index 0000000..68c4c28
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input
@@ -0,0 +1,5 @@
+resource "blah" "blah" {}
+
+//
+//
+//
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden
new file mode 100644
index 0000000..74c4ccd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden
@@ -0,0 +1,12 @@
+resource "provider" "resource" {
+ /*
+  SPACE_SENSITIVE_CODE = <<EOF

diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
--- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
+	// If we see a null character with data left, then that is an error
+	if ch == '\x00' && s.buf.Len() > 0 {
+ s.err("unexpected null character (0x00)")
+ return eof
+ }
+
// debug
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
return ch
@@ -474,7 +480,7 @@ func (s *Scanner) scanString() {
// read character after quote
ch := s.next()
- if ch < 0 || ch == eof {
+ if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
s.err("literal not terminated")
return
}
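
Taken together, the scanner changes reject NUL bytes in the input (the case behind the git_crypt.hcl fixture) and treat a bare newline as terminating a plain quoted string, while still allowing newlines inside ${} interpolations. A minimal sketch (assumes the vendored scanner package):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

// scanAll runs the scanner over src and returns the number of errors.
func scanAll(src string) int {
	s := scanner.New([]byte(src))
	for s.Scan().Type != token.EOF {
	}
	return s.ErrorCount
}

func main() {
	fmt.Println(scanAll("foo = \"bar\nbaz\""))   // > 0: newline ends the literal
	fmt.Println(scanAll("foo = \"${\nvar.x}\"")) // 0: newline inside ${} is allowed
}
```
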
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go
index b167811..4f2c9cb 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go
@@ -363,7 +363,7 @@ func TestRealExample(t *testing.T) {
provider "aws" {
access_key = "foo"
- secret_key = "bar"
+ secret_key = "${replace(var.foo, ".", "\\.")}"
}
resource "aws_security_group" "firewall" {
@@ -416,7 +416,7 @@ EOF
{token.STRING, `"foo"`},
{token.IDENT, `secret_key`},
{token.ASSIGN, `=`},
- {token.STRING, `"bar"`},
+ {token.STRING, `"${replace(var.foo, ".", "\\.")}"`},
{token.RBRACE, `}`},
{token.IDENT, `resource`},
{token.STRING, `"aws_security_group"`},
@@ -476,6 +476,36 @@ EOF
}
+func TestScan_crlf(t *testing.T) {
+ complexHCL := "foo {\r\n bar = \"baz\"\r\n}\r\n"
+
+ literals := []struct {
+ tokenType token.Type
+ literal string
+ }{
+ {token.IDENT, `foo`},
+ {token.LBRACE, `{`},
+ {token.IDENT, `bar`},
+ {token.ASSIGN, `=`},
+ {token.STRING, `"baz"`},
+ {token.RBRACE, `}`},
+ {token.EOF, ``},
+ }
+
+ s := New([]byte(complexHCL))
+ for _, l := range literals {
+ tok := s.Scan()
+ if l.tokenType != tok.Type {
+ t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
+ }
+
+ if l.literal != tok.Text {
+ t.Errorf("got:\n%+v\n%s\n want:\n%+v\n%s\n", []byte(tok.String()), tok, []byte(l.literal), l.literal)
+ }
+ }
+
+}
+
func TestError(t *testing.T) {
testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
@@ -494,7 +524,8 @@ func TestError(t *testing.T) {
testError(t, `"`, "1:2", "literal not terminated", token.STRING)
testError(t, `"abc`, "1:5", "literal not terminated", token.STRING)
- testError(t, `"abc`+"\n", "2:1", "literal not terminated", token.STRING)
+ testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING)
+ testError(t, `"${abc`+"\n", "2:1", "literal not terminated", token.STRING)
testError(t, `/*/`, "1:4", "comment not terminated", token.COMMENT)
testError(t, `/foo`, "1:1", "expected '/' for comment", token.COMMENT)
}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
index 956c899..5f981ea 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -27,6 +27,9 @@ func Unquote(s string) (t string, err error) {
if quote != '"' {
return "", ErrSyntax
}
+ if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+ return "", ErrSyntax
+ }
// Is it trivial? Avoid allocation.
if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
@@ -46,7 +49,7 @@ func Unquote(s string) (t string, err error) {
for len(s) > 0 {
// If we're starting a '${}' then let it through un-unquoted.
// Specifically: we don't unquote any characters within the `${}`
- // section, except for escaped backslashes, which we handle specifically.
+ // section.
if s[0] == '$' && len(s) > 1 && s[1] == '{' {
buf = append(buf, '$', '{')
s = s[2:]
@@ -61,16 +64,6 @@ func Unquote(s string) (t string, err error) {
s = s[size:]
- // We special case escaped backslashes in interpolations, converting
- // them to their unescaped equivalents.
- if r == '\\' {
- q, _ := utf8.DecodeRuneInString(s)
- switch q {
- case '\\':
- continue
- }
- }
-
n := utf8.EncodeRune(runeTmp[:], r)
buf = append(buf, runeTmp[:n]...)
@@ -94,6 +87,10 @@ func Unquote(s string) (t string, err error) {
}
}
+ if s[0] == '\n' {
+ return "", ErrSyntax
+ }
+
c, multibyte, ss, err := unquoteChar(s, quote)
if err != nil {
return "", err
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go
index af2d848..65be375 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go
@@ -39,7 +39,8 @@ var unquotetests = []unQuoteTest{
{`"${file("\"foo\"")}"`, `${file("\"foo\"")}`},
{`"echo ${var.region}${element(split(",",var.zones),0)}"`,
`echo ${var.region}${element(split(",",var.zones),0)}`},
- {`"${HH\\:mm\\:ss}"`, `${HH\:mm\:ss}`},
+ {`"${HH\\:mm\\:ss}"`, `${HH\\:mm\\:ss}`},
+ {`"${\n}"`, `${\n}`},
}
var misquoted = []string{
@@ -65,9 +66,12 @@ var misquoted = []string{
"`\"",
`"\'"`,
`'\"'`,
+ "\"\n\"",
+ "\"\\n\n\"",
"'\n'",
`"${"`,
`"${foo{}"`,
+ "\"${foo}\n\"",
}
func TestUnquote(t *testing.T) {
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go b/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go
index 932951c..e4b4af2 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go
@@ -51,6 +51,12 @@ func TestTokenValue(t *testing.T) {
{Token{Type: STRING, Text: `"foo"`}, "foo"},
{Token{Type: STRING, Text: `"foo\nbar"`}, "foo\nbar"},
{Token{Type: STRING, Text: `"${file("foo")}"`}, `${file("foo")}`},
+ {
+ Token{
+ Type: STRING,
+ Text: `"${replace("foo", ".", "\\.")}"`,
+ },
+ `${replace("foo", ".", "\\.")}`},
		{Token{Type: HEREDOC, Text: "<<EOF\nfoo\nbar\nEOF"}, "foo\nbar"},

diff --git a/vendor/github.com/hashicorp/vault/.github/ISSUE_TEMPLATE.md b/vendor/github.com/hashicorp/vault/.github/ISSUE_TEMPLATE.md
new file mode 100644
+
+
+
+
+**Environment:**
+
+
+* Vault Version:
+* Operating System/Architecture:
+
+**Vault Config File:**
+
+
+**Startup Log Output:**
+
+
+**Expected Behavior:**
+
+
+**Actual Behavior:**
+
+
+**Steps to Reproduce:**
+
+
+**Important Factoids:**
+
+
+**References:**
+
diff --git a/vendor/github.com/hashicorp/vault/.gitignore b/vendor/github.com/hashicorp/vault/.gitignore
index 89cb1b7..dbd3bc3 100644
--- a/vendor/github.com/hashicorp/vault/.gitignore
+++ b/vendor/github.com/hashicorp/vault/.gitignore
@@ -46,6 +46,7 @@ Vagrantfile
.DS_Store
.idea
+.vscode
dist/*
diff --git a/vendor/github.com/hashicorp/vault/.hooks/pre-push b/vendor/github.com/hashicorp/vault/.hooks/pre-push
new file mode 100755
index 0000000..ac56a48
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/.hooks/pre-push
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+remote="$1"
+
+if [ "$remote" = "enterprise" ]; then
+ exit 0
+fi
+
+if [ -f version/version_ent.go ]; then
+ echo "Found enterprise version file while pushing to oss remote"
+ exit 1
+fi
+
+exit 0
diff --git a/vendor/github.com/hashicorp/vault/.travis.yml b/vendor/github.com/hashicorp/vault/.travis.yml
index e5ad1be..aa214be 100644
--- a/vendor/github.com/hashicorp/vault/.travis.yml
+++ b/vendor/github.com/hashicorp/vault/.travis.yml
@@ -7,7 +7,7 @@ services:
- docker
go:
- - 1.8.1
+ - 1.9
matrix:
allow_failures:
@@ -20,4 +20,5 @@ branches:
script:
- make bootstrap
- - make test testrace
+ - travis_wait 75 make test
+ - travis_wait 75 make testrace
diff --git a/vendor/github.com/hashicorp/vault/CHANGELOG.md b/vendor/github.com/hashicorp/vault/CHANGELOG.md
index c721cdf..9ce00fe 100644
--- a/vendor/github.com/hashicorp/vault/CHANGELOG.md
+++ b/vendor/github.com/hashicorp/vault/CHANGELOG.md
@@ -1,4 +1,411 @@
-## 0.7.1 (Unreleased)
+## 0.8.3 (September 19th, 2017)
+
+CHANGES:
+
+ * Policy input/output standardization: For all built-in authentication
+ backends, policies can now be specified as a comma-delimited string or an
+ array if using JSON as API input; on read, policies will be returned as an
+ array; and the `default` policy will not be forcefully added to policies
+ saved in configurations. Please note that the `default` policy will continue
+   to be added to generated tokens; however, rather than backends adding
+   `default` to the given set of input policies (in some cases, and not in
+   others), the stored set will reflect the user-specified set (see the
+   sketch after this list).
+ * `sign-self-issued` modifies Issuer in generated certificates: In 0.8.2 the
+ endpoint would not modify the Issuer in the generated certificate, leaving
+ the output self-issued. Although theoretically valid, in practice crypto
+ stacks were unhappy validating paths containing such certs. As a result,
+ `sign-self-issued` now encodes the signing CA's Subject DN into the Issuer
+ DN of the generated certificate.
+ * `sys/raw` requires enabling: While the `sys/raw` endpoint can be extremely
+ useful in break-glass or support scenarios, it is also extremely dangerous.
+ As of now, a configuration file option `raw_storage_endpoint` must be set in
+ order to enable this API endpoint. Once set, the available functionality has
+ been enhanced slightly; it now supports listing and decrypting most of
+ Vault's core data structures, except for the encryption keyring itself.
+ * `generic` is now `kv`: To better reflect its actual use, the `generic`
+ backend is now `kv`. Using `generic` will still work for backwards
+ compatibility.
+
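
An illustrative sketch of the policy input standardization described above, using the Vault Go client from this vendor tree (the userpass path and values are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Policies can be sent as a JSON array (or a comma-delimited string);
	// reads return an array, and "default" is no longer folded into the
	// stored set, only into generated tokens.
	_, err = client.Logical().Write("auth/userpass/users/alice", map[string]interface{}{
		"password": "example-password",
		"policies": []string{"dev", "ops"},
	})
	fmt.Println(err)
}
```
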
+FEATURES:
+
+ * **GCE Support for GCP Auth**: GCE instances can now authenticate to Vault
+ using machine credentials.
+ * **Support for Kubernetes Service Account Auth**: Kubernetes Service Accounts
+   can now authenticate to Vault using JWT tokens.
+
+IMPROVEMENTS:
+
+ * configuration: Provide a config option to store Vault server's process ID
+ (PID) in a file [GH-3321]
+ * mfa (Enterprise): Add the ability to use identity metadata in username format
+ * mfa/okta (Enterprise): Add support for configuring base_url for API calls
+ * secret/pki: `sign-intermediate` will now allow specifying a `ttl` value
+ longer than the signing CA certificate's NotAfter value. [GH-3325]
+ * sys/raw: Raw storage access is now disabled by default [GH-3329]
+
+BUG FIXES:
+
+ * auth/okta: Fix regression that removed the ability to set base_url [GH-3313]
+ * core: Fix panic while loading leases at startup on ARM processors
+ [GH-3314]
+ * secret/pki: Fix `sign-self-issued` encoding the wrong subject public key
+ [GH-3325]
+
+## 0.8.2.1 (September 11th, 2017) (Enterprise Only)
+
+BUG FIXES:
+
+ * Fix an issue upgrading to 0.8.2 for Enterprise customers.
+
+## 0.8.2 (September 5th, 2017)
+
+SECURITY:
+
+* In prior versions of Vault, if authenticating via AWS IAM and requesting a
+ periodic token, the period was not properly respected. This could lead to
+ tokens expiring unexpectedly, or a token lifetime being longer than expected.
+ Upon token renewal with Vault 0.8.2 the period will be properly enforced.
+
+DEPRECATIONS/CHANGES:
+
+* `vault ssh` users should supply `-mode` and `-role` to reduce the number of
+  API calls. A future version of Vault will mark these optional values as
+ required. Failure to supply `-mode` or `-role` will result in a warning.
+* Vault plugins will first briefly run a restricted version of the plugin to
+ fetch metadata, and then lazy-load the plugin on first request to prevent
+ crash/deadlock of Vault during the unseal process. Plugins will need to be
+ built with the latest changes in order for them to run properly.
+
+FEATURES:
+
+* **Lazy Lease Loading**: On startup, Vault will now load leases from storage
+ in a lazy fashion (token checks and revocation/renewal requests still force
+ an immediate load). For larger installations this can significantly reduce
+ downtime when switching active nodes or bringing Vault up from cold start.
+* **SSH CA Login with `vault ssh`**: `vault ssh` now supports the SSH CA
+ backend for authenticating to machines. It also supports remote host key
+ verification through the SSH CA backend, if enabled.
+* **Signing of Self-Issued Certs in PKI**: The `pki` backend now supports
+ signing self-issued CA certs. This is useful when switching root CAs.
+
+IMPROVEMENTS:
+
+ * audit/file: Allow specifying `stdout` as the `file_path` to log to standard
+ output [GH-3235]
+ * auth/aws: Allow wildcards in `bound_iam_principal_id` [GH-3213]
+ * auth/okta: Compare groups case-insensitively since Okta is only
+ case-preserving [GH-3240]
+ * auth/okta: Standardize Okta configuration APIs across backends [GH-3245]
+ * cli: Add subcommand autocompletion that can be enabled with
+ `vault -autocomplete-install` [GH-3223]
+ * cli: Add ability to handle wrapped responses when using `vault auth`. What
+ is output depends on the other given flags; see the help output for that
+ command for more information. [GH-3263]
+ * core: TLS cipher suites used for cluster behavior can now be set via
+ `cluster_cipher_suites` in configuration [GH-3228]
+ * core: The `plugin_name` can now either be specified directly as part of the
+ parameter or within the `config` object when mounting a secret or auth backend
+ via `sys/mounts/:path` or `sys/auth/:path` respectively [GH-3202]
+ * core: It is now possible to update the `description` of a mount when
+ mount-tuning, although this must be done through the HTTP layer [GH-3285]
+ * secret/databases/mongo: If an EOF is encountered, attempt reconnecting and
+ retrying the operation [GH-3269]
+ * secret/pki: TTLs can now be specified as a string or an integer number of
+ seconds [GH-3270]
+ * secret/pki: Self-issued certs can now be signed via
+ `pki/root/sign-self-issued` [GH-3274]
+ * storage/gcp: Use application default credentials if they exist [GH-3248]
+
+BUG FIXES:
+
+ * auth/aws: Properly use role-set period values for IAM-derived token renewals
+ [GH-3220]
+ * auth/okta: Fix updating organization/ttl/max_ttl after initial setting
+ [GH-3236]
+ * core: Fix PROXY when underlying connection is TLS [GH-3195]
+ * core: Policy-related commands would sometimes fail to act case-insensitively
+ [GH-3210]
+ * storage/consul: Fix parsing TLS configuration when using a bare IPv6 address
+ [GH-3268]
+ * plugins: Lazy-load plugins to prevent crash/deadlock during unseal process.
+ [GH-3255]
+ * plugins: Skip mounting plugin-based secret and credential mounts when setting
+ up mounts if the plugin is no longer present in the catalog. [GH-3255]
+
+## 0.8.1 (August 16th, 2017)
+
+DEPRECATIONS/CHANGES:
+
+ * PKI Root Generation: Calling `pki/root/generate` when a CA cert/key already
+ exists will now return a `204` instead of overwriting an existing root. If
+ you want to recreate the root, first run a delete operation on `pki/root`
+ (requires `sudo` capability), then generate it again.
+
+FEATURES:
+
+ * **Oracle Secret Backend**: There is now an external plugin to support leased
+ credentials for Oracle databases (distributed separately).
+ * **GCP IAM Auth Backend**: There is now an authentication backend that allows
+ using GCP IAM credentials to retrieve Vault tokens. This is available as
+ both a plugin and built-in to Vault.
+ * **PingID Push Support for Path-Based MFA (Enterprise)**: PingID Push can
+ now be used for MFA with the new path-based MFA introduced in Vault
+ Enterprise 0.8.
+ * **Permitted DNS Domains Support in PKI**: The `pki` backend now supports
+ specifying permitted DNS domains for CA certificates, allowing you to
+ narrowly scope the set of domains for which a CA can issue or sign child
+ certificates.
+ * **Plugin Backend Reload Endpoint**: Plugin backends can now be triggered to
+ reload using the `sys/plugins/reload/backend` endpoint and providing either
+ the plugin name or the mounts to reload.
+ * **Self-Reloading Plugins**: The plugin system will now attempt to reload a
+ crashed or stopped plugin, once per request.
+
+IMPROVEMENTS:
+
+ * auth/approle: Allow array input for policies in addition to comma-delimited
+ strings [GH-3163]
+ * plugins: Send logs through Vault's logger rather than stdout [GH-3142]
+ * secret/pki: Add `pki/root` delete operation [GH-3165]
+ * secret/pki: Don't overwrite an existing root cert/key when calling generate
+ [GH-3165]
+
+BUG FIXES:
+
+ * aws: Don't prefer a nil HTTP client over an existing one [GH-3159]
+ * core: If there is an error when checking for create/update existence, return
+ 500 instead of 400 [GH-3162]
+ * secret/database: Avoid creating usernames that are too long for legacy MySQL
+ [GH-3138]
+
+## 0.8.0 (August 9th, 2017)
+
+SECURITY:
+
+ * We've added a note to the docs about the way the GitHub auth backend works
+ as it may not be readily apparent that GitHub personal access tokens, which
+ are used by the backend, can be used for unauthorized access if they are
+ stolen from third party services and access to Vault is public.
+
+DEPRECATIONS/CHANGES:
+
+ * Database Plugin Backends: Passwords generated for these backends now
+ enforce stricter password requirements, as opposed to the previous behavior
+  of returning a randomized UUID. Passwords are of length 20, and have an
+  `A1a-` prefix prepended to ensure stricter requirements. No regressions are
+ expected from this change. (For database backends that were previously
+ substituting underscores for hyphens in passwords, this will remain the
+ case.)
+ * Lease Endpoints: The endpoints `sys/renew`, `sys/revoke`, `sys/revoke-prefix`,
+ `sys/revoke-force` have been deprecated and relocated under `sys/leases`.
+ Additionally, the deprecated path `sys/revoke-force` now requires the `sudo`
+ capability.
+ * Response Wrapping Lookup Unauthenticated: The `sys/wrapping/lookup` endpoint
+ is now unauthenticated. This allows introspection of the wrapping info by
+ clients that only have the wrapping token without then invalidating the
+ token. Validation functions/checks are still performed on the token.
+
+FEATURES:
+
+ * **Cassandra Storage**: Cassandra can now be used for Vault storage
+ * **CockroachDB Storage**: CockroachDB can now be used for Vault storage
+ * **CouchDB Storage**: CouchDB can now be used for Vault storage
+ * **SAP HANA Database Plugin**: The `databases` backend can now manage users
+ for SAP HANA databases
+ * **Plugin Backends**: Vault now supports running secret and auth backends as
+ plugins. Plugins can be mounted like normal backends and can be developed
+ independently from Vault.
+ * **PROXY Protocol Support** Vault listeners can now be configured to honor
+ PROXY protocol v1 information to allow passing real client IPs into Vault. A
+ list of authorized addresses (IPs or subnets) can be defined and
+ accept/reject behavior controlled.
+ * **Lease Lookup and Browsing in the Vault Enterprise UI**: Vault Enterprise UI
+ now supports lookup and listing of leases and the associated actions from the
+ `sys/leases` endpoints in the API. These are located in the new top level
+ navigation item "Leases".
+ * **Filtered Mounts for Performance Mode Replication**: Whitelists or
+ blacklists of mounts can be defined per-secondary to control which mounts
+ are actually replicated to that secondary. This can allow targeted
+ replication of specific sets of data to specific geolocations/datacenters.
+ * **Disaster Recovery Mode Replication (Enterprise Only)**: There is a new
+ replication mode, Disaster Recovery (DR), that performs full real-time
+ replication (including tokens and leases) to DR secondaries. DR secondaries
+ cannot handle client requests, but can be promoted to primary as needed for
+ failover.
+ * **Manage New Replication Features in the Vault Enterprise UI**: Support for
+ Replication features in Vault Enterprise UI has expanded to include new DR
+ Replication mode and management of Filtered Mounts in Performance Replication
+ mode.
+ * **Vault Identity (Enterprise Only)**: Vault's new Identity system allows
+ correlation of users across tokens. At present this is only used for MFA,
+ but will be the foundation of many other features going forward.
+ * **Duo Push, Okta Push, and TOTP MFA For All Authenticated Paths (Enterprise
+ Only)**: A brand new MFA system built on top of Identity allows MFA
+ (currently Duo Push, Okta Push, and TOTP) for any authenticated path within
+ Vault. MFA methods can be configured centrally, and TOTP keys live within
+ the user's Identity information to allow using the same key across tokens.
+ Specific MFA method(s) required for any given path within Vault can be
+ specified in normal ACL path statements.
+
+IMPROVEMENTS:
+
+ * api: Add client method for a secret renewer background process [GH-2886]
+ * api: Add `RenewTokenAsSelf` [GH-2886]
+ * api: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env
+ var or with a new API function [GH-2956]
+ * api/cli: Client will now attempt to look up SRV records for the given Vault
+ hostname [GH-3035]
+ * audit/socket: Enhance reconnection logic and don't require the connection to
+ be established at unseal time [GH-2934]
+ * audit/file: Opportunistically try re-opening the file on error [GH-2999]
+ * auth/approle: Add role name to token metadata [GH-2985]
+ * auth/okta: Allow specifying `ttl`/`max_ttl` inside the mount [GH-2915]
+ * cli: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env
+ var [GH-2956]
+ * command/auth: Add `-token-only` flag to `vault auth` that returns only the
+ token on stdout and does not store it via the token helper [GH-2855]
+ * core: CORS allowed origins can now be configured [GH-2021]
+ * core: Add metrics counters for audit log failures [GH-2863]
+ * cors: Allow setting allowed headers via the API instead of always using
+ wildcard [GH-3023]
+ * secret/ssh: Allow specifying the key ID format using template values for CA
+ type [GH-2888]
+ * server: Add `tls_client_ca_file` option for specifying a CA file to use for
+ client certificate verification when `tls_require_and_verify_client_cert` is
+ enabled [GH-3034]
+ * storage/cockroachdb: Add CockroachDB storage backend [GH-2713]
+ * storage/couchdb: Add CouchDB storage backend [GH-2880]
+ * storage/mssql: Add `max_parallel` [GH-3026]
+ * storage/postgresql: Add `max_parallel` [GH-3026]
+ * storage/postgresql: Improve listing speed [GH-2945]
+ * storage/s3: More efficient paging when an object has a lot of subobjects
+ [GH-2780]
+ * sys/wrapping: Make `sys/wrapping/lookup` unauthenticated [GH-3084]
+ * sys/wrapping: Wrapped tokens now store the original request path of the data
+ [GH-3100]
+ * telemetry: Add support for DogStatsD [GH-2490]
+
+BUG FIXES:
+
+ * api/health: Don't treat standby `429` codes as an error [GH-2850]
+ * api/leases: Fix lease lookup returning lease properties at the top level
+ * audit: Fix panic when audit logging a read operation on an asymmetric
+ `transit` key [GH-2958]
+ * auth/approle: Fix panic when secret and cidr list not provided in role
+ [GH-3075]
+ * auth/aws: Look up proper account ID on token renew [GH-3012]
+ * auth/aws: Store IAM header in all cases when it changes [GH-3004]
+ * auth/ldap: Verify given certificate is PEM encoded instead of failing
+ silently [GH-3016]
+ * auth/token: Don't allow using the same token ID twice when manually
+ specifying [GH-2916]
+ * cli: Fix issue with parsing keys that start with special characters [GH-2998]
+ * core: Relocated `sys/leases/renew` returns same payload as original
+ `sys/leases` endpoint [GH-2891]
+ * secret/ssh: Fix panic when signing with incorrect key type [GH-3072]
+ * secret/totp: Ensure codes can only be used once. This makes some automated
+ workflows harder but complies with the RFC. [GH-2908]
+ * secret/transit: Fix locking when creating a key with unsupported options
+ [GH-2974]
+
+## 0.7.3 (June 7th, 2017)
+
+SECURITY:
+
+ * Cert auth backend now checks validity of individual certificates: In
+ previous versions of Vault, validity (e.g. expiration) of individual leaf
+ certificates added for authentication was not checked. This was done to make
+ it easier for administrators to control lifecycles of individual
+ certificates added to the backend, e.g. the authentication material being
+ checked was access to that specific certificate's private key rather than
+ all private keys signed by a CA. However, this behavior is often unexpected
+ and as a result can lead to insecure deployments, so we are now validating
+ these certificates as well.
+ * App-ID path salting was skipped in 0.7.1/0.7.2: A regression in 0.7.1/0.7.2
+ caused the HMACing of any App-ID information stored in paths (including
+ actual app-IDs and user-IDs) to be unsalted and written as-is from the API.
+ In 0.7.3 any such paths will be automatically changed to salted versions on
+ access (e.g. login or read); however, if you created new app-IDs or user-IDs
+ in 0.7.1/0.7.2, you may want to consider whether any users with access to
+ Vault's underlying data store may have intercepted these values, and
+ revoke/roll them.
+
+DEPRECATIONS/CHANGES:
+
+ * Step-Down is Forwarded: When a step-down is issued against a non-active node
+ in an HA cluster, it will now forward the request to the active node.
+
+FEATURES:
+
+ * **ed25519 Signing/Verification in Transit with Key Derivation**: The
+ `transit` backend now supports generating
+ [ed25519](https://ed25519.cr.yp.to/) keys for signing and verification
+ functionality. These keys support derivation, allowing you to modify the
+ actual encryption key used by supplying a `context` value.
+ * **Key Version Specification for Encryption in Transit**: You can now specify
+ the version of a key you use to wish to generate a signature, ciphertext, or
+ HMAC. This can be controlled by the `min_encryption_version` key
+ configuration property.
+ * **Replication Primary Discovery (Enterprise)**: Replication primaries will
+ now advertise the addresses of their local HA cluster members to replication
+ secondaries. This helps recovery if the primary active node goes down and
+ neither service discovery nor load balancers are in use to steer clients.
+
+IMPROVEMENTS:
+
+ * api/health: Add Sys().Health() [GH-2805]
+ * audit: Add auth information to requests that error out [GH-2754]
+ * command/auth: Add `-no-store` option that prevents the auth command from
+ storing the returned token into the configured token helper [GH-2809]
+ * core/forwarding: Request forwarding now heartbeats to prevent unused
+ connections from being terminated by firewalls or proxies
+ * plugins/databases: Add MongoDB as an internal database plugin [GH-2698]
+ * storage/dynamodb: Add a method for checking the existence of children,
+ speeding up deletion operations in the DynamoDB storage backend [GH-2722]
+ * storage/mysql: Add max_parallel parameter to MySQL backend [GH-2760]
+ * secret/databases: Support listing connections [GH-2823]
+ * secret/databases: Support custom renewal statements in Postgres database
+ plugin [GH-2788]
+ * secret/databases: Use the role name as part of generated credentials
+ [GH-2812]
+ * ui (Enterprise): Transit key and secret browsing UI handle large lists better
+ * ui (Enterprise): root tokens are no longer persisted
+ * ui (Enterprise): support for mounting Database and TOTP secret backends
+
+BUG FIXES:
+
+ * auth/app-id: Fix regression causing loading of salts to be skipped
+ * auth/aws: Improve EC2 describe instances performance [GH-2766]
+ * auth/aws: Fix lookup of some instance profile ARNs [GH-2802]
+ * auth/aws: Resolve ARNs to internal AWS IDs which makes lookup at various
+ points (e.g. renewal time) more robust [GH-2814]
+ * auth/aws: Properly honor configured period when using IAM authentication
+ [GH-2825]
+ * auth/aws: Check that a bound IAM principal is not empty (in the current
+ state of the role) before requiring it match the previously authenticated
+ client [GH-2781]
+ * auth/cert: Fix panic on renewal [GH-2749]
+ * auth/cert: Certificate verification for non-CA certs [GH-2761]
+ * core/acl: Prevent race condition when compiling ACLs in some scenarios
+ [GH-2826]
+ * secret/database: Increase wrapping token TTL; in a loaded scenario it could
+ be too short
+ * secret/generic: Allow integers to be set as the value of `ttl` field as the
+ documentation claims is supported [GH-2699]
+ * secret/ssh: Added host key callback to ssh client config [GH-2752]
+ * storage/s3: Avoid a panic when some bad data is returned [GH-2785]
+ * storage/dynamodb: Fix list functions working improperly on Windows [GH-2789]
+ * storage/file: Don't leak file descriptors in some error cases
+ * storage/swift: Fix pre-v3 project/tenant name reading [GH-2803]
+
+## 0.7.2 (May 8th, 2017)
+
+BUG FIXES:
+
+ * audit: Fix auditing entries containing certain kinds of time values
+ [GH-2689]
+
+## 0.7.1 (May 5th, 2017)
DEPRECATIONS/CHANGES:
@@ -13,11 +420,26 @@ FEATURES:
Lambda instances, and more. Signed client identity information retrieved
using the AWS API `sts:GetCallerIdentity` is validated against the AWS STS
service before issuing a Vault token. This backend is unified with the
- `aws-ec2` authentication backend, and allows additional EC2-related
- restrictions to be applied during the IAM authentication; the previous EC2
- behavior is also still available. [GH-2441]
+ `aws-ec2` authentication backend under the name `aws`, and allows additional
+ EC2-related restrictions to be applied during the IAM authentication; the
+ previous EC2 behavior is also still available. [GH-2441]
* **MSSQL Physical Backend**: You can now use Microsoft SQL Server as your
Vault physical data store [GH-2546]
+ * **Lease Listing and Lookup**: You can now introspect a lease to get its
+ creation and expiration properties via `sys/leases/lookup`; with `sudo`
+ capability you can also list leases for lookup, renewal, or revocation via
+ that endpoint. Various lease functions (renew, revoke, revoke-prefix,
+ revoke-force) have also been relocated to `sys/leases/`, but they also work
+ at the old paths for compatibility. Reading (but not listing) leases via
+ `sys/leases/lookup` is now a part of the current `default` policy. [GH-2650]
+ * **TOTP Secret Backend**: You can now store multi-factor authentication keys
+ in Vault and use the API to retrieve time-based one-time use passwords on
+ demand. The backend can also be used to generate a new key and validate
+ passwords generated by that key. [GH-2492]
+ * **Database Secret Backend & Secure Plugins (Beta)**: This new secret backend
+ combines the functionality of the MySQL, PostgreSQL, MSSQL, and Cassandra
+ backends. It also provides a plugin interface for extendability through
+ custom databases. [GH-2200]
IMPROVEMENTS:
@@ -27,7 +449,11 @@ IMPROVEMENTS:
than the user credentials [GH-2534]
* cli/revoke: Add `-self` option to allow revoking the currently active token
[GH-2596]
- * core: Randomizing x coordinate in Shamir shares [GH-2621]
+ * core: Randomize x coordinate in Shamir shares [GH-2621]
+ * replication: Fix a bug when enabling `approle` on a primary before
+ secondaries were connected
+ * replication: Add heartbeating to ensure firewalls don't kill connections to
+ primaries
* secret/pki: Add `no_store` option that allows certificates to be issued
without being stored. This removes the ability to look up and/or add to a
CRL but helps with scaling to very large numbers of certificates. [GH-2565]
@@ -44,12 +470,20 @@ IMPROVEMENTS:
requests [GH-2466]
* storage/s3: Use pooled transport for http client [GH-2481]
* storage/swift: Allow domain values for V3 authentication [GH-2554]
+ * tidy: Improvements to `auth/token/tidy` and `sys/leases/tidy` to handle more
+ cleanup cases [GH-2452]
BUG FIXES:
* api: Respect a configured path in Vault's address [GH-2588]
* auth/aws-ec2: New bounds added as criteria to allow role creation [GH-2600]
* auth/ldap: Don't lowercase groups attached to users [GH-2613]
+ * cli: Don't panic if `vault write` is used with the `force` flag but no path
+ [GH-2674]
+ * core: Help operations should request forward since standbys may not have
+ appropriate info [GH-2677]
+ * replication: Fix enabling secondaries when certain mounts already existed on
+ the primary
* secret/mssql: Update mssql driver to support queries with colons [GH-2610]
* secret/pki: Don't lowercase O/OU values in certs [GH-2555]
* secret/pki: Don't attempt to validate IP SANs if none are provided [GH-2574]
@@ -192,11 +626,11 @@ FEATURES:
* **Configurable Audited HTTP Headers**: You can now specify headers that you
want to have included in each audit entry, along with whether each header
should be HMAC'd or kept plaintext. This can be useful for adding additional
- client or network metadata to the audit logs.
+ client or network metadata to the audit logs.
* **Transit Backend UI (Enterprise)**: Vault Enterprise UI now supports the transit
backend, allowing creation, viewing and editing of named keys as well as using
those keys to perform supported transit operations directly in the UI.
- * **Socket Audit Backend** A new socket audit backend allows audit logs to be sent
+ * **Socket Audit Backend** A new socket audit backend allows audit logs to be sent
through TCP, UDP, or UNIX Sockets.
IMPROVEMENTS:
@@ -404,9 +838,9 @@ FEATURES:
response wrapped token parameters; wrap arbitrary values; rotate wrapping
tokens; and unwrap with enhanced validation. In addition, list operations
can now be response-wrapped. [GH-1927]
- * Transit features: The `transit` backend now supports generating random bytes
- and SHA sums; HMACs; and signing and verification functionality using EC
- keys (P-256 curve)
+ * **Transit Features**: The `transit` backend now supports generating random
+ bytes and SHA sums; HMACs; and signing and verification functionality using
+ EC keys (P-256 curve)
IMPROVEMENTS:
diff --git a/vendor/github.com/hashicorp/vault/Makefile b/vendor/github.com/hashicorp/vault/Makefile
index 52ab43c..0bf1d14 100644
--- a/vendor/github.com/hashicorp/vault/Makefile
+++ b/vendor/github.com/hashicorp/vault/Makefile
@@ -1,36 +1,38 @@
TEST?=$$(go list ./... | grep -v /vendor/)
VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
EXTERNAL_TOOLS=\
- github.com/mitchellh/gox
+ github.com/mitchellh/gox \
+ github.com/kardianos/govendor
BUILD_TAGS?=vault
+GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor)
default: dev
# bin generates the releasable binaries for Vault
-bin: generate
+bin: fmtcheck prep
@CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/build.sh'"
# dev creates binaries for testing Vault locally. These are put
# into ./bin/ as well as $GOPATH/bin, except for quickdev which
# is only put into ./bin/
-quickdev: generate
+quickdev: prep
@CGO_ENABLED=0 go build -i -tags='$(BUILD_TAGS)' -o bin/vault
-dev: generate
+dev: fmtcheck prep
@CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
-dev-dynamic: generate
+dev-dynamic: prep
@CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
# test runs the unit tests and vets the code
-test: generate
+test: fmtcheck prep
CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=20m -parallel=4
-testcompile: generate
+testcompile: fmtcheck prep
@for pkg in $(TEST) ; do \
go test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \
done
# testacc runs acceptance tests
-testacc: generate
+testacc: fmtcheck prep
@if [ "$(TEST)" = "./..." ]; then \
echo "ERROR: Set TEST to a specific package"; \
exit 1; \
@@ -38,8 +40,8 @@ testacc: generate
VAULT_ACC=1 go test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout 45m
# testrace runs the race checker
-testrace: generate
- CGO_ENABLED=1 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=20m -parallel=4
+testrace: fmtcheck prep
+ CGO_ENABLED=1 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=45m -parallel=4
cover:
./scripts/coverage.sh --html
@@ -55,15 +57,16 @@ vet:
echo "and fix them if necessary before submitting the code for reviewal."; \
fi
-# generate runs `go generate` to build the dynamically generated
+# prep runs `go generate` to build the dynamically generated
# source files.
-generate:
+prep:
go generate $$(go list ./... | grep -v /vendor/)
+ cp .hooks/* .git/hooks/
# bootstrap the build by downloading additional tools
bootstrap:
@for tool in $(EXTERNAL_TOOLS) ; do \
- echo "Installing $$tool" ; \
+ echo "Installing/Updating $$tool" ; \
go get -u $$tool; \
done
@@ -71,4 +74,31 @@ proto:
protoc -I helper/forwarding -I vault -I ../../.. vault/*.proto --go_out=plugins=grpc:vault
protoc -I helper/forwarding -I vault -I ../../.. helper/forwarding/types.proto --go_out=plugins=grpc:helper/forwarding
-.PHONY: bin default generate test vet bootstrap
+fmtcheck:
+ @sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
+
+fmt:
+ gofmt -w $(GOFMT_FILES)
+
+mysql-database-plugin:
+ @CGO_ENABLED=0 go build -o bin/mysql-database-plugin ./plugins/database/mysql/mysql-database-plugin
+
+mysql-legacy-database-plugin:
+ @CGO_ENABLED=0 go build -o bin/mysql-legacy-database-plugin ./plugins/database/mysql/mysql-legacy-database-plugin
+
+cassandra-database-plugin:
+ @CGO_ENABLED=0 go build -o bin/cassandra-database-plugin ./plugins/database/cassandra/cassandra-database-plugin
+
+postgresql-database-plugin:
+ @CGO_ENABLED=0 go build -o bin/postgresql-database-plugin ./plugins/database/postgresql/postgresql-database-plugin
+
+mssql-database-plugin:
+ @CGO_ENABLED=0 go build -o bin/mssql-database-plugin ./plugins/database/mssql/mssql-database-plugin
+
+hana-database-plugin:
+ @CGO_ENABLED=0 go build -o bin/hana-database-plugin ./plugins/database/hana/hana-database-plugin
+
+mongodb-database-plugin:
+ @CGO_ENABLED=0 go build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin
+
+.PHONY: bin default prep test vet bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin
diff --git a/vendor/github.com/hashicorp/vault/README.md b/vendor/github.com/hashicorp/vault/README.md
index 61b2bb42..058c065 100644
--- a/vendor/github.com/hashicorp/vault/README.md
+++ b/vendor/github.com/hashicorp/vault/README.md
@@ -1,4 +1,4 @@
-Vault [![Build Status](https://travis-ci.org/hashicorp/vault.svg)](https://travis-ci.org/hashicorp/vault) [![Join the chat at https://gitter.im/hashicorp-vault/Lobby](https://badges.gitter.im/hashicorp-vault/Lobby.svg)](https://gitter.im/hashicorp-vault/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+Vault [![Build Status](https://travis-ci.org/hashicorp/vault.svg)](https://travis-ci.org/hashicorp/vault) [![Join the chat at https://gitter.im/hashicorp-vault/Lobby](https://badges.gitter.im/hashicorp-vault/Lobby.svg)](https://gitter.im/hashicorp-vault/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![vault enterprise](https://img.shields.io/badge/vault-enterprise-yellow.svg?colorB=7c8797&colorA=000000)](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=banner&utm_campaign=github-vault-enterprise)
=========
**Please note**: We take Vault's security and our users' trust very seriously. If you believe you have found a security issue in Vault, _please responsibly disclose_ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com).
@@ -57,7 +57,7 @@ Developing Vault
--------------------
If you wish to work on Vault itself or any of its built-in systems, you'll
-first need [Go](https://www.golang.org) installed on your machine (version 1.8+
+first need [Go](https://www.golang.org) installed on your machine (version 1.9+
is *required*).
For local dev first make sure Go is properly installed, including setting up a
@@ -128,3 +128,5 @@ long time.
Acceptance tests typically require other environment variables to be set for
things such as access keys. The test itself should error early and tell
you what to set, so it is not documented here.
+
+For more information on Vault Enterprise features, visit the [Vault Enterprise site](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=referral&utm_campaign=github-vault-enterprise).
diff --git a/vendor/github.com/hashicorp/vault/api/api_integration_test.go b/vendor/github.com/hashicorp/vault/api/api_integration_test.go
new file mode 100644
index 0000000..c4e1a1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/api_integration_test.go
@@ -0,0 +1,92 @@
+package api_test
+
+import (
+ "database/sql"
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/pki"
+ "github.com/hashicorp/vault/builtin/logical/transit"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/vault"
+
+ vaulthttp "github.com/hashicorp/vault/http"
+ logxi "github.com/mgutz/logxi/v1"
+ dockertest "gopkg.in/ory-am/dockertest.v3"
+)
+
+var testVaultServerDefaultBackends = map[string]logical.Factory{
+ "transit": transit.Factory,
+ "pki": pki.Factory,
+}
+
+func testVaultServer(t testing.TB) (*api.Client, func()) {
+ return testVaultServerBackends(t, testVaultServerDefaultBackends)
+}
+
+func testVaultServerBackends(t testing.TB, backends map[string]logical.Factory) (*api.Client, func()) {
+ coreConfig := &vault.CoreConfig{
+ DisableMlock: true,
+ DisableCache: true,
+ Logger: logxi.NullLog,
+ LogicalBackends: backends,
+ }
+
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+
+ // make it easy to get access to the active core
+ core := cluster.Cores[0].Core
+ vault.TestWaitActive(t, core)
+
+ client := cluster.Cores[0].Client
+ client.SetToken(cluster.RootToken)
+
+ // Sanity check
+ secret, err := client.Auth().Token().LookupSelf()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret == nil || secret.Data["id"].(string) != cluster.RootToken {
+ t.Fatalf("token mismatch: %#v vs %q", secret, cluster.RootToken)
+ }
+ return client, func() { cluster.Cleanup() }
+}
+
+// testPostgresDB creates a testing postgres database in a Docker container,
+// returning the connection URL and the associated closer function.
+func testPostgresDB(t testing.TB) (string, func()) {
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("postgresdb: failed to connect to docker: %s", err)
+ }
+
+ resource, err := pool.Run("postgres", "latest", []string{
+ "POSTGRES_PASSWORD=secret",
+ "POSTGRES_DB=database",
+ })
+ if err != nil {
+ t.Fatalf("postgresdb: could not start container: %s", err)
+ }
+
+ addr := fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp"))
+
+ if err := pool.Retry(func() error {
+ db, err := sql.Open("postgres", addr)
+ if err != nil {
+ return err
+ }
+ return db.Ping()
+ }); err != nil {
+ t.Fatalf("postgresdb: could not connect: %s", err)
+ }
+
+ return addr, func() {
+ if err := pool.Purge(resource); err != nil {
+ t.Fatalf("postgresdb: failed to cleanup container: %s", err)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go
index aff10f4..4f74f61 100644
--- a/vendor/github.com/hashicorp/vault/api/auth_token.go
+++ b/vendor/github.com/hashicorp/vault/api/auth_token.go
@@ -135,6 +135,26 @@ func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) {
return ParseSecret(resp.Body)
}
+// RenewTokenAsSelf behaves like renew-self, but authenticates using a provided
+// token instead of the token attached to the client.
+func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, error) {
+ r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self")
+ r.ClientToken = token
+
+ body := map[string]interface{}{"increment": increment}
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
+
// RevokeAccessor revokes a token associated with the given accessor
// along with all the child tokens.
func (c *TokenAuth) RevokeAccessor(accessor string) error {
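A short sketch of the new `RenewTokenAsSelf` helper added above, which renews a token other than the one attached to the client (the renewer introduced later in this diff relies on it). The token literal is a placeholder:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Renew a different token as if it were renewing itself; the increment is
	// in seconds, and 0 means "use the default".
	secret, err := client.Auth().Token().RenewTokenAsSelf("placeholder-token", 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("new lease duration: %ds", secret.Auth.LeaseDuration)
}
```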
diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go
index 85b8953..b19d5f0 100644
--- a/vendor/github.com/hashicorp/vault/api/client.go
+++ b/vendor/github.com/hashicorp/vault/api/client.go
@@ -3,19 +3,21 @@ package api
import (
"crypto/tls"
"fmt"
+ "net"
"net/http"
"net/url"
"os"
+ "path"
"strconv"
"strings"
"sync"
"time"
- "path"
"golang.org/x/net/http2"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-rootcerts"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/sethgrid/pester"
)
@@ -24,6 +26,7 @@ const EnvVaultCACert = "VAULT_CACERT"
const EnvVaultCAPath = "VAULT_CAPATH"
const EnvVaultClientCert = "VAULT_CLIENT_CERT"
const EnvVaultClientKey = "VAULT_CLIENT_KEY"
+const EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT"
const EnvVaultInsecure = "VAULT_SKIP_VERIFY"
const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME"
const EnvVaultWrapTTL = "VAULT_WRAP_TTL"
@@ -54,6 +57,9 @@ type Config struct {
// MaxRetries controls the maximum number of times to retry when a 5xx error
// occurs. Set to 0 or less to disable retrying. Defaults to 0.
MaxRetries int
+
+ // Timeout is for setting custom timeout parameter in the HttpClient
+ Timeout time.Duration
}
// TLSConfig contains the parameters needed to configure TLS on the HTTP client
@@ -156,6 +162,7 @@ func (c *Config) ReadEnvironment() error {
var envCAPath string
var envClientCert string
var envClientKey string
+ var envClientTimeout time.Duration
var envInsecure bool
var envTLSServerName string
var envMaxRetries *uint64
@@ -183,6 +190,13 @@ func (c *Config) ReadEnvironment() error {
if v := os.Getenv(EnvVaultClientKey); v != "" {
envClientKey = v
}
+ if t := os.Getenv(EnvVaultClientTimeout); t != "" {
+ clientTimeout, err := parseutil.ParseDurationSecond(t)
+ if err != nil {
+ return fmt.Errorf("Could not parse %s", EnvVaultClientTimeout)
+ }
+ envClientTimeout = clientTimeout
+ }
if v := os.Getenv(EnvVaultInsecure); v != "" {
var err error
envInsecure, err = strconv.ParseBool(v)
@@ -215,6 +229,10 @@ func (c *Config) ReadEnvironment() error {
c.MaxRetries = int(*envMaxRetries) + 1
}
+ if envClientTimeout != 0 {
+ c.Timeout = envClientTimeout
+ }
+
return nil
}
@@ -304,6 +322,11 @@ func (c *Client) SetMaxRetries(retries int) {
c.config.MaxRetries = retries
}
+// SetClientTimeout sets the client request timeout
+func (c *Client) SetClientTimeout(timeout time.Duration) {
+ c.config.Timeout = timeout
+}
+
// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs
// for a given operation and path
func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) {
@@ -327,16 +350,32 @@ func (c *Client) ClearToken() {
c.token = ""
}
+// Clone creates a copy of this client.
+func (c *Client) Clone() (*Client, error) {
+ return NewClient(c.config)
+}
+
// NewRequest creates a new raw request object to query the Vault server
// configured for this client. This is an advanced method and generally
// doesn't need to be called externally.
func (c *Client) NewRequest(method, requestPath string) *Request {
+ // If SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), look up the SRV
+ // record and take the highest-priority match; this is not designed for high-availability, just discovery
+ var host string = c.addr.Host
+ if c.addr.Port() == "" {
+ // Internet Draft specifies that the SRV record is ignored if a port is given
+ _, addrs, err := net.LookupSRV("http", "tcp", c.addr.Hostname())
+ if err == nil && len(addrs) > 0 {
+ host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port)
+ }
+ }
+
req := &Request{
Method: method,
URL: &url.URL{
User: c.addr.User,
Scheme: c.addr.Scheme,
- Host: c.addr.Host,
+ Host: host,
Path: path.Join(c.addr.Path, requestPath),
},
ClientToken: c.token,
@@ -357,6 +396,9 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
} else {
req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath)
}
+ if c.config.Timeout != 0 {
+ c.config.HttpClient.Timeout = c.config.Timeout
+ }
return req
}
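A sketch of the new client timeout knobs: `VAULT_CLIENT_TIMEOUT` is parsed with `parseutil.ParseDurationSecond`, so a bare number is treated as seconds, and `SetClientTimeout` covers the programmatic path. Per the hunk above, the value is applied to the underlying `http.Client` when the next request is built:

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/hashicorp/vault/api"
)

func main() {
	// A bare "10" is read as ten seconds.
	os.Setenv("VAULT_CLIENT_TIMEOUT", "10")

	config := api.DefaultConfig()
	if err := config.ReadEnvironment(); err != nil {
		log.Fatal(err)
	}

	client, err := api.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}

	// Override at runtime; takes effect on the next request.
	client.SetClientTimeout(30 * time.Second)
}
```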
diff --git a/vendor/github.com/hashicorp/vault/api/client_test.go b/vendor/github.com/hashicorp/vault/api/client_test.go
index f95d795..84663ee 100644
--- a/vendor/github.com/hashicorp/vault/api/client_test.go
+++ b/vendor/github.com/hashicorp/vault/api/client_test.go
@@ -6,6 +6,7 @@ import (
"net/http"
"os"
"testing"
+ "time"
)
func init() {
@@ -160,3 +161,27 @@ func TestClientEnvSettings(t *testing.T) {
t.Fatalf("bad: %v", tlsConfig.InsecureSkipVerify)
}
}
+
+func TestClientTimeoutSetting(t *testing.T) {
+ oldClientTimeout := os.Getenv(EnvVaultClientTimeout)
+ os.Setenv(EnvVaultClientTimeout, "10")
+ defer os.Setenv(EnvVaultClientTimeout, oldClientTimeout)
+ config := DefaultConfig()
+ config.ReadEnvironment()
+ client, err := NewClient(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _ = client.NewRequest("PUT", "/")
+ if client.config.HttpClient.Timeout != time.Second*10 {
+ t.Fatalf("error setting client timeout using env variable")
+ }
+
+ // Setting custom client timeout for a new request
+ client.SetClientTimeout(time.Second * 20)
+ _ = client.NewRequest("PUT", "/")
+ if client.config.HttpClient.Timeout != time.Second*20 {
+ t.Fatalf("error setting client timeout using SetClientTimeout")
+ }
+
+}
diff --git a/vendor/github.com/hashicorp/vault/api/renewer.go b/vendor/github.com/hashicorp/vault/api/renewer.go
new file mode 100644
index 0000000..a2a4b66
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/renewer.go
@@ -0,0 +1,302 @@
+package api
+
+import (
+ "errors"
+ "math/rand"
+ "sync"
+ "time"
+)
+
+var (
+ ErrRenewerMissingInput = errors.New("missing input to renewer")
+ ErrRenewerMissingSecret = errors.New("missing secret to renew")
+ ErrRenewerNotRenewable = errors.New("secret is not renewable")
+ ErrRenewerNoSecretData = errors.New("returned empty secret data")
+
+ // DefaultRenewerGrace is the default grace period
+ DefaultRenewerGrace = 15 * time.Second
+
+ // DefaultRenewerRenewBuffer is the default size of the buffer for renew
+ // messages on the channel.
+ DefaultRenewerRenewBuffer = 5
+)
+
+// Renewer is a process for renewing a secret.
+//
+// renewer, err := client.NewRenewer(&RenewerInput{
+// Secret: mySecret,
+// })
+// go renewer.Renew()
+// defer renewer.Stop()
+//
+// for {
+// select {
+// case err := <-renewer.DoneCh():
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// // Renewal is now over
+// case renewal := <-renewer.RenewCh():
+// log.Printf("Successfully renewed: %#v", renewal)
+// }
+// }
+//
+//
+// The `DoneCh` will return if renewal fails or if the remaining lease duration
+// after a renewal is less than or equal to the grace period. In both cases,
+// the caller should attempt a re-read of the secret. Clients should check the
+// return value of the channel to see if renewal was successful.
+type Renewer struct {
+ l sync.Mutex
+
+ client *Client
+ secret *Secret
+ grace time.Duration
+ random *rand.Rand
+ doneCh chan error
+ renewCh chan *RenewOutput
+
+ stopped bool
+ stopCh chan struct{}
+}
+
+// RenewerInput is used as input to the renew function.
+type RenewerInput struct {
+ // Secret is the secret to renew
+ Secret *Secret
+
+ // Grace is a minimum renewal before returning so the upstream client
+ // can do a re-read. This can be used to prevent clients from waiting
+ // too long to read a new credential and incur downtime.
+ Grace time.Duration
+
+ // Rand is the randomizer to use for underlying randomization. If not
+ // provided, one will be generated and seeded automatically. If provided, it
+ // is assumed to have already been seeded.
+ Rand *rand.Rand
+
+ // RenewBuffer is the size of the buffered channel where renew messages are
+ // dispatched.
+ RenewBuffer int
+}
+
+// RenewOutput is the metadata returned to the client (if it's listening) along
+// with renew messages.
+type RenewOutput struct {
+ // RenewedAt is the timestamp when the renewal took place (UTC).
+ RenewedAt time.Time
+
+ // Secret is the underlying renewal data. It's the same struct as all data
+ // that is returned from Vault, but since this is renewal data, it will not
+ // usually include the secret itself.
+ Secret *Secret
+}
+
+// NewRenewer creates a new renewer from the given input.
+func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) {
+ if i == nil {
+ return nil, ErrRenewerMissingInput
+ }
+
+ secret := i.Secret
+ if secret == nil {
+ return nil, ErrRenewerMissingSecret
+ }
+
+ grace := i.Grace
+ if grace == 0 {
+ grace = DefaultRenewerGrace
+ }
+
+ random := i.Rand
+ if random == nil {
+ random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
+ }
+
+ renewBuffer := i.RenewBuffer
+ if renewBuffer == 0 {
+ renewBuffer = DefaultRenewerRenewBuffer
+ }
+
+ return &Renewer{
+ client: c,
+ secret: secret,
+ grace: grace,
+ random: random,
+ doneCh: make(chan error, 1),
+ renewCh: make(chan *RenewOutput, renewBuffer),
+
+ stopped: false,
+ stopCh: make(chan struct{}),
+ }, nil
+}
+
+// DoneCh returns the channel where the renewer will publish when renewal stops.
+// If there is an error, this will be an error.
+func (r *Renewer) DoneCh() <-chan error {
+ return r.doneCh
+}
+
+// RenewCh is a channel that receives a message when a successful renewal takes
+// place and includes metadata about the renewal.
+func (r *Renewer) RenewCh() <-chan *RenewOutput {
+ return r.renewCh
+}
+
+// Stop stops the renewer.
+func (r *Renewer) Stop() {
+ r.l.Lock()
+ if !r.stopped {
+ close(r.stopCh)
+ r.stopped = true
+ }
+ r.l.Unlock()
+}
+
+// Renew starts a background process for renewing this secret. When the secret
+// has auth data, this attempts to renew the auth (token). When the secret
+// has a lease, this attempts to renew the lease.
+func (r *Renewer) Renew() {
+ var result error
+ if r.secret.Auth != nil {
+ result = r.renewAuth()
+ } else {
+ result = r.renewLease()
+ }
+
+ select {
+ case r.doneCh <- result:
+ case <-r.stopCh:
+ }
+}
+
+// renewAuth is a helper for renewing authentication.
+func (r *Renewer) renewAuth() error {
+ if !r.secret.Auth.Renewable || r.secret.Auth.ClientToken == "" {
+ return ErrRenewerNotRenewable
+ }
+
+ client, token := r.client, r.secret.Auth.ClientToken
+
+ for {
+ // Check if we are stopped.
+ select {
+ case <-r.stopCh:
+ return nil
+ default:
+ }
+
+ // Renew the auth.
+ renewal, err := client.Auth().Token().RenewTokenAsSelf(token, 0)
+ if err != nil {
+ return err
+ }
+
+ // Push a message that a renewal took place.
+ select {
+ case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}:
+ default:
+ }
+
+ // Somehow, sometimes, this happens.
+ if renewal == nil || renewal.Auth == nil {
+ return ErrRenewerNoSecretData
+ }
+
+ // Do nothing if we are not renewable
+ if !renewal.Auth.Renewable {
+ return ErrRenewerNotRenewable
+ }
+
+ // Grab the lease duration and sleep duration - note that we grab the auth
+ // lease duration, not the secret lease duration.
+ leaseDuration := time.Duration(renewal.Auth.LeaseDuration) * time.Second
+ sleepDuration := r.sleepDuration(leaseDuration)
+
+ // If we are within grace, return now.
+ if leaseDuration <= r.grace || sleepDuration <= r.grace {
+ return nil
+ }
+
+ select {
+ case <-r.stopCh:
+ return nil
+ case <-time.After(sleepDuration):
+ continue
+ }
+ }
+}
+
+// renewLease is a helper for renewing a lease.
+func (r *Renewer) renewLease() error {
+ if !r.secret.Renewable || r.secret.LeaseID == "" {
+ return ErrRenewerNotRenewable
+ }
+
+ client, leaseID := r.client, r.secret.LeaseID
+
+ for {
+ // Check if we are stopped.
+ select {
+ case <-r.stopCh:
+ return nil
+ default:
+ }
+
+ // Renew the lease.
+ renewal, err := client.Sys().Renew(leaseID, 0)
+ if err != nil {
+ return err
+ }
+
+ // Push a message that a renewal took place.
+ select {
+ case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}:
+ default:
+ }
+
+ // Somehow, sometimes, this happens.
+ if renewal == nil {
+ return ErrRenewerNoSecretData
+ }
+
+ // Do nothing if we are not renewable
+ if !renewal.Renewable {
+ return ErrRenewerNotRenewable
+ }
+
+ // Grab the lease duration and sleep duration
+ leaseDuration := time.Duration(renewal.LeaseDuration) * time.Second
+ sleepDuration := r.sleepDuration(leaseDuration)
+
+ // If we are within grace, return now.
+ if leaseDuration <= r.grace || sleepDuration <= r.grace {
+ return nil
+ }
+
+ select {
+ case <-r.stopCh:
+ return nil
+ case <-time.After(sleepDuration):
+ continue
+ }
+ }
+}
+
+// sleepDuration calculates the time to sleep given the base lease duration. The
+// base is the resulting lease duration. It will be reduced to 1/3 and
+// multiplied by a random float between 0.5 and 1.0. This extra randomness
+// prevents multiple clients from all trying to renew simultaneously.
+func (r *Renewer) sleepDuration(base time.Duration) time.Duration {
+ sleep := float64(base)
+
+ // Renew at 1/3 the remaining lease. This will give us an opportunity to retry
+ // at least one more time should the first renewal fail.
+ sleep = sleep / 3.0
+
+ // Add randomness so that many clients do not hit Vault simultaneously.
+ sleep = sleep * (r.random.Float64() + 1) / 2.0
+
+ return time.Duration(sleep)
+}
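To make the schedule in `sleepDuration` concrete, here is a standalone sketch of the same arithmetic: the lease is cut to one third and jittered into [0.5, 1.0) of that third, so a 60-second lease yields a sleep somewhere in the 10-20 second window:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	rand.Seed(time.Now().UnixNano())

	base := 60 * time.Second

	// 1/3 of the lease, then jittered into [0.5, 1.0) of that third.
	sleep := float64(base) / 3.0
	sleep = sleep * (rand.Float64() + 1) / 2.0

	fmt.Println(time.Duration(sleep)) // between 10s and 20s for a 60s lease
}
```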
diff --git a/vendor/github.com/hashicorp/vault/api/renewer_integration_test.go b/vendor/github.com/hashicorp/vault/api/renewer_integration_test.go
new file mode 100644
index 0000000..7011c7d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/renewer_integration_test.go
@@ -0,0 +1,228 @@
+package api_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/database"
+ "github.com/hashicorp/vault/builtin/logical/pki"
+ "github.com/hashicorp/vault/builtin/logical/transit"
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestRenewer_Renew(t *testing.T) {
+ t.Parallel()
+
+ client, vaultDone := testVaultServerBackends(t, map[string]logical.Factory{
+ "database": database.Factory,
+ "pki": pki.Factory,
+ "transit": transit.Factory,
+ })
+ defer vaultDone()
+
+ pgURL, pgDone := testPostgresDB(t)
+ defer pgDone()
+
+ t.Run("group", func(t *testing.T) {
+ t.Run("kv", func(t *testing.T) {
+ t.Parallel()
+
+ if _, err := client.Logical().Write("secret/value", map[string]interface{}{
+ "foo": "bar",
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ secret, err := client.Logical().Read("secret/value")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ v, err := client.NewRenewer(&api.RenewerInput{
+ Secret: secret,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ go v.Renew()
+ defer v.Stop()
+
+ select {
+ case err := <-v.DoneCh():
+ if err != api.ErrRenewerNotRenewable {
+ t.Fatal(err)
+ }
+ case renew := <-v.RenewCh():
+ t.Errorf("received renew, but should have been nil: %#v", renew)
+ case <-time.After(500 * time.Millisecond):
+ t.Error("should have been non-renewable")
+ }
+ })
+
+ t.Run("transit", func(t *testing.T) {
+ t.Parallel()
+
+ if err := client.Sys().Mount("transit", &api.MountInput{
+ Type: "transit",
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ secret, err := client.Logical().Write("transit/encrypt/my-app", map[string]interface{}{
+ "plaintext": "Zm9vCg==",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ v, err := client.NewRenewer(&api.RenewerInput{
+ Secret: secret,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ go v.Renew()
+ defer v.Stop()
+
+ select {
+ case err := <-v.DoneCh():
+ if err != api.ErrRenewerNotRenewable {
+ t.Fatal(err)
+ }
+ case renew := <-v.RenewCh():
+ t.Errorf("received renew, but should have been nil: %#v", renew)
+ case <-time.After(500 * time.Millisecond):
+ t.Error("should have been non-renewable")
+ }
+ })
+
+ t.Run("database", func(t *testing.T) {
+ t.Parallel()
+
+ if err := client.Sys().Mount("database", &api.MountInput{
+ Type: "database",
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := client.Logical().Write("database/config/postgresql", map[string]interface{}{
+ "plugin_name": "postgresql-database-plugin",
+ "connection_url": pgURL,
+ "allowed_roles": "readonly",
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := client.Logical().Write("database/roles/readonly", map[string]interface{}{
+ "db_name": "postgresql",
+ "creation_statements": `` +
+ `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';` +
+ `GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";`,
+ "default_ttl": "1s",
+ "max_ttl": "3s",
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ secret, err := client.Logical().Read("database/creds/readonly")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ v, err := client.NewRenewer(&api.RenewerInput{
+ Secret: secret,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ go v.Renew()
+ defer v.Stop()
+
+ select {
+ case err := <-v.DoneCh():
+ t.Errorf("should have renewed once before returning: %s", err)
+ case renew := <-v.RenewCh():
+ if renew == nil {
+ t.Fatal("renew is nil")
+ }
+ if !renew.Secret.Renewable {
+ t.Errorf("expected lease to be renewable: %#v", renew)
+ }
+ if renew.Secret.LeaseDuration > 2 {
+ t.Errorf("expected lease to < 2s: %#v", renew)
+ }
+ case <-time.After(3 * time.Second):
+ t.Errorf("no renewal")
+ }
+
+ select {
+ case err := <-v.DoneCh():
+ if err != nil {
+ t.Fatal(err)
+ }
+ case renew := <-v.RenewCh():
+ t.Fatalf("should not have renewed (lease should be up): %#v", renew)
+ case <-time.After(3 * time.Second):
+ t.Errorf("no data")
+ }
+ })
+
+ t.Run("auth", func(t *testing.T) {
+ t.Parallel()
+
+ secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+ Policies: []string{"default"},
+ TTL: "1s",
+ ExplicitMaxTTL: "3s",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ v, err := client.NewRenewer(&api.RenewerInput{
+ Secret: secret,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ go v.Renew()
+ defer v.Stop()
+
+ select {
+ case err := <-v.DoneCh():
+ t.Errorf("should have renewed once before returning: %s", err)
+ case renew := <-v.RenewCh():
+ if renew == nil {
+ t.Fatal("renew is nil")
+ }
+ if renew.Secret.Auth == nil {
+ t.Fatal("renew auth is nil")
+ }
+ if !renew.Secret.Auth.Renewable {
+ t.Errorf("expected lease to be renewable: %#v", renew)
+ }
+ if renew.Secret.Auth.LeaseDuration > 2 {
+ t.Errorf("expected lease to < 2s: %#v", renew)
+ }
+ if renew.Secret.Auth.ClientToken == "" {
+ t.Error("expected a client token")
+ }
+ if renew.Secret.Auth.Accessor == "" {
+ t.Error("expected an accessor")
+ }
+ case <-time.After(3 * time.Second):
+ t.Errorf("no renewal")
+ }
+
+ select {
+ case err := <-v.DoneCh():
+ if err != nil {
+ t.Fatal(err)
+ }
+ case renew := <-v.RenewCh():
+ t.Fatalf("should not have renewed (lease should be up): %#v", renew)
+ case <-time.After(3 * time.Second):
+ t.Errorf("no data")
+ }
+ })
+ })
+}
diff --git a/vendor/github.com/hashicorp/vault/api/renewer_test.go b/vendor/github.com/hashicorp/vault/api/renewer_test.go
new file mode 100644
index 0000000..262484e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/renewer_test.go
@@ -0,0 +1,85 @@
+package api
+
+import (
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestRenewer_NewRenewer(t *testing.T) {
+ t.Parallel()
+
+ client, err := NewClient(DefaultConfig())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cases := []struct {
+ name string
+ i *RenewerInput
+ e *Renewer
+ err bool
+ }{
+ {
+ "nil",
+ nil,
+ nil,
+ true,
+ },
+ {
+ "missing_secret",
+ &RenewerInput{
+ Secret: nil,
+ },
+ nil,
+ true,
+ },
+ {
+ "default_grace",
+ &RenewerInput{
+ Secret: &Secret{},
+ },
+ &Renewer{
+ secret: &Secret{},
+ grace: DefaultRenewerGrace,
+ },
+ false,
+ },
+ {
+ "custom_grace",
+ &RenewerInput{
+ Secret: &Secret{},
+ Grace: 30 * time.Second,
+ },
+ &Renewer{
+ secret: &Secret{},
+ grace: 30 * time.Second,
+ },
+ false,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ v, err := client.NewRenewer(tc.i)
+ if (err != nil) != tc.err {
+ t.Fatal(err)
+ }
+
+ if v == nil {
+ return
+ }
+
+ // Zero out channels and helpers, since reflect.DeepEqual cannot compare them
+ v.client = nil
+ v.random = nil
+ v.doneCh = nil
+ v.renewCh = nil
+ v.stopCh = nil
+
+ if !reflect.DeepEqual(tc.e, v) {
+ t.Errorf("not equal\nexp: %#v\nact: %#v", tc.e, v)
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go
index 685e2d7..83a28bd 100644
--- a/vendor/github.com/hashicorp/vault/api/request.go
+++ b/vendor/github.com/hashicorp/vault/api/request.go
@@ -14,6 +14,7 @@ type Request struct {
Method string
URL *url.URL
Params url.Values
+ Headers http.Header
ClientToken string
WrapTTL string
Obj interface{}
@@ -60,6 +61,14 @@ func (r *Request) ToHTTP() (*http.Request, error) {
req.URL.Host = r.URL.Host
req.Host = r.URL.Host
+ if r.Headers != nil {
+ for header, vals := range r.Headers {
+ for _, val := range vals {
+ req.Header.Add(header, val)
+ }
+ }
+ }
+
if len(r.ClientToken) != 0 {
req.Header.Set("X-Vault-Token", r.ClientToken)
}
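The new `Headers` field lets callers attach arbitrary headers to a raw request before it is converted to an `http.Request`. A sketch, where the header name is purely illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	req := client.NewRequest("GET", "/v1/sys/health")
	req.Headers = http.Header{}
	req.Headers.Set("X-Example-Trace-Id", "abc123") // illustrative header

	resp, err := client.RawRequest(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.StatusCode)
}
```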
diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go
index 7c8ac9f..05502e1 100644
--- a/vendor/github.com/hashicorp/vault/api/response.go
+++ b/vendor/github.com/hashicorp/vault/api/response.go
@@ -25,8 +25,9 @@ func (r *Response) DecodeJSON(out interface{}) error {
// this will fully consume the response body, but will not close it. The
// body must still be closed manually.
func (r *Response) Error() error {
- // 200 to 399 are okay status codes
- if r.StatusCode >= 200 && r.StatusCode < 400 {
+ // 200 to 399 are okay status codes. 429 is the code for health status of
+ // standby nodes.
+ if (r.StatusCode >= 200 && r.StatusCode < 400) || r.StatusCode == 429 {
return nil
}
diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go
index 14924f9..7478a0c 100644
--- a/vendor/github.com/hashicorp/vault/api/secret.go
+++ b/vendor/github.com/hashicorp/vault/api/secret.go
@@ -42,6 +42,7 @@ type SecretWrapInfo struct {
Token string `json:"token"`
TTL int `json:"ttl"`
CreationTime time.Time `json:"creation_time"`
+ CreationPath string `json:"creation_path"`
WrappedAccessor string `json:"wrapped_accessor"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go
index 7c3e56b..a17b0eb 100644
--- a/vendor/github.com/hashicorp/vault/api/ssh.go
+++ b/vendor/github.com/hashicorp/vault/api/ssh.go
@@ -36,3 +36,20 @@ func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, err
return ParseSecret(resp.Body)
}
+
+// SignKey signs the given public key and returns a signed public key to pass
+// along with the SSH request.
+func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error) {
+ r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/sign/%s", c.MountPoint, role))
+ if err := r.SetJSONBody(data); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
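A sketch of the new `SignKey` helper against an SSH CA mount. The mount point, role name, and public key are placeholders, and the `signed_key` response field is an assumption based on the SSH backend's documented response shape:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// "ssh" and "my-role" are placeholders for an existing CA mount and role.
	ssh := client.SSHWithMountPoint("ssh")
	secret, err := ssh.SignKey("my-role", map[string]interface{}{
		"public_key": "ssh-rsa AAAAB3... user@host", // placeholder key
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["signed_key"])
}
```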
diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go
index f9f3c8c..32f4bbd 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_auth.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go
@@ -82,19 +82,27 @@ func (c *Sys) DisableAuth(path string) error {
// documentation. Please refer to that documentation for more details.
type EnableAuthOptions struct {
- Type string `json:"type" structs:"type"`
- Description string `json:"description" structs:"description"`
- Local bool `json:"local" structs:"local"`
+ Type string `json:"type" structs:"type"`
+ Description string `json:"description" structs:"description"`
+ Config AuthConfigInput `json:"config" structs:"config"`
+ Local bool `json:"local" structs:"local"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty"`
+}
+
+type AuthConfigInput struct {
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
type AuthMount struct {
Type string `json:"type" structs:"type" mapstructure:"type"`
Description string `json:"description" structs:"description" mapstructure:"description"`
+ Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"`
Config AuthConfigOutput `json:"config" structs:"config" mapstructure:"config"`
Local bool `json:"local" structs:"local" mapstructure:"local"`
}
type AuthConfigOutput struct {
- DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go
new file mode 100644
index 0000000..e7f2a59
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go
@@ -0,0 +1,56 @@
+package api
+
+func (c *Sys) CORSStatus() (*CORSResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/config/cors")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result CORSResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) ConfigureCORS(req *CORSRequest) (*CORSResponse, error) {
+ r := c.c.NewRequest("PUT", "/v1/sys/config/cors")
+ if err := r.SetJSONBody(req); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result CORSResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) DisableCORS() (*CORSResponse, error) {
+ r := c.c.NewRequest("DELETE", "/v1/sys/config/cors")
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result CORSResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+type CORSRequest struct {
+ AllowedOrigins string `json:"allowed_origins"`
+ Enabled bool `json:"enabled"`
+}
+
+type CORSResponse struct {
+ AllowedOrigins string `json:"allowed_origins"`
+ Enabled bool `json:"enabled"`
+}
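A sketch of the new CORS endpoints added above; the origin value is an example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	if _, err := client.Sys().ConfigureCORS(&api.CORSRequest{
		AllowedOrigins: "https://ui.example.com", // example origin
		Enabled:        true,
	}); err != nil {
		log.Fatal(err)
	}

	status, err := client.Sys().CORSStatus()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("enabled=%v origins=%s\n", status.Enabled, status.AllowedOrigins)
}
```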
diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go
new file mode 100644
index 0000000..822354c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_health.go
@@ -0,0 +1,29 @@
+package api
+
+func (c *Sys) Health() (*HealthResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/health")
+ // If the code is 400 or above it will automatically turn into an error,
+ // but the sys/health API defaults to returning 5xx when sealed or
+ // uninitialized, so we force those codes to something else so we parse correctly
+ r.Params.Add("sealedcode", "299")
+ r.Params.Add("uninitcode", "299")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result HealthResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+type HealthResponse struct {
+ Initialized bool `json:"initialized"`
+ Sealed bool `json:"sealed"`
+ Standby bool `json:"standby"`
+ ServerTimeUTC int64 `json:"server_time_utc"`
+ Version string `json:"version"`
+ ClusterName string `json:"cluster_name,omitempty"`
+ ClusterID string `json:"cluster_id,omitempty"`
+}
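A sketch of the new `Health` helper. Because the request forces the sealed/uninitialized codes to 299, and 429 (standby) is whitelisted in `Response.Error` per the earlier hunk, the call succeeds against standby and sealed nodes as well:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	health, err := client.Sys().Health()
	if err != nil {
		log.Fatal(err)
	}

	switch {
	case !health.Initialized:
		fmt.Println("not initialized")
	case health.Sealed:
		fmt.Println("sealed")
	case health.Standby:
		fmt.Println("standby node")
	default:
		fmt.Println("active node, version", health.Version)
	}
}
```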
diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go
index 201ac73..4951c46 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_leader.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_leader.go
@@ -14,7 +14,8 @@ func (c *Sys) Leader() (*LeaderResponse, error) {
}
type LeaderResponse struct {
- HAEnabled bool `json:"ha_enabled"`
- IsSelf bool `json:"is_self"`
- LeaderAddress string `json:"leader_address"`
+ HAEnabled bool `json:"ha_enabled"`
+ IsSelf bool `json:"is_self"`
+ LeaderAddress string `json:"leader_address"`
+ LeaderClusterAddress string `json:"leader_cluster_address"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_lease.go b/vendor/github.com/hashicorp/vault/api/sys_leases.go
similarity index 76%
rename from vendor/github.com/hashicorp/vault/api/sys_lease.go
rename to vendor/github.com/hashicorp/vault/api/sys_leases.go
index e5c19c4..34bd99e 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_lease.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_leases.go
@@ -1,7 +1,7 @@
package api
func (c *Sys) Renew(id string, increment int) (*Secret, error) {
- r := c.c.NewRequest("PUT", "/v1/sys/renew")
+ r := c.c.NewRequest("PUT", "/v1/sys/leases/renew")
body := map[string]interface{}{
"increment": increment,
@@ -21,7 +21,7 @@ func (c *Sys) Renew(id string, increment int) (*Secret, error) {
}
func (c *Sys) Revoke(id string) error {
- r := c.c.NewRequest("PUT", "/v1/sys/revoke/"+id)
+ r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke/"+id)
resp, err := c.c.RawRequest(r)
if err == nil {
defer resp.Body.Close()
@@ -30,7 +30,7 @@ func (c *Sys) Revoke(id string) error {
}
func (c *Sys) RevokePrefix(id string) error {
- r := c.c.NewRequest("PUT", "/v1/sys/revoke-prefix/"+id)
+ r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-prefix/"+id)
resp, err := c.c.RawRequest(r)
if err == nil {
defer resp.Body.Close()
@@ -39,7 +39,7 @@ func (c *Sys) RevokePrefix(id string) error {
}
func (c *Sys) RevokeForce(id string) error {
- r := c.c.NewRequest("PUT", "/v1/sys/revoke-force/"+id)
+ r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-force/"+id)
resp, err := c.c.RawRequest(r)
if err == nil {
defer resp.Body.Close()
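The rename only moves the HTTP endpoints under `/v1/sys/leases/`; the Go method names are unchanged, so existing callers compile as before. A sketch with a placeholder lease ID:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	leaseID := "database/creds/readonly/placeholder" // placeholder lease ID

	// Renew for 60 more seconds, then revoke.
	if _, err := client.Sys().Renew(leaseID, 60); err != nil {
		log.Fatal(err)
	}
	if err := client.Sys().Revoke(leaseID); err != nil {
		log.Fatal(err)
	}
}
```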
diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
index 907fddb..091a8f6 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
@@ -124,23 +124,27 @@ type MountInput struct {
Description string `json:"description" structs:"description"`
Config MountConfigInput `json:"config" structs:"config"`
Local bool `json:"local" structs:"local"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name"`
}
type MountConfigInput struct {
DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
type MountOutput struct {
Type string `json:"type" structs:"type"`
Description string `json:"description" structs:"description"`
+ Accessor string `json:"accessor" structs:"accessor"`
Config MountConfigOutput `json:"config" structs:"config"`
Local bool `json:"local" structs:"local"`
}
type MountConfigOutput struct {
- DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
- ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
+ DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
diff --git a/vendor/github.com/hashicorp/vault/audit/audit.go b/vendor/github.com/hashicorp/vault/audit/audit.go
index dffa8ee..b96391c 100644
--- a/vendor/github.com/hashicorp/vault/audit/audit.go
+++ b/vendor/github.com/hashicorp/vault/audit/audit.go
@@ -25,15 +25,21 @@ type Backend interface {
// GetHash is used to return the given data with the backend's hash,
// so that a caller can determine if a value in the audit log matches
// an expected plaintext value
- GetHash(string) string
+ GetHash(string) (string, error)
// Reload is called on SIGHUP for supporting backends.
Reload() error
+
+ // Invalidate is called for path invalidation
+ Invalidate()
}
type BackendConfig struct {
- // The salt that should be used for any secret obfuscation
- Salt *salt.Salt
+ // The view to store the salt
+ SaltView logical.Storage
+
+ // The salt config that should be used for any secret obfuscation
+ SaltConfig *salt.Config
// Config is the opaque user configuration provided when mounting
Config map[string]string
diff --git a/vendor/github.com/hashicorp/vault/audit/format.go b/vendor/github.com/hashicorp/vault/audit/format.go
index 919da12..18eb254 100644
--- a/vendor/github.com/hashicorp/vault/audit/format.go
+++ b/vendor/github.com/hashicorp/vault/audit/format.go
@@ -7,6 +7,8 @@ import (
"time"
"github.com/SermoDigital/jose/jws"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
"github.com/mitchellh/copystructure"
)
@@ -14,6 +16,7 @@ import (
type AuditFormatWriter interface {
WriteRequest(io.Writer, *AuditRequestEntry) error
WriteResponse(io.Writer, *AuditResponseEntry) error
+ Salt() (*salt.Salt, error)
}
// AuditFormatter implements the Formatter interface, and allows the underlying
@@ -41,6 +44,11 @@ func (f *AuditFormatter) FormatRequest(
return fmt.Errorf("no format writer specified")
}
+ salt, err := f.Salt()
+ if err != nil {
+ return errwrap.Wrapf("error fetching salt: {{err}}", err)
+ }
+
if !config.Raw {
// Before we copy the structure we must nil out some data
// otherwise we will cause reflection to panic and die
@@ -70,9 +78,17 @@ func (f *AuditFormatter) FormatRequest(
// Hash any sensitive information
if auth != nil {
- if err := Hash(config.Salt, auth); err != nil {
+ // Cache and restore accessor in the auth
+ var authAccessor string
+ if !config.HMACAccessor && auth.Accessor != "" {
+ authAccessor = auth.Accessor
+ }
+ if err := Hash(salt, auth); err != nil {
return err
}
+ if authAccessor != "" {
+ auth.Accessor = authAccessor
+ }
}
// Cache and restore accessor in the request
@@ -80,7 +96,7 @@ func (f *AuditFormatter) FormatRequest(
if !config.HMACAccessor && req != nil && req.ClientTokenAccessor != "" {
clientTokenAccessor = req.ClientTokenAccessor
}
- if err := Hash(config.Salt, req); err != nil {
+ if err := Hash(salt, req); err != nil {
return err
}
if clientTokenAccessor != "" {
@@ -102,6 +118,8 @@ func (f *AuditFormatter) FormatRequest(
Error: errString,
Auth: AuditAuth{
+ ClientToken: auth.ClientToken,
+ Accessor: auth.Accessor,
DisplayName: auth.DisplayName,
Policies: auth.Policies,
Metadata: auth.Metadata,
@@ -152,6 +170,11 @@ func (f *AuditFormatter) FormatResponse(
return fmt.Errorf("no format writer specified")
}
+ salt, err := f.Salt()
+ if err != nil {
+ return errwrap.Wrapf("error fetching salt: {{err}}", err)
+ }
+
if !config.Raw {
// Before we copy the structure we must nil out some data
// otherwise we will cause reflection to panic and die
@@ -195,7 +218,7 @@ func (f *AuditFormatter) FormatResponse(
if !config.HMACAccessor && auth.Accessor != "" {
accessor = auth.Accessor
}
- if err := Hash(config.Salt, auth); err != nil {
+ if err := Hash(salt, auth); err != nil {
return err
}
if accessor != "" {
@@ -208,7 +231,7 @@ func (f *AuditFormatter) FormatResponse(
if !config.HMACAccessor && req != nil && req.ClientTokenAccessor != "" {
clientTokenAccessor = req.ClientTokenAccessor
}
- if err := Hash(config.Salt, req); err != nil {
+ if err := Hash(salt, req); err != nil {
return err
}
if clientTokenAccessor != "" {
@@ -224,7 +247,7 @@ func (f *AuditFormatter) FormatResponse(
if !config.HMACAccessor && resp != nil && resp.WrapInfo != nil && resp.WrapInfo.WrappedAccessor != "" {
wrappedAccessor = resp.WrapInfo.WrappedAccessor
}
- if err := Hash(config.Salt, resp); err != nil {
+ if err := Hash(salt, resp); err != nil {
return err
}
if accessor != "" {
@@ -277,6 +300,7 @@ func (f *AuditFormatter) FormatResponse(
TTL: int(resp.WrapInfo.TTL / time.Second),
Token: token,
CreationTime: resp.WrapInfo.CreationTime.Format(time.RFC3339Nano),
+ CreationPath: resp.WrapInfo.CreationPath,
WrappedAccessor: resp.WrapInfo.WrappedAccessor,
}
}
@@ -284,11 +308,13 @@ func (f *AuditFormatter) FormatResponse(
respEntry := &AuditResponseEntry{
Type: "response",
Error: errString,
-
Auth: AuditAuth{
- DisplayName: auth.DisplayName,
- Policies: auth.Policies,
- Metadata: auth.Metadata,
+ ClientToken: auth.ClientToken,
+ Accessor: auth.Accessor,
+ DisplayName: auth.DisplayName,
+ Policies: auth.Policies,
+ Metadata: auth.Metadata,
+ RemainingUses: req.ClientTokenRemainingUses,
},
Request: AuditRequest{
@@ -381,6 +407,7 @@ type AuditResponseWrapInfo struct {
TTL int `json:"ttl"`
Token string `json:"token"`
CreationTime string `json:"creation_time"`
+ CreationPath string `json:"creation_path"`
WrappedAccessor string `json:"wrapped_accessor,omitempty"`
}
diff --git a/vendor/github.com/hashicorp/vault/audit/format_json.go b/vendor/github.com/hashicorp/vault/audit/format_json.go
index 9e200f0..0a5c9d9 100644
--- a/vendor/github.com/hashicorp/vault/audit/format_json.go
+++ b/vendor/github.com/hashicorp/vault/audit/format_json.go
@@ -4,12 +4,15 @@ import (
"encoding/json"
"fmt"
"io"
+
+ "github.com/hashicorp/vault/helper/salt"
)
// JSONFormatWriter is an AuditFormatWriter implementation that structures data into
// a JSON format.
type JSONFormatWriter struct {
- Prefix string
+ Prefix string
+ SaltFunc func() (*salt.Salt, error)
}
func (f *JSONFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error {
@@ -43,3 +46,7 @@ func (f *JSONFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry)
enc := json.NewEncoder(w)
return enc.Encode(resp)
}
+
+func (f *JSONFormatWriter) Salt() (*salt.Salt, error) {
+ return f.SaltFunc()
+}
diff --git a/vendor/github.com/hashicorp/vault/audit/format_json_test.go b/vendor/github.com/hashicorp/vault/audit/format_json_test.go
index 21bb647..688ae3d 100644
--- a/vendor/github.com/hashicorp/vault/audit/format_json_test.go
+++ b/vendor/github.com/hashicorp/vault/audit/format_json_test.go
@@ -9,21 +9,32 @@ import (
"errors"
+ "fmt"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
)
func TestFormatJSON_formatRequest(t *testing.T) {
+ salter, err := salt.NewSalt(nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ saltFunc := func() (*salt.Salt, error) {
+ return salter, nil
+ }
+
+ expectedResultStr := fmt.Sprintf(testFormatJSONReqBasicStrFmt, salter.GetIdentifiedHMAC("foo"))
+
cases := map[string]struct {
- Auth *logical.Auth
- Req *logical.Request
- Err error
- Prefix string
- Result string
+ Auth *logical.Auth
+ Req *logical.Request
+ Err error
+ Prefix string
+ ExpectedStr string
}{
"auth, request": {
- &logical.Auth{ClientToken: "foo", Policies: []string{"root"}},
+ &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", Policies: []string{"root"}},
&logical.Request{
Operation: logical.UpdateOperation,
Path: "/foo",
@@ -39,10 +50,10 @@ func TestFormatJSON_formatRequest(t *testing.T) {
},
errors.New("this is an error"),
"",
- testFormatJSONReqBasicStr,
+ expectedResultStr,
},
"auth, request with prefix": {
- &logical.Auth{ClientToken: "foo", Policies: []string{"root"}},
+ &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", Policies: []string{"root"}},
&logical.Request{
Operation: logical.UpdateOperation,
Path: "/foo",
@@ -58,7 +69,7 @@ func TestFormatJSON_formatRequest(t *testing.T) {
},
errors.New("this is an error"),
"@cee: ",
- testFormatJSONReqBasicStr,
+ expectedResultStr,
},
}
@@ -66,23 +77,24 @@ func TestFormatJSON_formatRequest(t *testing.T) {
var buf bytes.Buffer
formatter := AuditFormatter{
AuditFormatWriter: &JSONFormatWriter{
- Prefix: tc.Prefix,
+ Prefix: tc.Prefix,
+ SaltFunc: saltFunc,
},
}
- salter, _ := salt.NewSalt(nil, nil)
config := FormatterConfig{
- Salt: salter,
+ HMACAccessor: false,
}
if err := formatter.FormatRequest(&buf, config, tc.Auth, tc.Req, tc.Err); err != nil {
t.Fatalf("bad: %s\nerr: %s", name, err)
}
if !strings.HasPrefix(buf.String(), tc.Prefix) {
- t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix)
+ t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, expectedResultStr, tc.Prefix)
}
var expectedjson = new(AuditRequestEntry)
- if err := jsonutil.DecodeJSON([]byte(tc.Result), &expectedjson); err != nil {
+
+ if err := jsonutil.DecodeJSON([]byte(expectedResultStr), &expectedjson); err != nil {
t.Fatalf("bad json: %s", err)
}
@@ -106,5 +118,5 @@ func TestFormatJSON_formatRequest(t *testing.T) {
}
}
-const testFormatJSONReqBasicStr = `{"time":"2015-08-05T13:45:46Z","type":"request","auth":{"display_name":"","policies":["root"],"metadata":null},"request":{"operation":"update","path":"/foo","data":null,"wrap_ttl":60,"remote_address":"127.0.0.1","headers":{"foo":["bar"]}},"error":"this is an error"}
+const testFormatJSONReqBasicStrFmt = `{"time":"2015-08-05T13:45:46Z","type":"request","auth":{"client_token":"%s","accessor":"bar","display_name":"testtoken","policies":["root"],"metadata":null},"request":{"operation":"update","path":"/foo","data":null,"wrap_ttl":60,"remote_address":"127.0.0.1","headers":{"foo":["bar"]}},"error":"this is an error"}
`
diff --git a/vendor/github.com/hashicorp/vault/audit/format_jsonx.go b/vendor/github.com/hashicorp/vault/audit/format_jsonx.go
index cc6cc95..792e552 100644
--- a/vendor/github.com/hashicorp/vault/audit/format_jsonx.go
+++ b/vendor/github.com/hashicorp/vault/audit/format_jsonx.go
@@ -5,13 +5,15 @@ import (
"fmt"
"io"
+ "github.com/hashicorp/vault/helper/salt"
"github.com/jefferai/jsonx"
)
// JSONxFormatWriter is an AuditFormatWriter implementation that structures data into
// a XML format.
type JSONxFormatWriter struct {
- Prefix string
+ Prefix string
+ SaltFunc func() (*salt.Salt, error)
}
func (f *JSONxFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error {
@@ -65,3 +67,7 @@ func (f *JSONxFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry)
_, err = w.Write(xmlBytes)
return err
}
+
+func (f *JSONxFormatWriter) Salt() (*salt.Salt, error) {
+ return f.SaltFunc()
+}
diff --git a/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go b/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go
index 8d4fe4b..b04ccd0 100644
--- a/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go
+++ b/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go
@@ -8,21 +8,32 @@ import (
"errors"
+ "fmt"
"github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
)
func TestFormatJSONx_formatRequest(t *testing.T) {
+ salter, err := salt.NewSalt(nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ saltFunc := func() (*salt.Salt, error) {
+ return salter, nil
+ }
+
+ fooSalted := salter.GetIdentifiedHMAC("foo")
+
cases := map[string]struct {
- Auth *logical.Auth
- Req *logical.Request
- Err error
- Prefix string
- Result string
- Expected string
+ Auth *logical.Auth
+ Req *logical.Request
+ Err error
+ Prefix string
+ Result string
+ ExpectedStr string
}{
"auth, request": {
- &logical.Auth{ClientToken: "foo", Policies: []string{"root"}},
+ &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", Policies: []string{"root"}},
&logical.Request{
Operation: logical.UpdateOperation,
Path: "/foo",
@@ -39,10 +50,11 @@ func TestFormatJSONx_formatRequest(t *testing.T) {
errors.New("this is an error"),
"",
"",
- `<json:object name="auth"><json:string name="display_name"></json:string><json:null name="metadata" /><json:array name="policies"><json:string>root</json:string></json:array></json:object><json:string name="error">this is an error</json:string><json:object name="request"><json:object name="headers"><json:array name="foo"><json:string>bar</json:string></json:array></json:object><json:string name="operation">update</json:string><json:string name="path">/foo</json:string><json:string name="remote_address">127.0.0.1</json:string><json:number name="wrap_ttl">60</json:number></json:object><json:string name="type">request</json:string>`,
+ fmt.Sprintf(`<json:object name="auth"><json:string name="accessor">bar</json:string><json:string name="client_token">%s</json:string><json:string name="display_name">testtoken</json:string><json:null name="metadata" /><json:array name="policies"><json:string>root</json:string></json:array></json:object><json:string name="error">this is an error</json:string><json:object name="request"><json:object name="headers"><json:array name="foo"><json:string>bar</json:string></json:array></json:object><json:string name="operation">update</json:string><json:string name="path">/foo</json:string><json:string name="remote_address">127.0.0.1</json:string><json:number name="wrap_ttl">60</json:number></json:object><json:string name="type">request</json:string>`,
+ fooSalted),
},
"auth, request with prefix": {
- &logical.Auth{ClientToken: "foo", Policies: []string{"root"}},
+ &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", Policies: []string{"root"}},
&logical.Request{
Operation: logical.UpdateOperation,
Path: "/foo",
@@ -59,7 +71,8 @@ func TestFormatJSONx_formatRequest(t *testing.T) {
errors.New("this is an error"),
"",
"@cee: ",
- `<json:object name="auth"><json:string name="display_name"></json:string><json:null name="metadata" /><json:array name="policies"><json:string>root</json:string></json:array></json:object><json:string name="error">this is an error</json:string><json:object name="request"><json:object name="headers"><json:array name="foo"><json:string>bar</json:string></json:array></json:object><json:string name="operation">update</json:string><json:string name="path">/foo</json:string><json:string name="remote_address">127.0.0.1</json:string><json:number name="wrap_ttl">60</json:number></json:object><json:string name="type">request</json:string>`,
+ fmt.Sprintf(`<json:object name="auth"><json:string name="accessor">bar</json:string><json:string name="client_token">%s</json:string><json:string name="display_name">testtoken</json:string><json:null name="metadata" /><json:array name="policies"><json:string>root</json:string></json:array></json:object><json:string name="error">this is an error</json:string><json:object name="request"><json:object name="headers"><json:array name="foo"><json:string>bar</json:string></json:array></json:object><json:string name="operation">update</json:string><json:string name="path">/foo</json:string><json:string name="remote_address">127.0.0.1</json:string><json:number name="wrap_ttl">60</json:number></json:object><json:string name="type">request</json:string>`,
+ fooSalted),
},
}
@@ -67,13 +80,13 @@ func TestFormatJSONx_formatRequest(t *testing.T) {
var buf bytes.Buffer
formatter := AuditFormatter{
AuditFormatWriter: &JSONxFormatWriter{
- Prefix: tc.Prefix,
+ Prefix: tc.Prefix,
+ SaltFunc: saltFunc,
},
}
- salter, _ := salt.NewSalt(nil, nil)
config := FormatterConfig{
- Salt: salter,
- OmitTime: true,
+ OmitTime: true,
+ HMACAccessor: false,
}
if err := formatter.FormatRequest(&buf, config, tc.Auth, tc.Req, tc.Err); err != nil {
t.Fatalf("bad: %s\nerr: %s", name, err)
@@ -83,10 +96,10 @@ func TestFormatJSONx_formatRequest(t *testing.T) {
t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix)
}
- if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(tc.Expected)) {
+ if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) {
t.Fatalf(
"bad: %s\nResult:\n\n'%s'\n\nExpected:\n\n'%s'",
- name, strings.TrimSpace(buf.String()), string(tc.Expected))
+ name, strings.TrimSpace(buf.String()), string(tc.ExpectedStr))
}
}
}
diff --git a/vendor/github.com/hashicorp/vault/audit/format_test.go b/vendor/github.com/hashicorp/vault/audit/format_test.go
index 6a6425b..5390229 100644
--- a/vendor/github.com/hashicorp/vault/audit/format_test.go
+++ b/vendor/github.com/hashicorp/vault/audit/format_test.go
@@ -10,6 +10,8 @@ import (
)
type noopFormatWriter struct {
+ salt *salt.Salt
+ SaltFunc func() (*salt.Salt, error)
}
func (n *noopFormatWriter) WriteRequest(_ io.Writer, _ *AuditRequestEntry) error {
@@ -20,11 +22,20 @@ func (n *noopFormatWriter) WriteResponse(_ io.Writer, _ *AuditResponseEntry) err
return nil
}
-func TestFormatRequestErrors(t *testing.T) {
- salter, _ := salt.NewSalt(nil, nil)
- config := FormatterConfig{
- Salt: salter,
+func (n *noopFormatWriter) Salt() (*salt.Salt, error) {
+ if n.salt != nil {
+ return n.salt, nil
}
+ var err error
+ n.salt, err = salt.NewSalt(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ return n.salt, nil
+}
+
+func TestFormatRequestErrors(t *testing.T) {
+ config := FormatterConfig{}
formatter := AuditFormatter{
AuditFormatWriter: &noopFormatWriter{},
}
@@ -38,10 +49,7 @@ func TestFormatRequestErrors(t *testing.T) {
}
func TestFormatResponseErrors(t *testing.T) {
- salter, _ := salt.NewSalt(nil, nil)
- config := FormatterConfig{
- Salt: salter,
- }
+ config := FormatterConfig{}
formatter := AuditFormatter{
AuditFormatWriter: &noopFormatWriter{},
}
diff --git a/vendor/github.com/hashicorp/vault/audit/formatter.go b/vendor/github.com/hashicorp/vault/audit/formatter.go
index 318bd1b..3c1748f 100644
--- a/vendor/github.com/hashicorp/vault/audit/formatter.go
+++ b/vendor/github.com/hashicorp/vault/audit/formatter.go
@@ -3,7 +3,6 @@ package audit
import (
"io"
- "github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
)
@@ -19,7 +18,6 @@ type Formatter interface {
type FormatterConfig struct {
Raw bool
- Salt *salt.Salt
HMACAccessor bool
// This should only ever be used in a testing context
diff --git a/vendor/github.com/hashicorp/vault/audit/hashstructure.go b/vendor/github.com/hashicorp/vault/audit/hashstructure.go
index 8d0fd7c..8caf3eb 100644
--- a/vendor/github.com/hashicorp/vault/audit/hashstructure.go
+++ b/vendor/github.com/hashicorp/vault/audit/hashstructure.go
@@ -1,10 +1,13 @@
package audit
import (
+ "errors"
"reflect"
"strings"
+ "time"
"github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/helper/wrapping"
"github.com/hashicorp/vault/logical"
"github.com/mitchellh/copystructure"
"github.com/mitchellh/reflectwalk"
@@ -84,7 +87,7 @@ func Hash(salter *salt.Salt, raw interface{}) error {
s.Data = data.(map[string]interface{})
- case *logical.ResponseWrapInfo:
+ case *wrapping.ResponseWrapInfo:
if s == nil {
return nil
}
@@ -140,6 +143,12 @@ type hashWalker struct {
unknownKeys []string
}
+// hashTimeType stores a pre-computed reflect.Type for a time.Time so
+// we can quickly compare in hashWalker.Struct. We create an empty/invalid
+// time.Time{} so we don't need to incur any additional startup cost vs.
+// Now() or Unix().
+var hashTimeType = reflect.TypeOf(time.Time{})
+
func (w *hashWalker) Enter(loc reflectwalk.Location) error {
w.loc = loc
return nil
@@ -187,6 +196,35 @@ func (w *hashWalker) SliceElem(i int, elem reflect.Value) error {
return nil
}
+func (w *hashWalker) Struct(v reflect.Value) error {
+ // We are looking for time values. If it isn't one, ignore it.
+ if v.Type() != hashTimeType {
+ return nil
+ }
+
+ // If we aren't in a map value, return an error to prevent a panic
+ if v.Interface() != w.lastValue.Interface() {
+ return errors.New("time.Time value in a non map key cannot be hashed for audits")
+ }
+
+ // Create a string value of the time. IMPORTANT: this must never change
+ // across Vault versions or the hash value of equivalent time.Time will
+ // change.
+ strVal := v.Interface().(time.Time).Format(time.RFC3339Nano)
+
+ // Set the map value to the string instead of the time.Time object
+ m := w.cs[len(w.cs)-1]
+ mk := w.csData.(reflect.Value)
+ m.SetMapIndex(mk, reflect.ValueOf(strVal))
+
+ // Skip this entry so that we don't walk the struct.
+ return reflectwalk.SkipEntry
+}
+
+func (w *hashWalker) StructField(reflect.StructField, reflect.Value) error {
+ return nil
+}
+
func (w *hashWalker) Primitive(v reflect.Value) error {
if w.Callback == nil {
return nil
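The new hashWalker.Struct hook rewrites time.Time map values to a fixed string before hashing, and its comment stresses that the chosen format must never change. A short illustration of why RFC3339Nano works as a canonical form: it is deterministic for a given instant, so the HMAC of the rendered string stays stable across runs and Vault versions:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Date(2017, 10, 5, 12, 34, 56, 789000000, time.UTC)
	// Deterministic rendering; trailing zero nanoseconds are trimmed.
	fmt.Println(ts.Format(time.RFC3339Nano)) // 2017-10-05T12:34:56.789Z
}
```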
diff --git a/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go b/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go
index 5fefa0f..49afa6e 100644
--- a/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go
+++ b/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go
@@ -9,6 +9,7 @@ import (
"github.com/hashicorp/vault/helper/certutil"
"github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/helper/wrapping"
"github.com/hashicorp/vault/logical"
"github.com/mitchellh/copystructure"
)
@@ -69,7 +70,7 @@ func TestCopy_response(t *testing.T) {
Data: map[string]interface{}{
"foo": "bar",
},
- WrapInfo: &logical.ResponseWrapInfo{
+ WrapInfo: &wrapping.ResponseWrapInfo{
TTL: 60,
Token: "foo",
CreationTime: time.Now(),
@@ -139,8 +140,12 @@ func TestHash(t *testing.T) {
&logical.Response{
Data: map[string]interface{}{
"foo": "bar",
+
+ // Responses can contain time values, so test that with
+ // a known fixed value.
+ "bar": now,
},
- WrapInfo: &logical.ResponseWrapInfo{
+ WrapInfo: &wrapping.ResponseWrapInfo{
TTL: 60,
Token: "bar",
CreationTime: now,
@@ -150,8 +155,9 @@ func TestHash(t *testing.T) {
&logical.Response{
Data: map[string]interface{}{
"foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
+ "bar": now.Format(time.RFC3339Nano),
},
- WrapInfo: &logical.ResponseWrapInfo{
+ WrapInfo: &wrapping.ResponseWrapInfo{
TTL: 60,
Token: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
CreationTime: now,
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go
index cc2cfe5..614e153 100644
--- a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go
@@ -2,18 +2,24 @@ package file
import (
"fmt"
+ "io/ioutil"
"os"
"path/filepath"
"strconv"
+ "strings"
"sync"
"github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
)
func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
- if conf.Salt == nil {
- return nil, fmt.Errorf("nil salt")
+ if conf.SaltConfig == nil {
+ return nil, fmt.Errorf("nil salt config")
+ }
+ if conf.SaltView == nil {
+ return nil, fmt.Errorf("nil salt view")
}
path, ok := conf.Config["file_path"]
@@ -24,6 +30,14 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
}
}
+ // normalize path if configured for stdout or discard

+ if strings.ToLower(path) == "stdout" {
+ path = "stdout"
+ }
+ if strings.ToLower(path) == "discard" {
+ path = "discard"
+ }
+
format, ok := conf.Config["format"]
if !ok {
format = "json"
@@ -65,11 +79,12 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
}
b := &Backend{
- path: path,
- mode: mode,
+ path: path,
+ mode: mode,
+ saltConfig: conf.SaltConfig,
+ saltView: conf.SaltView,
formatConfig: audit.FormatterConfig{
Raw: logRaw,
- Salt: conf.Salt,
HMACAccessor: hmacAccessor,
},
}
@@ -77,19 +92,26 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
switch format {
case "json":
b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
- Prefix: conf.Config["prefix"],
+ Prefix: conf.Config["prefix"],
+ SaltFunc: b.Salt,
}
case "jsonx":
b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{
- Prefix: conf.Config["prefix"],
+ Prefix: conf.Config["prefix"],
+ SaltFunc: b.Salt,
}
}
- // Ensure that the file can be successfully opened for writing;
- // otherwise it will be too late to catch later without problems
- // (ref: https://github.com/hashicorp/vault/issues/550)
- if err := b.open(); err != nil {
- return nil, fmt.Errorf("sanity check failed; unable to open %s for writing: %v", path, err)
+ switch path {
+ case "stdout", "discard":
+ // no need to test opening file if outputting to stdout or discarding
+ default:
+ // Ensure that the file can be successfully opened for writing;
+ // otherwise it will be too late to catch later without problems
+ // (ref: https://github.com/hashicorp/vault/issues/550)
+ if err := b.open(); err != nil {
+ return nil, fmt.Errorf("sanity check failed; unable to open %s for writing: %v", path, err)
+ }
}
return b, nil
@@ -109,16 +131,64 @@ type Backend struct {
fileLock sync.RWMutex
f *os.File
mode os.FileMode
+
+ saltMutex sync.RWMutex
+ salt *salt.Salt
+ saltConfig *salt.Config
+ saltView logical.Storage
}
-func (b *Backend) GetHash(data string) string {
- return audit.HashString(b.formatConfig.Salt, data)
+func (b *Backend) Salt() (*salt.Salt, error) {
+ b.saltMutex.RLock()
+ if b.salt != nil {
+ defer b.saltMutex.RUnlock()
+ return b.salt, nil
+ }
+ b.saltMutex.RUnlock()
+ b.saltMutex.Lock()
+ defer b.saltMutex.Unlock()
+ if b.salt != nil {
+ return b.salt, nil
+ }
+ salt, err := salt.NewSalt(b.saltView, b.saltConfig)
+ if err != nil {
+ return nil, err
+ }
+ b.salt = salt
+ return salt, nil
+}
+
+func (b *Backend) GetHash(data string) (string, error) {
+ salt, err := b.Salt()
+ if err != nil {
+ return "", err
+ }
+ return audit.HashString(salt, data), nil
}
func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error {
b.fileLock.Lock()
defer b.fileLock.Unlock()
+ switch b.path {
+ case "stdout":
+ return b.formatter.FormatRequest(os.Stdout, b.formatConfig, auth, req, outerErr)
+ case "discard":
+ return b.formatter.FormatRequest(ioutil.Discard, b.formatConfig, auth, req, outerErr)
+ }
+
+ if err := b.open(); err != nil {
+ return err
+ }
+
+ if err := b.formatter.FormatRequest(b.f, b.formatConfig, auth, req, outerErr); err == nil {
+ return nil
+ }
+
+ // Opportunistically try to re-open the FD, once per call
+ b.f.Close()
+ b.f = nil
+
if err := b.open(); err != nil {
return err
}
@@ -135,6 +205,25 @@ func (b *Backend) LogResponse(
b.fileLock.Lock()
defer b.fileLock.Unlock()
+ switch b.path {
+ case "stdout":
+ return b.formatter.FormatResponse(os.Stdout, b.formatConfig, auth, req, resp, err)
+ case "discard":
+ return b.formatter.FormatResponse(ioutil.Discard, b.formatConfig, auth, req, resp, err)
+ }
+
+ if err := b.open(); err != nil {
+ return err
+ }
+
+ if err := b.formatter.FormatResponse(b.f, b.formatConfig, auth, req, resp, err); err == nil {
+ return nil
+ }
+
+ // Opportunistically try to re-open the FD, once per call
+ b.f.Close()
+ b.f = nil
+
if err := b.open(); err != nil {
return err
}
@@ -172,6 +261,11 @@ func (b *Backend) open() error {
}
func (b *Backend) Reload() error {
+ switch b.path {
+ case "stdout", "discard":
+ return nil
+ }
+
b.fileLock.Lock()
defer b.fileLock.Unlock()
@@ -189,3 +283,9 @@ func (b *Backend) Reload() error {
return b.open()
}
+
+func (b *Backend) Invalidate() {
+ b.saltMutex.Lock()
+ defer b.saltMutex.Unlock()
+ b.salt = nil
+}
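The Salt() accessor above (duplicated in the socket and syslog backends below) is classic double-checked locking: a cheap read lock on the hot path, an exclusive lock only on a miss, and a second nil check under the write lock because another goroutine may have built the salt in the meantime. The pattern in isolation, as a sketch with hypothetical names:

```go
package main

import "sync"

// lazyValue is a hypothetical stand-in for the backend's cached *salt.Salt.
type lazyValue struct {
	mu  sync.RWMutex
	val *string
}

func (l *lazyValue) get(build func() (*string, error)) (*string, error) {
	l.mu.RLock()
	if l.val != nil {
		defer l.mu.RUnlock()
		return l.val, nil
	}
	l.mu.RUnlock()

	l.mu.Lock()
	defer l.mu.Unlock()
	// Re-check: another goroutine may have populated val while we
	// were waiting for the write lock.
	if l.val != nil {
		return l.val, nil
	}
	v, err := build()
	if err != nil {
		return nil, err
	}
	l.val = v
	return v, nil
}

func main() {
	l := &lazyValue{}
	v, _ := l.get(func() (*string, error) { s := "expensive"; return &s, nil })
	println(*v)
}
```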
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go
index 0a1a8c7..3b4ec84 100644
--- a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go
@@ -9,15 +9,21 @@ import (
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/salt"
+ "github.com/hashicorp/vault/logical"
)
func TestAuditFile_fileModeNew(t *testing.T) {
- salter, _ := salt.NewSalt(nil, nil)
-
modeStr := "0777"
mode, err := strconv.ParseUint(modeStr, 8, 32)
+ if err != nil {
+ t.Fatal(err)
+ }
path, err := ioutil.TempDir("", "vault-test_audit_file-file_mode_new")
+ if err != nil {
+ t.Fatal(err)
+ }
+
defer os.RemoveAll(path)
file := filepath.Join(path, "auditTest.txt")
@@ -28,8 +34,9 @@ func TestAuditFile_fileModeNew(t *testing.T) {
}
_, err = Factory(&audit.BackendConfig{
- Salt: salter,
- Config: config,
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: config,
})
if err != nil {
t.Fatal(err)
@@ -45,8 +52,6 @@ func TestAuditFile_fileModeNew(t *testing.T) {
}
func TestAuditFile_fileModeExisting(t *testing.T) {
- salter, _ := salt.NewSalt(nil, nil)
-
f, err := ioutil.TempFile("", "test")
if err != nil {
t.Fatalf("Failure to create test file.")
@@ -68,8 +73,9 @@ func TestAuditFile_fileModeExisting(t *testing.T) {
}
_, err = Factory(&audit.BackendConfig{
- Salt: salter,
- Config: config,
+ Config: config,
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
})
if err != nil {
t.Fatal(err)
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go
index 91e701e..bf0ce7f 100644
--- a/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go
@@ -11,12 +11,16 @@ import (
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
)
func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
- if conf.Salt == nil {
- return nil, fmt.Errorf("nil salt passed in")
+ if conf.SaltConfig == nil {
+ return nil, fmt.Errorf("nil salt config")
+ }
+ if conf.SaltView == nil {
+ return nil, fmt.Errorf("nil salt view")
}
address, ok := conf.Config["address"]
@@ -68,18 +72,14 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
logRaw = b
}
- conn, err := net.Dial(socketType, address)
- if err != nil {
- return nil, err
- }
-
b := &Backend{
- connection: conn,
+ saltConfig: conf.SaltConfig,
+ saltView: conf.SaltView,
formatConfig: audit.FormatterConfig{
Raw: logRaw,
- Salt: conf.Salt,
HMACAccessor: hmacAccessor,
},
+
writeDuration: writeDuration,
address: address,
socketType: socketType,
@@ -88,11 +88,13 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
switch format {
case "json":
b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
- Prefix: conf.Config["prefix"],
+ Prefix: conf.Config["prefix"],
+ SaltFunc: b.Salt,
}
case "jsonx":
b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{
- Prefix: conf.Config["prefix"],
+ Prefix: conf.Config["prefix"],
+ SaltFunc: b.Salt,
}
}
@@ -111,10 +113,19 @@ type Backend struct {
socketType string
sync.Mutex
+
+ saltMutex sync.RWMutex
+ salt *salt.Salt
+ saltConfig *salt.Config
+ saltView logical.Storage
}
-func (b *Backend) GetHash(data string) string {
- return audit.HashString(b.formatConfig.Salt, data)
+func (b *Backend) GetHash(data string) (string, error) {
+ salt, err := b.Salt()
+ if err != nil {
+ return "", err
+ }
+ return audit.HashString(salt, data), nil
}
func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error {
@@ -165,6 +176,12 @@ func (b *Backend) LogResponse(auth *logical.Auth, req *logical.Request,
}
func (b *Backend) write(buf []byte) error {
+ if b.connection == nil {
+ if err := b.reconnect(); err != nil {
+ return err
+ }
+ }
+
err := b.connection.SetWriteDeadline(time.Now().Add(b.writeDuration))
if err != nil {
return err
@@ -179,12 +196,16 @@ func (b *Backend) write(buf []byte) error {
}
func (b *Backend) reconnect() error {
+ if b.connection != nil {
+ b.connection.Close()
+ b.connection = nil
+ }
+
conn, err := net.Dial(b.socketType, b.address)
if err != nil {
return err
}
- b.connection.Close()
b.connection = conn
return nil
@@ -198,3 +219,29 @@ func (b *Backend) Reload() error {
return err
}
+
+func (b *Backend) Salt() (*salt.Salt, error) {
+ b.saltMutex.RLock()
+ if b.salt != nil {
+ defer b.saltMutex.RUnlock()
+ return b.salt, nil
+ }
+ b.saltMutex.RUnlock()
+ b.saltMutex.Lock()
+ defer b.saltMutex.Unlock()
+ if b.salt != nil {
+ return b.salt, nil
+ }
+ salt, err := salt.NewSalt(b.saltView, b.saltConfig)
+ if err != nil {
+ return nil, err
+ }
+ b.salt = salt
+ return salt, nil
+}
+
+func (b *Backend) Invalidate() {
+ b.saltMutex.Lock()
+ defer b.saltMutex.Unlock()
+ b.salt = nil
+}
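Besides the salt changes, this file drops the eager net.Dial from the Factory: write() now dials on first use and reconnect() tolerates being called with or without a live connection, so mounting a socket audit device no longer fails just because the collector is down. A minimal sketch of that lazy-dial shape, with hypothetical names:

```go
package main

import (
	"net"
	"time"
)

type sender struct {
	conn       net.Conn
	socketType string // e.g. "tcp"
	address    string // e.g. "127.0.0.1:9090"
	timeout    time.Duration
}

// reconnect closes any stale connection and dials a fresh one.
func (s *sender) reconnect() error {
	if s.conn != nil {
		s.conn.Close()
		s.conn = nil
	}
	conn, err := net.Dial(s.socketType, s.address)
	if err != nil {
		return err
	}
	s.conn = conn
	return nil
}

// write dials lazily on first use instead of at construction time.
func (s *sender) write(buf []byte) error {
	if s.conn == nil {
		if err := s.reconnect(); err != nil {
			return err
		}
	}
	if err := s.conn.SetWriteDeadline(time.Now().Add(s.timeout)); err != nil {
		return err
	}
	_, err := s.conn.Write(buf)
	return err
}

func main() {
	s := &sender{socketType: "tcp", address: "127.0.0.1:9090", timeout: time.Second}
	_ = s.write([]byte("audit entry\n")) // dials on demand; errors if the endpoint is down
}
```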
diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go
index 4b1912f..22c39d4 100644
--- a/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go
@@ -4,15 +4,20 @@ import (
"bytes"
"fmt"
"strconv"
+ "sync"
"github.com/hashicorp/go-syslog"
"github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
)
func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
- if conf.Salt == nil {
- return nil, fmt.Errorf("Nil salt passed in")
+ if conf.SaltConfig == nil {
+ return nil, fmt.Errorf("nil salt config")
+ }
+ if conf.SaltView == nil {
+ return nil, fmt.Errorf("nil salt view")
}
// Get facility or default to AUTH
@@ -64,10 +69,11 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
}
b := &Backend{
- logger: logger,
+ logger: logger,
+ saltConfig: conf.SaltConfig,
+ saltView: conf.SaltView,
formatConfig: audit.FormatterConfig{
Raw: logRaw,
- Salt: conf.Salt,
HMACAccessor: hmacAccessor,
},
}
@@ -75,11 +81,13 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
switch format {
case "json":
b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{
- Prefix: conf.Config["prefix"],
+ Prefix: conf.Config["prefix"],
+ SaltFunc: b.Salt,
}
case "jsonx":
b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{
- Prefix: conf.Config["prefix"],
+ Prefix: conf.Config["prefix"],
+ SaltFunc: b.Salt,
}
}
@@ -92,10 +100,19 @@ type Backend struct {
formatter audit.AuditFormatter
formatConfig audit.FormatterConfig
+
+ saltMutex sync.RWMutex
+ salt *salt.Salt
+ saltConfig *salt.Config
+ saltView logical.Storage
}
-func (b *Backend) GetHash(data string) string {
- return audit.HashString(b.formatConfig.Salt, data)
+func (b *Backend) GetHash(data string) (string, error) {
+ salt, err := b.Salt()
+ if err != nil {
+ return "", err
+ }
+ return audit.HashString(salt, data), nil
}
func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error {
@@ -123,3 +140,29 @@ func (b *Backend) LogResponse(auth *logical.Auth, req *logical.Request, resp *lo
func (b *Backend) Reload() error {
return nil
}
+
+func (b *Backend) Salt() (*salt.Salt, error) {
+ b.saltMutex.RLock()
+ if b.salt != nil {
+ defer b.saltMutex.RUnlock()
+ return b.salt, nil
+ }
+ b.saltMutex.RUnlock()
+ b.saltMutex.Lock()
+ defer b.saltMutex.Unlock()
+ if b.salt != nil {
+ return b.salt, nil
+ }
+ salt, err := salt.NewSalt(b.saltView, b.saltConfig)
+ if err != nil {
+ return nil, err
+ }
+ b.salt = salt
+ return salt, nil
+}
+
+func (b *Backend) Invalidate() {
+ b.saltMutex.Lock()
+ defer b.saltMutex.Unlock()
+ b.salt = nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go
index 76d9a6e..a25c9ee 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go
@@ -1,7 +1,7 @@
package appId
import (
- "fmt"
+ "sync"
"github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
@@ -13,10 +13,13 @@ func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
if err != nil {
return nil, err
}
- return b.Setup(conf)
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
-func Backend(conf *logical.BackendConfig) (*framework.Backend, error) {
+func Backend(conf *logical.BackendConfig) (*backend, error) {
var b backend
b.MapAppId = &framework.PolicyMap{
PathMap: framework.PathMap{
@@ -60,7 +63,6 @@ func Backend(conf *logical.BackendConfig) (*framework.Backend, error) {
"login/*",
},
},
-
Paths: framework.PathAppend([]*framework.Path{
pathLogin(&b),
pathLoginWithAppIDPath(&b),
@@ -68,110 +70,58 @@ func Backend(conf *logical.BackendConfig) (*framework.Backend, error) {
b.MapAppId.Paths(),
b.MapUserId.Paths(),
),
-
- AuthRenew: b.pathLoginRenew,
-
- Init: b.initialize,
+ AuthRenew: b.pathLoginRenew,
+ Invalidate: b.invalidate,
+ BackendType: logical.TypeCredential,
}
b.view = conf.StorageView
+ b.MapAppId.SaltFunc = b.Salt
+ b.MapUserId.SaltFunc = b.Salt
- return b.Backend, nil
+ return &b, nil
}
type backend struct {
*framework.Backend
- Salt *salt.Salt
+ salt *salt.Salt
+ SaltMutex sync.RWMutex
view logical.Storage
MapAppId *framework.PolicyMap
MapUserId *framework.PathMap
}
-func (b *backend) initialize() error {
+func (b *backend) Salt() (*salt.Salt, error) {
+ b.SaltMutex.RLock()
+ if b.salt != nil {
+ defer b.SaltMutex.RUnlock()
+ return b.salt, nil
+ }
+ b.SaltMutex.RUnlock()
+ b.SaltMutex.Lock()
+ defer b.SaltMutex.Unlock()
+ if b.salt != nil {
+ return b.salt, nil
+ }
salt, err := salt.NewSalt(b.view, &salt.Config{
HashFunc: salt.SHA1Hash,
+ Location: salt.DefaultLocation,
})
if err != nil {
- return err
+ return nil, err
}
- b.Salt = salt
-
- b.MapAppId.Salt = salt
- b.MapUserId.Salt = salt
-
- // Since the salt is new in 0.2, we need to handle this by migrating
- // any existing keys to use the salt. We can deprecate this eventually,
- // but for now we want a smooth upgrade experience by automatically
- // upgrading to use salting.
- if salt.DidGenerate() {
- if err := b.upgradeToSalted(b.view); err != nil {
- return err
- }
- }
-
- return nil
+ b.salt = salt
+ return salt, nil
}
-// upgradeToSalted is used to upgrade the non-salted keys prior to
-// Vault 0.2 to be salted. This is done on mount time and is only
-// done once. It can be deprecated eventually, but should be around
-// long enough for all 0.1.x users to upgrade.
-func (b *backend) upgradeToSalted(view logical.Storage) error {
- // Create a copy of MapAppId that does not use a Salt
- nonSaltedAppId := new(framework.PathMap)
- *nonSaltedAppId = b.MapAppId.PathMap
- nonSaltedAppId.Salt = nil
-
- // Get the list of app-ids
- keys, err := b.MapAppId.List(view, "")
- if err != nil {
- return fmt.Errorf("failed to list app-ids: %v", err)
+func (b *backend) invalidate(key string) {
+ switch key {
+ case salt.DefaultLocation:
+ b.SaltMutex.Lock()
+ defer b.SaltMutex.Unlock()
+ b.salt = nil
}
-
- // Upgrade all the existing keys
- for _, key := range keys {
- val, err := nonSaltedAppId.Get(view, key)
- if err != nil {
- return fmt.Errorf("failed to read app-id: %v", err)
- }
-
- if err := b.MapAppId.Put(view, key, val); err != nil {
- return fmt.Errorf("failed to write app-id: %v", err)
- }
-
- if err := nonSaltedAppId.Delete(view, key); err != nil {
- return fmt.Errorf("failed to delete app-id: %v", err)
- }
- }
-
- // Create a copy of MapUserId that does not use a Salt
- nonSaltedUserId := new(framework.PathMap)
- *nonSaltedUserId = *b.MapUserId
- nonSaltedUserId.Salt = nil
-
- // Get the list of user-ids
- keys, err = b.MapUserId.List(view, "")
- if err != nil {
- return fmt.Errorf("failed to list user-ids: %v", err)
- }
-
- // Upgrade all the existing keys
- for _, key := range keys {
- val, err := nonSaltedUserId.Get(view, key)
- if err != nil {
- return fmt.Errorf("failed to read user-id: %v", err)
- }
-
- if err := b.MapUserId.Put(view, key, val); err != nil {
- return fmt.Errorf("failed to write user-id: %v", err)
- }
-
- if err := nonSaltedUserId.Delete(view, key); err != nil {
- return fmt.Errorf("failed to delete user-id: %v", err)
- }
- }
- return nil
}
const backendHelp = `
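With initialize()/upgradeToSalted gone, cache coherence is handled by invalidate(): when the salt's own storage key changes (for example on a replicated write), the cached salt is dropped and the next Salt() call rebuilds it lazily. A self-contained sketch of that pairing, assuming salt.DefaultLocation is the string "salt":

```go
package main

import "sync"

const defaultLocation = "salt" // assumed value of salt.DefaultLocation

type saltCache struct {
	mu   sync.RWMutex
	salt *string // stand-in for *salt.Salt
}

// invalidate mirrors the backend's invalidate(): only a write to the
// salt's own storage key clears the cache; unrelated keys are ignored.
func (c *saltCache) invalidate(key string) {
	if key == defaultLocation {
		c.mu.Lock()
		defer c.mu.Unlock()
		c.salt = nil
	}
}

func main() {
	c := &saltCache{}
	c.invalidate("roles/foo") // no-op
	c.invalidate("salt")      // drops the cache; next read rebuilds it
}
```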
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go
index 2960e40..4ae5d3e 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go
@@ -9,8 +9,22 @@ import (
)
func TestBackend_basic(t *testing.T) {
+ var b *backend
+ var err error
+ var storage logical.Storage
+ factory := func(conf *logical.BackendConfig) (logical.Backend, error) {
+ b, err = Backend(conf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ storage = conf.StorageView
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
+ }
logicaltest.Test(t, logicaltest.TestCase{
- Factory: Factory,
+ Factory: factory,
Steps: []logicaltest.TestStep{
testAccStepMapAppId(t),
testAccStepMapUserId(t),
@@ -21,6 +35,30 @@ func TestBackend_basic(t *testing.T) {
testAccLoginDeleted(t),
},
})
+
+ req := &logical.Request{
+ Path: "map/app-id",
+ Operation: logical.ListOperation,
+ Storage: storage,
+ }
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("nil response")
+ }
+ keys := resp.Data["keys"].([]string)
+ if len(keys) != 1 {
+ t.Fatalf("expected 1 key, got %d", len(keys))
+ }
+ salt, err := b.Salt()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if keys[0] != salt.SaltID("foo") {
+ t.Fatal("value was improperly salted")
+ }
}
func TestBackend_cidr(t *testing.T) {
@@ -51,70 +89,6 @@ func TestBackend_displayName(t *testing.T) {
})
}
-// Verify that we are able to update from non-salted (<0.2) to
-// using a Salt for the paths
-func TestBackend_upgradeToSalted(t *testing.T) {
- inm := new(logical.InmemStorage)
-
- // Create some fake keys
- se, _ := logical.StorageEntryJSON("struct/map/app-id/foo",
- map[string]string{"value": "test"})
- inm.Put(se)
- se, _ = logical.StorageEntryJSON("struct/map/user-id/bar",
- map[string]string{"value": "foo"})
- inm.Put(se)
-
- // Initialize the backend, this should do the automatic upgrade
- conf := &logical.BackendConfig{
- StorageView: inm,
- }
- backend, err := Factory(conf)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- err = backend.Initialize()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Check the keys have been upgraded
- out, err := inm.Get("struct/map/app-id/foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("unexpected key")
- }
- out, err = inm.Get("struct/map/user-id/bar")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("unexpected key")
- }
-
- // Backend should still be able to resolve
- req := logical.TestRequest(t, logical.ReadOperation, "map/app-id/foo")
- req.Storage = inm
- resp, err := backend.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp.Data["value"] != "test" {
- t.Fatalf("bad: %#v", resp)
- }
-
- req = logical.TestRequest(t, logical.ReadOperation, "map/user-id/bar")
- req.Storage = inm
- resp, err = backend.HandleRequest(req)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if resp.Data["value"] != "foo" {
- t.Fatalf("bad: %#v", resp)
- }
-}
-
func testAccStepMapAppId(t *testing.T) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go
index cd5d97b..d086d3c 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go
@@ -14,7 +14,8 @@ type backend struct {
// The salt value to be used by the information to be accessed only
// by this backend.
- salt *salt.Salt
+ salt *salt.Salt
+ saltMutex sync.RWMutex
// The view to use when creating the salt
view logical.Storage
@@ -53,7 +54,10 @@ func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
if err != nil {
return nil, err
}
- return b.Setup(conf)
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend(conf *logical.BackendConfig) (*backend, error) {
@@ -92,20 +96,42 @@ func Backend(conf *logical.BackendConfig) (*backend, error) {
pathTidySecretID(b),
},
),
- Init: b.initialize,
+ Invalidate: b.invalidate,
+ BackendType: logical.TypeCredential,
}
return b, nil
}
-func (b *backend) initialize() error {
+func (b *backend) Salt() (*salt.Salt, error) {
+ b.saltMutex.RLock()
+ if b.salt != nil {
+ defer b.saltMutex.RUnlock()
+ return b.salt, nil
+ }
+ b.saltMutex.RUnlock()
+ b.saltMutex.Lock()
+ defer b.saltMutex.Unlock()
+ if b.salt != nil {
+ return b.salt, nil
+ }
salt, err := salt.NewSalt(b.view, &salt.Config{
HashFunc: salt.SHA256Hash,
+ Location: salt.DefaultLocation,
})
if err != nil {
- return err
+ return nil, err
}
b.salt = salt
- return nil
+ return salt, nil
+}
+
+func (b *backend) invalidate(key string) {
+ switch key {
+ case salt.DefaultLocation:
+ b.saltMutex.Lock()
+ defer b.saltMutex.Unlock()
+ b.salt = nil
+ }
}
// periodicFunc of the backend will be invoked once a minute by the RollbackManager.
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go
index e49cf48..5f16e5f 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go
@@ -17,11 +17,7 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
if b == nil {
t.Fatalf("failed to create backend")
}
- _, err = b.Backend.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Initialize()
+ err = b.Backend.Setup(config)
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go
index d40530e..9b902a4 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go
@@ -38,6 +38,9 @@ func (b *backend) pathLoginUpdate(req *logical.Request, data *framework.FieldDat
return logical.ErrorResponse(fmt.Sprintf("failed to validate SecretID: %s", err)), nil
}
+ // Always include the role name, for later filtering
+ metadata["role_name"] = roleName
+
auth := &logical.Auth{
NumUses: role.TokenNumUses,
Period: role.Period,
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go
index 2a1ff1a..b9f7e5b 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go
@@ -113,7 +113,7 @@ func rolePaths(b *backend) []*framework.Path {
addresses which can perform the login operation`,
},
"policies": &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeCommaStringSlice,
Default: "default",
Description: "Comma separated list of policies on the role.",
},
@@ -172,7 +172,7 @@ TTL will be set to the value of this parameter.`,
Description: "Name of the role.",
},
"policies": &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeCommaStringSlice,
Default: "default",
Description: "Comma separated list of policies on the role.",
},
@@ -768,9 +768,9 @@ func (b *backend) pathRoleCreateUpdate(req *logical.Request, data *framework.Fie
}
if policiesRaw, ok := data.GetOk("policies"); ok {
- role.Policies = policyutil.ParsePolicies(policiesRaw.(string))
+ role.Policies = policyutil.ParsePolicies(policiesRaw)
} else if req.Operation == logical.CreateOperation {
- role.Policies = policyutil.ParsePolicies(data.Get("policies").(string))
+ role.Policies = policyutil.ParsePolicies(data.Get("policies"))
}
periodRaw, ok := data.GetOk("period")
@@ -1306,8 +1306,8 @@ func (b *backend) pathRolePoliciesUpdate(req *logical.Request, data *framework.F
return nil, nil
}
- policies := strings.TrimSpace(data.Get("policies").(string))
- if policies == "" {
+ policiesRaw, ok := data.GetOk("policies")
+ if !ok {
return logical.ErrorResponse("missing policies"), nil
}
@@ -1316,7 +1316,7 @@ func (b *backend) pathRolePoliciesUpdate(req *logical.Request, data *framework.F
lock.Lock()
defer lock.Unlock()
- role.Policies = policyutil.ParsePolicies(policies)
+ role.Policies = policyutil.ParsePolicies(policiesRaw)
return nil, b.setRoleEntry(req.Storage, roleName, role, "")
}
@@ -1359,7 +1359,7 @@ func (b *backend) pathRolePoliciesDelete(req *logical.Request, data *framework.F
lock.Lock()
defer lock.Unlock()
- role.Policies = policyutil.ParsePolicies(data.GetDefaultOrZero("policies").(string))
+ role.Policies = []string{}
return nil, b.setRoleEntry(req.Storage, roleName, role, "")
}
@@ -1939,7 +1939,11 @@ func (b *backend) setRoleIDEntry(s logical.Storage, roleID string, roleIDEntry *
lock.Lock()
defer lock.Unlock()
- entryIndex := "role_id/" + b.salt.SaltID(roleID)
+ salt, err := b.Salt()
+ if err != nil {
+ return err
+ }
+ entryIndex := "role_id/" + salt.SaltID(roleID)
entry, err := logical.StorageEntryJSON(entryIndex, roleIDEntry)
if err != nil {
@@ -1963,7 +1967,11 @@ func (b *backend) roleIDEntry(s logical.Storage, roleID string) (*roleIDStorageE
var result roleIDStorageEntry
- entryIndex := "role_id/" + b.salt.SaltID(roleID)
+ salt, err := b.Salt()
+ if err != nil {
+ return nil, err
+ }
+ entryIndex := "role_id/" + salt.SaltID(roleID)
if entry, err := s.Get(entryIndex); err != nil {
return nil, err
@@ -1987,7 +1995,11 @@ func (b *backend) roleIDEntryDelete(s logical.Storage, roleID string) error {
lock.Lock()
defer lock.Unlock()
- entryIndex := "role_id/" + b.salt.SaltID(roleID)
+ salt, err := b.Salt()
+ if err != nil {
+ return err
+ }
+ entryIndex := "role_id/" + salt.SaltID(roleID)
return s.Delete(entryIndex)
}
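The policies field switches from TypeString to TypeCommaStringSlice, so callers may send either a comma-separated string or a JSON list, and policyutil.ParsePolicies now takes the raw interface{} value directly. A hedged usage sketch (the exact normalization, such as ordering and deduplication, is policyutil's concern and not asserted here):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/policyutil"
)

func main() {
	// Both spellings are accepted once the field is TypeCommaStringSlice;
	// ParsePolicies normalizes them into a single policy list.
	fmt.Println(policyutil.ParsePolicies("p, q, r"))
	fmt.Println(policyutil.ParsePolicies([]string{"p", "q", "r"}))
}
```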
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go
index a40cbe1..fa3e681 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go
@@ -61,6 +61,9 @@ func TestAppRole_CIDRSubset(t *testing.T) {
secretIDData["cidr_list"] = "192.168.27.29/20,172.245.30.40/25,10.20.30.40/32"
resp, err = b.HandleRequest(secretIDReq)
+ if err != nil {
+ t.Fatal(err)
+ }
if resp != nil && resp.IsError() {
t.Fatalf("resp: %#v", resp)
}
@@ -605,7 +608,7 @@ func TestAppRole_RoleCRUD(t *testing.T) {
expected := map[string]interface{}{
"bind_secret_id": true,
- "policies": []string{"default", "p", "q", "r", "s"},
+ "policies": []string{"p", "q", "r", "s"},
"secret_id_num_uses": 10,
"secret_id_ttl": 300,
"token_ttl": 400,
@@ -653,7 +656,7 @@ func TestAppRole_RoleCRUD(t *testing.T) {
}
expected = map[string]interface{}{
- "policies": []string{"a", "b", "c", "d", "default"},
+ "policies": []string{"a", "b", "c", "d"},
"secret_id_num_uses": 100,
"secret_id_ttl": 3000,
"token_ttl": 4000,
@@ -761,7 +764,7 @@ func TestAppRole_RoleCRUD(t *testing.T) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
- if !reflect.DeepEqual(resp.Data["policies"].([]string), []string{"a1", "b1", "c1", "d1", "default"}) {
+ if !reflect.DeepEqual(resp.Data["policies"].([]string), []string{"a1", "b1", "c1", "d1"}) {
t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string))
}
roleReq.Operation = logical.DeleteOperation
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go
index db668a8..c7e32e1 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go
@@ -91,7 +91,7 @@ func (b *backend) validateRoleID(s logical.Storage, roleID string) (*roleStorage
// Validates the supplied RoleID and SecretID
func (b *backend) validateCredentials(req *logical.Request, data *framework.FieldData) (*roleStorageEntry, string, map[string]string, error) {
- var metadata map[string]string
+ metadata := make(map[string]string)
// RoleID must be supplied during every login
roleID := strings.TrimSpace(data.Get("role_id").(string))
if roleID == "" {
@@ -469,7 +469,11 @@ func (b *backend) secretIDAccessorEntry(s logical.Storage, secretIDAccessor stri
var result secretIDAccessorStorageEntry
// Create index entry, mapping the accessor to the token ID
- entryIndex := "accessor/" + b.salt.SaltID(secretIDAccessor)
+ salt, err := b.Salt()
+ if err != nil {
+ return nil, err
+ }
+ entryIndex := "accessor/" + salt.SaltID(secretIDAccessor)
accessorLock := b.secretIDAccessorLock(secretIDAccessor)
accessorLock.RLock()
@@ -498,7 +502,11 @@ func (b *backend) createSecretIDAccessorEntry(s logical.Storage, entry *secretID
entry.SecretIDAccessor = accessorUUID
// Create index entry, mapping the accessor to the token ID
- entryIndex := "accessor/" + b.salt.SaltID(entry.SecretIDAccessor)
+ salt, err := b.Salt()
+ if err != nil {
+ return err
+ }
+ entryIndex := "accessor/" + salt.SaltID(entry.SecretIDAccessor)
accessorLock := b.secretIDAccessorLock(accessorUUID)
accessorLock.Lock()
@@ -517,7 +525,11 @@ func (b *backend) createSecretIDAccessorEntry(s logical.Storage, entry *secretID
// deleteSecretIDAccessorEntry deletes the storage index mapping the accessor to a SecretID.
func (b *backend) deleteSecretIDAccessorEntry(s logical.Storage, secretIDAccessor string) error {
- accessorEntryIndex := "accessor/" + b.salt.SaltID(secretIDAccessor)
+ salt, err := b.Salt()
+ if err != nil {
+ return err
+ }
+ accessorEntryIndex := "accessor/" + salt.SaltID(secretIDAccessor)
accessorLock := b.secretIDAccessorLock(secretIDAccessor)
accessorLock.Lock()
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go
index fbd62c7..30feba9 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go
@@ -1,14 +1,16 @@
package awsauth
import (
+ "fmt"
"sync"
"time"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/iam"
- "github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
+ "github.com/patrickmn/go-cache"
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
@@ -16,15 +18,14 @@ func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
if err != nil {
return nil, err
}
- return b.Setup(conf)
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
type backend struct {
*framework.Backend
- Salt *salt.Salt
-
- // Used during initialization to set the salt
- view logical.Storage
// Lock to make changes to any of the backend's configuration endpoints.
configMutex sync.RWMutex
@@ -59,18 +60,34 @@ type backend struct {
// When the credentials are modified or deleted, all the cached client objects
// will be flushed. The empty STS role signifies the master account
IAMClientsMap map[string]map[string]*iam.IAM
+
+ // Map of AWS unique IDs to the full ARN corresponding to that unique ID
+ // This avoids the overhead of an AWS API hit for every login request
+ // using the IAM auth method when bound_iam_principal_arn contains a wildcard
+ iamUserIdToArnCache *cache.Cache
+
+ // AWS Account ID of the "default" AWS credentials
+ // This cache avoids the need to call GetCallerIdentity repeatedly to learn it
+ // We can't store this because, in certain pathological cases, it could change
+ // out from under us, such as a standby and active Vault server in different AWS
+ // accounts using their IAM instance profile to get their credentials.
+ defaultAWSAccountID string
+
+ resolveArnToUniqueIDFunc func(logical.Storage, string) (string, error)
}
func Backend(conf *logical.BackendConfig) (*backend, error) {
b := &backend{
// Setting the periodic func to be run once in an hour.
// If there is a real need, this can be made configurable.
- tidyCooldownPeriod: time.Hour,
- view: conf.StorageView,
- EC2ClientsMap: make(map[string]map[string]*ec2.EC2),
- IAMClientsMap: make(map[string]map[string]*iam.IAM),
+ tidyCooldownPeriod: time.Hour,
+ EC2ClientsMap: make(map[string]map[string]*ec2.EC2),
+ IAMClientsMap: make(map[string]map[string]*iam.IAM),
+ iamUserIdToArnCache: cache.New(7*24*time.Hour, 24*time.Hour),
}
+ b.resolveArnToUniqueIDFunc = b.resolveArnToRealUniqueId
+
b.Backend = &framework.Backend{
PeriodicFunc: b.periodicFunc,
AuthRenew: b.pathLoginRenew,
@@ -103,26 +120,13 @@ func Backend(conf *logical.BackendConfig) (*backend, error) {
pathIdentityWhitelist(b),
pathTidyIdentityWhitelist(b),
},
-
- Invalidate: b.invalidate,
-
- Init: b.initialize,
+ Invalidate: b.invalidate,
+ BackendType: logical.TypeCredential,
}
return b, nil
}
-func (b *backend) initialize() error {
- salt, err := salt.NewSalt(b.view, &salt.Config{
- HashFunc: salt.SHA256Hash,
- })
- if err != nil {
- return err
- }
- b.Salt = salt
- return nil
-}
-
// periodicFunc performs the tasks that the backend wishes to do periodically.
// Currently this will be triggered once in a minute by the RollbackManager.
//
@@ -190,9 +194,86 @@ func (b *backend) invalidate(key string) {
defer b.configMutex.Unlock()
b.flushCachedEC2Clients()
b.flushCachedIAMClients()
+ b.defaultAWSAccountID = ""
}
}
+// Putting this here so we can inject a fake resolver into the backend for unit testing
+// purposes
+func (b *backend) resolveArnToRealUniqueId(s logical.Storage, arn string) (string, error) {
+ entity, err := parseIamArn(arn)
+ if err != nil {
+ return "", err
+ }
+ // This odd-looking code is here because IAM is an inherently global service. IAM and STS ARNs
+ // don't have regions in them, and there is only a single global endpoint for IAM; see
+ // http://docs.aws.amazon.com/general/latest/gr/rande.html#iam_region
+ // However, the ARNs do have a partition in them, because the GovCloud and China partitions DO
+ // have their own separate endpoints, and the partition is encoded in the ARN. If Amazon's Go SDK
+ // would allow us to pass a partition back to the IAM client, it would be much simpler. But it
+ // doesn't appear that's possible, so in order to properly support GovCloud and China, we do a
+ // circular dance of extracting the partition from the ARN, finding any arbitrary region in the
+ // partition, and passing that region back to the SDK, so that the SDK can figure out the
+ // proper partition from the arbitrary region we passed in to look up the endpoint.
+ // Sigh
+ region := getAnyRegionForAwsPartition(entity.Partition)
+ if region == nil {
+ return "", fmt.Errorf("Unable to resolve partition %q to a region", entity.Partition)
+ }
+ iamClient, err := b.clientIAM(s, region.ID(), entity.AccountNumber)
+ if err != nil {
+ return "", err
+ }
+
+ switch entity.Type {
+ case "user":
+ userInfo, err := iamClient.GetUser(&iam.GetUserInput{UserName: &entity.FriendlyName})
+ if err != nil {
+ return "", err
+ }
+ if userInfo == nil {
+ return "", fmt.Errorf("got nil result from GetUser")
+ }
+ return *userInfo.User.UserId, nil
+ case "role":
+ roleInfo, err := iamClient.GetRole(&iam.GetRoleInput{RoleName: &entity.FriendlyName})
+ if err != nil {
+ return "", err
+ }
+ if roleInfo == nil {
+ return "", fmt.Errorf("got nil result from GetRole")
+ }
+ return *roleInfo.Role.RoleId, nil
+ case "instance-profile":
+ profileInfo, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName})
+ if err != nil {
+ return "", err
+ }
+ if profileInfo == nil {
+ return "", fmt.Errorf("got nil result from GetInstanceProfile")
+ }
+ return *profileInfo.InstanceProfile.InstanceProfileId, nil
+ default:
+ return "", fmt.Errorf("unrecognized error type %#v", entity.Type)
+ }
+}
+
+// Adapted from https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/
+// the "Enumerating Regions and Endpoint Metadata" section
+func getAnyRegionForAwsPartition(partitionId string) *endpoints.Region {
+ resolver := endpoints.DefaultResolver()
+ partitions := resolver.(endpoints.EnumPartitions).Partitions()
+
+ for _, p := range partitions {
+ if p.ID() == partitionId {
+ for _, r := range p.Regions() {
+ return &r
+ }
+ }
+ }
+ return nil
+}
+
const backendHelp = `
aws-ec2 auth backend takes in a PKCS#7 signature of an AWS EC2 instance and a client
created nonce to authenticate the EC2 instance with Vault.
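The new iamUserIdToArnCache is a patrickmn/go-cache with a 7-day default TTL and a 24-hour sweep, so a mapping for a deleted-and-recreated IAM principal eventually ages out instead of pinning a stale unique ID forever. A small usage sketch of that cache API (the key and ARN are made-up examples):

```go
package main

import (
	"fmt"
	"time"

	cache "github.com/patrickmn/go-cache"
)

func main() {
	// Same parameters as the backend: 7-day default TTL, 24-hour sweep.
	c := cache.New(7*24*time.Hour, 24*time.Hour)

	// Hypothetical unique-ID -> ARN mapping.
	c.Set("AIDAEXAMPLEUNIQUEID", "arn:aws:iam::123456789012:user/alice", cache.DefaultExpiration)

	if v, found := c.Get("AIDAEXAMPLEUNIQUEID"); found {
		fmt.Println(v.(string))
	}
}
```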
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go
index a539fba..881ca85 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go
@@ -9,11 +9,13 @@ import (
"os"
"strings"
"testing"
+ "time"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/vault/helper/policyutil"
"github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
logicaltest "github.com/hashicorp/vault/logical/testing"
)
@@ -27,7 +29,7 @@ func TestBackend_CreateParseVerifyRoleTag(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -251,7 +253,7 @@ func TestBackend_ConfigTidyIdentities(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -305,7 +307,7 @@ func TestBackend_ConfigTidyRoleTags(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -359,7 +361,7 @@ func TestBackend_TidyIdentities(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -384,7 +386,7 @@ func TestBackend_TidyRoleTags(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -409,7 +411,7 @@ func TestBackend_ConfigClient(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -546,7 +548,7 @@ func TestBackend_pathConfigCertificate(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -614,6 +616,9 @@ MlpCclZOR3JOOU4yZjZST2swazlLCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
certReq.Operation = logical.ReadOperation
// test read operation
resp, err = b.HandleRequest(certReq)
+ if err != nil {
+ t.Fatal(err)
+ }
expectedCert := `-----BEGIN CERTIFICATE-----
MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
@@ -698,7 +703,7 @@ func TestBackend_parseAndVerifyRoleTagValue(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -730,6 +735,9 @@ func TestBackend_parseAndVerifyRoleTagValue(t *testing.T) {
Path: "role/abcd-123",
Storage: storage,
})
+ if err != nil {
+ t.Fatal(err)
+ }
if resp == nil {
t.Fatalf("expected an role entry for abcd-123")
}
@@ -776,7 +784,7 @@ func TestBackend_PathRoleTag(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -841,7 +849,7 @@ func TestBackend_PathBlacklistRoleTag(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -989,7 +997,7 @@ func TestBackendAcc_LoginWithInstanceIdentityDocAndWhitelistIdentity(t *testing.
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -1169,7 +1177,7 @@ func TestBackend_pathStsConfig(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -1216,6 +1224,9 @@ func TestBackend_pathStsConfig(t *testing.T) {
stsReq.Operation = logical.ReadOperation
// test read operation
resp, err = b.HandleRequest(stsReq)
+ if err != nil {
+ t.Fatal(err)
+ }
expectedStsRole := "arn:aws:iam:account1:role/myRole"
if resp.Data["sts_role"].(string) != expectedStsRole {
t.Fatalf("bad: expected:%s\n got:%s\n", expectedStsRole, resp.Data["sts_role"].(string))
@@ -1314,7 +1325,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -1346,7 +1357,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
if err != nil {
t.Fatalf("Received error retrieving identity: %s", err)
}
- testIdentityArn, _, _, err := parseIamArn(*testIdentity.Arn)
+ entity, err := parseIamArn(*testIdentity.Arn)
if err != nil {
t.Fatal(err)
}
@@ -1385,7 +1396,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
// configuring the valid role we'll be able to login to
roleData := map[string]interface{}{
- "bound_iam_principal_arn": testIdentityArn,
+ "bound_iam_principal_arn": entity.canonicalArn(),
"policies": "root",
"auth_type": iamAuthType,
}
@@ -1417,8 +1428,17 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
t.Fatalf("bad: failed to create role; resp:%#v\nerr:%v", resp, err)
}
+ fakeArn := "arn:aws:iam::123456789012:role/somePath/FakeRole"
+ fakeArnResolver := func(s logical.Storage, arn string) (string, error) {
+ if arn == fakeArn {
+ return fmt.Sprintf("FakeUniqueIdFor%s", fakeArn), nil
+ }
+ return b.resolveArnToRealUniqueId(s, arn)
+ }
+ b.resolveArnToUniqueIDFunc = fakeArnResolver
+
// now we're creating the invalid role we won't be able to login to
- roleData["bound_iam_principal_arn"] = "arn:aws:iam::123456789012:role/FakeRole"
+ roleData["bound_iam_principal_arn"] = fakeArn
roleRequest.Path = "role/" + testInvalidRoleName
resp, err = b.HandleRequest(roleRequest)
if err != nil || (resp != nil && resp.IsError()) {
@@ -1491,7 +1511,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
t.Errorf("bad: expected failed login due to bad auth type: resp:%#v\nerr:%v", resp, err)
}
- // finally, the happy path tests :)
+ // finally, the happy path test :)
loginData["role"] = testValidRoleName
resp, err = b.HandleRequest(loginRequest)
@@ -1499,6 +1519,101 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
t.Fatal(err)
}
if resp == nil || resp.Auth == nil || resp.IsError() {
- t.Errorf("bad: expected valid login: resp:%#v", resp)
+ t.Fatalf("bad: expected valid login: resp:%#v", resp)
+ }
+
+ renewReq := generateRenewRequest(storage, resp.Auth)
+ // dump a fake ARN into the metadata to ensure that we ONLY look
+ // at the unique ID that has been generated
+ renewReq.Auth.Metadata["canonical_arn"] = "fake_arn"
+ empty_login_fd := &framework.FieldData{
+ Raw: map[string]interface{}{},
+ Schema: pathLogin(b).Fields,
+ }
+ // ensure we can renew
+ resp, err = b.pathLoginRenew(renewReq, empty_login_fd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("got nil response from renew")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error when renewing: %#v", *resp)
+ }
+
+ // Now, fake out the unique ID resolver to ensure we fail login if the unique ID
+ // changes from under us
+ b.resolveArnToUniqueIDFunc = resolveArnToFakeUniqueId
+ // First, we need to update the role to force Vault to use our fake resolver to
+ // pick up the fake user ID
+ roleData["bound_iam_principal_arn"] = entity.canonicalArn()
+ roleRequest.Path = "role/" + testValidRoleName
+ resp, err = b.HandleRequest(roleRequest)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: failed to recreate role: resp:%#v\nerr:%v", resp, err)
+ }
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil || resp == nil || !resp.IsError() {
+ t.Errorf("bad: expected failed login due to changed AWS role ID: resp: %#v\nerr:%v", resp, err)
+ }
+
+ // and ensure a renew no longer works
+ resp, err = b.pathLoginRenew(renewReq, empty_login_fd)
+ if err == nil || (resp != nil && !resp.IsError()) {
+ t.Errorf("bad: expected failed renew due to changed AWS role ID: resp: %#v", resp, err)
+ }
+ // Undo the fake resolver...
+ b.resolveArnToUniqueIDFunc = b.resolveArnToRealUniqueId
+
+ // Now test that wildcard matching works
+ wildcardRoleName := "valid_wildcard"
+ wildcardEntity := *entity
+ wildcardEntity.FriendlyName = "*"
+ roleData["bound_iam_principal_arn"] = wildcardEntity.canonicalArn()
+ roleRequest.Path = "role/" + wildcardRoleName
+ resp, err = b.HandleRequest(roleRequest)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("bad: failed to create wildcard role: resp:%#v\nerr:%v", resp, err)
+ }
+
+ loginData["role"] = wildcardRoleName
+ resp, err = b.HandleRequest(loginRequest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Auth == nil || resp.IsError() {
+ t.Fatalf("bad: expected valid login: resp:%#v", resp)
+ }
+ // and ensure we can renew
+ renewReq = generateRenewRequest(storage, resp.Auth)
+ resp, err = b.pathLoginRenew(renewReq, empty_login_fd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("got nil response from renew")
+ }
+ if resp.IsError() {
+ t.Fatalf("got error when renewing: %#v", *resp)
+ }
+ // ensure the cache is populated
+ cachedArn := b.getCachedUserId(resp.Auth.Metadata["client_user_id"])
+ if cachedArn == "" {
+ t.Errorf("got empty ARN back from user ID cache; expected full arn")
}
}
+
+func generateRenewRequest(s logical.Storage, auth *logical.Auth) *logical.Request {
+ renewReq := &logical.Request{
+ Storage: s,
+ Auth: &logical.Auth{},
+ }
+ renewReq.Auth.InternalData = auth.InternalData
+ renewReq.Auth.Metadata = auth.Metadata
+ renewReq.Auth.LeaseOptions = auth.LeaseOptions
+ renewReq.Auth.Policies = auth.Policies
+ renewReq.Auth.IssueTime = time.Now()
+
+ return renewReq
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go
index c69187f..2842c24 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go
@@ -16,7 +16,60 @@ import (
type CLIHandler struct{}
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+// Generates the necessary data to send to the Vault server for generating a token
+// This is useful for other API clients to use
+func GenerateLoginData(accessKey, secretKey, sessionToken, headerValue string) (map[string]interface{}, error) {
+ loginData := make(map[string]interface{})
+
+ credConfig := &awsutil.CredentialsConfig{
+ AccessKey: accessKey,
+ SecretKey: secretKey,
+ SessionToken: sessionToken,
+ }
+ creds, err := credConfig.GenerateCredentialChain()
+ if err != nil {
+ return nil, err
+ }
+ if creds == nil {
+ return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
+ }
+
+ // Use the credentials we've found to construct an STS session
+ stsSession, err := session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{Credentials: creds},
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ var params *sts.GetCallerIdentityInput
+ svc := sts.New(stsSession)
+ stsRequest, _ := svc.GetCallerIdentityRequest(params)
+
+ // Inject the required auth header value, if supplied, and then sign the request including that header
+ if headerValue != "" {
+ stsRequest.HTTPRequest.Header.Add(iamServerIdHeader, headerValue)
+ }
+ stsRequest.Sign()
+
+ // Now extract out the relevant parts of the request
+ headersJson, err := json.Marshal(stsRequest.HTTPRequest.Header)
+ if err != nil {
+ return nil, err
+ }
+ requestBody, err := ioutil.ReadAll(stsRequest.HTTPRequest.Body)
+ if err != nil {
+ return nil, err
+ }
+ loginData["iam_http_request_method"] = stsRequest.HTTPRequest.Method
+ loginData["iam_request_url"] = base64.StdEncoding.EncodeToString([]byte(stsRequest.HTTPRequest.URL.String()))
+ loginData["iam_request_headers"] = base64.StdEncoding.EncodeToString(headersJson)
+ loginData["iam_request_body"] = base64.StdEncoding.EncodeToString(requestBody)
+
+ return loginData, nil
+}
+
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
mount, ok := m["mount"]
if !ok {
mount = "aws"
@@ -32,71 +85,25 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
headerValue = ""
}
- // Grab any supplied credentials off the command line
- // Ensure we're able to fall back to the SDK default credential providers
- credConfig := &awsutil.CredentialsConfig{
- AccessKey: m["aws_access_key_id"],
- SecretKey: m["aws_secret_access_key"],
- SessionToken: m["aws_security_token"],
- }
- creds, err := credConfig.GenerateCredentialChain()
+ loginData, err := GenerateLoginData(m["aws_access_key_id"], m["aws_secret_access_key"], m["aws_security_token"], headerValue)
if err != nil {
- return "", err
+ return nil, err
}
- if creds == nil {
- return "", fmt.Errorf("could not compile valid credential providers from static config, environemnt, shared, or instance metadata")
+ if loginData == nil {
+ return nil, fmt.Errorf("got nil response from GenerateLoginData")
}
-
- // Use the credentials we've found to construct an STS session
- stsSession, err := session.NewSessionWithOptions(session.Options{
- Config: aws.Config{Credentials: creds},
- })
- if err != nil {
- return "", err
- }
-
- var params *sts.GetCallerIdentityInput
- svc := sts.New(stsSession)
- stsRequest, _ := svc.GetCallerIdentityRequest(params)
-
- // Inject the required auth header value, if suplied, and then sign the request including that header
- if headerValue != "" {
- stsRequest.HTTPRequest.Header.Add(iamServerIdHeader, headerValue)
- }
- stsRequest.Sign()
-
- // Now extract out the relevant parts of the request
- headersJson, err := json.Marshal(stsRequest.HTTPRequest.Header)
- if err != nil {
- return "", err
- }
- requestBody, err := ioutil.ReadAll(stsRequest.HTTPRequest.Body)
- if err != nil {
- return "", err
- }
- method := stsRequest.HTTPRequest.Method
- targetUrl := base64.StdEncoding.EncodeToString([]byte(stsRequest.HTTPRequest.URL.String()))
- headers := base64.StdEncoding.EncodeToString(headersJson)
- body := base64.StdEncoding.EncodeToString(requestBody)
-
- // And pass them on to the Vault server
+ loginData["role"] = role
path := fmt.Sprintf("auth/%s/login", mount)
- secret, err := c.Logical().Write(path, map[string]interface{}{
- "iam_http_request_method": method,
- "iam_request_url": targetUrl,
- "iam_request_headers": headers,
- "iam_request_body": body,
- "role": role,
- })
+ secret, err := c.Logical().Write(path, loginData)
if err != nil {
- return "", err
+ return nil, err
}
if secret == nil {
- return "", fmt.Errorf("empty response from credential provider")
+ return nil, fmt.Errorf("empty response from credential provider")
}
- return secret.Auth.ClientToken, nil
+ return secret, nil
}
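For orientation, a minimal sketch of how an external API client might drive this flow through the newly exported GenerateLoginData (hypothetical role name, the default "aws" mount, and the vendored import path are assumptions; empty credentials fall through to the SDK default provider chain, as the error message above notes):

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/vault/api"
        awsauth "github.com/hashicorp/vault/builtin/credential/aws"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        // Empty credentials fall back to environment, shared config, or instance metadata.
        loginData, err := awsauth.GenerateLoginData("", "", "", "")
        if err != nil {
            log.Fatal(err)
        }
        loginData["role"] = "my-role" // hypothetical role name
        secret, err := client.Logical().Write("auth/aws/login", loginData)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(secret.Auth.ClientToken)
    }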
func (h *CLIHandler) Help() string {
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go
index 1647f45..aa3da0d 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go
@@ -8,6 +8,7 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/iam"
+ "github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/helper/awsutil"
"github.com/hashicorp/vault/logical"
@@ -70,7 +71,7 @@ func (b *backend) getRawClientConfig(s logical.Storage, region, clientType strin
// It uses getRawClientConfig to obtain config for the runtime environment, and if
// stsRole is a non-empty string, it will use AssumeRole to obtain a set of assumed
// credentials. The credentials will expire after 15 minutes but will auto-refresh.
-func (b *backend) getClientConfig(s logical.Storage, region, stsRole, clientType string) (*aws.Config, error) {
+func (b *backend) getClientConfig(s logical.Storage, region, stsRole, accountID, clientType string) (*aws.Config, error) {
config, err := b.getRawClientConfig(s, region, clientType)
if err != nil {
@@ -80,20 +81,39 @@ func (b *backend) getClientConfig(s logical.Storage, region, stsRole, clientType
return nil, fmt.Errorf("could not compile valid credentials through the default provider chain")
}
+ stsConfig, err := b.getRawClientConfig(s, region, "sts")
+ if stsConfig == nil {
+ return nil, fmt.Errorf("could not configure STS client")
+ }
+ if err != nil {
+ return nil, err
+ }
if stsRole != "" {
- assumeRoleConfig, err := b.getRawClientConfig(s, region, "sts")
- if err != nil {
- return nil, err
- }
- if assumeRoleConfig == nil {
- return nil, fmt.Errorf("could not configure STS client")
- }
- assumedCredentials := stscreds.NewCredentials(session.New(assumeRoleConfig), stsRole)
+ assumedCredentials := stscreds.NewCredentials(session.New(stsConfig), stsRole)
// Test that we actually have permissions to assume the role
if _, err = assumedCredentials.Get(); err != nil {
return nil, err
}
config.Credentials = assumedCredentials
+ } else {
+ if b.defaultAWSAccountID == "" {
+ client := sts.New(session.New(stsConfig))
+ if client == nil {
+ return nil, fmt.Errorf("could not obtain sts client: %v", err)
+ }
+ inputParams := &sts.GetCallerIdentityInput{}
+ identity, err := client.GetCallerIdentity(inputParams)
+ if err != nil {
+ return nil, fmt.Errorf("unable to fetch current caller: %v", err)
+ }
+ if identity == nil {
+ return nil, fmt.Errorf("got nil result from GetCallerIdentity")
+ }
+ b.defaultAWSAccountID = *identity.Account
+ }
+ if b.defaultAWSAccountID != accountID {
+ return nil, fmt.Errorf("unable to fetch client for account ID %s -- default client is for account %s", accountID, b.defaultAWSAccountID)
+ }
}
return config, nil
@@ -121,8 +141,44 @@ func (b *backend) flushCachedIAMClients() {
}
}
+// Gets an entry out of the user ID cache
+func (b *backend) getCachedUserId(userId string) string {
+ if userId == "" {
+ return ""
+ }
+ if entry, ok := b.iamUserIdToArnCache.Get(userId); ok {
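+ // Re-set the entry on read to refresh its expiration (assuming a TTL cache in the style of go-cache), so frequently used IDs stay cached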
+ b.iamUserIdToArnCache.SetDefault(userId, entry)
+ return entry.(string)
+ }
+ return ""
+}
+
+// Sets an entry in the user ID cache
+func (b *backend) setCachedUserId(userId, arn string) {
+ if userId != "" {
+ b.iamUserIdToArnCache.SetDefault(userId, arn)
+ }
+}
+
+func (b *backend) stsRoleForAccount(s logical.Storage, accountID string) (string, error) {
+ // Check if an STS configuration exists for the AWS account
+ sts, err := b.lockedAwsStsEntry(s, accountID)
+ if err != nil {
+ return "", fmt.Errorf("error fetching STS config for account ID %q: %q\n", accountID, err)
+ }
+ // An empty STS role signifies the master account
+ if sts != nil {
+ return sts.StsRole, nil
+ }
+ return "", nil
+}
+
// clientEC2 creates a client to interact with AWS EC2 API
-func (b *backend) clientEC2(s logical.Storage, region string, stsRole string) (*ec2.EC2, error) {
+func (b *backend) clientEC2(s logical.Storage, region, accountID string) (*ec2.EC2, error) {
+ stsRole, err := b.stsRoleForAccount(s, accountID)
+ if err != nil {
+ return nil, err
+ }
b.configMutex.RLock()
if b.EC2ClientsMap[region] != nil && b.EC2ClientsMap[region][stsRole] != nil {
defer b.configMutex.RUnlock()
@@ -142,8 +198,7 @@ func (b *backend) clientEC2(s logical.Storage, region string, stsRole string) (*
// Create an AWS config object using a chain of providers
var awsConfig *aws.Config
- var err error
- awsConfig, err = b.getClientConfig(s, region, stsRole, "ec2")
+ awsConfig, err = b.getClientConfig(s, region, stsRole, accountID, "ec2")
if err != nil {
return nil, err
@@ -168,7 +223,11 @@ func (b *backend) clientEC2(s logical.Storage, region string, stsRole string) (*
}
// clientIAM creates a client to interact with AWS IAM API
-func (b *backend) clientIAM(s logical.Storage, region string, stsRole string) (*iam.IAM, error) {
+func (b *backend) clientIAM(s logical.Storage, region, accountID string) (*iam.IAM, error) {
+ stsRole, err := b.stsRoleForAccount(s, accountID)
+ if err != nil {
+ return nil, err
+ }
b.configMutex.RLock()
if b.IAMClientsMap[region] != nil && b.IAMClientsMap[region][stsRole] != nil {
defer b.configMutex.RUnlock()
@@ -188,8 +247,7 @@ func (b *backend) clientIAM(s logical.Storage, region string, stsRole string) (*
// Create an AWS config object using a chain of providers
var awsConfig *aws.Config
- var err error
- awsConfig, err = b.getClientConfig(s, region, stsRole, "iam")
+ awsConfig, err = b.getClientConfig(s, region, stsRole, accountID, "iam")
if err != nil {
return nil, err
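For reference, a standalone sketch of the sts:GetCallerIdentity lookup that getClientConfig now performs to discover and pin the default AWS account ID (assumes ambient AWS credentials are available):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        sess, err := session.NewSession()
        if err != nil {
            log.Fatal(err)
        }
        // The backend caches the resulting account in b.defaultAWSAccountID.
        identity, err := sts.New(sess).GetCallerIdentity(&sts.GetCallerIdentityInput{})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("caller account ID:", *identity.Account)
    }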
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go
index 3787aed..9242ebd 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go
@@ -129,6 +129,9 @@ func (b *backend) pathConfigClientDelete(
// Remove all the cached EC2 client objects in the backend.
b.flushCachedIAMClients()
+ // unset the cached default AWS account ID
+ b.defaultAWSAccountID = ""
+
return nil, nil
}
@@ -147,7 +150,11 @@ func (b *backend) pathConfigClientCreateUpdate(
configEntry = &clientConfig{}
}
+ // changedCreds is whether we need to flush the cached AWS clients and store in the backend
changedCreds := false
+ // changedOtherConfig is whether other config has changed that requires storing in the backend
+ // but does not require flushing the cached clients
+ changedOtherConfig := false
accessKeyStr, ok := data.GetOk("access_key")
if ok {
@@ -210,6 +217,7 @@ func (b *backend) pathConfigClientCreateUpdate(
if configEntry.IAMServerIdHeaderValue != headerValStr.(string) {
// NOT setting changedCreds here, since this isn't really cached
configEntry.IAMServerIdHeaderValue = headerValStr.(string)
+ changedOtherConfig = true
}
} else if req.Operation == logical.CreateOperation {
configEntry.IAMServerIdHeaderValue = data.Get("iam_server_id_header_value").(string)
@@ -225,7 +233,7 @@ func (b *backend) pathConfigClientCreateUpdate(
return nil, err
}
- if changedCreds || req.Operation == logical.CreateOperation {
+ if changedCreds || changedOtherConfig || req.Operation == logical.CreateOperation {
if err := req.Storage.Put(entry); err != nil {
return nil, err
}
@@ -234,6 +242,7 @@ func (b *backend) pathConfigClientCreateUpdate(
if changedCreds {
b.flushCachedEC2Clients()
b.flushCachedIAMClients()
+ b.defaultAWSAccountID = ""
}
return nil, nil
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go
index 2685710..ff60ebf 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go
@@ -15,7 +15,7 @@ func TestBackend_pathConfigClient(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -73,4 +73,37 @@ func TestBackend_pathConfigClient(t *testing.T) {
t.Fatalf("expected iam_server_id_header_value: '%#v'; returned iam_server_id_header_value: '%#v'",
data["iam_server_id_header_value"], resp.Data["iam_server_id_header_value"])
}
+
+ data = map[string]interface{}{
+ "iam_server_id_header_value": "vault_server_identification_2718281",
+ }
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/client",
+ Data: data,
+ Storage: storage,
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil && resp.IsError() {
+ t.Fatal("failed to update the client config entry")
+ }
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "config/client",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatal("failed to read the client config entry")
+ }
+ if resp.Data["iam_server_id_header_value"] != data["iam_server_id_header_value"] {
+ t.Fatalf("expected iam_server_id_header_value: '%#v'; returned iam_server_id_header_value: '%#v'",
+ data["iam_server_id_header_value"], resp.Data["iam_server_id_header_value"])
+ }
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go
index bf50898..cca2d75 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go
@@ -10,6 +10,7 @@ import (
"io/ioutil"
"net/http"
"net/url"
+ "reflect"
"regexp"
"strings"
"time"
@@ -151,32 +152,15 @@ func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName str
// validateInstance queries the status of the EC2 instance using AWS EC2 API
// and checks if the instance is running and is healthy
func (b *backend) validateInstance(s logical.Storage, instanceID, region, accountID string) (*ec2.Instance, error) {
-
- // Check if an STS configuration exists for the AWS account
- sts, err := b.lockedAwsStsEntry(s, accountID)
- if err != nil {
- return nil, fmt.Errorf("error fetching STS config for account ID %q: %q\n", accountID, err)
- }
- // An empty STS role signifies the master account
- stsRole := ""
- if sts != nil {
- stsRole = sts.StsRole
- }
-
// Create an EC2 client to pull the instance information
- ec2Client, err := b.clientEC2(s, region, stsRole)
+ ec2Client, err := b.clientEC2(s, region, accountID)
if err != nil {
return nil, err
}
status, err := ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{
- Filters: []*ec2.Filter{
- &ec2.Filter{
- Name: aws.String("instance-id"),
- Values: []*string{
- aws.String(instanceID),
- },
- },
+ InstanceIds: []*string{
+ aws.String(instanceID),
},
})
if err != nil {
@@ -477,32 +461,20 @@ func (b *backend) verifyInstanceMeetsRoleRequirements(
// Extract out the instance profile name from the instance
// profile ARN
- iamInstanceProfileARNSlice := strings.SplitAfter(iamInstanceProfileARN, ":instance-profile/")
- iamInstanceProfileName := iamInstanceProfileARNSlice[len(iamInstanceProfileARNSlice)-1]
+ iamInstanceProfileEntity, err := parseIamArn(iamInstanceProfileARN)
- if iamInstanceProfileName == "" {
- return nil, fmt.Errorf("failed to extract out IAM instance profile name from IAM instance profile ARN")
- }
-
- // Check if an STS configuration exists for the AWS account
- sts, err := b.lockedAwsStsEntry(s, identityDoc.AccountID)
if err != nil {
- return fmt.Errorf("error fetching STS config for account ID %q: %q\n", identityDoc.AccountID, err), nil
- }
- // An empty STS role signifies the master account
- stsRole := ""
- if sts != nil {
- stsRole = sts.StsRole
+ return nil, fmt.Errorf("failed to parse IAM instance profile ARN %q; error: %v", iamInstanceProfileARN, err)
}
// Use instance profile ARN to fetch the associated role ARN
- iamClient, err := b.clientIAM(s, identityDoc.Region, stsRole)
+ iamClient, err := b.clientIAM(s, identityDoc.Region, identityDoc.AccountID)
if err != nil {
return nil, fmt.Errorf("could not fetch IAM client: %v", err)
} else if iamClient == nil {
return nil, fmt.Errorf("received a nil iamClient")
}
- iamRoleARN, err := b.instanceIamRoleARN(iamClient, iamInstanceProfileName)
+ iamRoleARN, err := b.instanceIamRoleARN(iamClient, iamInstanceProfileEntity.FriendlyName)
if err != nil {
return nil, fmt.Errorf("IAM role ARN could not be fetched: %v", err)
}
@@ -927,8 +899,18 @@ func (b *backend) pathLoginRenewIam(
return nil, fmt.Errorf("role entry not found")
}
- if entityType, ok := req.Auth.Metadata["inferred_entity_type"]; !ok {
- if entityType == ec2EntityType {
+ // we don't really care what the inferred entity type was when the role was initially created. We
+ // care about what the role currently requires. However, the metadata's inferred_entity_id is only
+ // set when inferencing is turned on at initial login time. So, if inferencing is turned on, any
+ // existing roles will NOT be able to renew tokens.
+ // This might change later, but authenticating the actual inferred entity ID is NOT done if there
+ // is no inferencing requested in the role. The reason is that authenticating the inferred entity
+ // ID requires additional AWS IAM permissions that might not be present (e.g.,
+ // ec2:DescribeInstances) as well as additional inferencing configuration (the inferred region).
+ // So, for now, if you want to turn on inferencing, all clients must re-authenticate and cannot
+ // renew existing tokens.
+ if roleEntry.InferredEntityType != "" {
+ if roleEntry.InferredEntityType == ec2EntityType {
instanceID, ok := req.Auth.Metadata["inferred_entity_id"]
if !ok {
return nil, fmt.Errorf("no inferred entity ID in auth metadata")
@@ -937,21 +919,64 @@ func (b *backend) pathLoginRenewIam(
if !ok {
return nil, fmt.Errorf("no inferred AWS region in auth metadata")
}
- _, err := b.validateInstance(req.Storage, instanceID, instanceRegion, req.Auth.Metadata["accountID"])
+ _, err := b.validateInstance(req.Storage, instanceID, instanceRegion, req.Auth.Metadata["account_id"])
if err != nil {
return nil, fmt.Errorf("failed to verify instance ID %q: %v", instanceID, err)
}
} else {
- return nil, fmt.Errorf("unrecognized entity_type in metadata: %q", entityType)
+ return nil, fmt.Errorf("unrecognized entity_type in metadata: %q", roleEntry.InferredEntityType)
}
}
- if roleEntry.BoundIamPrincipalARN != canonicalArn {
- return nil, fmt.Errorf("role no longer bound to arn %q", canonicalArn)
+ // Note that the error messages below can leak a little bit of information about the role information
+ // For example, if on renew, the client gets the "error parsing ARN..." error message, the client
+ // will know that it's a wildcard bind (but not the actual bind), even if the client can't actually
+ // read the role directly to know what the bind is. It's a relatively small amount of leakage, in
+ // some fairly corner cases, and in the most likely error case (role has been changed to a new ARN),
+ // the error message is identical.
+ if roleEntry.BoundIamPrincipalARN != "" {
+ // We might not get here if all bindings were on the inferred entity, which we've already validated
+ // above
+ clientUserId, ok := req.Auth.Metadata["client_user_id"]
+ if ok && roleEntry.BoundIamPrincipalID != "" {
+ // Resolving unique IDs is enabled and the auth metadata contains the unique ID, so checking the
+ // unique ID is authoritative at this stage
+ if roleEntry.BoundIamPrincipalID != clientUserId {
+ return nil, fmt.Errorf("role no longer bound to ARN %q", canonicalArn)
+ }
+ } else if strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") {
+ fullArn := b.getCachedUserId(clientUserId)
+ if fullArn == "" {
+ entity, err := parseIamArn(canonicalArn)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing ARN %q: %v", canonicalArn, err)
+ }
+ fullArn, err = b.fullArn(entity, req.Storage)
+ if err != nil {
+ return nil, fmt.Errorf("error looking up full ARN of entity %v: %v", entity, err)
+ }
+ if fullArn == "" {
+ return nil, fmt.Errorf("got empty string back when looking up full ARN of entity %v", entity)
+ }
+ if clientUserId != "" {
+ b.setCachedUserId(clientUserId, fullArn)
+ }
+ }
+ if !strutil.GlobbedStringsMatch(roleEntry.BoundIamPrincipalARN, fullArn) {
+ return nil, fmt.Errorf("role no longer bound to ARN %q", canonicalArn)
+ }
+ } else if roleEntry.BoundIamPrincipalARN != canonicalArn {
+ return nil, fmt.Errorf("role no longer bound to ARN %q", canonicalArn)
+ }
}
- return framework.LeaseExtend(roleEntry.TTL, roleEntry.MaxTTL, b.System())(req, data)
-
+ // If 'Period' is set on the role, then the token should never expire.
+ if roleEntry.Period > time.Duration(0) {
+ req.Auth.TTL = roleEntry.Period
+ return &logical.Response{Auth: req.Auth}, nil
+ } else {
+ return framework.LeaseExtend(roleEntry.TTL, roleEntry.MaxTTL, b.System())(req, data)
+ }
}
func (b *backend) pathLoginRenewEc2(
@@ -1095,14 +1120,12 @@ func (b *backend) pathLoginUpdateIam(
if headersB64 == "" {
return logical.ErrorResponse("missing iam_request_headers"), nil
}
- headersJson, err := base64.StdEncoding.DecodeString(headersB64)
+ headers, err := parseIamRequestHeaders(headersB64)
if err != nil {
- return logical.ErrorResponse("failed to base64 decode iam_request_headers"), nil
+ return logical.ErrorResponse(fmt.Sprintf("Error parsing iam_request_headers: %v", err)), nil
}
- var headers http.Header
- err = jsonutil.DecodeJSON(headersJson, &headers)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to JSON decode iam_request_headers %q: %v", headersJson, err)), nil
+ if headers == nil {
+ return logical.ErrorResponse("nil response when parsing iam_request_headers"), nil
}
config, err := b.lockedClientConfigEntry(req.Storage)
@@ -1124,18 +1147,21 @@ func (b *backend) pathLoginUpdateIam(
}
}
- clientArn, accountID, err := submitCallerIdentityRequest(method, endpoint, parsedUrl, body, headers)
+ callerID, err := submitCallerIdentityRequest(method, endpoint, parsedUrl, body, headers)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf("error making upstream request: %v", err)), nil
}
- canonicalArn, principalName, sessionName, err := parseIamArn(clientArn)
+ // This could either be a "userID:SessionID" (in the case of an assumed role) or just a "userID"
+ // (in the case of an IAM user).
+ callerUniqueId := strings.Split(callerID.UserId, ":")[0]
+ entity, err := parseIamArn(callerID.Arn)
if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error parsing arn: %v", err)), nil
+ return logical.ErrorResponse(fmt.Sprintf("error parsing arn %q: %v", callerID.Arn, err)), nil
}
roleName := data.Get("role").(string)
if roleName == "" {
- roleName = principalName
+ roleName = entity.FriendlyName
}
roleEntry, err := b.lockedAWSRole(req.Storage, roleName)
@@ -1152,8 +1178,34 @@ func (b *backend) pathLoginUpdateIam(
// The role creation should ensure that either we're inferring this is an EC2 instance
// or that we're binding an ARN
- if roleEntry.BoundIamPrincipalARN != "" && roleEntry.BoundIamPrincipalARN != canonicalArn {
- return logical.ErrorResponse(fmt.Sprintf("IAM Principal %q does not belong to the role %q", clientArn, roleName)), nil
+ // The only way BoundIamPrincipalID could get set is if BoundIamPrincipalARN was also set and
+ // resolving to internal IDs was turned on, which can't be turned off. So, there should be no
+ // way for this to be set and not match BoundIamPrincipalARN
+ if roleEntry.BoundIamPrincipalID != "" {
+ if callerUniqueId != roleEntry.BoundIamPrincipalID {
+ return logical.ErrorResponse(fmt.Sprintf("expected IAM %s %s to resolve to unique AWS ID %q but got %q instead", entity.Type, entity.FriendlyName, roleEntry.BoundIamPrincipalID, callerUniqueId)), nil
+ }
+ } else if roleEntry.BoundIamPrincipalARN != "" {
+ if strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") {
+ fullArn := b.getCachedUserId(callerUniqueId)
+ if fullArn == "" {
+ fullArn, err = b.fullArn(entity, req.Storage)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("error looking up full ARN of entity %v: %v", entity, err)), nil
+ }
+ if fullArn == "" {
+ return logical.ErrorResponse(fmt.Sprintf("got empty string back when looking up full ARN of entity %v", entity)), nil
+ }
+ b.setCachedUserId(callerUniqueId, fullArn)
+ }
+ if !strutil.GlobbedStringsMatch(roleEntry.BoundIamPrincipalARN, fullArn) {
+ // Note: Intentionally giving the exact same error message as a few lines below. Otherwise, we might leak information
+ // about whether the bound IAM principal ARN is a wildcard or not, and what that wildcard is.
+ return logical.ErrorResponse(fmt.Sprintf("IAM Principal %q does not belong to the role %q", callerID.Arn, roleName)), nil
+ }
+ } else if roleEntry.BoundIamPrincipalARN != entity.canonicalArn() {
+ return logical.ErrorResponse(fmt.Sprintf("IAM Principal %q does not belong to the role %q", callerID.Arn, roleName)), nil
+ }
}
policies := roleEntry.Policies
@@ -1161,9 +1213,9 @@ func (b *backend) pathLoginUpdateIam(
inferredEntityType := ""
inferredEntityId := ""
if roleEntry.InferredEntityType == ec2EntityType {
- instance, err := b.validateInstance(req.Storage, sessionName, roleEntry.InferredAWSRegion, accountID)
+ instance, err := b.validateInstance(req.Storage, entity.SessionInfo, roleEntry.InferredAWSRegion, callerID.Account)
if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to verify %s as a valid EC2 instance in region %s", sessionName, roleEntry.InferredAWSRegion)), nil
+ return logical.ErrorResponse(fmt.Sprintf("failed to verify %s as a valid EC2 instance in region %s", entity.SessionInfo, roleEntry.InferredAWSRegion)), nil
}
// build a fake identity doc to pass on metadata about the instance to verifyInstanceMeetsRoleRequirements
@@ -1171,7 +1223,7 @@ func (b *backend) pathLoginUpdateIam(
Tags: nil, // Don't really need the tags, so not doing the work of converting them from Instance.Tags to identityDocument.Tags
InstanceID: *instance.InstanceId,
AmiID: *instance.ImageId,
- AccountID: accountID,
+ AccountID: callerID.Account,
Region: roleEntry.InferredAWSRegion,
PendingTime: instance.LaunchTime.Format(time.RFC3339),
}
@@ -1181,29 +1233,31 @@ func (b *backend) pathLoginUpdateIam(
return nil, err
}
if validationError != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error validating instance: %s", validationError)), nil
+ return logical.ErrorResponse(fmt.Sprintf("error validating instance: %s", validationError)), nil
}
inferredEntityType = ec2EntityType
- inferredEntityId = sessionName
+ inferredEntityId = entity.SessionInfo
}
resp := &logical.Response{
Auth: &logical.Auth{
+ Period: roleEntry.Period,
Policies: policies,
Metadata: map[string]string{
- "client_arn": clientArn,
- "canonical_arn": canonicalArn,
+ "client_arn": callerID.Arn,
+ "canonical_arn": entity.canonicalArn(),
+ "client_user_id": callerUniqueId,
"auth_type": iamAuthType,
"inferred_entity_type": inferredEntityType,
"inferred_entity_id": inferredEntityId,
"inferred_aws_region": roleEntry.InferredAWSRegion,
- "account_id": accountID,
+ "account_id": entity.AccountNumber,
},
InternalData: map[string]interface{}{
"role_name": roleName,
},
- DisplayName: principalName,
+ DisplayName: entity.FriendlyName,
LeaseOptions: logical.LeaseOptions{
Renewable: true,
TTL: roleEntry.TTL,
@@ -1256,29 +1310,50 @@ func hasValuesForIamAuth(data *framework.FieldData) (bool, bool) {
(hasRequestMethod || hasRequestUrl || hasRequestBody || hasRequestHeaders)
}
-func parseIamArn(iamArn string) (string, string, string, error) {
+func parseIamArn(iamArn string) (*iamEntity, error) {
// iamArn should look like one of the following:
- // 1. arn:aws:iam::<account_id>:user/<UserName>
+ // 1. arn:aws:iam::<account_id>:<entity_type>/<UserName>
// 2. arn:aws:sts::<account_id>:assumed-role/<RoleName>/<RoleSessionName>
// if we get something like 2, then we want to transform that back to what
// most people would expect, which is arn:aws:iam::<account_id>:role/<RoleName>
+ var entity iamEntity
fullParts := strings.Split(iamArn, ":")
- principalFullName := fullParts[5]
- // principalFullName would now be something like user/<UserName> or assumed-role/<RoleName>/<RoleSessionName>
- parts := strings.Split(principalFullName, "/")
- principalName := parts[1]
- // now, principalName should either be <UserName> or <RoleName>
- transformedArn := iamArn
- sessionName := ""
- if parts[0] == "assumed-role" {
- transformedArn = fmt.Sprintf("arn:aws:iam::%s:role/%s", fullParts[4], principalName)
- // fullParts[4] is the <account_id>
- sessionName = parts[2]
- // sessionName is <RoleSessionName>
- } else if parts[0] != "user" {
- return "", "", "", fmt.Errorf("unrecognized principal type: %q", parts[0])
+ if len(fullParts) != 6 {
+ return nil, fmt.Errorf("unrecognized arn: contains %d colon-separated parts, expected 6", len(fullParts))
}
- return transformedArn, principalName, sessionName, nil
+ if fullParts[0] != "arn" {
+ return nil, fmt.Errorf("unrecognized arn: does not begin with arn:")
+ }
+ // normally aws, but could be aws-cn or aws-us-gov
+ entity.Partition = fullParts[1]
+ if fullParts[2] != "iam" && fullParts[2] != "sts" {
+ return nil, fmt.Errorf("unrecognized service: %v, not one of iam or sts", fullParts[2])
+ }
+ // fullParts[3] is the region, which doesn't matter for AWS IAM entities
+ entity.AccountNumber = fullParts[4]
+ // fullParts[5] would now be something like user/<UserName> or assumed-role/<RoleName>/<RoleSessionName>
+ parts := strings.Split(fullParts[5], "/")
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("unrecognized arn: %q contains fewer than 2 slash-separated parts", fullParts[5])
+ }
+ entity.Type = parts[0]
+ entity.Path = strings.Join(parts[1:len(parts)-1], "/")
+ entity.FriendlyName = parts[len(parts)-1]
+ // now, entity.FriendlyName should either be <UserName> or <RoleName>
+ switch entity.Type {
+ case "assumed-role":
+ // Assumed roles don't have paths and have a slightly different format
+ // parts[2] is
+ entity.Path = ""
+ entity.FriendlyName = parts[1]
+ entity.SessionInfo = parts[2]
+ case "user":
+ case "role":
+ case "instance-profile":
+ default:
+ return &iamEntity{}, fmt.Errorf("unrecognized principal type: %q", entity.Type)
+ }
+ return &entity, nil
}
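As a worked example of the parsing and canonicalization above (illustrative values only):

    entity, _ := parseIamArn("arn:aws:sts::123456789012:assumed-role/MyRole/MySession")
    // entity.Partition      == "aws"
    // entity.AccountNumber  == "123456789012"
    // entity.Type           == "assumed-role"
    // entity.Path           == "" (assumed-role ARNs carry no path)
    // entity.FriendlyName   == "MyRole"
    // entity.SessionInfo    == "MySession"
    // entity.canonicalArn() == "arn:aws:iam::123456789012:role/MyRole"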
func validateVaultHeaderValue(headers http.Header, requestUrl *url.URL, requiredHeaderValue string) error {
@@ -1381,7 +1456,38 @@ func parseGetCallerIdentityResponse(response string) (GetCallerIdentityResponse,
return result, err
}
-func submitCallerIdentityRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) (string, string, error) {
+func parseIamRequestHeaders(headersB64 string) (http.Header, error) {
+ headersJson, err := base64.StdEncoding.DecodeString(headersB64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to base64 decode iam_request_headers")
+ }
+ var headersDecoded map[string]interface{}
+ err = jsonutil.DecodeJSON(headersJson, &headersDecoded)
+ if err != nil {
+ return nil, fmt.Errorf("failed to JSON decode iam_request_headers %q: %v", headersJson, err)
+ }
+ headers := make(http.Header)
+ for k, v := range headersDecoded {
+ switch typedValue := v.(type) {
+ case string:
+ headers.Add(k, typedValue)
+ case []interface{}:
+ for _, individualVal := range typedValue {
+ switch possibleStrVal := individualVal.(type) {
+ case string:
+ headers.Add(k, possibleStrVal)
+ default:
+ return nil, fmt.Errorf("header %q contains value %q that has type %s, not string", k, individualVal, reflect.TypeOf(individualVal))
+ }
+ }
+ default:
+ return nil, fmt.Errorf("header %q value %q has type %s, not string or []interface", k, typedValue, reflect.TypeOf(v))
+ }
+ }
+ return headers, nil
+}
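Illustratively, the decoder above accepts both scalar and list JSON values for a header, so a payload like the following round-trips into a normal http.Header (hypothetical header names):

    raw := base64.StdEncoding.EncodeToString([]byte(`{"X-Foo": "a", "X-Bar": ["b", "c"]}`))
    headers, err := parseIamRequestHeaders(raw)
    // err == nil
    // headers.Get("X-Foo") == "a"
    // headers["X-Bar"]     == []string{"b", "c"}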
+
+func submitCallerIdentityRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) (*GetCallerIdentityResult, error) {
// NOTE: We need to ensure we're calling STS, instead of acting as an unintended network proxy
// The protection against this is that this method will only call the endpoint specified in the
// client config (defaulting to sts.amazonaws.com), so it would require a Vault admin to override
@@ -1390,25 +1496,24 @@ func submitCallerIdentityRequest(method, endpoint string, parsedUrl *url.URL, bo
client := cleanhttp.DefaultClient()
response, err := client.Do(request)
if err != nil {
- return "", "", fmt.Errorf("error making request: %v", err)
+ return nil, fmt.Errorf("error making request: %v", err)
}
if response != nil {
defer response.Body.Close()
}
// we check for status code afterwards to also print out response body
responseBody, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return nil, err
+ }
if response.StatusCode != 200 {
- return "", "", fmt.Errorf("received error code %s from STS: %s", response.StatusCode, string(responseBody))
+ return nil, fmt.Errorf("received error code %s from STS: %s", response.StatusCode, string(responseBody))
}
callerIdentityResponse, err := parseGetCallerIdentityResponse(string(responseBody))
if err != nil {
- return "", "", fmt.Errorf("error parsing STS response")
+ return nil, fmt.Errorf("error parsing STS response")
}
- clientArn := callerIdentityResponse.GetCallerIdentityResult[0].Arn
- if clientArn == "" {
- return "", "", fmt.Errorf("no ARN validated")
- }
- return clientArn, callerIdentityResponse.GetCallerIdentityResult[0].Account, nil
+ return &callerIdentityResponse.GetCallerIdentityResult[0], nil
}
type GetCallerIdentityResponse struct {
@@ -1446,6 +1551,70 @@ type roleTagLoginResponse struct {
DisallowReauthentication bool `json:"disallow_reauthentication" structs:"disallow_reauthentication" mapstructure:"disallow_reauthentication"`
}
+type iamEntity struct {
+ Partition string
+ AccountNumber string
+ Type string
+ Path string
+ FriendlyName string
+ SessionInfo string
+}
+
+// Returns a Vault-internal canonical ARN for referring to an IAM entity
+func (e *iamEntity) canonicalArn() string {
+ entityType := e.Type
+ // canonicalize "assumed-role" into "role"
+ if entityType == "assumed-role" {
+ entityType = "role"
+ }
+ // Annoyingly, the assumed-role entity type doesn't have the Path of the role which was assumed
+ // So, we "canonicalize" it by just completely dropping the path. The other option would be to
+ // make an AWS API call to look up the role by FriendlyName, which introduces more complexity to
+ // code and test, and it also breaks backwards compatibility in an area where we would really want
+ // it
+ return fmt.Sprintf("arn:%s:iam::%s:%s/%s", e.Partition, e.AccountNumber, entityType, e.FriendlyName)
+}
+
+// This returns the "full" ARN of an iamEntity, how it would be referred to in AWS proper
+func (b *backend) fullArn(e *iamEntity, s logical.Storage) (string, error) {
+ // Not assuming path is reliable for any entity types
+ client, err := b.clientIAM(s, getAnyRegionForAwsPartition(e.Partition).ID(), e.AccountNumber)
+ if err != nil {
+ return "", fmt.Errorf("error creating IAM client: %v", err)
+ }
+
+ switch e.Type {
+ case "user":
+ input := iam.GetUserInput{
+ UserName: aws.String(e.FriendlyName),
+ }
+ resp, err := client.GetUser(&input)
+ if err != nil {
+ return "", fmt.Errorf("error fetching user %q: %v", e.FriendlyName, err)
+ }
+ if resp == nil {
+ return "", fmt.Errorf("nil response from GetUser")
+ }
+ return *(resp.User.Arn), nil
+ case "assumed-role":
+ fallthrough
+ case "role":
+ input := iam.GetRoleInput{
+ RoleName: aws.String(e.FriendlyName),
+ }
+ resp, err := client.GetRole(&input)
+ if err != nil {
+ return "", fmt.Errorf("error fetching role %q: %v", e.FriendlyName, err)
+ }
+ if resp == nil {
+ return "", fmt.Errorf("nil response form GetRole")
+ }
+ return *(resp.Role.Arn), nil
+ default:
+ return "", fmt.Errorf("unrecognized entity type: %s", e.Type)
+ }
+}
+
const iamServerIdHeader = "X-Vault-AWS-IAM-Server-ID"
const pathLoginSyn = `
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go
index e96bed8..f813a58 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go
@@ -1,8 +1,12 @@
package awsauth
import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
"net/http"
"net/url"
+ "reflect"
"testing"
)
@@ -32,11 +36,17 @@ func TestBackend_pathLogin_getCallerIdentityResponse(t *testing.T) {
expectedRoleArn := "arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName"
parsedUserResponse, err := parseGetCallerIdentityResponse(responseFromUser)
+ if err != nil {
+ t.Fatal(err)
+ }
if parsed_arn := parsedUserResponse.GetCallerIdentityResult[0].Arn; parsed_arn != expectedUserArn {
t.Errorf("expected to parse arn %#v, got %#v", expectedUserArn, parsed_arn)
}
parsedRoleResponse, err := parseGetCallerIdentityResponse(responseFromAssumedRole)
+ if err != nil {
+ t.Fatal(err)
+ }
if parsed_arn := parsedRoleResponse.GetCallerIdentityResult[0].Arn; parsed_arn != expectedRoleArn {
t.Errorf("expected to parn arn %#v; got %#v", expectedRoleArn, parsed_arn)
}
@@ -48,36 +58,56 @@ func TestBackend_pathLogin_getCallerIdentityResponse(t *testing.T) {
}
func TestBackend_pathLogin_parseIamArn(t *testing.T) {
- userArn := "arn:aws:iam::123456789012:user/MyUserName"
- assumedRoleArn := "arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName"
- baseRoleArn := "arn:aws:iam::123456789012:role/RoleName"
-
- xformedUser, principalFriendlyName, sessionName, err := parseIamArn(userArn)
- if err != nil {
- t.Fatal(err)
- }
- if xformedUser != userArn {
- t.Fatalf("expected to transform ARN %#v into %#v but got %#v instead", userArn, userArn, xformedUser)
- }
- if principalFriendlyName != "MyUserName" {
- t.Fatalf("expected to extract MyUserName from ARN %#v but got %#v instead", userArn, principalFriendlyName)
- }
- if sessionName != "" {
- t.Fatalf("expected to extract no session name from ARN %#v but got %#v instead", userArn, sessionName)
+ testParser := func(inputArn, expectedCanonicalArn string, expectedEntity iamEntity) {
+ entity, err := parseIamArn(inputArn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if expectedCanonicalArn != "" && entity.canonicalArn() != expectedCanonicalArn {
+ t.Fatalf("expected to canonicalize ARN %q into %q but got %q instead", inputArn, expectedCanonicalArn, entity.canonicalArn())
+ }
+ if *entity != expectedEntity {
+ t.Fatalf("expected to get iamEntity %#v from input ARN %q but instead got %#v", expectedEntity, inputArn, *entity)
+ }
}
- xformedRole, principalFriendlyName, sessionName, err := parseIamArn(assumedRoleArn)
- if err != nil {
- t.Fatal(err)
+ testParser("arn:aws:iam::123456789012:user/UserPath/MyUserName",
+ "arn:aws:iam::123456789012:user/MyUserName",
+ iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "user", Path: "UserPath", FriendlyName: "MyUserName"},
+ )
+ canonicalRoleArn := "arn:aws:iam::123456789012:role/RoleName"
+ testParser("arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName",
+ canonicalRoleArn,
+ iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "assumed-role", FriendlyName: "RoleName", SessionInfo: "RoleSessionName"},
+ )
+ testParser("arn:aws:iam::123456789012:role/RolePath/RoleName",
+ canonicalRoleArn,
+ iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "role", Path: "RolePath", FriendlyName: "RoleName"},
+ )
+ testParser("arn:aws:iam::123456789012:instance-profile/profilePath/InstanceProfileName",
+ "",
+ iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "instance-profile", Path: "profilePath", FriendlyName: "InstanceProfileName"},
+ )
+
+ // Test that it properly handles pathological inputs...
+ _, err := parseIamArn("")
+ if err == nil {
+ t.Error("expected error from empty input string")
}
- if xformedRole != baseRoleArn {
- t.Fatalf("expected to transform ARN %#v into %#v but got %#v instead", assumedRoleArn, baseRoleArn, xformedRole)
+
+ _, err = parseIamArn("arn:aws:iam::123456789012:role")
+ if err == nil {
+ t.Error("expected error from malformed ARN without a role name")
}
- if principalFriendlyName != "RoleName" {
- t.Fatalf("expected to extract principal name of RoleName from ARN %#v but got %#v instead", assumedRoleArn, sessionName)
+
+ _, err = parseIamArn("arn:aws:iam")
+ if err == nil {
+ t.Error("expected error from incomplete ARN (arn:aws:iam)")
}
- if sessionName != "RoleSessionName" {
- t.Fatalf("expected to extract role session name of RoleSessionName from ARN %#v but got %#v instead", assumedRoleArn, sessionName)
+
+ _, err = parseIamArn("arn:aws:iam::1234556789012:/")
+ if err == nil {
+ t.Error("expected error from empty principal type and no principal name (arn:aws:iam::1234556789012:/)")
}
}
@@ -138,3 +168,43 @@ func TestBackend_validateVaultHeaderValue(t *testing.T) {
t.Errorf("did NOT validate valid POST request with split Authorization header: %v", err)
}
}
+
+func TestBackend_pathLogin_parseIamRequestHeaders(t *testing.T) {
+ testIamParser := func(headers interface{}, expectedHeaders http.Header) error {
+ headersJson, err := json.Marshal(headers)
+ if err != nil {
+ return fmt.Errorf("unable to JSON encode headers: %v", err)
+ }
+ headersB64 := base64.StdEncoding.EncodeToString(headersJson)
+
+ parsedHeaders, err := parseIamRequestHeaders(headersB64)
+ if err != nil {
+ return fmt.Errorf("error parsing encoded headers: %v", err)
+ }
+ if parsedHeaders == nil {
+ return fmt.Errorf("nil result from parsing headers")
+ }
+ if !reflect.DeepEqual(parsedHeaders, expectedHeaders) {
+ return fmt.Errorf("parsed headers not equal to input headers")
+ }
+ return nil
+ }
+
+ headersGoStyle := http.Header{
+ "Header1": []string{"Value1"},
+ "Header2": []string{"Value2"},
+ }
+ headersMixedType := map[string]interface{}{
+ "Header1": "Value1",
+ "Header2": []string{"Value2"},
+ }
+
+ err := testIamParser(headersGoStyle, headersGoStyle)
+ if err != nil {
+ t.Errorf("error parsing go-style headers: %v", err)
+ }
+ err = testIamParser(headersMixedType, headersGoStyle)
+ if err != nil {
+ t.Errorf("error parsing mixed-style headers: %v", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go
index f6c19f2..476beca 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go
@@ -63,6 +63,14 @@ with an IAM instance profile ARN which has a prefix that matches
the value specified by this parameter. The value is prefix-matched
(as though it were a glob ending in '*'). This is only checked when
auth_type is ec2.`,
+ },
+ "resolve_aws_unique_ids": {
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If set, resolve all AWS IAM ARNs into AWS's internal unique IDs.
+When an IAM entity (e.g., user, role, or instance profile) is deleted, then all references
+to it within the role will be invalidated, which prevents a new IAM entity from being created
+with the same name and matching the role's IAM binds. Once set, this cannot be unset.`,
},
"inferred_entity_type": {
Type: framework.TypeString,
@@ -121,7 +129,7 @@ to 0, in which case the value will fallback to the system/mount defaults.`,
Description: "The maximum allowed lifetime of tokens issued using this role.",
},
"policies": {
- Type: framework.TypeString,
+ Type: framework.TypeCommaStringSlice,
Default: "default",
Description: "Policies to be set on tokens issued using this role.",
},
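A minimal sketch of creating a role that exercises the new unique-ID resolution through the plain API (hypothetical mount path, role name, and ARN):

    package main

    import (
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        _, err = client.Logical().Write("auth/aws/role/dev-role", map[string]interface{}{
            "auth_type":               "iam",
            "bound_iam_principal_arn": "arn:aws:iam::123456789012:role/DevRole", // hypothetical
            "resolve_aws_unique_ids":  true, // the default; cannot be unset once set
            "policies":                "dev,ops",
        })
        if err != nil {
            log.Fatal(err)
        }
    }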
@@ -210,7 +218,7 @@ func (b *backend) lockedAWSRole(s logical.Storage, roleName string) (*awsRoleEnt
if roleEntry == nil {
return nil, nil
}
- needUpgrade, err := upgradeRoleEntry(roleEntry)
+ needUpgrade, err := b.upgradeRoleEntry(s, roleEntry)
if err != nil {
return nil, fmt.Errorf("error upgrading roleEntry: %v", err)
}
@@ -228,7 +236,7 @@ func (b *backend) lockedAWSRole(s logical.Storage, roleName string) (*awsRoleEnt
return nil, nil
}
// now re-check to see if we need to upgrade
- if needUpgrade, err = upgradeRoleEntry(roleEntry); err != nil {
+ if needUpgrade, err = b.upgradeRoleEntry(s, roleEntry); err != nil {
return nil, fmt.Errorf("error upgrading roleEntry: %v", err)
}
if needUpgrade {
@@ -284,7 +292,7 @@ func (b *backend) nonLockedSetAWSRole(s logical.Storage, roleName string,
// If needed, updates the role entry and returns a bool indicating if it was updated
// (and thus needs to be persisted)
-func upgradeRoleEntry(roleEntry *awsRoleEntry) (bool, error) {
+func (b *backend) upgradeRoleEntry(s logical.Storage, roleEntry *awsRoleEntry) (bool, error) {
if roleEntry == nil {
return false, fmt.Errorf("received nil roleEntry")
}
@@ -307,6 +315,19 @@ func upgradeRoleEntry(roleEntry *awsRoleEntry) (bool, error) {
upgraded = true
}
+ if roleEntry.AuthType == iamAuthType &&
+ roleEntry.ResolveAWSUniqueIDs &&
+ roleEntry.BoundIamPrincipalARN != "" &&
+ roleEntry.BoundIamPrincipalID == "" &&
+ !strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") {
+ principalId, err := b.resolveArnToUniqueIDFunc(s, roleEntry.BoundIamPrincipalARN)
+ if err != nil {
+ return false, err
+ }
+ roleEntry.BoundIamPrincipalID = principalId
+ upgraded = true
+ }
+
return upgraded, nil
}
@@ -411,7 +432,7 @@ func (b *backend) pathRoleCreateUpdate(
if roleEntry == nil {
roleEntry = &awsRoleEntry{}
} else {
- needUpdate, err := upgradeRoleEntry(roleEntry)
+ needUpdate, err := b.upgradeRoleEntry(req.Storage, roleEntry)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf("failed to update roleEntry: %v", err)), nil
}
@@ -445,6 +466,19 @@ func (b *backend) pathRoleCreateUpdate(
roleEntry.BoundSubnetID = boundSubnetIDRaw.(string)
}
+ if resolveAWSUniqueIDsRaw, ok := data.GetOk("resolve_aws_unique_ids"); ok {
+ switch {
+ case req.Operation == logical.CreateOperation:
+ roleEntry.ResolveAWSUniqueIDs = resolveAWSUniqueIDsRaw.(bool)
+ case roleEntry.ResolveAWSUniqueIDs && !resolveAWSUniqueIDsRaw.(bool):
+ return logical.ErrorResponse("changing resolve_aws_unique_ids from true to false is not allowed"), nil
+ default:
+ roleEntry.ResolveAWSUniqueIDs = resolveAWSUniqueIDsRaw.(bool)
+ }
+ } else if req.Operation == logical.CreateOperation {
+ roleEntry.ResolveAWSUniqueIDs = data.Get("resolve_aws_unique_ids").(bool)
+ }
+
if boundIamRoleARNRaw, ok := data.GetOk("bound_iam_role_arn"); ok {
roleEntry.BoundIamRoleARN = boundIamRoleARNRaw.(string)
}
@@ -454,7 +488,29 @@ func (b *backend) pathRoleCreateUpdate(
}
if boundIamPrincipalARNRaw, ok := data.GetOk("bound_iam_principal_arn"); ok {
- roleEntry.BoundIamPrincipalARN = boundIamPrincipalARNRaw.(string)
+ principalARN := boundIamPrincipalARNRaw.(string)
+ roleEntry.BoundIamPrincipalARN = principalARN
+ // Explicitly not checking to see if the user has changed the ARN under us
+ // This allows the user to submit an update with the same ARN to force Vault
+ // to re-resolve the ARN to the unique ID, in case an entity was deleted and
+ // recreated
+ if roleEntry.ResolveAWSUniqueIDs && !strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") {
+ principalID, err := b.resolveArnToUniqueIDFunc(req.Storage, principalARN)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed updating the unique ID of ARN %#v: %#v", principalARN, err)), nil
+ }
+ roleEntry.BoundIamPrincipalID = principalID
+ } else {
+ // Need to handle the case where we're switching from a non-wildcard principal to a wildcard principal
+ roleEntry.BoundIamPrincipalID = ""
+ }
+ } else if roleEntry.ResolveAWSUniqueIDs && roleEntry.BoundIamPrincipalARN != "" && !strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") {
+ // we're turning on resolution on this role, so ensure we update it
+ principalID, err := b.resolveArnToUniqueIDFunc(req.Storage, roleEntry.BoundIamPrincipalARN)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("unable to resolve ARN %#v to internal ID: %#v", roleEntry.BoundIamPrincipalARN, err)), nil
+ }
+ roleEntry.BoundIamPrincipalID = principalID
}
if inferRoleTypeRaw, ok := data.GetOk("inferred_entity_type"); ok {
@@ -570,11 +626,11 @@ func (b *backend) pathRoleCreateUpdate(
return logical.ErrorResponse("at least be one bound parameter should be specified on the role"), nil
}
- policiesStr, ok := data.GetOk("policies")
+ policiesRaw, ok := data.GetOk("policies")
if ok {
- roleEntry.Policies = policyutil.ParsePolicies(policiesStr.(string))
+ roleEntry.Policies = policyutil.ParsePolicies(policiesRaw)
} else if req.Operation == logical.CreateOperation {
- roleEntry.Policies = []string{"default"}
+ roleEntry.Policies = []string{}
}
disallowReauthenticationBool, ok := data.GetOk("disallow_reauthentication")
@@ -669,7 +725,7 @@ func (b *backend) pathRoleCreateUpdate(
return nil, err
}
- if len(resp.Warnings()) == 0 {
+ if len(resp.Warnings) == 0 {
return nil, nil
}
@@ -682,6 +738,7 @@ type awsRoleEntry struct {
BoundAmiID string `json:"bound_ami_id" structs:"bound_ami_id" mapstructure:"bound_ami_id"`
BoundAccountID string `json:"bound_account_id" structs:"bound_account_id" mapstructure:"bound_account_id"`
BoundIamPrincipalARN string `json:"bound_iam_principal_arn" structs:"bound_iam_principal_arn" mapstructure:"bound_iam_principal_arn"`
+ BoundIamPrincipalID string `json:"bound_iam_principal_id" structs:"bound_iam_principal_id" mapstructure:"bound_iam_principal_id"`
BoundIamRoleARN string `json:"bound_iam_role_arn" structs:"bound_iam_role_arn" mapstructure:"bound_iam_role_arn"`
BoundIamInstanceProfileARN string `json:"bound_iam_instance_profile_arn" structs:"bound_iam_instance_profile_arn" mapstructure:"bound_iam_instance_profile_arn"`
BoundRegion string `json:"bound_region" structs:"bound_region" mapstructure:"bound_region"`
@@ -689,6 +746,7 @@ type awsRoleEntry struct {
BoundVpcID string `json:"bound_vpc_id" structs:"bound_vpc_id" mapstructure:"bound_vpc_id"`
InferredEntityType string `json:"inferred_entity_type" structs:"inferred_entity_type" mapstructure:"inferred_entity_type"`
InferredAWSRegion string `json:"inferred_aws_region" structs:"inferred_aws_region" mapstructure:"inferred_aws_region"`
+ ResolveAWSUniqueIDs bool `json:"resolve_aws_unique_ids" structs:"resolve_aws_unique_ids" mapstructure:"resolve_aws_unique_ids"`
RoleTag string `json:"role_tag" structs:"role_tag" mapstructure:"role_tag"`
AllowInstanceMigration bool `json:"allow_instance_migration" structs:"allow_instance_migration" mapstructure:"allow_instance_migration"`
TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go
index 5c8a119..0f5dc5e 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go
@@ -35,7 +35,7 @@ If set, the created tag can only be used by the instance with the given ID.`,
},
"policies": &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeCommaStringSlice,
Description: "Policies to be associated with the tag. If set, must be a subset of the role's policies. If set, but set to an empty value, only the 'default' policy will be given to issued tokens.",
},
@@ -107,9 +107,9 @@ func (b *backend) pathRoleTagUpdate(
// should be inherited. So, by leaving the policies var unset to anything when it is not
// supplied, we ensure that it inherits all the policies on the role.
var policies []string
- policiesStr, ok := data.GetOk("policies")
+ policiesRaw, ok := data.GetOk("policies")
if ok {
- policies = policyutil.ParsePolicies(policiesStr.(string))
+ policies = policyutil.ParsePolicies(policiesRaw)
}
if !strutil.StrListSubset(roleEntry.Policies, policies) {
resp.AddWarning("Policies on the tag are not a subset of the policies set on the role. Login will not be allowed with this tag unless the role policies are updated.")
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go
index 52ff435..21c87ab 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go
@@ -19,7 +19,7 @@ func TestBackend_pathRoleEc2(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -135,7 +135,81 @@ func TestBackend_pathRoleEc2(t *testing.T) {
if resp != nil {
t.Fatalf("bad: response: expected:nil actual:%#v\n", resp)
}
+}
+func Test_enableIamIDResolution(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ roleName := "upgradable_role"
+
+ b.resolveArnToUniqueIDFunc = resolveArnToFakeUniqueId
+
+ data := map[string]interface{}{
+ "auth_type": iamAuthType,
+ "policies": "p,q",
+ "bound_iam_principal_arn": "arn:aws:iam::123456789012:role/MyRole",
+ "resolve_aws_unique_ids": false,
+ }
+
+ submitRequest := func(roleName string, op logical.Operation) (*logical.Response, error) {
+ return b.HandleRequest(&logical.Request{
+ Operation: op,
+ Path: "role/" + roleName,
+ Data: data,
+ Storage: storage,
+ })
+ }
+
+ resp, err := submitRequest(roleName, logical.CreateOperation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create role: %#v", resp)
+ }
+
+ resp, err = submitRequest(roleName, logical.ReadOperation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatalf("failed to read role: resp:%#v,\nerr:%#v", resp, err)
+ }
+ if resp.Data["bound_iam_principal_id"] != "" {
+ t.Fatalf("expected to get no unique ID in role, but got %q", resp.Data["bound_iam_principal_id"])
+ }
+
+ data = map[string]interface{}{
+ "resolve_aws_unique_ids": true,
+ }
+ resp, err = submitRequest(roleName, logical.UpdateOperation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil && resp.IsError() {
+ t.Fatalf("unable to upgrade role to resolve internal IDs: resp:%#v", resp)
+ }
+
+ resp, err = submitRequest(roleName, logical.ReadOperation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.IsError() {
+ t.Fatalf("failed to read role: resp:%#v,\nerr:%#v", resp, err)
+ }
+ if resp.Data["bound_iam_principal_id"] != "FakeUniqueId1" {
+ t.Fatalf("bad: expected upgrade of role resolve principal ID to %q, but got %q instead", "FakeUniqueId1", resp.Data["bound_iam_principal_id"])
+ }
}
func TestBackend_pathIam(t *testing.T) {
@@ -147,7 +221,7 @@ func TestBackend_pathIam(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -174,6 +248,7 @@ func TestBackend_pathIam(t *testing.T) {
"policies": "p,q,r,s",
"max_ttl": "2h",
"bound_iam_principal_arn": "n:aws:iam::123456789012:user/MyUserName",
+ "resolve_aws_unique_ids": false,
}
resp, err = b.HandleRequest(&logical.Request{
Operation: logical.CreateOperation,
@@ -310,7 +385,7 @@ func TestBackend_pathRoleMixedTypes(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -369,6 +444,7 @@ func TestBackend_pathRoleMixedTypes(t *testing.T) {
data["inferred_entity_type"] = ec2EntityType
data["inferred_aws_region"] = "us-east-1"
+ data["resolve_aws_unique_ids"] = false
resp, err = submitRequest("multipleTypesInferred", logical.CreateOperation)
if err != nil {
t.Fatal(err)
@@ -376,6 +452,32 @@ func TestBackend_pathRoleMixedTypes(t *testing.T) {
if resp.IsError() {
t.Fatalf("didn't allow creation of roles with only inferred bindings")
}
+
+ b.resolveArnToUniqueIDFunc = resolveArnToFakeUniqueId
+ data["resolve_aws_unique_ids"] = true
+ resp, err = submitRequest("withInternalIdResolution", logical.CreateOperation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.IsError() {
+ t.Fatalf("didn't allow creation of role resolving unique IDs")
+ }
+ resp, err = submitRequest("withInternalIdResolution", logical.ReadOperation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Data["bound_iam_principal_id"] != "FakeUniqueId1" {
+ t.Fatalf("expected fake unique ID of FakeUniqueId1, got %q", resp.Data["bound_iam_principal_id"])
+ }
+ data["resolve_aws_unique_ids"] = false
+ resp, err = submitRequest("withInternalIdResolution", logical.UpdateOperation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !resp.IsError() {
+ t.Fatalf("allowed changing resolve_aws_unique_ids from true to false")
+ }
+
}
func TestAwsEc2_RoleCrud(t *testing.T) {
@@ -389,7 +491,7 @@ func TestAwsEc2_RoleCrud(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -417,11 +519,12 @@ func TestAwsEc2_RoleCrud(t *testing.T) {
"bound_ami_id": "testamiid",
"bound_account_id": "testaccountid",
"bound_region": "testregion",
- "bound_iam_role_arn": "testiamrolearn",
- "bound_iam_instance_profile_arn": "testiaminstanceprofilearn",
+ "bound_iam_role_arn": "arn:aws:iam::123456789012:role/MyRole",
+ "bound_iam_instance_profile_arn": "arn:aws:iam::123456789012:instance-profile/MyInstanceProfile",
"bound_subnet_id": "testsubnetid",
"bound_vpc_id": "testvpcid",
"role_tag": "testtag",
+ "resolve_aws_unique_ids": false,
"allow_instance_migration": true,
"ttl": "10m",
"max_ttl": "20m",
@@ -451,17 +554,19 @@ func TestAwsEc2_RoleCrud(t *testing.T) {
"bound_account_id": "testaccountid",
"bound_region": "testregion",
"bound_iam_principal_arn": "",
- "bound_iam_role_arn": "testiamrolearn",
- "bound_iam_instance_profile_arn": "testiaminstanceprofilearn",
+ "bound_iam_principal_id": "",
+ "bound_iam_role_arn": "arn:aws:iam::123456789012:role/MyRole",
+ "bound_iam_instance_profile_arn": "arn:aws:iam::123456789012:instance-profile/MyInstanceProfile",
"bound_subnet_id": "testsubnetid",
"bound_vpc_id": "testvpcid",
"inferred_entity_type": "",
"inferred_aws_region": "",
+ "resolve_aws_unique_ids": false,
"role_tag": "testtag",
"allow_instance_migration": true,
"ttl": time.Duration(600),
"max_ttl": time.Duration(1200),
- "policies": []string{"default", "testpolicy1", "testpolicy2"},
+ "policies": []string{"testpolicy1", "testpolicy2"},
"disallow_reauthentication": true,
"period": time.Duration(60),
}
@@ -512,14 +617,15 @@ func TestAwsEc2_RoleDurationSeconds(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
roleData := map[string]interface{}{
"auth_type": "ec2",
- "bound_iam_instance_profile_arn": "testarn",
+ "bound_iam_instance_profile_arn": "arn:aws:iam::123456789012:instance-profile/test-profile-name",
+ "resolve_aws_unique_ids": false,
"ttl": "10s",
"max_ttl": "20s",
"period": "30s",
@@ -554,3 +660,7 @@ func TestAwsEc2_RoleDurationSeconds(t *testing.T) {
t.Fatalf("bad: period; expected: 30, actual: %d", resp.Data["period"])
}
}
+
+func resolveArnToFakeUniqueId(s logical.Storage, arn string) (string, error) {
+ return "FakeUniqueId1", nil
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go
index 088cc41..9420164 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go
@@ -10,9 +10,8 @@ import (
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
b := Backend()
- _, err := b.Setup(conf)
- if err != nil {
- return b, err
+ if err := b.Setup(conf); err != nil {
+ return nil, err
}
return b, nil
}
@@ -21,13 +20,11 @@ func Backend() *backend {
var b backend
b.Backend = &framework.Backend{
Help: backendHelp,
-
PathsSpecial: &logical.Paths{
Unauthenticated: []string{
"login",
},
},
-
Paths: append([]*framework.Path{
pathConfig(&b),
pathLogin(&b),
@@ -35,10 +32,9 @@ func Backend() *backend {
pathCerts(&b),
pathCRLs(&b),
}),
-
- AuthRenew: b.pathLoginRenew,
-
- Invalidate: b.invalidate,
+ AuthRenew: b.pathLoginRenew,
+ Invalidate: b.invalidate,
+ BackendType: logical.TypeCredential,
}
b.crlUpdateMutex = &sync.RWMutex{}
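
Note: this hunk tracks a framework change in which Setup returns only an error, so a Factory never hands back a half-initialized backend. A small sketch of the resulting contract, using stand-in types rather than the framework's real ones:

    package main

    import (
        "errors"
        "fmt"
    )

    type backendConfig struct{ ok bool }

    type backend struct{ configured bool }

    // Setup returns only an error under the updated signature.
    func (b *backend) Setup(conf *backendConfig) error {
        if conf == nil || !conf.ok {
            return errors.New("invalid backend config")
        }
        b.configured = true
        return nil
    }

    // Factory returns nil on failure so callers can never use an
    // unconfigured backend by mistake.
    func Factory(conf *backendConfig) (*backend, error) {
        b := &backend{}
        if err := b.Setup(conf); err != nil {
            return nil, err
        }
        return b, nil
    }

    func main() {
        _, err := Factory(&backendConfig{ok: false})
        fmt.Println(err) // invalid backend config
    }
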
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go
index f96c9cb..4680d61 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go
@@ -1,14 +1,23 @@
package cert
import (
+ "crypto/rand"
+ "crypto/rsa"
"crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
"fmt"
+ "io"
"io/ioutil"
+ "math/big"
+ "net"
+ "os"
"reflect"
"testing"
"time"
"github.com/hashicorp/go-rootcerts"
+ "github.com/hashicorp/vault/helper/certutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
logicaltest "github.com/hashicorp/vault/logical/testing"
@@ -36,10 +45,10 @@ const (
// But the client, presents the CA cert of the server to trust the server.
// The client can present a cert and key which is completely independent of server's CA.
// The connection state returned will contain the certificate presented by the client.
-func connectionState(t *testing.T, serverCAPath, serverCertPath, serverKeyPath, clientCertPath, clientKeyPath string) tls.ConnectionState {
+func connectionState(serverCAPath, serverCertPath, serverKeyPath, clientCertPath, clientKeyPath string) (tls.ConnectionState, error) {
serverKeyPair, err := tls.LoadX509KeyPair(serverCertPath, serverKeyPath)
if err != nil {
- t.Fatal(err)
+ return tls.ConnectionState{}, err
}
// Prepare the listener configuration with server's key pair
listenConf := &tls.Config{
@@ -49,7 +58,7 @@ func connectionState(t *testing.T, serverCAPath, serverCertPath, serverKeyPath,
clientKeyPair, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath)
if err != nil {
- t.Fatal(err)
+ return tls.ConnectionState{}, err
}
// Load the CA cert required by the client to authenticate the server.
rootConfig := &rootcerts.Config{
@@ -57,7 +66,7 @@ func connectionState(t *testing.T, serverCAPath, serverCertPath, serverKeyPath,
}
serverCAs, err := rootcerts.LoadCACerts(rootConfig)
if err != nil {
- t.Fatal(err)
+ return tls.ConnectionState{}, err
}
// Prepare the dial configuration that the client uses to establish the connection.
dialConf := &tls.Config{
@@ -68,37 +77,287 @@ func connectionState(t *testing.T, serverCAPath, serverCertPath, serverKeyPath,
// Start the server.
list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf)
if err != nil {
- t.Fatal(err)
+ return tls.ConnectionState{}, err
}
defer list.Close()
+ // Accept connections.
+ serverErrors := make(chan error, 1)
+ connState := make(chan tls.ConnectionState)
+ go func() {
+ defer close(connState)
+ serverConn, err := list.Accept()
+ if err != nil {
+ serverErrors <- err
+ close(serverErrors)
+ return
+ }
+ defer serverConn.Close()
+
+ // Read the ping
+ buf := make([]byte, 4)
+ _, err = serverConn.Read(buf)
+ if err != nil && err != io.EOF {
+ serverErrors <- err
+ close(serverErrors)
+ return
+ }
+ close(serverErrors)
+ connState <- serverConn.(*tls.Conn).ConnectionState()
+ }()
+
// Establish a connection from the client side and write a few bytes.
+ clientErrors := make(chan error, 1)
go func() {
addr := list.Addr().String()
conn, err := tls.Dial("tcp", addr, dialConf)
if err != nil {
- t.Fatalf("err: %v", err)
+ clientErrors <- err
+ close(clientErrors)
+ return
}
defer conn.Close()
// Write ping
- conn.Write([]byte("ping"))
+ _, err = conn.Write([]byte("ping"))
+ if err != nil {
+ clientErrors <- err
+ }
+ close(clientErrors)
}()
- // Accept the connection on the server side.
- serverConn, err := list.Accept()
+ for err = range clientErrors {
+ if err != nil {
+ return tls.ConnectionState{}, fmt.Errorf("error in client goroutine:%v", err)
+ }
+ }
+
+ for err = range serverErrors {
+ if err != nil {
+ return tls.ConnectionState{}, fmt.Errorf("error in server goroutine:%v", err)
+ }
+ }
+ // Grab the current state
+ return <-connState, nil
+}
+
+func TestBackend_NonCAExpiry(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ // Create a self-signed certificate and issue a leaf certificate using the
+ // CA cert
+ template := &x509.Certificate{
+ SerialNumber: big.NewInt(1234),
+ Subject: pkix.Name{
+ CommonName: "localhost",
+ Organization: []string{"hashicorp"},
+ OrganizationalUnit: []string{"vault"},
+ },
+ BasicConstraintsValid: true,
+ NotBefore: time.Now().Add(-30 * time.Second),
+ NotAfter: time.Now().Add(50 * time.Second),
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+ KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign),
+ }
+
+ // Set IP SAN
+ parsedIP := net.ParseIP("127.0.0.1")
+ if parsedIP == nil {
+ t.Fatalf("failed to create parsed IP")
+ }
+ template.IPAddresses = []net.IP{parsedIP}
+
+ // Private key for CA cert
+ caPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
- defer serverConn.Close()
- // Read the ping
- buf := make([]byte, 4)
- serverConn.Read(buf)
+ // Marshal the CA private key so it can be written out as a PEM file
+ caPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(caPrivateKey)
- // Grab the current state
- connState := serverConn.(*tls.Conn).ConnectionState()
- return connState
+ caPublicKey := &caPrivateKey.PublicKey
+
+ template.IsCA = true
+
+ caCertBytes, err := x509.CreateCertificate(rand.Reader, template, template, caPublicKey, caPrivateKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ caCert, err := x509.ParseCertificate(caCertBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ parsedCaBundle := &certutil.ParsedCertBundle{
+ Certificate: caCert,
+ CertificateBytes: caCertBytes,
+ PrivateKeyBytes: caPrivateKeyBytes,
+ PrivateKeyType: certutil.RSAPrivateKey,
+ }
+
+ caCertBundle, err := parsedCaBundle.ToCertBundle()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ caCertFile, err := ioutil.TempFile("", "caCert")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer os.Remove(caCertFile.Name())
+
+ if _, err := caCertFile.Write([]byte(caCertBundle.Certificate)); err != nil {
+ t.Fatal(err)
+ }
+ if err := caCertFile.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ caKeyFile, err := ioutil.TempFile("", "caKey")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer os.Remove(caKeyFile.Name())
+
+ if _, err := caKeyFile.Write([]byte(caCertBundle.PrivateKey)); err != nil {
+ t.Fatal(err)
+ }
+ if err := caKeyFile.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Prepare template for non-CA cert
+
+ template.IsCA = false
+ template.SerialNumber = big.NewInt(5678)
+
+ template.KeyUsage = x509.KeyUsage(x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign)
+ issuedPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ issuedPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(issuedPrivateKey)
+
+ issuedPublicKey := &issuedPrivateKey.PublicKey
+
+ // Keep a short certificate lifetime so logins can be tested both when
+ // cert is valid and when it gets expired
+ template.NotBefore = time.Now().Add(-2 * time.Second)
+ template.NotAfter = time.Now().Add(3 * time.Second)
+
+ issuedCertBytes, err := x509.CreateCertificate(rand.Reader, template, caCert, issuedPublicKey, caPrivateKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ issuedCert, err := x509.ParseCertificate(issuedCertBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ parsedIssuedBundle := &certutil.ParsedCertBundle{
+ Certificate: issuedCert,
+ CertificateBytes: issuedCertBytes,
+ PrivateKeyBytes: issuedPrivateKeyBytes,
+ PrivateKeyType: certutil.RSAPrivateKey,
+ }
+
+ issuedCertBundle, err := parsedIssuedBundle.ToCertBundle()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ issuedCertFile, err := ioutil.TempFile("", "issuedCert")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer os.Remove(issuedCertFile.Name())
+
+ if _, err := issuedCertFile.Write([]byte(issuedCertBundle.Certificate)); err != nil {
+ t.Fatal(err)
+ }
+ if err := issuedCertFile.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ issuedKeyFile, err := ioutil.TempFile("", "issuedKey")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer os.Remove(issuedKeyFile.Name())
+
+ if _, err := issuedKeyFile.Write([]byte(issuedCertBundle.PrivateKey)); err != nil {
+ t.Fatal(err)
+ }
+ if err := issuedKeyFile.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Register the Non-CA certificate of the client key pair
+ certData := map[string]interface{}{
+ "certificate": issuedCertBundle.Certificate,
+ "policies": "abc",
+ "display_name": "cert1",
+ "ttl": 10000,
+ }
+ certReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "certs/cert1",
+ Storage: storage,
+ Data: certData,
+ }
+
+ resp, err = b.HandleRequest(certReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Create connection state using the certificates generated
+ connState, err := connectionState(caCertFile.Name(), caCertFile.Name(), caKeyFile.Name(), issuedCertFile.Name(), issuedKeyFile.Name())
+ if err != nil {
+ t.Fatalf("error testing connection state:%v", err)
+ }
+
+ loginReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Storage: storage,
+ Path: "login",
+ Connection: &logical.Connection{
+ ConnState: &connState,
+ },
+ }
+
+ // Login when the certificate is still valid. Login should succeed.
+ resp, err = b.HandleRequest(loginReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Wait until the certificate expires
+ time.Sleep(5 * time.Second)
+
+ // Login attempt after certificate expiry should fail
+ resp, err = b.HandleRequest(loginReq)
+ if err == nil {
+ t.Fatalf("expected error due to expired certificate")
+ }
}
func TestBackend_RegisteredNonCA_CRL(t *testing.T) {
@@ -137,7 +396,10 @@ func TestBackend_RegisteredNonCA_CRL(t *testing.T) {
// Connection state is presenting the client Non-CA cert and its key.
// This is exactly what is registered at the backend.
- connState := connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1)
+ connState, err := connectionState(serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1)
+ if err != nil {
+ t.Fatalf("error testing connection state:%v", err)
+ }
loginReq := &logical.Request{
Operation: logical.UpdateOperation,
Storage: storage,
@@ -217,7 +479,10 @@ func TestBackend_CRLs(t *testing.T) {
// Connection state is presenting the client CA cert and its key.
// This is exactly what is registered at the backend.
- connState := connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath1, testRootCAKeyPath1)
+ connState, err := connectionState(serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath1, testRootCAKeyPath1)
+ if err != nil {
+ t.Fatalf("error testing connection state:%v", err)
+ }
loginReq := &logical.Request{
Operation: logical.UpdateOperation,
Storage: storage,
@@ -233,7 +498,10 @@ func TestBackend_CRLs(t *testing.T) {
// Now, without changing the registered client CA cert, present from
// the client side, a cert issued using the registered CA.
- connState = connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1)
+ connState, err = connectionState(serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1)
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
loginReq.Connection.ConnState = &connState
// Attempt login with the updated connection
@@ -283,7 +551,10 @@ func TestBackend_CRLs(t *testing.T) {
}
// Test login using a different client CA cert pair.
- connState = connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath2, testRootCAKeyPath2)
+ connState, err = connectionState(serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath2, testRootCAKeyPath2)
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
loginReq.Connection.ConnState = &connState
// Attempt login with the updated connection
@@ -359,8 +630,11 @@ func TestBackend_CertWrites(t *testing.T) {
// Test a client trusted by a CA
func TestBackend_basic_CA(t *testing.T) {
- connState := testConnState(t, "test-fixtures/keys/cert.pem",
+ connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
@@ -385,8 +659,11 @@ func TestBackend_basic_CA(t *testing.T) {
// Test CRL behavior
func TestBackend_Basic_CRLs(t *testing.T) {
- connState := testConnState(t, "test-fixtures/keys/cert.pem",
+ connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
@@ -411,8 +688,11 @@ func TestBackend_Basic_CRLs(t *testing.T) {
// Test a self-signed client (root CA) that is trusted
func TestBackend_basic_singleCert(t *testing.T) {
- connState := testConnState(t, "test-fixtures/root/rootcacert.pem",
+ connState, err := testConnState("test-fixtures/root/rootcacert.pem",
"test-fixtures/root/rootcakey.pem", "test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
@@ -432,8 +712,11 @@ func TestBackend_basic_singleCert(t *testing.T) {
// Test against a collection of matching and non-matching rules
func TestBackend_mixed_constraints(t *testing.T) {
- connState := testConnState(t, "test-fixtures/keys/cert.pem",
+ connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatalf("err: %v", err)
@@ -454,8 +737,11 @@ func TestBackend_mixed_constraints(t *testing.T) {
// Test an untrusted client
func TestBackend_untrusted(t *testing.T) {
- connState := testConnState(t, "test-fixtures/keys/cert.pem",
+ connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
logicaltest.Test(t, logicaltest.TestCase{
Backend: testFactory(t),
Steps: []logicaltest.TestStep{
@@ -682,17 +968,17 @@ func testAccStepCertNoLease(
}
}
-func testConnState(t *testing.T, certPath, keyPath, rootCertPath string) tls.ConnectionState {
+func testConnState(certPath, keyPath, rootCertPath string) (tls.ConnectionState, error) {
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
- t.Fatalf("err: %v", err)
+ return tls.ConnectionState{}, err
}
rootConfig := &rootcerts.Config{
CAFile: rootCertPath,
}
rootCAs, err := rootcerts.LoadCACerts(rootConfig)
if err != nil {
- t.Fatalf("err: %v", err)
+ return tls.ConnectionState{}, err
}
listenConf := &tls.Config{
Certificates: []tls.Certificate{cert},
@@ -702,37 +988,72 @@ func testConnState(t *testing.T, certPath, keyPath, rootCertPath string) tls.Con
}
dialConf := new(tls.Config)
*dialConf = *listenConf
+ // start a server
list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf)
if err != nil {
- t.Fatalf("err: %v", err)
+ return tls.ConnectionState{}, err
}
defer list.Close()
+ // Accept connections.
+ serverErrors := make(chan error, 1)
+ connState := make(chan tls.ConnectionState)
+ go func() {
+ defer close(connState)
+ serverConn, err := list.Accept()
+ serverErrors <- err
+ if err != nil {
+ close(serverErrors)
+ return
+ }
+ defer serverConn.Close()
+
+ // Read the ping
+ buf := make([]byte, 4)
+ _, err = serverConn.Read(buf)
+ if (err != nil) && (err != io.EOF) {
+ serverErrors <- err
+ close(serverErrors)
+ return
+ } else {
+ // EOF is a reasonable error condition, so swallow it.
+ serverErrors <- nil
+ }
+ close(serverErrors)
+ connState <- serverConn.(*tls.Conn).ConnectionState()
+ }()
+
+ // Establish a connection from the client side and write a few bytes.
+ clientErrors := make(chan error, 1)
go func() {
addr := list.Addr().String()
conn, err := tls.Dial("tcp", addr, dialConf)
+ clientErrors <- err
if err != nil {
- t.Fatalf("err: %v", err)
+ close(clientErrors)
+ return
}
defer conn.Close()
// Write ping
- conn.Write([]byte("ping"))
+ _, err = conn.Write([]byte("ping"))
+ clientErrors <- err
+ close(clientErrors)
}()
- serverConn, err := list.Accept()
- if err != nil {
- t.Fatalf("err: %v", err)
+ for err = range clientErrors {
+ if err != nil {
+ return tls.ConnectionState{}, fmt.Errorf("error in client goroutine:%v", err)
+ }
}
- defer serverConn.Close()
-
- // Read the pign
- buf := make([]byte, 4)
- serverConn.Read(buf)
+ for err = range serverErrors {
+ if err != nil {
+ return tls.ConnectionState{}, fmt.Errorf("error in server goroutine:%v", err)
+ }
+ }
// Grab the current state
- connState := serverConn.(*tls.Conn).ConnectionState()
- return connState
+ return <-connState, nil
}
func Test_Renew(t *testing.T) {
@@ -750,8 +1071,11 @@ func Test_Renew(t *testing.T) {
}
b := lb.(*backend)
- connState := testConnState(t, "test-fixtures/keys/cert.pem",
+ connState, err := testConnState("test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem")
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem")
if err != nil {
t.Fatal(err)
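
Note: the rewritten connectionState (and testConnState later in this file) coordinates the client and server through buffered error channels instead of calling t.Fatal from goroutines, which the testing package does not support. A stripped-down sketch of the same control flow over plain TCP, with the ping payload standing in for the TLS connection state:

    package main

    import (
        "fmt"
        "io"
        "net"
    )

    // pingState mirrors the harness above: the server goroutine reports
    // problems on a buffered channel and hands the result over a second
    // channel, so the caller can return errors normally.
    func pingState(addr string) (string, error) {
        list, err := net.Listen("tcp", addr)
        if err != nil {
            return "", err
        }
        defer list.Close()

        serverErrors := make(chan error, 1)
        state := make(chan string)
        go func() {
            defer close(state)
            conn, err := list.Accept()
            if err != nil {
                serverErrors <- err
                close(serverErrors)
                return
            }
            defer conn.Close()
            buf := make([]byte, 4)
            if _, err := conn.Read(buf); err != nil && err != io.EOF {
                serverErrors <- err
                close(serverErrors)
                return
            }
            close(serverErrors)
            state <- string(buf) // stands in for grabbing the TLS ConnectionState
        }()

        clientErrors := make(chan error, 1)
        go func() {
            conn, err := net.Dial("tcp", list.Addr().String())
            if err != nil {
                clientErrors <- err
                close(clientErrors)
                return
            }
            defer conn.Close()
            if _, err := conn.Write([]byte("ping")); err != nil {
                clientErrors <- err
            }
            close(clientErrors)
        }()

        for err := range clientErrors {
            if err != nil {
                return "", fmt.Errorf("error in client goroutine: %v", err)
            }
        }
        for err := range serverErrors {
            if err != nil {
                return "", fmt.Errorf("error in server goroutine: %v", err)
            }
        }
        return <-state, nil
    }

    func main() {
        payload, err := pingState("127.0.0.1:0")
        fmt.Println(payload, err) // typically: ping <nil>
    }
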
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go
index 66809c2..a1071fc 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go
@@ -10,13 +10,13 @@ import (
type CLIHandler struct{}
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
var data struct {
Mount string `mapstructure:"mount"`
Name string `mapstructure:"name"`
}
if err := mapstructure.WeakDecode(m, &data); err != nil {
- return "", err
+ return nil, err
}
if data.Mount == "" {
@@ -29,13 +29,13 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
path := fmt.Sprintf("auth/%s/login", data.Mount)
secret, err := c.Logical().Write(path, options)
if err != nil {
- return "", err
+ return nil, err
}
if secret == nil {
- return "", fmt.Errorf("empty response from credential provider")
+ return nil, fmt.Errorf("empty response from credential provider")
}
- return secret.Auth.ClientToken, nil
+ return secret, nil
}
func (h *CLIHandler) Help() string {
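
Note: every CLIHandler.Auth in this update now returns the full *api.Secret instead of a bare token string. A hedged sketch of what callers gain; field names follow the vendored api package, while the handler and client wiring are illustrative:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/vault/api"
    )

    type authenticator interface {
        Auth(c *api.Client, m map[string]string) (*api.Secret, error)
    }

    // printAuth shows the extra information available once Auth returns the
    // whole secret rather than just secret.Auth.ClientToken.
    func printAuth(h authenticator, c *api.Client) {
        secret, err := h.Auth(c, map[string]string{"mount": "cert"})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("token:   ", secret.Auth.ClientToken)
        fmt.Println("policies:", secret.Auth.Policies)
        fmt.Println("lease:   ", secret.Auth.LeaseDuration)
    }

    func main() {
        // Wiring a real client requires a running Vault; shown for shape only.
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        _ = client // printAuth(CLIHandler{}, client) against a live server
    }
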
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go
index 2c002f6..fc5254f 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go
@@ -52,7 +52,7 @@ certificate.`,
},
"policies": &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeCommaStringSlice,
Description: "Comma-seperated list of policies.",
},
@@ -133,7 +133,7 @@ func (b *backend) pathCertRead(
Data: map[string]interface{}{
"certificate": cert.Certificate,
"display_name": cert.DisplayName,
- "policies": strings.Join(cert.Policies, ","),
+ "policies": cert.Policies,
"ttl": duration / time.Second,
},
}, nil
@@ -144,7 +144,7 @@ func (b *backend) pathCertWrite(
name := strings.ToLower(d.Get("name").(string))
certificate := d.Get("certificate").(string)
displayName := d.Get("display_name").(string)
- policies := policyutil.ParsePolicies(d.Get("policies").(string))
+ policies := policyutil.ParsePolicies(d.Get("policies"))
allowedNames := d.Get("allowed_names").([]string)
// Default the display name to the certificate name if not given
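
Note: switching the policies field to TypeCommaStringSlice (here and in the ldap and okta paths below) means writes accept either a comma-separated string or a JSON list, and reads return a real list. A rough sketch of the accepted-input behavior; this approximates, rather than reproduces, policyutil.ParsePolicies:

    package main

    import (
        "fmt"
        "strings"
    )

    // parsePolicies loosely mimics TypeCommaStringSlice plus
    // policyutil.ParsePolicies: accept a comma string or a list, trim,
    // lowercase, and drop empties. A behavioral sketch, not Vault's code.
    func parsePolicies(raw interface{}) []string {
        var parts []string
        switch v := raw.(type) {
        case string:
            parts = strings.Split(v, ",")
        case []string:
            parts = v
        }
        out := make([]string, 0, len(parts))
        for _, p := range parts {
            p = strings.ToLower(strings.TrimSpace(p))
            if p != "" {
                out = append(out, p)
            }
        }
        return out
    }

    func main() {
        fmt.Println(parsePolicies("Default, testpolicy1 ,testpolicy2"))
        fmt.Println(parsePolicies([]string{"grouppolicy", "userpolicy"}))
    }
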
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go
index 164bbe7..2faecd3 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go
@@ -156,11 +156,22 @@ func (b *backend) verifyCredentials(req *logical.Request, d *framework.FieldData
clientCert := connState.PeerCertificates[0]
// Allow constraining the login request to a single CertEntry
- certName := d.Get("name").(string)
+ var certName string
+ if req.Auth != nil { // It's a renewal, use the saved certName
+ certName = req.Auth.Metadata["cert_name"]
+ } else {
+ certName = d.Get("name").(string)
+ }
// Load the trusted certificates
roots, trusted, trustedNonCAs := b.loadTrustedCerts(req.Storage, certName)
+ // Get the list of full chains matching the connection
+ trustedChains, err := validateConnState(roots, connState)
+ if err != nil {
+ return nil, nil, err
+ }
+
// If trustedNonCAs is not empty it means that client had registered a non-CA cert
// with the backend.
if len(trustedNonCAs) != 0 {
@@ -175,12 +186,6 @@ func (b *backend) verifyCredentials(req *logical.Request, d *framework.FieldData
}
}
- // Get the list of full chains matching the connection
- trustedChains, err := validateConnState(roots, connState)
- if err != nil {
- return nil, nil, err
- }
-
// If no trusted chain was found, client is not authenticated
if len(trustedChains) == 0 {
return nil, logical.ErrorResponse("invalid certificate or no client certificate supplied"), nil
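
Note: the login path now detects renewals via req.Auth and reuses the certificate name recorded at login time (the metadata write happens outside this hunk). A hedged sketch of that selection, assuming cert_name is stored in Auth.Metadata:

    package cert

    import "github.com/hashicorp/vault/logical"

    // certNameForRequest prefers the name saved during the original login so
    // a renewal is verified against the same cert entry, not whatever name
    // happens to arrive with the renew request.
    func certNameForRequest(req *logical.Request, requested string) string {
        if req.Auth != nil { // renewal path
            return req.Auth.Metadata["cert_name"]
        }
        return requested
    }
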
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go
index 0dbe893..b53e95f 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go
@@ -1,6 +1,8 @@
package github
import (
+ "context"
+
"github.com/google/go-github/github"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/logical"
@@ -9,7 +11,11 @@ import (
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend() *backend {
@@ -44,7 +50,8 @@ func Backend() *backend {
pathLogin(&b),
}, allPaths...),
- AuthRenew: b.pathLoginRenew,
+ AuthRenew: b.pathLoginRenew,
+ BackendType: logical.TypeCredential,
}
return &b
@@ -63,7 +70,8 @@ type backend struct {
func (b *backend) Client(token string) (*github.Client, error) {
tc := cleanhttp.DefaultClient()
if token != "" {
- tc = oauth2.NewClient(oauth2.NoContext, &tokenSource{Value: token})
+ ctx := context.WithValue(context.Background(), oauth2.HTTPClient, tc)
+ tc = oauth2.NewClient(ctx, &tokenSource{Value: token})
}
return github.NewClient(tc), nil
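
Note: oauth2.NoContext is the deprecated spelling; passing a context that carries oauth2.HTTPClient is how the library is told which base http.Client to wrap. Without it, oauth2.NewClient silently falls back to http.DefaultClient and discards cleanhttp's transport settings. A minimal sketch with a static token source:

    package main

    import (
        "context"
        "net/http"
        "time"

        "golang.org/x/oauth2"
    )

    func main() {
        // Base client whose transport and timeout settings should survive.
        base := &http.Client{Timeout: 30 * time.Second}

        // oauth2.NewClient looks up oauth2.HTTPClient in the context to find
        // the client to wrap; without it, http.DefaultClient is used.
        ctx := context.WithValue(context.Background(), oauth2.HTTPClient, base)
        ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})
        authed := oauth2.NewClient(ctx, ts)
        _ = authed // same transport as base, plus token injection
    }
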
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go
index 037c2ca..6dd7da8 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go
@@ -48,9 +48,8 @@ func TestBackend_Config(t *testing.T) {
}
logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- PreCheck: func() { testAccPreCheck(t) },
- Backend: b,
+ PreCheck: func() { testAccPreCheck(t) },
+ Backend: b,
Steps: []logicaltest.TestStep{
testConfigWrite(t, config_data1),
testLoginWrite(t, login_data, expectedTTL1.Nanoseconds(), false),
@@ -105,9 +104,8 @@ func TestBackend_basic(t *testing.T) {
}
logicaltest.Test(t, logicaltest.TestCase{
- AcceptanceTest: true,
- PreCheck: func() { testAccPreCheck(t) },
- Backend: b,
+ PreCheck: func() { testAccPreCheck(t) },
+ Backend: b,
Steps: []logicaltest.TestStep{
testAccStepConfig(t, false),
testAccMap(t, "default", "fakepol"),
@@ -131,15 +129,15 @@ func TestBackend_basic(t *testing.T) {
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("GITHUB_TOKEN"); v == "" {
- t.Fatal("GITHUB_TOKEN must be set for acceptance tests")
+ t.Skip("GITHUB_TOKEN must be set for acceptance tests")
}
if v := os.Getenv("GITHUB_ORG"); v == "" {
- t.Fatal("GITHUB_ORG must be set for acceptance tests")
+ t.Skip("GITHUB_ORG must be set for acceptance tests")
}
if v := os.Getenv("GITHUB_BASEURL"); v == "" {
- t.Fatal("GITHUB_BASEURL must be set for acceptance tests (use 'https://api.github.com' if you don't know what you're doing)")
+ t.Skip("GITHUB_BASEURL must be set for acceptance tests (use 'https://api.github.com' if you don't know what you're doing)")
}
}
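
Note: switching the prechecks from t.Fatal to t.Skip lets machines without acceptance credentials record skips instead of failures. The same idiom, condensed, with the env var names from the test above:

    package github_test

    import (
        "os"
        "testing"
    )

    // testAccPreCheck skips (rather than fails) when acceptance credentials
    // are absent, so CI with the variables set still runs the full suite.
    func testAccPreCheck(t *testing.T) {
        for _, v := range []string{"GITHUB_TOKEN", "GITHUB_ORG", "GITHUB_BASEURL"} {
            if os.Getenv(v) == "" {
                t.Skipf("%s must be set for acceptance tests", v)
            }
        }
    }
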
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go
index dda1dac..557939b 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go
@@ -10,7 +10,7 @@ import (
type CLIHandler struct{}
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
mount, ok := m["mount"]
if !ok {
mount = "github"
@@ -19,7 +19,7 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
token, ok := m["token"]
if !ok {
if token = os.Getenv("VAULT_AUTH_GITHUB_TOKEN"); token == "" {
- return "", fmt.Errorf("GitHub token should be provided either as 'value' for 'token' key,\nor via an env var VAULT_AUTH_GITHUB_TOKEN")
+ return nil, fmt.Errorf("GitHub token should be provided either as 'value' for 'token' key,\nor via an env var VAULT_AUTH_GITHUB_TOKEN")
}
}
@@ -28,13 +28,13 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
"token": token,
})
if err != nil {
- return "", err
+ return nil, err
}
if secret == nil {
- return "", fmt.Errorf("empty response from credential provider")
+ return nil, fmt.Errorf("empty response from credential provider")
}
- return secret.Auth.ClientToken, nil
+ return secret, nil
}
func (h *CLIHandler) Help() string {
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go
index 9db2e64..c211450 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go
@@ -5,9 +5,9 @@ import (
"net/url"
"time"
+ "github.com/fatih/structs"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
- "github.com/fatih/structs"
)
func pathConfig(b *backend) *framework.Path {
@@ -37,7 +37,7 @@ API-compatible authentication server.`,
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathConfigWrite,
- logical.ReadOperation: b.pathConfigRead,
+ logical.ReadOperation: b.pathConfigRead,
},
}
}
@@ -77,10 +77,10 @@ func (b *backend) pathConfigWrite(
}
entry, err := logical.StorageEntryJSON("config", config{
- Organization: organization,
- BaseURL: baseURL,
- TTL: ttl,
- MaxTTL: maxTTL,
+ Organization: organization,
+ BaseURL: baseURL,
+ TTL: ttl,
+ MaxTTL: maxTTL,
})
if err != nil {
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go
index d165626..835b4a6 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go
@@ -13,7 +13,11 @@ import (
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend() *backend {
@@ -39,7 +43,8 @@ func Backend() *backend {
mfa.MFAPaths(b.Backend, pathLogin(&b))...,
),
- AuthRenew: b.pathLoginRenew,
+ AuthRenew: b.pathLoginRenew,
+ BackendType: logical.TypeCredential,
}
return &b
@@ -118,7 +123,12 @@ func (b *backend) Login(req *logical.Request, username string, password string)
}
// Try to bind as the login user. This is where the actual authentication takes place.
- if err = c.Bind(userBindDN, password); err != nil {
+ if len(password) > 0 {
+ err = c.Bind(userBindDN, password)
+ } else {
+ err = c.UnauthenticatedBind(userBindDN)
+ }
+ if err != nil {
return nil, logical.ErrorResponse(fmt.Sprintf("LDAP bind failed: %v", err)), nil
}
@@ -184,8 +194,8 @@ func (b *backend) Login(req *logical.Request, username string, password string)
if len(policies) == 0 {
errStr := "user is not a member of any authorized group"
- if len(ldapResponse.Warnings()) > 0 {
- errStr = fmt.Sprintf("%s; additionally, %s", errStr, ldapResponse.Warnings()[0])
+ if len(ldapResponse.Warnings) > 0 {
+ errStr = fmt.Sprintf("%s; additionally, %s", errStr, ldapResponse.Warnings[0])
}
ldapResponse.Data["error"] = errStr
@@ -232,7 +242,13 @@ func (b *backend) getCN(dn string) string {
func (b *backend) getUserBindDN(cfg *ConfigEntry, c *ldap.Conn, username string) (string, error) {
bindDN := ""
if cfg.DiscoverDN || (cfg.BindDN != "" && cfg.BindPassword != "") {
- if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil {
+ var err error
+ if cfg.BindPassword != "" {
+ err = c.Bind(cfg.BindDN, cfg.BindPassword)
+ } else {
+ err = c.UnauthenticatedBind(cfg.BindDN)
+ }
+ if err != nil {
return bindDN, fmt.Errorf("LDAP bind (service) failed: %v", err)
}
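
Note: with an empty password, an LDAP simple bind is treated as an unauthenticated bind (RFC 4513), and many servers report success without verifying anything, so the change above makes that case explicit rather than accidental. The guard, isolated as a helper; the go-ldap import path is assumed:

    package ldaputil

    import "github.com/go-ldap/ldap"

    // bindUser performs a simple bind when a password is present and an
    // explicit unauthenticated bind otherwise, so the caller can decide
    // whether the latter should ever be allowed.
    func bindUser(c *ldap.Conn, bindDN, password string) error {
        if len(password) > 0 {
            return c.Bind(bindDN, password)
        }
        return c.UnauthenticatedBind(bindDN)
    }
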
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go
index 51b4df7..3b1d936 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go
@@ -7,6 +7,7 @@ import (
"testing"
"time"
+ "github.com/hashicorp/vault/helper/policyutil"
"github.com/hashicorp/vault/logical"
logicaltest "github.com/hashicorp/vault/logical/testing"
"github.com/mitchellh/mapstructure"
@@ -21,7 +22,7 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
t.Fatalf("failed to create backend")
}
- _, err := b.Backend.Setup(config)
+ err := b.Backend.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -94,7 +95,7 @@ func TestLdapAuthBackend_UserPolicies(t *testing.T) {
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v", err, resp)
}
- expected := []string{"default", "grouppolicy", "userpolicy"}
+ expected := []string{"grouppolicy", "userpolicy"}
if !reflect.DeepEqual(expected, resp.Auth.Policies) {
t.Fatalf("bad: policies: expected: %q, actual: %q", expected, resp.Auth.Policies)
}
@@ -211,7 +212,7 @@ func TestBackend_groupCrud(t *testing.T) {
Backend: b,
Steps: []logicaltest.TestStep{
testAccStepGroup(t, "g1", "foo"),
- testAccStepReadGroup(t, "g1", "default,foo"),
+ testAccStepReadGroup(t, "g1", "foo"),
testAccStepDeleteGroup(t, "g1"),
testAccStepReadGroup(t, "g1", ""),
},
@@ -357,13 +358,13 @@ func testAccStepReadGroup(t *testing.T, group string, policies string) logicalte
}
var d struct {
- Policies string `mapstructure:"policies"`
+ Policies []string `mapstructure:"policies"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
- if d.Policies != policies {
+ if !reflect.DeepEqual(d.Policies, policyutil.ParsePolicies(policies)) {
return fmt.Errorf("bad: %#v", resp)
}
@@ -463,8 +464,8 @@ func testAccStepLoginNoGroupDN(t *testing.T, user string, pass string) logicalte
// Verifies a search without defined GroupDN returns a warning rather than failing
Check: func(resp *logical.Response) error {
- if len(resp.Warnings()) != 1 {
- return fmt.Errorf("expected a warning due to no group dn, got: %#v", resp.Warnings())
+ if len(resp.Warnings) != 1 {
+ return fmt.Errorf("expected a warning due to no group dn, got: %#v", resp.Warnings)
}
return logicaltest.TestCheckAuth([]string{"bar", "default"})(resp)
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go
index e4d151f..262bc99 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go
@@ -11,7 +11,7 @@ import (
type CLIHandler struct{}
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
mount, ok := m["mount"]
if !ok {
mount = "ldap"
@@ -21,7 +21,7 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
if !ok {
username = usernameFromEnv()
if username == "" {
- return "", fmt.Errorf("'username' not supplied and neither 'LOGNAME' nor 'USER' env vars set")
+ return nil, fmt.Errorf("'username' not supplied and neither 'LOGNAME' nor 'USER' env vars set")
}
}
password, ok := m["password"]
@@ -31,7 +31,7 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
password, err = pwd.Read(os.Stdin)
fmt.Println()
if err != nil {
- return "", err
+ return nil, err
}
}
@@ -51,13 +51,13 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
path := fmt.Sprintf("auth/%s/login/%s", mount, username)
secret, err := c.Logical().Write(path, data)
if err != nil {
- return "", err
+ return nil, err
}
if secret == nil {
- return "", fmt.Errorf("empty response from credential provider")
+ return nil, fmt.Errorf("empty response from credential provider")
}
- return secret.Auth.ClientToken, nil
+ return secret, nil
}
func (h *CLIHandler) Help() string {
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go
index 4fc772e..bf76715 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go
@@ -3,6 +3,7 @@ package ldap
import (
"crypto/tls"
"crypto/x509"
+ "encoding/pem"
"fmt"
"net"
"net/url"
@@ -225,6 +226,15 @@ func (b *backend) newConfigEntry(d *framework.FieldData) (*ConfigEntry, error) {
}
certificate := d.Get("certificate").(string)
if certificate != "" {
+ block, _ := pem.Decode([]byte(certificate))
+
+ if block == nil || block.Type != "CERTIFICATE" {
+ return nil, fmt.Errorf("failed to decode PEM block in the certificate")
+ }
+ _, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate %s", err.Error())
+ }
cfg.Certificate = certificate
}
insecureTLS := d.Get("insecure_tls").(bool)
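
Note: the new check rejects malformed certificate input at write time instead of storing it and failing later during TLS setup. The same validation as a standalone helper:

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
    )

    // validateCertPEM mirrors the check added above: require one PEM block
    // of type CERTIFICATE that parses as X.509 before accepting the input.
    func validateCertPEM(certificate string) error {
        block, _ := pem.Decode([]byte(certificate))
        if block == nil || block.Type != "CERTIFICATE" {
            return fmt.Errorf("failed to decode PEM block in the certificate")
        }
        if _, err := x509.ParseCertificate(block.Bytes); err != nil {
            return fmt.Errorf("failed to parse certificate: %v", err)
        }
        return nil
    }

    func main() {
        fmt.Println(validateCertPEM("not a pem")) // fails at pem.Decode
    }
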
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go
index 998fdc4..48c0d25 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go
@@ -1,8 +1,6 @@
package ldap
import (
- "strings"
-
"github.com/hashicorp/vault/helper/policyutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
@@ -31,7 +29,7 @@ func pathGroups(b *backend) *framework.Path {
},
"policies": &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeCommaStringSlice,
Description: "Comma-separated list of policies associated to the group.",
},
},
@@ -86,7 +84,7 @@ func (b *backend) pathGroupRead(
return &logical.Response{
Data: map[string]interface{}{
- "policies": strings.Join(group.Policies, ","),
+ "policies": group.Policies,
},
}, nil
}
@@ -95,7 +93,7 @@ func (b *backend) pathGroupWrite(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
// Store it
entry, err := logical.StorageEntryJSON("group/"+d.Get("name").(string), &GroupEntry{
- Policies: policyutil.ParsePolicies(d.Get("policies").(string)),
+ Policies: policyutil.ParsePolicies(d.Get("policies")),
})
if err != nil {
return nil, err
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go
index e859adb..2266e8d 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go
@@ -3,7 +3,6 @@ package ldap
import (
"fmt"
"sort"
- "strings"
"github.com/hashicorp/vault/helper/policyutil"
"github.com/hashicorp/vault/logical"
@@ -59,7 +58,6 @@ func (b *backend) pathLogin(
Policies: policies,
Metadata: map[string]string{
"username": username,
- "policies": strings.Join(policies, ","),
},
InternalData: map[string]interface{}{
"password": password,
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go
index 605f779..6845a41 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go
@@ -37,7 +37,7 @@ func pathUsers(b *backend) *framework.Path {
},
"policies": &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeCommaStringSlice,
Description: "Comma-separated list of policies associated with the user.",
},
},
@@ -93,7 +93,7 @@ func (b *backend) pathUserRead(
return &logical.Response{
Data: map[string]interface{}{
"groups": strings.Join(user.Groups, ","),
- "policies": strings.Join(user.Policies, ","),
+ "policies": user.Policies,
},
}, nil
}
@@ -102,7 +102,7 @@ func (b *backend) pathUserWrite(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
name := d.Get("name").(string)
groups := strutil.RemoveDuplicates(strutil.ParseStringSlice(d.Get("groups").(string), ","), false)
- policies := policyutil.ParsePolicies(d.Get("policies").(string))
+ policies := policyutil.ParsePolicies(d.Get("policies"))
for i, g := range groups {
groups[i] = strings.TrimSpace(g)
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go
index 43a1647..951d190 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go
@@ -3,12 +3,17 @@ package okta
import (
"fmt"
+ "github.com/chrismalek/oktasdk-go/okta"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend() *backend {
@@ -31,7 +36,8 @@ func Backend() *backend {
pathLogin(&b),
}),
- AuthRenew: b.pathLoginRenew,
+ AuthRenew: b.pathLoginRenew,
+ BackendType: logical.TypeCredential,
}
return &b
@@ -51,49 +57,76 @@ func (b *backend) Login(req *logical.Request, username string, password string)
}
client := cfg.OktaClient()
- auth, err := client.Authenticate(username, password)
+
+ type embeddedResult struct {
+ User okta.User `json:"user"`
+ }
+
+ type authResult struct {
+ Embedded embeddedResult `json:"_embedded"`
+ }
+
+ authReq, err := client.NewRequest("POST", "authn", map[string]interface{}{
+ "username": username,
+ "password": password,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var result authResult
+ rsp, err := client.Do(authReq, &result)
if err != nil {
return nil, logical.ErrorResponse(fmt.Sprintf("Okta auth failed: %v", err)), nil
}
- if auth == nil {
+ if rsp == nil {
return nil, logical.ErrorResponse("okta auth backend unexpected failure"), nil
}
- oktaGroups, err := b.getOktaGroups(cfg, auth.Embedded.User.ID)
- if err != nil {
- return nil, logical.ErrorResponse(err.Error()), nil
- }
- if b.Logger().IsDebug() {
- b.Logger().Debug("auth/okta: Groups fetched from Okta", "num_groups", len(oktaGroups), "groups", oktaGroups)
- }
-
oktaResponse := &logical.Response{
Data: map[string]interface{}{},
}
- if len(oktaGroups) == 0 {
- errString := fmt.Sprintf(
- "no Okta groups found; only policies from locally-defined groups available")
- oktaResponse.AddWarning(errString)
- }
var allGroups []string
+ // Only query the Okta API for group membership if we have a token
+ if cfg.Token != "" {
+ oktaGroups, err := b.getOktaGroups(client, &result.Embedded.User)
+ if err != nil {
+ return nil, logical.ErrorResponse(fmt.Sprintf("okta failure retrieving groups: %v", err)), nil
+ }
+ if len(oktaGroups) == 0 {
+ errString := "no Okta groups found; only policies from locally-defined groups available"
+ oktaResponse.AddWarning(errString)
+ }
+ allGroups = append(allGroups, oktaGroups...)
+ }
+
// Import the custom added groups from okta backend
user, err := b.User(req.Storage, username)
+ if err != nil {
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/okta: error looking up user", "error", err)
+ }
+ }
if err == nil && user != nil && user.Groups != nil {
if b.Logger().IsDebug() {
b.Logger().Debug("auth/okta: adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups)
}
allGroups = append(allGroups, user.Groups...)
}
- // Merge local and Okta groups
- allGroups = append(allGroups, oktaGroups...)
// Retrieve policies
var policies []string
for _, groupName := range allGroups {
- group, err := b.Group(req.Storage, groupName)
- if err == nil && group != nil && group.Policies != nil {
- policies = append(policies, group.Policies...)
+ entry, _, err := b.Group(req.Storage, groupName)
+ if err != nil {
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/okta: error looking up group policies", "error", err)
+ }
+ }
+ if err == nil && entry != nil && entry.Policies != nil {
+ policies = append(policies, entry.Policies...)
}
}
@@ -104,8 +137,8 @@ func (b *backend) Login(req *logical.Request, username string, password string)
if len(policies) == 0 {
errStr := "user is not a member of any authorized policy"
- if len(oktaResponse.Warnings()) > 0 {
- errStr = fmt.Sprintf("%s; additionally, %s", errStr, oktaResponse.Warnings()[0])
+ if len(oktaResponse.Warnings) > 0 {
+ errStr = fmt.Sprintf("%s; additionally, %s", errStr, oktaResponse.Warnings[0])
}
oktaResponse.Data["error"] = errStr
@@ -115,21 +148,22 @@ func (b *backend) Login(req *logical.Request, username string, password string)
return policies, oktaResponse, nil
}
-func (b *backend) getOktaGroups(cfg *ConfigEntry, userID string) ([]string, error) {
- if cfg.Token != "" {
- client := cfg.OktaClient()
- groups, err := client.Groups(userID)
- if err != nil {
- return nil, err
- }
-
- oktaGroups := make([]string, 0, len(*groups))
- for _, group := range *groups {
- oktaGroups = append(oktaGroups, group.Profile.Name)
- }
- return oktaGroups, err
+func (b *backend) getOktaGroups(client *okta.Client, user *okta.User) ([]string, error) {
+ rsp, err := client.Users.PopulateGroups(user)
+ if err != nil {
+ return nil, err
}
- return nil, nil
+ if rsp == nil {
+ return nil, fmt.Errorf("okta auth backend unexpected failure")
+ }
+ oktaGroups := make([]string, 0, len(user.Groups))
+ for _, group := range user.Groups {
+ oktaGroups = append(oktaGroups, group.Profile.Name)
+ }
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/okta: Groups fetched from Okta", "num_groups", len(oktaGroups), "groups", oktaGroups)
+ }
+ return oktaGroups, nil
}
const backendHelp = `
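
Note: the authn call above decodes Okta's /api/v1/authn response, which nests the user object under the _embedded key; that is why the intermediate embeddedResult struct exists. A self-contained sketch with a trimmed, illustrative payload:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type user struct {
        ID string `json:"id"`
    }

    type embeddedResult struct {
        User user `json:"user"`
    }

    type authResult struct {
        Embedded embeddedResult `json:"_embedded"`
    }

    func main() {
        // Trimmed, illustrative payload; a real authn response carries
        // status, a state token, and a much larger user object.
        payload := []byte(`{"_embedded":{"user":{"id":"00u1abcd"}}}`)
        var result authResult
        if err := json.Unmarshal(payload, &result); err != nil {
            panic(err)
        }
        fmt.Println(result.Embedded.User.ID) // 00u1abcd
    }
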
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go
index 7672dc0..9c2503d 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go
@@ -10,14 +10,21 @@ import (
"github.com/hashicorp/vault/helper/policyutil"
log "github.com/mgutz/logxi/v1"
+ "time"
+
"github.com/hashicorp/vault/logical"
logicaltest "github.com/hashicorp/vault/logical/testing"
)
func TestBackend_Config(t *testing.T) {
+ defaultLeaseTTLVal := time.Hour * 12
+ maxLeaseTTLVal := time.Hour * 24
b, err := Factory(&logical.BackendConfig{
Logger: logformat.NewVaultLogger(log.LevelTrace),
- System: &logical.StaticSystemView{},
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: defaultLeaseTTLVal,
+ MaxLeaseTTLVal: maxLeaseTTLVal,
+ },
})
if err != nil {
t.Fatalf("Unable to create backend: %s", err)
@@ -25,14 +32,17 @@ func TestBackend_Config(t *testing.T) {
username := os.Getenv("OKTA_USERNAME")
password := os.Getenv("OKTA_PASSWORD")
+ token := os.Getenv("OKTA_API_TOKEN")
configData := map[string]interface{}{
"organization": os.Getenv("OKTA_ORG"),
"base_url": "oktapreview.com",
}
+ updatedDuration := time.Hour * 1
configDataToken := map[string]interface{}{
- "token": os.Getenv("OKTA_API_TOKEN"),
+ "token": token,
+ "ttl": "1h",
}
logicaltest.Test(t, logicaltest.TestCase{
@@ -41,23 +51,23 @@ func TestBackend_Config(t *testing.T) {
Backend: b,
Steps: []logicaltest.TestStep{
testConfigCreate(t, configData),
- testLoginWrite(t, username, "wrong", "E0000004", nil),
- testLoginWrite(t, username, password, "user is not a member of any authorized policy", nil),
- testAccUserGroups(t, username, "local_group,local_group2"),
- testAccGroups(t, "local_group", "local_group_policy"),
- testLoginWrite(t, username, password, "", []string{"local_group_policy"}),
- testAccGroups(t, "Everyone", "everyone_group_policy,every_group_policy2"),
- testLoginWrite(t, username, password, "", []string{"local_group_policy"}),
+ testLoginWrite(t, username, "wrong", "E0000004", 0, nil),
+ testLoginWrite(t, username, password, "user is not a member of any authorized policy", 0, nil),
+ testAccUserGroups(t, username, "local_grouP,lOcal_group2"),
+ testAccGroups(t, "local_groUp", "loCal_group_policy"),
+ testLoginWrite(t, username, password, "", defaultLeaseTTLVal, []string{"local_group_policy"}),
+ testAccGroups(t, "everyoNe", "everyone_grouP_policy,eveRy_group_policy2"),
+ testLoginWrite(t, username, password, "", defaultLeaseTTLVal, []string{"local_group_policy"}),
testConfigUpdate(t, configDataToken),
- testConfigRead(t, configData),
- testLoginWrite(t, username, password, "", []string{"everyone_group_policy", "every_group_policy2", "local_group_policy"}),
- testAccGroups(t, "local_group2", "testgroup_group_policy"),
- testLoginWrite(t, username, password, "", []string{"everyone_group_policy", "every_group_policy2", "local_group_policy", "testgroup_group_policy"}),
+ testConfigRead(t, token, configData),
+ testLoginWrite(t, username, password, "", updatedDuration, []string{"everyone_group_policy", "every_group_policy2", "local_group_policy"}),
+ testAccGroups(t, "locAl_group2", "testgroup_group_policy"),
+ testLoginWrite(t, username, password, "", updatedDuration, []string{"everyone_group_policy", "every_group_policy2", "local_group_policy", "testgroup_group_policy"}),
},
})
}
-func testLoginWrite(t *testing.T, username, password, reason string, policies []string) logicaltest.TestStep {
+func testLoginWrite(t *testing.T, username, password, reason string, expectedTTL time.Duration, policies []string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "login/" + username,
@@ -76,6 +86,11 @@ func testLoginWrite(t *testing.T, username, password, reason string, policies []
if !policyutil.EquivalentPolicies(resp.Auth.Policies, policies) {
return fmt.Errorf("policy mismatch expected %v but got %v", policies, resp.Auth.Policies)
}
+
+ actualTTL := resp.Auth.LeaseOptions.TTL
+ if actualTTL != expectedTTL {
+ return fmt.Errorf("TTL mismatch expected %v but got %v", expectedTTL, actualTTL)
+ }
}
return nil
@@ -99,7 +114,7 @@ func testConfigUpdate(t *testing.T, d map[string]interface{}) logicaltest.TestSt
}
}
-func testConfigRead(t *testing.T, d map[string]interface{}) logicaltest.TestStep {
+func testConfigRead(t *testing.T, token string, d map[string]interface{}) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "config",
@@ -108,16 +123,18 @@ func testConfigRead(t *testing.T, d map[string]interface{}) logicaltest.TestStep
return resp.Error()
}
- if resp.Data["Org"] != d["organization"] {
+ if resp.Data["organization"] != d["organization"] {
return fmt.Errorf("Org mismatch expected %s but got %s", d["organization"], resp.Data["Org"])
}
- if resp.Data["BaseURL"] != d["base_url"] {
+ if resp.Data["base_url"] != d["base_url"] {
return fmt.Errorf("BaseURL mismatch expected %s but got %s", d["base_url"], resp.Data["BaseURL"])
}
- if _, exists := resp.Data["Token"]; exists {
- return fmt.Errorf("token should not be returned on a read request")
+ for _, value := range resp.Data {
+ if value == token {
+ return fmt.Errorf("token should not be returned on a read request")
+ }
}
return nil
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go
index 355e8cb..f5f8502 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go
@@ -13,7 +13,7 @@ import (
type CLIHandler struct{}
// Auth cli method
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
mount, ok := m["mount"]
if !ok {
mount = "okta"
@@ -21,7 +21,7 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
username, ok := m["username"]
if !ok {
- return "", fmt.Errorf("'username' var must be set")
+ return nil, fmt.Errorf("'username' var must be set")
}
password, ok := m["password"]
if !ok {
@@ -30,7 +30,7 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
password, err = pwd.Read(os.Stdin)
fmt.Println()
if err != nil {
- return "", err
+ return nil, err
}
}
@@ -41,13 +41,13 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
path := fmt.Sprintf("auth/%s/login/%s", mount, username)
secret, err := c.Logical().Write(path, data)
if err != nil {
- return "", err
+ return nil, err
}
if secret == nil {
- return "", fmt.Errorf("empty response from credential provider")
+ return nil, fmt.Errorf("empty response from credential provider")
}
- return secret.Auth.ClientToken, nil
+ return secret, nil
}
// Help method for okta cli
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go
index b454f7e..e879302 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go
@@ -4,9 +4,17 @@ import (
"fmt"
"net/url"
+ "time"
+
+ "github.com/chrismalek/oktasdk-go/okta"
+ "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
- "github.com/sstarcher/go-okta"
+)
+
+const (
+ defaultBaseURL = "okta.com"
+ previewBaseURL = "oktapreview.com"
)
func pathConfig(b *backend) *framework.Path {
@@ -15,16 +23,35 @@ func pathConfig(b *backend) *framework.Path {
Fields: map[string]*framework.FieldSchema{
"organization": &framework.FieldSchema{
Type: framework.TypeString,
- Description: "Okta organization to authenticate against",
+ Description: "(DEPRECATED) Okta organization to authenticate against. Use org_name instead.",
+ },
+ "org_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the organization to be used in the Okta API.",
},
"token": &framework.FieldSchema{
Type: framework.TypeString,
- Description: "Okta admin API token",
+ Description: "(DEPRECATED) Okta admin API token. Use api_token instead.",
+ },
+ "api_token": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Okta API key.",
},
"base_url": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `The API endpoint to use. Useful if you
-are using Okta development accounts.`,
+ Type: framework.TypeString,
+ Description: `The base domain to use for the Okta API. When not specified in the configuration, "okta.com" is used.`,
+ },
+ "production": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `(DEPRECATED) Use base_url.`,
+ },
+ "ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: `Duration after which authentication will expire`,
+ },
+ "max_ttl": &framework.FieldSchema{
+ Type: framework.TypeDurationSecond,
+ Description: `Maximum duration after which authentication will expire`,
},
},
@@ -73,17 +100,24 @@ func (b *backend) pathConfigRead(
resp := &logical.Response{
Data: map[string]interface{}{
- "Org": cfg.Org,
- "BaseURL": cfg.BaseURL,
+ "organization": cfg.Org,
+ "org_name": cfg.Org,
+ "ttl": cfg.TTL,
+ "max_ttl": cfg.MaxTTL,
},
}
+ if cfg.BaseURL != "" {
+ resp.Data["base_url"] = cfg.BaseURL
+ }
+ if cfg.Production != nil {
+ resp.Data["production"] = *cfg.Production
+ }
return resp, nil
}
func (b *backend) pathConfigWrite(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- org := d.Get("organization").(string)
cfg, err := b.Config(req.Storage)
if err != nil {
return nil, err
@@ -92,30 +126,69 @@ func (b *backend) pathConfigWrite(
// Due to the existence check, entry will only be nil if it's a create
// operation, so just create a new one
if cfg == nil {
- cfg = &ConfigEntry{
- Org: org,
- }
+ cfg = &ConfigEntry{}
}
- token, ok := d.GetOk("token")
+ org, ok := d.GetOk("org_name")
+ if ok {
+ cfg.Org = org.(string)
+ }
+ if cfg.Org == "" {
+ org, ok = d.GetOk("organization")
+ if ok {
+ cfg.Org = org.(string)
+ }
+ }
+ if cfg.Org == "" && req.Operation == logical.CreateOperation {
+ return logical.ErrorResponse("org_name is missing"), nil
+ }
+
+ token, ok := d.GetOk("api_token")
if ok {
cfg.Token = token.(string)
- } else if req.Operation == logical.CreateOperation {
- cfg.Token = d.Get("token").(string)
+ }
+ if cfg.Token == "" {
+ token, ok = d.GetOk("token")
+ if ok {
+ cfg.Token = token.(string)
+ }
}
- baseURL, ok := d.GetOk("base_url")
+ baseURLRaw, ok := d.GetOk("base_url")
if ok {
- baseURLString := baseURL.(string)
- if len(baseURLString) != 0 {
- _, err = url.Parse(baseURLString)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("Error parsing given base_url: %s", err)), nil
- }
- cfg.BaseURL = baseURLString
+ baseURL := baseURLRaw.(string)
+ _, err = url.Parse(fmt.Sprintf("https://%s.%s", cfg.Org, baseURL))
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error parsing given base_url: %s", err)), nil
}
+ cfg.BaseURL = baseURL
+ }
+
+ // We only care about the production flag when base_url is not set. It is
+ // for compatibility reasons.
+ if cfg.BaseURL == "" {
+ productionRaw, ok := d.GetOk("production")
+ if ok {
+ production := productionRaw.(bool)
+ cfg.Production = &production
+ }
+ } else {
+ // clear out old production flag if base_url is set
+ cfg.Production = nil
+ }
+
+ ttl, ok := d.GetOk("ttl")
+ if ok {
+ cfg.TTL = time.Duration(ttl.(int)) * time.Second
} else if req.Operation == logical.CreateOperation {
- cfg.BaseURL = d.Get("base_url").(string)
+ cfg.TTL = time.Duration(d.Get("ttl").(int)) * time.Second
+ }
+
+ maxTTL, ok := d.GetOk("max_ttl")
+ if ok {
+ cfg.MaxTTL = time.Duration(maxTTL.(int)) * time.Second
+ } else if req.Operation == logical.CreateOperation {
+ cfg.MaxTTL = time.Duration(d.Get("max_ttl").(int)) * time.Second
}
jsonCfg, err := logical.StorageEntryJSON("config", cfg)
@@ -141,23 +214,29 @@ func (b *backend) pathConfigExistenceCheck(
// OktaClient creates a basic okta client connection
func (c *ConfigEntry) OktaClient() *okta.Client {
- client := okta.NewClient(c.Org)
+ baseURL := defaultBaseURL
+ if c.Production != nil {
+ if !*c.Production {
+ baseURL = previewBaseURL
+ }
+ }
if c.BaseURL != "" {
- client.Url = c.BaseURL
- }
-
- if c.Token != "" {
- client.ApiToken = c.Token
+ baseURL = c.BaseURL
}
+	// Config was validated on write; NewClientWithDomain only errors on URL parsing
+ client, _ := okta.NewClientWithDomain(cleanhttp.DefaultClient(), c.Org, baseURL, c.Token)
return client
}
// ConfigEntry for Okta
type ConfigEntry struct {
- Org string `json:"organization"`
- Token string `json:"token"`
- BaseURL string `json:"base_url"`
+ Org string `json:"organization"`
+ Token string `json:"token"`
+ BaseURL string `json:"base_url"`
+ Production *bool `json:"is_production,omitempty"`
+ TTL time.Duration `json:"ttl"`
+ MaxTTL time.Duration `json:"max_ttl"`
}
const pathConfigHelp = `
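
The new ttl and max_ttl fields above use framework.TypeDurationSecond, which
the framework hands back as an int count of seconds; the write handler then
converts that to a time.Duration before storing it. A minimal sketch of the
conversion pattern, assuming only the schema shown above (the helper name is
ours):

package example

import (
	"time"

	"github.com/hashicorp/vault/logical/framework"
)

// durationField converts a TypeDurationSecond value, which d.Get returns
// as an int number of seconds, into a time.Duration for storage.
func durationField(d *framework.FieldData, field string) time.Duration {
	// d.Get returns the zero value when the field is absent; handlers that
	// must tell create from update apart use d.GetOk, as above.
	return time.Duration(d.Get(field).(int)) * time.Second
}
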
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go
index d111775..9f879a1 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go
@@ -1,6 +1,8 @@
package okta
import (
+ "strings"
+
"github.com/hashicorp/vault/helper/policyutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
@@ -29,7 +31,7 @@ func pathGroups(b *backend) *framework.Path {
},
"policies": &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeCommaStringSlice,
Description: "Comma-separated list of policies associated to the group.",
},
},
@@ -45,34 +47,59 @@ func pathGroups(b *backend) *framework.Path {
}
}
-func (b *backend) Group(s logical.Storage, n string) (*GroupEntry, error) {
+// We look up groups in a case-insensitive manner since Okta is case-preserving
+// but case-insensitive for comparisons
+func (b *backend) Group(s logical.Storage, n string) (*GroupEntry, string, error) {
+ canonicalName := n
entry, err := s.Get("group/" + n)
if err != nil {
- return nil, err
+ return nil, "", err
}
if entry == nil {
- return nil, nil
+ entries, err := s.List("group/")
+ if err != nil {
+ return nil, "", err
+ }
+ for _, groupName := range entries {
+ if strings.ToLower(groupName) == strings.ToLower(n) {
+ entry, err = s.Get("group/" + groupName)
+ if err != nil {
+ return nil, "", err
+ }
+ canonicalName = groupName
+ break
+ }
+ }
+ }
+ if entry == nil {
+ return nil, "", nil
}
var result GroupEntry
if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
+ return nil, "", err
}
- return &result, nil
+ return &result, canonicalName, nil
}
func (b *backend) pathGroupDelete(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
name := d.Get("name").(string)
if len(name) == 0 {
- return logical.ErrorResponse("Error empty name"), nil
+ return logical.ErrorResponse("'name' must be supplied"), nil
}
- err := req.Storage.Delete("group/" + name)
+ entry, canonicalName, err := b.Group(req.Storage, name)
if err != nil {
return nil, err
}
+ if entry != nil {
+ err := req.Storage.Delete("group/" + canonicalName)
+ if err != nil {
+ return nil, err
+ }
+ }
return nil, nil
}
@@ -81,10 +108,10 @@ func (b *backend) pathGroupRead(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
name := d.Get("name").(string)
if len(name) == 0 {
- return logical.ErrorResponse("Error empty name"), nil
+ return logical.ErrorResponse("'name' must be supplied"), nil
}
- group, err := b.Group(req.Storage, name)
+ group, _, err := b.Group(req.Storage, name)
if err != nil {
return nil, err
}
@@ -103,11 +130,23 @@ func (b *backend) pathGroupWrite(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
name := d.Get("name").(string)
if len(name) == 0 {
- return logical.ErrorResponse("Error empty name"), nil
+ return logical.ErrorResponse("'name' must be supplied"), nil
+ }
+
+	// Check for an existing group, matching case-insensitively, so that we
+	// keep using any values the user has already set
+ _, canonicalName, err := b.Group(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if canonicalName != "" {
+ name = canonicalName
+ } else {
+ name = strings.ToLower(name)
}
entry, err := logical.StorageEntryJSON("group/"+name, &GroupEntry{
- Policies: policyutil.ParsePolicies(d.Get("policies").(string)),
+ Policies: policyutil.ParsePolicies(d.Get("policies")),
})
if err != nil {
return nil, err
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go
index accc867..e439771 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go
@@ -5,6 +5,7 @@ import (
"sort"
"strings"
+ "github.com/go-errors/errors"
"github.com/hashicorp/vault/helper/policyutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
@@ -55,6 +56,11 @@ func (b *backend) pathLogin(
sort.Strings(policies)
+ cfg, err := b.getConfig(req)
+ if err != nil {
+ return nil, err
+ }
+
resp.Auth = &logical.Auth{
Policies: policies,
Metadata: map[string]string{
@@ -66,6 +72,7 @@ func (b *backend) pathLogin(
},
DisplayName: username,
LeaseOptions: logical.LeaseOptions{
+ TTL: cfg.TTL,
Renewable: true,
},
}
@@ -87,7 +94,25 @@ func (b *backend) pathLoginRenew(
return nil, fmt.Errorf("policies have changed, not renewing")
}
- return framework.LeaseExtend(0, 0, b.System())(req, d)
+ cfg, err := b.getConfig(req)
+ if err != nil {
+ return nil, err
+ }
+
+ return framework.LeaseExtend(cfg.TTL, cfg.MaxTTL, b.System())(req, d)
+}
+
+func (b *backend) getConfig(req *logical.Request) (*ConfigEntry, error) {
+ cfg, err := b.Config(req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if cfg == nil {
+ return nil, errors.New("Okta backend not configured")
+ }
+
+ return cfg, nil
}
const pathLoginSyn = `
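
Renewal now feeds the configured TTLs into framework.LeaseExtend instead of
zeros. Assuming LeaseExtend's usual zero-value behavior of deferring to the
mount and system defaults, an unconfigured TTL preserves the old behavior; a
sketch of the resulting renew path (not Vault source, reusing the identifiers
from the file above):

package okta

import (
	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/logical/framework"
)

func (b *backend) renewSketch(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	cfg, err := b.getConfig(req)
	if err != nil {
		return nil, err // covers the "Okta backend not configured" case
	}
	// Zero cfg.TTL/cfg.MaxTTL defer to the defaults, matching the previous
	// LeaseExtend(0, 0, ...) call.
	return framework.LeaseExtend(cfg.TTL, cfg.MaxTTL, b.System())(req, d)
}
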
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go
index 4bd3306..49dcb7f 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go
@@ -7,7 +7,11 @@ import (
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend() *backend {
@@ -32,7 +36,8 @@ func Backend() *backend {
mfa.MFAPaths(b.Backend, pathLogin(&b))...,
),
- AuthRenew: b.pathLoginRenew,
+ AuthRenew: b.pathLoginRenew,
+ BackendType: logical.TypeCredential,
}
return &b
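
Setup no longer returns the backend alongside the error, so every builtin
factory switches from return Backend().Setup(conf) to the construct-then-check
shape above; the same change repeats for userpass, aws, cassandra, and consul
below. Schematically (Backend() stands in for each package's constructor):

package example

import "github.com/hashicorp/vault/logical"

// Factory builds the backend, lets Setup validate and wire the config, and
// only returns the backend once Setup has succeeded.
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
	b := Backend() // per-package constructor, as in the files below
	if err := b.Setup(conf); err != nil {
		return nil, err
	}
	return b, nil
}
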
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go
index 2eaac22..7d4bc8b 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go
@@ -39,7 +39,7 @@ func pathConfig(b *backend) *framework.Path {
"read_timeout": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
Default: 10,
- Description: "Number of seconds before response times out (default: 10)",
+ Description: "Number of seconds before response times out (default: 10). Note: kept for backwards compatibility, currently unused.",
},
"nas_port": &framework.FieldSchema{
Type: framework.TypeInt,
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go
index 6f2c16d..f3f8c9d 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go
@@ -1,6 +1,7 @@
package radius
import (
+ "context"
"fmt"
"net"
"strconv"
@@ -51,12 +52,12 @@ func (b *backend) pathLogin(
if username == "" {
username = d.Get("urlusername").(string)
if username == "" {
- return logical.ErrorResponse("username cannot be emtpy"), nil
+ return logical.ErrorResponse("username cannot be empty"), nil
}
}
if password == "" {
- return logical.ErrorResponse("password cannot be emtpy"), nil
+ return logical.ErrorResponse("password cannot be empty"), nil
}
policies, resp, err := b.RadiusLogin(req, username, password)
@@ -123,15 +124,24 @@ func (b *backend) RadiusLogin(req *logical.Request, username string, password st
hostport := net.JoinHostPort(cfg.Host, strconv.Itoa(cfg.Port))
packet := radius.New(radius.CodeAccessRequest, []byte(cfg.Secret))
- packet.Add("User-Name", username)
- packet.Add("User-Password", password)
- packet.Add("NAS-Port", uint32(cfg.NasPort))
+ usernameAttr, err := radius.NewString(username)
+ if err != nil {
+ return nil, nil, err
+ }
+ passwordAttr, err := radius.NewString(password)
+ if err != nil {
+ return nil, nil, err
+ }
+ packet.Add(1, usernameAttr)
+ packet.Add(2, passwordAttr)
+ packet.Add(5, radius.NewInteger(uint32(cfg.NasPort)))
client := radius.Client{
- DialTimeout: time.Duration(cfg.DialTimeout) * time.Second,
- ReadTimeout: time.Duration(cfg.ReadTimeout) * time.Second,
+ Dialer: net.Dialer{
+ Timeout: time.Duration(cfg.DialTimeout) * time.Second,
+ },
}
- received, err := client.Exchange(packet, hostport)
+ received, err := client.Exchange(context.Background(), packet, hostport)
if err != nil {
return nil, logical.ErrorResponse(err.Error()), nil
}
@@ -142,6 +152,9 @@ func (b *backend) RadiusLogin(req *logical.Request, username string, password st
var policies []string
// Retrieve user entry from storage
user, err := b.user(req.Storage, username)
+ if err != nil {
+ return policies, logical.ErrorResponse("could not retrieve user entry from storage"), err
+ }
if user == nil {
// No user found, check if unregistered users are allowed (unregistered_user_policies not empty)
if len(policyutil.SanitizePolicies(cfg.UnregisteredUserPolicies, false)) == 0 {
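
The vendored radius package changed its API: attributes are now typed values
added by attribute number, and Exchange takes a context. A condensed sketch of
the new call sequence, using only the calls visible above (import path as
vendored here; 1, 2, and 5 are the standard RADIUS attribute numbers for
User-Name, User-Password, and NAS-Port):

package example

import (
	"context"
	"net"
	"time"

	"layeh.com/radius"
)

func accessRequest(secret, hostport, user, pass string, nasPort uint32) (*radius.Packet, error) {
	packet := radius.New(radius.CodeAccessRequest, []byte(secret))
	userAttr, err := radius.NewString(user)
	if err != nil {
		return nil, err
	}
	passAttr, err := radius.NewString(pass)
	if err != nil {
		return nil, err
	}
	packet.Add(1, userAttr)                   // User-Name
	packet.Add(2, passAttr)                   // User-Password
	packet.Add(5, radius.NewInteger(nasPort)) // NAS-Port
	client := radius.Client{
		Dialer: net.Dialer{Timeout: 10 * time.Second},
	}
	return client.Exchange(context.Background(), packet, hostport)
}
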
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go
index ac9a971..1e0fc61 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go
@@ -32,7 +32,7 @@ func pathUsers(b *backend) *framework.Path {
},
"policies": &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeCommaStringSlice,
Description: "Comma-separated list of policies associated to the user.",
},
},
@@ -111,7 +111,7 @@ func (b *backend) pathUserRead(
func (b *backend) pathUserWrite(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- var policies = policyutil.ParsePolicies(d.Get("policies").(string))
+ var policies = policyutil.ParsePolicies(d.Get("policies"))
for _, policy := range policies {
if policy == "root" {
return logical.ErrorResponse("root policy cannot be granted by an authentication backend"), nil
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go
index d219895..65f67e1 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go
@@ -7,7 +7,11 @@ import (
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend() *backend {
@@ -32,7 +36,8 @@ func Backend() *backend {
mfa.MFAPaths(b.Backend, pathLogin(&b))...,
),
- AuthRenew: b.pathLoginRenew,
+ AuthRenew: b.pathLoginRenew,
+ BackendType: logical.TypeCredential,
}
return &b
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go
index f04dc6a..4f077ee 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go
@@ -6,6 +6,7 @@ import (
"testing"
"time"
+ "github.com/hashicorp/vault/helper/policyutil"
"github.com/hashicorp/vault/logical"
logicaltest "github.com/hashicorp/vault/logical/testing"
"github.com/mitchellh/mapstructure"
@@ -106,7 +107,7 @@ func TestBackend_userCrud(t *testing.T) {
Backend: b,
Steps: []logicaltest.TestStep{
testAccStepUser(t, "web", "password", "foo"),
- testAccStepReadUser(t, "web", "default,foo"),
+ testAccStepReadUser(t, "web", "foo"),
testAccStepDeleteUser(t, "web"),
testAccStepReadUser(t, "web", ""),
},
@@ -150,7 +151,7 @@ func TestBackend_passwordUpdate(t *testing.T) {
Backend: b,
Steps: []logicaltest.TestStep{
testAccStepUser(t, "web", "password", "foo"),
- testAccStepReadUser(t, "web", "default,foo"),
+ testAccStepReadUser(t, "web", "foo"),
testAccStepLogin(t, "web", "password", []string{"default", "foo"}),
testUpdatePassword(t, "web", "newpassword"),
testAccStepLogin(t, "web", "newpassword", []string{"default", "foo"}),
@@ -175,10 +176,10 @@ func TestBackend_policiesUpdate(t *testing.T) {
Backend: b,
Steps: []logicaltest.TestStep{
testAccStepUser(t, "web", "password", "foo"),
- testAccStepReadUser(t, "web", "default,foo"),
+ testAccStepReadUser(t, "web", "foo"),
testAccStepLogin(t, "web", "password", []string{"default", "foo"}),
testUpdatePolicies(t, "web", "foo,bar"),
- testAccStepReadUser(t, "web", "bar,default,foo"),
+ testAccStepReadUser(t, "web", "bar,foo"),
testAccStepLogin(t, "web", "password", []string{"bar", "default", "foo"}),
},
})
@@ -311,13 +312,13 @@ func testAccStepReadUser(t *testing.T, name string, policies string) logicaltest
}
var d struct {
- Policies string `mapstructure:"policies"`
+ Policies []string `mapstructure:"policies"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
}
- if d.Policies != policies {
+ if !reflect.DeepEqual(d.Policies, policyutil.ParsePolicies(policies)) {
return fmt.Errorf("bad: %#v", resp)
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go
index 80b52e3..4433c0e 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go
@@ -14,7 +14,7 @@ type CLIHandler struct {
DefaultMount string
}
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
var data struct {
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
@@ -23,18 +23,18 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
Passcode string `mapstructure:"passcode"`
}
if err := mapstructure.WeakDecode(m, &data); err != nil {
- return "", err
+ return nil, err
}
if data.Username == "" {
- return "", fmt.Errorf("'username' must be specified")
+ return nil, fmt.Errorf("'username' must be specified")
}
if data.Password == "" {
fmt.Printf("Password (will be hidden): ")
password, err := pwd.Read(os.Stdin)
fmt.Println()
if err != nil {
- return "", err
+ return nil, err
}
data.Password = password
}
@@ -55,13 +55,13 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) {
path := fmt.Sprintf("auth/%s/login/%s", data.Mount, data.Username)
secret, err := c.Logical().Write(path, options)
if err != nil {
- return "", err
+ return nil, err
}
if secret == nil {
- return "", fmt.Errorf("empty response from credential provider")
+ return nil, fmt.Errorf("empty response from credential provider")
}
- return secret.Auth.ClientToken, nil
+ return secret, nil
}
func (h *CLIHandler) Help() string {
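
Auth now returns the whole *api.Secret rather than just the client token,
which lets callers inspect lease metadata, policies, and renewability. A
caller that only needs the token does what the old signature did internally:

package example

import "github.com/hashicorp/vault/api"

// clientToken recovers the value Auth used to return directly from the
// richer *api.Secret the handler now produces.
func clientToken(secret *api.Secret) string {
	return secret.Auth.ClientToken
}
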
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go
index 6165c18..d03a6c2 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go
@@ -17,7 +17,7 @@ func pathUserPolicies(b *backend) *framework.Path {
Description: "Username for this user.",
},
"policies": &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeCommaStringSlice,
Description: "Comma-separated list of policies",
},
},
@@ -44,7 +44,7 @@ func (b *backend) pathUserPoliciesUpdate(
return nil, fmt.Errorf("username does not exist")
}
- userEntry.Policies = policyutil.ParsePolicies(d.Get("policies").(string))
+ userEntry.Policies = policyutil.ParsePolicies(d.Get("policies"))
return nil, b.setUser(req.Storage, username, userEntry)
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go
index f8d4eb0..b207598 100644
--- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go
+++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go
@@ -38,7 +38,7 @@ func pathUsers(b *backend) *framework.Path {
},
"policies": &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeCommaStringSlice,
Description: "Comma-separated list of policies",
},
"ttl": &framework.FieldSchema{
@@ -137,7 +137,7 @@ func (b *backend) pathUserRead(
return &logical.Response{
Data: map[string]interface{}{
- "policies": strings.Join(user.Policies, ","),
+ "policies": user.Policies,
"ttl": user.TTL.Seconds(),
"max_ttl": user.MaxTTL.Seconds(),
},
@@ -166,7 +166,7 @@ func (b *backend) userCreateUpdate(req *logical.Request, d *framework.FieldData)
}
if policiesRaw, ok := d.GetOk("policies"); ok {
- userEntry.Policies = policyutil.ParsePolicies(policiesRaw.(string))
+ userEntry.Policies = policyutil.ParsePolicies(policiesRaw)
}
ttlStr := userEntry.TTL.String()
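
With framework.TypeCommaStringSlice the framework splits comma-separated
input itself, so the field value arrives as a []string and
policyutil.ParsePolicies now takes the raw interface{} instead of a string.
Assuming ParsePolicies accepts both the legacy string form and the new slice
form, the two spellings normalize to the same list:

package example

import (
	"reflect"

	"github.com/hashicorp/vault/helper/policyutil"
)

// equivalentPolicies should report true: ParsePolicies sanitizes either
// input shape into the same ordered policy list.
func equivalentPolicies() bool {
	return reflect.DeepEqual(
		policyutil.ParsePolicies("dev, ops"),
		policyutil.ParsePolicies([]string{"dev", "ops"}),
	)
}
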
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go
index 246e25c..b6341e0 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go
@@ -9,7 +9,11 @@ import (
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend() *backend {
@@ -38,6 +42,7 @@ func Backend() *backend {
WALRollback: walRollback,
WALRollbackMinAge: 5 * time.Minute,
+ BackendType: logical.TypeLogical,
}
return &b
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go
index 3f04c68..5fab073 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go
@@ -196,6 +196,10 @@ func teardown() error {
RoleName: aws.String(testRoleName), // Required
}
_, err := svc.DetachRolePolicy(attachment)
+ if err != nil {
+ log.Printf("[WARN] AWS DetachRolePolicy failed: %v", err)
+ return err
+ }
params := &iam.DeleteRoleInput{
RoleName: aws.String(testRoleName),
@@ -206,9 +210,10 @@ func teardown() error {
if err != nil {
log.Printf("[WARN] AWS DeleteRole failed: %v", err)
+ return err
}
- return err
+ return nil
}
func testAccStepConfig(t *testing.T) logicaltest.TestStep {
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go
index 545c685..f6bbbe2 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go
@@ -2,6 +2,7 @@ package aws
import (
"fmt"
+ "os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
@@ -31,7 +32,13 @@ func getRootConfig(s logical.Storage) (*aws.Config, error) {
}
if credsConfig.Region == "" {
- credsConfig.Region = "us-east-1"
+ credsConfig.Region = os.Getenv("AWS_REGION")
+ if credsConfig.Region == "" {
+ credsConfig.Region = os.Getenv("AWS_DEFAULT_REGION")
+ if credsConfig.Region == "" {
+ credsConfig.Region = "us-east-1"
+ }
+ }
}
credsConfig.HTTPClient = cleanhttp.DefaultClient()
@@ -49,11 +56,25 @@ func getRootConfig(s logical.Storage) (*aws.Config, error) {
}
func clientIAM(s logical.Storage) (*iam.IAM, error) {
- awsConfig, _ := getRootConfig(s)
- return iam.New(session.New(awsConfig)), nil
+ awsConfig, err := getRootConfig(s)
+ if err != nil {
+ return nil, err
+ }
+ client := iam.New(session.New(awsConfig))
+ if client == nil {
+ return nil, fmt.Errorf("could not obtain iam client")
+ }
+ return client, nil
}
func clientSTS(s logical.Storage) (*sts.STS, error) {
- awsConfig, _ := getRootConfig(s)
- return sts.New(session.New(awsConfig)), nil
+ awsConfig, err := getRootConfig(s)
+ if err != nil {
+ return nil, err
+ }
+ client := sts.New(session.New(awsConfig))
+ if client == nil {
+ return nil, fmt.Errorf("could not obtain sts client")
+ }
+ return client, nil
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go
index 0d1d1d5..754e5b2 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go
@@ -37,9 +37,6 @@ func pathConfigRoot() *framework.Path {
func pathConfigRootWrite(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
region := data.Get("region").(string)
- if region == "" {
- region = "us-east-1"
- }
entry, err := logical.StorageEntryJSON("config/root", rootConfig{
AccessKey: data.Get("access_key").(string),
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go
index 08bbca9..3314c7a 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go
@@ -14,7 +14,7 @@ func TestBackend_PathListRoles(t *testing.T) {
config.StorageView = &logical.InmemStorage{}
b := Backend()
- if _, err := b.Setup(config); err != nil {
+ if err := b.Setup(config); err != nil {
t.Fatal(err)
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go
index 637bf9d..18dbb5d 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go
@@ -53,7 +53,7 @@ func genUsername(displayName, policyName, userType string) (ret string, warning
normalizeDisplayName(policyName))
if len(midString) > 42 {
midString = midString[0:42]
- warning = "the calling token display name/IAM policy name were truncated to find into IAM username length limits"
+ warning = "the calling token display name/IAM policy name were truncated to fit into IAM username length limits"
}
case "sts":
// Capped at 32 chars, which leaves only a couple of characters to play
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go
index c2e769c..dd54ba5 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go
@@ -12,7 +12,11 @@ import (
// Factory creates a new backend
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
// Backend contains the base information for the backend's functionality
@@ -36,6 +40,7 @@ func Backend() *backend {
Clean: func() {
b.ResetDB(nil)
},
+ BackendType: logical.TypeLogical,
}
return &b
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go
index b84ce0d..cfeb329 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go
@@ -74,6 +74,9 @@ func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
}
func TestBackend_basic(t *testing.T) {
+ if os.Getenv("TRAVIS") != "true" {
+ t.SkipNow()
+ }
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(config)
@@ -97,6 +100,9 @@ func TestBackend_basic(t *testing.T) {
}
func TestBackend_roleCrud(t *testing.T) {
+ if os.Getenv("TRAVIS") != "true" {
+ t.SkipNow()
+ }
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(config)
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go
index 4b025ba..98981ce 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go
@@ -61,14 +61,14 @@ func (b *backend) pathCredsCreateRead(
if err != nil {
return nil, err
}
-
+
// Set consistency
if role.Consistency != "" {
consistencyValue, err := gocql.ParseConsistencyWrapper(role.Consistency)
if err != nil {
return nil, err
}
-
+
session.SetConsistency(consistencyValue)
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go
index 0b4351f..9fd09ac 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go
@@ -6,7 +6,11 @@ import (
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend() *backend {
@@ -22,6 +26,7 @@ func Backend() *backend {
Secrets: []*framework.Secret{
secretToken(&b),
},
+ BackendType: logical.TypeLogical,
}
return &b
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/backend.go
new file mode 100644
index 0000000..ffc1a40
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/backend.go
@@ -0,0 +1,181 @@
+package database
+
+import (
+ "fmt"
+ "net/rpc"
+ "strings"
+ "sync"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const databaseConfigPath = "database/config/"
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ b := Backend(conf)
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func Backend(conf *logical.BackendConfig) *databaseBackend {
+ var b databaseBackend
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(backendHelp),
+
+ Paths: []*framework.Path{
+ pathListPluginConnection(&b),
+ pathConfigurePluginConnection(&b),
+ pathListRoles(&b),
+ pathRoles(&b),
+ pathCredsCreate(&b),
+ pathResetConnection(&b),
+ },
+
+ Secrets: []*framework.Secret{
+ secretCreds(&b),
+ },
+ Clean: b.closeAllDBs,
+ Invalidate: b.invalidate,
+ BackendType: logical.TypeLogical,
+ }
+
+ b.logger = conf.Logger
+ b.connections = make(map[string]dbplugin.Database)
+ return &b
+}
+
+type databaseBackend struct {
+ connections map[string]dbplugin.Database
+ logger log.Logger
+
+ *framework.Backend
+ sync.RWMutex
+}
+
+// closeAllDBs closes all connections from all database types
+func (b *databaseBackend) closeAllDBs() {
+ b.Lock()
+ defer b.Unlock()
+
+ for _, db := range b.connections {
+ db.Close()
+ }
+
+ b.connections = make(map[string]dbplugin.Database)
+}
+
+// This function retrieves a database object from the cached connection
+// map. The caller of this function needs to hold the backend's read
+// lock.
+func (b *databaseBackend) getDBObj(name string) (dbplugin.Database, bool) {
+ db, ok := b.connections[name]
+ return db, ok
+}
+
+// This function creates a new db object from the stored configuration and
+// caches it in the connections map. The caller of this function needs to hold
+// the backend's write lock
+func (b *databaseBackend) createDBObj(s logical.Storage, name string) (dbplugin.Database, error) {
+ db, ok := b.connections[name]
+ if ok {
+ return db, nil
+ }
+
+ config, err := b.DatabaseConfig(s, name)
+ if err != nil {
+ return nil, err
+ }
+
+ db, err = dbplugin.PluginFactory(config.PluginName, b.System(), b.logger)
+ if err != nil {
+ return nil, err
+ }
+
+ err = db.Initialize(config.ConnectionDetails, true)
+ if err != nil {
+ return nil, err
+ }
+
+ b.connections[name] = db
+
+ return db, nil
+}
+
+func (b *databaseBackend) DatabaseConfig(s logical.Storage, name string) (*DatabaseConfig, error) {
+ entry, err := s.Get(fmt.Sprintf("config/%s", name))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read connection configuration: %s", err)
+ }
+ if entry == nil {
+ return nil, fmt.Errorf("failed to find entry for connection with name: %s", name)
+ }
+
+ var config DatabaseConfig
+ if err := entry.DecodeJSON(&config); err != nil {
+ return nil, err
+ }
+
+ return &config, nil
+}
+
+func (b *databaseBackend) Role(s logical.Storage, roleName string) (*roleEntry, error) {
+ entry, err := s.Get("role/" + roleName)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result roleEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *databaseBackend) invalidate(key string) {
+ b.Lock()
+ defer b.Unlock()
+
+ switch {
+ case strings.HasPrefix(key, databaseConfigPath):
+ name := strings.TrimPrefix(key, databaseConfigPath)
+ b.clearConnection(name)
+ }
+}
+
+// clearConnection closes the database connection and
+// removes it from the b.connections map.
+func (b *databaseBackend) clearConnection(name string) {
+ db, ok := b.connections[name]
+ if ok {
+ db.Close()
+ delete(b.connections, name)
+ }
+}
+
+func (b *databaseBackend) closeIfShutdown(name string, err error) {
+ // Plugin has shutdown, close it so next call can reconnect.
+ if err == rpc.ErrShutdown {
+ b.Lock()
+ b.clearConnection(name)
+ b.Unlock()
+ }
+}
+
+const backendHelp = `
+The database backend supports using many different databases
+as secret backends, including but not limited to:
+cassandra, mssql, mysql, postgres
+
+After mounting this backend, configure it using the endpoints within
+the "database/config/" path.
+`
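
getDBObj and createDBObj split the cache lookup from the cache fill so that
callers can stay on the cheaper read lock for the common case. A sketch of
the calling discipline the comments above describe (the helper is ours; the
types come from this file):

package database

import (
	"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
	"github.com/hashicorp/vault/logical"
)

func (b *databaseBackend) connection(s logical.Storage, name string) (dbplugin.Database, error) {
	b.RLock()
	db, ok := b.getDBObj(name)
	b.RUnlock()
	if ok {
		return db, nil
	}
	// Upgrade to the write lock; createDBObj re-checks the cache, so a
	// connection created by another goroutine in the meantime is reused.
	b.Lock()
	defer b.Unlock()
	return b.createDBObj(s, name)
}
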
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/backend_test.go
new file mode 100644
index 0000000..d5461e2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/backend_test.go
@@ -0,0 +1,753 @@
+package database
+
+import (
+ "database/sql"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sync"
+ "testing"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/plugins/database/postgresql"
+ "github.com/hashicorp/vault/vault"
+ "github.com/lib/pq"
+ "github.com/mitchellh/mapstructure"
+ dockertest "gopkg.in/ory-am/dockertest.v3"
+)
+
+var (
+ testImagePull sync.Once
+)
+
+func preparePostgresTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cleanup func(), retURL string) {
+ if os.Getenv("PG_URL") != "" {
+ return func() {}, os.Getenv("PG_URL")
+ }
+
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("Failed to connect to docker: %s", err)
+ }
+
+ resource, err := pool.Run("postgres", "latest", []string{"POSTGRES_PASSWORD=secret", "POSTGRES_DB=database"})
+ if err != nil {
+ t.Fatalf("Could not start local PostgreSQL docker container: %s", err)
+ }
+
+ cleanup = func() {
+ err := pool.Purge(resource)
+ if err != nil {
+ t.Fatalf("Failed to cleanup local container: %s", err)
+ }
+ }
+
+ retURL = fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp"))
+
+ // exponential backoff-retry
+ if err = pool.Retry(func() error {
+ // This will cause a validation to run
+ resp, err := b.HandleRequest(&logical.Request{
+ Storage: s,
+ Operation: logical.UpdateOperation,
+ Path: "config/postgresql",
+ Data: map[string]interface{}{
+ "plugin_name": "postgresql-database-plugin",
+ "connection_url": retURL,
+ },
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ // It's likely not up and running yet, so return error and try again
+ return fmt.Errorf("err:%s resp:%#v\n", err, resp)
+ }
+ if resp == nil {
+ t.Fatal("expected warning")
+ }
+
+ return nil
+ }); err != nil {
+ t.Fatalf("Could not connect to PostgreSQL docker container: %s", err)
+ }
+
+ return
+}
+
+func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "database": Factory,
+ },
+ }
+
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ cores := cluster.Cores
+
+ os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
+
+ sys := vault.TestDynamicSystemView(cores[0].Core)
+ vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", "TestBackend_PluginMain")
+
+ return cluster, sys
+}
+
+func TestBackend_PluginMain(t *testing.T) {
+ if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
+ return
+ }
+
+ caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
+ if caPEM == "" {
+ t.Fatal("CA cert not passed in")
+ }
+
+ args := []string{"--ca-cert=" + caPEM}
+
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(args)
+
+ postgresql.Run(apiClientMeta.GetTLSConfig())
+}
+
+func TestBackend_config_connection(t *testing.T) {
+ var resp *logical.Response
+ var err error
+
+ cluster, sys := getCluster(t)
+ defer cluster.Cleanup()
+
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ config.System = sys
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer b.Cleanup()
+
+ configData := map[string]interface{}{
+ "connection_url": "sample_connection_url",
+ "plugin_name": "postgresql-database-plugin",
+ "verify_connection": false,
+ "allowed_roles": []string{"*"},
+ }
+
+ configReq := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/plugin-test",
+ Storage: config.StorageView,
+ Data: configData,
+ }
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ expected := map[string]interface{}{
+ "plugin_name": "postgresql-database-plugin",
+ "connection_details": map[string]interface{}{
+ "connection_url": "sample_connection_url",
+ },
+ "allowed_roles": []string{"*"},
+ }
+ configReq.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(configReq)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ delete(resp.Data["connection_details"].(map[string]interface{}), "name")
+ if !reflect.DeepEqual(expected, resp.Data) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data)
+ }
+
+ configReq.Operation = logical.ListOperation
+ configReq.Data = nil
+ configReq.Path = "config/"
+ resp, err = b.HandleRequest(configReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ keys := resp.Data["keys"].([]string)
+ key := keys[0]
+ if key != "plugin-test" {
+ t.Fatalf("bad key: %q", key)
+ }
+}
+
+func TestBackend_basic(t *testing.T) {
+ cluster, sys := getCluster(t)
+ defer cluster.Cleanup()
+
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ config.System = sys
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer b.Cleanup()
+
+ cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b)
+ defer cleanup()
+
+ // Configure a connection
+ data := map[string]interface{}{
+ "connection_url": connURL,
+ "plugin_name": "postgresql-database-plugin",
+ "allowed_roles": []string{"plugin-role-test"},
+ }
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/plugin-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err := b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Create a role
+ data = map[string]interface{}{
+ "db_name": "plugin-test",
+ "creation_statements": testRole,
+ "default_ttl": "5m",
+ "max_ttl": "10m",
+ }
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/plugin-role-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Get creds
+ data = map[string]interface{}{}
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "creds/plugin-role-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ credsResp, err := b.HandleRequest(req)
+ if err != nil || (credsResp != nil && credsResp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, credsResp)
+ }
+
+ if !testCredsExist(t, credsResp, connURL) {
+ t.Fatalf("Creds should exist")
+ }
+
+ // Revoke creds
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.RevokeOperation,
+ Storage: config.StorageView,
+ Secret: &logical.Secret{
+ InternalData: map[string]interface{}{
+ "secret_type": "creds",
+ "username": credsResp.Data["username"],
+ "role": "plugin-role-test",
+ },
+ },
+ })
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ if testCredsExist(t, credsResp, connURL) {
+ t.Fatalf("Creds should not exist")
+ }
+
+}
+
+func TestBackend_connectionCrud(t *testing.T) {
+ cluster, sys := getCluster(t)
+ defer cluster.Cleanup()
+
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ config.System = sys
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer b.Cleanup()
+
+ cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b)
+ defer cleanup()
+
+ // Configure a connection
+ data := map[string]interface{}{
+ "connection_url": "test",
+ "plugin_name": "postgresql-database-plugin",
+ "verify_connection": false,
+ }
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/plugin-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err := b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Create a role
+ data = map[string]interface{}{
+ "db_name": "plugin-test",
+ "creation_statements": testRole,
+ "revocation_statements": defaultRevocationSQL,
+ "default_ttl": "5m",
+ "max_ttl": "10m",
+ }
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/plugin-role-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Update the connection
+ data = map[string]interface{}{
+ "connection_url": connURL,
+ "plugin_name": "postgresql-database-plugin",
+ "allowed_roles": []string{"plugin-role-test"},
+ }
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/plugin-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Read connection
+ expected := map[string]interface{}{
+ "plugin_name": "postgresql-database-plugin",
+ "connection_details": map[string]interface{}{
+ "connection_url": connURL,
+ },
+ "allowed_roles": []string{"plugin-role-test"},
+ }
+ req.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ delete(resp.Data["connection_details"].(map[string]interface{}), "name")
+ if !reflect.DeepEqual(expected, resp.Data) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data)
+ }
+
+ // Reset Connection
+ data = map[string]interface{}{}
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "reset/plugin-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Get creds
+ data = map[string]interface{}{}
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "creds/plugin-role-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ credsResp, err := b.HandleRequest(req)
+ if err != nil || (credsResp != nil && credsResp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, credsResp)
+ }
+
+ if !testCredsExist(t, credsResp, connURL) {
+ t.Fatalf("Creds should exist")
+ }
+
+ // Delete Connection
+ data = map[string]interface{}{}
+ req = &logical.Request{
+ Operation: logical.DeleteOperation,
+ Path: "config/plugin-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Read connection
+ req.Operation = logical.ReadOperation
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Should be empty
+ if resp != nil {
+ t.Fatal("Expected response to be nil")
+ }
+}
+
+func TestBackend_roleCrud(t *testing.T) {
+ cluster, sys := getCluster(t)
+ defer cluster.Cleanup()
+
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ config.System = sys
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer b.Cleanup()
+
+ cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b)
+ defer cleanup()
+
+ // Configure a connection
+ data := map[string]interface{}{
+ "connection_url": connURL,
+ "plugin_name": "postgresql-database-plugin",
+ }
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/plugin-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err := b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Create a role
+ data = map[string]interface{}{
+ "db_name": "plugin-test",
+ "creation_statements": testRole,
+ "revocation_statements": defaultRevocationSQL,
+ "default_ttl": "5m",
+ "max_ttl": "10m",
+ }
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/plugin-role-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Read the role
+ data = map[string]interface{}{}
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "roles/plugin-role-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ expected := dbplugin.Statements{
+ CreationStatements: testRole,
+ RevocationStatements: defaultRevocationSQL,
+ }
+
+ var actual dbplugin.Statements
+ if err := mapstructure.Decode(resp.Data, &actual); err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(expected, actual) {
+		t.Fatalf("Statements did not match, expected %#v, got %#v", expected, actual)
+ }
+
+ // Delete the role
+ data = map[string]interface{}{}
+ req = &logical.Request{
+ Operation: logical.DeleteOperation,
+ Path: "roles/plugin-role-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Read the role
+ data = map[string]interface{}{}
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "roles/plugin-role-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Should be empty
+ if resp != nil {
+ t.Fatal("Expected response to be nil")
+ }
+}
+func TestBackend_allowedRoles(t *testing.T) {
+ cluster, sys := getCluster(t)
+ defer cluster.Cleanup()
+
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ config.System = sys
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer b.Cleanup()
+
+ cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b)
+ defer cleanup()
+
+ // Configure a connection
+ data := map[string]interface{}{
+ "connection_url": connURL,
+ "plugin_name": "postgresql-database-plugin",
+ }
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/plugin-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err := b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Create a denied and an allowed role
+ data = map[string]interface{}{
+ "db_name": "plugin-test",
+ "creation_statements": testRole,
+ "default_ttl": "5m",
+ "max_ttl": "10m",
+ }
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/denied",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ data = map[string]interface{}{
+ "db_name": "plugin-test",
+ "creation_statements": testRole,
+ "default_ttl": "5m",
+ "max_ttl": "10m",
+ }
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "roles/allowed",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Get creds from denied role, should fail
+ data = map[string]interface{}{}
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "creds/denied",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ credsResp, err := b.HandleRequest(req)
+ if err != logical.ErrPermissionDenied {
+ t.Fatalf("expected error to be:%s got:%#v\n", logical.ErrPermissionDenied, err)
+ }
+
+	// Update the connection to allow all roles via "*"
+ data = map[string]interface{}{
+ "connection_url": connURL,
+ "plugin_name": "postgresql-database-plugin",
+ "allowed_roles": "*",
+ }
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/plugin-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Get creds, should work.
+ data = map[string]interface{}{}
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "creds/allowed",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ credsResp, err = b.HandleRequest(req)
+ if err != nil || (credsResp != nil && credsResp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, credsResp)
+ }
+
+ if !testCredsExist(t, credsResp, connURL) {
+ t.Fatalf("Creds should exist")
+ }
+
+ // update connection with allowed roles
+ data = map[string]interface{}{
+ "connection_url": connURL,
+ "plugin_name": "postgresql-database-plugin",
+ "allowed_roles": "allow, allowed",
+ }
+ req = &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "config/plugin-test",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ resp, err = b.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
+ // Get creds from denied role, should fail
+ data = map[string]interface{}{}
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "creds/denied",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ credsResp, err = b.HandleRequest(req)
+ if err != logical.ErrPermissionDenied {
+ t.Fatalf("expected error to be:%s got:%#v\n", logical.ErrPermissionDenied, err)
+ }
+
+ // Get creds from allowed role, should work.
+ data = map[string]interface{}{}
+ req = &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "creds/allowed",
+ Storage: config.StorageView,
+ Data: data,
+ }
+ credsResp, err = b.HandleRequest(req)
+ if err != nil || (credsResp != nil && credsResp.IsError()) {
+ t.Fatalf("err:%s resp:%#v\n", err, credsResp)
+ }
+
+ if !testCredsExist(t, credsResp, connURL) {
+ t.Fatalf("Creds should exist")
+ }
+}
+
+func testCredsExist(t *testing.T, resp *logical.Response, connURL string) bool {
+ var d struct {
+ Username string `mapstructure:"username"`
+ Password string `mapstructure:"password"`
+ }
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ t.Fatal(err)
+ }
+ log.Printf("[TRACE] Generated credentials: %v", d)
+ conn, err := pq.ParseURL(connURL)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ conn += " timezone=utc"
+
+ db, err := sql.Open("postgres", conn)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ returnedRows := func() int {
+ stmt, err := db.Prepare("SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege($1, 'information_schema.role_column_grants', 'select');")
+ if err != nil {
+ return -1
+ }
+ defer stmt.Close()
+
+ rows, err := stmt.Query(d.Username)
+ if err != nil {
+ return -1
+ }
+ defer rows.Close()
+
+ i := 0
+ for rows.Next() {
+ i++
+ }
+ return i
+ }
+
+ return returnedRows() == 2
+}
+
+const testRole = `
+CREATE ROLE "{{name}}" WITH
+ LOGIN
+ PASSWORD '{{password}}'
+ VALID UNTIL '{{expiration}}';
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
+`
+
+const defaultRevocationSQL = `
+REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
+REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}};
+REVOKE USAGE ON SCHEMA public FROM {{name}};
+
+DROP ROLE IF EXISTS {{name}};
+`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/client.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/client.go
new file mode 100644
index 0000000..6df3948
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/client.go
@@ -0,0 +1,133 @@
+package dbplugin
+
+import (
+ "fmt"
+ "net/rpc"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ log "github.com/mgutz/logxi/v1"
+)
+
+// DatabasePluginClient embeds a databasePluginRPCClient and wraps its Close
+// method to also call Kill() on the plugin.Client.
+type DatabasePluginClient struct {
+ client *plugin.Client
+ sync.Mutex
+
+ *databasePluginRPCClient
+}
+
+func (dc *DatabasePluginClient) Close() error {
+ err := dc.databasePluginRPCClient.Close()
+ dc.client.Kill()
+
+ return err
+}
+
+// newPluginClient returns a databasePluginRPCClient with a connection to a
+// running plugin. The client is wrapped in a DatabasePluginClient object to
+// ensure the plugin is killed on call of Close().
+func newPluginClient(sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger) (Database, error) {
+ // pluginMap is the map of plugins we can dispense.
+ var pluginMap = map[string]plugin.Plugin{
+ "database": new(DatabasePlugin),
+ }
+
+ client, err := pluginRunner.Run(sys, pluginMap, handshakeConfig, []string{}, logger)
+ if err != nil {
+ return nil, err
+ }
+
+ // Connect via RPC
+ rpcClient, err := client.Client()
+ if err != nil {
+ return nil, err
+ }
+
+ // Request the plugin
+ raw, err := rpcClient.Dispense("database")
+ if err != nil {
+ return nil, err
+ }
+
+ // We should have a database type now. This feels like a normal interface
+ // implementation but is in fact over an RPC connection.
+ databaseRPC := raw.(*databasePluginRPCClient)
+
+	// Wrap the RPC implementation in DatabasePluginClient
+ return &DatabasePluginClient{
+ client: client,
+ databasePluginRPCClient: databaseRPC,
+ }, nil
+}
+
+// ---- RPC client domain ----
+
+// databasePluginRPCClient implements Database and is used on the client to
+// make RPC calls to a plugin.
+type databasePluginRPCClient struct {
+ client *rpc.Client
+}
+
+func (dr *databasePluginRPCClient) Type() (string, error) {
+ var dbType string
+ err := dr.client.Call("Plugin.Type", struct{}{}, &dbType)
+
+ return fmt.Sprintf("plugin-%s", dbType), err
+}
+
+func (dr *databasePluginRPCClient) CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) {
+ req := CreateUserRequest{
+ Statements: statements,
+ UsernameConfig: usernameConfig,
+ Expiration: expiration,
+ }
+
+ var resp CreateUserResponse
+ err = dr.client.Call("Plugin.CreateUser", req, &resp)
+
+ return resp.Username, resp.Password, err
+}
+
+func (dr *databasePluginRPCClient) RenewUser(statements Statements, username string, expiration time.Time) error {
+ req := RenewUserRequest{
+ Statements: statements,
+ Username: username,
+ Expiration: expiration,
+ }
+
+ err := dr.client.Call("Plugin.RenewUser", req, &struct{}{})
+
+ return err
+}
+
+func (dr *databasePluginRPCClient) RevokeUser(statements Statements, username string) error {
+ req := RevokeUserRequest{
+ Statements: statements,
+ Username: username,
+ }
+
+ err := dr.client.Call("Plugin.RevokeUser", req, &struct{}{})
+
+ return err
+}
+
+func (dr *databasePluginRPCClient) Initialize(conf map[string]interface{}, verifyConnection bool) error {
+ req := InitializeRequest{
+ Config: conf,
+ VerifyConnection: verifyConnection,
+ }
+
+ err := dr.client.Call("Plugin.Initialize", req, &struct{}{})
+
+ return err
+}
+
+func (dr *databasePluginRPCClient) Close() error {
+ err := dr.client.Call("Plugin.Close", struct{}{}, &struct{}{})
+
+ return err
+}
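
Every method here is a thin net/rpc call such as Plugin.CreateUser, so the
plugin process has to register a service whose methods match net/rpc's
required shape: exported method, an arguments value, a reply pointer, and an
error result. A sketch of the server-side counterpart (the real server lives
elsewhere in this package; the type name is our assumption):

package dbplugin

type databasePluginRPCServerSketch struct {
	impl Database
}

// CreateUser unpacks the request the client above sends and forwards it to
// the concrete Database implementation.
func (s *databasePluginRPCServerSketch) CreateUser(req CreateUserRequest, resp *CreateUserResponse) error {
	var err error
	resp.Username, resp.Password, err = s.impl.CreateUser(req.Statements, req.UsernameConfig, req.Expiration)
	return err
}
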
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/databasemiddleware.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/databasemiddleware.go
new file mode 100644
index 0000000..87dfa6c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/databasemiddleware.go
@@ -0,0 +1,162 @@
+package dbplugin
+
+import (
+ "time"
+
+ metrics "github.com/armon/go-metrics"
+ log "github.com/mgutz/logxi/v1"
+)
+
+// ---- Tracing Middleware Domain ----
+
+// databaseTracingMiddleware wraps an implementation of Database and emits
+// trace logging around each function call.
+type databaseTracingMiddleware struct {
+ next Database
+ logger log.Logger
+
+ typeStr string
+}
+
+func (mw *databaseTracingMiddleware) Type() (string, error) {
+ return mw.next.Type()
+}
+
+func (mw *databaseTracingMiddleware) CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) {
+ defer func(then time.Time) {
+ mw.logger.Trace("database", "operation", "CreateUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then))
+ }(time.Now())
+
+ mw.logger.Trace("database", "operation", "CreateUser", "status", "started", "type", mw.typeStr)
+ return mw.next.CreateUser(statements, usernameConfig, expiration)
+}
+
+func (mw *databaseTracingMiddleware) RenewUser(statements Statements, username string, expiration time.Time) (err error) {
+ defer func(then time.Time) {
+ mw.logger.Trace("database", "operation", "RenewUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then))
+ }(time.Now())
+
+	mw.logger.Trace("database", "operation", "RenewUser", "status", "started", "type", mw.typeStr)
+ return mw.next.RenewUser(statements, username, expiration)
+}
+
+func (mw *databaseTracingMiddleware) RevokeUser(statements Statements, username string) (err error) {
+ defer func(then time.Time) {
+ mw.logger.Trace("database", "operation", "RevokeUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then))
+ }(time.Now())
+
+ mw.logger.Trace("database", "operation", "RevokeUser", "status", "started", "type", mw.typeStr)
+ return mw.next.RevokeUser(statements, username)
+}
+
+func (mw *databaseTracingMiddleware) Initialize(conf map[string]interface{}, verifyConnection bool) (err error) {
+ defer func(then time.Time) {
+ mw.logger.Trace("database", "operation", "Initialize", "status", "finished", "type", mw.typeStr, "verify", verifyConnection, "err", err, "took", time.Since(then))
+ }(time.Now())
+
+ mw.logger.Trace("database", "operation", "Initialize", "status", "started", "type", mw.typeStr)
+ return mw.next.Initialize(conf, verifyConnection)
+}
+
+func (mw *databaseTracingMiddleware) Close() (err error) {
+ defer func(then time.Time) {
+ mw.logger.Trace("database", "operation", "Close", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then))
+ }(time.Now())
+
+ mw.logger.Trace("database", "operation", "Close", "status", "started", "type", mw.typeStr)
+ return mw.next.Close()
+}
+
+// ---- Metrics Middleware Domain ----
+
+// databaseMetricsMiddleware wraps an implementation of Database and records
+// metrics about this instance on each function call.
+type databaseMetricsMiddleware struct {
+ next Database
+
+ typeStr string
+}
+
+func (mw *databaseMetricsMiddleware) Type() (string, error) {
+ return mw.next.Type()
+}
+
+func (mw *databaseMetricsMiddleware) CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) {
+ defer func(now time.Time) {
+ metrics.MeasureSince([]string{"database", "CreateUser"}, now)
+ metrics.MeasureSince([]string{"database", mw.typeStr, "CreateUser"}, now)
+
+ if err != nil {
+ metrics.IncrCounter([]string{"database", "CreateUser", "error"}, 1)
+ metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser", "error"}, 1)
+ }
+ }(time.Now())
+
+ metrics.IncrCounter([]string{"database", "CreateUser"}, 1)
+ metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser"}, 1)
+ return mw.next.CreateUser(statements, usernameConfig, expiration)
+}
+
+func (mw *databaseMetricsMiddleware) RenewUser(statements Statements, username string, expiration time.Time) (err error) {
+ defer func(now time.Time) {
+ metrics.MeasureSince([]string{"database", "RenewUser"}, now)
+ metrics.MeasureSince([]string{"database", mw.typeStr, "RenewUser"}, now)
+
+ if err != nil {
+ metrics.IncrCounter([]string{"database", "RenewUser", "error"}, 1)
+ metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser", "error"}, 1)
+ }
+ }(time.Now())
+
+ metrics.IncrCounter([]string{"database", "RenewUser"}, 1)
+ metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser"}, 1)
+ return mw.next.RenewUser(statements, username, expiration)
+}
+
+func (mw *databaseMetricsMiddleware) RevokeUser(statements Statements, username string) (err error) {
+ defer func(now time.Time) {
+ metrics.MeasureSince([]string{"database", "RevokeUser"}, now)
+ metrics.MeasureSince([]string{"database", mw.typeStr, "RevokeUser"}, now)
+
+ if err != nil {
+ metrics.IncrCounter([]string{"database", "RevokeUser", "error"}, 1)
+ metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser", "error"}, 1)
+ }
+ }(time.Now())
+
+ metrics.IncrCounter([]string{"database", "RevokeUser"}, 1)
+ metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser"}, 1)
+ return mw.next.RevokeUser(statements, username)
+}
+
+func (mw *databaseMetricsMiddleware) Initialize(conf map[string]interface{}, verifyConnection bool) (err error) {
+ defer func(now time.Time) {
+ metrics.MeasureSince([]string{"database", "Initialize"}, now)
+ metrics.MeasureSince([]string{"database", mw.typeStr, "Initialize"}, now)
+
+ if err != nil {
+ metrics.IncrCounter([]string{"database", "Initialize", "error"}, 1)
+ metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize", "error"}, 1)
+ }
+ }(time.Now())
+
+ metrics.IncrCounter([]string{"database", "Initialize"}, 1)
+ metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize"}, 1)
+ return mw.next.Initialize(conf, verifyConnection)
+}
+
+func (mw *databaseMetricsMiddleware) Close() (err error) {
+ defer func(now time.Time) {
+ metrics.MeasureSince([]string{"database", "Close"}, now)
+ metrics.MeasureSince([]string{"database", mw.typeStr, "Close"}, now)
+
+ if err != nil {
+ metrics.IncrCounter([]string{"database", "Close", "error"}, 1)
+ metrics.IncrCounter([]string{"database", mw.typeStr, "Close", "error"}, 1)
+ }
+ }(time.Now())
+
+ metrics.IncrCounter([]string{"database", "Close"}, 1)
+ metrics.IncrCounter([]string{"database", mw.typeStr, "Close"}, 1)
+ return mw.next.Close()
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin.go
new file mode 100644
index 0000000..0becc9f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin.go
@@ -0,0 +1,147 @@
+package dbplugin
+
+import (
+ "fmt"
+ "net/rpc"
+ "time"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ log "github.com/mgutz/logxi/v1"
+)
+
+// Database is the interface that all database objects must implement.
+type Database interface {
+ Type() (string, error)
+ CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error)
+ RenewUser(statements Statements, username string, expiration time.Time) error
+ RevokeUser(statements Statements, username string) error
+
+ Initialize(config map[string]interface{}, verifyConnection bool) error
+ Close() error
+}
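+
+// Builtin databases satisfy Database directly, while external plugins
+// implement it in a separate binary and expose it over RPC via Serve (see
+// server.go). The mockPlugin type in plugin_test.go is a minimal example
+// implementation.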
+
+// Statements are the database statements set during role creation and passed
+// into the database type's functions.
+type Statements struct {
+ CreationStatements string `json:"creation_statements" mapstructure:"creation_statements" structs:"creation_statements"`
+ RevocationStatements string `json:"revocation_statements" mapstructure:"revocation_statements" structs:"revocation_statements"`
+ RollbackStatements string `json:"rollback_statements" mapstructure:"rollback_statements" structs:"rollback_statements"`
+ RenewStatements string `json:"renew_statements" mapstructure:"renew_statements" structs:"renew_statements"`
+}
+
+// UsernameConfig is used to configure prefixes for the username to be
+// generated.
+type UsernameConfig struct {
+ DisplayName string
+ RoleName string
+}
+
+// PluginFactory is used to build plugin database types. It wraps the database
+// object in metrics middleware and, when trace logging is enabled, tracing
+// middleware.
+func PluginFactory(pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) {
+ // Look for plugin in the plugin catalog
+ pluginRunner, err := sys.LookupPlugin(pluginName)
+ if err != nil {
+ return nil, err
+ }
+
+ var db Database
+ if pluginRunner.Builtin {
+ // Plugin is builtin so we can retrieve an instance of the interface
+ // from the pluginRunner. Then cast it to a Database.
+ dbRaw, err := pluginRunner.BuiltinFactory()
+ if err != nil {
+ return nil, fmt.Errorf("error getting plugin type: %s", err)
+ }
+
+ var ok bool
+ db, ok = dbRaw.(Database)
+ if !ok {
+ return nil, fmt.Errorf("unsuported database type: %s", pluginName)
+ }
+
+ } else {
+ // Create a DatabasePluginClient instance.
+ db, err = newPluginClient(sys, pluginRunner, logger)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ typeStr, err := db.Type()
+ if err != nil {
+ return nil, fmt.Errorf("error getting plugin type: %s", err)
+ }
+
+ // Wrap with metrics middleware
+ db = &databaseMetricsMiddleware{
+ next: db,
+ typeStr: typeStr,
+ }
+
+ // Wrap with tracing middleware
+ if logger.IsTrace() {
+ db = &databaseTracingMiddleware{
+ next: db,
+ typeStr: typeStr,
+ logger: logger,
+ }
+ }
+
+ return db, nil
+}
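+
+// A typical invocation, mirroring plugin_test.go ("test-plugin" being
+// whatever name was registered in the plugin catalog):
+//
+//   db, err := dbplugin.PluginFactory("test-plugin", sys, logger)
+//   if err != nil {
+//       return err
+//   }
+//   defer db.Close()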
+
+// handshakeConfig is used to do a basic handshake between a plugin and the
+// host. If the handshake fails, a user-friendly error is shown. This prevents
+// users from executing bad plugins or executing a plugin directly. It is a UX
+// feature, not a security feature.
+var handshakeConfig = plugin.HandshakeConfig{
+ ProtocolVersion: 3,
+ MagicCookieKey: "VAULT_DATABASE_PLUGIN",
+ MagicCookieValue: "926a0820-aea2-be28-51d6-83cdf00e8edb",
+}
+
+// DatabasePlugin implements go-plugin's Plugin interface. It has methods for
+// retrieving a server and a client instance of the plugin.
+type DatabasePlugin struct {
+ impl Database
+}
+
+func (d DatabasePlugin) Server(*plugin.MuxBroker) (interface{}, error) {
+ return &databasePluginRPCServer{impl: d.impl}, nil
+}
+
+func (DatabasePlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+ return &databasePluginRPCClient{client: c}, nil
+}
+
+// ---- RPC Request Args Domain ----
+
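+// These request types (and the response type below) are exported because
+// net/rpc requires exported argument and reply types, and its default gob
+// codec only encodes exported struct fields.
+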
+type InitializeRequest struct {
+ Config map[string]interface{}
+ VerifyConnection bool
+}
+
+type CreateUserRequest struct {
+ Statements Statements
+ UsernameConfig UsernameConfig
+ Expiration time.Time
+}
+
+type RenewUserRequest struct {
+ Statements Statements
+ Username string
+ Expiration time.Time
+}
+
+type RevokeUserRequest struct {
+ Statements Statements
+ Username string
+}
+
+// ---- RPC Response Args Domain ----
+
+type CreateUserResponse struct {
+ Username string
+ Password string
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin_test.go
new file mode 100644
index 0000000..3a78595
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin_test.go
@@ -0,0 +1,245 @@
+package dbplugin_test
+
+import (
+ "errors"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/plugins"
+ "github.com/hashicorp/vault/vault"
+ log "github.com/mgutz/logxi/v1"
+)
+
+type mockPlugin struct {
+ users map[string][]string
+}
+
+func (m *mockPlugin) Type() (string, error) { return "mock", nil }
+func (m *mockPlugin) CreateUser(statements dbplugin.Statements, usernameConf dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
+ err = errors.New("err")
+ if usernameConf.DisplayName == "" || expiration.IsZero() {
+ return "", "", err
+ }
+
+ if _, ok := m.users[usernameConf.DisplayName]; ok {
+ return "", "", err
+ }
+
+ m.users[usernameConf.DisplayName] = []string{"test"}
+
+ return usernameConf.DisplayName, "test", nil
+}
+func (m *mockPlugin) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
+ err := errors.New("err")
+ if username == "" || expiration.IsZero() {
+ return err
+ }
+
+ if _, ok := m.users[username]; !ok {
+ return err
+ }
+
+ return nil
+}
+func (m *mockPlugin) RevokeUser(statements dbplugin.Statements, username string) error {
+ err := errors.New("err")
+ if username == "" {
+ return err
+ }
+
+ if _, ok := m.users[username]; !ok {
+ return err
+ }
+
+ delete(m.users, username)
+ return nil
+}
+func (m *mockPlugin) Initialize(conf map[string]interface{}, _ bool) error {
+ err := errors.New("err")
+ if len(conf) != 1 {
+ return err
+ }
+
+ return nil
+}
+func (m *mockPlugin) Close() error {
+ m.users = nil
+ return nil
+}
+
+func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
+ cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ cores := cluster.Cores
+
+ sys := vault.TestDynamicSystemView(cores[0].Core)
+ vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", "TestPlugin_Main")
+
+ return cluster, sys
+}
+
+// This is not an actual test case; it is a helper function that will be
+// executed by the go-plugin client via an exec call.
+func TestPlugin_Main(t *testing.T) {
+ if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
+ return
+ }
+
+ plugin := &mockPlugin{
+ users: make(map[string][]string),
+ }
+
+ args := []string{"--tls-skip-verify=true"}
+
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(args)
+
+ plugins.Serve(plugin, apiClientMeta.GetTLSConfig())
+}
+
+func TestPlugin_Initialize(t *testing.T) {
+ cluster, sys := getCluster(t)
+ defer cluster.Cleanup()
+
+ dbRaw, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ connectionDetails := map[string]interface{}{
+ "test": 1,
+ }
+
+ err = dbRaw.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = dbRaw.Close()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestPlugin_CreateUser(t *testing.T) {
+ cluster, sys := getCluster(t)
+ defer cluster.Cleanup()
+
+ db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer db.Close()
+
+ connectionDetails := map[string]interface{}{
+ "test": 1,
+ }
+
+ err = db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ usernameConf := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ us, pw, err := db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if us != "test" || pw != "test" {
+ t.Fatal("expected username and password to be 'test'")
+ }
+
+ // Try saving the same user again to verify it was saved the first time;
+ // this should return an error.
+ _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
+ if err == nil {
+ t.Fatal("expected an error, user wasn't created correctly")
+ }
+}
+
+func TestPlugin_RenewUser(t *testing.T) {
+ cluster, sys := getCluster(t)
+ defer cluster.Cleanup()
+
+ db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer db.Close()
+
+ connectionDetails := map[string]interface{}{
+ "test": 1,
+ }
+ err = db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ usernameConf := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ us, _, err := db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = db.RenewUser(dbplugin.Statements{}, us, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestPlugin_RevokeUser(t *testing.T) {
+ cluster, sys := getCluster(t)
+ defer cluster.Cleanup()
+
+ db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer db.Close()
+
+ connectionDetails := map[string]interface{}{
+ "test": 1,
+ }
+ err = db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ usernameConf := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ us, _, err := db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Test default revoke statements
+ err = db.RevokeUser(dbplugin.Statements{}, us)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Try adding the same username back so we can verify it was removed
+ _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/server.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/server.go
new file mode 100644
index 0000000..381f0ae
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/server.go
@@ -0,0 +1,71 @@
+package dbplugin
+
+import (
+ "crypto/tls"
+
+ "github.com/hashicorp/go-plugin"
+)
+
+// Serve is called from within a plugin. It wraps the provided Database
+// implementation in a databasePluginRPCServer object, then starts an RPC
+// server.
+func Serve(db Database, tlsProvider func() (*tls.Config, error)) {
+ dbPlugin := &DatabasePlugin{
+ impl: db,
+ }
+
+ // pluginMap is the map of plugins we can dispense.
+ var pluginMap = map[string]plugin.Plugin{
+ "database": dbPlugin,
+ }
+
+ plugin.Serve(&plugin.ServeConfig{
+ HandshakeConfig: handshakeConfig,
+ Plugins: pluginMap,
+ TLSProvider: tlsProvider,
+ })
+}
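+
+// A plugin binary wires this up from its main package. A minimal sketch,
+// following the TestPlugin_Main pattern in plugin_test.go, assuming a
+// hypothetical myDatabase type that implements Database; plugins.Serve
+// (github.com/hashicorp/vault/plugins) is the convenience wrapper expected
+// to build the TLS provider and call this Serve:
+//
+//   func main() {
+//       apiClientMeta := &pluginutil.APIClientMeta{}
+//       flags := apiClientMeta.FlagSet()
+//       flags.Parse(os.Args[1:])
+//
+//       plugins.Serve(&myDatabase{}, apiClientMeta.GetTLSConfig())
+//   }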
+
+// ---- RPC server domain ----
+
+// databasePluginRPCServer implements an RPC version of Database and is run
+// inside a plugin. It wraps an underlying implementation of Database.
+type databasePluginRPCServer struct {
+ impl Database
+}
+
+func (ds *databasePluginRPCServer) Type(_ struct{}, resp *string) error {
+ var err error
+ *resp, err = ds.impl.Type()
+ return err
+}
+
+func (ds *databasePluginRPCServer) CreateUser(args *CreateUserRequest, resp *CreateUserResponse) error {
+ var err error
+ resp.Username, resp.Password, err = ds.impl.CreateUser(args.Statements, args.UsernameConfig, args.Expiration)
+
+ return err
+}
+
+func (ds *databasePluginRPCServer) RenewUser(args *RenewUserRequest, _ *struct{}) error {
+ err := ds.impl.RenewUser(args.Statements, args.Username, args.Expiration)
+
+ return err
+}
+
+func (ds *databasePluginRPCServer) RevokeUser(args *RevokeUserRequest, _ *struct{}) error {
+ err := ds.impl.RevokeUser(args.Statements, args.Username)
+
+ return err
+}
+
+func (ds *databasePluginRPCServer) Initialize(args *InitializeRequest, _ *struct{}) error {
+ err := ds.impl.Initialize(args.Config, args.VerifyConnection)
+
+ return err
+}
+
+func (ds *databasePluginRPCServer) Close(_ struct{}, _ *struct{}) error {
+ return ds.impl.Close()
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_config_connection.go
new file mode 100644
index 0000000..d1e6cb2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_config_connection.go
@@ -0,0 +1,294 @@
+package database
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/fatih/structs"
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+var (
+ respErrEmptyPluginName = "empty plugin name"
+ respErrEmptyName = "empty name attribute given"
+)
+
+// DatabaseConfig is used by the Factory function to configure a Database
+// object.
+type DatabaseConfig struct {
+ PluginName string `json:"plugin_name" structs:"plugin_name" mapstructure:"plugin_name"`
+ // ConnectionDetails stores the database specific connection settings needed
+ // by each database type.
+ ConnectionDetails map[string]interface{} `json:"connection_details" structs:"connection_details" mapstructure:"connection_details"`
+ AllowedRoles []string `json:"allowed_roles" structs:"allowed_roles" mapstructure:"allowed_roles"`
+}
+
+// pathResetConnection configures a path to reset a plugin.
+func pathResetConnection(b *databaseBackend) *framework.Path {
+ return &framework.Path{
+ Pattern: fmt.Sprintf("reset/%s", framework.GenericNameRegex("name")),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of this database connection",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathConnectionReset(),
+ },
+
+ HelpSynopsis: pathResetConnectionHelpSyn,
+ HelpDescription: pathResetConnectionHelpDesc,
+ }
+}
+
+// pathConnectionReset resets a plugin by closing the existing instance and
+// creating a new one.
+func (b *databaseBackend) pathConnectionReset() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+ if name == "" {
+ return logical.ErrorResponse(respErrEmptyName), nil
+ }
+
+ // Grab the mutex lock
+ b.Lock()
+ defer b.Unlock()
+
+ // Close plugin and delete the entry in the connections cache.
+ b.clearConnection(name)
+
+ // Execute the plugin again; we don't need the object, so throw it away.
+ _, err := b.createDBObj(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+ }
+}
+
+// pathConfigurePluginConnection returns a configured framework.Path set up to
+// operate on plugins.
+func pathConfigurePluginConnection(b *databaseBackend) *framework.Path {
+ return &framework.Path{
+ Pattern: fmt.Sprintf("config/%s", framework.GenericNameRegex("name")),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of this database connection",
+ },
+
+ "plugin_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `The name of a builtin or previously registered
+ plugin known to Vault. This endpoint will create an instance of
+ that plugin type.`,
+ },
+
+ "verify_connection": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If true, the connection details are verified by
+ actually connecting to the database. Defaults to true.`,
+ },
+
+ "allowed_roles": &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `Comma-separated string or array of the role names
+ allowed to get creds from this database connection. If empty, no
+ roles are allowed. If "*", all roles are allowed.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.connectionWriteHandler(),
+ logical.ReadOperation: b.connectionReadHandler(),
+ logical.DeleteOperation: b.connectionDeleteHandler(),
+ },
+
+ HelpSynopsis: pathConfigConnectionHelpSyn,
+ HelpDescription: pathConfigConnectionHelpDesc,
+ }
+}
+
+func pathListPluginConnection(b *databaseBackend) *framework.Path {
+ return &framework.Path{
+ Pattern: fmt.Sprintf("config/?$"),
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.connectionListHandler(),
+ },
+
+ HelpSynopsis: pathConfigConnectionHelpSyn,
+ HelpDescription: pathConfigConnectionHelpDesc,
+ }
+}
+
+func (b *databaseBackend) connectionListHandler() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("config/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+ }
+}
+
+// connectionReadHandler reads out the connection configuration
+func (b *databaseBackend) connectionReadHandler() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+ if name == "" {
+ return logical.ErrorResponse(respErrEmptyName), nil
+ }
+
+ entry, err := req.Storage.Get(fmt.Sprintf("config/%s", name))
+ if err != nil {
+ return nil, errors.New("failed to read connection configuration")
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var config DatabaseConfig
+ if err := entry.DecodeJSON(&config); err != nil {
+ return nil, err
+ }
+ return &logical.Response{
+ Data: structs.New(config).Map(),
+ }, nil
+ }
+}
+
+// connectionDeleteHandler deletes the connection configuration
+func (b *databaseBackend) connectionDeleteHandler() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+ if name == "" {
+ return logical.ErrorResponse(respErrEmptyName), nil
+ }
+
+ err := req.Storage.Delete(fmt.Sprintf("config/%s", name))
+ if err != nil {
+ return nil, errors.New("failed to delete connection configuration")
+ }
+
+ b.Lock()
+ defer b.Unlock()
+
+ if _, ok := b.connections[name]; ok {
+ err = b.connections[name].Close()
+ if err != nil {
+ return nil, err
+ }
+
+ delete(b.connections, name)
+ }
+
+ return nil, nil
+ }
+}
+
+// connectionWriteHandler returns a handler function for creating and updating
+// both builtin and plugin database types.
+func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ pluginName := data.Get("plugin_name").(string)
+ if pluginName == "" {
+ return logical.ErrorResponse(respErrEmptyPluginName), nil
+ }
+
+ name := data.Get("name").(string)
+ if name == "" {
+ return logical.ErrorResponse(respErrEmptyName), nil
+ }
+
+ verifyConnection := data.Get("verify_connection").(bool)
+
+ allowedRoles := data.Get("allowed_roles").([]string)
+
+ // Remove these entries from the data before we store it keyed under
+ // ConnectionDetails.
+ delete(data.Raw, "name")
+ delete(data.Raw, "plugin_name")
+ delete(data.Raw, "allowed_roles")
+ delete(data.Raw, "verify_connection")
+
+ config := &DatabaseConfig{
+ ConnectionDetails: data.Raw,
+ PluginName: pluginName,
+ AllowedRoles: allowedRoles,
+ }
+
+ db, err := dbplugin.PluginFactory(config.PluginName, b.System(), b.logger)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("error creating database object: %s", err)), nil
+ }
+
+ err = db.Initialize(config.ConnectionDetails, verifyConnection)
+ if err != nil {
+ db.Close()
+ return logical.ErrorResponse(fmt.Sprintf("error creating database object: %s", err)), nil
+ }
+
+ // Grab the mutex lock
+ b.Lock()
+ defer b.Unlock()
+
+ // Close and remove the old connection
+ b.clearConnection(name)
+
+ // Save the new connection
+ b.connections[name] = db
+
+ // Store it
+ entry, err := logical.StorageEntryJSON(fmt.Sprintf("config/%s", name), config)
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ resp := &logical.Response{}
+ resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any.")
+
+ return resp, nil
+ }
+}
+
+const pathConfigConnectionHelpSyn = `
+Configure connection details to a database plugin.
+`
+
+const pathConfigConnectionHelpDesc = `
+This path configures the connection details used to connect to a particular
+database. It runs an instance of the named plugin and passes the configured
+connection details to it. See the specified plugin's documentation for a full
+list of accepted connection details.
+
+In addition to the database-specific connection details, this endpoint also
+accepts:
+
+ * "plugin_name" (required) - The name of a builtin or previously registered
+ plugin known to Vault. This endpoint will create an instance of that
+ plugin type.
+
+ * "verify_connection" (default: true) - A boolean value denoting whether the
+ plugin should verify it is able to connect to the database using the
+ provided connection details.
+`
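+
+// A hypothetical configuration write against this endpoint, assuming the
+// backend is mounted at "database" and a "postgresql-database-plugin" is
+// registered in the catalog; "connection_url" stands in for the
+// plugin-specific connection details:
+//
+//   _, err := client.Logical().Write("database/config/my-postgres", map[string]interface{}{
+//       "plugin_name":       "postgresql-database-plugin",
+//       "verify_connection": true,
+//       "allowed_roles":     "readonly",
+//       "connection_url":    "postgresql://user:pass@localhost:5432/postgres",
+//   })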
+
+const pathResetConnectionHelpSyn = `
+Resets a database plugin.
+`
+
+const pathResetConnectionHelpDesc = `
+This path resets the database connection by closing the existing database plugin
+instance and running a new one.
+`
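+
+// For example, against the same hypothetical "database" mount:
+//
+//   _, err := client.Logical().Write("database/reset/my-postgres", map[string]interface{}{})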
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_creds_create.go
new file mode 100644
index 0000000..6fb61a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_creds_create.go
@@ -0,0 +1,112 @@
+package database
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathCredsCreate(b *databaseBackend) *framework.Path {
+ return &framework.Path{
+ Pattern: "creds/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathCredsCreateRead(),
+ },
+
+ HelpSynopsis: pathCredsCreateReadHelpSyn,
+ HelpDescription: pathCredsCreateReadHelpDesc,
+ }
+}
+
+func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+
+ // Get the role
+ role, err := b.Role(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
+ }
+
+ dbConfig, err := b.DatabaseConfig(req.Storage, role.DBName)
+ if err != nil {
+ return nil, err
+ }
+
+ // If role name isn't in the database's allowed roles, send back a
+ // permission denied.
+ if !strutil.StrListContains(dbConfig.AllowedRoles, "*") && !strutil.StrListContains(dbConfig.AllowedRoles, name) {
+ return nil, logical.ErrPermissionDenied
+ }
+
+ // Grab the read lock
+ b.RLock()
+ unlockFunc := b.RUnlock
+
+ // Get the Database object
+ db, ok := b.getDBObj(role.DBName)
+ if !ok {
+ // Upgrade lock
+ b.RUnlock()
+ b.Lock()
+ unlockFunc = b.Unlock
+
+ // Create a new DB object
+ db, err = b.createDBObj(req.Storage, role.DBName)
+ if err != nil {
+ unlockFunc()
+ return nil, fmt.Errorf("cound not retrieve db with name: %s, got error: %s", role.DBName, err)
+ }
+ }
+
+ expiration := time.Now().Add(role.DefaultTTL)
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: req.DisplayName,
+ RoleName: name,
+ }
+
+ // Create the user
+ username, password, err := db.CreateUser(role.Statements, usernameConfig, expiration)
+ // Unlock
+ unlockFunc()
+ if err != nil {
+ b.closeIfShutdown(role.DBName, err)
+ return nil, err
+ }
+
+ resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
+ "username": username,
+ "password": password,
+ }, map[string]interface{}{
+ "username": username,
+ "role": name,
+ })
+ resp.Secret.TTL = role.DefaultTTL
+ return resp, nil
+ }
+}
+
+const pathCredsCreateReadHelpSyn = `
+Request database credentials for a certain role.
+`
+
+const pathCredsCreateReadHelpDesc = `
+This path reads database credentials for a certain role. The
+database credentials will be generated on demand and will be automatically
+revoked when the lease is up.
+`
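+
+// A hypothetical read against this endpoint, assuming a "readonly" role on a
+// backend mounted at "database"; the generated credentials come back in the
+// response data:
+//
+//   secret, err := client.Logical().Read("database/creds/readonly")
+//   if err != nil {
+//       return err
+//   }
+//   username := secret.Data["username"].(string)
+//   password := secret.Data["password"].(string)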
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_roles.go
new file mode 100644
index 0000000..69884cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_roles.go
@@ -0,0 +1,232 @@
+package database
+
+import (
+ "time"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+func pathListRoles(b *databaseBackend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathRoleList(),
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func pathRoles(b *databaseBackend) *framework.Path {
+ return &framework.Path{
+ Pattern: "roles/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": {
+ Type: framework.TypeString,
+ Description: "Name of the role.",
+ },
+
+ "db_name": {
+ Type: framework.TypeString,
+ Description: "Name of the database this role acts on.",
+ },
+ "creation_statements": {
+ Type: framework.TypeString,
+ Description: `Specifies the database statements executed to
+ create and configure a user. See the plugin's API page for more
+ information on support and formatting for this parameter.`,
+ },
+ "revocation_statements": {
+ Type: framework.TypeString,
+ Description: `Specifies the database statements to be executed
+ to revoke a user. See the plugin's API page for more information
+ on support and formatting for this parameter.`,
+ },
+ "renew_statements": {
+ Type: framework.TypeString,
+ Description: `Specifies the database statements to be executed
+ to renew a user. Not every plugin type will support this
+ functionality. See the plugin's API page for more information on
+ support and formatting for this parameter. `,
+ },
+ "rollback_statements": {
+ Type: framework.TypeString,
+ Description: `Specifies the database statements to be executed to
+ roll back a create operation in the event of an error. Not every
+ plugin type will support this functionality. See the plugin's
+ API page for more information on support and formatting for this
+ parameter.`,
+ },
+
+ "default_ttl": {
+ Type: framework.TypeDurationSecond,
+ Description: "Default ttl for role.",
+ },
+
+ "max_ttl": {
+ Type: framework.TypeDurationSecond,
+ Description: "Maximum time a credential is valid for",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathRoleRead(),
+ logical.UpdateOperation: b.pathRoleCreate(),
+ logical.DeleteOperation: b.pathRoleDelete(),
+ },
+
+ HelpSynopsis: pathRoleHelpSyn,
+ HelpDescription: pathRoleHelpDesc,
+ }
+}
+
+func (b *databaseBackend) pathRoleDelete() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("role/" + data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+ }
+}
+
+func (b *databaseBackend) pathRoleRead() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ role, err := b.Role(req.Storage, data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "db_name": role.DBName,
+ "creation_statements": role.Statements.CreationStatements,
+ "revocation_statements": role.Statements.RevocationStatements,
+ "rollback_statements": role.Statements.RollbackStatements,
+ "renew_statements": role.Statements.RenewStatements,
+ "default_ttl": role.DefaultTTL.Seconds(),
+ "max_ttl": role.MaxTTL.Seconds(),
+ },
+ }, nil
+ }
+}
+
+func (b *databaseBackend) pathRoleList() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("role/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+ }
+}
+
+func (b *databaseBackend) pathRoleCreate() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+ if name == "" {
+ return logical.ErrorResponse("empty role name attribute given"), nil
+ }
+
+ dbName := data.Get("db_name").(string)
+ if dbName == "" {
+ return logical.ErrorResponse("empty database name attribute given"), nil
+ }
+
+ // Get statements
+ creationStmts := data.Get("creation_statements").(string)
+ revocationStmts := data.Get("revocation_statements").(string)
+ rollbackStmts := data.Get("rollback_statements").(string)
+ renewStmts := data.Get("renew_statements").(string)
+
+ // Get TTLs
+ defaultTTLRaw := data.Get("default_ttl").(int)
+ maxTTLRaw := data.Get("max_ttl").(int)
+ defaultTTL := time.Duration(defaultTTLRaw) * time.Second
+ maxTTL := time.Duration(maxTTLRaw) * time.Second
+
+ statements := dbplugin.Statements{
+ CreationStatements: creationStmts,
+ RevocationStatements: revocationStmts,
+ RollbackStatements: rollbackStmts,
+ RenewStatements: renewStmts,
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
+ DBName: dbName,
+ Statements: statements,
+ DefaultTTL: defaultTTL,
+ MaxTTL: maxTTL,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+ }
+}
+
+type roleEntry struct {
+ DBName string `json:"db_name" mapstructure:"db_name" structs:"db_name"`
+ Statements dbplugin.Statements `json:"statments" mapstructure:"statements" structs:"statments"`
+ DefaultTTL time.Duration `json:"default_ttl" mapstructure:"default_ttl" structs:"default_ttl"`
+ MaxTTL time.Duration `json:"max_ttl" mapstructure:"max_ttl" structs:"max_ttl"`
+}
+
+const pathRoleHelpSyn = `
+Manage the roles that can be created with this backend.
+`
+
+const pathRoleHelpDesc = `
+This path lets you manage the roles that can be created with this backend.
+
+The "db_name" parameter is required and configures the name of the database
+connection to use.
+
+The "creation_statements" parameter customizes the string used to create the
+credentials. This can be a sequence of SQL queries, or other statement formats
+for a particular database type. Certain variables in the statement strings
+are substituted before execution; their names must be surrounded by "{{" and
+"}}" to be replaced.
+
+ * "name" - The random username generated for the DB user.
+
+ * "password" - The random password generated for the DB user.
+
+ * "expiration" - The timestamp when this user will expire.
+
+Example of suitable creation_statements for a PostgreSQL database plugin:
+
+ CREATE ROLE "{{name}}" WITH
+ LOGIN
+ PASSWORD '{{password}}'
+ VALID UNTIL '{{expiration}}';
+ GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
+
+The "revocation_statements" parameter customizes the statement string used to
+revoke a user. Example of suitable revocation_statements for a PostgreSQL
+database plugin:
+
+ REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
+ REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}};
+ REVOKE USAGE ON SCHEMA public FROM {{name}};
+ DROP ROLE IF EXISTS {{name}};
+
+The "renew_statements" parameter customizes the statement string used to renew a
+user.
+The "rollback_statements' parameter customizes the statement string used to
+rollback a change if needed.
+`
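+
+// A hypothetical role write, reusing the PostgreSQL creation statement above
+// against a connection configured as "my-postgres":
+//
+//   _, err := client.Logical().Write("database/roles/readonly", map[string]interface{}{
+//       "db_name":             "my-postgres",
+//       "creation_statements": `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`,
+//       "default_ttl":         "1h",
+//       "max_ttl":             "24h",
+//   })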
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/secret_creds.go
new file mode 100644
index 0000000..c3dfcb9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/secret_creds.go
@@ -0,0 +1,139 @@
+package database
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+const SecretCredsType = "creds"
+
+func secretCreds(b *databaseBackend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretCredsType,
+ Fields: map[string]*framework.FieldSchema{},
+
+ Renew: b.secretCredsRenew(),
+ Revoke: b.secretCredsRevoke(),
+ }
+}
+
+func (b *databaseBackend) secretCredsRenew() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Get the username from the internal data
+ usernameRaw, ok := req.Secret.InternalData["username"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing username internal data")
+ }
+ username, ok := usernameRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("usernameRaw is not a string")
+ }
+
+ roleNameRaw, ok := req.Secret.InternalData["role"]
+ if !ok {
+ return nil, fmt.Errorf("could not find role with name: %s", req.Secret.InternalData["role"])
+ }
+
+ role, err := b.Role(req.Storage, roleNameRaw.(string))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, fmt.Errorf("error during renew: could not find role with name %s", req.Secret.InternalData["role"])
+ }
+
+ f := framework.LeaseExtend(role.DefaultTTL, role.MaxTTL, b.System())
+ resp, err := f(req, data)
+ if err != nil {
+ return nil, err
+ }
+
+ // Grab the read lock
+ b.RLock()
+ unlockFunc := b.RUnlock
+
+ // Get the Database object
+ db, ok := b.getDBObj(role.DBName)
+ if !ok {
+ // Upgrade lock
+ b.RUnlock()
+ b.Lock()
+ unlockFunc = b.Unlock
+
+ // Create a new DB object
+ db, err = b.createDBObj(req.Storage, role.DBName)
+ if err != nil {
+ unlockFunc()
+ return nil, fmt.Errorf("cound not retrieve db with name: %s, got error: %s", role.DBName, err)
+ }
+ }
+
+ // Make sure we increase the VALID UNTIL time for this user.
+ if expireTime := resp.Secret.ExpirationTime(); !expireTime.IsZero() {
+ err := db.RenewUser(role.Statements, username, expireTime)
+ // Unlock
+ unlockFunc()
+ if err != nil {
+ b.closeIfShutdown(role.DBName, err)
+ return nil, err
+ }
+ }
+
+ return resp, nil
+ }
+}
+
+func (b *databaseBackend) secretCredsRevoke() framework.OperationFunc {
+ return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Get the username from the internal data
+ usernameRaw, ok := req.Secret.InternalData["username"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing username internal data")
+ }
+ username, ok := usernameRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("usernameRaw is not a string")
+ }
+
+ var resp *logical.Response
+
+ roleNameRaw, ok := req.Secret.InternalData["role"]
+ if !ok {
+ return nil, fmt.Errorf("no role name was provided")
+ }
+
+ role, err := b.Role(req.Storage, roleNameRaw.(string))
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return nil, fmt.Errorf("error during revoke: could not find role with name %s", req.Secret.InternalData["role"])
+ }
+
+ // Grab the read lock
+ b.RLock()
+ unlockFunc := b.RUnlock
+
+ // Get our connection
+ db, ok := b.getDBObj(role.DBName)
+ if !ok {
+ // Upgrade lock
+ b.RUnlock()
+ b.Lock()
+ unlockFunc = b.Unlock
+
+ // Create a new DB object
+ db, err = b.createDBObj(req.Storage, role.DBName)
+ if err != nil {
+ unlockFunc()
+ return nil, fmt.Errorf("cound not retrieve db with name: %s, got error: %s", role.DBName, err)
+ }
+ }
+
+ err = db.RevokeUser(role.Statements, username)
+ // Unlock
+ unlockFunc()
+ if err != nil {
+ b.closeIfShutdown(role.DBName, err)
+ return nil, err
+ }
+
+ return resp, nil
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go
index e9f3b29..d850e8a 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go
@@ -12,7 +12,11 @@ import (
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend() *framework.Backend {
@@ -34,7 +38,8 @@ func Backend() *framework.Backend {
Clean: b.ResetSession,
- Invalidate: b.invalidate,
+ Invalidate: b.invalidate,
+ BackendType: logical.TypeLogical,
}
return b.Backend
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go
index 61afe75..ccd981b 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go
@@ -12,7 +12,11 @@ import (
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend() *backend {
@@ -32,9 +36,9 @@ func Backend() *backend {
secretCreds(&b),
},
- Invalidate: b.invalidate,
-
- Clean: b.ResetDB,
+ Invalidate: b.invalidate,
+ Clean: b.ResetDB,
+ BackendType: logical.TypeLogical,
}
return &b
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go
index 7ae0335..a89cc49 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go
@@ -12,7 +12,11 @@ import (
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend() *backend {
@@ -32,9 +36,9 @@ func Backend() *backend {
secretCreds(&b),
},
- Invalidate: b.invalidate,
-
- Clean: b.ResetDB,
+ Invalidate: b.invalidate,
+ Clean: b.ResetDB,
+ BackendType: logical.TypeLogical,
}
return &b
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go
index b8d6513..27c3bf8 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go
@@ -66,6 +66,9 @@ func (b *backend) secretCredsRevoke(
return nil, fmt.Errorf("secret is missing username internal data")
}
username, ok := usernameRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("usernameRaw is not a string")
+ }
// Get our connection
db, err := b.DB(req.Storage)
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go
index 6128028..bf5168d 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go
@@ -11,7 +11,11 @@ import (
// Factory creates a new backend implementing the logical.Backend interface
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
// Backend returns a new Backend framework struct
@@ -35,15 +39,22 @@ func Backend() *backend {
"crl",
"certs/",
},
+
+ Root: []string{
+ "root",
+ "root/sign-self-issued",
+ },
},
Paths: []*framework.Path{
pathListRoles(&b),
pathRoles(&b),
pathGenerateRoot(&b),
+ pathSignIntermediate(&b),
+ pathSignSelfIssued(&b),
+ pathDeleteRoot(&b),
pathGenerateIntermediate(&b),
pathSetSignedIntermediate(&b),
- pathSignIntermediate(&b),
pathConfigCA(&b),
pathConfigCRL(&b),
pathConfigURLs(&b),
@@ -64,6 +75,8 @@ func Backend() *backend {
Secrets: []*framework.Secret{
secretCerts(&b),
},
+
+ BackendType: logical.TypeLogical,
}
b.crlLifetime = time.Hour * 72
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go
index 4fefc95..7a32ec2 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go
@@ -1,6 +1,7 @@
package pki
import (
+ "bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
@@ -12,6 +13,7 @@ import (
"encoding/pem"
"fmt"
"math"
+ "math/big"
mathrand "math/rand"
"net"
"os"
@@ -22,10 +24,13 @@ import (
"time"
"github.com/fatih/structs"
+ "github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/certutil"
"github.com/hashicorp/vault/helper/strutil"
+ vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/hashicorp/vault/vault"
"github.com/mitchellh/mapstructure"
)
@@ -398,8 +403,8 @@ func checkCertsAndPrivateKey(keyType string, key crypto.Signer, usage x509.KeyUs
return nil, fmt.Errorf("Validity period not far enough in the past")
}
- if math.Abs(float64(time.Now().Add(validity).Unix()-cert.NotAfter.Unix())) > 10 {
- return nil, fmt.Errorf("Validity period of %d too large vs max of 10", cert.NotAfter.Unix())
+ if math.Abs(float64(time.Now().Add(validity).Unix()-cert.NotAfter.Unix())) > 20 {
+ return nil, fmt.Errorf("Certificate validity end: %s; expected within 20 seconds of %s", cert.NotAfter.Format(time.RFC3339), time.Now().Add(validity).Format(time.RFC3339))
}
return parsedCertBundle, nil
@@ -648,6 +653,11 @@ func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s
ErrorOk: true,
},
+ logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "root",
+ },
+
logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "root/generate/exported",
@@ -865,6 +875,11 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int
},
// Test a bunch of generation stuff
+ logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "root",
+ },
+
logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "root/generate/exported",
@@ -997,6 +1012,11 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int
},
// Do it all again, with EC keys and DER format
+ logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: "root",
+ },
+
logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "root/generate/exported",
@@ -1218,7 +1238,7 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int
Operation: logical.ReadOperation,
PreFlight: setSerialUnderTest,
Check: func(resp *logical.Response) error {
- if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
}
@@ -1232,7 +1252,7 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int
Operation: logical.ReadOperation,
PreFlight: setSerialUnderTest,
Check: func(resp *logical.Response) error {
- if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
}
@@ -1290,7 +1310,7 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int
Operation: logical.ReadOperation,
PreFlight: setSerialUnderTest,
Check: func(resp *logical.Response) error {
- if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
}
@@ -1304,7 +1324,7 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int
Operation: logical.ReadOperation,
PreFlight: setSerialUnderTest,
Check: func(resp *logical.Response) error {
- if resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
+ if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" {
return fmt.Errorf("got an error: %s", resp.Data["error"].(string))
}
@@ -1330,8 +1350,8 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int
Operation: logical.ReadOperation,
PreFlight: setSerialUnderTest,
Check: func(resp *logical.Response) error {
- if resp.Data["error"] == nil || resp.Data["error"].(string) == "" {
- return fmt.Errorf("didn't get an expected error")
+ if resp != nil {
+ return fmt.Errorf("expected no response")
}
serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string)
@@ -1344,8 +1364,8 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int
Operation: logical.ReadOperation,
PreFlight: setSerialUnderTest,
Check: func(resp *logical.Response) error {
- if resp.Data["error"] == nil || resp.Data["error"].(string) == "" {
- return fmt.Errorf("didn't get an expected error")
+ if resp != nil {
+ return fmt.Errorf("expected no response")
}
serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string)
@@ -1827,6 +1847,8 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep {
addTests(nil)
roleTestStep.ErrorOk = false
+ roleVals.TTL = ""
+ roleVals.MaxTTL = "12h"
}
// Listing test
@@ -1870,7 +1892,7 @@ func TestBackend_PathFetchCertList(t *testing.T) {
config.StorageView = storage
b := Backend()
- _, err := b.Setup(config)
+ err := b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -1997,7 +2019,7 @@ func TestBackend_SignVerbatim(t *testing.T) {
config.StorageView = storage
b := Backend()
- _, err := b.Setup(config)
+ err := b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -2108,12 +2130,31 @@ func TestBackend_SignVerbatim(t *testing.T) {
"ttl": "12h",
},
})
- if resp != nil && !resp.IsError() {
- t.Fatalf("sign-verbatim signed too-large-ttl'd CSR: %#v", *resp)
- }
if err != nil {
t.Fatal(err)
}
+ if resp != nil && resp.IsError() {
+ t.Fatal(resp.Error())
+ }
+ if resp.Data == nil || resp.Data["certificate"] == nil {
+ t.Fatal("did not get expected data")
+ }
+ certString := resp.Data["certificate"].(string)
+ block, _ := pem.Decode([]byte(certString))
+ if block == nil {
+ t.Fatal("nil pem block")
+ }
+ certs, err := x509.ParseCertificates(block.Bytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(certs) != 1 {
+ t.Fatalf("expected a single cert, got %d", len(certs))
+ }
+ cert := certs[0]
+ if math.Abs(float64(time.Now().Add(12*time.Hour).Unix()-cert.NotAfter.Unix())) < 10 {
+ t.Fatalf("sign-verbatim did not properly cap validiaty period on signed CSR")
+ }
// now check that if we set generate-lease it takes it from the role and the TTLs match
roleData = map[string]interface{}{
@@ -2156,6 +2197,522 @@ func TestBackend_SignVerbatim(t *testing.T) {
}
}
+func TestBackend_Root_Idempotency(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "pki": Factory,
+ },
+ }
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ client := cluster.Cores[0].Client
+ var err error
+ err = client.Sys().Mount("pki", &api.MountInput{
+ Type: "pki",
+ Config: api.MountConfigInput{
+ DefaultLeaseTTL: "16h",
+ MaxLeaseTTL: "32h",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{
+ "common_name": "myvault.com",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("expected ca info")
+ }
+ resp, err = client.Logical().Read("pki/cert/ca_chain")
+ if err != nil {
+ t.Fatalf("error reading ca_chain: %v", err)
+ }
+
+ r1Data := resp.Data
+
+ // Try again, make sure it's a 204 and same CA
+ resp, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{
+ "common_name": "myvault.com",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil {
+ t.Fatal("expected no ca info")
+ }
+ resp, err = client.Logical().Read("pki/cert/ca_chain")
+ if err != nil {
+ t.Fatalf("error reading ca_chain: %v", err)
+ }
+ r2Data := resp.Data
+ if !reflect.DeepEqual(r1Data, r2Data) {
+ t.Fatal("got different ca certs")
+ }
+
+ resp, err = client.Logical().Delete("pki/root")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil {
+ t.Fatal("expected nil response")
+ }
+ // Make sure it behaves the same
+ resp, err = client.Logical().Delete("pki/root")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil {
+ t.Fatal("expected nil response")
+ }
+
+ _, err = client.Logical().Read("pki/cert/ca_chain")
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ resp, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{
+ "common_name": "myvault.com",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("expected ca info")
+ }
+
+ _, err = client.Logical().Read("pki/cert/ca_chain")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackend_Permitted_DNS_Domains(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "pki": Factory,
+ },
+ }
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ client := cluster.Cores[0].Client
+ var err error
+ err = client.Sys().Mount("root", &api.MountInput{
+ Type: "pki",
+ Config: api.MountConfigInput{
+ DefaultLeaseTTL: "16h",
+ MaxLeaseTTL: "32h",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = client.Sys().Mount("int", &api.MountInput{
+ Type: "pki",
+ Config: api.MountConfigInput{
+ DefaultLeaseTTL: "4h",
+ MaxLeaseTTL: "20h",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Logical().Write("root/roles/example", map[string]interface{}{
+ "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com",
+ "allow_bare_domains": true,
+ "allow_subdomains": true,
+ "max_ttl": "2h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Logical().Write("int/roles/example", map[string]interface{}{
+ "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com",
+ "allow_subdomains": true,
+ "allow_bare_domains": true,
+ "max_ttl": "2h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Direct issuing from root
+ _, err = client.Logical().Write("root/root/generate/internal", map[string]interface{}{
+ "ttl": "40h",
+ "common_name": "myvault.com",
+ "permitted_dns_domains": []string{"foobar.com", ".zipzap.com"},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ clientKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ path := "root/"
+ checkIssue := func(valid bool, args ...interface{}) {
+ argMap := map[string]interface{}{}
+ var currString string
+ for i, arg := range args {
+ if i%2 == 0 {
+ currString = arg.(string)
+ } else {
+ argMap[currString] = arg
+ }
+ }
+ _, err = client.Logical().Write(path+"issue/example", argMap)
+ switch {
+ case valid && err != nil:
+ t.Fatal(err)
+ case !valid && err == nil:
+ t.Fatal("expected error")
+ }
+
+ csr, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{
+ Subject: pkix.Name{
+ CommonName: argMap["common_name"].(string),
+ },
+ }, clientKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ delete(argMap, "common_name")
+ argMap["csr"] = string(pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: csr,
+ }))
+
+ _, err = client.Logical().Write(path+"sign/example", argMap)
+ switch {
+ case valid && err != nil:
+ t.Fatal(err)
+ case !valid && err == nil:
+ t.Fatal("expected error")
+ }
+ }
+
+ // Check issuing and signing against root's permitted domains
+ checkIssue(false, "common_name", "zipzap.com")
+ checkIssue(false, "common_name", "host.foobar.com")
+ checkIssue(true, "common_name", "host.zipzap.com")
+ checkIssue(true, "common_name", "foobar.com")
+
+ // Verify that root also won't issue an intermediate outside of its permitted domains
+ resp, err := client.Logical().Write("int/intermediate/generate/internal", map[string]interface{}{
+ "common_name": "issuer.abc.com",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{
+ "common_name": "issuer.abc.com",
+ "csr": resp.Data["csr"],
+ "permitted_dns_domains": []string{"abc.com", ".xyz.com"},
+ "ttl": "5h",
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ _, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{
+ "use_csr_values": true,
+ "csr": resp.Data["csr"],
+ "permitted_dns_domains": []string{"abc.com", ".xyz.com"},
+ "ttl": "5h",
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ // Sign a valid intermediate
+ resp, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{
+ "common_name": "issuer.zipzap.com",
+ "csr": resp.Data["csr"],
+ "permitted_dns_domains": []string{"abc.com", ".xyz.com"},
+ "ttl": "5h",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ resp, err = client.Logical().Write("int/intermediate/set-signed", map[string]interface{}{
+ "certificate": resp.Data["certificate"],
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check enforcement with the intermediate's set values
+ path = "int/"
+ checkIssue(false, "common_name", "host.abc.com")
+ checkIssue(false, "common_name", "xyz.com")
+ checkIssue(true, "common_name", "abc.com")
+ checkIssue(true, "common_name", "host.xyz.com")
+}
+
+func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "pki": Factory,
+ },
+ }
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ client := cluster.Cores[0].Client
+ var err error
+ err = client.Sys().Mount("root", &api.MountInput{
+ Type: "pki",
+ Config: api.MountConfigInput{
+ DefaultLeaseTTL: "16h",
+ MaxLeaseTTL: "60h",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = client.Sys().Mount("int", &api.MountInput{
+ Type: "pki",
+ Config: api.MountConfigInput{
+ DefaultLeaseTTL: "4h",
+ MaxLeaseTTL: "20h",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Direct issuing from root
+ _, err = client.Logical().Write("root/root/generate/internal", map[string]interface{}{
+ "ttl": "40h",
+ "common_name": "myvault.com",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Logical().Write("root/roles/test", map[string]interface{}{
+ "allow_bare_domains": true,
+ "allow_subdomains": true,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := client.Logical().Write("int/intermediate/generate/internal", map[string]interface{}{
+ "common_name": "myint.com",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ csr := resp.Data["csr"]
+
+ _, err = client.Logical().Write("root/sign/test", map[string]interface{}{
+ "common_name": "myint.com",
+ "csr": csr,
+ "ttl": "60h",
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ _, err = client.Logical().Write("root/sign-verbatim/test", map[string]interface{}{
+ "common_name": "myint.com",
+ "csr": csr,
+ "ttl": "60h",
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+
+ resp, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{
+ "common_name": "myint.com",
+ "csr": csr,
+ "ttl": "60h",
+ })
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if resp == nil {
+ t.Fatal("got nil response")
+ }
+ if len(resp.Warnings) == 0 {
+ t.Fatalf("expected warnings, got %#v", *resp)
+ }
+}
+
+func TestBackend_SignSelfIssued(t *testing.T) {
+ // create the backend
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b := Backend()
+ err := b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // generate root
+ rootData := map[string]interface{}{
+ "common_name": "test.com",
+ "ttl": "172800",
+ }
+
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "root/generate/internal",
+ Storage: storage,
+ Data: rootData,
+ })
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to generate root, %#v", *resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ key, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ getSelfSigned := func(subject, issuer *x509.Certificate) (string, *x509.Certificate) {
+ selfSigned, err := x509.CreateCertificate(rand.Reader, subject, issuer, key.Public(), key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cert, err := x509.ParseCertificate(selfSigned)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pemSS := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: selfSigned,
+ })
+ return string(pemSS), cert
+ }
+
+ template := &x509.Certificate{
+ Subject: pkix.Name{
+ CommonName: "foo.bar.com",
+ },
+ SerialNumber: big.NewInt(1234),
+ IsCA: false,
+ BasicConstraintsValid: true,
+ }
+
+ ss, _ := getSelfSigned(template, template)
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "root/sign-self-issued",
+ Storage: storage,
+ Data: map[string]interface{}{
+ "certificate": ss,
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("got nil response")
+ }
+ if !resp.IsError() {
+ t.Fatalf("expected error due to non-CA; got: %#v", *resp)
+ }
+
+ // Set CA to true, but leave issuer alone
+ template.IsCA = true
+
+ issuer := &x509.Certificate{
+ Subject: pkix.Name{
+ CommonName: "bar.foo.com",
+ },
+ SerialNumber: big.NewInt(2345),
+ IsCA: true,
+ BasicConstraintsValid: true,
+ }
+ ss, ssCert := getSelfSigned(template, issuer)
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "root/sign-self-issued",
+ Storage: storage,
+ Data: map[string]interface{}{
+ "certificate": ss,
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("got nil response")
+ }
+ if !resp.IsError() {
+ t.Fatalf("expected error due to different issuer; cert info is\nIssuer\n%#v\nSubject\n%#v\n", ssCert.Issuer, ssCert.Subject)
+ }
+
+ ss, ssCert = getSelfSigned(template, template)
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "root/sign-self-issued",
+ Storage: storage,
+ Data: map[string]interface{}{
+ "certificate": ss,
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("got nil response")
+ }
+ if resp.IsError() {
+ t.Fatalf("error in response: %s", resp.Error().Error())
+ }
+
+ newCertString := resp.Data["certificate"].(string)
+ block, _ := pem.Decode([]byte(newCertString))
+ newCert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ signingBundle, err := fetchCAInfo(&logical.Request{Storage: storage})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if reflect.DeepEqual(newCert.Subject, newCert.Issuer) {
+ t.Fatal("expected different subject/issuer")
+ }
+ if !reflect.DeepEqual(newCert.Issuer, signingBundle.Certificate.Subject) {
+ t.Fatalf("expected matching issuer/CA subject\n\nIssuer:\n%#v\nSubject:\n%#v\n", newCert.Issuer, signingBundle.Certificate.Subject)
+ }
+ if bytes.Equal(newCert.AuthorityKeyId, newCert.SubjectKeyId) {
+ t.Fatal("expected different authority/subject")
+ }
+ if !bytes.Equal(newCert.AuthorityKeyId, signingBundle.Certificate.SubjectKeyId) {
+ t.Fatal("expected authority on new cert to be same as signing subject")
+ }
+ if newCert.Subject.CommonName != "foo.bar.com" {
+ t.Fatalf("unexpected common name on new cert: %s", newCert.Subject.CommonName)
+ }
+}
+
const (
rsaCAKey string = `-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAmPQlK7xD5p+E8iLQ8XlVmll5uU2NKMxKY3UF5tbh+0vkc+Fy
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go
index cab5797..7a6deda 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go
@@ -1,6 +1,8 @@
package pki
import (
+ "time"
+
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
@@ -27,7 +29,7 @@ func (b *backend) getGenerationParams(
}
role = &roleEntry{
- TTL: data.Get("ttl").(string),
+ TTL: (time.Duration(data.Get("ttl").(int)) * time.Second).String(),
KeyType: data.Get("key_type").(string),
KeyBits: data.Get("key_bits").(int),
AllowLocalhost: true,
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go
index 1796d98..b4bb381 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go
@@ -45,12 +45,13 @@ type creationBundle struct {
KeyType string
KeyBits int
SigningBundle *caInfoBundle
- TTL time.Duration
+ NotAfter time.Time
KeyUsage x509.KeyUsage
ExtKeyUsage certExtKeyUsage
// Only used when signing a CA cert
- UseCSRValues bool
+ UseCSRValues bool
+ PermittedDNSDomains []string
// URLs to encode into the certificate
URLs *urlEntries
@@ -434,6 +435,8 @@ func generateCert(b *backend,
if isCA {
creationBundle.IsCA = isCA
+ creationBundle.PermittedDNSDomains = data.Get("permitted_dns_domains").([]string)
+
if signingBundle == nil {
// Generating a self-signed root certificate
entries, err := getURLs(req)
@@ -581,6 +584,10 @@ func signCert(b *backend,
creationBundle.IsCA = isCA
creationBundle.UseCSRValues = useCSRValues
+ if isCA {
+ creationBundle.PermittedDNSDomains = data.Get("permitted_dns_domains").([]string)
+ }
+
parsedBundle, err := signCertificate(creationBundle, csr)
if err != nil {
return nil, err
@@ -720,54 +727,48 @@ func generateCreationBundle(b *backend,
}
}
- // Get the TTL and very it against the max allowed
- var ttlField string
+ // Get the TTL and verify it against the max allowed
var ttl time.Duration
var maxTTL time.Duration
- var ttlFieldInt interface{}
+ var notAfter time.Time
{
- ttlFieldInt, ok = data.GetOk("ttl")
- if !ok {
- ttlField = role.TTL
- } else {
- ttlField = ttlFieldInt.(string)
- }
+ ttl = time.Duration(data.Get("ttl").(int)) * time.Second
- if len(ttlField) == 0 {
- ttl = b.System().DefaultLeaseTTL()
- } else {
- ttl, err = parseutil.ParseDurationSecond(ttlField)
- if err != nil {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "invalid requested ttl: %s", err)}
+ if ttl == 0 {
+ if role.TTL != "" {
+ ttl, err = parseutil.ParseDurationSecond(role.TTL)
+ if err != nil {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "invalid role ttl: %s", err)}
+ }
}
}
- if len(role.MaxTTL) == 0 {
- maxTTL = b.System().MaxLeaseTTL()
- } else {
+ if role.MaxTTL != "" {
maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL)
if err != nil {
return nil, errutil.UserError{Err: fmt.Sprintf(
- "invalid ttl: %s", err)}
+ "invalid role max_ttl: %s", err)}
}
}
- if ttl > maxTTL {
- // Don't error if they were using system defaults, only error if
- // they specifically chose a bad TTL
- if len(ttlField) == 0 {
- ttl = maxTTL
- } else {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "ttl is larger than maximum allowed (%d)", maxTTL/time.Second)}
- }
+ if ttl == 0 {
+ ttl = b.System().DefaultLeaseTTL()
}
+ if maxTTL == 0 {
+ maxTTL = b.System().MaxLeaseTTL()
+ }
+ if ttl > maxTTL {
+ ttl = maxTTL
+ }
+
+ notAfter = time.Now().Add(ttl)
// If it's not self-signed, verify that the issued certificate won't be
// valid past the lifetime of the CA certificate
if signingBundle != nil &&
- time.Now().Add(ttl).After(signingBundle.Certificate.NotAfter) {
+ notAfter.After(signingBundle.Certificate.NotAfter) && !role.AllowExpirationPastCA {
+
return nil, errutil.UserError{Err: fmt.Sprintf(
"cannot satisfy request, as TTL is beyond the expiration of the CA certificate")}
}
@@ -800,7 +801,7 @@ func generateCreationBundle(b *backend,
KeyType: role.KeyType,
KeyBits: role.KeyBits,
SigningBundle: signingBundle,
- TTL: ttl,
+ NotAfter: notAfter,
KeyUsage: x509.KeyUsage(parseKeyUsages(role.KeyUsage)),
ExtKeyUsage: extUsage,
}
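
Note on the hunk above: TTL resolution now happens in one place — an explicit request ttl wins, then the role's ttl, then the system default — and the result is capped at the role (or system) max instead of returning an error as the removed branch did for an explicitly over-max ttl. A minimal standalone sketch of that precedence (the helper name is illustrative, not the vendored code):

	package main

	import (
		"fmt"
		"time"
	)

	// resolveNotAfter mirrors the order above: request ttl, then role ttl,
	// then system default; capped by role max ttl, then system max ttl.
	func resolveNotAfter(reqTTL, roleTTL, sysDefault, roleMax, sysMax time.Duration) time.Time {
		ttl := reqTTL
		if ttl == 0 {
			ttl = roleTTL
		}
		if ttl == 0 {
			ttl = sysDefault
		}
		max := roleMax
		if max == 0 {
			max = sysMax
		}
		if ttl > max {
			ttl = max
		}
		return time.Now().Add(ttl)
	}

	func main() {
		// 60h requested against a 32h max: silently capped, not rejected.
		fmt.Println(resolveNotAfter(60*time.Hour, 0, 16*time.Hour, 0, 32*time.Hour))
	}
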
@@ -893,7 +894,7 @@ func createCertificate(creationInfo *creationBundle) (*certutil.ParsedCertBundle
SerialNumber: serialNumber,
Subject: subject,
NotBefore: time.Now().Add(-30 * time.Second),
- NotAfter: time.Now().Add(creationInfo.TTL),
+ NotAfter: creationInfo.NotAfter,
IsCA: false,
SubjectKeyId: subjKeyID,
DNSNames: creationInfo.DNSNames,
@@ -906,6 +907,12 @@ func createCertificate(creationInfo *creationBundle) (*certutil.ParsedCertBundle
certTemplate.IsCA = true
}
+ // This will only be filled in from the generation paths
+ if len(creationInfo.PermittedDNSDomains) > 0 {
+ certTemplate.PermittedDNSDomains = creationInfo.PermittedDNSDomains
+ certTemplate.PermittedDNSDomainsCritical = true
+ }
+
addKeyUsages(creationInfo, certTemplate)
certTemplate.IssuingCertificateURL = creationInfo.URLs.IssuingCertificates
@@ -922,6 +929,12 @@ func createCertificate(creationInfo *creationBundle) (*certutil.ParsedCertBundle
}
caCert := creationInfo.SigningBundle.Certificate
+ certTemplate.AuthorityKeyId = caCert.SubjectKeyId
+
+ err = checkPermittedDNSDomains(certTemplate, caCert)
+ if err != nil {
+ return nil, errutil.UserError{Err: err.Error()}
+ }
certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, result.PrivateKey.Public(), creationInfo.SigningBundle.PrivateKey)
} else {
@@ -940,6 +953,7 @@ func createCertificate(creationInfo *creationBundle) (*certutil.ParsedCertBundle
certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256
}
+ certTemplate.AuthorityKeyId = subjKeyID
certTemplate.BasicConstraintsValid = true
certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey)
}
@@ -1047,6 +1061,8 @@ func signCertificate(creationInfo *creationBundle,
}
subjKeyID := sha1.Sum(marshaledKey)
+ caCert := creationInfo.SigningBundle.Certificate
+
subject := pkix.Name{
CommonName: creationInfo.CommonName,
OrganizationalUnit: creationInfo.OU,
@@ -1054,11 +1070,12 @@ func signCertificate(creationInfo *creationBundle,
}
certTemplate := &x509.Certificate{
- SerialNumber: serialNumber,
- Subject: subject,
- NotBefore: time.Now().Add(-30 * time.Second),
- NotAfter: time.Now().Add(creationInfo.TTL),
- SubjectKeyId: subjKeyID[:],
+ SerialNumber: serialNumber,
+ Subject: subject,
+ NotBefore: time.Now().Add(-30 * time.Second),
+ NotAfter: creationInfo.NotAfter,
+ SubjectKeyId: subjKeyID[:],
+ AuthorityKeyId: caCert.SubjectKeyId,
}
switch creationInfo.SigningBundle.PrivateKeyType {
@@ -1085,7 +1102,6 @@ func signCertificate(creationInfo *creationBundle,
addKeyUsages(creationInfo, certTemplate)
var certBytes []byte
- caCert := creationInfo.SigningBundle.Certificate
certTemplate.IssuingCertificateURL = creationInfo.URLs.IssuingCertificates
certTemplate.CRLDistributionPoints = creationInfo.URLs.CRLDistributionPoints
@@ -1106,6 +1122,15 @@ func signCertificate(creationInfo *creationBundle,
}
}
+ if len(creationInfo.PermittedDNSDomains) > 0 {
+ certTemplate.PermittedDNSDomains = creationInfo.PermittedDNSDomains
+ certTemplate.PermittedDNSDomainsCritical = true
+ }
+ err = checkPermittedDNSDomains(certTemplate, caCert)
+ if err != nil {
+ return nil, errutil.UserError{Err: err.Error()}
+ }
+
certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, csr.PublicKey, creationInfo.SigningBundle.PrivateKey)
if err != nil {
@@ -1122,3 +1147,39 @@ func signCertificate(creationInfo *creationBundle,
return result, nil
}
+
+func checkPermittedDNSDomains(template, ca *x509.Certificate) error {
+ if len(ca.PermittedDNSDomains) == 0 {
+ return nil
+ }
+
+ namesToCheck := map[string]struct{}{
+ template.Subject.CommonName: struct{}{},
+ }
+ for _, name := range template.DNSNames {
+ namesToCheck[name] = struct{}{}
+ }
+
+ var badName string
+NameCheck:
+ for name := range namesToCheck {
+ for _, perm := range ca.PermittedDNSDomains {
+ switch {
+ case strings.HasPrefix(perm, ".") && strings.HasSuffix(name, perm):
+ // .example.com matches my.host.example.com and
+ // host.example.com but does not match example.com
+ continue NameCheck
+ case perm == name:
+ continue NameCheck
+ }
+ }
+ badName = name
+ break
+ }
+
+ if badName == "" {
+ return nil
+ }
+
+ return fmt.Errorf("name %q disallowed by CA's permitted DNS domains", badName)
+}
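
The leading-dot convention in checkPermittedDNSDomains is easy to get backwards, so here is a hedged standalone sketch of just the matching rule (not the vendored function): ".zipzap.com" admits any subdomain but not the bare domain, while an entry without a leading dot must match exactly — the same expectations the checkIssue calls in the test above encode.

	package main

	import (
		"fmt"
		"strings"
	)

	// permitted reports whether name satisfies one of the CA's permitted DNS
	// domain entries, using the same two cases as checkPermittedDNSDomains.
	func permitted(name string, perms []string) bool {
		for _, perm := range perms {
			switch {
			case strings.HasPrefix(perm, ".") && strings.HasSuffix(name, perm):
				return true // ".zipzap.com" matches host.zipzap.com, not zipzap.com
			case perm == name:
				return true // "foobar.com" matches only foobar.com exactly
			}
		}
		return false
	}

	func main() {
		perms := []string{"foobar.com", ".zipzap.com"}
		for _, n := range []string{"foobar.com", "host.foobar.com", "zipzap.com", "host.zipzap.com"} {
			fmt.Printf("%-16s -> %v\n", n, permitted(n, perms))
		}
		// foobar.com -> true, host.foobar.com -> false,
		// zipzap.com -> false, host.zipzap.com -> true
	}
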
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go
index e97a970..52adf10 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go
@@ -59,7 +59,7 @@ email addresses.`,
}
fields["ttl"] = &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeDurationSecond,
Description: `The requested Time To Live for the certificate;
sets the expiration date. If not specified
the role default, backend default, or system
@@ -92,7 +92,7 @@ must still be specified in alt_names or ip_sans.`,
}
fields["ttl"] = &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeDurationSecond,
Description: `The requested Time To Live for the certificate;
sets the expiration date. If not specified
the role default, backend default, or system
@@ -144,5 +144,10 @@ func addCAIssueFields(fields map[string]*framework.FieldSchema) map[string]*fram
Description: "The maximum allowable path length",
}
+ fields["permitted_dns_domains"] = &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `Domains for which this certificate is allowed to sign or issue child certificates. If set, all DNS names (subject and alt) on child certs must be exact matches or subsets of the given domains (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`,
+ }
+
return fields
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go
index c182553..347ac01 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go
@@ -16,9 +16,7 @@ func pathConfigCA(b *backend) *framework.Path {
"pem_bundle": &framework.FieldSchema{
Type: framework.TypeString,
Description: `PEM-format, concatenated unencrypted
-secret key and certificate, or, if a
-CSR was generated with the "generate"
-endpoint, just the signed certificate.`,
+secret key and certificate.`,
},
},
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go
index ed60e75..cf71b4c 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go
@@ -159,7 +159,7 @@ func (b *backend) pathFetchRead(req *logical.Request, data *framework.FieldData)
caInfo, err := fetchCAInfo(req)
switch err.(type) {
case errutil.UserError:
- response = logical.ErrorResponse(funcErr.Error())
+ response = logical.ErrorResponse(err.Error())
goto reply
case errutil.InternalError:
retErr = err
@@ -189,7 +189,7 @@ func (b *backend) pathFetchRead(req *logical.Request, data *framework.FieldData)
}
}
if certEntry == nil {
- response = logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found", serial))
+ response = nil
goto reply
}
@@ -244,6 +244,11 @@ reply:
}
case retErr != nil:
response = nil
+ return
+ case response == nil:
+ return
+ case response.IsError():
+ return response, nil
default:
response.Data["certificate"] = string(certificate)
response.Data["revocation_time"] = revocationTime
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go
index 71a0455..2073621 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go
@@ -127,7 +127,7 @@ func (b *backend) pathSetSignedIntermediate(
cert := data.Get("certificate").(string)
if cert == "" {
- return logical.ErrorResponse("no certificate provided in the \"certficate\" parameter"), nil
+ return logical.ErrorResponse("no certificate provided in the \"certificate\" parameter"), nil
}
inputBundle, err := certutil.ParsePEMBundle(cert)
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go
index 26f7421..d7b0c36 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go
@@ -277,6 +277,15 @@ func (b *backend) pathIssueSignCert(
}
}
+ if useCSR {
+ if role.UseCSRCommonName && data.Get("common_name").(string) != "" {
+ resp.AddWarning("the common_name field was provided but the role is set with \"use_csr_common_name\" set to true")
+ }
+ if role.UseCSRSANs && data.Get("alt_names").(string) != "" {
+ resp.AddWarning("the alt_names field was provided but the role is set with \"use_csr_sans\" set to true")
+ }
+ }
+
return resp, nil
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go
index 4d9e115..96d0197 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go
@@ -35,7 +35,7 @@ func pathRoles(b *backend) *framework.Path {
},
"ttl": &framework.FieldSchema{
- Type: framework.TypeString,
+ Type: framework.TypeDurationSecond,
Default: "",
Description: `The lease duration if no specific lease duration is
requested. The lease duration controls the expiration
@@ -383,7 +383,7 @@ func (b *backend) pathRoleCreate(
entry := &roleEntry{
MaxTTL: data.Get("max_ttl").(string),
- TTL: data.Get("ttl").(string),
+ TTL: (time.Duration(data.Get("ttl").(int)) * time.Second).String(),
AllowLocalhost: data.Get("allow_localhost").(bool),
AllowedDomains: data.Get("allowed_domains").(string),
AllowBareDomains: data.Get("allow_bare_domains").(bool),
@@ -532,6 +532,9 @@ type roleEntry struct {
Organization string `json:"organization" structs:"organization" mapstructure:"organization"`
GenerateLease *bool `json:"generate_lease,omitempty" structs:"generate_lease,omitempty"`
NoStore bool `json:"no_store" structs:"no_store" mapstructure:"no_store"`
+
+ // Used internally for signing intermediates
+ AllowExpirationPastCA bool
}
const pathListRolesHelpSyn = `List the existing roles in this backend`
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go
index 82772b0..bd0aa90 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go
@@ -13,11 +13,7 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
var err error
b := Backend()
- _, err = b.Setup(config)
- if err != nil {
- t.Fatal(err)
- }
- err = b.Initialize()
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go
index d029531..438c92e 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go
@@ -1,9 +1,15 @@
package pki
import (
+ "crypto/rand"
+ "crypto/x509"
"encoding/base64"
+ "encoding/pem"
"fmt"
+ "reflect"
+ "time"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/errutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
@@ -28,6 +34,21 @@ func pathGenerateRoot(b *backend) *framework.Path {
return ret
}
+func pathDeleteRoot(b *backend) *framework.Path {
+ ret := &framework.Path{
+ Pattern: "root",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.DeleteOperation: b.pathCADeleteRoot,
+ },
+
+ HelpSynopsis: pathDeleteRootHelpSyn,
+ HelpDescription: pathDeleteRootHelpDesc,
+ }
+
+ return ret
+}
+
func pathSignIntermediate(b *backend) *framework.Path {
ret := &framework.Path{
Pattern: "root/sign-intermediate",
@@ -66,10 +87,45 @@ the non-repudiation flag.`,
return ret
}
+func pathSignSelfIssued(b *backend) *framework.Path {
+ ret := &framework.Path{
+ Pattern: "root/sign-self-issued",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathCASignSelfIssued,
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "certificate": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `PEM-format self-issued certificate to be signed.`,
+ },
+ },
+
+ HelpSynopsis: pathSignSelfIssuedHelpSyn,
+ HelpDescription: pathSignSelfIssuedHelpDesc,
+ }
+
+ return ret
+}
+
+func (b *backend) pathCADeleteRoot(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return nil, req.Storage.Delete("config/ca_bundle")
+}
+
func (b *backend) pathCAGenerateRoot(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
var err error
+ entry, err := req.Storage.Get("config/ca_bundle")
+ if err != nil {
+ return nil, err
+ }
+ if entry != nil {
+ return nil, nil
+ }
+
exported, format, role, errorResp := b.getGenerationParams(data)
if errorResp != nil {
return errorResp, nil
@@ -133,7 +189,7 @@ func (b *backend) pathCAGenerateRoot(
}
// Store it as the CA bundle
- entry, err := logical.StorageEntryJSON("config/ca_bundle", cb)
+ entry, err = logical.StorageEntryJSON("config/ca_bundle", cb)
if err != nil {
return nil, err
}
@@ -186,12 +242,13 @@ func (b *backend) pathCASignIntermediate(
}
role := &roleEntry{
- TTL: data.Get("ttl").(string),
- AllowLocalhost: true,
- AllowAnyName: true,
- AllowIPSANs: true,
- EnforceHostnames: false,
- KeyType: "any",
+ TTL: (time.Duration(data.Get("ttl").(int)) * time.Second).String(),
+ AllowLocalhost: true,
+ AllowAnyName: true,
+ AllowIPSANs: true,
+ EnforceHostnames: false,
+ KeyType: "any",
+ AllowExpirationPastCA: true,
}
if cn := data.Get("common_name").(string); len(cn) == 0 {
@@ -248,6 +305,10 @@ func (b *backend) pathCASignIntermediate(
},
}
+ if signingBundle.Certificate.NotAfter.Before(parsedBundle.Certificate.NotAfter) {
+ resp.AddWarning("The expiration time for the signed certificate is after the CA's expiration time. If the new certificate is not treated as a root, validation paths with the certificate past the issuing CA's expiration time will fail.")
+ }
+
switch format {
case "pem":
resp.Data["certificate"] = cb.Certificate
@@ -291,6 +352,75 @@ func (b *backend) pathCASignIntermediate(
return resp, nil
}
+func (b *backend) pathCASignSelfIssued(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var err error
+
+ certPem := data.Get("certificate").(string)
+ block, _ := pem.Decode([]byte(certPem))
+ if block == nil || len(block.Bytes) == 0 {
+ return logical.ErrorResponse("certificate could not be PEM-decoded"), nil
+ }
+ certs, err := x509.ParseCertificates(block.Bytes)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("error parsing certificate: %s", err)), nil
+ }
+ if len(certs) != 1 {
+ return logical.ErrorResponse(fmt.Sprintf("%d certificates found in PEM file, expected 1", len(certs))), nil
+ }
+
+ cert := certs[0]
+ if !cert.IsCA {
+ return logical.ErrorResponse("given certificate is not a CA certificate"), nil
+ }
+ if !reflect.DeepEqual(cert.Issuer, cert.Subject) {
+ return logical.ErrorResponse("given certificate is not self-issued"), nil
+ }
+
+ var caErr error
+ signingBundle, caErr := fetchCAInfo(req)
+ switch caErr.(type) {
+ case errutil.UserError:
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "could not fetch the CA certificate (was one set?): %s", caErr)}
+ case errutil.InternalError:
+ return nil, errutil.InternalError{Err: fmt.Sprintf(
+ "error fetching CA certificate: %s", caErr)}
+ }
+
+ signingCB, err := signingBundle.ToCertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("Error converting raw signing bundle to cert bundle: %s", err)
+ }
+
+ urls := &urlEntries{}
+ if signingBundle.URLs != nil {
+ urls = signingBundle.URLs
+ }
+ cert.IssuingCertificateURL = urls.IssuingCertificates
+ cert.CRLDistributionPoints = urls.CRLDistributionPoints
+ cert.OCSPServer = urls.OCSPServers
+
+ newCert, err := x509.CreateCertificate(rand.Reader, cert, signingBundle.Certificate, cert.PublicKey, signingBundle.PrivateKey)
+ if err != nil {
+ return nil, errwrap.Wrapf("error signing self-issued certificate: {{err}}", err)
+ }
+ if len(newCert) == 0 {
+ return nil, fmt.Errorf("nil cert was created when signing self-issued certificate")
+ }
+ pemCert := pem.EncodeToMemory(&pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: newCert,
+ })
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "certificate": string(pemCert),
+ "issuing_ca": signingCB.Certificate,
+ },
+ }, nil
+}
+
const pathGenerateRootHelpSyn = `
Generate a new CA certificate and private key used for signing.
`
@@ -299,10 +429,30 @@ const pathGenerateRootHelpDesc = `
See the API documentation for more information.
`
+const pathDeleteRootHelpSyn = `
+Deletes the root CA key to allow a new one to be generated.
+`
+
+const pathDeleteRootHelpDesc = `
+See the API documentation for more information.
+`
+
const pathSignIntermediateHelpSyn = `
Issue an intermediate CA certificate based on the provided CSR.
`
const pathSignIntermediateHelpDesc = `
-See the API documentation for more information.
+See the API documentation for more information.
+`
+
+const pathSignSelfIssuedHelpSyn = `
+Signs another CA's self-issued certificate.
+`
+
+const pathSignSelfIssuedHelpDesc = `
+Signs another CA's self-issued certificate. This is most often used for rolling roots; unless you know you need this you probably want to use sign-intermediate instead.
+
+Note that this is a very privileged operation and should be extremely restricted in terms of who is allowed to use it. All values will be taken directly from the incoming certificate and only verification that it is self-issued will be performed.
+
+Configured URLs for CRLs/OCSP/etc. will be copied over and the issuer will be this mount's CA cert. Other than that, all other values will be used verbatim.
`
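
As the help text says, root/sign-self-issued takes a PEM self-issued CA certificate and returns it re-signed by this mount's CA. A hedged client-side sketch using the same api client the tests use; the mount path "pki" and the newRootPEM variable are assumptions for illustration:

	import (
		"fmt"

		"github.com/hashicorp/vault/api"
	)

	// crossSignRoot submits another root's self-issued certificate and returns
	// the cross-signed PEM. Only self-issued CA certificates are accepted.
	func crossSignRoot(client *api.Client, newRootPEM string) (string, error) {
		resp, err := client.Logical().Write("pki/root/sign-self-issued", map[string]interface{}{
			"certificate": newRootPEM,
		})
		if err != nil {
			return "", err
		}
		cert, ok := resp.Data["certificate"].(string)
		if !ok {
			return "", fmt.Errorf("no certificate in response")
		}
		return cert, nil
	}
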
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go
index 6f4befd..4a689f8 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go
@@ -13,7 +13,11 @@ import (
)
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend(conf).Setup(conf)
+ b := Backend(conf)
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend(conf *logical.BackendConfig) *backend {
@@ -33,9 +37,9 @@ func Backend(conf *logical.BackendConfig) *backend {
secretCreds(&b),
},
- Clean: b.ResetDB,
-
- Invalidate: b.invalidate,
+ Clean: b.ResetDB,
+ Invalidate: b.invalidate,
+ BackendType: logical.TypeLogical,
}
b.logger = conf.Logger
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go
index 535d1c1..9c5010a 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go
@@ -41,7 +41,9 @@ func (b *backend) secretCredsRenew(
return nil, fmt.Errorf("secret is missing username internal data")
}
username, ok := usernameRaw.(string)
-
+ if !ok {
+ return nil, fmt.Errorf("usernameRaw is not a string")
+ }
// Get our connection
db, err := b.DB(req.Storage)
if err != nil {
@@ -92,7 +94,9 @@ func (b *backend) secretCredsRevoke(
return nil, fmt.Errorf("secret is missing username internal data")
}
username, ok := usernameRaw.(string)
-
+ if !ok {
+ return nil, fmt.Errorf("usernameRaw is not a string")
+ }
var revocationSQL string
var resp *logical.Response
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go
index 4f9cde0..1e3f1ec 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go
@@ -13,7 +13,11 @@ import (
// Factory creates and configures the backend
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
- return Backend().Setup(conf)
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
// Creates a new backend with all the paths and secrets belonging to it
@@ -34,9 +38,9 @@ func Backend() *backend {
secretCreds(&b),
},
- Clean: b.resetClient,
-
- Invalidate: b.invalidate,
+ Clean: b.resetClient,
+ Invalidate: b.invalidate,
+ BackendType: logical.TypeLogical,
}
return &b
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go
index a5c9983..4182fd4 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go
@@ -13,7 +13,7 @@ func TestBackend_config_lease_RU(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b := Backend()
- if _, err = b.Setup(config); err != nil {
+ if err = b.Setup(config); err != nil {
t.Fatal(err)
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go
index dcfb00d..c14685d 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go
@@ -2,6 +2,7 @@ package ssh
import (
"strings"
+ "sync"
"github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
@@ -10,8 +11,9 @@ import (
type backend struct {
*framework.Backend
- view logical.Storage
- salt *salt.Salt
+ view logical.Storage
+ salt *salt.Salt
+ saltMutex sync.RWMutex
}
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
@@ -19,7 +21,10 @@ func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
if err != nil {
return nil, err
}
- return b.Setup(conf)
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
}
func Backend(conf *logical.BackendConfig) (*backend, error) {
@@ -57,20 +62,42 @@ func Backend(conf *logical.BackendConfig) (*backend, error) {
secretOTP(&b),
},
- Init: b.Initialize,
+ Invalidate: b.invalidate,
+ BackendType: logical.TypeLogical,
}
return &b, nil
}
-func (b *backend) Initialize() error {
+func (b *backend) Salt() (*salt.Salt, error) {
+ b.saltMutex.RLock()
+ if b.salt != nil {
+ defer b.saltMutex.RUnlock()
+ return b.salt, nil
+ }
+ b.saltMutex.RUnlock()
+ b.saltMutex.Lock()
+ defer b.saltMutex.Unlock()
+ if b.salt != nil {
+ return b.salt, nil
+ }
salt, err := salt.NewSalt(b.view, &salt.Config{
HashFunc: salt.SHA256Hash,
+ Location: salt.DefaultLocation,
})
if err != nil {
- return err
+ return nil, err
}
b.salt = salt
- return nil
+ return salt, nil
+}
+
+func (b *backend) invalidate(key string) {
+ switch key {
+ case salt.DefaultLocation:
+ b.saltMutex.Lock()
+ defer b.saltMutex.Unlock()
+ b.salt = nil
+ }
}
const backendHelp = `
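
The new Salt() accessor replaces eager initialization with lazy, invalidation-aware loading: a read-locked fast path, then a write-locked re-check, since another goroutine may have built the salt between the two locks. A generic sketch of the idiom (the type and names are illustrative):

	package main

	import "sync"

	type lazy struct {
		mu  sync.RWMutex
		val []byte
	}

	func (l *lazy) get(build func() ([]byte, error)) ([]byte, error) {
		l.mu.RLock()
		if l.val != nil {
			v := l.val
			l.mu.RUnlock()
			return v, nil
		}
		l.mu.RUnlock()

		l.mu.Lock()
		defer l.mu.Unlock()
		if l.val != nil { // re-check: another goroutine may have won the race
			return l.val, nil
		}
		v, err := build()
		if err != nil {
			return nil, err
		}
		l.val = v
		return v, nil
	}

	func main() {
		var l lazy
		_, _ = l.get(func() ([]byte, error) { return []byte("salt"), nil })
	}
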
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go
index 538455c..139d24a 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go
@@ -106,7 +106,7 @@ func TestBackend_allowed_users(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -652,6 +652,94 @@ func TestBackend_OptionsOverrideDefaults(t *testing.T) {
logicaltest.Test(t, testCase)
}
+func TestBackend_CustomKeyIDFormat(t *testing.T) {
+ config := logical.TestBackendConfig()
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatalf("Cannot create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ configCaStep(),
+
+ createRoleStep("customrole", map[string]interface{}{
+ "key_type": "ca",
+ "key_id_format": "{{role_name}}-{{token_display_name}}-{{public_key_hash}}",
+ "allowed_users": "tuber",
+ "default_user": "tuber",
+ "allow_user_certificates": true,
+ "allowed_critical_options": "option,secondary",
+ "allowed_extensions": "extension,additional",
+ "default_critical_options": map[string]interface{}{
+ "option": "value",
+ },
+ "default_extensions": map[string]interface{}{
+ "extension": "extended",
+ },
+ }),
+
+ signCertificateStep("customrole", "customrole-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.UserCert, []string{"tuber"}, map[string]string{
+ "secondary": "value",
+ }, map[string]string{
+ "additional": "value",
+ }, 2*time.Hour, map[string]interface{}{
+ "public_key": publicKey2,
+ "ttl": "2h",
+ "critical_options": map[string]interface{}{
+ "secondary": "value",
+ },
+ "extensions": map[string]interface{}{
+ "additional": "value",
+ },
+ }),
+ },
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
+func TestBackend_DisallowUserProvidedKeyIDs(t *testing.T) {
+ config := logical.TestBackendConfig()
+
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatalf("Cannot create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ configCaStep(),
+
+ createRoleStep("testing", map[string]interface{}{
+ "key_type": "ca",
+ "allow_user_key_ids": false,
+ "allow_user_certificates": true,
+ }),
+ logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "sign/testing",
+ Data: map[string]interface{}{
+ "public_key": publicKey2,
+ "key_id": "override",
+ },
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["error"] != "setting key_id is not allowed by role" {
+ return errors.New("Custom user key id was allowed even when 'allow_user_key_ids' is false.")
+ }
+ return nil
+ },
+ },
+ },
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
func configCaStep() logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go
index cc0b17b..250ab4f 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go
@@ -17,7 +17,7 @@ func TestSSH_ConfigCAStorageUpgrade(t *testing.T) {
t.Fatal(err)
}
- _, err = b.Setup(config)
+ err = b.Setup(config)
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go
index e2b1e0c..53d55ed 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go
@@ -207,7 +207,12 @@ func (b *backend) GenerateSaltedOTP() (string, string, error) {
if err != nil {
return "", "", err
}
- return str, b.salt.SaltID(str), nil
+ salt, err := b.Salt()
+ if err != nil {
+ return "", "", err
+ }
+
+ return str, salt.SaltID(str), nil
}
// Generates an UUID OTP and creates an entry for the same in storage backend with its salted string.
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go
index b905115..6be96b6 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go
@@ -45,6 +45,7 @@ type sshRole struct {
AllowBareDomains bool `mapstructure:"allow_bare_domains" json:"allow_bare_domains"`
AllowSubdomains bool `mapstructure:"allow_subdomains" json:"allow_subdomains"`
AllowUserKeyIDs bool `mapstructure:"allow_user_key_ids" json:"allow_user_key_ids"`
+ KeyIDFormat string `mapstructure:"key_id_format" json:"key_id_format"`
}
func pathListRoles(b *backend) *framework.Path {
@@ -150,7 +151,7 @@ func pathRoles(b *backend) *framework.Path {
this list enforces it. If this field is set, then credentials
can only be created for default_user and usernames present in
this list. Setting this option will enable all the users with
- access this role to fetch credentials for all other usernames
+ access to this role to fetch credentials for all other usernames
in this list. Use with caution. N.B.: with the CA type, an empty
list means that no users are allowed; explicitly specify '*' to
allow any user.
@@ -213,7 +214,7 @@ func pathRoles(b *backend) *framework.Path {
have if none are provided when signing. This field takes in key
value pairs in JSON format. Note that these are not restricted
by "allowed_critical_options". Defaults to none.
-`,
+ `,
},
"default_extensions": &framework.FieldSchema{
Type: framework.TypeMap,
@@ -266,6 +267,16 @@ func pathRoles(b *backend) *framework.Path {
The key ID is logged by the SSH server and can be useful for auditing.
`,
},
+ "key_id_format": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ When supplied, this value specifies a custom format for the key id of a signed certificate.
+ The following variables are available for use: '{{token_display_name}}' - The display name of
+ the token used to make the request. '{{role_name}}' - The name of the role signing the request.
+ '{{public_key_hash}}' - A SHA256 checksum of the public key that is being signed.
+ `,
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -435,6 +446,7 @@ func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework
AllowBareDomains: data.Get("allow_bare_domains").(bool),
AllowSubdomains: data.Get("allow_subdomains").(bool),
AllowUserKeyIDs: data.Get("allow_user_key_ids").(bool),
+ KeyIDFormat: data.Get("key_id_format").(string),
KeyType: KeyTypeCA,
}
@@ -553,6 +565,7 @@ func (b *backend) pathRoleRead(req *logical.Request, d *framework.FieldData) (*l
"allow_bare_domains": role.AllowBareDomains,
"allow_subdomains": role.AllowSubdomains,
"allow_user_key_ids": role.AllowUserKeyIDs,
+ "key_id_format": role.KeyIDFormat,
"key_type": role.KeyType,
"default_critical_options": role.DefaultCriticalOptions,
"default_extensions": role.DefaultExtensions,
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go
index b5c2e0d..4d62f4a 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go
@@ -3,7 +3,6 @@ package ssh
import (
"crypto/rand"
"crypto/sha256"
- "encoding/hex"
"errors"
"fmt"
"strconv"
@@ -275,16 +274,22 @@ func (b *backend) calculateKeyId(data *framework.FieldData, req *logical.Request
return reqId, nil
}
- keyHash := sha256.Sum256(pubKey.Marshal())
- keyId := hex.EncodeToString(keyHash[:])
-
- if req.DisplayName != "" {
- keyId = fmt.Sprintf("%s-%s", req.DisplayName, keyId)
+ keyIDFormat := "vault-{{token_display_name}}-{{public_key_hash}}"
+ if req.DisplayName == "" {
+ keyIDFormat = "vault-{{public_key_hash}}"
}
- keyId = fmt.Sprintf("vault-%s", keyId)
+ if role.KeyIDFormat != "" {
+ keyIDFormat = role.KeyIDFormat
+ }
- return keyId, nil
+ keyID := substQuery(keyIDFormat, map[string]string{
+ "token_display_name": req.DisplayName,
+ "role_name": data.Get("role").(string),
+ "public_key_hash": fmt.Sprintf("%x", sha256.Sum256(pubKey.Marshal())),
+ })
+
+ return keyID, nil
}
func (b *backend) calculateCriticalOptions(data *framework.FieldData, role *sshRole) (map[string]string, error) {
@@ -383,7 +388,17 @@ func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.D
return ttl, nil
}
-func (b *creationBundle) sign() (*ssh.Certificate, error) {
+func (b *creationBundle) sign() (retCert *ssh.Certificate, retErr error) {
+ defer func() {
+ if r := recover(); r != nil {
+ errMsg, ok := r.(string)
+ if ok {
+ retCert = nil
+ retErr = errors.New(errMsg)
+ }
+ }
+ }()
+
serialNumber, err := certutil.GenerateSerialNumber()
if err != nil {
return nil, err
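
The sign() signature change above pairs named returns with a deferred recover, so a string panic from deep in certificate construction surfaces as an ordinary error instead of crashing the backend; non-string panics are still swallowed, as in the vendored code. A minimal sketch of the idiom (the wrapper is illustrative):

	package main

	import (
		"errors"
		"fmt"
	)

	func safeRun(f func()) (err error) {
		defer func() {
			if r := recover(); r != nil {
				if msg, ok := r.(string); ok {
					err = errors.New(msg) // only string panics become errors
				}
			}
		}()
		f()
		return nil
	}

	func main() {
		err := safeRun(func() { panic("unsupported key type") })
		fmt.Println(err) // unsupported key type
	}
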
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go
index 9cb98ad..1c5e453 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go
@@ -57,7 +57,11 @@ func (b *backend) pathVerifyWrite(req *logical.Request, d *framework.FieldData)
// Salt the OTP before the lookup, because the entry was stored under the
// salted value rather than the raw OTP. Salting yields the same value
// here because the seed, the backend salt, is the same.
- otpSalted := b.salt.SaltID(otp)
+ salt, err := b.Salt()
+ if err != nil {
+ return nil, err
+ }
+ otpSalted := salt.SaltID(otp)
// Return nil if there is no entry found for the OTP
otpEntry, err := b.getOTP(req.Storage, otpSalted)
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go
index d0e4dd5..cc8872b 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go
@@ -33,7 +33,11 @@ func (b *backend) secretOTPRevoke(req *logical.Request, d *framework.FieldData)
return nil, fmt.Errorf("secret is missing internal data")
}
- err := req.Storage.Delete("otp/" + b.salt.SaltID(otp))
+ salt, err := b.Salt()
+ if err != nil {
+ return nil, err
+ }
+ err = req.Storage.Delete("otp/" + salt.SaltID(otp))
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go
index c18ccaf..106c740 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go
@@ -163,6 +163,7 @@ func createSSHComm(logger log.Logger, username, ip string, port int, hostkey str
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
+ HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
connfunc := func() (net.Conn, error) {
@@ -211,3 +212,12 @@ func convertMapToStringValue(initial map[string]interface{}) map[string]string {
}
return result
}
+
+// Serve a template processor for custom format inputs
+func substQuery(tpl string, data map[string]string) string {
+ for k, v := range data {
+ tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
+ }
+
+ return tpl
+}
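
substQuery is plain string replacement, not text/template, so unknown {{variables}} are simply left in place. A hedged sketch of how calculateKeyId renders a role's key_id_format with it (the hash value is a made-up placeholder):

	package main

	import (
		"fmt"
		"strings"
	)

	func substQuery(tpl string, data map[string]string) string {
		for k, v := range data {
			tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
		}
		return tpl
	}

	func main() {
		keyID := substQuery("{{role_name}}-{{token_display_name}}-{{public_key_hash}}", map[string]string{
			"role_name":          "customrole",
			"token_display_name": "root",
			"public_key_hash":    "22608f5e...", // placeholder; normally a SHA-256 hex digest
		})
		fmt.Println(keyID) // customrole-root-22608f5e...
	}
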
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend.go
new file mode 100644
index 0000000..936b46b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend.go
@@ -0,0 +1,48 @@
+package totp
+
+import (
+ "strings"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ cache "github.com/patrickmn/go-cache"
+)
+
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: strings.TrimSpace(backendHelp),
+
+ Paths: []*framework.Path{
+ pathListKeys(&b),
+ pathKeys(&b),
+ pathCode(&b),
+ },
+
+ Secrets: []*framework.Secret{},
+ BackendType: logical.TypeLogical,
+ }
+
+ b.usedCodes = cache.New(0, 30*time.Second)
+
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+
+ usedCodes *cache.Cache
+}
+
+const backendHelp = `
+The TOTP backend dynamically generates time-based one-time use passwords.
+`
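
The usedCodes cache gives the backend a replay guard: go-cache's Add fails when the key already exists, so a code can be accepted at most once within its validity window. A hedged sketch of that mechanism; the key layout and window here are illustrative, not necessarily what path_code.go uses:

	package main

	import (
		"fmt"
		"time"

		cache "github.com/patrickmn/go-cache"
	)

	func main() {
		usedCodes := cache.New(0, 30*time.Second)

		accept := func(keyName, code string, window time.Duration) bool {
			// cache.Add returns an error when the entry already exists.
			return usedCodes.Add(keyName+"_"+code, nil, window) == nil
		}

		fmt.Println(accept("test", "123456", 30*time.Second)) // true: first use
		fmt.Println(accept("test", "123456", 30*time.Second)) // false: replay
	}
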
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend_test.go
new file mode 100644
index 0000000..a3304c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend_test.go
@@ -0,0 +1,1131 @@
+package totp
+
+import (
+ "fmt"
+ "log"
+ "net/url"
+ "path"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/logical"
+ logicaltest "github.com/hashicorp/vault/logical/testing"
+ "github.com/mitchellh/mapstructure"
+ otplib "github.com/pquerna/otp"
+ totplib "github.com/pquerna/otp/totp"
+)
+
+func createKey() (string, error) {
+ keyUrl, err := totplib.Generate(totplib.GenerateOpts{
+ Issuer: "Vault",
+ AccountName: "Test",
+ })
+
+ key := keyUrl.Secret()
+
+ return key, err
+}
+
+func generateCode(key string, period uint, digits otplib.Digits, algorithm otplib.Algorithm) (string, error) {
+ // Generate password using totp library
+ totpToken, err := totplib.GenerateCodeCustom(key, time.Now(), totplib.ValidateOpts{
+ Period: period,
+ Digits: digits,
+ Algorithm: algorithm,
+ })
+
+ return totpToken, err
+}
+
+func TestBackend_readCredentialsDefaultValues(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Generate a new shared key
+ key, _ := createKey()
+
+ keyData := map[string]interface{}{
+ "key": key,
+ "generate": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "",
+ "account_name": "",
+ "digits": otplib.DigitsSix,
+ "period": 30,
+ "algorithm": otplib.AlgorithmSHA1,
+ "key": key,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ testAccStepReadCreds(t, b, config.StorageView, "test", expected),
+ },
+ })
+}
+
+func TestBackend_readCredentialsEightDigitsThirtySecondPeriod(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Generate a new shared key
+ key, _ := createKey()
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "key": key,
+ "digits": 8,
+ "generate": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "digits": otplib.DigitsEight,
+ "period": 30,
+ "algorithm": otplib.AlgorithmSHA1,
+ "key": key,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ testAccStepReadCreds(t, b, config.StorageView, "test", expected),
+ },
+ })
+}
+
+func TestBackend_readCredentialsSixDigitsNinetySecondPeriod(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Generate a new shared key
+ key, _ := createKey()
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "key": key,
+ "period": 90,
+ "generate": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "digits": otplib.DigitsSix,
+ "period": 90,
+ "algorithm": otplib.AlgorithmSHA1,
+ "key": key,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ testAccStepReadCreds(t, b, config.StorageView, "test", expected),
+ },
+ })
+}
+
+func TestBackend_readCredentialsSHA256(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Generate a new shared key
+ key, _ := createKey()
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "key": key,
+ "algorithm": "SHA256",
+ "generate": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "digits": otplib.DigitsSix,
+ "period": 30,
+ "algorithm": otplib.AlgorithmSHA256,
+ "key": key,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ testAccStepReadCreds(t, b, config.StorageView, "test", expected),
+ },
+ })
+}
+
+func TestBackend_readCredentialsSHA512(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Generate a new shared key
+ key, _ := createKey()
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "key": key,
+ "algorithm": "SHA512",
+ "generate": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "digits": otplib.DigitsSix,
+ "period": 30,
+ "algorithm": otplib.AlgorithmSHA512,
+ "key": key,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ testAccStepReadCreds(t, b, config.StorageView, "test", expected),
+ },
+ })
+}
+
+func TestBackend_keyCrudDefaultValues(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ key, _ := createKey()
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "key": key,
+ "generate": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "digits": otplib.DigitsSix,
+ "period": 30,
+ "algorithm": otplib.AlgorithmSHA1,
+ "key": key,
+ }
+
+ code, _ := generateCode(key, 30, otplib.DigitsSix, otplib.AlgorithmSHA1)
+ invalidCode := "12345678"
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ testAccStepValidateCode(t, "test", code, true, false),
+ // Next step should fail because it should be in the used cache
+ testAccStepValidateCode(t, "test", code, false, true),
+ testAccStepValidateCode(t, "test", invalidCode, false, false),
+ testAccStepDeleteKey(t, "test"),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_createKeyMissingKeyValue(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "generate": false,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_createKeyInvalidKeyValue(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "key": "1",
+ "generate": false,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_createKeyInvalidAlgorithm(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Generate a new shared key
+ key, _ := createKey()
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "key": key,
+ "algorithm": "BADALGORITHM",
+ "generate": false,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_createKeyInvalidPeriod(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Generate a new shared key
+ key, _ := createKey()
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "key": key,
+ "period": -1,
+ "generate": false,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_createKeyInvalidDigits(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Generate a new shared key
+ key, _ := createKey()
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "key": key,
+ "digits": 20,
+ "generate": false,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_generatedKeyDefaultValues(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "generate": true,
+ "key_size": 20,
+ "exported": true,
+ "qr_size": 200,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "digits": otplib.DigitsSix,
+ "period": 30,
+ "algorithm": otplib.AlgorithmSHA1,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ },
+ })
+}
+
+func TestBackend_generatedKeyDefaultValuesNoQR(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "generate": true,
+ "key_size": 20,
+ "exported": true,
+ "qr_size": 0,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ },
+ })
+}
+
+func TestBackend_generatedKeyNonDefaultKeySize(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "generate": true,
+ "key_size": 10,
+ "exported": true,
+ "qr_size": 200,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "digits": otplib.DigitsSix,
+ "period": 30,
+ "algorithm": otplib.AlgorithmSHA1,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ },
+ })
+}
+
+func TestBackend_urlPassedNonGeneratedKeyInvalidPeriod(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=AZ"
+
+ keyData := map[string]interface{}{
+ "url": urlString,
+ "generate": false,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_urlPassedNonGeneratedKeyInvalidDigits(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=Q&period=60"
+
+ keyData := map[string]interface{}{
+ "url": urlString,
+ "generate": false,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_urlPassedNonGeneratedKeyIssuerInFirstPosition(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60"
+
+ keyData := map[string]interface{}{
+ "url": urlString,
+ "generate": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "test@email.com",
+ "digits": otplib.DigitsSix,
+ "period": 60,
+ "algorithm": otplib.AlgorithmSHA512,
+ "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ testAccStepReadCreds(t, b, config.StorageView, "test", expected),
+ },
+ })
+}
+
+func TestBackend_urlPassedNonGeneratedKeyIssuerInQueryString(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ urlString := "otpauth://totp/test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60&issuer=Vault"
+
+ keyData := map[string]interface{}{
+ "url": urlString,
+ "generate": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "test@email.com",
+ "digits": otplib.DigitsSix,
+ "period": 60,
+ "algorithm": otplib.AlgorithmSHA512,
+ "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ testAccStepReadCreds(t, b, config.StorageView, "test", expected),
+ },
+ })
+}
+
+func TestBackend_urlPassedNonGeneratedKeyMissingIssuer(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ urlString := "otpauth://totp/test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60"
+
+ keyData := map[string]interface{}{
+ "url": urlString,
+ "generate": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "",
+ "account_name": "test@email.com",
+ "digits": otplib.DigitsSix,
+ "period": 60,
+ "algorithm": otplib.AlgorithmSHA512,
+ "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ testAccStepReadCreds(t, b, config.StorageView, "test", expected),
+ },
+ })
+}
+
+func TestBackend_urlPassedNonGeneratedKeyMissingAccountName(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ urlString := "otpauth://totp/Vault:?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60"
+
+ keyData := map[string]interface{}{
+ "url": urlString,
+ "generate": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "",
+ "digits": otplib.DigitsSix,
+ "period": 60,
+ "algorithm": otplib.AlgorithmSHA512,
+ "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ testAccStepReadCreds(t, b, config.StorageView, "test", expected),
+ },
+ })
+}
+
+func TestBackend_urlPassedNonGeneratedKeyMissingAccountNameAndIssuer(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ urlString := "otpauth://totp/?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60"
+
+ keyData := map[string]interface{}{
+ "url": urlString,
+ "generate": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "",
+ "account_name": "",
+ "digits": otplib.DigitsSix,
+ "period": 60,
+ "algorithm": otplib.AlgorithmSHA512,
+ "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ testAccStepReadCreds(t, b, config.StorageView, "test", expected),
+ },
+ })
+}
+
+func TestBackend_generatedKeyInvalidSkew(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "skew": "2",
+ "generate": true,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_generatedKeyInvalidQRSize(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "qr_size": "-100",
+ "generate": true,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_generatedKeyInvalidKeySize(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "Test",
+ "key_size": "-100",
+ "generate": true,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_generatedKeyMissingAccountName(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "generate": true,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_generatedKeyMissingIssuer(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "account_name": "test@email.com",
+ "generate": true,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_invalidURLValue(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "url": "notaurl",
+ "generate": false,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_urlAndGenerateTrue(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "url": "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60",
+ "generate": true,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_keyAndGenerateTrue(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ",
+ "generate": true,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, true),
+ testAccStepReadKey(t, "test", nil),
+ },
+ })
+}
+
+func TestBackend_generatedKeyExportedFalse(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+ b, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ keyData := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "test@email.com",
+ "generate": true,
+ "exported": false,
+ }
+
+ expected := map[string]interface{}{
+ "issuer": "Vault",
+ "account_name": "test@email.com",
+ "digits": otplib.DigitsSix,
+ "period": 30,
+ "algorithm": otplib.AlgorithmSHA1,
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ Backend: b,
+ Steps: []logicaltest.TestStep{
+ testAccStepCreateKey(t, "test", keyData, false),
+ testAccStepReadKey(t, "test", expected),
+ },
+ })
+}
+
+func testAccStepCreateKey(t *testing.T, name string, keyData map[string]interface{}, expectFail bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: path.Join("keys", name),
+ Data: keyData,
+ ErrorOk: expectFail,
+ Check: func(resp *logical.Response) error {
+ // Skip these checks if the key is not generated by Vault or if the test is expected to fail
+ if !keyData["generate"].(bool) || expectFail {
+ return nil
+ }
+
+ // Check that no data is returned when exported is false
+ if !keyData["exported"].(bool) {
+ if resp != nil {
+ t.Fatalf("data was returned when exported was set to false")
+ }
+ return nil
+ }
+
+ // Check that no barcode is returned when qr_size is zero
+ if keyData["qr_size"].(int) == 0 {
+ if _, exists := resp.Data["barcode"]; exists {
+ t.Fatalf("a barcode was returned when qr_size was set to zero")
+ }
+ return nil
+ }
+
+ var d struct {
+ Url string `mapstructure:"url"`
+ Barcode string `mapstructure:"barcode"`
+ }
+
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ // Check that a barcode and url are returned
+ if d.Barcode == "" {
+ t.Fatalf("a barcode was not returned for a generated key")
+ }
+
+ if d.Url == "" {
+ t.Fatalf("a url was not returned for a generated key")
+ }
+
+ // Parse the url
+ urlObject, err := url.Parse(d.Url)
+ if err != nil {
+ t.Fatal("an error occurred while parsing the url string")
+ }
+
+ // Set up the query object
+ urlQuery := urlObject.Query()
+
+ // Read the secret
+ urlSecret := urlQuery.Get("secret")
+
+ // Check the key length; see the base32 sketch after this file's diff
+ keySize := keyData["key_size"].(int)
+ correctSecretStringSize := (keySize / 5) * 8
+ actualSecretStringSize := len(urlSecret)
+
+ if actualSecretStringSize != correctSecretStringSize {
+ t.Fatal("incorrect key string length")
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepDeleteKey(t *testing.T, name string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.DeleteOperation,
+ Path: path.Join("keys", name),
+ }
+}
+
+func testAccStepReadCreds(t *testing.T, b logical.Backend, s logical.Storage, name string, validation map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: path.Join("code", name),
+ Check: func(resp *logical.Response) error {
+ var d struct {
+ Code string `mapstructure:"code"`
+ }
+
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ log.Printf("[TRACE] Generated credentials: %v", d)
+
+ period := validation["period"].(int)
+ key := validation["key"].(string)
+ algorithm := validation["algorithm"].(otplib.Algorithm)
+ digits := validation["digits"].(otplib.Digits)
+
+ valid, _ := totplib.ValidateCustom(d.Code, key, time.Now(), totplib.ValidateOpts{
+ Period: uint(period),
+ Skew: 1,
+ Digits: digits,
+ Algorithm: algorithm,
+ })
+
+ if !valid {
+ t.Fatalf("generated code isn't valid")
+ }
+
+ return nil
+ },
+ }
+}
+
+func testAccStepReadKey(t *testing.T, name string, expected map[string]interface{}) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ReadOperation,
+ Path: "keys/" + name,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ if expected == nil {
+ return nil
+ }
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ Issuer string `mapstructure:"issuer"`
+ AccountName string `mapstructure:"account_name"`
+ Period uint `mapstructure:"period"`
+ Algorithm string `mapstructure:"algorithm"`
+ Digits otplib.Digits `mapstructure:"digits"`
+ }
+
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ var keyAlgorithm otplib.Algorithm
+ switch d.Algorithm {
+ case "SHA1":
+ keyAlgorithm = otplib.AlgorithmSHA1
+ case "SHA256":
+ keyAlgorithm = otplib.AlgorithmSHA256
+ case "SHA512":
+ keyAlgorithm = otplib.AlgorithmSHA512
+ }
+
+ period := expected["period"].(int)
+
+ switch {
+ case d.Issuer != expected["issuer"]:
+ return fmt.Errorf("issuer should equal: %s", expected["issuer"])
+ case d.AccountName != expected["account_name"]:
+ return fmt.Errorf("account_name should equal: %s", expected["account_name"])
+ case d.Period != uint(period):
+ return fmt.Errorf("period should equal: %d", expected["period"])
+ case keyAlgorithm != expected["algorithm"]:
+ return fmt.Errorf("algorithm should equal: %s", expected["algorithm"])
+ case d.Digits != expected["digits"]:
+ return fmt.Errorf("digits should equal: %d", expected["digits"])
+ }
+ return nil
+ },
+ }
+}
+
+func testAccStepValidateCode(t *testing.T, name string, code string, valid, expectError bool) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "code/" + name,
+ Data: map[string]interface{}{
+ "code": code,
+ },
+ ErrorOk: expectError,
+ Check: func(resp *logical.Response) error {
+ if resp == nil {
+ return fmt.Errorf("bad: %#v", resp)
+ }
+
+ var d struct {
+ Valid bool `mapstructure:"valid"`
+ }
+
+ if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ return err
+ }
+
+ switch valid {
+ case true:
+ if d.Valid != true {
+ return fmt.Errorf("code was not valid: %s", code)
+ }
+
+ default:
+ if d.Valid != false {
+ return fmt.Errorf("code was incorrectly validated: %s", code)
+ }
+ }
+ return nil
+ },
+ }
+}
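The secret-length check in testAccStepCreateKey above relies on base32 arithmetic: every 5 bytes of key material encode to 8 characters, so (key_size / 5) * 8 is exact only because the key sizes these tests use (20 and 10 bytes) are multiples of five. A minimal standalone sketch of that relationship, assuming an unpadded base32 encoding like the one otpauth secrets use:

package main

import (
    "encoding/base32"
    "fmt"
)

func main() {
    enc := base32.StdEncoding.WithPadding(base32.NoPadding)
    for _, keySize := range []int{10, 20} {
        // every 5 input bytes become exactly 8 base32 characters
        secret := enc.EncodeToString(make([]byte, keySize))
        fmt.Println(keySize, len(secret), len(secret) == (keySize/5)*8)
    }
}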
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_code.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_code.go
new file mode 100644
index 0000000..ebc3d47
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_code.go
@@ -0,0 +1,128 @@
+package totp
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ otplib "github.com/pquerna/otp"
+ totplib "github.com/pquerna/otp/totp"
+)
+
+func pathCode(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "code/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "Name of the key.",
+ },
+ "code": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: "TOTP code to be validated.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathReadCode,
+ logical.UpdateOperation: b.pathValidateCode,
+ },
+
+ HelpSynopsis: pathCodeHelpSyn,
+ HelpDescription: pathCodeHelpDesc,
+ }
+}
+
+func (b *backend) pathReadCode(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+
+ // Get the key
+ key, err := b.Key(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if key == nil {
+ return logical.ErrorResponse(fmt.Sprintf("unknown key: %s", name)), nil
+ }
+
+ // Generate password using totp library
+ totpToken, err := totplib.GenerateCodeCustom(key.Key, time.Now(), totplib.ValidateOpts{
+ Period: key.Period,
+ Digits: key.Digits,
+ Algorithm: key.Algorithm,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the secret
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "code": totpToken,
+ },
+ }, nil
+}
+
+func (b *backend) pathValidateCode(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+ code := data.Get("code").(string)
+
+ // Enforce input value requirements
+ if code == "" {
+ return logical.ErrorResponse("the code value is required"), nil
+ }
+
+ // Get the key's stored values
+ key, err := b.Key(req.Storage, name)
+ if err != nil {
+ return nil, err
+ }
+ if key == nil {
+ return logical.ErrorResponse(fmt.Sprintf("unknown key: %s", name)), nil
+ }
+
+ usedName := fmt.Sprintf("%s_%s", name, code)
+
+ _, ok := b.usedCodes.Get(usedName)
+ if ok {
+ return logical.ErrorResponse("code already used; wait until the next time period"), nil
+ }
+
+ valid, err := totplib.ValidateCustom(code, key.Key, time.Now(), totplib.ValidateOpts{
+ Period: key.Period,
+ Skew: key.Skew,
+ Digits: key.Digits,
+ Algorithm: key.Algorithm,
+ })
+ if err != nil && err != otplib.ErrValidateInputInvalidLength {
+ return logical.ErrorResponse("an error occured while validating the code"), err
+ }
+
+ // Take the key skew, add two to cover the periods behind and in front,
+ // and multiply that by the period to cover the full window in which the
+ // code could still be valid
+ err = b.usedCodes.Add(usedName, nil, time.Duration(
+ int64(time.Second)*
+ int64(key.Period)*
+ int64(2+key.Skew)))
+ if err != nil {
+ return nil, errwrap.Wrapf("error adding code to used cache: {{err}}", err)
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "valid": valid,
+ },
+ }, nil
+}
+
+const pathCodeHelpSyn = `
+Request a time-based one-time use password, or validate a password, for a certain key.
+`
+const pathCodeHelpDesc = `
+This path generates and validates time-based one-time use passwords for a certain key.
+
+`
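A short sketch of the used-code cache TTL computed in pathValidateCode above: with skew 1 a code can match the previous, current, or next period, so holding it for (2 + skew) * period seconds covers the entire window in which it could still validate. The numbers below are the backend's defaults, used here purely for illustration:

package main

import (
    "fmt"
    "time"
)

func main() {
    var period, skew uint = 30, 1 // default period, maximum allowed skew
    ttl := time.Duration(int64(time.Second) * int64(period) * int64(2+skew))
    fmt.Println(ttl) // 1m30s: three 30-second periods
}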
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_keys.go
new file mode 100644
index 0000000..3f36aef
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_keys.go
@@ -0,0 +1,424 @@
+package totp
+
+import (
+ "bytes"
+ "encoding/base32"
+ "encoding/base64"
+ "fmt"
+ "image/png"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ otplib "github.com/pquerna/otp"
+ totplib "github.com/pquerna/otp/totp"
+)
+
+func pathListKeys(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "keys/?$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathKeyList,
+ },
+
+ HelpSynopsis: pathKeyHelpSyn,
+ HelpDescription: pathKeyHelpDesc,
+ }
+}
+
+func pathKeys(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "keys/" + framework.GenericNameRegex("name"),
+ Fields: map[string]*framework.FieldSchema{
+ "name": {
+ Type: framework.TypeString,
+ Description: "Name of the key.",
+ },
+
+ "generate": {
+ Type: framework.TypeBool,
+ Default: false,
+ Description: "Determines if a key should be generated by Vault or if a key is being passed from another service.",
+ },
+
+ "exported": {
+ Type: framework.TypeBool,
+ Default: true,
+ Description: "Determines if a QR code and url are returned upon generating a key. Only used if generate is true.",
+ },
+
+ "key_size": {
+ Type: framework.TypeInt,
+ Default: 20,
+ Description: "Determines the size in bytes of the generated key. Only used if generate is true.",
+ },
+
+ "key": {
+ Type: framework.TypeString,
+ Description: "The shared master key used to generate a TOTP token. Only used if generate is false.",
+ },
+
+ "issuer": {
+ Type: framework.TypeString,
+ Description: `The name of the key's issuing organization. Required if generate is true.`,
+ },
+
+ "account_name": {
+ Type: framework.TypeString,
+ Description: `The name of the account associated with the key. Required if generate is true.`,
+ },
+
+ "period": {
+ Type: framework.TypeDurationSecond,
+ Default: 30,
+ Description: `The length of time used to generate a counter for the TOTP token calculation.`,
+ },
+
+ "algorithm": {
+ Type: framework.TypeString,
+ Default: "SHA1",
+ Description: `The hashing algorithm used to generate the TOTP token. Options include SHA1, SHA256 and SHA512.`,
+ },
+
+ "digits": {
+ Type: framework.TypeInt,
+ Default: 6,
+ Description: `The number of digits in the generated TOTP token. This value can either be 6 or 8.`,
+ },
+
+ "skew": {
+ Type: framework.TypeInt,
+ Default: 1,
+ Description: `The number of delay periods that are allowed when validating a TOTP token. This value can either be 0 or 1. Only used if generate is true.`,
+ },
+
+ "qr_size": {
+ Type: framework.TypeInt,
+ Default: 200,
+ Description: `The pixel size of the generated square QR code. Only used if generate is true and exported is true. If this value is 0, a QR code will not be returned.`,
+ },
+
+ "url": {
+ Type: framework.TypeString,
+ Description: `A TOTP url string containing all of the parameters for key setup. Only used if generate is false.`,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathKeyRead,
+ logical.UpdateOperation: b.pathKeyCreate,
+ logical.DeleteOperation: b.pathKeyDelete,
+ },
+
+ HelpSynopsis: pathKeyHelpSyn,
+ HelpDescription: pathKeyHelpDesc,
+ }
+}
+
+func (b *backend) Key(s logical.Storage, n string) (*keyEntry, error) {
+ entry, err := s.Get("key/" + n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result keyEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathKeyDelete(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ err := req.Storage.Delete("key/" + data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathKeyRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ key, err := b.Key(req.Storage, data.Get("name").(string))
+ if err != nil {
+ return nil, err
+ }
+ if key == nil {
+ return nil, nil
+ }
+
+ // Translate algorithm back to string
+ algorithm := key.Algorithm.String()
+
+ // Return values of key
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "issuer": key.Issuer,
+ "account_name": key.AccountName,
+ "period": key.Period,
+ "algorithm": algorithm,
+ "digits": key.Digits,
+ },
+ }, nil
+}
+
+func (b *backend) pathKeyList(
+ req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ entries, err := req.Storage.List("key/")
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(entries), nil
+}
+
+func (b *backend) pathKeyCreate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ name := data.Get("name").(string)
+ generate := data.Get("generate").(bool)
+ exported := data.Get("exported").(bool)
+ keyString := data.Get("key").(string)
+ issuer := data.Get("issuer").(string)
+ accountName := data.Get("account_name").(string)
+ period := data.Get("period").(int)
+ algorithm := data.Get("algorithm").(string)
+ digits := data.Get("digits").(int)
+ skew := data.Get("skew").(int)
+ qrSize := data.Get("qr_size").(int)
+ keySize := data.Get("key_size").(int)
+ inputURL := data.Get("url").(string)
+
+ if generate {
+ if keyString != "" {
+ return logical.ErrorResponse("a key should not be passed if generate is true"), nil
+ }
+ if inputURL != "" {
+ return logical.ErrorResponse("a url should not be passed if generate is true"), nil
+ }
+ }
+
+ // Read parameters from url if given
+ if inputURL != "" {
+ // Parse the url
+ urlObject, err := url.Parse(inputURL)
+ if err != nil {
+ return logical.ErrorResponse("an error occurred while parsing the url string"), err
+ }
+
+ // Set up the query object and split the label on the first colon
+ urlQuery := urlObject.Query()
+ path := strings.TrimPrefix(urlObject.Path, "/")
+ index := strings.Index(path, ":")
+
+ // Read the issuer; a query-string issuer takes precedence over one
+ // embedded in the label
+ urlIssuer := urlQuery.Get("issuer")
+ if urlIssuer != "" {
+ issuer = urlIssuer
+ } else if index != -1 {
+ issuer = path[:index]
+ }
+
+ // Read the account name
+ if index == -1 {
+ accountName = path
+ } else {
+ accountName = path[index+1:]
+ }
+
+ // Read the key string
+ keyString = urlQuery.Get("secret")
+
+ // Read the period
+ periodQuery := urlQuery.Get("period")
+ if periodQuery != "" {
+ periodInt, err := strconv.Atoi(periodQuery)
+ if err != nil {
+ return logical.ErrorResponse("an error occurred while parsing the period value in the url"), err
+ }
+ period = periodInt
+ }
+
+ // Read the digits
+ digitsQuery := urlQuery.Get("digits")
+ if digitsQuery != "" {
+ digitsInt, err := strconv.Atoi(digitsQuery)
+ if err != nil {
+ return logical.ErrorResponse("an error occurred while parsing the digits value in the url"), err
+ }
+ digits = digitsInt
+ }
+
+ // Read the algorithm
+ algorithmQuery := urlQuery.Get("algorithm")
+ if algorithmQuery != "" {
+ algorithm = algorithmQuery
+ }
+ }
+
+ // Translate digits and algorithm to a format the totp library understands
+ var keyDigits otplib.Digits
+ switch digits {
+ case 6:
+ keyDigits = otplib.DigitsSix
+ case 8:
+ keyDigits = otplib.DigitsEight
+ default:
+ return logical.ErrorResponse("the digits value can only be 6 or 8"), nil
+ }
+
+ var keyAlgorithm otplib.Algorithm
+ switch algorithm {
+ case "SHA1":
+ keyAlgorithm = otplib.AlgorithmSHA1
+ case "SHA256":
+ keyAlgorithm = otplib.AlgorithmSHA256
+ case "SHA512":
+ keyAlgorithm = otplib.AlgorithmSHA512
+ default:
+ return logical.ErrorResponse("the algorithm value is not valid"), nil
+ }
+
+ // Enforce input value requirements
+ if period <= 0 {
+ return logical.ErrorResponse("the period value must be greater than zero"), nil
+ }
+
+ switch skew {
+ case 0:
+ case 1:
+ default:
+ return logical.ErrorResponse("the skew value must be 0 or 1"), nil
+ }
+
+ // QR size can be zero but it shouldn't be negative
+ if qrSize < 0 {
+ return logical.ErrorResponse("the qr_size value must be greater than or equal to zero"), nil
+ }
+
+ if keySize <= 0 {
+ return logical.ErrorResponse("the key_size value must be greater than zero"), nil
+ }
+
+ // Period, Skew and Key Size need to be unsigned ints
+ uintPeriod := uint(period)
+ uintSkew := uint(skew)
+ uintKeySize := uint(keySize)
+
+ var response *logical.Response
+
+ switch generate {
+ case true:
+ // If the key is generated, Account Name and Issuer are required.
+ if accountName == "" {
+ return logical.ErrorResponse("the account_name value is required for generated keys"), nil
+ }
+
+ if issuer == "" {
+ return logical.ErrorResponse("the issuer value is required for generated keys"), nil
+ }
+
+ // Generate a new key
+ keyObject, err := totplib.Generate(totplib.GenerateOpts{
+ Issuer: issuer,
+ AccountName: accountName,
+ Period: uintPeriod,
+ Digits: keyDigits,
+ Algorithm: keyAlgorithm,
+ SecretSize: uintKeySize,
+ })
+ if err != nil {
+ return logical.ErrorResponse("an error occured while generating a key"), err
+ }
+
+ // Get key string value
+ keyString = keyObject.Secret()
+
+ // Skip returning the QR code and url if exported is set to false
+ if exported {
+ // Prepare the url and barcode
+ urlString := keyObject.String()
+
+ // Don't include a QR code if the size is set to zero
+ if qrSize == 0 {
+ response = &logical.Response{
+ Data: map[string]interface{}{
+ "url": urlString,
+ },
+ }
+ } else {
+ barcode, err := keyObject.Image(qrSize, qrSize)
+ if err != nil {
+ return logical.ErrorResponse("an error occured while generating a QR code image"), err
+ }
+
+ var buff bytes.Buffer
+ png.Encode(&buff, barcode)
+ b64Barcode := base64.StdEncoding.EncodeToString(buff.Bytes())
+ response = &logical.Response{
+ Data: map[string]interface{}{
+ "url": urlString,
+ "barcode": b64Barcode,
+ },
+ }
+ }
+ }
+ default:
+ if keyString == "" {
+ return logical.ErrorResponse("the key value is required"), nil
+ }
+
+ _, err := base32.StdEncoding.DecodeString(keyString)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf(
+ "invalid key value: %s", err)), nil
+ }
+ }
+
+ // Store it
+ entry, err := logical.StorageEntryJSON("key/"+name, &keyEntry{
+ Key: keyString,
+ Issuer: issuer,
+ AccountName: accountName,
+ Period: uintPeriod,
+ Algorithm: keyAlgorithm,
+ Digits: keyDigits,
+ Skew: uintSkew,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := req.Storage.Put(entry); err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+type keyEntry struct {
+ Key string `json:"key" mapstructure:"key" structs:"key"`
+ Issuer string `json:"issuer" mapstructure:"issuer" structs:"issuer"`
+ AccountName string `json:"account_name" mapstructure:"account_name" structs:"account_name"`
+ Period uint `json:"period" mapstructure:"period" structs:"period"`
+ Algorithm otplib.Algorithm `json:"algorithm" mapstructure:"algorithm" structs:"algorithm"`
+ Digits otplib.Digits `json:"digits" mapstructure:"digits" structs:"digits"`
+ Skew uint `json:"skew" mapstructure:"skew" structs:"skew"`
+}
+
+const pathKeyHelpSyn = `
+Manage the keys that can be created with this backend.
+`
+
+const pathKeyHelpDesc = `
+This path lets you manage the keys that can be created with this backend.
+
+`
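pathKeyCreate above accepts the issuer in either position the otpauth URL convention allows: embedded in the label ahead of the colon, or as an issuer query parameter, with the query parameter taking precedence when both are present. A standalone sketch of that label-splitting logic, run against a made-up URL:

package main

import (
    "fmt"
    "net/url"
    "strings"
)

func main() {
    u, err := url.Parse("otpauth://totp/Vault:test@email.com?secret=ABCDEFGH&issuer=Vault")
    if err != nil {
        panic(err)
    }
    label := strings.TrimPrefix(u.Path, "/")
    issuer, account := "", label
    if i := strings.Index(label, ":"); i != -1 {
        issuer, account = label[:i], label[i+1:]
    }
    // a query-string issuer overrides one embedded in the label
    if q := u.Query().Get("issuer"); q != "" {
        issuer = q
    }
    fmt.Println(issuer, account) // Vault test@email.com
}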
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go
index 37ebca4..db85ba1 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go
@@ -10,12 +10,10 @@ import (
func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
b := Backend(conf)
- be, err := b.Backend.Setup(conf)
- if err != nil {
+ if err := b.Setup(conf); err != nil {
return nil, err
}
-
- return be, nil
+ return b, nil
}
func Backend(conf *logical.BackendConfig) *backend {
@@ -40,9 +38,9 @@ func Backend(conf *logical.BackendConfig) *backend {
b.pathVerify(),
},
- Secrets: []*framework.Secret{},
-
- Invalidate: b.invalidate,
+ Secrets: []*framework.Secret{},
+ Invalidate: b.invalidate,
+ BackendType: logical.TypeLogical,
}
b.lm = keysutil.NewLockManager(conf.System.CachingDisabled())
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go
index 0f9d06f..a9c27bc 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go
@@ -31,7 +31,7 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
if b == nil {
t.Fatalf("failed to create backend")
}
- _, err := b.Backend.Setup(config)
+ err := b.Backend.Setup(config)
if err != nil {
t.Fatal(err)
}
@@ -129,7 +129,9 @@ func TestBackend_rotation(t *testing.T) {
testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory),
testAccStepDecrypt(t, "test", testPlaintext, decryptData),
testAccStepDeleteNotDisabledPolicy(t, "test"),
- testAccStepAdjustPolicy(t, "test", 3),
+ testAccStepAdjustPolicyMinDecryption(t, "test", 3),
+ testAccStepAdjustPolicyMinEncryption(t, "test", 4),
+ testAccStepReadPolicyWithVersions(t, "test", false, false, 3, 4),
testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory),
testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData),
testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory),
@@ -140,7 +142,8 @@ func TestBackend_rotation(t *testing.T) {
testAccStepDecrypt(t, "test", testPlaintext, decryptData),
testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory),
testAccStepDecrypt(t, "test", testPlaintext, decryptData),
- testAccStepAdjustPolicy(t, "test", 1),
+ testAccStepAdjustPolicyMinDecryption(t, "test", 1),
+ testAccStepReadPolicyWithVersions(t, "test", false, false, 1, 4),
testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory),
testAccStepDecrypt(t, "test", testPlaintext, decryptData),
testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory),
@@ -221,7 +224,7 @@ func testAccStepListPolicy(t *testing.T, name string, expectNone bool) logicalte
}
}
-func testAccStepAdjustPolicy(t *testing.T, name string, minVer int) logicaltest.TestStep {
+func testAccStepAdjustPolicyMinDecryption(t *testing.T, name string, minVer int) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "keys/" + name + "/config",
@@ -230,6 +233,15 @@ func testAccStepAdjustPolicy(t *testing.T, name string, minVer int) logicaltest.
},
}
}
+func testAccStepAdjustPolicyMinEncryption(t *testing.T, name string, minVer int) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.UpdateOperation,
+ Path: "keys/" + name + "/config",
+ Data: map[string]interface{}{
+ "min_encryption_version": minVer,
+ },
+ }
+}
func testAccStepDisableDeletion(t *testing.T, name string) logicaltest.TestStep {
return logicaltest.TestStep{
@@ -276,6 +288,10 @@ func testAccStepDeleteNotDisabledPolicy(t *testing.T, name string) logicaltest.T
}
func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool) logicaltest.TestStep {
+ return testAccStepReadPolicyWithVersions(t, name, expectNone, derived, 1, 0)
+}
+
+func testAccStepReadPolicyWithVersions(t *testing.T, name string, expectNone, derived bool, minDecryptionVersion int, minEncryptionVersion int) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
Path: "keys/" + name,
@@ -297,6 +313,8 @@ func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool)
KDF string `mapstructure:"kdf"`
DeletionAllowed bool `mapstructure:"deletion_allowed"`
ConvergentEncryption bool `mapstructure:"convergent_encryption"`
+ MinDecryptionVersion int `mapstructure:"min_decryption_version"`
+ MinEncryptionVersion int `mapstructure:"min_encryption_version"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return err
@@ -315,6 +333,12 @@ func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool)
if d.Keys == nil {
return fmt.Errorf("bad: %#v", d)
}
+ if d.MinDecryptionVersion != minDecryptionVersion {
+ return fmt.Errorf("bad: %#v", d)
+ }
+ if d.MinEncryptionVersion != minEncryptionVersion {
+ return fmt.Errorf("bad: %#v", d)
+ }
if d.DeletionAllowed == true {
return fmt.Errorf("bad: %#v", d)
}
@@ -610,7 +634,7 @@ func TestKeyUpgrade(t *testing.T) {
if p.Key != nil ||
p.Keys == nil ||
len(p.Keys) != 1 ||
- !reflect.DeepEqual(p.Keys[1].AESKey, key) {
+ !reflect.DeepEqual(p.Keys[1].Key, key) {
t.Errorf("bad key migration, result is %#v", p.Keys)
}
}
@@ -730,6 +754,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) {
"context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
}
resp, err = b.HandleRequest(req)
+ if err == nil {
+ t.Fatal("expected error, got nil")
+ }
if resp == nil {
t.Fatal("expected non-nil response")
}
@@ -755,6 +782,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) {
"context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
}
resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
if resp == nil {
t.Fatal("expected non-nil response")
}
@@ -764,6 +794,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) {
ciphertext1 := resp.Data["ciphertext"].(string)
resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
if resp == nil {
t.Fatal("expected non-nil response")
}
@@ -789,6 +822,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) {
}
resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
if resp == nil {
t.Fatal("expected non-nil response")
}
@@ -798,6 +834,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) {
ciphertext3 := resp.Data["ciphertext"].(string)
resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
if resp == nil {
t.Fatal("expected non-nil response")
}
@@ -820,6 +859,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) {
"context": "qV4h9iQyvn+raODOer4JNAsOhkXBwdT4HZ677Ql4KLqXSU+Jk4C/fXBWbv6xkSYT",
}
resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
if resp == nil {
t.Fatal("expected non-nil response")
}
@@ -829,6 +871,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) {
ciphertext5 := resp.Data["ciphertext"].(string)
resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
if resp == nil {
t.Fatal("expected non-nil response")
}
@@ -854,6 +899,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) {
"context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
}
resp, err = b.HandleRequest(req)
+ if err == nil {
+ t.Fatal("expected error, got nil")
+ }
if resp == nil {
t.Fatal("expected non-nil response")
}
@@ -868,6 +916,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) {
"context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S",
}
resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
if resp == nil {
t.Fatal("expected non-nil response")
}
@@ -877,6 +928,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) {
ciphertext7 := resp.Data["ciphertext"].(string)
resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
if resp == nil {
t.Fatal("expected non-nil response")
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go
index d2b3e5f..7cbd513 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go
@@ -19,7 +19,16 @@ func (b *backend) pathConfig() *framework.Path {
"min_decryption_version": &framework.FieldSchema{
Type: framework.TypeInt,
Description: `If set, the minimum version of the key allowed
-to be decrypted.`,
+to be decrypted. For signing keys, the minimum
+version allowed to be used for verification.`,
+ },
+
+ "min_encryption_version": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `If set, the minimum version of the key allowed
+to be used for encryption; or for signing keys,
+to be used for signing. If set to zero, only
+the latest version of the key is allowed.`,
},
"deletion_allowed": &framework.FieldSchema{
@@ -72,8 +81,7 @@ func (b *backend) pathConfigWrite(
resp.AddWarning("since Vault 0.3, transit key numbering starts at 1; forcing minimum to 1")
}
- if minDecryptionVersion > 0 &&
- minDecryptionVersion != p.MinDecryptionVersion {
+ if minDecryptionVersion != p.MinDecryptionVersion {
if minDecryptionVersion > p.LatestVersion {
return logical.ErrorResponse(
fmt.Sprintf("cannot set min decryption version of %d, latest key version is %d", minDecryptionVersion, p.LatestVersion)), nil
@@ -83,6 +91,32 @@ func (b *backend) pathConfigWrite(
}
}
+ minEncryptionVersionRaw, ok := d.GetOk("min_encryption_version")
+ if ok {
+ minEncryptionVersion := minEncryptionVersionRaw.(int)
+
+ if minEncryptionVersion < 0 {
+ return logical.ErrorResponse("min encryption version cannot be negative"), nil
+ }
+
+ if minEncryptionVersion != p.MinEncryptionVersion {
+ if minEncryptionVersion > p.LatestVersion {
+ return logical.ErrorResponse(
+ fmt.Sprintf("cannot set min encryption version of %d, latest key version is %d", minEncryptionVersion, p.LatestVersion)), nil
+ }
+ p.MinEncryptionVersion = minEncryptionVersion
+ persistNeeded = true
+ }
+ }
+
+ // Check here to get the final picture after the logic on each
+ // individually. MinDecryptionVersion will always be 1 or above.
+ if p.MinEncryptionVersion > 0 &&
+ p.MinEncryptionVersion < p.MinDecryptionVersion {
+ return logical.ErrorResponse(
+ fmt.Sprintf("cannot set min encryption/decryption values; min encryption version of %d must be greater than or equal to min decryption version of %d", p.MinEncryptionVersion, p.MinDecryptionVersion)), nil
+ }
+
allowDeletionInt, ok := d.GetOk("deletion_allowed")
if ok {
allowDeletion := allowDeletionInt.(bool)
@@ -104,7 +138,7 @@ func (b *backend) pathConfigWrite(
return nil, nil
}
- if len(resp.Warnings()) == 0 {
+ if len(resp.Warnings) == 0 {
return nil, p.Persist(req.Storage)
}
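The final cross-check in pathConfigWrite above enforces a window invariant: a nonzero min_encryption_version may not fall below min_decryption_version, because the key could otherwise produce ciphertext it is no longer permitted to decrypt. A sketch of the resulting version windows, with purely illustrative numbers:

package main

import "fmt"

func main() {
    latest, minDecryption, minEncryption := 5, 2, 3

    // decryption is allowed for versions [minDecryption, latest];
    // encryption for [minEncryption, latest] (0 would mean latest only)
    fmt.Printf("decrypt with v%d..v%d\n", minDecryption, latest)
    fmt.Printf("encrypt with v%d..v%d\n", minEncryption, latest)

    if minEncryption > 0 && minEncryption < minDecryption {
        fmt.Println("invalid: would allow ciphertext that can never be decrypted")
    }
}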
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config_test.go
new file mode 100644
index 0000000..6819710
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config_test.go
@@ -0,0 +1,223 @@
+package transit
+
+import (
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestTransit_ConfigSettings(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ doReq := func(req *logical.Request) *logical.Response {
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("got err:\n%#v\nreq:\n%#v\n", err, *req)
+ }
+ return resp
+ }
+ doErrReq := func(req *logical.Request) {
+ resp, err := b.HandleRequest(req)
+ if err == nil {
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("expected error; req:\n%#v\n", *req)
+ }
+ }
+ }
+
+ // First create a key
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/aes",
+ Data: map[string]interface{}{
+ "derived": true,
+ },
+ }
+ doReq(req)
+
+ req.Path = "keys/ed"
+ req.Data["type"] = "ed25519"
+ doReq(req)
+
+ delete(req.Data, "derived")
+
+ req.Path = "keys/p256"
+ req.Data["type"] = "ecdsa-p256"
+ doReq(req)
+
+ delete(req.Data, "type")
+
+ req.Path = "keys/aes/rotate"
+ doReq(req)
+ doReq(req)
+ doReq(req)
+ doReq(req)
+
+ req.Path = "keys/ed/rotate"
+ doReq(req)
+ doReq(req)
+ doReq(req)
+ doReq(req)
+
+ req.Path = "keys/p256/rotate"
+ doReq(req)
+ doReq(req)
+ doReq(req)
+ doReq(req)
+
+ req.Path = "keys/aes/config"
+ // Too high
+ req.Data["min_decryption_version"] = 7
+ doErrReq(req)
+ // Too low
+ req.Data["min_decryption_version"] = -1
+ doErrReq(req)
+
+ delete(req.Data, "min_decryption_version")
+ // Too high
+ req.Data["min_encryption_version"] = 7
+ doErrReq(req)
+ // Too low
+ req.Data["min_encryption_version"] = -1
+ doErrReq(req)
+
+ // Not allowed, cannot decrypt
+ req.Data["min_decryption_version"] = 3
+ req.Data["min_encryption_version"] = 2
+ doErrReq(req)
+
+ // Allowed
+ req.Data["min_decryption_version"] = 2
+ req.Data["min_encryption_version"] = 3
+ doReq(req)
+ req.Path = "keys/ed/config"
+ doReq(req)
+ req.Path = "keys/p256/config"
+ doReq(req)
+
+ req.Data = map[string]interface{}{
+ "plaintext": "abcd",
+ "context": "abcd",
+ }
+
+ maxKeyVersion := 5
+ key := "aes"
+
+ testHMAC := func(ver int, valid bool) {
+ req.Path = "hmac/" + key
+ delete(req.Data, "hmac")
+ if ver == maxKeyVersion {
+ delete(req.Data, "key_version")
+ } else {
+ req.Data["key_version"] = ver
+ }
+
+ if !valid {
+ doErrReq(req)
+ return
+ }
+
+ resp := doReq(req)
+ ct := resp.Data["hmac"].(string)
+ if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) {
+ t.Fatal("wrong hmac version")
+ }
+
+ req.Path = "verify/" + key
+ delete(req.Data, "key_version")
+ req.Data["hmac"] = resp.Data["hmac"]
+ doReq(req)
+ }
+
+ testEncryptDecrypt := func(ver int, valid bool) {
+ req.Path = "encrypt/" + key
+ delete(req.Data, "ciphertext")
+ if ver == maxKeyVersion {
+ delete(req.Data, "key_version")
+ } else {
+ req.Data["key_version"] = ver
+ }
+
+ if !valid {
+ doErrReq(req)
+ return
+ }
+
+ resp := doReq(req)
+ ct := resp.Data["ciphertext"].(string)
+ if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) {
+ t.Fatal("wrong encryption version")
+ }
+
+ req.Path = "decrypt/" + key
+ delete(req.Data, "key_version")
+ req.Data["ciphertext"] = resp.Data["ciphertext"]
+ doReq(req)
+ }
+ testEncryptDecrypt(5, true)
+ testEncryptDecrypt(4, true)
+ testEncryptDecrypt(3, true)
+ testEncryptDecrypt(2, false)
+ testHMAC(5, true)
+ testHMAC(4, true)
+ testHMAC(3, true)
+ testHMAC(2, false)
+
+ delete(req.Data, "plaintext")
+ req.Data["input"] = "abcd"
+ key = "ed"
+ testSignVerify := func(ver int, valid bool) {
+ req.Path = "sign/" + key
+ delete(req.Data, "signature")
+ if ver == maxKeyVersion {
+ delete(req.Data, "key_version")
+ } else {
+ req.Data["key_version"] = ver
+ }
+
+ if !valid {
+ doErrReq(req)
+ return
+ }
+
+ resp := doReq(req)
+ ct := resp.Data["signature"].(string)
+ if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) {
+ t.Fatal("wrong signature version")
+ }
+
+ req.Path = "verify/" + key
+ delete(req.Data, "key_version")
+ req.Data["signature"] = resp.Data["signature"]
+ doReq(req)
+ }
+ testSignVerify(5, true)
+ testSignVerify(4, true)
+ testSignVerify(3, true)
+ testSignVerify(2, false)
+ testHMAC(5, true)
+ testHMAC(4, true)
+ testHMAC(3, true)
+ testHMAC(2, false)
+
+ delete(req.Data, "context")
+ key = "p256"
+ testSignVerify(5, true)
+ testSignVerify(4, true)
+ testSignVerify(3, true)
+ testSignVerify(2, false)
+ testHMAC(5, true)
+ testHMAC(4, true)
+ testHMAC(3, true)
+ testHMAC(2, false)
+}
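The testHMAC, testEncryptDecrypt, and testSignVerify helpers above all recover the key version from the second colon-separated field of the output, since transit renders its values as vault:vN:<payload>. A tiny sketch of that check against a made-up ciphertext:

package main

import (
    "fmt"
    "strings"
)

func main() {
    ct := "vault:v3:2b8kD0EI..." // illustrative ciphertext, not real output
    if ver := strings.Split(ct, ":")[1]; ver == "v3" {
        fmt.Println("produced by key version 3")
    }
}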
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go
index 36c6aea..7af1a03 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go
@@ -32,7 +32,7 @@ ciphertext; "wrapped" will return the ciphertext only.`,
"nonce": &framework.FieldSchema{
Type: framework.TypeString,
- Description: "Nonce for when convergent encryption is used",
+ Description: "Nonce for when convergent encryption v1 is used (only in Vault 0.6.1)",
},
"bits": &framework.FieldSchema{
@@ -41,6 +41,14 @@ ciphertext; "wrapped" will return the ciphertext only.`,
and 512 bits are supported. Defaults to 256.`,
Default: 256,
},
+
+ "key_version": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `The version of the Vault key to use for
+encryption of the data key. Must be 0 (for latest)
+or a value greater than or equal to the
+min_encryption_version configured on the key.`,
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -55,6 +63,7 @@ and 512 bits are supported. Defaults to 256.`,
func (b *backend) pathDatakeyWrite(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
name := d.Get("name").(string)
+ ver := d.Get("key_version").(int)
plaintext := d.Get("plaintext").(string)
plaintextAllowed := false
@@ -97,7 +106,7 @@ func (b *backend) pathDatakeyWrite(
return nil, err
}
if p == nil {
- return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
}
newKey := make([]byte, 32)
@@ -116,7 +125,7 @@ func (b *backend) pathDatakeyWrite(
return nil, err
}
- ciphertext, err := p.Encrypt(context, nonce, base64.StdEncoding.EncodeToString(newKey))
+ ciphertext, err := p.Encrypt(ver, context, nonce, base64.StdEncoding.EncodeToString(newKey))
if err != nil {
switch err.(type) {
case errutil.UserError:
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go
index c66931d..9750beb 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go
@@ -119,7 +119,7 @@ func (b *backend) pathDecryptWrite(
return nil, err
}
if p == nil {
- return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
}
for i, item := range batchInputItems {
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go
index b4281d6..3b60198 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go
@@ -29,6 +29,9 @@ type BatchRequestItem struct {
// Nonce to be used when v1 convergent encryption is used
Nonce string `json:"nonce" structs:"nonce" mapstructure:"nonce"`
+ // The key version to be used for encryption
+ KeyVersion int `json:"key_version" structs:"key_version" mapstructure:"key_version"`
+
// DecodedNonce is the base64 decoded version of Nonce
DecodedNonce []byte
}
@@ -100,6 +103,13 @@ same ciphertext is generated. It is *very important* when using this mode that
you ensure that all nonces are unique for a given context. Failing to do so
will severely impact the ciphertext's security.`,
},
+
+ "key_version": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `The version of the key to use for encryption.
+Must be 0 (for latest) or a value greater than or equal
+to the min_encryption_version configured on the key.`,
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -151,9 +161,10 @@ func (b *backend) pathEncryptWrite(
batchInputItems = make([]BatchRequestItem, 1)
batchInputItems[0] = BatchRequestItem{
- Plaintext: valueRaw.(string),
- Context: d.Get("context").(string),
- Nonce: d.Get("nonce").(string),
+ Plaintext: valueRaw.(string),
+ Context: d.Get("context").(string),
+ Nonce: d.Get("nonce").(string),
+ KeyVersion: d.Get("key_version").(int),
}
}
@@ -233,7 +244,7 @@ func (b *backend) pathEncryptWrite(
return nil, err
}
if p == nil {
- return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
}
// Process batch request items. If encryption of any request
@@ -244,7 +255,7 @@ func (b *backend) pathEncryptWrite(
continue
}
- ciphertext, err := p.Encrypt(item.DecodedContext, item.DecodedNonce, item.Plaintext)
+ ciphertext, err := p.Encrypt(item.KeyVersion, item.DecodedContext, item.DecodedNonce, item.Plaintext)
if err != nil {
switch err.(type) {
case errutil.UserError:
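The key_version plumbing above gives each batch item its own version, where 0 keeps the old behavior of encrypting with the latest allowed key. A hedged sketch of a batch item carrying an explicit version; the struct below merely mirrors the BatchRequestItem fields for illustration and is not the backend's own type:

package main

import (
    "encoding/base64"
    "fmt"
)

type batchItem struct {
    Plaintext  string `json:"plaintext"`
    Context    string `json:"context,omitempty"`
    KeyVersion int    `json:"key_version,omitempty"`
}

func main() {
    item := batchItem{
        Plaintext:  base64.StdEncoding.EncodeToString([]byte("the quick brown fox")),
        KeyVersion: 2, // must be 0 (latest) or >= min_encryption_version
    }
    fmt.Printf("%+v\n", item)
}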
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go
index 1f7350e..a18db91 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go
@@ -151,7 +151,7 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st
case exportTypeEncryptionKey:
switch policy.Type {
case keysutil.KeyType_AES256_GCM96:
- return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.AESKey)), nil
+ return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil
}
case exportTypeSigningKey:
@@ -162,6 +162,9 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st
return "", err
}
return ecKey, nil
+
+ case keysutil.KeyType_ED25519:
+ return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil
}
}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go
index e021ac6..314653c 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go
@@ -12,8 +12,10 @@ import (
func TestTransit_Export_KeyVersion_ExportsCorrectVersion(t *testing.T) {
verifyExportsCorrectVersion(t, "encryption-key", "aes256-gcm96")
verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p256")
+ verifyExportsCorrectVersion(t, "signing-key", "ed25519")
verifyExportsCorrectVersion(t, "hmac-key", "aes256-gcm96")
verifyExportsCorrectVersion(t, "hmac-key", "ecdsa-p256")
+ verifyExportsCorrectVersion(t, "hmac-key", "ed25519")
}
func verifyExportsCorrectVersion(t *testing.T, exportType, keyType string) {
@@ -293,6 +295,11 @@ func TestTransit_Export_SigningDoesNotSupportSigning_ReturnsError(t *testing.T)
}
func TestTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testing.T) {
+ testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p256")
+ testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ed25519")
+}
+
+func testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testing.T, keyType string) {
var b *backend
sysView := logical.TestSystemView()
storage := &logical.InmemStorage{}
@@ -309,7 +316,7 @@ func TestTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testi
}
req.Data = map[string]interface{}{
"exportable": true,
- "type": "ecdsa-p256",
+ "type": keyType,
}
_, err := b.HandleRequest(req)
if err != nil {
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go
index 31c156f..0a4ba19 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go
@@ -45,6 +45,13 @@ Defaults to "sha2-256".`,
Type: framework.TypeString,
Description: `Algorithm to use (POST URL parameter)`,
},
+
+ "key_version": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `The version of the key to use for generating the HMAC.
+Must be 0 (for latest) or a value greater than or equal
+to the min_encryption_version configured on the key.`,
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -59,6 +66,7 @@ Defaults to "sha2-256".`,
func (b *backend) pathHMACWrite(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
name := d.Get("name").(string)
+ ver := d.Get("key_version").(int)
inputB64 := d.Get("input").(string)
algorithm := d.Get("urlalgorithm").(string)
if algorithm == "" {
@@ -79,10 +87,21 @@ func (b *backend) pathHMACWrite(
return nil, err
}
if p == nil {
- return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
}
- key, err := p.HMACKey(p.LatestVersion)
+ switch {
+ case ver == 0:
+ // Allowed, will use latest; set explicitly here to ensure the string
+ // is generated properly
+ ver = p.LatestVersion
+ case ver == p.LatestVersion:
+ // Allowed
+ case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion:
+ return logical.ErrorResponse("cannot generate HMAC: version is too old (disallowed by policy)"), logical.ErrInvalidRequest
+ }
+
+ key, err := p.HMACKey(ver)
if err != nil {
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
}
@@ -107,7 +126,7 @@ func (b *backend) pathHMACWrite(
retBytes := hf.Sum(nil)
retStr := base64.StdEncoding.EncodeToString(retBytes)
- retStr = fmt.Sprintf("vault:v%s:%s", strconv.Itoa(p.LatestVersion), retStr)
+ retStr = fmt.Sprintf("vault:v%s:%s", strconv.Itoa(ver), retStr)
// Generate the response
resp := &logical.Response{
@@ -162,7 +181,7 @@ func (b *backend) pathHMACVerify(
return nil, err
}
if p == nil {
- return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
}
if ver > p.LatestVersion {
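The switch in `pathHMACWrite` resolves version 0 to `p.LatestVersion` and rejects anything below `min_encryption_version`, and the returned string now embeds the version actually used rather than always the latest. A sketch of the corresponding client call, with the `transit` mount path and key name as assumptions:

```go
package main

import (
	"encoding/base64"

	"github.com/hashicorp/vault/api"
)

// hmacAtVersion requests an HMAC pinned to a specific key version.
// key_version 0 resolves to p.LatestVersion server-side, as above.
func hmacAtVersion(client *api.Client, key string, version int, msg []byte) (string, error) {
	secret, err := client.Logical().Write("transit/hmac/"+key, map[string]interface{}{
		"input":       base64.StdEncoding.EncodeToString(msg),
		"key_version": version,
	})
	if err != nil {
		return "", err
	}
	// The result carries the version it was generated with: "vault:v<N>:<b64>".
	return secret.Data["hmac"].(string), nil
}
```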
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go
index a69c555..ad9a918 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go
@@ -2,9 +2,14 @@ package transit
import (
"crypto/elliptic"
+ "encoding/base64"
"fmt"
"strconv"
+ "time"
+ "golang.org/x/crypto/ed25519"
+
+ "github.com/fatih/structs"
"github.com/hashicorp/vault/helper/keysutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
@@ -36,8 +41,8 @@ func (b *backend) pathKeys() *framework.Path {
Type: framework.TypeString,
Default: "aes256-gcm96",
Description: `The type of key to create. Currently,
-"aes256-gcm96" (symmetric) and "ecdsa-p256" (asymmetric) are
-supported. Defaults to "aes256-gcm96".`,
+"aes256-gcm96" (symmetric) and "ecdsa-p256" (asymmetric), and
+'ed25519' (asymmetric) are supported. Defaults to "aes256-gcm96".`,
},
"derived": &framework.FieldSchema{
@@ -69,6 +74,14 @@ impact the ciphertext's security.`,
This allows for all the valid keys
in the key ring to be exported.`,
},
+
+ "context": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Base64 encoded context for key derivation.
+When reading a key with key derivation enabled,
+if the key type supports public keys, this will
+return the public key for the given context.`,
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -116,6 +129,8 @@ func (b *backend) pathPolicyWrite(
polReq.KeyType = keysutil.KeyType_AES256_GCM96
case "ecdsa-p256":
polReq.KeyType = keysutil.KeyType_ECDSA_P256
+ case "ed25519":
+ polReq.KeyType = keysutil.KeyType_ED25519
default:
return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest
}
@@ -139,6 +154,13 @@ func (b *backend) pathPolicyWrite(
return nil, nil
}
+// Built-in helper type for returning asymmetric keys
+type asymKey struct {
+ Name string `json:"name" structs:"name" mapstructure:"name"`
+ PublicKey string `json:"public_key" structs:"public_key" mapstructure:"public_key"`
+ CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
+}
+
func (b *backend) pathPolicyRead(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
name := d.Get("name").(string)
@@ -162,6 +184,7 @@ func (b *backend) pathPolicyRead(
"derived": p.Derived,
"deletion_allowed": p.DeletionAllowed,
"min_decryption_version": p.MinDecryptionVersion,
+ "min_encryption_version": p.MinEncryptionVersion,
"latest_version": p.LatestVersion,
"exportable": p.Exportable,
"supports_encryption": p.Type.EncryptionSupported(),
@@ -185,25 +208,54 @@ func (b *backend) pathPolicyRead(
}
}
+ contextRaw := d.Get("context").(string)
+ var context []byte
+ if len(contextRaw) != 0 {
+ context, err = base64.StdEncoding.DecodeString(contextRaw)
+ if err != nil {
+ return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest
+ }
+ }
+
switch p.Type {
case keysutil.KeyType_AES256_GCM96:
retKeys := map[string]int64{}
for k, v := range p.Keys {
- retKeys[strconv.Itoa(k)] = v.CreationTime
+ retKeys[strconv.Itoa(k)] = v.DeprecatedCreationTime
}
resp.Data["keys"] = retKeys
- case keysutil.KeyType_ECDSA_P256:
- type ecdsaKey struct {
- Name string `json:"name"`
- PublicKey string `json:"public_key"`
- }
- retKeys := map[string]ecdsaKey{}
+ case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ED25519:
+ retKeys := map[string]map[string]interface{}{}
for k, v := range p.Keys {
- retKeys[strconv.Itoa(k)] = ecdsaKey{
- Name: elliptic.P256().Params().Name,
- PublicKey: v.FormattedPublicKey,
+ key := asymKey{
+ PublicKey: v.FormattedPublicKey,
+ CreationTime: v.CreationTime,
}
+ if key.CreationTime.IsZero() {
+ key.CreationTime = time.Unix(v.DeprecatedCreationTime, 0)
+ }
+
+ switch p.Type {
+ case keysutil.KeyType_ECDSA_P256:
+ key.Name = elliptic.P256().Params().Name
+ case keysutil.KeyType_ED25519:
+ if p.Derived {
+ if len(context) == 0 {
+ key.PublicKey = ""
+ } else {
+ derived, err := p.DeriveKey(context, k)
+ if err != nil {
+ return nil, fmt.Errorf("failed to derive key to return public component")
+ }
+ pubKey := ed25519.PrivateKey(derived).Public().(ed25519.PublicKey)
+ key.PublicKey = base64.StdEncoding.EncodeToString(pubKey)
+ }
+ }
+ key.Name = "ed25519"
+ }
+
+ retKeys[strconv.Itoa(k)] = structs.New(key).Map()
}
resp.Data["keys"] = retKeys
}
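For derived ed25519 keys, the read handler above only publishes a public key when a context is supplied: it derives the per-context private key and encodes the public half. The derivation step in isolation, as a sketch that assumes the derived bytes form a full ed25519 private key (which is what `p.DeriveKey` returns on this code path, as the handler's own type assertion shows):

```go
package main

import (
	"encoding/base64"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

// publicFromDerived mirrors the read handler: given the derived private
// key bytes, expose only the base64-encoded public component.
func publicFromDerived(derived []byte) (string, error) {
	if len(derived) != ed25519.PrivateKeySize {
		return "", fmt.Errorf("derived key is %d bytes, want %d", len(derived), ed25519.PrivateKeySize)
	}
	pub := ed25519.PrivateKey(derived).Public().(ed25519.PublicKey)
	return base64.StdEncoding.EncodeToString(pub), nil
}
```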
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys_test.go
new file mode 100644
index 0000000..7a87fdd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys_test.go
@@ -0,0 +1,77 @@
+package transit_test
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/builtin/audit/file"
+ "github.com/hashicorp/vault/builtin/logical/transit"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestTransit_Issue_2958(t *testing.T) {
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "transit": transit.Factory,
+ },
+ AuditBackends: map[string]audit.Factory{
+ "file": file.Factory,
+ },
+ }
+
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ cores := cluster.Cores
+
+ vault.TestWaitActive(t, cores[0].Core)
+
+ client := cores[0].Client
+
+ err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{
+ Type: "file",
+ Options: map[string]string{
+ "file_path": "/dev/null",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = client.Sys().Mount("transit", &api.MountInput{
+ Type: "transit",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Logical().Write("transit/keys/foo", map[string]interface{}{
+ "type": "ecdsa-p256",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Logical().Write("transit/keys/bar", map[string]interface{}{
+ "type": "ed25519",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Logical().Read("transit/keys/foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Logical().Read("transit/keys/bar")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go
index 167656a..81e811a 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go
@@ -33,6 +33,13 @@ func (b *backend) pathRewrap() *framework.Path {
Type: framework.TypeString,
Description: "Nonce for when convergent encryption is used",
},
+
+ "key_version": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `The version of the key to use for encryption.
+Must be 0 (for latest) or a value greater than or equal
+to the min_encryption_version configured on the key.`,
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -69,6 +76,7 @@ func (b *backend) pathRewrapWrite(
Ciphertext: ciphertext,
Context: d.Get("context").(string),
Nonce: d.Get("nonce").(string),
+ KeyVersion: d.Get("key_version").(int),
}
}
@@ -113,7 +121,7 @@ func (b *backend) pathRewrapWrite(
return nil, err
}
if p == nil {
- return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
}
for i, item := range batchInputItems {
@@ -132,7 +140,7 @@ func (b *backend) pathRewrapWrite(
}
}
- ciphertext, err := p.Encrypt(item.DecodedContext, item.DecodedNonce, plaintext)
+ ciphertext, err := p.Encrypt(item.KeyVersion, item.DecodedContext, item.DecodedNonce, plaintext)
if err != nil {
switch err.(type) {
case errutil.UserError:
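Rewrap decrypts with whatever version produced the ciphertext and re-encrypts with `item.KeyVersion`, so old ciphertext can be moved forward without exposing plaintext to the caller. A hedged sketch of the rotate-then-rewrap sequence, with mount path and key name assumed:

```go
package main

import "github.com/hashicorp/vault/api"

// rewrapToLatest rotates a transit key and rewraps an old ciphertext
// under the new latest version; the plaintext never leaves Vault.
func rewrapToLatest(client *api.Client, key, ciphertext string) (string, error) {
	if _, err := client.Logical().Write("transit/keys/"+key+"/rotate", nil); err != nil {
		return "", err
	}
	secret, err := client.Logical().Write("transit/rewrap/"+key, map[string]interface{}{
		"ciphertext":  ciphertext,
		"key_version": 0, // 0 means "use the latest version"
	})
	if err != nil {
		return "", err
	}
	return secret.Data["ciphertext"].(string), nil
}
```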
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go
index ff01880..549ae05 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go
@@ -26,6 +26,12 @@ func (b *backend) pathSign() *framework.Path {
Description: "The base64-encoded input data",
},
+ "context": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Base64 encoded context for key derivation. Required if key
+derivation is enabled; currently only available with ed25519 keys.`,
+ },
+
"algorithm": &framework.FieldSchema{
Type: framework.TypeString,
Default: "sha2-256",
@@ -36,13 +42,21 @@ func (b *backend) pathSign() *framework.Path {
* sha2-384
* sha2-512
-Defaults to "sha2-256".`,
+Defaults to "sha2-256". Not valid for all key types,
+including ed25519.`,
},
"urlalgorithm": &framework.FieldSchema{
Type: framework.TypeString,
Description: `Hash algorithm to use (POST URL parameter)`,
},
+
+ "key_version": &framework.FieldSchema{
+ Type: framework.TypeInt,
+ Description: `The version of the key to use for signing.
+Must be 0 (for latest) or a value greater than or equal
+to the min_encryption_version configured on the key.`,
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -63,6 +77,12 @@ func (b *backend) pathVerify() *framework.Path {
Description: "The key to use",
},
+ "context": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Base64 encoded context for key derivation. Required if key
+derivation is enabled; currently only available with ed25519 keys.`,
+ },
+
"signature": &framework.FieldSchema{
Type: framework.TypeString,
Description: "The signature, including vault header/key version",
@@ -93,7 +113,7 @@ func (b *backend) pathVerify() *framework.Path {
* sha2-384
* sha2-512
-Defaults to "sha2-256".`,
+Defaults to "sha2-256". Not valid for all key types.`,
},
},
@@ -109,6 +129,7 @@ Defaults to "sha2-256".`,
func (b *backend) pathSignWrite(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
name := d.Get("name").(string)
+ ver := d.Get("key_version").(int)
inputB64 := d.Get("input").(string)
algorithm := d.Get("urlalgorithm").(string)
if algorithm == "" {
@@ -120,22 +141,6 @@ func (b *backend) pathSignWrite(
return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
}
- var hf hash.Hash
- switch algorithm {
- case "sha2-224":
- hf = sha256.New224()
- case "sha2-256":
- hf = sha256.New()
- case "sha2-384":
- hf = sha512.New384()
- case "sha2-512":
- hf = sha512.New()
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
- }
- hf.Write(input)
- hashedInput := hf.Sum(nil)
-
// Get the policy
p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
if lock != nil {
@@ -145,27 +150,59 @@ func (b *backend) pathSignWrite(
return nil, err
}
if p == nil {
- return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
}
if !p.Type.SigningSupported() {
return logical.ErrorResponse(fmt.Sprintf("key type %v does not support signing", p.Type)), logical.ErrInvalidRequest
}
- sig, err := p.Sign(hashedInput)
+ contextRaw := d.Get("context").(string)
+ var context []byte
+ if len(contextRaw) != 0 {
+ context, err = base64.StdEncoding.DecodeString(contextRaw)
+ if err != nil {
+ return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest
+ }
+ }
+
+ if p.Type.HashSignatureInput() {
+ var hf hash.Hash
+ switch algorithm {
+ case "sha2-224":
+ hf = sha256.New224()
+ case "sha2-256":
+ hf = sha256.New()
+ case "sha2-384":
+ hf = sha512.New384()
+ case "sha2-512":
+ hf = sha512.New()
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
+ }
+ hf.Write(input)
+ input = hf.Sum(nil)
+ }
+
+ sig, err := p.Sign(ver, context, input)
if err != nil {
return nil, err
}
- if sig == "" {
+ if sig == nil {
return nil, fmt.Errorf("signature could not be computed")
}
// Generate the response
resp := &logical.Response{
Data: map[string]interface{}{
- "signature": sig,
+ "signature": sig.Signature,
},
}
+
+ if len(sig.PublicKey) > 0 {
+ resp.Data["public_key"] = sig.PublicKey
+ }
+
return resp, nil
}
@@ -197,22 +234,6 @@ func (b *backend) pathVerifyWrite(
return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
}
- var hf hash.Hash
- switch algorithm {
- case "sha2-224":
- hf = sha256.New224()
- case "sha2-256":
- hf = sha256.New()
- case "sha2-384":
- hf = sha512.New384()
- case "sha2-512":
- hf = sha512.New()
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
- }
- hf.Write(input)
- hashedInput := hf.Sum(nil)
-
// Get the policy
p, lock, err := b.lm.GetPolicyShared(req.Storage, name)
if lock != nil {
@@ -222,10 +243,41 @@ func (b *backend) pathVerifyWrite(
return nil, err
}
if p == nil {
- return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest
+ return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest
}
- valid, err := p.VerifySignature(hashedInput, sig)
+ if !p.Type.SigningSupported() {
+ return logical.ErrorResponse(fmt.Sprintf("key type %v does not support verification", p.Type)), logical.ErrInvalidRequest
+ }
+
+ contextRaw := d.Get("context").(string)
+ var context []byte
+ if len(contextRaw) != 0 {
+ context, err = base64.StdEncoding.DecodeString(contextRaw)
+ if err != nil {
+ return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest
+ }
+ }
+
+ if p.Type.HashSignatureInput() {
+ var hf hash.Hash
+ switch algorithm {
+ case "sha2-224":
+ hf = sha256.New224()
+ case "sha2-256":
+ hf = sha256.New()
+ case "sha2-384":
+ hf = sha512.New384()
+ case "sha2-512":
+ hf = sha512.New()
+ default:
+ return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
+ }
+ hf.Write(input)
+ input = hf.Sum(nil)
+ }
+
+ valid, err := p.VerifySignature(context, input, sig)
if err != nil {
switch err.(type) {
case errutil.UserError:
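Signing now hashes the input only when `p.Type.HashSignatureInput()` reports that the key type expects a digest; ed25519 signs the raw message, and derived keys additionally take a base64 context. A client-side sketch of a sign/verify round trip under those rules (the mount path, key name, and helper name are assumptions):

```go
package main

import (
	"encoding/base64"

	"github.com/hashicorp/vault/api"
)

// signAndVerify signs raw input with a derived ed25519 transit key and
// then verifies the signature against the same context.
func signAndVerify(client *api.Client, key string, msg, context []byte) (bool, error) {
	data := map[string]interface{}{
		"input":   base64.StdEncoding.EncodeToString(msg),
		"context": base64.StdEncoding.EncodeToString(context),
	}
	sig, err := client.Logical().Write("transit/sign/"+key, data)
	if err != nil {
		return false, err
	}
	// Reuse the same input and context for verification.
	data["signature"] = sig.Data["signature"]
	ver, err := client.Logical().Write("transit/verify/"+key, data)
	if err != nil {
		return false, err
	}
	return ver.Data["valid"].(bool), nil
}
```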
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go
index 3a41c28..4abdad6 100644
--- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go
+++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go
@@ -1,12 +1,17 @@
package transit
import (
+ "encoding/base64"
+ "strings"
"testing"
+ "golang.org/x/crypto/ed25519"
+
"github.com/hashicorp/vault/logical"
+ "github.com/mitchellh/mapstructure"
)
-func TestTransit_SignVerify(t *testing.T) {
+func TestTransit_SignVerify_P256(t *testing.T) {
var b *backend
sysView := logical.TestSystemView()
storage := &logical.InmemStorage{}
@@ -91,7 +96,7 @@ func TestTransit_SignVerify(t *testing.T) {
}
if errExpected {
if !resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
+ t.Fatalf("bad: should have gotten error response: %#v", *resp)
}
return ""
}
@@ -114,7 +119,7 @@ func TestTransit_SignVerify(t *testing.T) {
}
if errExpected {
if resp != nil && !resp.IsError() {
- t.Fatalf("bad: got error response: %#v", *resp)
+ t.Fatalf("bad: should have gotten error response: %#v", *resp)
}
return
}
@@ -199,3 +204,210 @@ func TestTransit_SignVerify(t *testing.T) {
// Now try the v1
verifyRequest(req, true, "", v1sig)
}
+
+func TestTransit_SignVerify_ED25519(t *testing.T) {
+ var b *backend
+ sysView := logical.TestSystemView()
+ storage := &logical.InmemStorage{}
+
+ b = Backend(&logical.BackendConfig{
+ StorageView: storage,
+ System: sysView,
+ })
+
+ // First create a key
+ req := &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/foo",
+ Data: map[string]interface{}{
+ "type": "ed25519",
+ },
+ }
+ _, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Now create a derived key
+ req = &logical.Request{
+ Storage: storage,
+ Operation: logical.UpdateOperation,
+ Path: "keys/bar",
+ Data: map[string]interface{}{
+ "type": "ed25519",
+ "derived": true,
+ },
+ }
+ _, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Get the keys for later
+ fooP, lock, err := b.lm.GetPolicyShared(storage, "foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ // We don't need to hold the lock as we're the only user of this policy
+ lock.RUnlock()
+
+ barP, lock, err := b.lm.GetPolicyShared(storage, "bar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lock.RUnlock()
+
+ signRequest := func(req *logical.Request, errExpected bool, postpath string) string {
+ // Delete any key that exists in the request
+ delete(req.Data, "public_key")
+ req.Path = "sign/" + postpath
+ resp, err := b.HandleRequest(req)
+ if err != nil && !errExpected {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if errExpected {
+ if !resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ return ""
+ }
+ if resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ value, ok := resp.Data["signature"]
+ if !ok {
+ t.Fatalf("no signature key found in returned data, got resp data %#v", resp.Data)
+ }
+ // memoize any public key
+ if key, ok := resp.Data["public_key"]; ok {
+ req.Data["public_key"] = key
+ }
+ return value.(string)
+ }
+
+ verifyRequest := func(req *logical.Request, errExpected bool, postpath, sig string) {
+ req.Path = "verify/" + postpath
+ req.Data["signature"] = sig
+ resp, err := b.HandleRequest(req)
+ if err != nil && !errExpected {
+ t.Fatalf("got error: %v, sig was %v", err, sig)
+ }
+ if errExpected {
+ if resp != nil && !resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ return
+ }
+ if resp == nil {
+ t.Fatal("expected non-nil response")
+ }
+ if resp.IsError() {
+ t.Fatalf("bad: got error response: %#v", *resp)
+ }
+ value, ok := resp.Data["valid"]
+ if !ok {
+ t.Fatalf("no valid key found in returned data, got resp data %#v", resp.Data)
+ }
+ if !value.(bool) && !errExpected {
+ t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp)
+ }
+
+ if pubKeyRaw, ok := req.Data["public_key"]; ok {
+ input, _ := base64.StdEncoding.DecodeString(req.Data["input"].(string))
+ splitSig := strings.Split(sig, ":")
+ signature, _ := base64.StdEncoding.DecodeString(splitSig[2])
+ if !ed25519.Verify(ed25519.PublicKey(pubKeyRaw.([]byte)), input, signature) && !errExpected {
+ t.Fatal("invalid signature")
+ }
+
+ keyReadReq := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "keys/" + postpath,
+ }
+ keyReadResp, err := b.HandleRequest(keyReadReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ val := keyReadResp.Data["keys"].(map[string]map[string]interface{})[strings.TrimPrefix(splitSig[1], "v")]
+ var ak asymKey
+ if err := mapstructure.Decode(val, &ak); err != nil {
+ t.Fatal(err)
+ }
+ if ak.PublicKey != "" {
+ t.Fatal("got non-empty public key")
+ }
+ keyReadReq.Data = map[string]interface{}{
+ "context": "abcd",
+ }
+ keyReadResp, err = b.HandleRequest(keyReadReq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ val = keyReadResp.Data["keys"].(map[string]map[string]interface{})[strings.TrimPrefix(splitSig[1], "v")]
+ if err := mapstructure.Decode(val, &ak); err != nil {
+ t.Fatal(err)
+ }
+ if ak.PublicKey != base64.StdEncoding.EncodeToString(pubKeyRaw.([]byte)) {
+ t.Fatalf("got incorrect public key; got %q, expected %q\nasymKey struct is\n%#v", ak.PublicKey, pubKeyRaw, ak)
+ }
+ }
+ }
+
+ req.Data = map[string]interface{}{
+ "input": "dGhlIHF1aWNrIGJyb3duIGZveA==",
+ "context": "abcd",
+ }
+
+ // Test defaults
+ sig := signRequest(req, false, "foo")
+ verifyRequest(req, false, "foo", sig)
+
+ sig = signRequest(req, false, "bar")
+ verifyRequest(req, false, "bar", sig)
+
+ // Test a bad signature
+ verifyRequest(req, true, "foo", sig[0:len(sig)-2])
+ verifyRequest(req, true, "bar", sig[0:len(sig)-2])
+
+ v1sig := sig
+
+ // Rotate and set min decryption version
+ err = fooP.Rotate(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = fooP.Rotate(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+ fooP.MinDecryptionVersion = 2
+ if err = fooP.Persist(storage); err != nil {
+ t.Fatal(err)
+ }
+ err = barP.Rotate(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = barP.Rotate(storage)
+ if err != nil {
+ t.Fatal(err)
+ }
+ barP.MinDecryptionVersion = 2
+ if err = barP.Persist(storage); err != nil {
+ t.Fatal(err)
+ }
+
+ // Make sure signing still works fine
+ sig = signRequest(req, false, "foo")
+ verifyRequest(req, false, "foo", sig)
+ // Now try the v1
+ verifyRequest(req, true, "foo", v1sig)
+ // Repeat with the other key
+ sig = signRequest(req, false, "bar")
+ verifyRequest(req, false, "bar", sig)
+ verifyRequest(req, true, "bar", v1sig)
+}
diff --git a/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go b/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go
new file mode 100644
index 0000000..a1c781f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go
@@ -0,0 +1,231 @@
+package plugin
+
+import (
+ "fmt"
+ "net/rpc"
+ "reflect"
+ "sync"
+
+ uuid "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+ bplugin "github.com/hashicorp/vault/logical/plugin"
+)
+
+var (
+ ErrMismatchType = fmt.Errorf("mismatch on mounted backend and plugin backend type")
+ ErrMismatchPaths = fmt.Errorf("mismatch on mounted backend and plugin backend special paths")
+)
+
+// Factory returns a configured plugin logical.Backend.
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ _, ok := conf.Config["plugin_name"]
+ if !ok {
+ return nil, fmt.Errorf("plugin_name not provided")
+ }
+ b, err := Backend(conf)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// Backend returns an instance of the backend, either as a plugin if external
+// or as a concrete implementation if builtin, cast to logical.Backend.
+func Backend(conf *logical.BackendConfig) (logical.Backend, error) {
+ var b backend
+
+ name := conf.Config["plugin_name"]
+ sys := conf.System
+
+ // NewBackend with isMetadataMode set to true
+ raw, err := bplugin.NewBackend(name, sys, conf.Logger, true)
+ if err != nil {
+ return nil, err
+ }
+ err = raw.Setup(conf)
+ if err != nil {
+ return nil, err
+ }
+ // Get SpecialPaths and BackendType
+ paths := raw.SpecialPaths()
+ btype := raw.Type()
+
+ // Cleanup meta plugin backend
+ raw.Cleanup()
+
+ // Initialize b.Backend with a dummy backend since plugin
+ // backends will need to be lazily loaded.
+ b.Backend = &framework.Backend{
+ PathsSpecial: paths,
+ BackendType: btype,
+ }
+
+ b.config = conf
+
+ return &b, nil
+}
+
+// backend is a thin wrapper around plugin.BackendPluginClient
+type backend struct {
+ logical.Backend
+ sync.RWMutex
+
+ config *logical.BackendConfig
+
+ // Used to detect if we already reloaded
+ canary string
+
+ // Used to detect if plugin is set
+ loaded bool
+}
+
+func (b *backend) reloadBackend() error {
+ b.Logger().Trace("plugin: reloading plugin backend", "plugin", b.config.Config["plugin_name"])
+ return b.startBackend()
+}
+
+// startBackend starts a plugin backend
+func (b *backend) startBackend() error {
+ pluginName := b.config.Config["plugin_name"]
+
+ // Ensure proper cleanup of the backend (i.e. call client.Kill())
+ b.Backend.Cleanup()
+
+ nb, err := bplugin.NewBackend(pluginName, b.config.System, b.config.Logger, false)
+ if err != nil {
+ return err
+ }
+ err = nb.Setup(b.config)
+ if err != nil {
+ return err
+ }
+
+ // If the backend has not been loaded (i.e. still in metadata mode),
+ // check if the type and special paths still match
+ if !b.loaded {
+ if b.Backend.Type() != nb.Type() {
+ nb.Cleanup()
+ b.Logger().Warn("plugin: failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchType)
+ return ErrMismatchType
+ }
+ if !reflect.DeepEqual(b.Backend.SpecialPaths(), nb.SpecialPaths()) {
+ nb.Cleanup()
+ b.Logger().Warn("plugin: failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchPaths)
+ return ErrMismatchPaths
+ }
+ }
+
+ b.Backend = nb
+ b.loaded = true
+
+ // Call initialize
+ if err := b.Backend.Initialize(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// HandleRequest is a thin wrapper implementation of HandleRequest that includes automatic plugin reload.
+func (b *backend) HandleRequest(req *logical.Request) (*logical.Response, error) {
+ b.RLock()
+ canary := b.canary
+
+ // Lazy-load backend
+ if !b.loaded {
+ // Upgrade lock
+ b.RUnlock()
+ b.Lock()
+ // Check once more after lock swap
+ if !b.loaded {
+ err := b.startBackend()
+ if err != nil {
+ b.Unlock()
+ return nil, err
+ }
+ }
+ b.Unlock()
+ b.RLock()
+ }
+ resp, err := b.Backend.HandleRequest(req)
+ b.RUnlock()
+ // Need to compare the string value for the case where err comes from plugin RPC
+ // and is returned as a plugin.BasicError type.
+ if err != nil && err.Error() == rpc.ErrShutdown.Error() {
+ // Reload plugin if it's an rpc.ErrShutdown
+ b.Lock()
+ if b.canary == canary {
+ err := b.reloadBackend()
+ if err != nil {
+ b.Unlock()
+ return nil, err
+ }
+ b.canary, err = uuid.GenerateUUID()
+ if err != nil {
+ b.Unlock()
+ return nil, err
+ }
+ }
+ b.Unlock()
+
+ // Try request once more
+ b.RLock()
+ defer b.RUnlock()
+ return b.Backend.HandleRequest(req)
+ }
+ return resp, err
+}
+
+// HandleExistenceCheck is a thin wrapper implementation of HandleRequest that includes automatic plugin reload.
+func (b *backend) HandleExistenceCheck(req *logical.Request) (bool, bool, error) {
+ b.RLock()
+ canary := b.canary
+
+ // Lazy-load backend
+ if !b.loaded {
+ // Upgrade lock
+ b.RUnlock()
+ b.Lock()
+ // Check once more after lock swap
+ if !b.loaded {
+ err := b.startBackend()
+ if err != nil {
+ b.Unlock()
+ return false, false, err
+ }
+ }
+ b.Unlock()
+ b.RLock()
+ }
+
+ checkFound, exists, err := b.Backend.HandleExistenceCheck(req)
+ b.RUnlock()
+ if err != nil && err.Error() == rpc.ErrShutdown.Error() {
+ // Reload plugin if it's an rpc.ErrShutdown
+ b.Lock()
+ if b.canary == canary {
+ err := b.reloadBackend()
+ if err != nil {
+ b.Unlock()
+ return false, false, err
+ }
+ b.canary, err = uuid.GenerateUUID()
+ if err != nil {
+ b.Unlock()
+ return false, false, err
+ }
+ }
+ b.Unlock()
+
+ // Try request once more
+ b.RLock()
+ defer b.RUnlock()
+ return b.Backend.HandleExistenceCheck(req)
+ }
+ return checkFound, exists, err
+}
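`HandleRequest` and `HandleExistenceCheck` share one recovery idiom: snapshot `b.canary` under the read lock, and after an `rpc.ErrShutdown` only the caller whose snapshot still matches reloads the plugin and rolls the canary, so concurrent requests don't restart the plugin process repeatedly. A generic sketch of that pattern (this `reloadOnce` type is illustrative, not Vault's):

```go
package main

import (
	"sync"

	uuid "github.com/hashicorp/go-uuid"
)

// reloadOnce sketches the canary pattern above: many goroutines may
// observe the same failure, but only the first whose snapshot still
// matches the current canary performs the reload.
type reloadOnce struct {
	sync.Mutex
	canary string
	reload func() error
}

func (r *reloadOnce) recover(snapshot string) error {
	r.Lock()
	defer r.Unlock()
	if r.canary != snapshot {
		// Someone else already reloaded since we took our snapshot.
		return nil
	}
	if err := r.reload(); err != nil {
		return err
	}
	next, err := uuid.GenerateUUID()
	if err != nil {
		return err
	}
	r.canary = next
	return nil
}
```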
diff --git a/vendor/github.com/hashicorp/vault/builtin/plugin/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/plugin/backend_test.go
new file mode 100644
index 0000000..5b07197
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/builtin/plugin/backend_test.go
@@ -0,0 +1,96 @@
+package plugin
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/plugin"
+ "github.com/hashicorp/vault/logical/plugin/mock"
+ "github.com/hashicorp/vault/vault"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestBackend_impl(t *testing.T) {
+ var _ logical.Backend = &backend{}
+}
+
+func TestBackend(t *testing.T) {
+ config, cleanup := testConfig(t)
+ defer cleanup()
+
+ _, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackend_Factory(t *testing.T) {
+ config, cleanup := testConfig(t)
+ defer cleanup()
+
+ _, err := Factory(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackend_PluginMain(t *testing.T) {
+ args := []string{}
+ if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadaModeEnv) != "true" {
+ return
+ }
+
+ caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
+ if caPEM == "" {
+ t.Fatal("CA cert not passed in")
+ }
+
+ args = append(args, fmt.Sprintf("--ca-cert=%s", caPEM))
+
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(args)
+ tlsConfig := apiClientMeta.GetTLSConfig()
+ tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig)
+
+ err := plugin.Serve(&plugin.ServeOpts{
+ BackendFactoryFunc: mock.Factory,
+ TLSProviderFunc: tlsProviderFunc,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func testConfig(t *testing.T) (*logical.BackendConfig, func()) {
+ cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ cores := cluster.Cores
+
+ core := cores[0]
+
+ sys := vault.TestDynamicSystemView(core.Core)
+
+ config := &logical.BackendConfig{
+ Logger: logformat.NewVaultLogger(log.LevelTrace),
+ System: sys,
+ Config: map[string]string{
+ "plugin_name": "mock-plugin",
+ },
+ }
+
+ os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
+
+ vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMain")
+
+ return config, func() {
+ cluster.Cleanup()
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/cli/commands.go b/vendor/github.com/hashicorp/vault/cli/commands.go
index 7494c06..22c8640 100644
--- a/vendor/github.com/hashicorp/vault/cli/commands.go
+++ b/vendor/github.com/hashicorp/vault/cli/commands.go
@@ -6,8 +6,11 @@ import (
auditFile "github.com/hashicorp/vault/builtin/audit/file"
auditSocket "github.com/hashicorp/vault/builtin/audit/socket"
auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog"
+ "github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/version"
+ credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin"
+ credKube "github.com/hashicorp/vault-plugin-auth-kubernetes"
credAppId "github.com/hashicorp/vault/builtin/credential/app-id"
credAppRole "github.com/hashicorp/vault/builtin/credential/approle"
credAws "github.com/hashicorp/vault/builtin/credential/aws"
@@ -18,9 +21,27 @@ import (
credRadius "github.com/hashicorp/vault/builtin/credential/radius"
credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
+ physAzure "github.com/hashicorp/vault/physical/azure"
+ physCassandra "github.com/hashicorp/vault/physical/cassandra"
+ physCockroachDB "github.com/hashicorp/vault/physical/cockroachdb"
+ physConsul "github.com/hashicorp/vault/physical/consul"
+ physCouchDB "github.com/hashicorp/vault/physical/couchdb"
+ physDynamoDB "github.com/hashicorp/vault/physical/dynamodb"
+ physEtcd "github.com/hashicorp/vault/physical/etcd"
+ physFile "github.com/hashicorp/vault/physical/file"
+ physGCS "github.com/hashicorp/vault/physical/gcs"
+ physInmem "github.com/hashicorp/vault/physical/inmem"
+ physMSSQL "github.com/hashicorp/vault/physical/mssql"
+ physMySQL "github.com/hashicorp/vault/physical/mysql"
+ physPostgreSQL "github.com/hashicorp/vault/physical/postgresql"
+ physS3 "github.com/hashicorp/vault/physical/s3"
+ physSwift "github.com/hashicorp/vault/physical/swift"
+ physZooKeeper "github.com/hashicorp/vault/physical/zookeeper"
+
"github.com/hashicorp/vault/builtin/logical/aws"
"github.com/hashicorp/vault/builtin/logical/cassandra"
"github.com/hashicorp/vault/builtin/logical/consul"
+ "github.com/hashicorp/vault/builtin/logical/database"
"github.com/hashicorp/vault/builtin/logical/mongodb"
"github.com/hashicorp/vault/builtin/logical/mssql"
"github.com/hashicorp/vault/builtin/logical/mysql"
@@ -28,7 +49,9 @@ import (
"github.com/hashicorp/vault/builtin/logical/postgresql"
"github.com/hashicorp/vault/builtin/logical/rabbitmq"
"github.com/hashicorp/vault/builtin/logical/ssh"
+ "github.com/hashicorp/vault/builtin/logical/totp"
"github.com/hashicorp/vault/builtin/logical/transit"
+ "github.com/hashicorp/vault/builtin/plugin"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/command"
@@ -59,9 +82,8 @@ func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory {
Meta: *metaPtr,
}, nil
},
-
"server": func() (cli.Command, error) {
- return &command.ServerCommand{
+ c := &command.ServerCommand{
Meta: *metaPtr,
AuditBackends: map[string]audit.Factory{
"file": auditFile.Factory,
@@ -69,15 +91,18 @@ func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory {
"socket": auditSocket.Factory,
},
CredentialBackends: map[string]logical.Factory{
- "approle": credAppRole.Factory,
- "cert": credCert.Factory,
- "aws": credAws.Factory,
- "app-id": credAppId.Factory,
- "github": credGitHub.Factory,
- "userpass": credUserpass.Factory,
- "ldap": credLdap.Factory,
- "okta": credOkta.Factory,
- "radius": credRadius.Factory,
+ "approle": credAppRole.Factory,
+ "cert": credCert.Factory,
+ "aws": credAws.Factory,
+ "app-id": credAppId.Factory,
+ "gcp": credGcp.Factory,
+ "github": credGitHub.Factory,
+ "userpass": credUserpass.Factory,
+ "ldap": credLdap.Factory,
+ "okta": credOkta.Factory,
+ "radius": credRadius.Factory,
+ "kubernetes": credKube.Factory,
+ "plugin": plugin.Factory,
},
LogicalBackends: map[string]logical.Factory{
"aws": aws.Factory,
@@ -91,10 +116,40 @@ func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory {
"mysql": mysql.Factory,
"ssh": ssh.Factory,
"rabbitmq": rabbitmq.Factory,
+ "database": database.Factory,
+ "totp": totp.Factory,
+ "plugin": plugin.Factory,
},
+
ShutdownCh: command.MakeShutdownCh(),
SighupCh: command.MakeSighupCh(),
- }, nil
+ }
+
+ c.PhysicalBackends = map[string]physical.Factory{
+ "azure": physAzure.NewAzureBackend,
+ "cassandra": physCassandra.NewCassandraBackend,
+ "cockroachdb": physCockroachDB.NewCockroachDBBackend,
+ "consul": physConsul.NewConsulBackend,
+ "couchdb": physCouchDB.NewCouchDBBackend,
+ "couchdb_transactional": physCouchDB.NewTransactionalCouchDBBackend,
+ "dynamodb": physDynamoDB.NewDynamoDBBackend,
+ "etcd": physEtcd.NewEtcdBackend,
+ "file": physFile.NewFileBackend,
+ "file_transactional": physFile.NewTransactionalFileBackend,
+ "gcs": physGCS.NewGCSBackend,
+ "inmem": physInmem.NewInmem,
+ "inmem_ha": physInmem.NewInmemHA,
+ "inmem_transactional": physInmem.NewTransactionalInmem,
+ "inmem_transactional_ha": physInmem.NewTransactionalInmemHA,
+ "mssql": physMSSQL.NewMSSQLBackend,
+ "mysql": physMySQL.NewMySQLBackend,
+ "postgresql": physPostgreSQL.NewPostgreSQLBackend,
+ "s3": physS3.NewS3Backend,
+ "swift": physSwift.NewSwiftBackend,
+ "zookeeper": physZooKeeper.NewZooKeeperBackend,
+ }
+
+ return c, nil
},
"ssh": func() (cli.Command, error) {
diff --git a/vendor/github.com/hashicorp/vault/cli/main.go b/vendor/github.com/hashicorp/vault/cli/main.go
index 3d0ced3..000e1e9 100644
--- a/vendor/github.com/hashicorp/vault/cli/main.go
+++ b/vendor/github.com/hashicorp/vault/cli/main.go
@@ -36,9 +36,11 @@ func RunCustom(args []string, commands map[string]cli.CommandFactory) int {
}
cli := &cli.CLI{
- Args: args,
- Commands: commands,
- HelpFunc: cli.FilteredHelpFunc(commandsInclude, HelpFunc),
+ Args: args,
+ Commands: commands,
+ Name: "vault",
+ Autocomplete: true,
+ HelpFunc: cli.FilteredHelpFunc(commandsInclude, HelpFunc),
}
exitCode, err := cli.Run()
diff --git a/vendor/github.com/hashicorp/vault/command/audit_enable.go b/vendor/github.com/hashicorp/vault/command/audit_enable.go
index 3293c79..680a94e 100644
--- a/vendor/github.com/hashicorp/vault/command/audit_enable.go
+++ b/vendor/github.com/hashicorp/vault/command/audit_enable.go
@@ -10,6 +10,7 @@ import (
"github.com/hashicorp/vault/helper/kv-builder"
"github.com/hashicorp/vault/meta"
"github.com/mitchellh/mapstructure"
+ "github.com/posener/complete"
)
// AuditEnableCommand is a Command that mounts a new mount.
@@ -72,7 +73,7 @@ func (c *AuditEnableCommand) Run(args []string) int {
}
err = client.Sys().EnableAuditWithOptions(path, &api.EnableAuditOptions{
- Type: auditType,
+ Type: auditType,
Description: desc,
Options: opts,
Local: local,
@@ -127,3 +128,19 @@ Audit Enable Options:
`
return strings.TrimSpace(helpText)
}
+
+func (c *AuditEnableCommand) AutocompleteArgs() complete.Predictor {
+ return complete.PredictSet(
+ "file",
+ "syslog",
+ "socket",
+ )
+}
+
+func (c *AuditEnableCommand) AutocompleteFlags() complete.Flags {
+ return complete.Flags{
+ "-description": complete.PredictNothing,
+ "-path": complete.PredictNothing,
+ "-local": complete.PredictNothing,
+ }
+}
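A sketch of how predictors like the ones above plug into posener/complete. The mitchellh/cli wiring Vault actually uses (`Autocomplete: true`, seen in cli/main.go above) does this for us; the standalone layout here is an assumption for illustration only:

```go
package main

import "github.com/posener/complete"

func main() {
	cmd := complete.Command{
		Args: complete.PredictSet("file", "syslog", "socket"),
		Flags: complete.Flags{
			"-description": complete.PredictNothing,
			"-path":        complete.PredictNothing,
			"-local":       complete.PredictNothing,
		},
	}
	// Run inspects the COMP_LINE environment variable set by the shell
	// and prints matching completions.
	complete.New("vault", cmd).Run()
}
```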
diff --git a/vendor/github.com/hashicorp/vault/command/auth.go b/vendor/github.com/hashicorp/vault/command/auth.go
index 2af8780..00b21ce 100644
--- a/vendor/github.com/hashicorp/vault/command/auth.go
+++ b/vendor/github.com/hashicorp/vault/command/auth.go
@@ -15,13 +15,14 @@ import (
"github.com/hashicorp/vault/helper/password"
"github.com/hashicorp/vault/meta"
"github.com/mitchellh/mapstructure"
+ "github.com/posener/complete"
"github.com/ryanuber/columnize"
)
// AuthHandler is the interface that any auth handlers must implement
// to enable auth via the CLI.
type AuthHandler interface {
- Auth(*api.Client, map[string]string) (string, error)
+ Auth(*api.Client, map[string]string) (*api.Secret, error)
Help() string
}
@@ -37,11 +38,13 @@ type AuthCommand struct {
func (c *AuthCommand) Run(args []string) int {
var method, authPath string
- var methods, methodHelp, noVerify bool
+ var methods, methodHelp, noVerify, noStore, tokenOnly bool
flags := c.Meta.FlagSet("auth", meta.FlagSetDefault)
flags.BoolVar(&methods, "methods", false, "")
flags.BoolVar(&methodHelp, "method-help", false, "")
flags.BoolVar(&noVerify, "no-verify", false, "")
+ flags.BoolVar(&noStore, "no-store", false, "")
+ flags.BoolVar(&tokenOnly, "token-only", false, "")
flags.StringVar(&method, "method", "", "method")
flags.StringVar(&authPath, "path", "", "")
flags.Usage = func() { c.Ui.Error(c.Help()) }
@@ -127,8 +130,8 @@ func (c *AuthCommand) Run(args []string) int {
}
// Warn if the VAULT_TOKEN environment variable is set, as that will take
- // precedence
- if os.Getenv("VAULT_TOKEN") != "" {
+ // precedence. Don't output on token-only since we're likely piping output.
+ if os.Getenv("VAULT_TOKEN") != "" && !tokenOnly {
c.Ui.Output("==> WARNING: VAULT_TOKEN environment variable set!\n")
c.Ui.Output(" The environment variable takes precedence over the value")
c.Ui.Output(" set by the auth command. Either update the value of the")
@@ -164,11 +167,52 @@ func (c *AuthCommand) Run(args []string) int {
}
// Authenticate
- token, err := handler.Auth(client, vars)
+ secret, err := handler.Auth(client, vars)
if err != nil {
c.Ui.Error(err.Error())
return 1
}
+ if secret == nil {
+ c.Ui.Error("Empty response from auth helper")
+ return 1
+ }
+
+ // If we had requested a wrapped token, unset that request
+ // before making any further calls
+ client.SetWrappingLookupFunc(func(string, string) string {
+ return ""
+ })
+
+CHECK_TOKEN:
+ var token string
+ switch {
+ case secret == nil:
+ c.Ui.Error("Empty response from auth helper")
+ return 1
+
+ case secret.Auth != nil:
+ token = secret.Auth.ClientToken
+
+ case secret.WrapInfo != nil:
+ if secret.WrapInfo.WrappedAccessor == "" {
+ c.Ui.Error("Got a wrapped response from Vault but wrapped reply does not seem to contain a token")
+ return 1
+ }
+ if tokenOnly {
+ c.Ui.Output(secret.WrapInfo.Token)
+ return 0
+ }
+ if noStore {
+ return OutputSecret(c.Ui, "table", secret)
+ }
+ client.SetToken(secret.WrapInfo.Token)
+ secret, err = client.Logical().Unwrap("")
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error unwrapping response: %s", err))
+ return 1
+ }
+ goto CHECK_TOKEN
+
+ default:
+ c.Ui.Error("No auth or wrapping info in auth helper response")
+ return 1
+ }
// Cache the previous token so that it can be restored if authentication fails
var previousToken string
@@ -177,14 +221,21 @@ func (c *AuthCommand) Run(args []string) int {
return 1
}
+ if tokenOnly {
+ c.Ui.Output(token)
+ return 0
+ }
+
// Store the token!
- if err := tokenHelper.Store(token); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error storing token: %s\n\n"+
- "Authentication was not successful and did not persist.\n"+
- "Please reauthenticate, or fix the issue above if possible.",
- err))
- return 1
+ if !noStore {
+ if err := tokenHelper.Store(token); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error storing token: %s\n\n"+
+ "Authentication was not successful and did not persist.\n"+
+ "Please reauthenticate, or fix the issue above if possible.",
+ err))
+ return 1
+ }
}
if noVerify {
@@ -192,6 +243,16 @@ func (c *AuthCommand) Run(args []string) int {
"Authenticated - no token verification has been performed.",
))
+ if noStore {
+ if err := tokenHelper.Erase(); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error removing prior token: %s\n\n"+
+ "Authentication was successful, but unable to remove the\n"+
+ "previous token.",
+ err))
+ return 1
+ }
+ }
return 0
}
@@ -200,17 +261,28 @@ func (c *AuthCommand) Run(args []string) int {
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing client to verify the token: %s", err))
- if err := tokenHelper.Store(previousToken); err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error restoring the previous token: %s\n\n"+
- "Please reauthenticate with a valid token.",
- err))
+ if !noStore {
+ if err := tokenHelper.Store(previousToken); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error restoring the previous token: %s\n\n"+
+ "Please reauthenticate with a valid token.",
+ err))
+ }
}
return 1
}
+ client.SetWrappingLookupFunc(func(string, string) string {
+ return ""
+ })
+
+ // If in no-store mode the client won't have read the token from a
+ // token helper (or will have read an old one), so set it explicitly
+ if noStore {
+ client.SetToken(token)
+ }
// Verify the token
- secret, err := client.Auth().Token().LookupSelf()
+ secret, err = client.Auth().Token().LookupSelf()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error validating token: %s", err))
@@ -222,7 +294,7 @@ func (c *AuthCommand) Run(args []string) int {
}
return 1
}
- if secret == nil {
+ if secret == nil && !noStore {
c.Ui.Error(fmt.Sprintf("Error: Invalid token"))
if err := tokenHelper.Store(previousToken); err != nil {
c.Ui.Error(fmt.Sprintf(
@@ -233,10 +305,21 @@ func (c *AuthCommand) Run(args []string) int {
return 1
}
+ if noStore {
+ if err := tokenHelper.Erase(); err != nil {
+ c.Ui.Error(fmt.Sprintf(
+ "Error removing prior token: %s\n\n"+
+ "Authentication was successful, but unable to remove the\n"+
+ "previous token.",
+ err))
+ return 1
+ }
+ }
+
// Get the policies we have
policiesRaw, ok := secret.Data["policies"]
- if !ok {
- policiesRaw = []string{"unknown"}
+ if !ok || policiesRaw == nil {
+ policiesRaw = []interface{}{"unknown"}
}
var policies []string
for _, v := range policiesRaw.([]interface{}) {
@@ -244,6 +327,9 @@ func (c *AuthCommand) Run(args []string) int {
}
output := "Successfully authenticated! You are now logged in."
+ if noStore {
+ output += "\nThe token has not been stored to the configured token helper."
+ }
if method != "" {
output += "\nThe token below is already saved in the session. You do not"
output += "\nneed to \"vault auth\" again with the token."
@@ -260,15 +346,25 @@ func (c *AuthCommand) Run(args []string) int {
}
-func (c *AuthCommand) listMethods() int {
+func (c *AuthCommand) getMethods() (map[string]*api.AuthMount, error) {
client, err := c.Client()
if err != nil {
- c.Ui.Error(fmt.Sprintf(
- "Error initializing client: %s", err))
- return 1
+ return nil, err
}
+ client.SetWrappingLookupFunc(func(string, string) string {
+ return ""
+ })
auth, err := client.Sys().ListAuth()
+ if err != nil {
+ return nil, err
+ }
+
+ return auth, nil
+}
+
+func (c *AuthCommand) listMethods() int {
+ auth, err := c.getMethods()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error reading auth table: %s", err))
@@ -281,7 +377,7 @@ func (c *AuthCommand) listMethods() int {
}
sort.Strings(paths)
- columns := []string{"Path | Type | Default TTL | Max TTL | Replication Behavior | Description"}
+ columns := []string{"Path | Type | Accessor | Default TTL | Max TTL | Replication Behavior | Description"}
for _, path := range paths {
auth := auth[path]
defTTL := "system"
@@ -297,7 +393,7 @@ func (c *AuthCommand) listMethods() int {
replicatedBehavior = "local"
}
columns = append(columns, fmt.Sprintf(
- "%s | %s | %s | %s | %s | %s", path, auth.Type, defTTL, maxTTL, replicatedBehavior, auth.Description))
+ "%s | %s | %s | %s | %s | %s | %s", path, auth.Type, auth.Accessor, defTTL, maxTTL, replicatedBehavior, auth.Description))
}
c.Ui.Output(columnize.SimpleFormat(columns))
@@ -338,15 +434,21 @@ Usage: vault auth [options] [auth-information]
The value of the "-path" flag is supplied to auth providers as the "mount"
option in the payload to specify the mount point.
+ If response wrapping is used (via -wrap-ttl), the returned token will be
+ automatically unwrapped unless:
+ * -token-only is used, in which case the wrapping token will be output
+ * -no-store is used, in which case the details of the wrapping token
+ will be printed
+
General Options:
` + meta.GeneralOptionsUsage() + `
Auth Options:
- -method=name Outputs help for the authentication method with the given
- name for the remote server. If this authentication method
- is not available, exit with code 1.
+ -method=name Use the method given here, which is a type of backend, not
+ the path. If this authentication method is not available,
+ exit with code 1.
-method-help If set, the help for the selected method will be shown.
@@ -355,6 +457,12 @@ Auth Options:
-no-verify Do not verify the token after creation; avoids a use count
decrement.
+ -no-store Do not store the token after creation; it will only be
+ displayed in the command output.
+
+ -token-only Output only the token to stdout. This implies -no-verify
+ and -no-store.
+
-path The path at which the auth backend is enabled. If an auth
backend is mounted at multiple paths, this option can be
used to authenticate against specific paths.
@@ -367,7 +475,7 @@ type tokenAuthHandler struct {
Token string
}
-func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (string, error) {
+func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (*api.Secret, error) {
token := h.Token
if token == "" {
var err error
@@ -377,7 +485,7 @@ func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (string, error)
token, err = password.Read(os.Stdin)
fmt.Printf("\n")
if err != nil {
- return "", fmt.Errorf(
+ return nil, fmt.Errorf(
"Error attempting to ask for token. The raw error message\n"+
"is shown below, but the most common reason for this error is\n"+
"that you attempted to pipe a value into auth. If you want to\n"+
@@ -387,12 +495,16 @@ func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (string, error)
}
if token == "" {
- return "", fmt.Errorf(
+ return nil, fmt.Errorf(
"A token must be passed to auth. Please view the help\n" +
"for more information.")
}
- return token, nil
+ return &api.Secret{
+ Auth: &api.SecretAuth{
+ ClientToken: token,
+ },
+ }, nil
}
func (h *tokenAuthHandler) Help() string {
@@ -411,3 +523,35 @@ tokens are created via the API or command line interface (with the
return strings.TrimSpace(help)
}
+
+func (c *AuthCommand) AutocompleteArgs() complete.Predictor {
+ return complete.PredictNothing
+}
+
+func (c *AuthCommand) AutocompleteFlags() complete.Flags {
+ var predictFunc complete.PredictFunc = func(a complete.Args) []string {
+ auths, err := c.getMethods()
+ if err != nil {
+ return []string{}
+ }
+
+ methods := make([]string, 0, len(auths))
+ for _, auth := range auths {
+ if strings.HasPrefix(auth.Type, a.Last) {
+ methods = append(methods, auth.Type)
+ }
+ }
+
+ return methods
+ }
+
+ return complete.Flags{
+ "-method": predictFunc,
+ "-methods": complete.PredictNothing,
+ "-method-help": complete.PredictNothing,
+ "-no-verify": complete.PredictNothing,
+ "-no-store": complete.PredictNothing,
+ "-token-only": complete.PredictNothing,
+ "-path": complete.PredictNothing,
+ }
+}
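The `CHECK_TOKEN` loop above keeps unwrapping until the secret carries an `Auth` block. The same unwrap step in isolation, as a sketch (the function name and its error text are illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

// unwrapLoginToken exchanges a wrapping token (such as the output of
// "vault auth -wrap-ttl=... -token-only") for the real client token.
func unwrapLoginToken(client *api.Client, wrappingToken string) (string, error) {
	secret, err := client.Logical().Unwrap(wrappingToken)
	if err != nil {
		return "", err
	}
	if secret == nil || secret.Auth == nil {
		return "", fmt.Errorf("unwrapped response contains no auth data")
	}
	return secret.Auth.ClientToken, nil
}
```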
diff --git a/vendor/github.com/hashicorp/vault/command/auth_enable.go b/vendor/github.com/hashicorp/vault/command/auth_enable.go
index 81c7cce..e6b7f20 100644
--- a/vendor/github.com/hashicorp/vault/command/auth_enable.go
+++ b/vendor/github.com/hashicorp/vault/command/auth_enable.go
@@ -6,6 +6,7 @@ import (
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/meta"
+ "github.com/posener/complete"
)
// AuthEnableCommand is a Command that enables a new endpoint.
@@ -14,11 +15,12 @@ type AuthEnableCommand struct {
}
func (c *AuthEnableCommand) Run(args []string) int {
- var description, path string
+ var description, path, pluginName string
var local bool
flags := c.Meta.FlagSet("auth-enable", meta.FlagSetDefault)
flags.StringVar(&description, "description", "", "")
flags.StringVar(&path, "path", "", "")
+ flags.StringVar(&pluginName, "plugin-name", "", "")
flags.BoolVar(&local, "local", false, "")
flags.Usage = func() { c.Ui.Error(c.Help()) }
if err := flags.Parse(args); err != nil {
@@ -36,8 +38,13 @@ func (c *AuthEnableCommand) Run(args []string) int {
authType := args[0]
// If no path is specified, we default the path to the backend type
+ // or use the plugin name if it's a plugin backend
if path == "" {
- path = authType
+ if authType == "plugin" {
+ path = pluginName
+ } else {
+ path = authType
+ }
}
client, err := c.Client()
@@ -50,16 +57,24 @@ func (c *AuthEnableCommand) Run(args []string) int {
if err := client.Sys().EnableAuthWithOptions(path, &api.EnableAuthOptions{
Type: authType,
Description: description,
- Local: local,
+ Config: api.AuthConfigInput{
+ PluginName: pluginName,
+ },
+ Local: local,
}); err != nil {
c.Ui.Error(fmt.Sprintf(
"Error: %s", err))
return 2
}
+ authTypeOutput := fmt.Sprintf("'%s'", authType)
+ if authType == "plugin" {
+ authTypeOutput = fmt.Sprintf("plugin '%s'", pluginName)
+ }
+
c.Ui.Output(fmt.Sprintf(
- "Successfully enabled '%s' at '%s'!",
- authType, path))
+ "Successfully enabled %s at '%s'!",
+ authTypeOutput, path))
return 0
}
@@ -89,9 +104,38 @@ Auth Enable Options:
to the type of the mount. This will make the auth
provider available at "/auth/"
+ -plugin-name Name of the auth plugin to use, as registered in
+ the plugin catalog.
+
-local Mark the mount as a local mount. Local mounts
are not replicated nor (if a secondary)
removed by replication.
`
return strings.TrimSpace(helpText)
}
+
+func (c *AuthEnableCommand) AutocompleteArgs() complete.Predictor {
+ return complete.PredictSet(
+ "approle",
+ "cert",
+ "aws",
+ "app-id",
+ "gcp",
+ "github",
+ "userpass",
+ "ldap",
+ "okta",
+ "radius",
+ "plugin",
+ )
+
+}
+
+func (c *AuthEnableCommand) AutocompleteFlags() complete.Flags {
+ return complete.Flags{
+ "-description": complete.PredictNothing,
+ "-path": complete.PredictNothing,
+ "-plugin-name": complete.PredictNothing,
+ "-local": complete.PredictNothing,
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/auth_test.go b/vendor/github.com/hashicorp/vault/command/auth_test.go
index 9ffd0ac..8243129 100644
--- a/vendor/github.com/hashicorp/vault/command/auth_test.go
+++ b/vendor/github.com/hashicorp/vault/command/auth_test.go
@@ -9,6 +9,9 @@ import (
"strings"
"testing"
+ credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
+ "github.com/hashicorp/vault/logical"
+
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/meta"
@@ -84,6 +87,194 @@ func TestAuth_token(t *testing.T) {
}
}
+func TestAuth_wrapping(t *testing.T) {
+ baseConfig := &vault.CoreConfig{
+ CredentialBackends: map[string]logical.Factory{
+ "userpass": credUserpass.Factory,
+ },
+ }
+ cluster := vault.NewTestCluster(t, baseConfig, &vault.TestClusterOptions{
+ HandlerFunc: http.Handler,
+ BaseListenAddress: "127.0.0.1:8200",
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ testAuthInit(t)
+
+ client := cluster.Cores[0].Client
+ err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{
+ Type: "userpass",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Logical().Write("auth/userpass/users/foo", map[string]interface{}{
+ "password": "bar",
+ "policies": "zip,zap",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ui := new(cli.MockUi)
+ c := &AuthCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ TokenHelper: DefaultTokenHelper,
+ },
+ Handlers: map[string]AuthHandler{
+ "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"},
+ },
+ }
+
+ args := []string{
+ "-address",
+ "https://127.0.0.1:8200",
+ "-tls-skip-verify",
+ "-method",
+ "userpass",
+ "username=foo",
+ "password=bar",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Test again with wrapping
+ ui = new(cli.MockUi)
+ c = &AuthCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ TokenHelper: DefaultTokenHelper,
+ },
+ Handlers: map[string]AuthHandler{
+ "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"},
+ },
+ }
+
+ args = []string{
+ "-address",
+ "https://127.0.0.1:8200",
+ "-tls-skip-verify",
+ "-wrap-ttl",
+ "5m",
+ "-method",
+ "userpass",
+ "username=foo",
+ "password=bar",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Test again with no-store
+ ui = new(cli.MockUi)
+ c = &AuthCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ TokenHelper: DefaultTokenHelper,
+ },
+ Handlers: map[string]AuthHandler{
+ "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"},
+ },
+ }
+
+ args = []string{
+ "-address",
+ "https://127.0.0.1:8200",
+ "-tls-skip-verify",
+ "-wrap-ttl",
+ "5m",
+ "-no-store",
+ "-method",
+ "userpass",
+ "username=foo",
+ "password=bar",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ // Test again with wrapping and token-only
+ ui = new(cli.MockUi)
+ c = &AuthCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ TokenHelper: DefaultTokenHelper,
+ },
+ Handlers: map[string]AuthHandler{
+ "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"},
+ },
+ }
+
+ args = []string{
+ "-address",
+ "https://127.0.0.1:8200",
+ "-tls-skip-verify",
+ "-wrap-ttl",
+ "5m",
+ "-token-only",
+ "-method",
+ "userpass",
+ "username=foo",
+ "password=bar",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+ token := strings.TrimSpace(ui.OutputWriter.String())
+ if token == "" {
+ t.Fatal("expected to find token in output")
+ }
+ secret, err := client.Logical().Unwrap(token)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret.Auth.ClientToken == "" {
+ t.Fatal("no client token found")
+ }
+}
+
+func TestAuth_token_nostore(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ testAuthInit(t)
+
+ ui := new(cli.MockUi)
+ c := &AuthCommand{
+ Meta: meta.Meta{
+ Ui: ui,
+ TokenHelper: DefaultTokenHelper,
+ },
+ }
+
+ args := []string{
+ "-address", addr,
+ "-no-store",
+ token,
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ helper, err := c.TokenHelper()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ actual, err := helper.Get()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if actual != "" {
+ t.Fatalf("bad: %s", actual)
+ }
+}
+
func TestAuth_stdin(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := http.TestServer(t, core)
@@ -198,8 +389,12 @@ func testAuthInit(t *testing.T) {
type testAuthHandler struct{}
-func (h *testAuthHandler) Auth(c *api.Client, m map[string]string) (string, error) {
- return m["foo"], nil
+func (h *testAuthHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
+ return &api.Secret{
+ Auth: &api.SecretAuth{
+ ClientToken: m["foo"],
+ },
+ }, nil
}
func (h *testAuthHandler) Help() string { return "" }
diff --git a/vendor/github.com/hashicorp/vault/command/format.go b/vendor/github.com/hashicorp/vault/command/format.go
index 4520b20..38f24d4 100644
--- a/vendor/github.com/hashicorp/vault/command/format.go
+++ b/vendor/github.com/hashicorp/vault/command/format.go
@@ -14,9 +14,12 @@ import (
"github.com/ghodss/yaml"
"github.com/hashicorp/vault/api"
"github.com/mitchellh/cli"
+ "github.com/posener/complete"
"github.com/ryanuber/columnize"
)
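+// predictFormat predicts the values accepted by -format flags during shell completion.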
+var predictFormat complete.Predictor = complete.PredictSet("json", "yaml")
+
func OutputSecret(ui cli.Ui, format string, secret *api.Secret) int {
return outputWithFormat(ui, format, secret, secret)
}
@@ -181,6 +184,7 @@ func (t TableFormatter) OutputSecret(ui cli.Ui, secret, s *api.Secret) error {
input = append(input, fmt.Sprintf("wrapping_token: %s %s", config.Delim, s.WrapInfo.Token))
input = append(input, fmt.Sprintf("wrapping_token_ttl: %s %s", config.Delim, (time.Second*time.Duration(s.WrapInfo.TTL)).String()))
input = append(input, fmt.Sprintf("wrapping_token_creation_time: %s %s", config.Delim, s.WrapInfo.CreationTime.String()))
+ input = append(input, fmt.Sprintf("wrapping_token_creation_path: %s %s", config.Delim, s.WrapInfo.CreationPath))
if s.WrapInfo.WrappedAccessor != "" {
input = append(input, fmt.Sprintf("wrapped_accessor: %s %s", config.Delim, s.WrapInfo.WrappedAccessor))
}
diff --git a/vendor/github.com/hashicorp/vault/command/generate-root.go b/vendor/github.com/hashicorp/vault/command/generate-root.go
index f013294..2d9521b 100644
--- a/vendor/github.com/hashicorp/vault/command/generate-root.go
+++ b/vendor/github.com/hashicorp/vault/command/generate-root.go
@@ -13,6 +13,7 @@ import (
"github.com/hashicorp/vault/helper/pgpkeys"
"github.com/hashicorp/vault/helper/xor"
"github.com/hashicorp/vault/meta"
+ "github.com/posener/complete"
)
// GenerateRootCommand is a Command that generates a new root token.
@@ -352,3 +353,20 @@ Generate Root Options:
`
return strings.TrimSpace(helpText)
}
+
+func (c *GenerateRootCommand) AutocompleteArgs() complete.Predictor {
+ return complete.PredictNothing
+}
+
+func (c *GenerateRootCommand) AutocompleteFlags() complete.Flags {
+ return complete.Flags{
+ "-init": complete.PredictNothing,
+ "-cancel": complete.PredictNothing,
+ "-status": complete.PredictNothing,
+ "-decode": complete.PredictNothing,
+ "-genotp": complete.PredictNothing,
+ "-otp": complete.PredictNothing,
+ "-pgp-key": complete.PredictNothing,
+ "-nonce": complete.PredictNothing,
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/generate-root_test.go b/vendor/github.com/hashicorp/vault/command/generate-root_test.go
index 847400d..31d956d 100644
--- a/vendor/github.com/hashicorp/vault/command/generate-root_test.go
+++ b/vendor/github.com/hashicorp/vault/command/generate-root_test.go
@@ -82,7 +82,7 @@ func TestGenerateRoot_status(t *testing.T) {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
- if !strings.Contains(string(ui.OutputWriter.Bytes()), "Started: true") {
+ if !strings.Contains(ui.OutputWriter.String(), "Started: true") {
t.Fatalf("bad: %s", ui.OutputWriter.String())
}
}
diff --git a/vendor/github.com/hashicorp/vault/command/init.go b/vendor/github.com/hashicorp/vault/command/init.go
index 4c638dc..470c325 100644
--- a/vendor/github.com/hashicorp/vault/command/init.go
+++ b/vendor/github.com/hashicorp/vault/command/init.go
@@ -11,7 +11,8 @@ import (
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/pgpkeys"
"github.com/hashicorp/vault/meta"
- "github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/physical/consul"
+ "github.com/posener/complete"
)
// InitCommand is a Command that initializes a new Vault server.
@@ -36,7 +37,7 @@ func (c *InitCommand) Run(args []string) int {
flags.Var(&recoveryPgpKeys, "recovery-pgp-keys", "")
flags.BoolVar(&check, "check", false, "")
flags.BoolVar(&auto, "auto", false, "")
- flags.StringVar(&consulServiceName, "consul-service", physical.DefaultServiceName, "")
+ flags.StringVar(&consulServiceName, "consul-service", consul.DefaultServiceName, "")
if err := flags.Parse(args); err != nil {
return 1
}
@@ -384,3 +385,22 @@ Init Options:
`
return strings.TrimSpace(helpText)
}
+
+func (c *InitCommand) AutocompleteArgs() complete.Predictor {
+ return complete.PredictNothing
+}
+
+func (c *InitCommand) AutocompleteFlags() complete.Flags {
+ return complete.Flags{
+ "-check": complete.PredictNothing,
+ "-key-shares": complete.PredictNothing,
+ "-key-threshold": complete.PredictNothing,
+ "-pgp-keys": complete.PredictNothing,
+ "-root-token-pgp-key": complete.PredictNothing,
+ "-recovery-shares": complete.PredictNothing,
+ "-recovery-threshold": complete.PredictNothing,
+ "-recovery-pgp-keys": complete.PredictNothing,
+ "-auto": complete.PredictNothing,
+ "-consul-service": complete.PredictNothing,
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/mount.go b/vendor/github.com/hashicorp/vault/command/mount.go
index eb2b53a..895e7b8 100644
--- a/vendor/github.com/hashicorp/vault/command/mount.go
+++ b/vendor/github.com/hashicorp/vault/command/mount.go
@@ -6,6 +6,7 @@ import (
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/meta"
+ "github.com/posener/complete"
)
// MountCommand is a Command that mounts a new mount.
@@ -14,13 +15,14 @@ type MountCommand struct {
}
func (c *MountCommand) Run(args []string) int {
- var description, path, defaultLeaseTTL, maxLeaseTTL string
+ var description, path, defaultLeaseTTL, maxLeaseTTL, pluginName string
var local, forceNoCache bool
flags := c.Meta.FlagSet("mount", meta.FlagSetDefault)
flags.StringVar(&description, "description", "", "")
flags.StringVar(&path, "path", "", "")
flags.StringVar(&defaultLeaseTTL, "default-lease-ttl", "", "")
flags.StringVar(&maxLeaseTTL, "max-lease-ttl", "", "")
+ flags.StringVar(&pluginName, "plugin-name", "", "")
flags.BoolVar(&forceNoCache, "force-no-cache", false, "")
flags.BoolVar(&local, "local", false, "")
flags.Usage = func() { c.Ui.Error(c.Help()) }
@@ -39,8 +41,13 @@ func (c *MountCommand) Run(args []string) int {
mountType := args[0]
// If no path is specified, we default the path to the backend type
+ // or use the plugin name if it's a plugin backend
if path == "" {
- path = mountType
+ if mountType == "plugin" {
+ path = pluginName
+ } else {
+ path = mountType
+ }
}
client, err := c.Client()
@@ -57,6 +64,7 @@ func (c *MountCommand) Run(args []string) int {
DefaultLeaseTTL: defaultLeaseTTL,
MaxLeaseTTL: maxLeaseTTL,
ForceNoCache: forceNoCache,
+ PluginName: pluginName,
},
Local: local,
}
@@ -67,9 +75,14 @@ func (c *MountCommand) Run(args []string) int {
return 2
}
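+ // Surface the plugin name in the success message when mounting a plugin backend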
+ mountTypeOutput := fmt.Sprintf("'%s'", mountType)
+ if mountType == "plugin" {
+ mountTypeOutput = fmt.Sprintf("plugin '%s'", pluginName)
+ }
+
c.Ui.Output(fmt.Sprintf(
- "Successfully mounted '%s' at '%s'!",
- mountType, path))
+ "Successfully mounted %s at '%s'!",
+ mountTypeOutput, path))
return 0
}
@@ -112,10 +125,40 @@ Mount Options:
not affect caching of the underlying encrypted
data storage.
+ -plugin-name Name of the plugin to mount, based on the name
+ in the plugin catalog.
+
-local Mark the mount as a local mount. Local mounts
are not replicated nor (if a secondary)
removed by replication.
-
`
return strings.TrimSpace(helpText)
}
+
+func (c *MountCommand) AutocompleteArgs() complete.Predictor {
+ // This list does not contain deprecated backends
+ return complete.PredictSet(
+ "aws",
+ "consul",
+ "pki",
+ "transit",
+ "ssh",
+ "rabbitmq",
+ "database",
+ "totp",
+ "plugin",
+ )
+}
+
+func (c *MountCommand) AutocompleteFlags() complete.Flags {
+ return complete.Flags{
+ "-description": complete.PredictNothing,
+ "-path": complete.PredictNothing,
+ "-default-lease-ttl": complete.PredictNothing,
+ "-max-lease-ttl": complete.PredictNothing,
+ "-force-no-cache": complete.PredictNothing,
+ "-plugin-name": complete.PredictNothing,
+ "-local": complete.PredictNothing,
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/mount_test.go b/vendor/github.com/hashicorp/vault/command/mount_test.go
index 314ac13..ea9108c 100644
--- a/vendor/github.com/hashicorp/vault/command/mount_test.go
+++ b/vendor/github.com/hashicorp/vault/command/mount_test.go
@@ -22,6 +22,46 @@ func TestMount(t *testing.T) {
},
}
+ args := []string{
+ "-address", addr,
+ "kv",
+ }
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ mounts, err := client.Sys().ListMounts()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ mount, ok := mounts["kv/"]
+ if !ok {
+ t.Fatal("should have kv mount")
+ }
+ if mount.Type != "kv" {
+ t.Fatal("should be kv type")
+ }
+}
+
+func TestMount_Generic(t *testing.T) {
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := http.TestServer(t, core)
+ defer ln.Close()
+
+ ui := new(cli.MockUi)
+ c := &MountCommand{
+ Meta: meta.Meta{
+ ClientToken: token,
+ Ui: ui,
+ },
+ }
+
args := []string{
"-address", addr,
"generic",
@@ -42,7 +82,7 @@ func TestMount(t *testing.T) {
mount, ok := mounts["generic/"]
if !ok {
- t.Fatal("should have generic mount")
+ t.Fatal("should have generic mount path")
}
if mount.Type != "generic" {
t.Fatal("should be generic type")
diff --git a/vendor/github.com/hashicorp/vault/command/mounts.go b/vendor/github.com/hashicorp/vault/command/mounts.go
index d918d67..2615776 100644
--- a/vendor/github.com/hashicorp/vault/command/mounts.go
+++ b/vendor/github.com/hashicorp/vault/command/mounts.go
@@ -42,9 +42,13 @@ func (c *MountsCommand) Run(args []string) int {
}
sort.Strings(paths)
- columns := []string{"Path | Type | Default TTL | Max TTL | Force No Cache | Replication Behavior | Description"}
+ columns := []string{"Path | Type | Accessor | Plugin | Default TTL | Max TTL | Force No Cache | Replication Behavior | Description"}
for _, path := range paths {
mount := mounts[path]
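+ // Show "n/a" in the new Plugin column for mounts that have no plugin name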
+ pluginName := "n/a"
+ if mount.Config.PluginName != "" {
+ pluginName = mount.Config.PluginName
+ }
defTTL := "system"
switch {
case mount.Type == "system":
@@ -68,7 +72,7 @@ func (c *MountsCommand) Run(args []string) int {
replicatedBehavior = "local"
}
columns = append(columns, fmt.Sprintf(
- "%s | %s | %s | %s | %v | %s | %s", path, mount.Type, defTTL, maxTTL,
+ "%s | %s | %s | %s | %s | %s | %v | %s | %s", path, mount.Type, mount.Accessor, pluginName, defTTL, maxTTL,
mount.Config.ForceNoCache, replicatedBehavior, mount.Description))
}
diff --git a/vendor/github.com/hashicorp/vault/command/policy_write.go b/vendor/github.com/hashicorp/vault/command/policy_write.go
index 4f73ffe..59b26fb 100644
--- a/vendor/github.com/hashicorp/vault/command/policy_write.go
+++ b/vendor/github.com/hashicorp/vault/command/policy_write.go
@@ -37,7 +37,8 @@ func (c *PolicyWriteCommand) Run(args []string) int {
return 2
}
- name := args[0]
+ // Policies are normalized to lowercase
+ name := strings.ToLower(args[0])
path := args[1]
// Read the policy
diff --git a/vendor/github.com/hashicorp/vault/command/read.go b/vendor/github.com/hashicorp/vault/command/read.go
index 6e9c4d7..d989178 100644
--- a/vendor/github.com/hashicorp/vault/command/read.go
+++ b/vendor/github.com/hashicorp/vault/command/read.go
@@ -7,6 +7,7 @@ import (
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/meta"
+ "github.com/posener/complete"
)
// ReadCommand is a Command that reads data from the Vault.
@@ -95,3 +96,14 @@ Read Options:
`
return strings.TrimSpace(helpText)
}
+
+func (c *ReadCommand) AutocompleteArgs() complete.Predictor {
+ return complete.PredictNothing
+}
+
+func (c *ReadCommand) AutocompleteFlags() complete.Flags {
+ return complete.Flags{
+ "-format": predictFormat,
+ "-field": complete.PredictNothing,
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/rekey.go b/vendor/github.com/hashicorp/vault/command/rekey.go
index 16022be..bf47c2c 100644
--- a/vendor/github.com/hashicorp/vault/command/rekey.go
+++ b/vendor/github.com/hashicorp/vault/command/rekey.go
@@ -10,6 +10,7 @@ import (
"github.com/hashicorp/vault/helper/password"
"github.com/hashicorp/vault/helper/pgpkeys"
"github.com/hashicorp/vault/meta"
+ "github.com/posener/complete"
)
// RekeyCommand is a Command that rekeys the vault.
@@ -418,3 +419,23 @@ Rekey Options:
`
return strings.TrimSpace(helpText)
}
+
+func (c *RekeyCommand) AutocompleteArgs() complete.Predictor {
+ return complete.PredictNothing
+}
+
+func (c *RekeyCommand) AutocompleteFlags() complete.Flags {
+ return complete.Flags{
+ "-init": complete.PredictNothing,
+ "-cancel": complete.PredictNothing,
+ "-status": complete.PredictNothing,
+ "-retrieve": complete.PredictNothing,
+ "-delete": complete.PredictNothing,
+ "-key-shares": complete.PredictNothing,
+ "-key-threshold": complete.PredictNothing,
+ "-nonce": complete.PredictNothing,
+ "-pgp-keys": complete.PredictNothing,
+ "-backup": complete.PredictNothing,
+ "-recovery-key": complete.PredictNothing,
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/command/rekey_test.go b/vendor/github.com/hashicorp/vault/command/rekey_test.go
index 21e4e24..6f12d78 100644
--- a/vendor/github.com/hashicorp/vault/command/rekey_test.go
+++ b/vendor/github.com/hashicorp/vault/command/rekey_test.go
@@ -182,7 +182,7 @@ func TestRekey_status(t *testing.T) {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
- if !strings.Contains(string(ui.OutputWriter.Bytes()), "Started: true") {
+ if !strings.Contains(ui.OutputWriter.String(), "Started: true") {
t.Fatalf("bad: %s", ui.OutputWriter.String())
}
}
@@ -199,7 +199,8 @@ func TestRekey_init_pgp(t *testing.T) {
MaxLeaseTTLVal: time.Hour * 24 * 32,
},
}
- sysBackend, err := vault.NewSystemBackend(core, bc)
+ sysBackend := vault.NewSystemBackend(core)
+ err := sysBackend.Backend.Setup(bc)
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/github.com/hashicorp/vault/command/remount.go b/vendor/github.com/hashicorp/vault/command/remount.go
index a6defa7..a36f141 100644
--- a/vendor/github.com/hashicorp/vault/command/remount.go
+++ b/vendor/github.com/hashicorp/vault/command/remount.go
@@ -65,7 +65,7 @@ Usage: vault remount [options] from to
the data associated with the backend (such as configuration), will
be preserved.
- Example: vault remount secret/ generic/
+ Example: vault remount secret/ kv/
General Options:
` + meta.GeneralOptionsUsage()
diff --git a/vendor/github.com/hashicorp/vault/command/remount_test.go b/vendor/github.com/hashicorp/vault/command/remount_test.go
index 0d6f191..7ec1321 100644
--- a/vendor/github.com/hashicorp/vault/command/remount_test.go
+++ b/vendor/github.com/hashicorp/vault/command/remount_test.go
@@ -24,7 +24,7 @@ func TestRemount(t *testing.T) {
args := []string{
"-address", addr,
- "secret/", "generic",
+ "secret/", "kv",
}
if code := c.Run(args); code != 0 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
@@ -45,8 +45,8 @@ func TestRemount(t *testing.T) {
t.Fatal("should not have mount")
}
- _, ok = mounts["generic/"]
+ _, ok = mounts["kv/"]
if !ok {
- t.Fatal("should have generic")
+ t.Fatal("should have kv")
}
}
diff --git a/vendor/github.com/hashicorp/vault/command/renew_test.go b/vendor/github.com/hashicorp/vault/command/renew_test.go
index d43e516..2191662 100644
--- a/vendor/github.com/hashicorp/vault/command/renew_test.go
+++ b/vendor/github.com/hashicorp/vault/command/renew_test.go
@@ -90,6 +90,27 @@ func TestRenewBothWays(t *testing.T) {
t.Fatal("bad lease duration")
}
+ // Test another
+ r = client.NewRequest("PUT", "/v1/sys/leases/renew")
+ body = map[string]interface{}{
+ "lease_id": secret.LeaseID,
+ }
+ if err := r.SetJSONBody(body); err != nil {
+ t.Fatal(err)
+ }
+ resp, err = client.RawRequest(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close()
+ secret, err = api.ParseSecret(resp.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret.LeaseDuration != 60 {
+ t.Fatal("bad lease duration")
+ }
+
// Test the other
r = client.NewRequest("PUT", "/v1/sys/renew/"+secret.LeaseID)
resp, err = client.RawRequest(r)
@@ -104,4 +125,19 @@ func TestRenewBothWays(t *testing.T) {
if secret.LeaseDuration != 60 {
t.Fatalf("bad lease duration; secret is %#v\n", *secret)
}
+
+ // Test another
+ r = client.NewRequest("PUT", "/v1/sys/leases/renew/"+secret.LeaseID)
+ resp, err = client.RawRequest(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close()
+ secret, err = api.ParseSecret(resp.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if secret.LeaseDuration != 60 {
+ t.Fatalf("bad lease duration; secret is %#v\n", *secret)
+ }
}
diff --git a/vendor/github.com/hashicorp/vault/command/server.go b/vendor/github.com/hashicorp/vault/command/server.go
index c09db3d..e089ef2 100644
--- a/vendor/github.com/hashicorp/vault/command/server.go
+++ b/vendor/github.com/hashicorp/vault/command/server.go
@@ -3,11 +3,13 @@ package command
import (
"encoding/base64"
"fmt"
+ "io/ioutil"
"net"
"net/http"
"net/url"
"os"
"os/signal"
+ "path/filepath"
"runtime"
"sort"
"strconv"
@@ -20,11 +22,14 @@ import (
colorable "github.com/mattn/go-colorable"
log "github.com/mgutz/logxi/v1"
+ testing "github.com/mitchellh/go-testing-interface"
+ "github.com/posener/complete"
"google.golang.org/grpc/grpclog"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/circonus"
+ "github.com/armon/go-metrics/datadog"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/audit"
@@ -33,6 +38,8 @@ import (
"github.com/hashicorp/vault/helper/gated-writer"
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/helper/mlock"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/helper/reload"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/meta"
@@ -46,6 +53,7 @@ type ServerCommand struct {
AuditBackends map[string]audit.Factory
CredentialBackends map[string]logical.Factory
LogicalBackends map[string]logical.Factory
+ PhysicalBackends map[string]physical.Factory
ShutdownCh chan struct{}
SighupCh chan struct{}
@@ -54,26 +62,33 @@ type ServerCommand struct {
meta.Meta
- logger log.Logger
+ logGate *gatedwriter.Writer
+ logger log.Logger
cleanupGuard sync.Once
reloadFuncsLock *sync.RWMutex
- reloadFuncs *map[string][]vault.ReloadFunc
+ reloadFuncs *map[string][]reload.ReloadFunc
}
func (c *ServerCommand) Run(args []string) int {
- var dev, verifyOnly, devHA, devTransactional bool
+ var dev, verifyOnly, devHA, devTransactional, devLeasedKV, devThreeNode bool
var configPath []string
- var logLevel, devRootTokenID, devListenAddress string
+ var logLevel, devRootTokenID, devListenAddress, devPluginDir string
+ var devLatency, devLatencyJitter int
flags := c.Meta.FlagSet("server", meta.FlagSetDefault)
flags.BoolVar(&dev, "dev", false, "")
flags.StringVar(&devRootTokenID, "dev-root-token-id", "", "")
flags.StringVar(&devListenAddress, "dev-listen-address", "", "")
+ flags.StringVar(&devPluginDir, "dev-plugin-dir", "", "")
flags.StringVar(&logLevel, "log-level", "info", "")
+ flags.IntVar(&devLatency, "dev-latency", 0, "")
+ flags.IntVar(&devLatencyJitter, "dev-latency-jitter", 20, "")
flags.BoolVar(&verifyOnly, "verify-only", false, "")
- flags.BoolVar(&devHA, "ha", false, "")
- flags.BoolVar(&devTransactional, "transactional", false, "")
+ flags.BoolVar(&devHA, "dev-ha", false, "")
+ flags.BoolVar(&devTransactional, "dev-transactional", false, "")
+ flags.BoolVar(&devLeasedKV, "dev-leased-kv", false, "")
+ flags.BoolVar(&devThreeNode, "dev-three-node", false, "")
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.Var((*sliceflag.StringFlag)(&configPath), "config", "config")
if err := flags.Parse(args); err != nil {
@@ -82,7 +97,7 @@ func (c *ServerCommand) Run(args []string) int {
// Create a logger. We wrap it in a gated writer so that it doesn't
// start logging too early.
- logGate := &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)}
+ c.logGate = &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)}
var level int
logLevel = strings.ToLower(strings.TrimSpace(logLevel))
switch logLevel {
@@ -109,9 +124,9 @@ func (c *ServerCommand) Run(args []string) int {
}
switch strings.ToLower(logFormat) {
case "vault", "vault_json", "vault-json", "vaultjson", "json", "":
- c.logger = logformat.NewVaultLoggerWithWriter(logGate, level)
+ c.logger = logformat.NewVaultLoggerWithWriter(c.logGate, level)
default:
- c.logger = log.NewLogger(logGate, "vault")
+ c.logger = log.NewLogger(c.logGate, "vault")
c.logger.SetLevel(level)
}
grpclog.SetLogger(&grpclogFaker{
@@ -126,7 +141,7 @@ func (c *ServerCommand) Run(args []string) int {
devListenAddress = os.Getenv("VAULT_DEV_LISTEN_ADDRESS")
}
- if devHA || devTransactional {
+ if devHA || devTransactional || devLeasedKV || devThreeNode {
dev = true
}
@@ -194,8 +209,14 @@ func (c *ServerCommand) Run(args []string) int {
}
// Initialize the backend
- backend, err := physical.NewBackend(
- config.Storage.Type, c.logger, config.Storage.Config)
+ factory, exists := c.PhysicalBackends[config.Storage.Type]
+ if !exists {
+ c.Ui.Output(fmt.Sprintf(
+ "Unknown storage type %s",
+ config.Storage.Type))
+ return 1
+ }
+ backend, err := factory(config.Storage.Config, c.logger)
if err != nil {
c.Ui.Output(fmt.Sprintf(
"Error initializing storage of type %s: %s",
@@ -238,9 +259,29 @@ func (c *ServerCommand) Run(args []string) int {
DefaultLeaseTTL: config.DefaultLeaseTTL,
ClusterName: config.ClusterName,
CacheSize: config.CacheSize,
+ PluginDirectory: config.PluginDirectory,
+ EnableRaw: config.EnableRawEndpoint,
}
if dev {
coreConfig.DevToken = devRootTokenID
+ if devLeasedKV {
+ coreConfig.LogicalBackends["kv"] = vault.LeasedPassthroughBackendFactory
+ }
+ if devPluginDir != "" {
+ coreConfig.PluginDirectory = devPluginDir
+ }
+ if devLatency > 0 {
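+ // Wrap the storage backend in a latency injector to simulate slow storage in dev mode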
+ injectLatency := time.Duration(devLatency) * time.Millisecond
+ if _, txnOK := backend.(physical.Transactional); txnOK {
+ coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, devLatencyJitter, c.logger)
+ } else {
+ coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, devLatencyJitter, c.logger)
+ }
+ }
+ }
+
+ if devThreeNode {
+ return c.enableThreeNodeDevCluster(coreConfig, info, infoKeys, devListenAddress)
}
var disableClustering bool
@@ -248,8 +289,14 @@ func (c *ServerCommand) Run(args []string) int {
// Initialize the separate HA storage backend, if it exists
var ok bool
if config.HAStorage != nil {
- habackend, err := physical.NewBackend(
- config.HAStorage.Type, c.logger, config.HAStorage.Config)
+ factory, exists := c.PhysicalBackends[config.HAStorage.Type]
+ if !exists {
+ c.Ui.Output(fmt.Sprintf(
+ "Unknown HA storage type %s",
+ config.HAStorage.Type))
+ return 1
+ }
+ habackend, err := factory(config.HAStorage.Config, c.logger)
if err != nil {
c.Ui.Output(fmt.Sprintf(
"Error initializing HA storage of type %s: %s",
@@ -415,16 +462,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
c.reloadFuncsLock.Lock()
lns := make([]net.Listener, 0, len(config.Listeners))
for i, lnConfig := range config.Listeners {
- if lnConfig.Type == "atlas" {
- if config.ClusterName == "" {
- c.Ui.Output("cluster_name is not set in the config and is a required value")
- return 1
- }
-
- lnConfig.Config["cluster_name"] = config.ClusterName
- }
-
- ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, logGate)
+ ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, c.logGate)
if err != nil {
c.Ui.Output(fmt.Sprintf(
"Error initializing listener of type %s: %s",
@@ -441,9 +479,11 @@ CLUSTER_SYNTHESIS_COMPLETE:
}
if !disableClustering && lnConfig.Type == "tcp" {
+ var addrRaw interface{}
var addr string
var ok bool
- if addr, ok = lnConfig.Config["cluster_address"]; ok {
+ if addrRaw, ok = lnConfig.Config["cluster_address"]; ok {
+ addr = addrRaw.(string)
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
c.Ui.Output(fmt.Sprintf(
@@ -538,7 +578,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
sd, ok := coreConfig.HAPhysical.(physical.ServiceDiscovery)
if ok {
activeFunc := func() bool {
- if isLeader, _, err := core.Leader(); err == nil {
+ if isLeader, _, _, err := core.Leader(); err == nil {
return isLeader
}
return false
@@ -563,11 +603,11 @@ CLUSTER_SYNTHESIS_COMPLETE:
// This needs to happen before we first unseal, so before we trigger dev
// mode if it's set
core.SetClusterListenerAddrs(clusterAddrs)
- core.SetClusterSetupFuncs(vault.WrapHandlerForClustering(handler, c.logger))
+ core.SetClusterHandler(handler)
// If we're in Dev mode, then initialize the core
if dev {
- init, err := c.enableDev(core, devRootTokenID)
+ init, err := c.enableDev(core, coreConfig)
if err != nil {
c.Ui.Output(fmt.Sprintf(
"Error initializing Dev mode: %s", err))
@@ -589,7 +629,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
"immediately begin using the Vault CLI.\n\n"+
"The only step you need to take is to set the following\n"+
"environment variables:\n\n"+
- " "+export+" VAULT_ADDR="+quote+"http://"+config.Listeners[0].Config["address"]+quote+"\n\n"+
+ " "+export+" VAULT_ADDR="+quote+"http://"+config.Listeners[0].Config["address"].(string)+quote+"\n\n"+
"The unseal key and root token are reproduced below in case you\n"+
"want to seal/unseal the Vault or play with authentication.\n\n"+
"Unseal Key: %s\nRoot Token: %s\n",
@@ -618,7 +658,19 @@ CLUSTER_SYNTHESIS_COMPLETE:
c.Ui.Output("==> Vault server started! Log data will stream in below:\n")
// Release the log gate.
- logGate.Flush()
+ c.logGate.Flush()
+
+ // Write out the PID to the file now that the server has successfully started
+ if err := c.storePidFile(config.PidFile); err != nil {
+ c.Ui.Output(fmt.Sprintf("Error storing PID: %v", err))
+ return 1
+ }
+
+ defer func() {
+ if err := c.removePidFile(config.PidFile); err != nil {
+ c.Ui.Output(fmt.Sprintf("Error deleting the PID file: %v", err))
+ }
+ }()
// Wait for shutdown
shutdownTriggered := false
@@ -642,7 +694,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
case <-c.SighupCh:
c.Ui.Output("==> Vault reload triggered")
- if err := c.Reload(configPath); err != nil {
+ if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, configPath); err != nil {
c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
}
}
@@ -653,7 +705,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
return 0
}
-func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.InitResult, error) {
+func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig) (*vault.InitResult, error) {
// Initialize it with a basic single key
init, err := core.Initialize(&vault.InitParams{
BarrierConfig: &vault.SealConfig{
@@ -679,7 +731,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.
return nil, fmt.Errorf("failed to unseal Vault for dev mode")
}
- isLeader, _, err := core.Leader()
+ isLeader, _, _, err := core.Leader()
if err != nil && err != vault.ErrHANotEnabled {
return nil, fmt.Errorf("failed to check active status: %v", err)
}
@@ -692,7 +744,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.
return nil, fmt.Errorf("failed to get active status after five seconds; call stack is\n%s\n", buf)
}
time.Sleep(1 * time.Second)
- isLeader, _, err = core.Leader()
+ isLeader, _, _, err = core.Leader()
if err != nil {
return nil, fmt.Errorf("failed to check active status: %v", err)
}
@@ -700,14 +752,14 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.
}
}
- if rootTokenID != "" {
+ if coreConfig.DevToken != "" {
req := &logical.Request{
ID: "dev-gen-root",
Operation: logical.UpdateOperation,
ClientToken: init.RootToken,
Path: "auth/token/create",
Data: map[string]interface{}{
- "id": rootTokenID,
+ "id": coreConfig.DevToken,
"policies": []string{"root"},
"no_parent": true,
"no_default_policy": true,
@@ -715,13 +767,13 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.
}
resp, err := core.HandleRequest(req)
if err != nil {
- return nil, fmt.Errorf("failed to create root token with ID %s: %s", rootTokenID, err)
+ return nil, fmt.Errorf("failed to create root token with ID %s: %s", coreConfig.DevToken, err)
}
if resp == nil {
- return nil, fmt.Errorf("nil response when creating root token with ID %s", rootTokenID)
+ return nil, fmt.Errorf("nil response when creating root token with ID %s", coreConfig.DevToken)
}
if resp.Auth == nil {
- return nil, fmt.Errorf("nil auth when creating root token with ID %s", rootTokenID)
+ return nil, fmt.Errorf("nil auth when creating root token with ID %s", coreConfig.DevToken)
}
init.RootToken = resp.Auth.ClientToken
@@ -747,6 +799,178 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.
return init, nil
}
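+// enableThreeNodeDevCluster starts an in-process three-node test cluster for the
+// -dev-three-node flag and blocks until shutdown or reload signals arrive.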
+func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress string) int {
+ testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ BaseListenAddress: devListenAddress,
+ })
+ defer c.cleanupGuard.Do(testCluster.Cleanup)
+
+ info["cluster parameters path"] = testCluster.TempDir
+ info["log level"] = "trace"
+ infoKeys = append(infoKeys, "cluster parameters path", "log level")
+
+ for i, core := range testCluster.Cores {
+ info[fmt.Sprintf("node %d redirect address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String())
+ infoKeys = append(infoKeys, fmt.Sprintf("node %d redirect address", i))
+ }
+
+ infoKeys = append(infoKeys, "version")
+ verInfo := version.GetVersion()
+ info["version"] = verInfo.FullVersionNumber(false)
+ if verInfo.Revision != "" {
+ info["version sha"] = strings.Trim(verInfo.Revision, "'")
+ infoKeys = append(infoKeys, "version sha")
+ }
+ infoKeys = append(infoKeys, "cgo")
+ info["cgo"] = "disabled"
+ if version.CgoEnabled {
+ info["cgo"] = "enabled"
+ }
+
+ // Server configuration output
+ padding := 24
+ sort.Strings(infoKeys)
+ c.Ui.Output("==> Vault server configuration:\n")
+ for _, k := range infoKeys {
+ c.Ui.Output(fmt.Sprintf(
+ "%s%s: %s",
+ strings.Repeat(" ", padding-len(k)),
+ strings.Title(k),
+ info[k]))
+ }
+ c.Ui.Output("")
+
+ for _, core := range testCluster.Cores {
+ core.Server.Handler = vaulthttp.Handler(core.Core)
+ core.SetClusterHandler(core.Server.Handler)
+ }
+
+ testCluster.Start()
+
+ if base.DevToken != "" {
+ req := &logical.Request{
+ ID: "dev-gen-root",
+ Operation: logical.UpdateOperation,
+ ClientToken: testCluster.RootToken,
+ Path: "auth/token/create",
+ Data: map[string]interface{}{
+ "id": base.DevToken,
+ "policies": []string{"root"},
+ "no_parent": true,
+ "no_default_policy": true,
+ },
+ }
+ resp, err := testCluster.Cores[0].HandleRequest(req)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err))
+ return 1
+ }
+ if resp == nil {
+ c.Ui.Output(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken))
+ return 1
+ }
+ if resp.Auth == nil {
+ c.Ui.Output(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken))
+ return 1
+ }
+
+ testCluster.RootToken = resp.Auth.ClientToken
+
+ req.ID = "dev-revoke-init-root"
+ req.Path = "auth/token/revoke-self"
+ req.Data = nil
+ resp, err = testCluster.Cores[0].HandleRequest(req)
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf("failed to revoke initial root token: %s", err))
+ return 1
+ }
+ }
+
+ // Set the token
+ tokenHelper, err := c.TokenHelper()
+ if err != nil {
+ c.Ui.Output(fmt.Sprintf("%v", err))
+ return 1
+ }
+ if err := tokenHelper.Store(testCluster.RootToken); err != nil {
+ c.Ui.Output(fmt.Sprintf("%v", err))
+ return 1
+ }
+
+ if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0755); err != nil {
+ c.Ui.Output(fmt.Sprintf("%v", err))
+ return 1
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "==> Three node dev mode is enabled\n\n" +
+ "The unseal key and root token are reproduced below in case you\n" +
+ "want to seal/unseal the Vault or play with authentication.\n",
+ ))
+
+ for i, key := range testCluster.BarrierKeys {
+ c.Ui.Output(fmt.Sprintf(
+ "Unseal Key %d: %s",
+ i+1, base64.StdEncoding.EncodeToString(key),
+ ))
+ }
+
+ c.Ui.Output(fmt.Sprintf(
+ "\nRoot Token: %s\n", testCluster.RootToken,
+ ))
+
+ c.Ui.Output(fmt.Sprintf(
+ "\nUseful env vars:\n"+
+ "VAULT_TOKEN=%s\n"+
+ "VAULT_ADDR=%s\n"+
+ "VAULT_CACERT=%s/ca_cert.pem\n",
+ testCluster.RootToken,
+ testCluster.Cores[0].Client.Address(),
+ testCluster.TempDir,
+ ))
+
+ // Output the header that the server has started
+ c.Ui.Output("==> Vault server started! Log data will stream in below:\n")
+
+ // Release the log gate.
+ c.logGate.Flush()
+
+ // Wait for shutdown
+ shutdownTriggered := false
+
+ for !shutdownTriggered {
+ select {
+ case <-c.ShutdownCh:
+ c.Ui.Output("==> Vault shutdown triggered")
+
+ // Stop the listeners so that we don't process further client requests.
+ c.cleanupGuard.Do(testCluster.Cleanup)
+
+ // Shutdown will wait until after Vault is sealed, which means the
+ // request forwarding listeners will also be closed (and also
+ // waited for).
+ for _, core := range testCluster.Cores {
+ if err := core.Shutdown(); err != nil {
+ c.Ui.Output(fmt.Sprintf("Error with core shutdown: %s", err))
+ }
+ }
+
+ shutdownTriggered = true
+
+ case <-c.SighupCh:
+ c.Ui.Output("==> Vault reload triggered")
+ for _, core := range testCluster.Cores {
+ if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil); err != nil {
+ c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
+ }
+ }
+ }
+ }
+
+ return 0
+}
+
// detectRedirect is used to attempt redirect address detection
func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect,
config *server.Config) (string, error) {
@@ -774,7 +998,7 @@ func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect,
// Check if TLS is disabled
if val, ok := list.Config["tls_disable"]; ok {
- disable, err := strconv.ParseBool(val)
+ disable, err := parseutil.ParseBool(val)
if err != nil {
return "", fmt.Errorf("tls_disable: %s", err)
}
@@ -785,9 +1009,12 @@ func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect,
}
// Check for address override
- addr, ok := list.Config["address"]
+ var addr string
+ addrRaw, ok := list.Config["address"]
if !ok {
addr = "127.0.0.1:8200"
+ } else {
+ addr = addrRaw.(string)
}
// Check for localhost
@@ -892,6 +1119,21 @@ func (c *ServerCommand) setupTelemetry(config *server.Config) error {
fanout = append(fanout, sink)
}
+ if telConfig.DogStatsDAddr != "" {
+ var tags []string
+
+ if telConfig.DogStatsDTags != nil {
+ tags = telConfig.DogStatsDTags
+ }
+
+ sink, err := datadog.NewDogStatsdSink(telConfig.DogStatsDAddr, metricsConf.HostName)
+ if err != nil {
+ return fmt.Errorf("failed to start DogStatsD sink. Got: %s", err)
+ }
+ sink.SetTags(tags)
+ fanout = append(fanout, sink)
+ }
+
// Initialize the global sink
if len(fanout) > 0 {
fanout = append(fanout, inm)
@@ -903,55 +1145,29 @@ func (c *ServerCommand) setupTelemetry(config *server.Config) error {
return nil
}
-func (c *ServerCommand) Reload(configPath []string) error {
- c.reloadFuncsLock.RLock()
- defer c.reloadFuncsLock.RUnlock()
+func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reload.ReloadFunc, configPath []string) error {
+ lock.RLock()
+ defer lock.RUnlock()
var reloadErrors *multierror.Error
- // Read the new config
- var config *server.Config
- for _, path := range configPath {
- current, err := server.LoadConfig(path, c.logger)
- if err != nil {
- reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error loading configuration from %s: %s", path, err))
- goto audit
- }
-
- if config == nil {
- config = current
- } else {
- config = config.Merge(current)
- }
- }
-
- // Ensure at least one config was found.
- if config == nil {
- reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("No configuration files found"))
- goto audit
- }
-
- // Call reload on the listeners. This will call each listener with each
- // config block, but they verify the address.
- for _, lnConfig := range config.Listeners {
- for _, relFunc := range (*c.reloadFuncs)["listener|"+lnConfig.Type] {
- if err := relFunc(lnConfig.Config); err != nil {
- reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading configuration: %s", err))
- goto audit
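+ // Invoke every registered reload func; keys are prefixed by kind, such as "listener|" or "audit_file|".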
+ for k, relFuncs := range *reloadFuncs {
+ switch {
+ case strings.HasPrefix(k, "listener|"):
+ for _, relFunc := range relFuncs {
+ if relFunc != nil {
+ if err := relFunc(nil); err != nil {
+ reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading listener: %v", err))
+ }
+ }
}
- }
- }
-audit:
- // file audit reload funcs
- for k, relFuncs := range *c.reloadFuncs {
- if !strings.HasPrefix(k, "audit_file|") {
- continue
- }
- for _, relFunc := range relFuncs {
- if relFunc != nil {
- if err := relFunc(nil); err != nil {
- reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading file audit backend at path %s: %v", strings.TrimPrefix(k, "audit_file|"), err))
+ case strings.HasPrefix(k, "audit_file|"):
+ for _, relFunc := range relFuncs {
+ if relFunc != nil {
+ if err := relFunc(nil); err != nil {
+ reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading file audit backend at path %s: %v", strings.TrimPrefix(k, "audit_file|"), err))
+ }
}
}
}
@@ -1008,6 +1224,51 @@ General Options:
return strings.TrimSpace(helpText)
}
+func (c *ServerCommand) AutocompleteArgs() complete.Predictor {
+ return complete.PredictNothing
+}
+
+func (c *ServerCommand) AutocompleteFlags() complete.Flags {
+ return complete.Flags{
+ "-config": complete.PredictOr(complete.PredictFiles("*.hcl"), complete.PredictFiles("*.json")),
+ "-dev": complete.PredictNothing,
+ "-dev-root-token-id": complete.PredictNothing,
+ "-dev-listen-address": complete.PredictNothing,
+ "-log-level": complete.PredictSet("trace", "debug", "info", "warn", "err"),
+ }
+}
+
+// storePidFile is used to write out our PID to a file if necessary
+func (c *ServerCommand) storePidFile(pidPath string) error {
+ // Quit fast if no pidfile
+ if pidPath == "" {
+ return nil
+ }
+
+ // Open the PID file
+ pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+ if err != nil {
+ return fmt.Errorf("could not open pid file: %v", err)
+ }
+ defer pidFile.Close()
+
+ // Write out the PID
+ pid := os.Getpid()
+ _, err = pidFile.WriteString(fmt.Sprintf("%d", pid))
+ if err != nil {
+ return fmt.Errorf("could not write to pid file: %v", err)
+ }
+ return nil
+}
+
+// removePidFile is used to clean up the PID file if necessary
+func (c *ServerCommand) removePidFile(pidPath string) error {
+ if pidPath == "" {
+ return nil
+ }
+ return os.Remove(pidPath)
+}
+
// MakeShutdownCh returns a channel that can be used for shutdown
// notifications for commands. This channel will send a message for every
// SIGINT or SIGTERM received.
diff --git a/vendor/github.com/hashicorp/vault/command/server/config.go b/vendor/github.com/hashicorp/vault/command/server/config.go
index e6ea123..8f78ac0 100644
--- a/vendor/github.com/hashicorp/vault/command/server/config.go
+++ b/vendor/github.com/hashicorp/vault/command/server/config.go
@@ -42,14 +42,22 @@ type Config struct {
DefaultLeaseTTL time.Duration `hcl:"-"`
DefaultLeaseTTLRaw interface{} `hcl:"default_lease_ttl"`
- ClusterName string `hcl:"cluster_name"`
+ ClusterName string `hcl:"cluster_name"`
+ ClusterCipherSuites string `hcl:"cluster_cipher_suites"`
+
+ PluginDirectory string `hcl:"plugin_directory"`
+
+ PidFile string `hcl:"pid_file"`
+ EnableRawEndpoint bool `hcl:"-"`
+ EnableRawEndpointRaw interface{} `hcl:"raw_storage_endpoint"`
}
// DevConfig is a Config that is used for dev mode of Vault.
func DevConfig(ha, transactional bool) *Config {
ret := &Config{
- DisableCache: false,
- DisableMlock: true,
+ DisableCache: false,
+ DisableMlock: true,
+ EnableRawEndpoint: true,
Storage: &Storage{
Type: "inmem",
@@ -58,9 +66,11 @@ func DevConfig(ha, transactional bool) *Config {
Listeners: []*Listener{
&Listener{
Type: "tcp",
- Config: map[string]string{
- "address": "127.0.0.1:8200",
- "tls_disable": "1",
+ Config: map[string]interface{}{
+ "address": "127.0.0.1:8200",
+ "tls_disable": true,
+ "proxy_protocol_behavior": "allow_authorized",
+ "proxy_protocol_authorized_addrs": "127.0.0.1:8200",
},
},
},
@@ -68,9 +78,6 @@ func DevConfig(ha, transactional bool) *Config {
EnableUI: true,
Telemetry: &Telemetry{},
-
- MaxLeaseTTL: 32 * 24 * time.Hour,
- DefaultLeaseTTL: 32 * 24 * time.Hour,
}
switch {
@@ -88,7 +95,7 @@ func DevConfig(ha, transactional bool) *Config {
// Listener is the listener configuration for the server.
type Listener struct {
Type string
- Config map[string]string
+ Config map[string]interface{}
}
func (l *Listener) GoString() string {
@@ -195,6 +202,15 @@ type Telemetry struct {
// (e.g. a specific geo location or datacenter, dc:sfo)
// Default: none
CirconusBrokerSelectTag string `hcl:"circonus_broker_select_tag"`
+
+ // DogStatsD:
+ // DogStatsDAddr is the address of a DogStatsD instance. If provided,
+ // metrics will be sent to that instance.
+ DogStatsDAddr string `hcl:"dogstatsd_addr"`
+
+ // DogStatsDTags are the global tags that should be sent with each packet
+ // to DogStatsD. Each string looks like "my_tag_name:my_tag_value".
+ DogStatsDTags []string `hcl:"dogstatsd_tags"`
}
func (s *Telemetry) GoString() string {
@@ -267,11 +283,31 @@ func (c *Config) Merge(c2 *Config) *Config {
result.ClusterName = c2.ClusterName
}
+ result.ClusterCipherSuites = c.ClusterCipherSuites
+ if c2.ClusterCipherSuites != "" {
+ result.ClusterCipherSuites = c2.ClusterCipherSuites
+ }
+
result.EnableUI = c.EnableUI
if c2.EnableUI {
result.EnableUI = c2.EnableUI
}
+ result.EnableRawEndpoint = c.EnableRawEndpoint
+ if c2.EnableRawEndpoint {
+ result.EnableRawEndpoint = c2.EnableRawEndpoint
+ }
+
+ result.PluginDirectory = c.PluginDirectory
+ if c2.PluginDirectory != "" {
+ result.PluginDirectory = c2.PluginDirectory
+ }
+
+ result.PidFile = c.PidFile
+ if c2.PidFile != "" {
+ result.PidFile = c2.PidFile
+ }
+
return result
}
@@ -285,9 +321,8 @@ func LoadConfig(path string, logger log.Logger) (*Config, error) {
if fi.IsDir() {
return LoadConfigDir(path, logger)
- } else {
- return LoadConfigFile(path, logger)
}
+ return LoadConfigFile(path, logger)
}
// LoadConfigFile loads the configuration from the given file.
@@ -342,13 +377,18 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
}
}
+ if result.EnableRawEndpointRaw != nil {
+ if result.EnableRawEndpoint, err = parseutil.ParseBool(result.EnableRawEndpointRaw); err != nil {
+ return nil, err
+ }
+ }
+
list, ok := obj.Node.(*ast.ObjectList)
if !ok {
return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
}
valid := []string{
- "atlas",
"storage",
"ha_storage",
"backend",
@@ -363,6 +403,10 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
"default_lease_ttl",
"max_lease_ttl",
"cluster_name",
+ "cluster_cipher_suites",
+ "plugin_directory",
+ "pid_file",
+ "raw_storage_endpoint",
}
if err := checkHCLKeys(list, valid); err != nil {
return nil, err
@@ -641,8 +685,6 @@ func parseHSMs(result *Config, list *ast.ObjectList) error {
}
func parseListeners(result *Config, list *ast.ObjectList) error {
- var foundAtlas bool
-
listeners := make([]*Listener, 0, len(list.Items))
for _, item := range list.Items {
key := "listener"
@@ -656,6 +698,8 @@ func parseListeners(result *Config, list *ast.ObjectList) error {
"endpoint",
"infrastructure",
"node_id",
+ "proxy_protocol_behavior",
+ "proxy_protocol_authorized_addrs",
"tls_disable",
"tls_cert_file",
"tls_key_file",
@@ -663,36 +707,20 @@ func parseListeners(result *Config, list *ast.ObjectList) error {
"tls_cipher_suites",
"tls_prefer_server_cipher_suites",
"tls_require_and_verify_client_cert",
+ "tls_client_ca_file",
"token",
}
if err := checkHCLKeys(item.Val, valid); err != nil {
return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key))
}
- var m map[string]string
+ var m map[string]interface{}
if err := hcl.DecodeObject(&m, item.Val); err != nil {
return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key))
}
lnType := strings.ToLower(key)
- if lnType == "atlas" {
- if foundAtlas {
- return multierror.Prefix(fmt.Errorf("only one listener of type 'atlas' is permitted"), fmt.Sprintf("listeners.%s", key))
- }
-
- foundAtlas = true
- if m["token"] == "" {
- return multierror.Prefix(fmt.Errorf("'token' must be specified for an Atlas listener"), fmt.Sprintf("listeners.%s", key))
- }
- if m["infrastructure"] == "" {
- return multierror.Prefix(fmt.Errorf("'infrastructure' must be specified for an Atlas listener"), fmt.Sprintf("listeners.%s", key))
- }
- if m["node_id"] == "" {
- return multierror.Prefix(fmt.Errorf("'node_id' must be specified for an Atlas listener"), fmt.Sprintf("listeners.%s", key))
- }
- }
-
listeners = append(listeners, &Listener{
Type: lnType,
Config: m,
@@ -727,6 +755,8 @@ func parseTelemetry(result *Config, list *ast.ObjectList) error {
"circonus_broker_id",
"circonus_broker_select_tag",
"disable_hostname",
+ "dogstatsd_addr",
+ "dogstatsd_tags",
"statsd_address",
"statsite_address",
}
diff --git a/vendor/github.com/hashicorp/vault/command/server/config_test.go b/vendor/github.com/hashicorp/vault/command/server/config_test.go
index 789be40..bdc9128 100644
--- a/vendor/github.com/hashicorp/vault/command/server/config_test.go
+++ b/vendor/github.com/hashicorp/vault/command/server/config_test.go
@@ -6,6 +6,8 @@ import (
"testing"
"time"
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/helper/logformat"
log "github.com/mgutz/logxi/v1"
)
@@ -20,18 +22,9 @@ func TestLoadConfigFile(t *testing.T) {
expected := &Config{
Listeners: []*Listener{
- &Listener{
- Type: "atlas",
- Config: map[string]string{
- "token": "foobar",
- "infrastructure": "foo/bar",
- "endpoint": "https://foo.bar:1111",
- "node_id": "foo_node",
- },
- },
&Listener{
Type: "tcp",
- Config: map[string]string{
+ Config: map[string]interface{}{
"address": "127.0.0.1:443",
},
},
@@ -58,6 +51,8 @@ func TestLoadConfigFile(t *testing.T) {
StatsdAddr: "bar",
StatsiteAddr: "foo",
DisableHostname: false,
+ DogStatsDAddr: "127.0.0.1:7254",
+ DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
},
DisableCache: true,
@@ -67,11 +62,16 @@ func TestLoadConfigFile(t *testing.T) {
EnableUI: true,
EnableUIRaw: true,
+ EnableRawEndpoint: true,
+ EnableRawEndpointRaw: true,
+
MaxLeaseTTL: 10 * time.Hour,
MaxLeaseTTLRaw: "10h",
DefaultLeaseTTL: 10 * time.Hour,
DefaultLeaseTTLRaw: "10h",
ClusterName: "testcluster",
+
+ PidFile: "./pidfile",
}
if !reflect.DeepEqual(config, expected) {
t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
@@ -90,19 +90,10 @@ func TestLoadConfigFile_json(t *testing.T) {
Listeners: []*Listener{
&Listener{
Type: "tcp",
- Config: map[string]string{
+ Config: map[string]interface{}{
"address": "127.0.0.1:443",
},
},
- &Listener{
- Type: "atlas",
- Config: map[string]string{
- "token": "foobar",
- "infrastructure": "foo/bar",
- "endpoint": "https://foo.bar:1111",
- "node_id": "foo_node",
- },
- },
},
Storage: &Storage{
@@ -113,6 +104,8 @@ func TestLoadConfigFile_json(t *testing.T) {
DisableClustering: true,
},
+ ClusterCipherSuites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
+
Telemetry: &Telemetry{
StatsiteAddr: "baz",
StatsdAddr: "",
@@ -132,15 +125,18 @@ func TestLoadConfigFile_json(t *testing.T) {
CirconusBrokerSelectTag: "",
},
- MaxLeaseTTL: 10 * time.Hour,
- MaxLeaseTTLRaw: "10h",
- DefaultLeaseTTL: 10 * time.Hour,
- DefaultLeaseTTLRaw: "10h",
- ClusterName: "testcluster",
- DisableCacheRaw: interface{}(nil),
- DisableMlockRaw: interface{}(nil),
- EnableUI: true,
- EnableUIRaw: true,
+ MaxLeaseTTL: 10 * time.Hour,
+ MaxLeaseTTLRaw: "10h",
+ DefaultLeaseTTL: 10 * time.Hour,
+ DefaultLeaseTTLRaw: "10h",
+ ClusterName: "testcluster",
+ DisableCacheRaw: interface{}(nil),
+ DisableMlockRaw: interface{}(nil),
+ EnableUI: true,
+ EnableUIRaw: true,
+ PidFile: "./pidfile",
+ EnableRawEndpoint: true,
+ EnableRawEndpointRaw: true,
}
if !reflect.DeepEqual(config, expected) {
t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
@@ -159,13 +155,13 @@ func TestLoadConfigFile_json2(t *testing.T) {
Listeners: []*Listener{
&Listener{
Type: "tcp",
- Config: map[string]string{
+ Config: map[string]interface{}{
"address": "127.0.0.1:443",
},
},
&Listener{
Type: "tcp",
- Config: map[string]string{
+ Config: map[string]interface{}{
"address": "127.0.0.1:444",
},
},
@@ -190,6 +186,8 @@ func TestLoadConfigFile_json2(t *testing.T) {
EnableUI: true,
+ EnableRawEndpoint: true,
+
Telemetry: &Telemetry{
StatsiteAddr: "foo",
StatsdAddr: "bar",
@@ -228,7 +226,7 @@ func TestLoadConfigDir(t *testing.T) {
Listeners: []*Listener{
&Listener{
Type: "tcp",
- Config: map[string]string{
+ Config: map[string]interface{}{
"address": "127.0.0.1:443",
},
},
@@ -244,6 +242,8 @@ func TestLoadConfigDir(t *testing.T) {
EnableUI: true,
+ EnableRawEndpoint: true,
+
Telemetry: &Telemetry{
StatsiteAddr: "qux",
StatsdAddr: "baz",
@@ -259,6 +259,56 @@ func TestLoadConfigDir(t *testing.T) {
}
}
+func TestParseListeners(t *testing.T) {
+ obj, _ := hcl.Parse(strings.TrimSpace(`
+listener "tcp" {
+ address = "127.0.0.1:443"
+ cluster_address = "127.0.0.1:8201"
+ tls_disable = false
+ tls_cert_file = "./certs/server.crt"
+ tls_key_file = "./certs/server.key"
+ tls_client_ca_file = "./certs/rootca.crt"
+ tls_min_version = "tls12"
+ tls_require_and_verify_client_cert = true
+}`))
+
+ var config Config
+ list, _ := obj.Node.(*ast.ObjectList)
+ objList := list.Filter("listener")
+ parseListeners(&config, objList)
+ listeners := config.Listeners
+ if len(listeners) == 0 {
+ t.Fatalf("expected at least one listener in the config")
+ }
+ listener := listeners[0]
+ if listener.Type != "tcp" {
+ t.Fatalf("expected tcp listener in the config")
+ }
+
+ expected := &Config{
+ Listeners: []*Listener{
+ &Listener{
+ Type: "tcp",
+ Config: map[string]interface{}{
+ "address": "127.0.0.1:443",
+ "cluster_address": "127.0.0.1:8201",
+ "tls_disable": false,
+ "tls_cert_file": "./certs/server.crt",
+ "tls_key_file": "./certs/server.key",
+ "tls_client_ca_file": "./certs/rootca.crt",
+ "tls_min_version": "tls12",
+ "tls_require_and_verify_client_cert": true,
+ },
+ },
+ },
+ }
+
+ if !reflect.DeepEqual(config, *expected) {
+ t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, *expected)
+ }
+}
+
func TestParseConfig_badTopLevel(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener.go b/vendor/github.com/hashicorp/vault/command/server/listener.go
index 999966e..4f9aedf 100644
--- a/vendor/github.com/hashicorp/vault/command/server/listener.go
+++ b/vendor/github.com/hashicorp/vault/command/server/listener.go
@@ -5,28 +5,29 @@ import (
// certificates that use it can be parsed.
_ "crypto/sha512"
"crypto/tls"
+ "crypto/x509"
"fmt"
"io"
+ "io/ioutil"
"net"
- "strconv"
- "sync"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/helper/proxyutil"
+ "github.com/hashicorp/vault/helper/reload"
"github.com/hashicorp/vault/helper/tlsutil"
- "github.com/hashicorp/vault/vault"
)
// ListenerFactory is the factory function to create a listener.
-type ListenerFactory func(map[string]string, io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error)
+type ListenerFactory func(map[string]interface{}, io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error)
// BuiltinListeners is the list of built-in listener types.
var BuiltinListeners = map[string]ListenerFactory{
- "tcp": tcpListenerFactory,
- "atlas": atlasListenerFactory,
+ "tcp": tcpListenerFactory,
}
// NewListener creates a new listener of the given type with the given
// configuration. The type is looked up in the BuiltinListeners map.
-func NewListener(t string, config map[string]string, logger io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) {
+func NewListener(t string, config map[string]interface{}, logger io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error) {
f, ok := BuiltinListeners[t]
if !ok {
return nil, nil, nil, fmt.Errorf("unknown listener type: %s", t)
@@ -35,14 +36,45 @@ func NewListener(t string, config map[string]string, logger io.Writer) (net.List
return f(config, logger)
}
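+// listenerWrapProxy wraps the listener with PROXY protocol support when
+// proxy_protocol_behavior is configured, and returns it unchanged otherwise.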
+func listenerWrapProxy(ln net.Listener, config map[string]interface{}) (net.Listener, error) {
+ behaviorRaw, ok := config["proxy_protocol_behavior"]
+ if !ok {
+ return ln, nil
+ }
+
+ behavior, ok := behaviorRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("failed parsing proxy_protocol_behavior value: not a string")
+ }
+
+ authorizedAddrsRaw, ok := config["proxy_protocol_authorized_addrs"]
+ if !ok {
+ return nil, fmt.Errorf("proxy_protocol_behavior set but no proxy_protocol_authorized_addrs value")
+ }
+
+ proxyProtoConfig := &proxyutil.ProxyProtoConfig{
+ Behavior: behavior,
+ }
+ if err := proxyProtoConfig.SetAuthorizedAddrs(authorizedAddrsRaw); err != nil {
+ return nil, fmt.Errorf("failed parsing proxy_protocol_authorized_addrs: %v", err)
+ }
+
+ newLn, err := proxyutil.WrapInProxyProto(ln, proxyProtoConfig)
+ if err != nil {
+ return nil, fmt.Errorf("failed configuring PROXY protocol wrapper: %s", err)
+ }
+
+ return newLn, nil
+}
+
func listenerWrapTLS(
ln net.Listener,
props map[string]string,
- config map[string]string) (net.Listener, map[string]string, vault.ReloadFunc, error) {
+ config map[string]interface{}) (net.Listener, map[string]string, reload.ReloadFunc, error) {
props["tls"] = "disabled"
if v, ok := config["tls_disable"]; ok {
- disabled, err := strconv.ParseBool(v)
+ disabled, err := parseutil.ParseBool(v)
if err != nil {
return nil, nil, nil, fmt.Errorf("invalid value for 'tls_disable': %v", err)
}
@@ -61,21 +93,22 @@ func listenerWrapTLS(
return nil, nil, nil, fmt.Errorf("'tls_key_file' must be set")
}
- cg := &certificateGetter{
- id: config["address"],
- }
+ cg := reload.NewCertificateGetter(config["tls_cert_file"].(string), config["tls_key_file"].(string))
- if err := cg.reload(config); err != nil {
+ if err := cg.Reload(config); err != nil {
return nil, nil, nil, fmt.Errorf("error loading TLS cert: %s", err)
}
- tlsvers, ok := config["tls_min_version"]
+ var tlsvers string
+ tlsversRaw, ok := config["tls_min_version"]
if !ok {
tlsvers = "tls12"
+ } else {
+ tlsvers = tlsversRaw.(string)
}
tlsConf := &tls.Config{}
- tlsConf.GetCertificate = cg.getCertificate
+ tlsConf.GetCertificate = cg.GetCertificate
tlsConf.NextProtos = []string{"h2", "http/1.1"}
tlsConf.MinVersion, ok = tlsutil.TLSLookup[tlsvers]
if !ok {
@@ -84,67 +117,42 @@ func listenerWrapTLS(
tlsConf.ClientAuth = tls.RequestClientCert
if v, ok := config["tls_cipher_suites"]; ok {
- ciphers, err := tlsutil.ParseCiphers(v)
+ ciphers, err := tlsutil.ParseCiphers(v.(string))
if err != nil {
return nil, nil, nil, fmt.Errorf("invalid value for 'tls_cipher_suites': %v", err)
}
tlsConf.CipherSuites = ciphers
}
if v, ok := config["tls_prefer_server_cipher_suites"]; ok {
- preferServer, err := strconv.ParseBool(v)
+ preferServer, err := parseutil.ParseBool(v)
if err != nil {
return nil, nil, nil, fmt.Errorf("invalid value for 'tls_prefer_server_cipher_suites': %v", err)
}
tlsConf.PreferServerCipherSuites = preferServer
}
if v, ok := config["tls_require_and_verify_client_cert"]; ok {
- requireClient, err := strconv.ParseBool(v)
+ requireClient, err := parseutil.ParseBool(v)
if err != nil {
return nil, nil, nil, fmt.Errorf("invalid value for 'tls_require_and_verify_client_cert': %v", err)
}
if requireClient {
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
}
+ if tlsClientCaFile, ok := config["tls_client_ca_file"]; ok {
+ caPool := x509.NewCertPool()
+ data, err := ioutil.ReadFile(tlsClientCaFile.(string))
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to read tls_client_ca_file: %v", err)
+ }
+
+ if !caPool.AppendCertsFromPEM(data) {
+ return nil, nil, nil, fmt.Errorf("failed to parse CA certificate in tls_client_ca_file")
+ }
+ tlsConf.ClientCAs = caPool
+ }
}
ln = tls.NewListener(ln, tlsConf)
props["tls"] = "enabled"
- return ln, props, cg.reload, nil
-}
-
-type certificateGetter struct {
- sync.RWMutex
-
- cert *tls.Certificate
-
- id string
-}
-
-func (cg *certificateGetter) reload(config map[string]string) error {
- if config["address"] != cg.id {
- return nil
- }
-
- cert, err := tls.LoadX509KeyPair(config["tls_cert_file"], config["tls_key_file"])
- if err != nil {
- return err
- }
-
- cg.Lock()
- defer cg.Unlock()
-
- cg.cert = &cert
-
- return nil
-}
-
-func (cg *certificateGetter) getCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
- cg.RLock()
- defer cg.RUnlock()
-
- if cg.cert == nil {
- return nil, fmt.Errorf("nil certificate")
- }
-
- return cg.cert, nil
+ return ln, props, cg.Reload, nil
}
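
Note: the certificateGetter deleted above was relocated into the new helper/reload package rather than dropped. Below is a minimal sketch of the relocated pattern, assuming it keeps the shape of the deleted code; the package layout and field names are assumptions, not taken from this patch:

```go
package reload

import (
	"crypto/tls"
	"fmt"
	"sync"
)

// CertificateGetter holds a mutex-guarded certificate that can be swapped at
// runtime; GetCertificate plugs into tls.Config, and Reload is handed back to
// callers as the listener's reload.ReloadFunc.
type CertificateGetter struct {
	sync.RWMutex

	cert     *tls.Certificate
	certFile string
	keyFile  string
}

func NewCertificateGetter(certFile, keyFile string) *CertificateGetter {
	return &CertificateGetter{certFile: certFile, keyFile: keyFile}
}

// Reload re-reads the key pair from disk under the write lock.
func (cg *CertificateGetter) Reload(_ map[string]interface{}) error {
	cert, err := tls.LoadX509KeyPair(cg.certFile, cg.keyFile)
	if err != nil {
		return err
	}
	cg.Lock()
	defer cg.Unlock()
	cg.cert = &cert
	return nil
}

// GetCertificate serves the currently loaded certificate to each handshake.
func (cg *CertificateGetter) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
	cg.RLock()
	defer cg.RUnlock()
	if cg.cert == nil {
		return nil, fmt.Errorf("nil certificate")
	}
	return cg.cert, nil
}
```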
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_atlas.go b/vendor/github.com/hashicorp/vault/command/server/listener_atlas.go
deleted file mode 100644
index c000474..0000000
--- a/vendor/github.com/hashicorp/vault/command/server/listener_atlas.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package server
-
-import (
- "io"
- "net"
-
- "github.com/hashicorp/scada-client/scada"
- "github.com/hashicorp/vault/vault"
- "github.com/hashicorp/vault/version"
-)
-
-type SCADAListener struct {
- ln net.Listener
- scadaProvider *scada.Provider
-}
-
-func (s *SCADAListener) Accept() (net.Conn, error) {
- return s.ln.Accept()
-}
-
-func (s *SCADAListener) Close() error {
- s.scadaProvider.Shutdown()
- return s.ln.Close()
-}
-
-func (s *SCADAListener) Addr() net.Addr {
- return s.ln.Addr()
-}
-
-func atlasListenerFactory(config map[string]string, logger io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) {
- scadaConfig := &scada.Config{
- Service: "vault",
- Version: version.GetVersion().VersionNumber(),
- ResourceType: "vault-cluster",
- Meta: map[string]string{
- "node_id": config["node_id"],
- "cluster_name": config["cluster_name"],
- },
- Atlas: scada.AtlasConfig{
- Endpoint: config["endpoint"],
- Infrastructure: config["infrastructure"],
- Token: config["token"],
- },
- }
-
- provider, list, err := scada.NewHTTPProvider(scadaConfig, logger)
- if err != nil {
- return nil, nil, nil, err
- }
-
- ln := &SCADAListener{
- ln: list,
- scadaProvider: provider,
- }
-
- props := map[string]string{
- "addr": "Atlas/SCADA",
- "infrastructure": scadaConfig.Atlas.Infrastructure,
- }
-
- // The outer connection is already TLS-enabled; this is just the listener
- // that reaches back inside that connection
- config["tls_disable"] = "1"
-
- return listenerWrapTLS(ln, props, config)
-}
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go b/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go
index 4e5e9b4..b0ab687 100644
--- a/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go
+++ b/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go
@@ -6,14 +6,17 @@ import (
"strings"
"time"
- "github.com/hashicorp/vault/vault"
+ "github.com/hashicorp/vault/helper/reload"
)
-func tcpListenerFactory(config map[string]string, _ io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) {
+func tcpListenerFactory(config map[string]interface{}, _ io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error) {
bind_proto := "tcp"
- addr, ok := config["address"]
+ var addr string
+ addrRaw, ok := config["address"]
if !ok {
addr = "127.0.0.1:8200"
+ } else {
+ addr = addrRaw.(string)
}
// If they've passed 0.0.0.0, we only want to bind on IPv4
@@ -28,6 +31,12 @@ func tcpListenerFactory(config map[string]string, _ io.Writer) (net.Listener, ma
}
ln = tcpKeepAliveListener{ln.(*net.TCPListener)}
+
+ ln, err = listenerWrapProxy(ln, config)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
props := map[string]string{"addr": addr}
return listenerWrapTLS(ln, props, config)
}
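
Note: with config now typed as map[string]interface{}, the PROXY protocol keys read by listenerWrapProxy can be exercised as below. This is a hypothetical fragment; the "use_always" behavior value is an assumption rather than something defined in this patch:

```go
// Hypothetical listener config; only the two proxy_protocol_* keys are new.
config := map[string]interface{}{
	"address":                         "0.0.0.0:8200",
	"proxy_protocol_behavior":         "use_always", // assumed behavior name
	"proxy_protocol_authorized_addrs": "10.0.1.1,10.0.1.2",
	"tls_disable":                     "1",
}
ln, props, reloadFn, err := tcpListenerFactory(config, os.Stderr)
```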
diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go b/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go
index 7da2033..4da12b3 100644
--- a/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go
+++ b/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go
@@ -13,7 +13,7 @@ import (
)
func TestTCPListener(t *testing.T) {
- ln, _, _, err := tcpListenerFactory(map[string]string{
+ ln, _, _, err := tcpListenerFactory(map[string]interface{}{
"address": "127.0.0.1:0",
"tls_disable": "1",
}, nil)
@@ -48,19 +48,28 @@ func TestTCPListener_tls(t *testing.T) {
t.Fatal("not ok when appending CA cert")
}
- ln, _, _, err := tcpListenerFactory(map[string]string{
- "address": "127.0.0.1:0",
- "tls_cert_file": wd + "reload_foo.pem",
- "tls_key_file": wd + "reload_foo.key",
+ ln, _, _, err := tcpListenerFactory(map[string]interface{}{
+ "address": "127.0.0.1:0",
+ "tls_cert_file": wd + "reload_foo.pem",
+ "tls_key_file": wd + "reload_foo.key",
+ "tls_require_and_verify_client_cert": "true",
+ "tls_client_ca_file": wd + "reload_ca.pem",
}, nil)
if err != nil {
t.Fatalf("err: %s", err)
}
+ cwd, _ := os.Getwd()
+
+ clientCert, _ := tls.LoadX509KeyPair(
+ cwd+"/test-fixtures/reload/reload_foo.pem",
+ cwd+"/test-fixtures/reload/reload_foo.key")
connFn := func(lnReal net.Listener) (net.Conn, error) {
conn, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{
- RootCAs: certPool,
+ RootCAs: certPool,
+ Certificates: []tls.Certificate{clientCert},
})
+
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json
index 70e7e14..918af56 100644
--- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json
+++ b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json
@@ -3,14 +3,8 @@
"tcp": {
"address": "127.0.0.1:443"
}
- }, {
- "atlas": {
- "token": "foobar",
- "infrastructure": "foo/bar",
- "endpoint": "https://foo.bar:1111",
- "node_id": "foo_node"
- }
}],
+ "cluster_cipher_suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
"storage": {
"consul": {
"foo": "bar",
@@ -23,5 +17,7 @@
"max_lease_ttl": "10h",
"default_lease_ttl": "10h",
"cluster_name":"testcluster",
- "ui":true
+ "ui":true,
+ "pid_file":"./pidfile",
+ "raw_storage_endpoint":true
}
diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json
index 5279d63..e1eb73e 100644
--- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json
+++ b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json
@@ -1,5 +1,6 @@
{
"ui":true,
+ "raw_storage_endpoint":true,
"listener":[
{
"tcp":{
diff --git a/vendor/github.com/hashicorp/vault/command/server_ha_test.go b/vendor/github.com/hashicorp/vault/command/server_ha_test.go
index 5562191..a9b1188 100644
--- a/vendor/github.com/hashicorp/vault/command/server_ha_test.go
+++ b/vendor/github.com/hashicorp/vault/command/server_ha_test.go
@@ -9,7 +9,10 @@ import (
"testing"
"github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/physical"
"github.com/mitchellh/cli"
+
+ physConsul "github.com/hashicorp/vault/physical/consul"
)
// The following tests have a go-metrics/exp manager race condition
@@ -19,6 +22,9 @@ func TestServer_CommonHA(t *testing.T) {
Meta: meta.Meta{
Ui: ui,
},
+ PhysicalBackends: map[string]physical.Factory{
+ "consul": physConsul.NewConsulBackend,
+ },
}
tmpfile, err := ioutil.TempFile("", "")
@@ -47,6 +53,9 @@ func TestServer_GoodSeparateHA(t *testing.T) {
Meta: meta.Meta{
Ui: ui,
},
+ PhysicalBackends: map[string]physical.Factory{
+ "consul": physConsul.NewConsulBackend,
+ },
}
tmpfile, err := ioutil.TempFile("", "")
@@ -75,6 +84,9 @@ func TestServer_BadSeparateHA(t *testing.T) {
Meta: meta.Meta{
Ui: ui,
},
+ PhysicalBackends: map[string]physical.Factory{
+ "consul": physConsul.NewConsulBackend,
+ },
}
tmpfile, err := ioutil.TempFile("", "")
diff --git a/vendor/github.com/hashicorp/vault/command/server_test.go b/vendor/github.com/hashicorp/vault/command/server_test.go
index f95016f..9a90239 100644
--- a/vendor/github.com/hashicorp/vault/command/server_test.go
+++ b/vendor/github.com/hashicorp/vault/command/server_test.go
@@ -15,7 +15,10 @@ import (
"time"
"github.com/hashicorp/vault/meta"
+ "github.com/hashicorp/vault/physical"
"github.com/mitchellh/cli"
+
+ physFile "github.com/hashicorp/vault/physical/file"
)
var (
@@ -58,8 +61,8 @@ disable_mlock = true
listener "tcp" {
address = "127.0.0.1:8203"
- tls_cert_file = "TMPDIR/reload_FILE.pem"
- tls_key_file = "TMPDIR/reload_FILE.key"
+ tls_cert_file = "TMPDIR/reload_cert.pem"
+ tls_key_file = "TMPDIR/reload_key.pem"
}
`
)
@@ -79,15 +82,11 @@ func TestServer_ReloadListener(t *testing.T) {
// Setup initial certs
inBytes, _ := ioutil.ReadFile(wd + "reload_foo.pem")
- ioutil.WriteFile(td+"/reload_foo.pem", inBytes, 0777)
+ ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0777)
inBytes, _ = ioutil.ReadFile(wd + "reload_foo.key")
- ioutil.WriteFile(td+"/reload_foo.key", inBytes, 0777)
- inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem")
- ioutil.WriteFile(td+"/reload_bar.pem", inBytes, 0777)
- inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key")
- ioutil.WriteFile(td+"/reload_bar.key", inBytes, 0777)
+ ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0777)
- relhcl := strings.Replace(strings.Replace(reloadhcl, "TMPDIR", td, -1), "FILE", "foo", -1)
+ relhcl := strings.Replace(reloadhcl, "TMPDIR", td, -1)
ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777)
inBytes, _ = ioutil.ReadFile(wd + "reload_ca.pem")
@@ -104,6 +103,9 @@ func TestServer_ReloadListener(t *testing.T) {
},
ShutdownCh: MakeShutdownCh(),
SighupCh: MakeSighupCh(),
+ PhysicalBackends: map[string]physical.Factory{
+ "file": physFile.NewFileBackend,
+ },
}
finished := false
@@ -155,7 +157,11 @@ func TestServer_ReloadListener(t *testing.T) {
t.Fatalf("certificate name didn't check out: %s", err)
}
- relhcl = strings.Replace(strings.Replace(reloadhcl, "TMPDIR", td, -1), "FILE", "bar", -1)
+ relhcl = strings.Replace(reloadhcl, "TMPDIR", td, -1)
+ inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem")
+ ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0777)
+ inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key")
+ ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0777)
ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777)
c.SighupCh <- struct{}{}
diff --git a/vendor/github.com/hashicorp/vault/command/ssh.go b/vendor/github.com/hashicorp/vault/command/ssh.go
index a9aebbe..03e1933 100644
--- a/vendor/github.com/hashicorp/vault/command/ssh.go
+++ b/vendor/github.com/hashicorp/vault/command/ssh.go
@@ -10,15 +10,42 @@ import (
"os/user"
"strings"
+ "github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/builtin/logical/ssh"
"github.com/hashicorp/vault/meta"
+ homedir "github.com/mitchellh/go-homedir"
"github.com/mitchellh/mapstructure"
+ "github.com/pkg/errors"
)
-// SSHCommand is a Command that establishes a SSH connection
-// with target by generating a dynamic key
+// SSHCommand is a Command that establishes an SSH connection with a target by
+// generating a dynamic key
type SSHCommand struct {
meta.Meta
+
+ // API
+ client *api.Client
+ sshClient *api.SSH
+
+ // Common options
+ mode string
+ noExec bool
+ format string
+ mountPoint string
+ role string
+ username string
+ ip string
+ sshArgs []string
+
+ // Key options
+ strictHostKeyChecking string
+ userKnownHostsFile string
+
+ // SSH CA backend specific options
+ publicKeyPath string
+ privateKeyPath string
+ hostKeyMountPoint string
+ hostKeyHostnames string
}
// Structure to hold the fields returned when asked for a credential from the SSH backend.
@@ -31,42 +58,50 @@ type SSHCredentialResp struct {
}
func (c *SSHCommand) Run(args []string) int {
- var role, mountPoint, format, userKnownHostsFile, strictHostKeyChecking string
- var noExec bool
- var sshCmdArgs []string
+
flags := c.Meta.FlagSet("ssh", meta.FlagSetDefault)
- flags.StringVar(&strictHostKeyChecking, "strict-host-key-checking", "", "")
- flags.StringVar(&userKnownHostsFile, "user-known-hosts-file", "", "")
- flags.StringVar(&format, "format", "table", "")
- flags.StringVar(&role, "role", "", "")
- flags.StringVar(&mountPoint, "mount-point", "ssh", "")
- flags.BoolVar(&noExec, "no-exec", false, "")
+
+ envOrDefault := func(key string, def string) string {
+ if k := os.Getenv(key); k != "" {
+ return k
+ }
+ return def
+ }
+
+ expandPath := func(p string) string {
+ e, err := homedir.Expand(p)
+ if err != nil {
+ return p
+ }
+ return e
+ }
+
+ // Common options
+ flags.StringVar(&c.mode, "mode", "", "")
+ flags.BoolVar(&c.noExec, "no-exec", false, "")
+ flags.StringVar(&c.format, "format", "table", "")
+ flags.StringVar(&c.mountPoint, "mount-point", "ssh", "")
+ flags.StringVar(&c.role, "role", "", "")
+
+ // Key options
+ flags.StringVar(&c.strictHostKeyChecking, "strict-host-key-checking",
+ envOrDefault("VAULT_SSH_STRICT_HOST_KEY_CHECKING", "ask"), "")
+ flags.StringVar(&c.userKnownHostsFile, "user-known-hosts-file",
+ envOrDefault("VAULT_SSH_USER_KNOWN_HOSTS_FILE", expandPath("~/.ssh/known_hosts")), "")
+
+ // CA-specific options
+ flags.StringVar(&c.publicKeyPath, "public-key-path",
+ expandPath("~/.ssh/id_rsa.pub"), "")
+ flags.StringVar(&c.privateKeyPath, "private-key-path",
+ expandPath("~/.ssh/id_rsa"), "")
+ flags.StringVar(&c.hostKeyMountPoint, "host-key-mount-point", "", "")
+ flags.StringVar(&c.hostKeyHostnames, "host-key-hostnames", "*", "")
flags.Usage = func() { c.Ui.Error(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
- // If the flag is already set then it takes the precedence. If the flag is not
- // set, try setting it from env var.
- if os.Getenv("VAULT_SSH_STRICT_HOST_KEY_CHECKING") != "" && strictHostKeyChecking == "" {
- strictHostKeyChecking = os.Getenv("VAULT_SSH_STRICT_HOST_KEY_CHECKING")
- }
- // Assign default value if both flag and env var are not set
- if strictHostKeyChecking == "" {
- strictHostKeyChecking = "ask"
- }
-
- // If the flag is already set then it takes the precedence. If the flag is not
- // set, try setting it from env var.
- if os.Getenv("VAULT_SSH_USER_KNOWN_HOSTS_FILE") != "" && userKnownHostsFile == "" {
- userKnownHostsFile = os.Getenv("VAULT_SSH_USER_KNOWN_HOSTS_FILE")
- }
- // Assign default value if both flag and env var are not set
- if userKnownHostsFile == "" {
- userKnownHostsFile = "~/.ssh/known_hosts"
- }
-
args = flags.Args()
if len(args) < 1 {
c.Ui.Error("ssh expects at least one argument")
@@ -78,46 +113,35 @@ func (c *SSHCommand) Run(args []string) int {
c.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
return 1
}
+ c.client = client
+ c.sshClient = client.SSHWithMountPoint(c.mountPoint)
- // split the parameter username@ip
- input := strings.Split(args[0], "@")
- var username string
- var ipAddr string
-
- // If only IP is mentioned and username is skipped, assume username to
- // be the current username. Vault SSH role's default username could have
- // been used, but in order to retain the consistency with SSH command,
- // current username is employed.
- if len(input) == 1 {
- u, err := user.Current()
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error fetching username: %v", err))
- return 1
- }
- username = u.Username
- ipAddr = input[0]
- } else if len(input) == 2 {
- username = input[0]
- ipAddr = input[1]
- } else {
- c.Ui.Error(fmt.Sprintf("Invalid parameter: %q", args[0]))
+ // Extract the username and IP.
+ c.username, c.ip, err = c.userAndIP(args[0])
+ if err != nil {
+ c.Ui.Error(fmt.Sprintf("Error parsing user and IP: %s", err))
return 1
}
- // Resolving domain names to IP address on the client side.
- // Vault only deals with IP addresses.
- ip, err := net.ResolveIPAddr("ip", ipAddr)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error resolving IP Address: %v", err))
- return 1
+ // The rest of the args are ssh args
+ if len(args) > 1 {
+ c.sshArgs = args[1:]
}
// Credentials are generated only against a registered role. If user
// does not specify a role with the SSH command, then lookup API is used
// to fetch all the roles with which this IP is associated. If there is
// only one role associated with it, use it to establish the connection.
- if role == "" {
- role, err = c.defaultRole(mountPoint, ip.String())
+ //
+ // TODO: remove in 0.9.0, convert to validation error
+ if c.role == "" {
+ c.Ui.Warn("" +
+ "WARNING: No -role specified. Use -role to tell Vault which ssh role\n" +
+ "to use for authentication. In the future, you will need to tell Vault\n" +
+ "which role to use. For now, Vault will attempt to guess based on a\n" +
+ "the API response.")
+
+ role, err := c.defaultRole(c.mountPoint, c.ip)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error choosing role: %v", err))
return 1
@@ -127,110 +151,362 @@ func (c *SSHCommand) Run(args []string) int {
// be used by the user (ACL enforcement), then user should see an
// error message accordingly.
c.Ui.Output(fmt.Sprintf("Vault SSH: Role: %q", role))
+ c.role = role
}
- data := map[string]interface{}{
- "username": username,
- "ip": ip.String(),
- }
-
- keySecret, err := client.SSHWithMountPoint(mountPoint).Credential(role, data)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error getting key for SSH session: %v", err))
- return 1
- }
-
- // if no-exec was chosen, just print out the secret and return.
- if noExec {
- return OutputSecret(c.Ui, format, keySecret)
- }
-
- // Port comes back as a json.Number which mapstructure doesn't like, so convert it
- if keySecret.Data["port"] != nil {
- keySecret.Data["port"] = keySecret.Data["port"].(json.Number).String()
- }
- var resp SSHCredentialResp
- if err := mapstructure.Decode(keySecret.Data, &resp); err != nil {
- c.Ui.Error(fmt.Sprintf("Error parsing the credential response: %v", err))
- return 1
- }
-
- if resp.KeyType == ssh.KeyTypeDynamic {
- if len(resp.Key) == 0 {
- c.Ui.Error(fmt.Sprintf("Invalid key"))
- return 1
- }
- sshDynamicKeyFile, err := ioutil.TempFile("", fmt.Sprintf("vault_ssh_%s_%s_", username, ip.String()))
+ // If no mode was given, perform the old-school lookup. Keep this for now
+ // for backwards-compatibility, but print a warning.
+ //
+ // TODO: remove in 0.9.0, convert to validation error
+ if c.mode == "" {
+ c.Ui.Warn("" +
+ "WARNING: No -mode specified. Use -mode to tell Vault which ssh\n" +
+ "authentication mode to use. In the future, you will need to tell\n" +
+ "Vault which mode to use. For now, Vault will attempt to guess based\n" +
+ "on the API response. This guess involves creating a temporary\n" +
+ "credential, reading its type, and then revoking it. To reduce the\n" +
+ "number of API calls and surface area, specify -mode directly.")
+ secret, cred, err := c.generateCredential()
if err != nil {
- c.Ui.Error(fmt.Sprintf("Error creating temporary file: %v", err))
+ // This is _very_ hacky, but is the only sane backwards-compatible way
+ // to do this. If the error is "key type unknown", we just assume the
+ // type is "ca". In the future, mode will be required as an option.
+ if strings.Contains(err.Error(), "key type unknown") {
+ c.mode = ssh.KeyTypeCA
+ } else {
+ c.Ui.Error(fmt.Sprintf("Error getting credential: %s", err))
+ return 1
+ }
+ } else {
+ c.mode = cred.KeyType
+ }
+
+ // Revoke the secret, since the child functions will generate their own
+ // credential. Users wishing to avoid this should specify -mode.
+ if secret != nil {
+ if err := c.client.Sys().Revoke(secret.LeaseID); err != nil {
+ c.Ui.Warn(fmt.Sprintf("Failed to revoke temporary key: %s", err))
+ }
+ }
+ }
+
+ switch strings.ToLower(c.mode) {
+ case ssh.KeyTypeCA:
+ if err := c.handleTypeCA(); err != nil {
+ c.Ui.Error(err.Error())
return 1
}
-
- // Ensure that we delete the temporary file
- defer os.Remove(sshDynamicKeyFile.Name())
-
- if err = ioutil.WriteFile(sshDynamicKeyFile.Name(),
- []byte(resp.Key), 0600); err != nil {
- c.Ui.Error(fmt.Sprintf("Error storing the dynamic key into the temporary file: %v", err))
+ case ssh.KeyTypeOTP:
+ if err := c.handleTypeOTP(); err != nil {
+ c.Ui.Error(err.Error())
return 1
}
- sshCmdArgs = append(sshCmdArgs, []string{"-i", sshDynamicKeyFile.Name()}...)
-
- } else if resp.KeyType == ssh.KeyTypeOTP {
- // Check if the application 'sshpass' is installed in the client machine.
- // If it is then, use it to automate typing in OTP to the prompt. Unfortunately,
- // it was not possible to automate it without a third-party application, with
- // only the Go libraries.
- // Feel free to try and remove this dependency.
- sshpassPath, err := exec.LookPath("sshpass")
- if err == nil {
- sshCmdArgs = append(sshCmdArgs, []string{"-p", string(resp.Key), "ssh", "-o UserKnownHostsFile=" + userKnownHostsFile, "-o StrictHostKeyChecking=" + strictHostKeyChecking, "-p", resp.Port, username + "@" + ip.String()}...)
- if len(args) > 1 {
- sshCmdArgs = append(sshCmdArgs, args[1:]...)
- }
- sshCmd := exec.Command(sshpassPath, sshCmdArgs...)
- sshCmd.Stdin = os.Stdin
- sshCmd.Stdout = os.Stdout
- err = sshCmd.Run()
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Failed to establish SSH connection: %q", err))
- }
- return 0
+ case ssh.KeyTypeDynamic:
+ if err := c.handleTypeDynamic(); err != nil {
+ c.Ui.Error(err.Error())
+ return 1
}
- c.Ui.Output("OTP for the session is " + resp.Key)
- c.Ui.Output("[Note: Install 'sshpass' to automate typing in OTP]")
- }
- sshCmdArgs = append(sshCmdArgs, []string{"-o UserKnownHostsFile=" + userKnownHostsFile, "-o StrictHostKeyChecking=" + strictHostKeyChecking, "-p", resp.Port, username + "@" + ip.String()}...)
- if len(args) > 1 {
- sshCmdArgs = append(sshCmdArgs, args[1:]...)
- }
-
- sshCmd := exec.Command("ssh", sshCmdArgs...)
- sshCmd.Stdin = os.Stdin
- sshCmd.Stdout = os.Stdout
-
- // Running the command as a separate command. The reason for using exec.Command instead
- // of using crypto/ssh package is that, this way, user can have the same feeling of
- // connecting to remote hosts with the ssh command. Package crypto/ssh did not have a way
- // to establish an independent session like this.
- err = sshCmd.Run()
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error while running ssh command: %q", err))
- }
-
- // If the session established was longer than the lease expiry, the secret
- // might have been revoked already. If not, then revoke it. Since the key
- // file is deleted and since user doesn't know the credential anymore, there
- // is not point in Vault maintaining this secret anymore. Every time the command
- // is run, a fresh credential is generated anyways.
- err = client.Sys().Revoke(keySecret.LeaseID)
- if err != nil {
- c.Ui.Error(fmt.Sprintf("Error revoking the key: %q", err))
+ default:
+ c.Ui.Error(fmt.Sprintf("Unknown SSH mode: %s", c.mode))
+ return 1
}
return 0
}
+// handleTypeCA is used to handle SSH logins using the "CA" key type.
+func (c *SSHCommand) handleTypeCA() error {
+ // Read the key from disk
+ publicKey, err := ioutil.ReadFile(c.publicKeyPath)
+ if err != nil {
+ return errors.Wrap(err, "failed to read public key")
+ }
+
+ // Attempt to sign the public key
+ secret, err := c.sshClient.SignKey(c.role, map[string]interface{}{
+ // WARNING: publicKey is []byte, which is b64 encoded on JSON upload. We
+ // have to convert it to a string. SV lost many hours to this...
+ "public_key": string(publicKey),
+ "valid_principals": c.username,
+ "cert_type": "user",
+
+ // TODO: let the user configure these. In the interim, if users want to
+ // customize these values, they can produce the key themselves.
+ "extensions": map[string]string{
+ "permit-X11-forwarding": "",
+ "permit-agent-forwarding": "",
+ "permit-port-forwarding": "",
+ "permit-pty": "",
+ "permit-user-rc": "",
+ },
+ })
+ if err != nil {
+ return errors.Wrap(err, "failed to sign public key")
+ }
+ if secret == nil || secret.Data == nil {
+ return fmt.Errorf("client signing returned empty credentials")
+ }
+
+ // Handle no-exec
+ if c.noExec {
+ // This is hacky, but OutputSecret returns an int, not an error :(
+ if i := OutputSecret(c.Ui, c.format, secret); i != 0 {
+ return fmt.Errorf("an error occurred outputting the secret")
+ }
+ return nil
+ }
+
+ // Extract public key
+ key, ok := secret.Data["signed_key"].(string)
+ if !ok {
+ return fmt.Errorf("missing signed key")
+ }
+
+ // Capture the current value - this could be overwritten later if the user
+ // enabled host key signing verification.
+ userKnownHostsFile := c.userKnownHostsFile
+ strictHostKeyChecking := c.strictHostKeyChecking
+
+ // Handle host key signing verification. If the user specified a mount point,
+ // download the public key, trust it with the given domains, and use that
+ // instead of the user's regular known_hosts file.
+ if c.hostKeyMountPoint != "" {
+ secret, err := c.client.Logical().Read(c.hostKeyMountPoint + "/config/ca")
+ if err != nil {
+ return errors.Wrap(err, "failed to get host signing key")
+ }
+ if secret == nil || secret.Data == nil {
+ return fmt.Errorf("missing host signing key")
+ }
+ publicKey, ok := secret.Data["public_key"].(string)
+ if !ok {
+ return fmt.Errorf("host signing key is empty")
+ }
+
+ // Write the known_hosts file
+ name := fmt.Sprintf("vault_ssh_ca_known_hosts_%s_%s", c.username, c.ip)
+ data := fmt.Sprintf("@cert-authority %s %s", c.hostKeyHostnames, publicKey)
+ knownHosts, err, closer := c.writeTemporaryFile(name, []byte(data), 0644)
+ defer closer()
+ if err != nil {
+ return errors.Wrap(err, "failed to write host public key")
+ }
+
+ // Update the variables
+ userKnownHostsFile = knownHosts
+ strictHostKeyChecking = "yes"
+ }
+
+ // Write the signed public key to disk
+ name := fmt.Sprintf("vault_ssh_ca_%s_%s", c.username, c.ip)
+ signedPublicKeyPath, err, closer := c.writeTemporaryKey(name, []byte(key))
+ defer closer()
+ if err != nil {
+ return errors.Wrap(err, "failed to write signed public key")
+ }
+
+ args := append([]string{
+ "-i", c.privateKeyPath,
+ "-i", signedPublicKeyPath,
+ "-o UserKnownHostsFile=" + userKnownHostsFile,
+ "-o StrictHostKeyChecking=" + strictHostKeyChecking,
+ c.username + "@" + c.ip,
+ }, c.sshArgs...)
+
+ cmd := exec.Command("ssh", args...)
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ err = cmd.Run()
+ if err != nil {
+ return errors.Wrap(err, "failed to run ssh command")
+ }
+
+ // There is no secret to revoke, since it's a certificate signing
+
+ return nil
+}
+
+// handleTypeOTP is used to handle SSH logins using the "otp" key type.
+func (c *SSHCommand) handleTypeOTP() error {
+ secret, cred, err := c.generateCredential()
+ if err != nil {
+ return errors.Wrap(err, "failed to generate credential")
+ }
+
+ // Handle no-exec
+ if c.noExec {
+ // This is hacky, but OutputSecret returns an int, not an error :(
+ if i := OutputSecret(c.Ui, c.format, secret); i != 0 {
+ return fmt.Errorf("an error occurred outputting the secret")
+ }
+ return nil
+ }
+
+ var cmd *exec.Cmd
+
+ // Check if the application 'sshpass' is installed on the client machine.
+ // If it is, use it to automate typing the OTP at the prompt. Unfortunately,
+ // it was not possible to automate this with only the Go libraries; a
+ // third-party application was required.
+ // Feel free to try and remove this dependency.
+ sshpassPath, err := exec.LookPath("sshpass")
+ if err != nil {
+ c.Ui.Warn("" +
+ "Vault could not locate sshpass. The OTP code for the session will be\n" +
+ "displayed below. Enter this code in the SSH password prompt. If you\n" +
+ "install sshpass, Vault can automatically perform this step for you.")
+ c.Ui.Output("OTP for the session is " + cred.Key)
+
+ args := append([]string{
+ "-o UserKnownHostsFile=" + c.userKnownHostsFile,
+ "-o StrictHostKeyChecking=" + c.strictHostKeyChecking,
+ "-p", cred.Port,
+ c.username + "@" + c.ip,
+ }, c.sshArgs...)
+ cmd = exec.Command("ssh", args...)
+ } else {
+ args := append([]string{
+ "-e", // Read password for SSHPASS environment variable
+ "ssh",
+ "-o UserKnownHostsFile=" + c.userKnownHostsFile,
+ "-o StrictHostKeyChecking=" + c.strictHostKeyChecking,
+ "-p", cred.Port,
+ c.username + "@" + c.ip,
+ }, c.sshArgs...)
+ cmd = exec.Command(sshpassPath, args...)
+ env := os.Environ()
+ env = append(env, fmt.Sprintf("SSHPASS=%s", string(cred.Key)))
+ cmd.Env = env
+ }
+
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ err = cmd.Run()
+ if err != nil {
+ return errors.Wrap(err, "failed to run ssh command")
+ }
+
+ // Revoke the key in case the session outlasted the lease expiry
+ if err := c.client.Sys().Revoke(secret.LeaseID); err != nil {
+ return errors.Wrap(err, "failed to revoke key")
+ }
+
+ return nil
+}
+
+// handleTypeDynamic is used to handle SSH logins using the "dynamic" key type.
+func (c *SSHCommand) handleTypeDynamic() error {
+ // Generate the credential
+ secret, cred, err := c.generateCredential()
+ if err != nil {
+ return errors.Wrap(err, "failed to generate credential")
+ }
+
+ // Handle no-exec
+ if c.noExec {
+ // This is hacky, but OutputSecret returns an int, not an error :(
+ if i := OutputSecret(c.Ui, c.format, secret); i != 0 {
+ return fmt.Errorf("an error occurred outputting the secret")
+ }
+ return nil
+ }
+
+ // Write the dynamic key to disk
+ name := fmt.Sprintf("vault_ssh_dynamic_%s_%s", c.username, c.ip)
+ keyPath, err, closer := c.writeTemporaryKey(name, []byte(cred.Key))
+ defer closer()
+ if err != nil {
+ return errors.Wrap(err, "failed to save dynamic key")
+ }
+
+ args := append([]string{
+ "-i", keyPath,
+ "-o UserKnownHostsFile=" + c.userKnownHostsFile,
+ "-o StrictHostKeyChecking=" + c.strictHostKeyChecking,
+ "-p", cred.Port,
+ c.username + "@" + c.ip,
+ }, c.sshArgs...)
+
+ cmd := exec.Command("ssh", args...)
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ err = cmd.Run()
+ if err != nil {
+ return errors.Wrap(err, "failed to run ssh command")
+ }
+
+ // Revoke the key in case the session outlasted the lease expiry
+ if err := c.client.Sys().Revoke(secret.LeaseID); err != nil {
+ return errors.Wrap(err, "failed to revoke key")
+ }
+
+ return nil
+}
+
+// generateCredential generates a credential for the given role and returns the
+// decoded secret data.
+func (c *SSHCommand) generateCredential() (*api.Secret, *SSHCredentialResp, error) {
+ // Attempt to generate the credential.
+ secret, err := c.sshClient.Credential(c.role, map[string]interface{}{
+ "username": c.username,
+ "ip": c.ip,
+ })
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "failed to get credentials")
+ }
+ if secret == nil || secret.Data == nil {
+ return nil, nil, fmt.Errorf("vault returned empty credentials")
+ }
+
+ // Port comes back as a json.Number which mapstructure doesn't like, so
+ // convert it
+ if d, ok := secret.Data["port"].(json.Number); ok {
+ secret.Data["port"] = d.String()
+ }
+
+ // Use mapstructure to decode the response
+ var resp SSHCredentialResp
+ if err := mapstructure.Decode(secret.Data, &resp); err != nil {
+ return nil, nil, errors.Wrap(err, "failed to decode credential")
+ }
+
+ // Check for an empty key response
+ if len(resp.Key) == 0 {
+ return nil, nil, fmt.Errorf("vault returned an invalid key")
+ }
+
+ return secret, &resp, nil
+}
+
+// writeTemporaryFile writes a file to a temp location with the given data and
+// file permissions.
+func (c *SSHCommand) writeTemporaryFile(name string, data []byte, perms os.FileMode) (string, error, func() error) {
+ // default closer to prevent panic
+ closer := func() error { return nil }
+
+ f, err := ioutil.TempFile("", name)
+ if err != nil {
+ return "", errors.Wrap(err, "creating temporary file"), closer
+ }
+
+ closer = func() error { return os.Remove(f.Name()) }
+
+ if err := ioutil.WriteFile(f.Name(), data, perms); err != nil {
+ return "", errors.Wrap(err, "writing temporary key"), closer
+ }
+
+ return f.Name(), nil, closer
+}
+
+// writeTemporaryKey writes the key to a temporary file and returns the path.
+// The caller should defer the closer to clean up the key.
+func (c *SSHCommand) writeTemporaryKey(name string, data []byte) (string, error, func() error) {
+ return c.writeTemporaryFile(name, data, 0600)
+}
+
// If user did not provide the role with which SSH connection has
// to be established and if there is only one role associated with
// the IP, it is used by default.
@@ -247,7 +523,7 @@ func (c *SSHCommand) defaultRole(mountPoint, ip string) (string, error) {
return "", fmt.Errorf("Error finding roles for IP %q: %q", ip, err)
}
- if secret == nil {
+ if secret == nil || secret.Data == nil {
return "", fmt.Errorf("Error finding roles for IP %q: %q", ip, err)
}
@@ -270,61 +546,136 @@ func (c *SSHCommand) defaultRole(mountPoint, ip string) (string, error) {
}
}
+// userAndIP takes an argument in the format foo@1.2.3.4 and separates the IP
+// and user parts, returning any errors.
+func (c *SSHCommand) userAndIP(s string) (string, string, error) {
+ // split the parameter username@ip
+ input := strings.Split(s, "@")
+ var username, address string
+
+ // If only IP is mentioned and username is skipped, assume username to
+ // be the current username. Vault SSH role's default username could have
+ // been used, but in order to retain the consistency with SSH command,
+ // current username is employed.
+ switch len(input) {
+ case 1:
+ u, err := user.Current()
+ if err != nil {
+ return "", "", errors.Wrap(err, "failed to fetch current user")
+ }
+ username, address = u.Username, input[0]
+ case 2:
+ username, address = input[0], input[1]
+ default:
+ return "", "", fmt.Errorf("invalid arguments: %q", s)
+ }
+
+ // Resolving domain names to IP address on the client side.
+ // Vault only deals with IP addresses.
+ ipAddr, err := net.ResolveIPAddr("ip", address)
+ if err != nil {
+ return "", "", errors.Wrap(err, "failed to resolve IP address")
+ }
+ ip := ipAddr.String()
+
+ return username, ip, nil
+}
+
func (c *SSHCommand) Synopsis() string {
return "Initiate an SSH session"
}
func (c *SSHCommand) Help() string {
helpText := `
-Usage: vault ssh [options] username@ip
+Usage: vault ssh [options] username@ip [ssh options]
Establishes an SSH connection with the target machine.
- This command generates a key and uses it to establish an SSH
- connection with the target machine. This operation requires
- that the SSH backend is mounted and at least one 'role' is
- registered with Vault beforehand.
+ This command uses one of the SSH authentication backends to authenticate and
+ automatically establish an SSH connection to a host. This operation requires
+ that the SSH backend is mounted and configured.
- For setting up SSH backends with one-time-passwords, installation
- of vault-ssh-helper or a compatible agent on target machines
- is required. See [https://github.com/hashicorp/vault-ssh-agent].
+ SSH using the OTP mode (requires sshpass for full automation):
+
+ $ vault ssh -mode=otp -role=my-role user@1.2.3.4
+
+ SSH using the CA mode:
+
+ $ vault ssh -mode=ca -role=my-role user@1.2.3.4
+
+ SSH using CA mode with host key verification:
+
+ $ vault ssh \
+ -mode=ca \
+ -role=my-role \
+ -host-key-mount-point=host-signer \
+ -host-key-hostnames=example.com \
+ user@example.com
+
+ For the full list of options and arguments, please see the documentation.
General Options:
` + meta.GeneralOptionsUsage() + `
SSH Options:
- -role Role to be used to create the key.
- Each IP is associated with a role. To see the associated
- roles with IP, use "lookup" endpoint. If you are certain
- that there is only one role associated with the IP, you can
- skip mentioning the role. It will be chosen by default. If
- there are no roles associated with the IP, register the
- CIDR block of that IP using the "roles/" endpoint.
+ -role Role to be used to create the key. Each IP is associated with
+ a role. To see the roles associated with an IP, use the "lookup"
+ endpoint. If you are certain that there is only one role
+ associated with the IP, you can skip mentioning the role. It
+ will be chosen by default. If there are no roles associated
+ with the IP, register the CIDR block of that IP using the
+ "roles/" endpoint.
- -no-exec Shows the credentials but does not establish connection.
+ -no-exec Shows the credentials but does not establish connection.
- -mount-point Mount point of SSH backend. If the backend is mounted at
- 'ssh', which is the default as well, this parameter can be
- skipped.
+ -mount-point Mount point of SSH backend. If the backend is mounted at
+ "ssh" (default), this parameter can be skipped.
- -format If no-exec option is enabled, then the credentials will be
- printed out and SSH connection will not be established. The
- format of the output can be 'json' or 'table'. JSON output
- is useful when writing scripts. Default is 'table'.
+ -format If the "no-exec" option is enabled, the credentials will be
+ printed out and SSH connection will not be established. The
+ format of the output can be "json" or "table" (default).
- -strict-host-key-checking This option corresponds to StrictHostKeyChecking of SSH configuration.
- If 'sshpass' is employed to enable automated login, then if host key
- is not "known" to the client, 'vault ssh' command will fail. Set this
- option to "no" to bypass the host key checking. Defaults to "ask".
- Can also be specified with VAULT_SSH_STRICT_HOST_KEY_CHECKING environment
- variable.
+ -strict-host-key-checking This option corresponds to "StrictHostKeyChecking"
+ of SSH configuration. If "sshpass" is employed to enable
+ automated login and the host key is not "known" to the
+ client, the "vault ssh" command will fail. Set this option to
+ "no" to bypass the host key checking. Defaults to "ask".
+ Can also be specified with the
+ "VAULT_SSH_STRICT_HOST_KEY_CHECKING" environment variable.
- -user-known-hosts-file This option corresponds to UserKnownHostsFile of SSH configuration.
- Assigns the file to use for storing the host keys. If this option is
- set to "/dev/null" along with "-strict-host-key-checking=no", both
- warnings and host key checking can be avoided while establishing the
- connection. Defaults to "~/.ssh/known_hosts". Can also be specified
- with VAULT_SSH_USER_KNOWN_HOSTS_FILE environment variable.
+ -user-known-hosts-file This option corresponds to "UserKnownHostsFile" of
+ SSH configuration. Assigns the file to use for storing the
+ host keys. If this option is set to "/dev/null" along with
+ "-strict-host-key-checking=no", both warnings and host key
+ checking can be avoided while establishing the connection.
+ Defaults to "~/.ssh/known_hosts". Can also be specified with
+ "VAULT_SSH_USER_KNOWN_HOSTS_FILE" environment variable.
+
+CA Mode Options:
+
+ - public-key-path=<path>
+ The path to the public key to send to Vault for signing. The default value
+ is ~/.ssh/id_rsa.pub.
+
+ - private-key-path=<path>
+ The path to the private key to use for authentication. This must be the
+ corresponding private key to -public-key-path. The default value is
+ ~/.ssh/id_rsa.
+
+ - host-key-mount-point=<string>
+ The mount point to the SSH backend where host keys are signed. When given
+ a value, Vault will generate a custom known_hosts file with delegation to
+ the CA at the provided mount point and verify the SSH connection's host
+ keys against the provided CA. By default, this command uses the user's
+ existing known_hosts file. When this flag is set, this command will force
+ strict host key checking and will override any values provided for a
+ custom -user-known-hosts-file.
+
+ - host-key-hostnames=<string>
+ The list of hostnames to delegate for this certificate authority. By
+ default, this is "*", which allows all domains and IPs. To restrict
+ validation to a series of hostnames, specify them as comma-separated
+ values here.
`
return strings.TrimSpace(helpText)
}
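
Note: writeTemporaryFile above returns a (path, err, closer) triple in which the closer is non-nil even on failure, so callers can defer it before checking the error. A usage sketch with illustrative variable names:

```go
// signedKey is hypothetical input; writeTemporaryKey applies 0600 perms.
path, err, closer := c.writeTemporaryKey("vault_ssh_example", signedKey)
defer closer() // safe on every path: closer defaults to a no-op
if err != nil {
	return errors.Wrap(err, "failed to write signed key")
}
args := append([]string{"-i", path}, c.sshArgs...) // hand the temp key to ssh
```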
diff --git a/vendor/github.com/hashicorp/vault/command/status.go b/vendor/github.com/hashicorp/vault/command/status.go
index 3d584c7..7b6cce3 100644
--- a/vendor/github.com/hashicorp/vault/command/status.go
+++ b/vendor/github.com/hashicorp/vault/command/status.go
@@ -84,7 +84,10 @@ func (c *StatusCommand) Run(args []string) int {
if leaderStatus.LeaderAddress == "" {
leaderStatus.LeaderAddress = "<none>"
}
- c.Ui.Output(fmt.Sprintf("\tLeader: %s", leaderStatus.LeaderAddress))
+ if leaderStatus.LeaderClusterAddress == "" {
+ leaderStatus.LeaderClusterAddress = "<none>"
+ }
+ c.Ui.Output(fmt.Sprintf("\tLeader Cluster Address: %s", leaderStatus.LeaderClusterAddress))
}
}
diff --git a/vendor/github.com/hashicorp/vault/command/util.go b/vendor/github.com/hashicorp/vault/command/util.go
index 0ec3916..1eefc92 100644
--- a/vendor/github.com/hashicorp/vault/command/util.go
+++ b/vendor/github.com/hashicorp/vault/command/util.go
@@ -57,6 +57,8 @@ func PrintRawField(ui cli.Ui, secret *api.Secret, field string) int {
val = secret.WrapInfo.TTL
case "wrapping_token_creation_time":
val = secret.WrapInfo.CreationTime.Format(time.RFC3339Nano)
+ case "wrapping_token_creation_path":
+ val = secret.WrapInfo.CreationPath
case "wrapped_accessor":
val = secret.WrapInfo.WrappedAccessor
default:
diff --git a/vendor/github.com/hashicorp/vault/command/write.go b/vendor/github.com/hashicorp/vault/command/write.go
index 0614f9b..6f7b495 100644
--- a/vendor/github.com/hashicorp/vault/command/write.go
+++ b/vendor/github.com/hashicorp/vault/command/write.go
@@ -8,6 +8,7 @@ import (
"github.com/hashicorp/vault/helper/kv-builder"
"github.com/hashicorp/vault/meta"
+ "github.com/posener/complete"
)
// WriteCommand is a Command that puts data into the Vault.
@@ -32,6 +33,12 @@ func (c *WriteCommand) Run(args []string) int {
}
args = flags.Args()
+ if len(args) < 1 {
+ c.Ui.Error("write requires a path")
+ flags.Usage()
+ return 1
+ }
+
if len(args) < 2 && !force {
c.Ui.Error("write expects at least two arguments; use -f to perform the write anyways")
flags.Usage()
@@ -133,3 +140,15 @@ Write Options:
`
return strings.TrimSpace(helpText)
}
+
+func (c *WriteCommand) AutocompleteArgs() complete.Predictor {
+ return complete.PredictNothing
+}
+
+func (c *WriteCommand) AutocompleteFlags() complete.Flags {
+ return complete.Flags{
+ "-force": complete.PredictNothing,
+ "-format": predictFormat,
+ "-field": complete.PredictNothing,
+ }
+}
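
Note: predictFormat is referenced above but not defined in this hunk. A plausible definition, assuming it enumerates the supported -format values with the posener/complete helpers:

```go
// Assumption: the real predictFormat may list a different set of formats.
var predictFormat = complete.PredictSet("table", "json", "yaml")
```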
diff --git a/vendor/github.com/hashicorp/vault/command/write_test.go b/vendor/github.com/hashicorp/vault/command/write_test.go
index 786bbc3..5aa3c1e 100644
--- a/vendor/github.com/hashicorp/vault/command/write_test.go
+++ b/vendor/github.com/hashicorp/vault/command/write_test.go
@@ -243,8 +243,8 @@ func TestWrite_Output(t *testing.T) {
if code := c.Run(args); code != 0 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
- if !strings.Contains(string(ui.OutputWriter.Bytes()), "Key") {
- t.Fatalf("bad: %s", string(ui.OutputWriter.Bytes()))
+ if !strings.Contains(ui.OutputWriter.String(), "Key") {
+ t.Fatalf("bad: %s", ui.OutputWriter.String())
}
}
diff --git a/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go b/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go
index 7399a5c..6b18968 100644
--- a/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go
+++ b/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go
@@ -6,9 +6,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
- "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/aws/defaults"
)
type CredentialsConfig struct {
@@ -65,14 +63,16 @@ func (c *CredentialsConfig) GenerateCredentialChain() (*credentials.Credentials,
Profile: c.Profile,
})
- // Add the instance metadata role provider
- providers = append(providers, &ec2rolecreds.EC2RoleProvider{
- Client: ec2metadata.New(session.New(&aws.Config{
- Region: aws.String(c.Region),
- HTTPClient: c.HTTPClient,
- })),
- ExpiryWindow: 15,
- })
+ // Add the remote provider
+ def := defaults.Get()
+ if c.Region != "" {
+ def.Config.Region = aws.String(c.Region)
+ }
+ if c.HTTPClient != nil {
+ def.Config.HTTPClient = c.HTTPClient
+ }
+
+ providers = append(providers, defaults.RemoteCredProvider(*def.Config, def.Handlers))
// Create the credentials required to access the API.
creds := credentials.NewChainCredentials(providers)
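
Note: callers are unchanged by the switch to defaults.RemoteCredProvider, which picks EC2 instance-metadata or ECS task-role credentials depending on the environment. A hedged usage sketch with hypothetical values:

```go
config := &awsutil.CredentialsConfig{Region: "us-east-1"}
creds, err := config.GenerateCredentialChain()
if err != nil {
	return err
}
// Get resolves the first provider in the chain that yields credentials.
val, err := creds.Get()
fmt.Println(val.ProviderName, err)
```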
diff --git a/vendor/github.com/hashicorp/vault/helper/builtinplugins/builtin.go b/vendor/github.com/hashicorp/vault/helper/builtinplugins/builtin.go
new file mode 100644
index 0000000..df424ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/builtinplugins/builtin.go
@@ -0,0 +1,50 @@
+package builtinplugins
+
+import (
+ "github.com/hashicorp/vault/plugins/database/cassandra"
+ "github.com/hashicorp/vault/plugins/database/hana"
+ "github.com/hashicorp/vault/plugins/database/mongodb"
+ "github.com/hashicorp/vault/plugins/database/mssql"
+ "github.com/hashicorp/vault/plugins/database/mysql"
+ "github.com/hashicorp/vault/plugins/database/postgresql"
+ "github.com/hashicorp/vault/plugins/helper/database/credsutil"
+)
+
+// BuiltinFactory is the func signature that should be returned by
+// the plugin's New() func.
+type BuiltinFactory func() (interface{}, error)
+
+var plugins = map[string]BuiltinFactory{
+ // These four plugins all use the same mysql implementation but with
+ // different username settings passed by the constructor.
+ "mysql-database-plugin": mysql.New(mysql.MetadataLen, mysql.MetadataLen, mysql.UsernameLen),
+ "mysql-aurora-database-plugin": mysql.New(credsutil.NoneLength, mysql.LegacyMetadataLen, mysql.LegacyUsernameLen),
+ "mysql-rds-database-plugin": mysql.New(credsutil.NoneLength, mysql.LegacyMetadataLen, mysql.LegacyUsernameLen),
+ "mysql-legacy-database-plugin": mysql.New(credsutil.NoneLength, mysql.LegacyMetadataLen, mysql.LegacyUsernameLen),
+
+ "postgresql-database-plugin": postgresql.New,
+ "mssql-database-plugin": mssql.New,
+ "cassandra-database-plugin": cassandra.New,
+ "mongodb-database-plugin": mongodb.New,
+ "hana-database-plugin": hana.New,
+}
+
+// Get returns the BuiltinFactory func for a particular backend plugin
+// from the plugins map.
+func Get(name string) (BuiltinFactory, bool) {
+ f, ok := plugins[name]
+ return f, ok
+}
+
+// Keys returns the list of plugin names that are considered builtin plugins.
+func Keys() []string {
+ keys := make([]string, len(plugins))
+
+ i := 0
+ for k := range plugins {
+ keys[i] = k
+ i++
+ }
+
+ return keys
+}
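
Note: a short sketch of resolving a factory through the new registry; the surrounding error handling is illustrative only:

```go
factory, ok := builtinplugins.Get("postgresql-database-plugin")
if !ok {
	return nil, fmt.Errorf("not a builtin plugin")
}
// The factory returns interface{}; callers assert the concrete plugin type.
return factory()
```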
diff --git a/vendor/github.com/hashicorp/vault/helper/certutil/types.go b/vendor/github.com/hashicorp/vault/helper/certutil/types.go
index 35b7317..c955222 100644
--- a/vendor/github.com/hashicorp/vault/helper/certutil/types.go
+++ b/vendor/github.com/hashicorp/vault/helper/certutil/types.go
@@ -286,7 +286,7 @@ func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) {
}
// Verify checks if the parsed bundle is valid. It validates the public
-// key of the certificate to the private key and checks the certficate trust
+// key of the certificate to the private key and checks the certificate trust
// chain for path issues.
func (p *ParsedCertBundle) Verify() error {
// If private key exists, check if it matches the public key of cert
diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
index e485f2f..31a2dcd 100644
--- a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
+++ b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
@@ -6,6 +6,8 @@ import (
"compress/lzw"
"fmt"
"io"
+
+ "github.com/golang/snappy"
)
const (
@@ -20,16 +22,35 @@ const (
// Byte value used as canary when using Lzw format
CompressionCanaryLzw byte = 'L'
+ // Byte value used as canary when using Snappy format
+ CompressionCanarySnappy byte = 'S'
+
CompressionTypeLzw = "lzw"
CompressionTypeGzip = "gzip"
+
+ CompressionTypeSnappy = "snappy"
)
+// SnappyReadCloser embeds the snappy reader, which implements the io.Reader
+// interface. The decompress procedure in this utility expects an
+// io.ReadCloser. This type implements the io.Closer interface to retain the
+// generic way of decompression.
+type SnappyReadCloser struct {
+ *snappy.Reader
+}
+
+// Close is a noop method implemented only to satisfy the io.Closer interface
+func (s *SnappyReadCloser) Close() error {
+ return nil
+}
+
// CompressionConfig is used to select a compression type to be performed by
// Compress and Decompress utilities.
// Supported types are:
// * CompressionTypeLzw
// * CompressionTypeGzip
+// * CompressionTypeSnappy
//
// When using CompressionTypeGzip, the compression levels can also be chosen:
// * gzip.DefaultCompression
@@ -78,9 +99,13 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
config.GzipCompressionLevel = gzip.DefaultCompression
}
writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel)
+ case CompressionTypeSnappy:
+ buf.Write([]byte{CompressionCanarySnappy})
+ writer = snappy.NewBufferedWriter(&buf)
default:
return nil, fmt.Errorf("unsupported compression type")
}
+
if err != nil {
return nil, fmt.Errorf("failed to create a compression writer; err: %v", err)
}
@@ -117,22 +142,29 @@ func Decompress(data []byte) ([]byte, bool, error) {
}
switch {
+ // If the first byte matches the canary byte, remove the canary
+ // byte and try to decompress the data that is after the canary.
case data[0] == CompressionCanaryGzip:
- // If the first byte matches the canary byte, remove the canary
- // byte and try to decompress the data that is after the canary.
if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary")
}
data = data[1:]
reader, err = gzip.NewReader(bytes.NewReader(data))
case data[0] == CompressionCanaryLzw:
- // If the first byte matches the canary byte, remove the canary
- // byte and try to decompress the data that is after the canary.
if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary")
}
data = data[1:]
reader = lzw.NewReader(bytes.NewReader(data), lzw.LSB, 8)
+
+ case data[0] == CompressionCanarySnappy:
+ if len(data) < 2 {
+ return nil, false, fmt.Errorf("invalid 'data' after the canary")
+ }
+ data = data[1:]
+ reader = &SnappyReadCloser{
+ Reader: snappy.NewReader(bytes.NewReader(data)),
+ }
default:
// If the first byte doesn't match the canary byte, it means
// that the content was not compressed at all. Indicate the
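
Note: the canary scheme above tags each compressed payload with a single leading algorithm byte, which lets Decompress route to the matching reader or report the input as uncompressed. A short sketch:

```go
out, err := compressutil.Compress(jsonBytes, &compressutil.CompressionConfig{
	Type: compressutil.CompressionTypeSnappy,
})
if err != nil {
	return err
}
fmt.Println(out[0] == compressutil.CompressionCanarySnappy) // true: 'S' prefix

_, notCompressed, _ := compressutil.Decompress([]byte("plain text"))
fmt.Println(notCompressed) // true: no known canary byte
```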
diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go
index 52b03d5..5eeeea8 100644
--- a/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go
+++ b/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go
@@ -7,6 +7,47 @@ import (
"testing"
)
+func TestCompressUtil_CompressSnappy(t *testing.T) {
+ input := map[string]interface{}{
+ "sample": "data",
+ "verification": "process",
+ }
+
+ // Encode input into JSON
+ var buf bytes.Buffer
+ enc := json.NewEncoder(&buf)
+ if err := enc.Encode(input); err != nil {
+ t.Fatal(err)
+ }
+ inputJSONBytes := buf.Bytes()
+
+ // Set Snappy compression in the configuration
+ compressionConfig := &CompressionConfig{
+ Type: CompressionTypeSnappy,
+ }
+
+ // Compress the input
+ compressedJSONBytes, err := Compress(inputJSONBytes, compressionConfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ decompressedJSONBytes, wasNotCompressed, err := Decompress(compressedJSONBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check if the input for decompress was not compressed in the first place
+ if wasNotCompressed {
+ t.Fatalf("bad: expected compressed bytes")
+ }
+
+ // Compare the value after decompression
+ if string(inputJSONBytes) != string(decompressedJSONBytes) {
+ t.Fatalf("bad: decompressed value;\nexpected: %q\nactual: %q", string(inputJSONBytes), string(decompressedJSONBytes))
+ }
+}
+
func TestCompressUtil_CompressDecompress(t *testing.T) {
input := map[string]interface{}{
"sample": "data",
diff --git a/vendor/github.com/hashicorp/vault/helper/consts/error.go b/vendor/github.com/hashicorp/vault/helper/consts/error.go
index d96ba4f..06977d5 100644
--- a/vendor/github.com/hashicorp/vault/helper/consts/error.go
+++ b/vendor/github.com/hashicorp/vault/helper/consts/error.go
@@ -10,4 +10,7 @@ var (
// ErrStandby is returned if an operation is performed on a standby Vault.
// No operation is expected to succeed until active.
ErrStandby = errors.New("Vault is in standby mode")
+
+ // ErrPathContainsParentReferences is returned when ".." is used in a path
+ ErrPathContainsParentReferences = errors.New("path cannot contain parent references")
)
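
Note: a hypothetical guard using the new sentinel error, assuming callers reject parent references while normalizing request paths:

```go
if strings.Contains(reqPath, "..") {
	return consts.ErrPathContainsParentReferences
}
```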
diff --git a/vendor/github.com/hashicorp/vault/helper/consts/replication.go b/vendor/github.com/hashicorp/vault/helper/consts/replication.go
index 62bbcb3..7fbeb88 100644
--- a/vendor/github.com/hashicorp/vault/helper/consts/replication.go
+++ b/vendor/github.com/hashicorp/vault/helper/consts/replication.go
@@ -3,18 +3,37 @@ package consts
type ReplicationState uint32
const (
- ReplicationDisabled ReplicationState = iota
- ReplicationPrimary
- ReplicationSecondary
+ _ ReplicationState = iota
+ OldReplicationPrimary
+ OldReplicationSecondary
+ OldReplicationBootstrapping
+
+ ReplicationDisabled ReplicationState = 0
+ ReplicationPerformancePrimary ReplicationState = 1 << iota
+ ReplicationPerformanceSecondary
+ ReplicationBootstrapping
+ ReplicationDRPrimary
+ ReplicationDRSecondary
)
func (r ReplicationState) String() string {
switch r {
- case ReplicationSecondary:
- return "secondary"
- case ReplicationPrimary:
- return "primary"
+ case ReplicationPerformanceSecondary:
+ return "perf-secondary"
+ case ReplicationPerformancePrimary:
+ return "perf-primary"
+ case ReplicationBootstrapping:
+ return "bootstrapping"
+ case ReplicationDRPrimary:
+ return "dr-primary"
+ case ReplicationDRSecondary:
+ return "dr-secondary"
}
return "disabled"
}
+
+func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 }
+func (r *ReplicationState) AddState(flag ReplicationState) { *r |= flag }
+func (r *ReplicationState) ClearState(flag ReplicationState) { *r &= ^flag }
+func (r *ReplicationState) ToggleState(flag ReplicationState) { *r ^= flag }
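(For reference, a self-contained sketch of how these bitflag helpers compose; the constants below are illustrative stand-ins, not the exact values derived from iota above.)

package main

import "fmt"

type ReplicationState uint32

const (
	// Stand-in flag values; the vendored block above lays its values out
	// so the old and new constants do not overlap.
	ReplicationPerformanceSecondary ReplicationState = 1 << iota
	ReplicationDRSecondary
)

func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 }
func (r *ReplicationState) AddState(flag ReplicationState)     { *r |= flag }
func (r *ReplicationState) ClearState(flag ReplicationState)   { *r &= ^flag }

func main() {
	var state ReplicationState
	state.AddState(ReplicationPerformanceSecondary)
	state.AddState(ReplicationDRSecondary)
	fmt.Println(state.HasState(ReplicationPerformanceSecondary)) // true
	state.ClearState(ReplicationPerformanceSecondary)
	fmt.Println(state.HasState(ReplicationPerformanceSecondary)) // false
	fmt.Println(state.HasState(ReplicationDRSecondary))          // true
}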
diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go b/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go
index 27ca3fc..d146a37 100644
--- a/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go
+++ b/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
// source: types.proto
-// DO NOT EDIT!
/*
Package forwarding is a generated protocol buffer package.
diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go b/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go
index e0bdd64..7588199 100644
--- a/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go
+++ b/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go
@@ -243,15 +243,24 @@ func (lm *LockManager) getPolicyCommon(req PolicyRequest, lockType bool) (*Polic
switch req.KeyType {
case KeyType_AES256_GCM96:
if req.Convergent && !req.Derived {
+ lm.UnlockPolicy(lock, lockType)
return nil, nil, false, fmt.Errorf("convergent encryption requires derivation to be enabled")
}
case KeyType_ECDSA_P256:
if req.Derived || req.Convergent {
- return nil, nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", KeyType_ECDSA_P256)
+ lm.UnlockPolicy(lock, lockType)
+ return nil, nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType)
+ }
+
+ case KeyType_ED25519:
+ if req.Convergent {
+ lm.UnlockPolicy(lock, lockType)
+ return nil, nil, false, fmt.Errorf("convergent encryption not not supported for keys of type %v", req.KeyType)
}
default:
+ lm.UnlockPolicy(lock, lockType)
return nil, nil, false, fmt.Errorf("unsupported key type %v", req.KeyType)
}
diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go b/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go
index 1faaca4..7c4a691 100644
--- a/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go
+++ b/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go
@@ -2,6 +2,7 @@ package keysutil
import (
"bytes"
+ "crypto"
"crypto/aes"
"crypto/cipher"
"crypto/ecdsa"
@@ -21,6 +22,7 @@ import (
"strings"
"time"
+ "golang.org/x/crypto/ed25519"
"golang.org/x/crypto/hkdf"
uuid "github.com/hashicorp/go-uuid"
@@ -41,10 +43,16 @@ const (
const (
KeyType_AES256_GCM96 = iota
KeyType_ECDSA_P256
+ KeyType_ED25519
)
const ErrTooOld = "ciphertext or signature version is disallowed by policy (too old)"
+type SigningResult struct {
+ Signature string
+ PublicKey []byte
+}
+
type ecdsaSignature struct {
R, S *big.Int
}
@@ -68,6 +76,14 @@ func (kt KeyType) DecryptionSupported() bool {
}
func (kt KeyType) SigningSupported() bool {
+ switch kt {
+ case KeyType_ECDSA_P256, KeyType_ED25519:
+ return true
+ }
+ return false
+}
+
+func (kt KeyType) HashSignatureInput() bool {
switch kt {
case KeyType_ECDSA_P256:
return true
@@ -77,7 +93,7 @@ func (kt KeyType) SigningSupported() bool {
func (kt KeyType) DerivationSupported() bool {
switch kt {
- case KeyType_AES256_GCM96:
+ case KeyType_AES256_GCM96, KeyType_ED25519:
return true
}
return false
@@ -89,6 +105,8 @@ func (kt KeyType) String() string {
return "aes256-gcm96"
case KeyType_ECDSA_P256:
return "ecdsa-p256"
+ case KeyType_ED25519:
+ return "ed25519"
}
return "[unknown]"
@@ -96,13 +114,25 @@ func (kt KeyType) String() string {
// KeyEntry stores the key and metadata
type KeyEntry struct {
- AESKey []byte `json:"key"`
- HMACKey []byte `json:"hmac_key"`
- CreationTime int64 `json:"creation_time"`
- EC_X *big.Int `json:"ec_x"`
- EC_Y *big.Int `json:"ec_y"`
- EC_D *big.Int `json:"ec_d"`
- FormattedPublicKey string `json:"public_key"`
+	// Key is the AES key, or any other key type that is a pure byte slice (e.g. ED25519)
+ Key []byte `json:"key"`
+
+ // Key used for HMAC functions
+ HMACKey []byte `json:"hmac_key"`
+
+ // Time of creation
+ CreationTime time.Time `json:"time"`
+
+ EC_X *big.Int `json:"ec_x"`
+ EC_Y *big.Int `json:"ec_y"`
+ EC_D *big.Int `json:"ec_d"`
+
+ // The public key in an appropriate format for the type of key
+ FormattedPublicKey string `json:"public_key"`
+
+ // This is deprecated (but still filled) in favor of the value above which
+ // is more precise
+ DeprecatedCreationTime int64 `json:"creation_time"`
}
// keyEntryMap is used to allow JSON marshal/unmarshal
@@ -150,10 +180,12 @@ type Policy struct {
// Whether the key is exportable
Exportable bool `json:"exportable"`
- // The minimum version of the key allowed to be used
- // for decryption
+ // The minimum version of the key allowed to be used for decryption
MinDecryptionVersion int `json:"min_decryption_version"`
+ // The minimum version of the key allowed to be used for encryption
+ MinEncryptionVersion int `json:"min_encryption_version"`
+
// The latest key version in this policy
LatestVersion int `json:"latest_version"`
@@ -239,6 +271,9 @@ func (p *Policy) handleArchiving(storage logical.Storage) error {
case p.ArchiveVersion > p.LatestVersion:
return fmt.Errorf("archive version of %d is greater than the latest version %d",
p.ArchiveVersion, p.LatestVersion)
+ case p.MinEncryptionVersion > 0 && p.MinEncryptionVersion < p.MinDecryptionVersion:
+ return fmt.Errorf("minimum decryption version of %d is greater than minimum encryption version %d",
+ p.MinDecryptionVersion, p.MinEncryptionVersion)
case p.MinDecryptionVersion > p.LatestVersion:
return fmt.Errorf("minimum decryption version of %d is greater than the latest version %d",
p.MinDecryptionVersion, p.LatestVersion)
@@ -427,41 +462,59 @@ func (p *Policy) DeriveKey(context []byte, ver int) ([]byte, error) {
// Fast-path non-derived keys
if !p.Derived {
- return p.Keys[ver].AESKey, nil
+ return p.Keys[ver].Key, nil
}
// Ensure a context is provided
if len(context) == 0 {
- return nil, errutil.UserError{Err: "missing 'context' for key deriviation. The key was created using a derived key, which means additional, per-request information must be included in order to encrypt or decrypt information"}
+ return nil, errutil.UserError{Err: "missing 'context' for key derivation; the key was created using a derived key, which means additional, per-request information must be included in order to perform operations with the key"}
}
switch p.KDF {
case Kdf_hmac_sha256_counter:
prf := kdf.HMACSHA256PRF
prfLen := kdf.HMACSHA256PRFLen
- return kdf.CounterMode(prf, prfLen, p.Keys[ver].AESKey, context, 256)
+ return kdf.CounterMode(prf, prfLen, p.Keys[ver].Key, context, 256)
+
case Kdf_hkdf_sha256:
- reader := hkdf.New(sha256.New, p.Keys[ver].AESKey, nil, context)
+ reader := hkdf.New(sha256.New, p.Keys[ver].Key, nil, context)
derBytes := bytes.NewBuffer(nil)
derBytes.Grow(32)
limReader := &io.LimitedReader{
R: reader,
N: 32,
}
- n, err := derBytes.ReadFrom(limReader)
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("error reading returned derived bytes: %v", err)}
+
+ switch p.Type {
+ case KeyType_AES256_GCM96:
+ n, err := derBytes.ReadFrom(limReader)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("error reading returned derived bytes: %v", err)}
+ }
+ if n != 32 {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("unable to read enough derived bytes, needed 32, got %d", n)}
+ }
+ return derBytes.Bytes(), nil
+
+ case KeyType_ED25519:
+ // We use the limited reader containing the derived bytes as the
+ // "random" input to the generation function
+ _, pri, err := ed25519.GenerateKey(limReader)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("error generating derived key: %v", err)}
+ }
+ return pri, nil
+
+ default:
+ return nil, errutil.InternalError{Err: "unsupported key type for derivation"}
}
- if n != 32 {
- return nil, errutil.InternalError{Err: fmt.Sprintf("unable to read enough derived bytes, needed 32, got %d", n)}
- }
- return derBytes.Bytes(), nil
+
default:
return nil, errutil.InternalError{Err: "unsupported key derivation mode"}
}
}
-func (p *Policy) Encrypt(context, nonce []byte, value string) (string, error) {
+func (p *Policy) Encrypt(ver int, context, nonce []byte, value string) (string, error) {
if !p.Type.EncryptionSupported() {
return "", errutil.UserError{Err: fmt.Sprintf("message encryption not supported for key type %v", p.Type)}
}
@@ -479,8 +532,19 @@ func (p *Policy) Encrypt(context, nonce []byte, value string) (string, error) {
return "", errutil.UserError{Err: "failed to base64-decode plaintext"}
}
+ switch {
+ case ver == 0:
+ ver = p.LatestVersion
+ case ver < 0:
+ return "", errutil.UserError{Err: "requested version for encryption is negative"}
+ case ver > p.LatestVersion:
+ return "", errutil.UserError{Err: "requested version for encryption is higher than the latest key version"}
+ case ver < p.MinEncryptionVersion:
+ return "", errutil.UserError{Err: "requested version for encryption is less than the minimum encryption key version"}
+ }
+
// Derive the key that should be used
- key, err := p.DeriveKey(context, p.LatestVersion)
+ key, err := p.DeriveKey(context, ver)
if err != nil {
return "", err
}
@@ -537,7 +601,7 @@ func (p *Policy) Encrypt(context, nonce []byte, value string) (string, error) {
encoded := base64.StdEncoding.EncodeToString(full)
// Prepend some information
- encoded = "vault:v" + strconv.Itoa(p.LatestVersion) + ":" + encoded
+ encoded = "vault:v" + strconv.Itoa(ver) + ":" + encoded
return encoded, nil
}
@@ -630,11 +694,10 @@ func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) {
}
func (p *Policy) HMACKey(version int) ([]byte, error) {
- if version < p.MinDecryptionVersion {
- return nil, fmt.Errorf("key version disallowed by policy (minimum is %d)", p.MinDecryptionVersion)
- }
-
- if version > p.LatestVersion {
+ switch {
+ case version < 0:
+ return nil, fmt.Errorf("key version does not exist (cannot be negative)")
+ case version > p.LatestVersion:
return nil, fmt.Errorf("key version does not exist; latest key version is %d", p.LatestVersion)
}
@@ -645,15 +708,28 @@ func (p *Policy) HMACKey(version int) ([]byte, error) {
return p.Keys[version].HMACKey, nil
}
-func (p *Policy) Sign(hashedInput []byte) (string, error) {
+func (p *Policy) Sign(ver int, context, input []byte) (*SigningResult, error) {
if !p.Type.SigningSupported() {
- return "", fmt.Errorf("message signing not supported for key type %v", p.Type)
+ return nil, fmt.Errorf("message signing not supported for key type %v", p.Type)
+ }
+
+ switch {
+ case ver == 0:
+ ver = p.LatestVersion
+ case ver < 0:
+ return nil, errutil.UserError{Err: "requested version for signing is negative"}
+ case ver > p.LatestVersion:
+ return nil, errutil.UserError{Err: "requested version for signing is higher than the latest key version"}
+ case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion:
+ return nil, errutil.UserError{Err: "requested version for signing is less than the minimum encryption key version"}
}
var sig []byte
+ var pubKey []byte
+ var err error
switch p.Type {
case KeyType_ECDSA_P256:
- keyParams := p.Keys[p.LatestVersion]
+ keyParams := p.Keys[ver]
key := &ecdsa.PrivateKey{
PublicKey: ecdsa.PublicKey{
Curve: elliptic.P256(),
@@ -662,33 +738,57 @@ func (p *Policy) Sign(hashedInput []byte) (string, error) {
},
D: keyParams.EC_D,
}
- r, s, err := ecdsa.Sign(rand.Reader, key, hashedInput)
+ r, s, err := ecdsa.Sign(rand.Reader, key, input)
if err != nil {
- return "", err
+ return nil, err
}
marshaledSig, err := asn1.Marshal(ecdsaSignature{
R: r,
S: s,
})
if err != nil {
- return "", err
+ return nil, err
}
sig = marshaledSig
+ case KeyType_ED25519:
+ var key ed25519.PrivateKey
+
+ if p.Derived {
+ // Derive the key that should be used
+ var err error
+ key, err = p.DeriveKey(context, ver)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)}
+ }
+ pubKey = key.Public().(ed25519.PublicKey)
+ } else {
+ key = ed25519.PrivateKey(p.Keys[ver].Key)
+ }
+
+ // Per docs, do not pre-hash ed25519; it does two passes and performs
+ // its own hashing
+ sig, err = key.Sign(rand.Reader, input, crypto.Hash(0))
+ if err != nil {
+ return nil, err
+ }
+
default:
- return "", fmt.Errorf("unsupported key type %v", p.Type)
+ return nil, fmt.Errorf("unsupported key type %v", p.Type)
}
// Convert to base64
encoded := base64.StdEncoding.EncodeToString(sig)
- // Prepend some information
- encoded = "vault:v" + strconv.Itoa(p.LatestVersion) + ":" + encoded
+ res := &SigningResult{
+ Signature: "vault:v" + strconv.Itoa(ver) + ":" + encoded,
+ PublicKey: pubKey,
+ }
- return encoded, nil
+ return res, nil
}
-func (p *Policy) VerifySignature(hashedInput []byte, sig string) (bool, error) {
+func (p *Policy) VerifySignature(context, input []byte, sig string) (bool, error) {
if !p.Type.SigningSupported() {
return false, errutil.UserError{Err: fmt.Sprintf("message verification not supported for key type %v", p.Type)}
}
@@ -716,15 +816,15 @@ func (p *Policy) VerifySignature(hashedInput []byte, sig string) (bool, error) {
return false, errutil.UserError{Err: ErrTooOld}
}
+ sigBytes, err := base64.StdEncoding.DecodeString(splitVerSig[1])
+ if err != nil {
+ return false, errutil.UserError{Err: "invalid base64 signature value"}
+ }
+
switch p.Type {
case KeyType_ECDSA_P256:
- asn1Sig, err := base64.StdEncoding.DecodeString(splitVerSig[1])
- if err != nil {
- return false, errutil.UserError{Err: "invalid base64 signature value"}
- }
-
var ecdsaSig ecdsaSignature
- rest, err := asn1.Unmarshal(asn1Sig, &ecdsaSig)
+ rest, err := asn1.Unmarshal(sigBytes, &ecdsaSig)
if err != nil {
return false, errutil.UserError{Err: "supplied signature is invalid"}
}
@@ -739,7 +839,24 @@ func (p *Policy) VerifySignature(hashedInput []byte, sig string) (bool, error) {
Y: keyParams.EC_Y,
}
- return ecdsa.Verify(key, hashedInput, ecdsaSig.R, ecdsaSig.S), nil
+ return ecdsa.Verify(key, input, ecdsaSig.R, ecdsaSig.S), nil
+
+ case KeyType_ED25519:
+ var key ed25519.PrivateKey
+
+ if p.Derived {
+ // Derive the key that should be used
+ var err error
+ key, err = p.DeriveKey(context, ver)
+ if err != nil {
+ return false, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)}
+ }
+ } else {
+ key = ed25519.PrivateKey(p.Keys[ver].Key)
+ }
+
+ return ed25519.Verify(key.Public().(ed25519.PublicKey), input, sigBytes), nil
+
default:
return false, errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)}
}
@@ -756,8 +873,10 @@ func (p *Policy) Rotate(storage logical.Storage) error {
}
p.LatestVersion += 1
+ now := time.Now()
entry := KeyEntry{
- CreationTime: time.Now().Unix(),
+ CreationTime: now,
+ DeprecatedCreationTime: now.Unix(),
}
hmacKey, err := uuid.GenerateRandomBytes(32)
@@ -773,7 +892,7 @@ func (p *Policy) Rotate(storage logical.Storage) error {
if err != nil {
return err
}
- entry.AESKey = newKey
+ entry.Key = newKey
case KeyType_ECDSA_P256:
privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
@@ -796,6 +915,14 @@ func (p *Policy) Rotate(storage logical.Storage) error {
return fmt.Errorf("error PEM-encoding public key")
}
entry.FormattedPublicKey = string(pemBytes)
+
+ case KeyType_ED25519:
+ pub, pri, err := ed25519.GenerateKey(rand.Reader)
+ if err != nil {
+ return err
+ }
+ entry.Key = pri
+ entry.FormattedPublicKey = base64.StdEncoding.EncodeToString(pub)
}
p.Keys[p.LatestVersion] = entry
@@ -811,10 +938,12 @@ func (p *Policy) Rotate(storage logical.Storage) error {
}
func (p *Policy) MigrateKeyToKeysMap() {
+ now := time.Now()
p.Keys = keyEntryMap{
1: KeyEntry{
- AESKey: p.Key,
- CreationTime: time.Now().Unix(),
+ Key: p.Key,
+ CreationTime: now,
+ DeprecatedCreationTime: now.Unix(),
},
}
p.Key = nil
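(Aside: both Encrypt and Sign above now prepend "vault:v<ver>:" using the requested version rather than always the latest. A minimal sketch of parsing that prefix back out; splitVaultToken is our name, the vendored code performs the equivalent split inline when decrypting and verifying.)

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitVaultToken splits a "vault:v<N>:<base64>" value into its key
// version and payload.
func splitVaultToken(val string) (int, string, error) {
	parts := strings.SplitN(val, ":", 3)
	if len(parts) != 3 || parts[0] != "vault" || !strings.HasPrefix(parts[1], "v") {
		return 0, "", fmt.Errorf("invalid format")
	}
	ver, err := strconv.Atoi(parts[1][1:])
	if err != nil {
		return 0, "", fmt.Errorf("invalid version: %v", err)
	}
	return ver, parts[2], nil
}

func main() {
	ver, payload, err := splitVaultToken("vault:v2:SGVsbG8=")
	fmt.Println(ver, payload, err) // 2 SGVsbG8= <nil>
}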
diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go b/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go
index 600238b..7969cf9 100644
--- a/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go
+++ b/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go
@@ -40,10 +40,10 @@ func testKeyUpgradeCommon(t *testing.T, lm *LockManager) {
t.Fatal("expected an upsert")
}
- testBytes := make([]byte, len(p.Keys[1].AESKey))
- copy(testBytes, p.Keys[1].AESKey)
+ testBytes := make([]byte, len(p.Keys[1].Key))
+ copy(testBytes, p.Keys[1].Key)
- p.Key = p.Keys[1].AESKey
+ p.Key = p.Keys[1].Key
p.Keys = nil
p.MigrateKeyToKeysMap()
if p.Key != nil {
@@ -52,7 +52,7 @@ func testKeyUpgradeCommon(t *testing.T, lm *LockManager) {
if len(p.Keys) != 1 {
t.Fatal("policy.Keys is the wrong size")
}
- if !reflect.DeepEqual(testBytes, p.Keys[1].AESKey) {
+ if !reflect.DeepEqual(testBytes, p.Keys[1].Key) {
t.Fatal("key mismatch")
}
}
@@ -198,7 +198,8 @@ func Test_Archiving(t *testing.T) {
func testArchivingCommon(t *testing.T, lm *LockManager) {
resetKeysArchive()
- // First, we generate a policy and rotate it a number of times. Each time // we'll ensure that we have the expected number of keys in the archive and
+ // First, we generate a policy and rotate it a number of times. Each time
+ // we'll ensure that we have the expected number of keys in the archive and
// the main keys object, which without changing the min version should be
// zero and latest, respectively
@@ -330,14 +331,21 @@ func checkKeys(t *testing.T,
}
for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ {
+ // Travis has weird time zone issues and gets super unhappy
+ if !p.Keys[i].CreationTime.Equal(keysArchive[i].CreationTime) {
+ t.Fatalf("key %d not equivalent between policy keys and test keys archive; policy keys:\n%#v\ntest keys archive:\n%#v\n", i, p.Keys[i], keysArchive[i])
+ }
+ polKey := p.Keys[i]
+ polKey.CreationTime = keysArchive[i].CreationTime
+ p.Keys[i] = polKey
if !reflect.DeepEqual(p.Keys[i], keysArchive[i]) {
- t.Fatalf("key %d not equivalent between policy keys and test keys archive", i)
+ t.Fatalf("key %d not equivalent between policy keys and test keys archive; policy keys:\n%#v\ntest keys archive:\n%#v\n", i, p.Keys[i], keysArchive[i])
}
}
for i := 1; i < len(archive.Keys); i++ {
- if !reflect.DeepEqual(archive.Keys[i].AESKey, keysArchive[i].AESKey) {
- t.Fatalf("key %d not equivalent between policy archive and test keys archive", i)
+ if !reflect.DeepEqual(archive.Keys[i].Key, keysArchive[i].Key) {
+ t.Fatalf("key %d not equivalent between policy archive and test keys archive; policy archive:\n%#v\ntest keys archive:\n%#v\n", i, archive.Keys[i].Key, keysArchive[i].Key)
}
}
}
diff --git a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go
index 7ecf754..685624a 100644
--- a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go
+++ b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go
@@ -48,33 +48,36 @@ func (b *Builder) add(raw string) error {
return nil
}
- // If the arg is exactly "-", then we need to read from stdin
- // and merge the results into the resulting structure.
- if raw == "-" {
- if b.Stdin == nil {
- return fmt.Errorf("stdin is not supported")
- }
- if b.stdin {
- return fmt.Errorf("stdin already consumed")
- }
-
- b.stdin = true
- return b.addReader(b.Stdin)
- }
-
- // If the arg begins with "@" then we need to read a file directly
- if raw[0] == '@' {
- f, err := os.Open(raw[1:])
- if err != nil {
- return err
- }
- defer f.Close()
-
- return b.addReader(f)
- }
-
// Split into key/value
parts := strings.SplitN(raw, "=", 2)
+
+ // If the arg is exactly "-", then we need to read from stdin
+ // and merge the results into the resulting structure.
+ if len(parts) == 1 {
+ if raw == "-" {
+ if b.Stdin == nil {
+ return fmt.Errorf("stdin is not supported")
+ }
+ if b.stdin {
+ return fmt.Errorf("stdin already consumed")
+ }
+
+ b.stdin = true
+ return b.addReader(b.Stdin)
+ }
+
+ // If the arg begins with "@" then we need to read a file directly
+ if raw[0] == '@' {
+ f, err := os.Open(raw[1:])
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return b.addReader(f)
+ }
+ }
+
if len(parts) != 2 {
return fmt.Errorf("format must be key=value")
}
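(Aside: the effect of this reordering is that "-" and "@file" are only treated specially when the argument contains no "=". A small usage sketch against the vendored package, mirroring the new test below:)

package main

import (
	"bytes"
	"fmt"

	kvbuilder "github.com/hashicorp/vault/helper/kv-builder"
)

func main() {
	var b kvbuilder.Builder
	b.Stdin = bytes.NewBufferString(`{"from": "stdin"}`)

	// "@foo=bar" and "-foo=baz" now parse as plain key/value pairs; only
	// the bare "-" still reads and merges JSON from stdin.
	if err := b.Add("@foo=bar", "-foo=baz", "-"); err != nil {
		fmt.Println("err:", err)
		return
	}
	fmt.Println(b.Map()) // keys: @foo, -foo, from
}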
diff --git a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go
index 9b0cffb..aa31784 100644
--- a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go
+++ b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go
@@ -118,3 +118,22 @@ func TestBuilder_sameKeyMultipleTimes(t *testing.T) {
t.Fatalf("bad: %#v", actual)
}
}
+
+func TestBuilder_specialCharactersInKey(t *testing.T) {
+ var b Builder
+ b.Stdin = bytes.NewBufferString("{\"foo\": \"bay\"}")
+ err := b.Add("@foo=bar", "-foo=baz", "-")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := map[string]interface{}{
+ "@foo": "bar",
+ "-foo": "baz",
+ "foo": "bay",
+ }
+ actual := b.Map()
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
index 9ba2bf7..957d533 100644
--- a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
+++ b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
@@ -19,6 +19,9 @@ func ParseDurationSecond(in interface{}) (time.Duration, error) {
switch in.(type) {
case string:
inp := in.(string)
+ if inp == "" {
+ return time.Duration(0), nil
+ }
var err error
		// Look for a suffix, otherwise it's a plain second value
if strings.HasSuffix(inp, "s") || strings.HasSuffix(inp, "m") || strings.HasSuffix(inp, "h") {
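(Aside: with this change an empty string parses as a zero duration instead of an error. A quick sketch of the accepted inputs, assuming the vendored import path:)

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/parseutil"
)

func main() {
	// "" -> 0s (new behavior), "90" -> 90 seconds, "90s"/"2m" -> parsed durations.
	for _, in := range []interface{}{"", "90", "90s", "2m"} {
		d, err := parseutil.ParseDurationSecond(in)
		fmt.Printf("%-4v -> %v %v\n", in, d, err)
	}
}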
diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/logger.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/logger.go
new file mode 100644
index 0000000..fff8ff1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/pluginutil/logger.go
@@ -0,0 +1,158 @@
+package pluginutil
+
+import (
+ "bytes"
+ "fmt"
+ stdlog "log"
+ "strings"
+
+ hclog "github.com/hashicorp/go-hclog"
+ log "github.com/mgutz/logxi/v1"
+)
+
+// pluginLogFaker is a wrapper on logxi.Logger that
+// implements hclog.Logger
+type hclogFaker struct {
+ logger log.Logger
+
+ name string
+ implied []interface{}
+}
+
+func (f *hclogFaker) buildLog(msg string, args ...interface{}) (string, []interface{}) {
+ if f.name != "" {
+ msg = fmt.Sprintf("%s: %s", f.name, msg)
+ }
+ args = append(f.implied, args...)
+
+ return msg, args
+}
+
+func (f *hclogFaker) Trace(msg string, args ...interface{}) {
+ msg, args = f.buildLog(msg, args...)
+ f.logger.Trace(msg, args...)
+}
+
+func (f *hclogFaker) Debug(msg string, args ...interface{}) {
+ msg, args = f.buildLog(msg, args...)
+ f.logger.Debug(msg, args...)
+}
+
+func (f *hclogFaker) Info(msg string, args ...interface{}) {
+ msg, args = f.buildLog(msg, args...)
+ f.logger.Info(msg, args...)
+}
+
+func (f *hclogFaker) Warn(msg string, args ...interface{}) {
+ msg, args = f.buildLog(msg, args...)
+ f.logger.Warn(msg, args...)
+}
+
+func (f *hclogFaker) Error(msg string, args ...interface{}) {
+ msg, args = f.buildLog(msg, args...)
+ f.logger.Error(msg, args...)
+}
+
+func (f *hclogFaker) IsTrace() bool {
+ return f.logger.IsTrace()
+}
+
+func (f *hclogFaker) IsDebug() bool {
+ return f.logger.IsDebug()
+}
+
+func (f *hclogFaker) IsInfo() bool {
+ return f.logger.IsInfo()
+}
+
+func (f *hclogFaker) IsWarn() bool {
+ return f.logger.IsWarn()
+}
+
+func (f *hclogFaker) IsError() bool {
+	return !f.logger.IsTrace() && !f.logger.IsDebug() && !f.logger.IsInfo() && !f.logger.IsWarn()
+}
+
+func (f *hclogFaker) With(args ...interface{}) hclog.Logger {
+ var nf = *f
+ nf.implied = append(nf.implied, args...)
+	return &nf
+}
+
+func (f *hclogFaker) Named(name string) hclog.Logger {
+ var nf = *f
+ if nf.name != "" {
+ nf.name = nf.name + "." + name
+ }
+ return &nf
+}
+
+func (f *hclogFaker) ResetNamed(name string) hclog.Logger {
+ var nf = *f
+ nf.name = name
+ return &nf
+}
+
+func (f *hclogFaker) StandardLogger(opts *hclog.StandardLoggerOptions) *stdlog.Logger {
+ if opts == nil {
+ opts = &hclog.StandardLoggerOptions{}
+ }
+
+ return stdlog.New(&stdlogAdapter{f, opts.InferLevels}, "", 0)
+}
+
+// Provides an io.Writer to shim the data out of *log.Logger
+// and back into our Logger. This is basically the only way to
+// build upon *log.Logger.
+type stdlogAdapter struct {
+ hl hclog.Logger
+ inferLevels bool
+}
+
+// Take the data, infer the levels if configured, and send it through
+// a regular Logger
+func (s *stdlogAdapter) Write(data []byte) (int, error) {
+ str := string(bytes.TrimRight(data, " \t\n"))
+
+ if s.inferLevels {
+ level, str := s.pickLevel(str)
+ switch level {
+ case hclog.Trace:
+ s.hl.Trace(str)
+ case hclog.Debug:
+ s.hl.Debug(str)
+ case hclog.Info:
+ s.hl.Info(str)
+ case hclog.Warn:
+ s.hl.Warn(str)
+ case hclog.Error:
+ s.hl.Error(str)
+ default:
+ s.hl.Info(str)
+ }
+ } else {
+ s.hl.Info(str)
+ }
+
+ return len(data), nil
+}
+
+// Detect, based on conventions, what log level this is
+func (s *stdlogAdapter) pickLevel(str string) (hclog.Level, string) {
+ switch {
+ case strings.HasPrefix(str, "[DEBUG]"):
+ return hclog.Debug, strings.TrimSpace(str[7:])
+ case strings.HasPrefix(str, "[TRACE]"):
+ return hclog.Trace, strings.TrimSpace(str[7:])
+ case strings.HasPrefix(str, "[INFO]"):
+ return hclog.Info, strings.TrimSpace(str[6:])
+ case strings.HasPrefix(str, "[WARN]"):
+ return hclog.Warn, strings.TrimSpace(str[7:])
+ case strings.HasPrefix(str, "[ERROR]"):
+ return hclog.Error, strings.TrimSpace(str[7:])
+ case strings.HasPrefix(str, "[ERR]"):
+ return hclog.Error, strings.TrimSpace(str[5:])
+ default:
+ return hclog.Info, str
+ }
+}
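(Aside: a self-contained sketch of the level-inference convention used by pickLevel above; the helper returns a plain string here instead of an hclog.Level so the snippet runs standalone.)

package main

import (
	"fmt"
	"strings"
)

// pickLevel mirrors the adapter above: infer a level from a conventional
// "[LEVEL]" prefix and strip it, defaulting to info.
func pickLevel(str string) (string, string) {
	for _, p := range []string{"[TRACE]", "[DEBUG]", "[INFO]", "[WARN]", "[ERROR]", "[ERR]"} {
		if strings.HasPrefix(str, p) {
			return strings.Trim(p, "[]"), strings.TrimSpace(str[len(p):])
		}
	}
	return "INFO", str
}

func main() {
	fmt.Println(pickLevel("[WARN] disk almost full")) // WARN disk almost full
	fmt.Println(pickLevel("no prefix at all"))        // INFO no prefix at all
}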
diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/mlock.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/mlock.go
new file mode 100644
index 0000000..1660ca8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/pluginutil/mlock.go
@@ -0,0 +1,23 @@
+package pluginutil
+
+import (
+ "os"
+
+ "github.com/hashicorp/vault/helper/mlock"
+)
+
+var (
+ // PluginMlockEnabled is the ENV name used to pass the configuration for
+ // enabling mlock
+ PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED"
+)
+
+// OptionallyEnableMlock determines if mlock should be called, and if so enables
+// mlock.
+func OptionallyEnableMlock() error {
+ if os.Getenv(PluginMlockEnabled) == "true" {
+ return mlock.LockMemory()
+ }
+
+ return nil
+}
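(Aside: a minimal sketch of the intended flow; the server sets the env var for the child plugin process, and the plugin calls OptionallyEnableMlock early in main. Locking may fail without the necessary privileges, e.g. CAP_IPC_LOCK on Linux.)

package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/vault/helper/pluginutil"
)

func main() {
	// Normally the parent process sets this; done here only for illustration.
	os.Setenv(pluginutil.PluginMlockEnabled, "true")
	if err := pluginutil.OptionallyEnableMlock(); err != nil {
		fmt.Println("mlock:", err)
	}
}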
diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/runner.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/runner.go
new file mode 100644
index 0000000..2047651
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/pluginutil/runner.go
@@ -0,0 +1,166 @@
+package pluginutil
+
+import (
+ "crypto/sha256"
+ "crypto/tls"
+ "flag"
+ "fmt"
+ "os/exec"
+ "time"
+
+ plugin "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/wrapping"
+ log "github.com/mgutz/logxi/v1"
+)
+
+// Looker defines the plugin Lookup function that looks into the plugin catalog
+// for available plugins and returns a PluginRunner
+type Looker interface {
+ LookupPlugin(string) (*PluginRunner, error)
+}
+
+// RunnerUtil defines the functions needed by the runner to wrap the
+// metadata needed to run a plugin process. This includes looking up Mlock
+// configuration and wrapping data in a response-wrapped token.
+// logical.SystemView implementations satisfy this interface.
+type RunnerUtil interface {
+ ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error)
+ MlockEnabled() bool
+}
+
+// LookRunnerUtil defines the functions for both Looker and RunnerUtil
+type LookRunnerUtil interface {
+ Looker
+ RunnerUtil
+}
+
+// PluginRunner defines the metadata needed to run a plugin securely with
+// go-plugin.
+type PluginRunner struct {
+ Name string `json:"name" structs:"name"`
+ Command string `json:"command" structs:"command"`
+ Args []string `json:"args" structs:"args"`
+ Sha256 []byte `json:"sha256" structs:"sha256"`
+ Builtin bool `json:"builtin" structs:"builtin"`
+ BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"`
+}
+
+// Run takes a wrapper RunnerUtil instance along with the go-plugin parameters and
+// returns a configured plugin.Client with TLS configured and a wrapping token set
+// on PluginUnwrapTokenEnv for plugin process consumption.
+func (r *PluginRunner) Run(wrapper RunnerUtil, pluginMap map[string]plugin.Plugin, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) {
+ return r.runCommon(wrapper, pluginMap, hs, env, logger, false)
+}
+
+// RunMetadataMode returns a configured plugin.Client that will dispense a plugin
+// in metadata mode. PluginMetadaModeEnv is passed in as part of the Cmd to
+// plugin.Client and is consumed by the plugin process in pluginutil.VaultPluginTLSProvider.
+func (r *PluginRunner) RunMetadataMode(wrapper RunnerUtil, pluginMap map[string]plugin.Plugin, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) {
+	return r.runCommon(wrapper, pluginMap, hs, env, logger, true)
+}
+
+func (r *PluginRunner) runCommon(wrapper RunnerUtil, pluginMap map[string]plugin.Plugin, hs plugin.HandshakeConfig, env []string, logger log.Logger, isMetadataMode bool) (*plugin.Client, error) {
+ cmd := exec.Command(r.Command, r.Args...)
+ cmd.Env = append(cmd.Env, env...)
+
+ // Add the mlock setting to the ENV of the plugin
+ if wrapper.MlockEnabled() {
+ cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true"))
+ }
+
+ // Create logger for the plugin client
+ clogger := &hclogFaker{
+ logger: logger,
+ }
+ namedLogger := clogger.ResetNamed("plugin")
+
+ var clientTLSConfig *tls.Config
+ if !isMetadataMode {
+ // Add the metadata mode ENV and set it to false
+ cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMetadaModeEnv, "false"))
+
+ // Get a CA TLS Certificate
+ certBytes, key, err := generateCert()
+ if err != nil {
+ return nil, err
+ }
+
+ // Use CA to sign a client cert and return a configured TLS config
+ clientTLSConfig, err = createClientTLSConfig(certBytes, key)
+ if err != nil {
+ return nil, err
+ }
+
+ // Use CA to sign a server cert and wrap the values in a response wrapped
+ // token.
+ wrapToken, err := wrapServerConfig(wrapper, certBytes, key)
+ if err != nil {
+ return nil, err
+ }
+
+ // Add the response wrap token to the ENV of the plugin
+ cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken))
+ } else {
+ namedLogger = clogger.ResetNamed("plugin.metadata")
+ cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMetadaModeEnv, "true"))
+ }
+
+ secureConfig := &plugin.SecureConfig{
+ Checksum: r.Sha256,
+ Hash: sha256.New(),
+ }
+
+ clientConfig := &plugin.ClientConfig{
+ HandshakeConfig: hs,
+ Plugins: pluginMap,
+ Cmd: cmd,
+ SecureConfig: secureConfig,
+ TLSConfig: clientTLSConfig,
+ Logger: namedLogger,
+ }
+
+ client := plugin.NewClient(clientConfig)
+
+ return client, nil
+}
+
+type APIClientMeta struct {
+ // These are set by the command line flags.
+ flagCACert string
+ flagCAPath string
+ flagClientCert string
+ flagClientKey string
+ flagInsecure bool
+}
+
+func (f *APIClientMeta) FlagSet() *flag.FlagSet {
+ fs := flag.NewFlagSet("vault plugin settings", flag.ContinueOnError)
+
+ fs.StringVar(&f.flagCACert, "ca-cert", "", "")
+ fs.StringVar(&f.flagCAPath, "ca-path", "", "")
+ fs.StringVar(&f.flagClientCert, "client-cert", "", "")
+ fs.StringVar(&f.flagClientKey, "client-key", "", "")
+ fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "")
+
+ return fs
+}
+
+func (f *APIClientMeta) GetTLSConfig() *api.TLSConfig {
+ // If we need custom TLS configuration, then set it
+ if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure {
+ t := &api.TLSConfig{
+ CACert: f.flagCACert,
+ CAPath: f.flagCAPath,
+ ClientCert: f.flagClientCert,
+ ClientKey: f.flagClientKey,
+ TLSServerName: "",
+ Insecure: f.flagInsecure,
+ }
+
+ return t
+ }
+
+ return nil
+}
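(Aside: a sketch of how a plugin binary might consume these flags, assuming the vendored import path; the flag names come from FlagSet above.)

package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/vault/helper/pluginutil"
)

func main() {
	meta := &pluginutil.APIClientMeta{}
	if err := meta.FlagSet().Parse(os.Args[1:]); err != nil {
		fmt.Println("err:", err)
		return
	}
	// Nil unless at least one TLS-related flag was set.
	fmt.Printf("%+v\n", meta.GetTLSConfig())
}

Invoked, for example, as: plugin-binary -ca-cert /path/to/ca.pem -tls-skip-verify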
diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/tls.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/tls.go
new file mode 100644
index 0000000..112d33c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/pluginutil/tls.go
@@ -0,0 +1,242 @@
+package pluginutil
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/SermoDigital/jose/jws"
+ "github.com/hashicorp/errwrap"
+ uuid "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/certutil"
+)
+
+var (
+ // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the
+ // plugin.
+ PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN"
+
+ // PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded
+ // string. Used for testing.
+ PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM"
+
+	// PluginMetadaModeEnv is an ENV name used to disable TLS communication
+	// in order to bootstrap the mounting of plugins.
+ PluginMetadaModeEnv = "VAULT_PLUGIN_METADATA_MODE"
+)
+
+// generateCert is used internally to create certificates for the plugin
+// client and server.
+func generateCert() ([]byte, *ecdsa.PrivateKey, error) {
+ key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ host, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sn, err := certutil.GenerateSerialNumber()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ template := &x509.Certificate{
+ Subject: pkix.Name{
+ CommonName: host,
+ },
+ DNSNames: []string{host},
+ ExtKeyUsage: []x509.ExtKeyUsage{
+ x509.ExtKeyUsageClientAuth,
+ x509.ExtKeyUsageServerAuth,
+ },
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
+ SerialNumber: sn,
+ NotBefore: time.Now().Add(-30 * time.Second),
+ NotAfter: time.Now().Add(262980 * time.Hour),
+ IsCA: true,
+ }
+
+ certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
+ if err != nil {
+ return nil, nil, errwrap.Wrapf("unable to generate client certificate: {{err}}", err)
+ }
+
+ return certBytes, key, nil
+}
+
+// createClientTLSConfig creates a signed certificate and returns a configured
+// TLS config.
+func createClientTLSConfig(certBytes []byte, key *ecdsa.PrivateKey) (*tls.Config, error) {
+ clientCert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing generated plugin certificate: %v", err)
+ }
+
+ cert := tls.Certificate{
+ Certificate: [][]byte{certBytes},
+ PrivateKey: key,
+ Leaf: clientCert,
+ }
+
+ clientCertPool := x509.NewCertPool()
+ clientCertPool.AddCert(clientCert)
+
+ tlsConfig := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ RootCAs: clientCertPool,
+ ServerName: clientCert.Subject.CommonName,
+ MinVersion: tls.VersionTLS12,
+ }
+
+ tlsConfig.BuildNameToCertificate()
+
+ return tlsConfig, nil
+}
+
+// wrapServerConfig is used to create a server certificate and private key, then
+// wrap them in an unwrap token for later retrieval by the plugin.
+func wrapServerConfig(sys RunnerUtil, certBytes []byte, key *ecdsa.PrivateKey) (string, error) {
+ rawKey, err := x509.MarshalECPrivateKey(key)
+ if err != nil {
+ return "", err
+ }
+
+ wrapInfo, err := sys.ResponseWrapData(map[string]interface{}{
+ "ServerCert": certBytes,
+ "ServerKey": rawKey,
+ }, time.Second*60, true)
+ if err != nil {
+ return "", err
+ }
+
+ return wrapInfo.Token, nil
+}
+
+// VaultPluginTLSProvider is run inside a plugin and retrieves the response-wrapped
+// TLS certificate from Vault. It returns a configured TLS Config.
+func VaultPluginTLSProvider(apiTLSConfig *api.TLSConfig) func() (*tls.Config, error) {
+ if os.Getenv(PluginMetadaModeEnv) == "true" {
+ return nil
+ }
+
+ return func() (*tls.Config, error) {
+ unwrapToken := os.Getenv(PluginUnwrapTokenEnv)
+
+ // Parse the JWT and retrieve the vault address
+ wt, err := jws.ParseJWT([]byte(unwrapToken))
+ if err != nil {
+ return nil, fmt.Errorf("error decoding token: %s", err)
+ }
+ if wt == nil {
+ return nil, errors.New("nil decoded token")
+ }
+
+ addrRaw := wt.Claims().Get("addr")
+ if addrRaw == nil {
+ return nil, errors.New("decoded token does not contain primary cluster address")
+ }
+ vaultAddr, ok := addrRaw.(string)
+ if !ok {
+ return nil, errors.New("decoded token's address not valid")
+ }
+ if vaultAddr == "" {
+ return nil, errors.New(`no address for the vault found`)
+ }
+
+ // Sanity check the value
+ if _, err := url.Parse(vaultAddr); err != nil {
+ return nil, fmt.Errorf("error parsing the vault address: %s", err)
+ }
+
+ // Unwrap the token
+ clientConf := api.DefaultConfig()
+ clientConf.Address = vaultAddr
+ if apiTLSConfig != nil {
+ err := clientConf.ConfigureTLS(apiTLSConfig)
+ if err != nil {
+ return nil, errwrap.Wrapf("error configuring api client {{err}}", err)
+ }
+ }
+ client, err := api.NewClient(clientConf)
+ if err != nil {
+ return nil, errwrap.Wrapf("error during api client creation: {{err}}", err)
+ }
+
+ secret, err := client.Logical().Unwrap(unwrapToken)
+ if err != nil {
+ return nil, errwrap.Wrapf("error during token unwrap request: {{err}}", err)
+ }
+ if secret == nil {
+ return nil, errors.New("error during token unwrap request: secret is nil")
+ }
+
+ // Retrieve and parse the server's certificate
+ serverCertBytesRaw, ok := secret.Data["ServerCert"].(string)
+ if !ok {
+ return nil, errors.New("error unmarshalling certificate")
+ }
+
+ serverCertBytes, err := base64.StdEncoding.DecodeString(serverCertBytesRaw)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing certificate: %v", err)
+ }
+
+ serverCert, err := x509.ParseCertificate(serverCertBytes)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing certificate: %v", err)
+ }
+
+ // Retrieve and parse the server's private key
+ serverKeyB64, ok := secret.Data["ServerKey"].(string)
+ if !ok {
+ return nil, errors.New("error unmarshalling certificate")
+ }
+
+ serverKeyRaw, err := base64.StdEncoding.DecodeString(serverKeyB64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing certificate: %v", err)
+ }
+
+ serverKey, err := x509.ParseECPrivateKey(serverKeyRaw)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing certificate: %v", err)
+ }
+
+ // Add CA cert to the cert pool
+ caCertPool := x509.NewCertPool()
+ caCertPool.AddCert(serverCert)
+
+ // Build a certificate object out of the server's cert and private key.
+ cert := tls.Certificate{
+ Certificate: [][]byte{serverCertBytes},
+ PrivateKey: serverKey,
+ Leaf: serverCert,
+ }
+
+ // Setup TLS config
+ tlsConfig := &tls.Config{
+ ClientCAs: caCertPool,
+ RootCAs: caCertPool,
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ // TLS 1.2 minimum
+ MinVersion: tls.VersionTLS12,
+ Certificates: []tls.Certificate{cert},
+ }
+ tlsConfig.BuildNameToCertificate()
+
+ return tlsConfig, nil
+ }
+}
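(Aside: a rough sketch of wiring the provider into a plugin's main, assuming the ServeConfig shape of the go-plugin version vendored alongside this code; HandshakeConfig and Plugins are placeholders a real backend would fill in.)

package main

import (
	plugin "github.com/hashicorp/go-plugin"
	"github.com/hashicorp/vault/helper/pluginutil"
)

func main() {
	meta := &pluginutil.APIClientMeta{}
	_ = meta.FlagSet().Parse(nil) // flag parsing elided

	// Returns nil in metadata mode, which disables TLS for the handshake.
	tlsProvider := pluginutil.VaultPluginTLSProvider(meta.GetTLSConfig())

	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: plugin.HandshakeConfig{ProtocolVersion: 1}, // placeholder
		Plugins:         map[string]plugin.Plugin{},                 // placeholder
		TLSProvider:     tlsProvider,
	})
}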
diff --git a/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go b/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go
index 9ac9b93..f6d9f66 100644
--- a/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go
+++ b/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go
@@ -18,14 +18,23 @@ const (
// all other policies will be ignored, the result will contain
// just the 'root'. In cases where 'root' is not present, if
// 'default' policy is not already present, it will be added.
-func ParsePolicies(policiesRaw string) []string {
- if policiesRaw == "" {
+func ParsePolicies(policiesRaw interface{}) []string {
+ if policiesRaw == nil {
return []string{"default"}
}
- policies := strings.Split(policiesRaw, ",")
+ var policies []string
+ switch policiesRaw.(type) {
+ case string:
+ if policiesRaw.(string) == "" {
+ return []string{}
+ }
+ policies = strings.Split(policiesRaw.(string), ",")
+ case []string:
+ policies = policiesRaw.([]string)
+ }
- return SanitizePolicies(policies, true)
+ return SanitizePolicies(policies, false)
}
// SanitizePolicies performs the common input validation tasks
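(Aside: the visible behavior change is that an empty string now yields an empty slice rather than the default policy, and the final boolean to SanitizePolicies, which previously forced "default" into the result, is now false. A quick sketch, assuming the vendored import path:)

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/policyutil"
)

func main() {
	fmt.Println(policyutil.ParsePolicies(nil))                // [default]
	fmt.Println(policyutil.ParsePolicies(""))                 // [] (no longer [default])
	fmt.Println(policyutil.ParsePolicies("dev, ops ,dev"))    // sanitized and deduplicated
	fmt.Println(policyutil.ParsePolicies([]string{"a", "b"})) // slices pass straight through
}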
diff --git a/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go b/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go
new file mode 100644
index 0000000..5ff59b1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go
@@ -0,0 +1,107 @@
+package proxyutil
+
+import (
+ "fmt"
+ "net"
+ "sync"
+
+ proxyproto "github.com/armon/go-proxyproto"
+ "github.com/hashicorp/errwrap"
+ sockaddr "github.com/hashicorp/go-sockaddr"
+ "github.com/hashicorp/vault/helper/strutil"
+)
+
+// ProxyProtoConfig contains configuration for the PROXY protocol
+type ProxyProtoConfig struct {
+ sync.RWMutex
+ Behavior string
+ AuthorizedAddrs []*sockaddr.SockAddrMarshaler `json:"authorized_addrs"`
+}
+
+func (p *ProxyProtoConfig) SetAuthorizedAddrs(addrs interface{}) error {
+ p.AuthorizedAddrs = make([]*sockaddr.SockAddrMarshaler, 0)
+ stringAddrs := make([]string, 0)
+
+ switch addrs.(type) {
+ case string:
+ stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",")
+ if len(stringAddrs) == 0 {
+ return fmt.Errorf("unable to parse addresses from %v", addrs)
+ }
+
+ case []string:
+ stringAddrs = addrs.([]string)
+
+ case []interface{}:
+ for _, v := range addrs.([]interface{}) {
+ stringAddr, ok := v.(string)
+ if !ok {
+ return fmt.Errorf("error parsing %q as string")
+ }
+ stringAddrs = append(stringAddrs, stringAddr)
+ }
+
+ default:
+ return fmt.Errorf("unknown address input type %T", addrs)
+ }
+
+ for _, addr := range stringAddrs {
+ sa, err := sockaddr.NewSockAddr(addr)
+ if err != nil {
+ return errwrap.Wrapf("error parsing authorized address: {{err}}", err)
+ }
+ p.AuthorizedAddrs = append(p.AuthorizedAddrs, &sockaddr.SockAddrMarshaler{
+ SockAddr: sa,
+ })
+ }
+
+ return nil
+}
+
+// WrapInProxyProto wraps the given listener in the PROXY protocol. If behavior
+// is "allow_authorized" or "deny_unauthorized" it also configures a
+// SourceCheck based on the given ProxyProtoConfig. In an error case it returns
+// the original listener and the error.
+func WrapInProxyProto(listener net.Listener, config *ProxyProtoConfig) (net.Listener, error) {
+ config.Lock()
+ defer config.Unlock()
+
+ var newLn *proxyproto.Listener
+
+ switch config.Behavior {
+ case "use_always":
+ newLn = &proxyproto.Listener{
+ Listener: listener,
+ }
+
+ case "allow_authorized", "deny_unauthorized":
+ newLn = &proxyproto.Listener{
+ Listener: listener,
+ SourceCheck: func(addr net.Addr) (bool, error) {
+ config.RLock()
+ defer config.RUnlock()
+
+ sa, err := sockaddr.NewSockAddr(addr.String())
+ if err != nil {
+ return false, errwrap.Wrapf("error parsing remote address: {{err}}", err)
+ }
+
+ for _, authorizedAddr := range config.AuthorizedAddrs {
+ if authorizedAddr.Contains(sa) {
+ return true, nil
+ }
+ }
+
+ if config.Behavior == "allow_authorized" {
+ return false, nil
+ }
+
+ return false, proxyproto.ErrInvalidUpstream
+ },
+ }
+ default:
+ return listener, fmt.Errorf("unknown behavior type for proxy proto config")
+ }
+
+ return newLn, nil
+}
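(Aside: a minimal usage sketch, assuming the vendored import path; the behavior strings match the switch above.)

package main

import (
	"fmt"
	"net"

	"github.com/hashicorp/vault/helper/proxyutil"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	conf := &proxyutil.ProxyProtoConfig{Behavior: "allow_authorized"}
	if err := conf.SetAuthorizedAddrs("10.0.0.0/8,127.0.0.1"); err != nil {
		panic(err)
	}

	wrapped, err := proxyutil.WrapInProxyProto(ln, conf)
	fmt.Printf("%T %v\n", wrapped, err) // *proxyproto.Listener <nil>
}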
diff --git a/vendor/github.com/hashicorp/vault/helper/reload/reload.go b/vendor/github.com/hashicorp/vault/helper/reload/reload.go
new file mode 100644
index 0000000..cc450b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/reload/reload.go
@@ -0,0 +1,54 @@
+package reload
+
+import (
+ "crypto/tls"
+ "fmt"
+ "sync"
+)
+
+// ReloadFunc are functions that are called when a reload is requested
+type ReloadFunc func(map[string]interface{}) error
+
+// CertificateGetter satisfies ReloadFunc and its GetCertificate method
+// satisfies the tls.GetCertificate function signature. Currently it does not
+// allow changing paths after the fact.
+type CertificateGetter struct {
+ sync.RWMutex
+
+ cert *tls.Certificate
+
+ certFile string
+ keyFile string
+}
+
+func NewCertificateGetter(certFile, keyFile string) *CertificateGetter {
+ return &CertificateGetter{
+ certFile: certFile,
+ keyFile: keyFile,
+ }
+}
+
+func (cg *CertificateGetter) Reload(_ map[string]interface{}) error {
+ cert, err := tls.LoadX509KeyPair(cg.certFile, cg.keyFile)
+ if err != nil {
+ return err
+ }
+
+ cg.Lock()
+ defer cg.Unlock()
+
+ cg.cert = &cert
+
+ return nil
+}
+
+func (cg *CertificateGetter) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+ cg.RLock()
+ defer cg.RUnlock()
+
+ if cg.cert == nil {
+ return nil, fmt.Errorf("nil certificate")
+ }
+
+ return cg.cert, nil
+}
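(Aside: a sketch of the intended use, with hypothetical certificate paths; Reload is typically re-run on SIGHUP so the listener picks up replaced files without restarting.)

package main

import (
	"crypto/tls"
	"fmt"

	"github.com/hashicorp/vault/helper/reload"
)

func main() {
	cg := reload.NewCertificateGetter("server.crt", "server.key") // hypothetical paths

	// Load the pair once up front; GetCertificate errors until this succeeds.
	if err := cg.Reload(nil); err != nil {
		fmt.Println("reload:", err)
		return
	}

	// Hand the getter to the TLS stack; each handshake sees the latest cert.
	tlsConf := &tls.Config{GetCertificate: cg.GetCertificate}
	_ = tlsConf
}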
diff --git a/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go
index 7c7f64d..b5e69c4 100644
--- a/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go
+++ b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go
@@ -29,6 +29,19 @@ func StrListSubset(super, sub []string) bool {
return true
}
+// Parses a comma separated list of strings into a slice of strings.
+// The return slice will be sorted and will not contain duplicate or
+// empty items.
+func ParseDedupAndSortStrings(input string, sep string) []string {
+ input = strings.TrimSpace(input)
+ parsed := []string{}
+ if input == "" {
+ // Don't return nil
+ return parsed
+ }
+ return RemoveDuplicates(strings.Split(input, sep), false)
+}
+
// Parses a comma separated list of strings into a slice of strings.
// The return slice will be sorted and will not contain duplicate or
// empty items. The values will be converted to lower case.
@@ -56,6 +69,10 @@ func ParseKeyValues(input string, out map[string]string, sep string) error {
for _, keyValue := range keyValues {
shards := strings.Split(keyValue, "=")
+ if len(shards) != 2 {
+ return fmt.Errorf("invalid format")
+ }
+
key := strings.TrimSpace(shards[0])
value := strings.TrimSpace(shards[1])
if key == "" || value == "" {
@@ -286,3 +303,11 @@ func GlobbedStringsMatch(item, val string) bool {
return val == item
}
+
+// AppendIfMissing adds a string to a slice if the given string is not present
+func AppendIfMissing(slice []string, i string) []string {
+ if StrListContains(slice, i) {
+ return slice
+ }
+ return append(slice, i)
+}
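(Aside: a quick sketch of the two new helpers, assuming the vendored import path:)

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/strutil"
)

func main() {
	keys := []string{"foo"}
	keys = strutil.AppendIfMissing(keys, "bar")
	keys = strutil.AppendIfMissing(keys, "foo") // no-op, already present
	fmt.Println(keys)                           // [foo bar]

	// Case is preserved, unlike the lower-casing variant declared below it.
	fmt.Println(strutil.ParseDedupAndSortStrings("b,a,,B,b", ",")) // sorted, deduped, no empties
}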
diff --git a/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go b/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go
index 9fd3bef..ce02719 100644
--- a/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go
+++ b/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go
@@ -139,7 +139,7 @@ func TestStrutil_ParseKeyValues(t *testing.T) {
input = "key1 = value1, key2 = "
err = ParseKeyValues(input, actual, ",")
if err == nil {
- t.Fatal("expected an error")
+ t.Fatalf("expected an error")
}
for k, _ := range actual {
delete(actual, k)
@@ -148,11 +148,17 @@ func TestStrutil_ParseKeyValues(t *testing.T) {
input = "key1 = value1, = value2 "
err = ParseKeyValues(input, actual, ",")
if err == nil {
- t.Fatal("expected an error")
+ t.Fatalf("expected an error")
}
for k, _ := range actual {
delete(actual, k)
}
+
+ input = "key1"
+ err = ParseKeyValues(input, actual, ",")
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
}
func TestStrutil_ParseArbitraryKeyValues(t *testing.T) {
@@ -324,3 +330,40 @@ func TestTrimStrings(t *testing.T) {
t.Fatalf("Bad TrimStrings: expected:%#v, got:%#v", expected, actual)
}
}
+
+func TestStrutil_AppendIfMissing(t *testing.T) {
+ keys := []string{}
+
+ keys = AppendIfMissing(keys, "foo")
+
+ if len(keys) != 1 {
+ t.Fatalf("expected slice to be length of 1: %v", keys)
+ }
+ if keys[0] != "foo" {
+ t.Fatalf("expected slice to contain key 'foo': %v", keys)
+ }
+
+ keys = AppendIfMissing(keys, "bar")
+
+ if len(keys) != 2 {
+ t.Fatalf("expected slice to be length of 2: %v", keys)
+ }
+ if keys[0] != "foo" {
+ t.Fatalf("expected slice to contain key 'foo': %v", keys)
+ }
+ if keys[1] != "bar" {
+ t.Fatalf("expected slice to contain key 'bar': %v", keys)
+ }
+
+ keys = AppendIfMissing(keys, "foo")
+
+ if len(keys) != 2 {
+ t.Fatalf("expected slice to still be length of 2: %v", keys)
+ }
+ if keys[0] != "foo" {
+ t.Fatalf("expected slice to still contain key 'foo': %v", keys)
+ }
+ if keys[1] != "bar" {
+ t.Fatalf("expected slice to still contain key 'bar': %v", keys)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/tlsutil/tls.go b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil.go
similarity index 83%
rename from vendor/github.com/hashicorp/vault/helper/tlsutil/tls.go
rename to vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil.go
index 5cbd060..08b3ebd 100644
--- a/vendor/github.com/hashicorp/vault/helper/tlsutil/tls.go
+++ b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil.go
@@ -23,6 +23,7 @@ func ParseCiphers(cipherStr string) ([]uint16, error) {
"TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
"TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
"TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
"TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
"TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
@@ -32,10 +33,14 @@ func ParseCiphers(cipherStr string) ([]uint16, error) {
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
}
for _, cipher := range ciphers {
if v, ok := cipherMap[cipher]; ok {
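(Aside: the newly mapped suites parse like any other; a one-liner against the vendored package:)

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/tlsutil"
)

func main() {
	ciphers, err := tlsutil.ParseCiphers("TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_RSA_WITH_AES_128_CBC_SHA256")
	fmt.Println(len(ciphers), err) // 2 <nil>
}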
diff --git a/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go
index a8e9e77..79aac9b 100644
--- a/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go
+++ b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go
@@ -7,12 +7,12 @@ import (
)
func TestParseCiphers(t *testing.T) {
- testOk := "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+ testOk := "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"
v, err := ParseCiphers(testOk)
if err != nil {
t.Fatal(err)
}
- if len(v) != 12 {
+ if len(v) != 17 {
t.Fatal("missed ciphers after parse")
}
diff --git a/vendor/github.com/hashicorp/vault/helper/wrapping/wrapinfo.go b/vendor/github.com/hashicorp/vault/helper/wrapping/wrapinfo.go
new file mode 100644
index 0000000..2242c7b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/wrapping/wrapinfo.go
@@ -0,0 +1,27 @@
+package wrapping
+
+import "time"
+
+type ResponseWrapInfo struct {
+	// TTL is the desired TTL of the wrapping token; setting it to a non-zero
+	// value specifies that the response should be wrapped.
+ TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
+
+ // The token containing the wrapped response
+ Token string `json:"token" structs:"token" mapstructure:"token"`
+
+ // The creation time. This can be used with the TTL to figure out an
+ // expected expiration.
+ CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"`
+
+ // If the contained response is the output of a token creation call, the
+ // created token's accessor will be accessible here
+ WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor"`
+
+ // The format to use. This doesn't get returned, it's only internal.
+ Format string `json:"format" structs:"format" mapstructure:"format"`
+
+ // CreationPath is the original request path that was used to create
+ // the wrapped response.
+ CreationPath string `json:"creation_path" structs:"creation_path" mapstructure:"creation_path"`
+}
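(Aside: as the comment notes, CreationTime plus TTL gives the expected expiration; a trivial sketch with placeholder values:)

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/helper/wrapping"
)

func main() {
	info := &wrapping.ResponseWrapInfo{
		TTL:          5 * time.Minute,
		Token:        "example-token", // placeholder
		CreationTime: time.Now(),
	}
	fmt.Println("expires at:", info.CreationTime.Add(info.TTL))
}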
diff --git a/vendor/github.com/hashicorp/vault/http/cors.go b/vendor/github.com/hashicorp/vault/http/cors.go
new file mode 100644
index 0000000..a01228b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/cors.go
@@ -0,0 +1,62 @@
+package http
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/vault"
+)
+
+var allowedMethods = []string{
+ http.MethodDelete,
+ http.MethodGet,
+ http.MethodOptions,
+ http.MethodPost,
+ http.MethodPut,
+ "LIST", // LIST is not an official HTTP method, but Vault supports it.
+}
+
+func wrapCORSHandler(h http.Handler, core *vault.Core) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ corsConf := core.CORSConfig()
+
+ origin := req.Header.Get("Origin")
+ requestMethod := req.Header.Get("Access-Control-Request-Method")
+
+		// If CORS is not enabled or if no Origin header is present (i.e. the request
+		// is from the Vault CLI; a browser will always send an Origin header), pass
+		// the request through to the wrapped handler unmodified.
+ if !corsConf.IsEnabled() || origin == "" {
+ h.ServeHTTP(w, req)
+ return
+ }
+
+ // Return a 403 if the origin is not allowed to make cross-origin requests.
+ if !corsConf.IsValidOrigin(origin) {
+ respondError(w, http.StatusForbidden, fmt.Errorf("origin not allowed"))
+ return
+ }
+
+ if req.Method == http.MethodOptions && !strutil.StrListContains(allowedMethods, requestMethod) {
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ return
+ }
+
+ w.Header().Set("Access-Control-Allow-Origin", origin)
+ w.Header().Set("Vary", "Origin")
+
+ // apply headers for preflight requests
+ if req.Method == http.MethodOptions {
+ w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ","))
+ w.Header().Set("Access-Control-Allow-Headers", strings.Join(corsConf.AllowedHeaders, ","))
+ w.Header().Set("Access-Control-Max-Age", "300")
+
+ return
+ }
+
+ h.ServeHTTP(w, req)
+ return
+ })
+}
diff --git a/vendor/github.com/hashicorp/vault/http/forwarding_test.go b/vendor/github.com/hashicorp/vault/http/forwarding_test.go
index fdc3b76..4f1aefe 100644
--- a/vendor/github.com/hashicorp/vault/http/forwarding_test.go
+++ b/vendor/github.com/hashicorp/vault/http/forwarding_test.go
@@ -8,7 +8,6 @@ import (
"io"
"math/rand"
"net/http"
- "os"
"strings"
"sync"
"sync/atomic"
@@ -27,10 +26,6 @@ import (
)
func TestHTTP_Fallback_Bad_Address(t *testing.T) {
- handler1 := http.NewServeMux()
- handler2 := http.NewServeMux()
- handler3 := http.NewServeMux()
-
coreConfig := &vault.CoreConfig{
LogicalBackends: map[string]logical.Factory{
"transit": transit.Factory,
@@ -38,22 +33,17 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) {
ClusterAddr: "https://127.3.4.1:8382",
}
- // Chicken-and-egg: Handler needs a core. So we create handlers first, then
- // add routes chained to a Handler-created handler.
- cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true)
- for _, core := range cores {
- defer core.CloseListeners()
- }
- handler1.Handle("/", Handler(cores[0].Core))
- handler2.Handle("/", Handler(cores[1].Core))
- handler3.Handle("/", Handler(cores[2].Core))
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+ cores := cluster.Cores
// make it easy to get access to the active
core := cores[0].Core
vault.TestWaitActive(t, core)
- root := cores[0].Root
-
addrs := []string{
fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port),
fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port),
@@ -68,7 +58,7 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- client.SetToken(root)
+ client.SetToken(cluster.RootToken)
secret, err := client.Auth().Token().LookupSelf()
if err != nil {
@@ -77,17 +67,13 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) {
if secret == nil {
t.Fatal("secret is nil")
}
- if secret.Data["id"].(string) != root {
+ if secret.Data["id"].(string) != cluster.RootToken {
t.Fatal("token mismatch")
}
}
}
func TestHTTP_Fallback_Disabled(t *testing.T) {
- handler1 := http.NewServeMux()
- handler2 := http.NewServeMux()
- handler3 := http.NewServeMux()
-
coreConfig := &vault.CoreConfig{
LogicalBackends: map[string]logical.Factory{
"transit": transit.Factory,
@@ -95,22 +81,17 @@ func TestHTTP_Fallback_Disabled(t *testing.T) {
ClusterAddr: "empty",
}
- // Chicken-and-egg: Handler needs a core. So we create handlers first, then
- // add routes chained to a Handler-created handler.
- cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true)
- for _, core := range cores {
- defer core.CloseListeners()
- }
- handler1.Handle("/", Handler(cores[0].Core))
- handler2.Handle("/", Handler(cores[1].Core))
- handler3.Handle("/", Handler(cores[2].Core))
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+ cores := cluster.Cores
// make it easy to get access to the active
core := cores[0].Core
vault.TestWaitActive(t, core)
- root := cores[0].Root
-
addrs := []string{
fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port),
fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port),
@@ -125,7 +106,7 @@ func TestHTTP_Fallback_Disabled(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- client.SetToken(root)
+ client.SetToken(cluster.RootToken)
secret, err := client.Auth().Token().LookupSelf()
if err != nil {
@@ -134,7 +115,7 @@ func TestHTTP_Fallback_Disabled(t *testing.T) {
if secret == nil {
t.Fatal("secret is nil")
}
- if secret.Data["id"].(string) != root {
+ if secret.Data["id"].(string) != cluster.RootToken {
t.Fatal("token mismatch")
}
}
@@ -143,49 +124,31 @@ func TestHTTP_Fallback_Disabled(t *testing.T) {
// This function recreates the fuzz testing from transit to pipe a large
// number of requests from the standbys to the active node.
func TestHTTP_Forwarding_Stress(t *testing.T) {
- testHTTP_Forwarding_Stress_Common(t, false, false, 50)
- testHTTP_Forwarding_Stress_Common(t, false, true, 50)
- testHTTP_Forwarding_Stress_Common(t, true, false, 50)
- testHTTP_Forwarding_Stress_Common(t, true, true, 50)
- os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "")
+ testHTTP_Forwarding_Stress_Common(t, false, 50)
+ testHTTP_Forwarding_Stress_Common(t, true, 50)
}
-func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uint64) {
+func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64) {
testPlaintext := "the quick brown fox"
testPlaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA=="
- if rpc {
- os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "1")
- } else {
- os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "")
- }
-
- handler1 := http.NewServeMux()
- handler2 := http.NewServeMux()
- handler3 := http.NewServeMux()
-
coreConfig := &vault.CoreConfig{
LogicalBackends: map[string]logical.Factory{
"transit": transit.Factory,
},
}
- // Chicken-and-egg: Handler needs a core. So we create handlers first, then
- // add routes chained to a Handler-created handler.
- cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true)
- for _, core := range cores {
- defer core.CloseListeners()
- }
- handler1.Handle("/", Handler(cores[0].Core))
- handler2.Handle("/", Handler(cores[1].Core))
- handler3.Handle("/", Handler(cores[2].Core))
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+ cores := cluster.Cores
// make it easy to get access to the active
core := cores[0].Core
vault.TestWaitActive(t, core)
- root := cores[0].Root
-
wg := sync.WaitGroup{}
funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"}
@@ -216,7 +179,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uin
if err != nil {
t.Fatal(err)
}
- req.Header.Set(AuthHeaderName, root)
+ req.Header.Set(AuthHeaderName, cluster.RootToken)
_, err = client.Do(req)
if err != nil {
t.Fatal(err)
@@ -265,7 +228,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uin
if err != nil {
return nil, err
}
- req.Header.Set(AuthHeaderName, root)
+ req.Header.Set(AuthHeaderName, cluster.RootToken)
resp, err := client.Do(req)
if err != nil {
return nil, err
@@ -465,40 +428,31 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uin
wg.Wait()
if totalOps == 0 || totalOps != successfulOps {
- t.Fatalf("total/successful ops zero or mismatch: %d/%d; rpc: %t, parallel: %t, num %d", totalOps, successfulOps, rpc, parallel, num)
+ t.Fatalf("total/successful ops zero or mismatch: %d/%d; parallel: %t, num %d", totalOps, successfulOps, parallel, num)
}
- t.Logf("total operations tried: %d, total successful: %d; rpc: %t, parallel: %t, num %d", totalOps, successfulOps, rpc, parallel, num)
+ t.Logf("total operations tried: %d, total successful: %d; parallel: %t, num %d", totalOps, successfulOps, parallel, num)
}
// This tests TLS connection state forwarding by ensuring that we can use a
// client TLS to authenticate against the cert backend
func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
- handler1 := http.NewServeMux()
- handler2 := http.NewServeMux()
- handler3 := http.NewServeMux()
-
coreConfig := &vault.CoreConfig{
CredentialBackends: map[string]logical.Factory{
"cert": credCert.Factory,
},
}
- // Chicken-and-egg: Handler needs a core. So we create handlers first, then
- // add routes chained to a Handler-created handler.
- cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true)
- for _, core := range cores {
- defer core.CloseListeners()
- }
- handler1.Handle("/", Handler(cores[0].Core))
- handler2.Handle("/", Handler(cores[1].Core))
- handler3.Handle("/", Handler(cores[2].Core))
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+ cores := cluster.Cores
// make it easy to get access to the active
core := cores[0].Core
vault.TestWaitActive(t, core)
- root := cores[0].Root
-
transport := cleanhttp.DefaultTransport()
transport.TLSClientConfig = cores[0].TLSConfig
if err := http2.ConfigureTransport(transport); err != nil {
@@ -514,7 +468,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- req.Header.Set(AuthHeaderName, root)
+ req.Header.Set(AuthHeaderName, cluster.RootToken)
_, err = client.Do(req)
if err != nil {
t.Fatal(err)
@@ -525,7 +479,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
Policies string `json:"policies"`
}
encodedCertConfig, err := json.Marshal(&certConfig{
- Certificate: vault.TestClusterCACert,
+ Certificate: string(cluster.CACertPEM),
Policies: "default",
})
if err != nil {
@@ -536,7 +490,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- req.Header.Set(AuthHeaderName, root)
+ req.Header.Set(AuthHeaderName, cluster.RootToken)
_, err = client.Do(req)
if err != nil {
t.Fatal(err)
@@ -559,7 +513,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
},
}
- //cores[0].Logger().Printf("root token is %s", root)
+ //cores[0].Logger().Printf("cluster.RootToken token is %s", cluster.RootToken)
//time.Sleep(4 * time.Hour)
for _, addr := range addrs {
@@ -595,3 +549,27 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
}
}
}
+
+func TestHTTP_Forwarding_HelpOperation(t *testing.T) {
+ cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
+ HandlerFunc: Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+ cores := cluster.Cores
+
+ vault.TestWaitActive(t, cores[0].Core)
+
+ testHelp := func(client *api.Client) {
+ help, err := client.Help("auth/token")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if help == nil {
+ t.Fatal("help was nil")
+ }
+ }
+
+ testHelp(cores[0].Client)
+ testHelp(cores[1].Client)
+}
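
The recurring change in these tests is the move from hand-built ServeMux handlers plus vault.TestCluster to vault.NewTestCluster. A condensed sketch of the new boilerplate, using only identifiers visible in this diff; the test name and final assertion are invented for illustration.

package http

import (
	"testing"

	"github.com/hashicorp/vault/vault"
)

// TestSketch_NewTestCluster condenses the new setup pattern.
func TestSketch_NewTestCluster(t *testing.T) {
	cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
		HandlerFunc: Handler, // builds the http.Handler for each core
	})
	cluster.Start()
	defer cluster.Cleanup()

	vault.TestWaitActive(t, cluster.Cores[0].Core)

	// cluster.RootToken replaces the old cores[0].Root field.
	client := cluster.Cores[0].Client
	client.SetToken(cluster.RootToken)

	if _, err := client.Auth().Token().LookupSelf(); err != nil {
		t.Fatal(err)
	}
}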
diff --git a/vendor/github.com/hashicorp/vault/http/handler.go b/vendor/github.com/hashicorp/vault/http/handler.go
index fb9b7a8..6290768 100644
--- a/vendor/github.com/hashicorp/vault/http/handler.go
+++ b/vendor/github.com/hashicorp/vault/http/handler.go
@@ -10,8 +10,8 @@ import (
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
)
@@ -46,10 +46,11 @@ func Handler(core *vault.Core) http.Handler {
mux.Handle("/v1/sys/init", handleSysInit(core))
mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core))
mux.Handle("/v1/sys/seal", handleSysSeal(core))
- mux.Handle("/v1/sys/step-down", handleSysStepDown(core))
+ mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core)))
mux.Handle("/v1/sys/unseal", handleSysUnseal(core))
mux.Handle("/v1/sys/renew", handleRequestForwarding(core, handleLogical(core, false, nil)))
mux.Handle("/v1/sys/renew/", handleRequestForwarding(core, handleLogical(core, false, nil)))
+ mux.Handle("/v1/sys/leases/", handleRequestForwarding(core, handleLogical(core, false, nil)))
mux.Handle("/v1/sys/leader", handleSysLeader(core))
mux.Handle("/v1/sys/health", handleSysHealth(core))
mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core, handleSysGenerateRootAttempt(core)))
@@ -67,10 +68,11 @@ func Handler(core *vault.Core) http.Handler {
// Wrap the handler in another handler to trigger all help paths.
helpWrappedHandler := wrapHelpHandler(mux, core)
+ corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core)
// Wrap the help-wrapped handler in one more layer: a generic
// handler
- genericWrappedHandler := wrapGenericHandler(helpWrappedHandler)
+ genericWrappedHandler := wrapGenericHandler(corsWrappedHandler)
return genericWrappedHandler
}
@@ -152,7 +154,7 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle
// Note: in an HA setup, this call will also ensure that connections to
// the leader are set up, as that happens once the advertised cluster
// values are read during this function
- isLeader, leaderAddr, err := core.Leader()
+ isLeader, leaderAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve request normally
@@ -169,7 +171,7 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle
return
}
if leaderAddr == "" {
- respondError(w, http.StatusInternalServerError, fmt.Errorf("node not active but active node not found"))
+ respondError(w, http.StatusInternalServerError, fmt.Errorf("local node not active but active cluster node not found"))
return
}
@@ -221,7 +223,7 @@ func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *l
// respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby
func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) {
// Request the leader address
- _, redirectAddr, err := core.Leader()
+ _, redirectAddr, _, err := core.Leader()
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
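
core.Leader() now returns the leader's cluster address as a third value, so every call site gains one assignment. A hedged sketch of an updated caller; the helper function itself is hypothetical.

package handlerdemo

import "github.com/hashicorp/vault/vault"

// redirectTarget shows the updated four-value Leader() signature.
func redirectTarget(core *vault.Core) (string, error) {
	isLeader, leaderAddr, clusterAddr, err := core.Leader()
	if err != nil {
		return "", err
	}
	if isLeader {
		return "", nil // we are the active node; no redirect needed
	}
	_ = clusterAddr // only the sys/leader endpoint surfaces this so far
	return leaderAddr, nil
}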
diff --git a/vendor/github.com/hashicorp/vault/http/handler_test.go b/vendor/github.com/hashicorp/vault/http/handler_test.go
index 149e603..8eae984 100644
--- a/vendor/github.com/hashicorp/vault/http/handler_test.go
+++ b/vendor/github.com/hashicorp/vault/http/handler_test.go
@@ -6,6 +6,7 @@ import (
"net/http"
"net/http/httptest"
"reflect"
+ "strings"
"testing"
"github.com/hashicorp/go-cleanhttp"
@@ -14,6 +15,87 @@ import (
"github.com/hashicorp/vault/vault"
)
+func TestHandler_cors(t *testing.T) {
+ core, _, _ := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+
+ // Enable CORS and allow from any origin for testing.
+ corsConfig := core.CORSConfig()
+ err := corsConfig.Enable([]string{addr}, nil)
+ if err != nil {
+ t.Fatalf("Error enabling CORS: %s", err)
+ }
+
+ req, err := http.NewRequest(http.MethodOptions, addr+"/v1/sys/seal-status", nil)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ req.Header.Set("Origin", "BAD ORIGIN")
+
+ // Requests from unacceptable origins will be rejected with a 403.
+ client := cleanhttp.DefaultClient()
+ resp, err := client.Do(req)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if resp.StatusCode != http.StatusForbidden {
+ t.Fatalf("Bad status:\nexpected: 403 Forbidden\nactual: %s", resp.Status)
+ }
+
+ //
+ // Test preflight requests
+ //
+
+ // Set a valid origin
+ req.Header.Set("Origin", addr)
+
+ // Server should NOT accept arbitrary methods.
+ req.Header.Set("Access-Control-Request-Method", "FOO")
+
+ client = cleanhttp.DefaultClient()
+ resp, err = client.Do(req)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Fail if an arbitrary method is accepted.
+ if resp.StatusCode != http.StatusMethodNotAllowed {
+ t.Fatalf("Bad status:\nexpected: 405 Method Not Allowed\nactual: %s", resp.Status)
+ }
+
+ // Server SHOULD accept acceptable methods.
+ req.Header.Set("Access-Control-Request-Method", http.MethodPost)
+
+ client = cleanhttp.DefaultClient()
+ resp, err = client.Do(req)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ //
+ // Test that the CORS headers are applied correctly.
+ //
+ expHeaders := map[string]string{
+ "Access-Control-Allow-Origin": addr,
+ "Access-Control-Allow-Headers": strings.Join(vault.StdAllowedHeaders, ","),
+ "Access-Control-Max-Age": "300",
+ "Vary": "Origin",
+ }
+
+ for expHeader, expected := range expHeaders {
+ actual := resp.Header.Get(expHeader)
+ if actual == "" {
+ t.Fatalf("bad:\nHeader: %#v was not on response.", expHeader)
+ }
+
+ if actual != expected {
+ t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n", expected, actual)
+ }
+ }
+}
+
func TestHandler_CacheControlNoStore(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
@@ -75,8 +157,8 @@ func TestSysMounts_headerAuth(t *testing.T) {
"auth": nil,
"data": map[string]interface{}{
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -106,8 +188,8 @@ func TestSysMounts_headerAuth(t *testing.T) {
},
},
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -140,6 +222,13 @@ func TestSysMounts_headerAuth(t *testing.T) {
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
+ for k, v := range actual["data"].(map[string]interface{}) {
+ if v.(map[string]interface{})["accessor"] == "" {
+ t.Fatalf("no accessor from %s", k)
+ }
+ expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ }
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n", expected, actual)
@@ -194,6 +283,12 @@ func TestSysMounts_headerAuth_Wrapped(t *testing.T) {
}
expected["wrap_info"].(map[string]interface{})["creation_time"] = actualCreationTime
+ actualCreationPath, ok := actual["wrap_info"].(map[string]interface{})["creation_path"]
+ if !ok || actualCreationPath == "" {
+ t.Fatal("creation_path missing in wrap info")
+ }
+ expected["wrap_info"].(map[string]interface{})["creation_path"] = actualCreationPath
+
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n%T %T", expected, actual, actual["warnings"], actual["data"])
}
diff --git a/vendor/github.com/hashicorp/vault/http/help.go b/vendor/github.com/hashicorp/vault/http/help.go
index f0ca8b1..1c3a956 100644
--- a/vendor/github.com/hashicorp/vault/http/help.go
+++ b/vendor/github.com/hashicorp/vault/http/help.go
@@ -8,14 +8,18 @@ import (
)
func wrapHelpHandler(h http.Handler, core *vault.Core) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- // If the help parameter is not blank, then show the help
+ return http.HandlerFunc(func(writer http.ResponseWriter, req *http.Request) {
+ // If the help parameter is not blank, then show the help. We forward the
+ // request because standby nodes do not have mounts and other state.
if v := req.URL.Query().Get("help"); v != "" || req.Method == "HELP" {
- handleHelp(core, w, req)
+ handleRequestForwarding(core,
+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handleHelp(core, w, r)
+ })).ServeHTTP(writer, req)
return
}
- h.ServeHTTP(w, req)
+ h.ServeHTTP(writer, req)
return
})
}
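
Taken together with handler.go, the server is now a stack of wrappers evaluated outermost-first: wrapGenericHandler, then wrapCORSHandler, then wrapHelpHandler, then the mux. A tiny generic helper, not part of Vault, expressing the same right-to-left composition:

package middlewaredemo

import "net/http"

// chain applies wrappers right to left, mirroring how handler.go now
// builds wrapGenericHandler(wrapCORSHandler(wrapHelpHandler(mux))).
func chain(h http.Handler, wrappers ...func(http.Handler) http.Handler) http.Handler {
	for i := len(wrappers) - 1; i >= 0; i-- {
		h = wrappers[i](h)
	}
	return h
}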
diff --git a/vendor/github.com/hashicorp/vault/http/http_test.go b/vendor/github.com/hashicorp/vault/http/http_test.go
index 16e0521..eb43817 100644
--- a/vendor/github.com/hashicorp/vault/http/http_test.go
+++ b/vendor/github.com/hashicorp/vault/http/http_test.go
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"net/http"
+ "regexp"
"strings"
"testing"
"time"
@@ -55,6 +56,11 @@ func testHttpData(t *testing.T, method string, token string, addr string, body i
t.Fatalf("err: %s", err)
}
+ // Get the address of the local listener in order to attach it to an Origin
+ // header, which allows testing CORS-protected requests without a browser.
+ hostURLRegexp := regexp.MustCompile("http[s]?://.+:[0-9]+")
+ req.Header.Set("Origin", hostURLRegexp.FindString(addr))
+
req.Header.Set("Content-Type", "application/json")
if len(token) != 0 {
diff --git a/vendor/github.com/hashicorp/vault/http/logical.go b/vendor/github.com/hashicorp/vault/http/logical.go
index f73e532..642314e 100644
--- a/vendor/github.com/hashicorp/vault/http/logical.go
+++ b/vendor/github.com/hashicorp/vault/http/logical.go
@@ -49,6 +49,7 @@ func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Reques
op = logical.UpdateOperation
case "LIST":
op = logical.ListOperation
+ case "OPTIONS":
default:
return nil, http.StatusMethodNotAllowed, nil
}
@@ -95,7 +96,7 @@ func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Reques
return req, 0, nil
}
-func handleLogical(core *vault.Core, dataOnly bool, prepareRequestCallback PrepareRequestFunc) http.Handler {
+func handleLogical(core *vault.Core, injectDataIntoTopLevel bool, prepareRequestCallback PrepareRequestFunc) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
req, statusCode, err := buildLogicalRequest(core, w, r)
if err != nil || statusCode != 0 {
@@ -124,11 +125,11 @@ func handleLogical(core *vault.Core, dataOnly bool, prepareRequestCallback Prepa
}
// Build the proper response
- respondLogical(w, r, req, dataOnly, resp)
+ respondLogical(w, r, req, injectDataIntoTopLevel, resp)
})
}
-func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request, dataOnly bool, resp *logical.Response) {
+func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request, injectDataIntoTopLevel bool, resp *logical.Response) {
var httpResp *logical.HTTPResponse
var ret interface{}
@@ -152,6 +153,7 @@ func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request
Token: resp.WrapInfo.Token,
TTL: int(resp.WrapInfo.TTL.Seconds()),
CreationTime: resp.WrapInfo.CreationTime.Format(time.RFC3339Nano),
+ CreationPath: resp.WrapInfo.CreationPath,
WrappedAccessor: resp.WrapInfo.WrappedAccessor,
},
}
@@ -162,7 +164,7 @@ func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request
ret = httpResp
- if dataOnly {
+ if injectDataIntoTopLevel {
injector := logical.HTTPSysInjector{
Response: httpResp,
}
diff --git a/vendor/github.com/hashicorp/vault/http/logical_test.go b/vendor/github.com/hashicorp/vault/http/logical_test.go
index bbbd892..e4101a5 100644
--- a/vendor/github.com/hashicorp/vault/http/logical_test.go
+++ b/vendor/github.com/hashicorp/vault/http/logical_test.go
@@ -15,6 +15,7 @@ import (
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/physical/inmem"
"github.com/hashicorp/vault/vault"
)
@@ -83,10 +84,13 @@ func TestLogical_StandbyRedirect(t *testing.T) {
// Create an HA Vault
logger := logformat.NewVaultLogger(log.LevelTrace)
- inmha := physical.NewInmemHA(logger)
+ inmha, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
conf := &vault.CoreConfig{
Physical: inmha,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: addr1,
DisableMlock: true,
}
@@ -108,7 +112,7 @@ func TestLogical_StandbyRedirect(t *testing.T) {
// Create a second HA Vault
conf2 := &vault.CoreConfig{
Physical: inmha,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: addr2,
DisableMlock: true,
}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_auth_test.go b/vendor/github.com/hashicorp/vault/http/sys_auth_test.go
index 9e19391..fa3c692 100644
--- a/vendor/github.com/hashicorp/vault/http/sys_auth_test.go
+++ b/vendor/github.com/hashicorp/vault/http/sys_auth_test.go
@@ -49,6 +49,13 @@ func TestSysAuth(t *testing.T) {
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
+ for k, v := range actual["data"].(map[string]interface{}) {
+ if v.(map[string]interface{})["accessor"] == "" {
+ t.Fatalf("no accessor from %s", k)
+ }
+ expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ }
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
@@ -120,6 +127,13 @@ func TestSysEnableAuth(t *testing.T) {
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
+ for k, v := range actual["data"].(map[string]interface{}) {
+ if v.(map[string]interface{})["accessor"] == "" {
+ t.Fatalf("no accessor from %s", k)
+ }
+ expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ }
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
@@ -176,6 +190,13 @@ func TestSysDisableAuth(t *testing.T) {
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
+ for k, v := range actual["data"].(map[string]interface{}) {
+ if v.(map[string]interface{})["accessor"] == "" {
+ t.Fatalf("no accessor from %s", k)
+ }
+ expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ }
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
diff --git a/vendor/github.com/hashicorp/vault/http/sys_config_cors_test.go b/vendor/github.com/hashicorp/vault/http/sys_config_cors_test.go
new file mode 100644
index 0000000..bd6c7ae
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/http/sys_config_cors_test.go
@@ -0,0 +1,78 @@
+package http
+
+import (
+ "encoding/json"
+ "net/http"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSysConfigCors(t *testing.T) {
+ var resp *http.Response
+
+ core, _, token := vault.TestCoreUnsealed(t)
+ ln, addr := TestServer(t, core)
+ defer ln.Close()
+ TestServerAuth(t, addr, token)
+
+ corsConf := core.CORSConfig()
+
+ // Try to enable CORS without providing a value for allowed_origins
+ resp = testHttpPut(t, token, addr+"/v1/sys/config/cors", map[string]interface{}{
+ "allowed_headers": "X-Custom-Header",
+ })
+
+ testResponseStatus(t, resp, 500)
+
+ // Enable CORS, but provide an origin this time.
+ resp = testHttpPut(t, token, addr+"/v1/sys/config/cors", map[string]interface{}{
+ "allowed_origins": addr,
+ "allowed_headers": "X-Custom-Header",
+ })
+
+ testResponseStatus(t, resp, 204)
+
+ // Read the CORS configuration
+ resp = testHttpGet(t, token, addr+"/v1/sys/config/cors")
+ testResponseStatus(t, resp, 200)
+
+ var actual map[string]interface{}
+ var expected map[string]interface{}
+
+ lenStdHeaders := len(corsConf.AllowedHeaders)
+
+ expectedHeaders := make([]interface{}, lenStdHeaders)
+
+ for i := range corsConf.AllowedHeaders {
+ expectedHeaders[i] = corsConf.AllowedHeaders[i]
+ }
+
+ expected = map[string]interface{}{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": json.Number("0"),
+ "wrap_info": nil,
+ "warnings": nil,
+ "auth": nil,
+ "data": map[string]interface{}{
+ "enabled": true,
+ "allowed_origins": []interface{}{addr},
+ "allowed_headers": expectedHeaders,
+ },
+ "enabled": true,
+ "allowed_origins": []interface{}{addr},
+ "allowed_headers": expectedHeaders,
+ }
+
+ testResponseStatus(t, resp, 200)
+
+ testResponseBody(t, resp, &actual)
+ expected["request_id"] = actual["request_id"]
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+ }
+
+}
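
Outside of tests, the same endpoint can be driven with plain HTTP. A hedged sketch of enabling CORS on a running server; the address, token, origin, and header values are placeholders.

package corsconfigdemo

import (
	"bytes"
	"fmt"
	"net/http"
)

// enableCORS turns on CORS via PUT /v1/sys/config/cors.
func enableCORS(addr, token string) error {
	body := []byte(`{"allowed_origins": "https://ui.example.com", "allowed_headers": "X-Custom-Header"}`)
	req, err := http.NewRequest(http.MethodPut, addr+"/v1/sys/config/cors", bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("X-Vault-Token", token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}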
diff --git a/vendor/github.com/hashicorp/vault/http/sys_leader.go b/vendor/github.com/hashicorp/vault/http/sys_leader.go
index ad5f281..98eb04a 100644
--- a/vendor/github.com/hashicorp/vault/http/sys_leader.go
+++ b/vendor/github.com/hashicorp/vault/http/sys_leader.go
@@ -20,7 +20,7 @@ func handleSysLeader(core *vault.Core) http.Handler {
func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request) {
haEnabled := true
- isLeader, address, err := core.Leader()
+ isLeader, address, clusterAddr, err := core.Leader()
if errwrap.Contains(err, vault.ErrHANotEnabled.Error()) {
haEnabled = false
err = nil
@@ -31,14 +31,16 @@ func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request
}
respondOk(w, &LeaderResponse{
- HAEnabled: haEnabled,
- IsSelf: isLeader,
- LeaderAddress: address,
+ HAEnabled: haEnabled,
+ IsSelf: isLeader,
+ LeaderAddress: address,
+ LeaderClusterAddress: clusterAddr,
})
}
type LeaderResponse struct {
- HAEnabled bool `json:"ha_enabled"`
- IsSelf bool `json:"is_self"`
- LeaderAddress string `json:"leader_address"`
+ HAEnabled bool `json:"ha_enabled"`
+ IsSelf bool `json:"is_self"`
+ LeaderAddress string `json:"leader_address"`
+ LeaderClusterAddress string `json:"leader_cluster_address"`
}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_leader_test.go b/vendor/github.com/hashicorp/vault/http/sys_leader_test.go
index 9c0c7d2..afe0dbd 100644
--- a/vendor/github.com/hashicorp/vault/http/sys_leader_test.go
+++ b/vendor/github.com/hashicorp/vault/http/sys_leader_test.go
@@ -20,9 +20,10 @@ func TestSysLeader_get(t *testing.T) {
var actual map[string]interface{}
expected := map[string]interface{}{
- "ha_enabled": false,
- "is_self": false,
- "leader_address": "",
+ "ha_enabled": false,
+ "is_self": false,
+ "leader_address": "",
+ "leader_cluster_address": "",
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
diff --git a/vendor/github.com/hashicorp/vault/http/sys_lease_test.go b/vendor/github.com/hashicorp/vault/http/sys_lease_test.go
index 6b7bc34..de1dc6c 100644
--- a/vendor/github.com/hashicorp/vault/http/sys_lease_test.go
+++ b/vendor/github.com/hashicorp/vault/http/sys_lease_test.go
@@ -23,14 +23,33 @@ func TestSysRenew(t *testing.T) {
// read secret
resp = testHttpGet(t, token, addr+"/v1/secret/foo")
var result struct {
- LeaseId string `json:"lease_id"`
+ LeaseID string `json:"lease_id"`
}
if err := jsonutil.DecodeJSONFromReader(resp.Body, &result); err != nil {
t.Fatalf("bad: %s", err)
}
- resp = testHttpPut(t, token, addr+"/v1/sys/renew/"+result.LeaseId, nil)
+ var renewResult struct {
+ LeaseID string `json:"lease_id"`
+ Data map[string]interface{} `json:"data"`
+ }
+ resp = testHttpPut(t, token, addr+"/v1/sys/renew/"+result.LeaseID, nil)
testResponseStatus(t, resp, 200)
+ if err := jsonutil.DecodeJSONFromReader(resp.Body, &renewResult); err != nil {
+ t.Fatal(err)
+ }
+ if result.LeaseID != renewResult.LeaseID {
+ t.Fatal("lease id changed in renew request")
+ }
+
+ resp = testHttpPut(t, token, addr+"/v1/sys/leases/renew/"+result.LeaseID, nil)
+ testResponseStatus(t, resp, 200)
+ if err := jsonutil.DecodeJSONFromReader(resp.Body, &renewResult); err != nil {
+ t.Fatal(err)
+ }
+ if result.LeaseID != renewResult.LeaseID {
+ t.Fatal("lease id changed in renew request")
+ }
}
func TestSysRevoke(t *testing.T) {
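
The test now covers both the legacy /v1/sys/renew/:lease_id route and the new /v1/sys/leases/renew/:lease_id route registered in handler.go. A hedged client-side sketch against the new path; addr, token, and leaseID are placeholders.

package leasedemo

import (
	"fmt"
	"net/http"
)

// renewLease renews a lease via the new leases route.
func renewLease(addr, token, leaseID string) error {
	req, err := http.NewRequest(http.MethodPut, addr+"/v1/sys/leases/renew/"+leaseID, nil)
	if err != nil {
		return err
	}
	req.Header.Set("X-Vault-Token", token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}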
diff --git a/vendor/github.com/hashicorp/vault/http/sys_mount_test.go b/vendor/github.com/hashicorp/vault/http/sys_mount_test.go
index 2e12f0f..57f6dd7 100644
--- a/vendor/github.com/hashicorp/vault/http/sys_mount_test.go
+++ b/vendor/github.com/hashicorp/vault/http/sys_mount_test.go
@@ -27,8 +27,8 @@ func TestSysMounts(t *testing.T) {
"auth": nil,
"data": map[string]interface{}{
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -58,8 +58,8 @@ func TestSysMounts(t *testing.T) {
},
},
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -91,6 +91,14 @@ func TestSysMounts(t *testing.T) {
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
+ for k, v := range actual["data"].(map[string]interface{}) {
+ if v.(map[string]interface{})["accessor"] == "" {
+ t.Fatalf("no accessor from %s", k)
+ }
+ expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ }
+
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
@@ -103,7 +111,7 @@ func TestSysMount(t *testing.T) {
TestServerAuth(t, addr, token)
resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
- "type": "generic",
+ "type": "kv",
"description": "foo",
})
testResponseStatus(t, resp, 204)
@@ -121,7 +129,7 @@ func TestSysMount(t *testing.T) {
"data": map[string]interface{}{
"foo/": map[string]interface{}{
"description": "foo",
- "type": "generic",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -130,8 +138,8 @@ func TestSysMount(t *testing.T) {
"local": false,
},
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -162,7 +170,7 @@ func TestSysMount(t *testing.T) {
},
"foo/": map[string]interface{}{
"description": "foo",
- "type": "generic",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -171,8 +179,8 @@ func TestSysMount(t *testing.T) {
"local": false,
},
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -204,6 +212,14 @@ func TestSysMount(t *testing.T) {
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
+ for k, v := range actual["data"].(map[string]interface{}) {
+ if v.(map[string]interface{})["accessor"] == "" {
+ t.Fatalf("no accessor from %s", k)
+ }
+ expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ }
+
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
@@ -216,7 +232,7 @@ func TestSysMount_put(t *testing.T) {
TestServerAuth(t, addr, token)
resp := testHttpPut(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
- "type": "generic",
+ "type": "kv",
"description": "foo",
})
testResponseStatus(t, resp, 204)
@@ -232,7 +248,7 @@ func TestSysRemount(t *testing.T) {
TestServerAuth(t, addr, token)
resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
- "type": "generic",
+ "type": "kv",
"description": "foo",
})
testResponseStatus(t, resp, 204)
@@ -256,7 +272,7 @@ func TestSysRemount(t *testing.T) {
"data": map[string]interface{}{
"bar/": map[string]interface{}{
"description": "foo",
- "type": "generic",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -265,8 +281,8 @@ func TestSysRemount(t *testing.T) {
"local": false,
},
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -297,7 +313,7 @@ func TestSysRemount(t *testing.T) {
},
"bar/": map[string]interface{}{
"description": "foo",
- "type": "generic",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -306,8 +322,8 @@ func TestSysRemount(t *testing.T) {
"local": false,
},
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -339,6 +355,14 @@ func TestSysRemount(t *testing.T) {
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
+ for k, v := range actual["data"].(map[string]interface{}) {
+ if v.(map[string]interface{})["accessor"] == "" {
+ t.Fatalf("no accessor from %s", k)
+ }
+ expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ }
+
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
@@ -351,7 +375,7 @@ func TestSysUnmount(t *testing.T) {
TestServerAuth(t, addr, token)
resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
- "type": "generic",
+ "type": "kv",
"description": "foo",
})
testResponseStatus(t, resp, 204)
@@ -371,8 +395,8 @@ func TestSysUnmount(t *testing.T) {
"auth": nil,
"data": map[string]interface{}{
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -402,8 +426,8 @@ func TestSysUnmount(t *testing.T) {
},
},
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -435,6 +459,14 @@ func TestSysUnmount(t *testing.T) {
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
+ for k, v := range actual["data"].(map[string]interface{}) {
+ if v.(map[string]interface{})["accessor"] == "" {
+ t.Fatalf("no accessor from %s", k)
+ }
+ expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ }
+
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
@@ -447,7 +479,7 @@ func TestSysTuneMount(t *testing.T) {
TestServerAuth(t, addr, token)
resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
- "type": "generic",
+ "type": "kv",
"description": "foo",
})
testResponseStatus(t, resp, 204)
@@ -465,7 +497,7 @@ func TestSysTuneMount(t *testing.T) {
"data": map[string]interface{}{
"foo/": map[string]interface{}{
"description": "foo",
- "type": "generic",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -474,8 +506,8 @@ func TestSysTuneMount(t *testing.T) {
"local": false,
},
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -506,7 +538,7 @@ func TestSysTuneMount(t *testing.T) {
},
"foo/": map[string]interface{}{
"description": "foo",
- "type": "generic",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -515,8 +547,8 @@ func TestSysTuneMount(t *testing.T) {
"local": false,
},
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -548,6 +580,14 @@ func TestSysTuneMount(t *testing.T) {
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
+ for k, v := range actual["data"].(map[string]interface{}) {
+ if v.(map[string]interface{})["accessor"] == "" {
+ t.Fatalf("no accessor from %s", k)
+ }
+ expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ }
+
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
@@ -562,7 +602,7 @@ func TestSysTuneMount(t *testing.T) {
resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
"default_lease_ttl": "72000h",
})
- testResponseStatus(t, resp, 400)
+ testResponseStatus(t, resp, 204)
// Longer than system default
resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{
@@ -599,7 +639,7 @@ func TestSysTuneMount(t *testing.T) {
"data": map[string]interface{}{
"foo/": map[string]interface{}{
"description": "foo",
- "type": "generic",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("259196400"),
"max_lease_ttl": json.Number("259200000"),
@@ -608,8 +648,8 @@ func TestSysTuneMount(t *testing.T) {
"local": false,
},
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -640,7 +680,7 @@ func TestSysTuneMount(t *testing.T) {
},
"foo/": map[string]interface{}{
"description": "foo",
- "type": "generic",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("259196400"),
"max_lease_ttl": json.Number("259200000"),
@@ -649,8 +689,8 @@ func TestSysTuneMount(t *testing.T) {
"local": false,
},
"secret/": map[string]interface{}{
- "description": "generic secret storage",
- "type": "generic",
+ "description": "key/value secret storage",
+ "type": "kv",
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
@@ -683,6 +723,14 @@ func TestSysTuneMount(t *testing.T) {
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
+ for k, v := range actual["data"].(map[string]interface{}) {
+ if v.(map[string]interface{})["accessor"] == "" {
+ t.Fatalf("no accessor from %s", k)
+ }
+ expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+ }
+
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual)
}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go b/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go
index 5dc0bf9..53e4996 100644
--- a/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go
+++ b/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go
@@ -54,12 +54,12 @@ func TestSysMountConfig(t *testing.T) {
}
}
-// testMount sets up a test mount of a generic backend w/ a random path; caller
+// testMount sets up a test mount of a kv backend w/ a random path; caller
// is responsible for unmounting
func testMount(client *api.Client) (string, error) {
rand.Seed(time.Now().UTC().UnixNano())
randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
path := fmt.Sprintf("testmount-%d", randInt)
- err := client.Sys().Mount(path, &api.MountInput{Type: "generic"})
+ err := client.Sys().Mount(path, &api.MountInput{Type: "kv"})
return path, err
}
diff --git a/vendor/github.com/hashicorp/vault/http/sys_policy_test.go b/vendor/github.com/hashicorp/vault/http/sys_policy_test.go
index 6a8a33b..42c1e4b 100644
--- a/vendor/github.com/hashicorp/vault/http/sys_policy_test.go
+++ b/vendor/github.com/hashicorp/vault/http/sys_policy_test.go
@@ -77,7 +77,7 @@ func TestSysWritePolicy(t *testing.T) {
TestServerAuth(t, addr, token)
resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{
- "rules": ``,
+ "rules": `path "*" { capabilities = ["read"] }`,
})
testResponseStatus(t, resp, 204)
@@ -118,7 +118,7 @@ func TestSysDeletePolicy(t *testing.T) {
TestServerAuth(t, addr, token)
resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{
- "rules": ``,
+ "rules": `path "*" { capabilities = ["read"] }`,
})
testResponseStatus(t, resp, 204)
diff --git a/vendor/github.com/hashicorp/vault/http/sys_rekey.go b/vendor/github.com/hashicorp/vault/http/sys_rekey.go
index bd597b6..9f26f3b 100644
--- a/vendor/github.com/hashicorp/vault/http/sys_rekey.go
+++ b/vendor/github.com/hashicorp/vault/http/sys_rekey.go
@@ -21,7 +21,7 @@ func handleSysRekeyInit(core *vault.Core, recovery bool) http.Handler {
}
repState := core.ReplicationState()
- if repState == consts.ReplicationSecondary {
+ if repState.HasState(consts.ReplicationPerformanceSecondary) {
respondError(w, http.StatusBadRequest,
fmt.Errorf("rekeying can only be performed on the primary cluster when replication is activated"))
return
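
The replication check changes from an equality test to a bitmask query: a core can now carry several replication states at once, so HasState asks about one flag rather than comparing the whole value. A minimal sketch of the pattern; the type and constants here are illustrative, not Vault's actual definitions.

package repstatedemo

// replicationState is a bitmask; a core can be, for example, both a
// DR primary and a performance secondary at the same time.
type replicationState uint32

const (
	statePerformanceSecondary replicationState = 1 << iota
	statePerformancePrimary
	stateDRSecondary
	stateDRPrimary
)

// hasState mirrors the HasState call used above: it checks one flag
// instead of requiring an exact match on the full state.
func (s replicationState) hasState(flag replicationState) bool {
	return s&flag != 0
}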
diff --git a/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go b/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go
index 9c27ebb..7ab2143 100644
--- a/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go
+++ b/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go
@@ -2,7 +2,6 @@ package http
import (
"encoding/json"
- "net/http"
"reflect"
"testing"
"time"
@@ -14,29 +13,20 @@ import (
// Test wrapping functionality
func TestHTTP_Wrapping(t *testing.T) {
- handler1 := http.NewServeMux()
- handler2 := http.NewServeMux()
- handler3 := http.NewServeMux()
+ cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
+ HandlerFunc: Handler,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
- coreConfig := &vault.CoreConfig{}
-
- // Chicken-and-egg: Handler needs a core. So we create handlers first, then
- // add routes chained to a Handler-created handler.
- cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true)
- for _, core := range cores {
- defer core.CloseListeners()
- }
- handler1.Handle("/", Handler(cores[0].Core))
- handler2.Handle("/", Handler(cores[1].Core))
- handler3.Handle("/", Handler(cores[2].Core))
+ cores := cluster.Cores
// make it easy to get access to the active
core := cores[0].Core
vault.TestWaitActive(t, core)
- root := cores[0].Root
client := cores[0].Client
- client.SetToken(root)
+ client.SetToken(cluster.RootToken)
// Write a value that we will use with wrapping for lookup
_, err := client.Logical().Write("secret/foo", map[string]interface{}{
@@ -78,7 +68,7 @@ func TestHTTP_Wrapping(t *testing.T) {
// Second: basic things that should fail, unwrap edition
// Root token isn't a wrapping token
- _, err = client.Logical().Unwrap(root)
+ _, err = client.Logical().Unwrap(cluster.RootToken)
if err == nil {
t.Fatal("expected error")
}
@@ -121,6 +111,9 @@ func TestHTTP_Wrapping(t *testing.T) {
secret, err = client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{
"token": wrapInfo.Token,
})
+ if err != nil {
+ t.Fatal(err)
+ }
if secret == nil || secret.Data == nil {
t.Fatal("secret or secret data is nil")
}
@@ -150,6 +143,9 @@ func TestHTTP_Wrapping(t *testing.T) {
// Test unwrap via the client token
client.SetToken(wrapInfo.Token)
secret, err = client.Logical().Write("sys/wrapping/unwrap", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
if secret == nil || secret.Data == nil {
t.Fatal("secret or secret data is nil")
}
@@ -161,7 +157,7 @@ func TestHTTP_Wrapping(t *testing.T) {
}
// Create a wrapping token
- client.SetToken(root)
+ client.SetToken(cluster.RootToken)
secret, err = client.Logical().Read("secret/foo")
if err != nil {
t.Fatal(err)
@@ -175,6 +171,9 @@ func TestHTTP_Wrapping(t *testing.T) {
secret, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
"token": wrapInfo.Token,
})
+ if err != nil {
+ t.Fatal(err)
+ }
ret2 := secret
// Should be expired and fail
_, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
@@ -197,6 +196,9 @@ func TestHTTP_Wrapping(t *testing.T) {
// Read response directly
client.SetToken(wrapInfo.Token)
secret, err = client.Logical().Read("cubbyhole/response")
+ if err != nil {
+ t.Fatal(err)
+ }
ret3 := secret
// Should be expired and fail
_, err = client.Logical().Write("cubbyhole/response", nil)
@@ -205,7 +207,7 @@ func TestHTTP_Wrapping(t *testing.T) {
}
// Create a wrapping token
- client.SetToken(root)
+ client.SetToken(cluster.RootToken)
secret, err = client.Logical().Read("secret/foo")
if err != nil {
t.Fatal(err)
@@ -217,6 +219,9 @@ func TestHTTP_Wrapping(t *testing.T) {
// Read via Unwrap method
secret, err = client.Logical().Unwrap(wrapInfo.Token)
+ if err != nil {
+ t.Fatal(err)
+ }
ret4 := secret
// Should be expired and fail
_, err = client.Logical().Unwrap(wrapInfo.Token)
@@ -254,7 +259,7 @@ func TestHTTP_Wrapping(t *testing.T) {
// Custom wrapping
//
- client.SetToken(root)
+ client.SetToken(cluster.RootToken)
data := map[string]interface{}{
"zip": "zap",
"three": json.Number("2"),
@@ -303,10 +308,24 @@ func TestHTTP_Wrapping(t *testing.T) {
}
wrapInfo = secret.WrapInfo
+ // Check for the correct CreationPath before rewrap
+ if wrapInfo.CreationPath != "secret/foo" {
+ t.Fatalf("error on wrapInfo.CreationPath: expected: secret/foo, got: %s", wrapInfo.CreationPath)
+ }
+
// Test rewrapping
secret, err = client.Logical().Write("sys/wrapping/rewrap", map[string]interface{}{
"token": wrapInfo.Token,
})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check for the correct CreationPath after rewrap
+ if wrapInfo.CreationPath != "secret/foo" {
+ t.Fatalf("error on wrapInfo.CreationPath: expected: secret/foo, got: %s", wrapInfo.CreationPath)
+ }
+
// Should be expired and fail
_, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
"token": wrapInfo.Token,
diff --git a/vendor/github.com/hashicorp/vault/logical/auth.go b/vendor/github.com/hashicorp/vault/logical/auth.go
index b454790..09694c4 100644
--- a/vendor/github.com/hashicorp/vault/logical/auth.go
+++ b/vendor/github.com/hashicorp/vault/logical/auth.go
@@ -51,6 +51,10 @@ type Auth struct {
// Number of allowed uses of the issued token
NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
+
+ // Persona is the information about the authenticated client returned by
+ // the auth backend
+ Persona *Persona `json:"persona" structs:"persona" mapstructure:"persona"`
}
func (a *Auth) GoString() string {
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/backend.go b/vendor/github.com/hashicorp/vault/logical/framework/backend.go
index e94ea04..477a926 100644
--- a/vendor/github.com/hashicorp/vault/logical/framework/backend.go
+++ b/vendor/github.com/hashicorp/vault/logical/framework/backend.go
@@ -82,6 +82,12 @@ type Backend struct {
// See the built-in AuthRenew helpers in lease.go for common callbacks.
AuthRenew OperationFunc
+ // LicenseRegistration is called to register the license for a backend.
+ LicenseRegistration LicenseRegistrationFunc
+
+ // Type is the logical.BackendType for the backend implementation
+ BackendType logical.BackendType
+
logger log.Logger
system logical.SystemView
once sync.Once
@@ -107,6 +113,10 @@ type InitializeFunc func() error
// InvalidateFunc is the callback for backend key invalidation.
type InvalidateFunc func(string)
+// LicenseRegistrationFunc is the callback for backend license registration.
+type LicenseRegistrationFunc func(interface{}) error
+
+// HandleExistenceCheck is the logical.Backend implementation.
func (b *Backend) HandleExistenceCheck(req *logical.Request) (checkFound bool, exists bool, err error) {
b.once.Do(b.init)
@@ -154,7 +164,7 @@ func (b *Backend) HandleExistenceCheck(req *logical.Request) (checkFound bool, e
return
}
-// logical.Backend impl.
+// HandleRequest is the logical.Backend implementation.
func (b *Backend) HandleRequest(req *logical.Request) (*logical.Response, error) {
b.once.Do(b.init)
@@ -221,18 +231,11 @@ func (b *Backend) HandleRequest(req *logical.Request) (*logical.Response, error)
return callback(req, &fd)
}
-// logical.Backend impl.
+// SpecialPaths is the logical.Backend implementation.
func (b *Backend) SpecialPaths() *logical.Paths {
return b.PathsSpecial
}
-// Setup is used to initialize the backend with the initial backend configuration
-func (b *Backend) Setup(config *logical.BackendConfig) (logical.Backend, error) {
- b.logger = config.Logger
- b.system = config.System
- return b, nil
-}
-
// Cleanup is used to release resources and prepare to stop the backend
func (b *Backend) Cleanup() {
if b.Clean != nil {
@@ -240,6 +243,7 @@ func (b *Backend) Cleanup() {
}
}
+// Initialize calls the backend's Init func if set.
func (b *Backend) Initialize() error {
if b.Init != nil {
return b.Init()
@@ -255,6 +259,13 @@ func (b *Backend) InvalidateKey(key string) {
}
}
+// Setup is used to initialize the backend with the initial backend configuration
+func (b *Backend) Setup(config *logical.BackendConfig) error {
+ b.logger = config.Logger
+ b.system = config.System
+ return nil
+}
+
// Logger can be used to get the logger. If no logger has been set,
// the logs will be discarded.
func (b *Backend) Logger() log.Logger {
@@ -265,11 +276,25 @@ func (b *Backend) Logger() log.Logger {
return logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelOff)
}
+// System returns the backend's system view.
func (b *Backend) System() logical.SystemView {
return b.system
}
-// This method takes in the TTL and MaxTTL values provided by the user,
+// Type returns the backend type
+func (b *Backend) Type() logical.BackendType {
+ return b.BackendType
+}
+
+// RegisterLicense performs backend license registration.
+func (b *Backend) RegisterLicense(license interface{}) error {
+ if b.LicenseRegistration == nil {
+ return nil
+ }
+ return b.LicenseRegistration(license)
+}
+
+// SanitizeTTLStr takes in the TTL and MaxTTL values provided by the user,
// compares those with the SystemView values. If they are empty a value of 0 is
// set, which will cause initial secret or LeaseExtend operations to use the
// mount/system defaults. If they are set, their boundaries are validated.
@@ -297,7 +322,8 @@ func (b *Backend) SanitizeTTLStr(ttlStr, maxTTLStr string) (ttl, maxTTL time.Dur
return
}
-// Caps the boundaries of ttl and max_ttl values to the backend mount's max_ttl value.
+// SanitizeTTL caps the boundaries of ttl and max_ttl values to the
+// backend mount's max_ttl value.
func (b *Backend) SanitizeTTL(ttl, maxTTL time.Duration) (time.Duration, time.Duration, error) {
sysMaxTTL := b.System().MaxLeaseTTL()
if ttl > sysMaxTTL {
@@ -575,6 +601,7 @@ func (s *FieldSchema) DefaultOrZero() interface{} {
return s.Type.Zero()
}
+// Zero returns the correct zero-value for a specific FieldType
func (t FieldType) Zero() interface{} {
switch t {
case TypeString:
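
A sketch of a framework.Backend filling the two new fields; logical.TypeLogical is assumed to be one of the BackendType constants, and the license callback is a stub.

package backenddemo

import (
	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/logical/framework"
)

// newBackend shows use of the new BackendType and LicenseRegistration
// fields; everything else is left at its zero value.
func newBackend() *framework.Backend {
	return &framework.Backend{
		Help:        "demo backend",
		BackendType: logical.TypeLogical, // assumed constant
		LicenseRegistration: func(license interface{}) error {
			// No-op stub: backends with nothing to register can omit
			// this entirely, since RegisterLicense tolerates nil.
			return nil
		},
	}
}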
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_data.go b/vendor/github.com/hashicorp/vault/logical/framework/field_data.go
index 9783802..7fac976 100644
--- a/vendor/github.com/hashicorp/vault/logical/framework/field_data.go
+++ b/vendor/github.com/hashicorp/vault/logical/framework/field_data.go
@@ -2,7 +2,9 @@ package framework
import (
"encoding/json"
+ "errors"
"fmt"
+ "regexp"
"github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/strutil"
@@ -18,7 +20,7 @@ type FieldData struct {
Schema map[string]*FieldSchema
}
-// Cycle through raw data and validate conversions in
+// Validate cycles through raw data and validates conversions in
// the schema, so we don't get an error/panic later when
// trying to get data out. Data not in the schema is not
// an error at this point, so we don't worry about it.
@@ -31,8 +33,8 @@ func (d *FieldData) Validate() error {
}
switch schema.Type {
- case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString, TypeSlice,
- TypeStringSlice, TypeCommaStringSlice:
+ case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString,
+ TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice:
_, _, err := d.getPrimitive(field, schema)
if err != nil {
return fmt.Errorf("Error converting input %v for field %s: %s", value, field, err)
@@ -108,7 +110,7 @@ func (d *FieldData) GetOkErr(k string) (interface{}, bool, error) {
switch schema.Type {
case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString,
- TypeSlice, TypeStringSlice, TypeCommaStringSlice:
+ TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice:
return d.getPrimitive(k, schema)
default:
return nil, false,
@@ -145,6 +147,20 @@ func (d *FieldData) getPrimitive(
}
return result, true, nil
+ case TypeNameString:
+ var result string
+ if err := mapstructure.WeakDecode(raw, &result); err != nil {
+ return nil, true, err
+ }
+ matched, err := regexp.MatchString("^\\w(([\\w-.]+)?\\w)?$", result)
+ if err != nil {
+ return nil, true, err
+ }
+ if !matched {
+ return nil, true, errors.New("field does not match the formatting rules")
+ }
+ return result, true, nil
+
case TypeMap:
var result map[string]interface{}
if err := mapstructure.WeakDecode(raw, &result); err != nil {
@@ -159,6 +175,16 @@ func (d *FieldData) getPrimitive(
return nil, false, nil
case int:
result = inp
+ case int32:
+ result = int(inp)
+ case int64:
+ result = int(inp)
+ case uint:
+ result = int(inp)
+ case uint32:
+ result = int(inp)
+ case uint64:
+ result = int(inp)
case float32:
result = int(inp)
case float64:
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go b/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go
index a801f9c..a9bc474 100644
--- a/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go
+++ b/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go
@@ -180,6 +180,17 @@ func TestFieldDataGet(t *testing.T) {
[]string{"123", "abc"},
},
+ "string slice type, single value": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeStringSlice},
+ },
+ map[string]interface{}{
+ "foo": "abc",
+ },
+ "foo",
+ []string{"abc"},
+ },
+
"comma string slice type, comma string with one value": {
map[string]*FieldSchema{
"foo": &FieldSchema{Type: TypeCommaStringSlice},
@@ -245,6 +256,28 @@ func TestFieldDataGet(t *testing.T) {
"foo",
[]string{},
},
+
+ "name string type, valid string": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeNameString},
+ },
+ map[string]interface{}{
+ "foo": "bar",
+ },
+ "foo",
+ "bar",
+ },
+
+ "name string type, valid value with special characters": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeNameString},
+ },
+ map[string]interface{}{
+ "foo": "bar.baz-bay123",
+ },
+ "foo",
+ "bar.baz-bay123",
+ },
}
for name, tc := range cases {
@@ -253,6 +286,10 @@ func TestFieldDataGet(t *testing.T) {
Schema: tc.Schema,
}
+ if err := data.Validate(); err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+
actual := data.Get(tc.Key)
if !reflect.DeepEqual(actual, tc.Value) {
t.Fatalf(
@@ -261,3 +298,60 @@ func TestFieldDataGet(t *testing.T) {
}
}
}
+
+func TestFieldDataGet_Error(t *testing.T) {
+ cases := map[string]struct {
+ Schema map[string]*FieldSchema
+ Raw map[string]interface{}
+ Key string
+ }{
+ "name string type, invalid value with invalid characters": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeNameString},
+ },
+ map[string]interface{}{
+ "foo": "bar baz",
+ },
+ "foo",
+ },
+ "name string type, invalid value with special characters at beginning": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeNameString},
+ },
+ map[string]interface{}{
+ "foo": ".barbaz",
+ },
+ "foo",
+ },
+ "name string type, invalid value with special characters at end": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeNameString},
+ },
+ map[string]interface{}{
+ "foo": "barbaz-",
+ },
+ "foo",
+ },
+ "name string type, empty string": {
+ map[string]*FieldSchema{
+ "foo": &FieldSchema{Type: TypeNameString},
+ },
+ map[string]interface{}{
+ "foo": "",
+ },
+ "foo",
+ },
+ }
+
+ for _, tc := range cases {
+ data := &FieldData{
+ Raw: tc.Raw,
+ Schema: tc.Schema,
+ }
+
+ _, _, err := data.GetOkErr(tc.Key)
+ if err == nil {
+ t.Fatalf("error expected, none received")
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_type.go b/vendor/github.com/hashicorp/vault/logical/framework/field_type.go
index 034d0fe..304d45f 100644
--- a/vendor/github.com/hashicorp/vault/logical/framework/field_type.go
+++ b/vendor/github.com/hashicorp/vault/logical/framework/field_type.go
@@ -23,12 +23,19 @@ const (
// slice of strings and also supports parsing a comma-separated list in
// a string field
TypeCommaStringSlice
+
+ // TypeNameString represents a name that is URI safe and follows specific
+ // rules. The name must start and end with an alphanumeric character,
+ // and characters in the middle can be alphanumeric, '.', or '-'.
+ TypeNameString
)
func (t FieldType) String() string {
switch t {
case TypeString:
return "string"
+ case TypeNameString:
+ return "name string"
case TypeInt:
return "int"
case TypeBool:
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_map.go b/vendor/github.com/hashicorp/vault/logical/framework/path_map.go
index ff1d277..f9fa3a6 100644
--- a/vendor/github.com/hashicorp/vault/logical/framework/path_map.go
+++ b/vendor/github.com/hashicorp/vault/logical/framework/path_map.go
@@ -21,6 +21,7 @@ type PathMap struct {
Schema map[string]*FieldSchema
CaseSensitive bool
Salt *salt.Salt
+ SaltFunc func() (*salt.Salt, error)
once sync.Once
}
@@ -41,7 +42,7 @@ func (p *PathMap) init() {
}
// pathStruct returns the pathStruct for this mapping
-func (p *PathMap) pathStruct(k string) *PathStruct {
+func (p *PathMap) pathStruct(s logical.Storage, k string) (*PathStruct, error) {
p.once.Do(p.init)
// If we don't care about casing, store everything lowercase
@@ -49,30 +50,90 @@ func (p *PathMap) pathStruct(k string) *PathStruct {
k = strings.ToLower(k)
}
+ // The original key before any salting
+ origKey := k
+
// If we have a salt, apply it before lookup
- if p.Salt != nil {
- k = p.Salt.SaltID(k)
+ salt := p.Salt
+ var err error
+ if p.SaltFunc != nil {
+ salt, err = p.SaltFunc()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if salt != nil {
+ k = salt.SaltID(k)
}
- return &PathStruct{
- Name: fmt.Sprintf("map/%s/%s", p.Name, k),
+ finalName := fmt.Sprintf("map/%s/%s", p.Name, k)
+ ps := &PathStruct{
+ Name: finalName,
Schema: p.Schema,
}
+
+ // Check for unsalted version and upgrade if so
+ if k != origKey {
+ // Generate the unsalted name
+ unsaltedName := fmt.Sprintf("map/%s/%s", p.Name, origKey)
+ // Set the path struct to use the unsalted name
+ ps.Name = unsaltedName
+ // Ensure that no matter what happens, the name that is returned is the
+ // final path
+ defer func() {
+ ps.Name = finalName
+ }()
+ val, err := ps.Get(s)
+ if err != nil {
+ return nil, err
+ }
+ // If not nil, we have an unsalted entry -- upgrade it
+ if val != nil {
+ // Set the path struct to use the desired final name
+ ps.Name = finalName
+ err = ps.Put(s, val)
+ if err != nil {
+ return nil, err
+ }
+ // Set it back to the old path and delete
+ ps.Name = unsaltedName
+ err = ps.Delete(s)
+ if err != nil {
+ return nil, err
+ }
+ // We'll set this in the deferred function too, but it doesn't hurt here
+ ps.Name = finalName
+ }
+ }
+
+ return ps, nil
}
// Get reads a value out of the mapping
func (p *PathMap) Get(s logical.Storage, k string) (map[string]interface{}, error) {
- return p.pathStruct(k).Get(s)
+ ps, err := p.pathStruct(s, k)
+ if err != nil {
+ return nil, err
+ }
+ return ps.Get(s)
}
// Put writes a value into the mapping
func (p *PathMap) Put(s logical.Storage, k string, v map[string]interface{}) error {
- return p.pathStruct(k).Put(s, v)
+ ps, err := p.pathStruct(s, k)
+ if err != nil {
+ return err
+ }
+ return ps.Put(s, v)
}
// Delete removes a value from the mapping
func (p *PathMap) Delete(s logical.Storage, k string) error {
- return p.pathStruct(k).Delete(s)
+ ps, err := p.pathStruct(s, k)
+ if err != nil {
+ return err
+ }
+ return ps.Delete(s)
}
// List reads the keys under a given path
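
The upgrade branch in pathStruct above is a read-time migration: if an entry
still exists under the unsalted key, it is rewritten under the salted key and
the old entry is deleted. A minimal sketch of that pattern over a plain map,
with illustrative names (the real code operates on logical.Storage):

    package main

    import "fmt"

    // migrate moves a value stored under oldKey to newKey, if present.
    // Writing the new location before deleting the old one means a crash
    // in between leaves a duplicate rather than a lost entry.
    func migrate(store map[string]string, oldKey, newKey string) {
        val, ok := store[oldKey]
        if !ok {
            return // nothing to upgrade
        }
        store[newKey] = val   // put under the salted name first
        delete(store, oldKey) // then remove the unsalted entry
    }

    func main() {
        store := map[string]string{"map/foo/b": `{"foo": "bar"}`}
        migrate(store, "map/foo/b", "map/foo/thesaltedid")
        fmt.Println(store)
    }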
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go b/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go
index 7d30d7d..ce9215b 100644
--- a/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go
+++ b/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go
@@ -254,4 +254,192 @@ func TestPathMap_Salted(t *testing.T) {
if v != nil {
t.Fatalf("bad: %#v", v)
}
+
+ // Put in a non-salted version and make sure that after reading it's been
+ // upgraded
+ err = storage.Put(&logical.StorageEntry{
+ Key: "struct/map/foo/b",
+ Value: []byte(`{"foo": "bar"}`),
+ })
+ if err != nil {
+ t.Fatal("err: %v", err)
+ }
+ // A read should transparently upgrade
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "map/foo/b",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ list, _ := storage.List("struct/map/foo/")
+ if len(list) != 1 {
+ t.Fatalf("unexpected number of entries left after upgrade; expected 1, got %d", len(list))
+ }
+ found := false
+ for _, v := range list {
+ if v == salt.SaltID("b") {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Fatal("did not find upgraded value")
+ }
+}
+
+func TestPathMap_SaltFunc(t *testing.T) {
+ storage := new(logical.InmemStorage)
+ locSalt, err := salt.NewSalt(storage, &salt.Config{
+ HashFunc: salt.SHA1Hash,
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ saltFunc := func() (*salt.Salt, error) {
+ return locSalt, nil
+ }
+ p := &PathMap{Name: "foo", SaltFunc: saltFunc}
+ var b logical.Backend = &Backend{Paths: p.Paths()}
+
+ // Write via HTTP
+ _, err = b.HandleRequest(&logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "map/foo/a",
+ Data: map[string]interface{}{
+ "value": "bar",
+ },
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+
+ // Non-salted version should not be there
+ out, err := storage.Get("struct/map/foo/a")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("non-salted key found")
+ }
+
+ // Ensure the path is salted
+ expect := locSalt.SaltID("a")
+ out, err = storage.Get("struct/map/foo/" + expect)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out == nil {
+ t.Fatalf("missing salted key")
+ }
+
+ // Read via HTTP
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "map/foo/a",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if resp.Data["value"] != "bar" {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Read via API
+ v, err := p.Get(storage, "a")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v["value"] != "bar" {
+ t.Fatalf("bad: %#v", v)
+ }
+
+ // Read via API with other casing
+ v, err = p.Get(storage, "A")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v["value"] != "bar" {
+ t.Fatalf("bad: %#v", v)
+ }
+
+ // Verify List
+ keys, err := p.List(storage, "")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if len(keys) != 1 || keys[0] != expect {
+ t.Fatalf("bad: %#v", keys)
+ }
+
+ // Delete via HTTP
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.DeleteOperation,
+ Path: "map/foo/a",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Re-read via HTTP
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "map/foo/a",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if _, ok := resp.Data["value"]; ok {
+ t.Fatalf("bad: %#v", resp)
+ }
+
+ // Re-read via API
+ v, err = p.Get(storage, "a")
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+ if v != nil {
+ t.Fatalf("bad: %#v", v)
+ }
+
+ // Put in a non-salted version and make sure that after reading it's been
+ // upgraded
+ err = storage.Put(&logical.StorageEntry{
+ Key: "struct/map/foo/b",
+ Value: []byte(`{"foo": "bar"}`),
+ })
+ if err != nil {
+ t.Fatal("err: %v", err)
+ }
+ // A read should transparently upgrade
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "map/foo/b",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ list, _ := storage.List("struct/map/foo/")
+ if len(list) != 1 {
+ t.Fatalf("unexpected number of entries left after upgrade; expected 1, got %d", len(list))
+ }
+ found := false
+ for _, v := range list {
+ if v == locSalt.SaltID("b") {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Fatal("did not find upgraded value")
+ }
}
diff --git a/vendor/github.com/hashicorp/vault/logical/identity.go b/vendor/github.com/hashicorp/vault/logical/identity.go
new file mode 100644
index 0000000..fbc4fbb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/identity.go
@@ -0,0 +1,26 @@
+package logical
+
+// Persona represents the information used by core to create an implicit
+// entity. Implicit entities get created when a client authenticates
+// successfully from any of the authentication backends (except the token
+// backend).
+//
+// This is applicable to enterprise binaries only. Persona should be set in the
+// Auth response returned by the credential backends. This structure is placed
+// in the open source repository only to enable custom authentication plugins
+// to be used along with the enterprise binary. Such plugins should fill out
+// the Persona information in the authentication response.
+type Persona struct {
+ // MountType is the type of the backend mount to which this identity
+ // belongs.
+ MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type"`
+
+ // MountAccessor is the identifier of the mount entry to which this
+ // identity belongs.
+ MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor"`
+
+ // Name is the identifier of this identity in its authentication source.
+ Name string `json:"name" structs:"name" mapstructure:"name"`
+}
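
Given the struct tags above, a Persona marshals to snake_case JSON. A quick
standalone illustration with made-up values (only the json tags are
reproduced here):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Persona struct {
        MountType     string `json:"mount_type"`
        MountAccessor string `json:"mount_accessor"`
        Name          string `json:"name"`
    }

    func main() {
        p := Persona{MountType: "github", MountAccessor: "auth_github_1234", Name: "octocat"}
        out, err := json.Marshal(p)
        if err != nil {
            panic(err)
        }
        // {"mount_type":"github","mount_accessor":"auth_github_1234","name":"octocat"}
        fmt.Println(string(out))
    }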
diff --git a/vendor/github.com/hashicorp/vault/logical/logical.go b/vendor/github.com/hashicorp/vault/logical/logical.go
index 3b66fba..9ce0d85 100644
--- a/vendor/github.com/hashicorp/vault/logical/logical.go
+++ b/vendor/github.com/hashicorp/vault/logical/logical.go
@@ -2,6 +2,29 @@ package logical
import log "github.com/mgutz/logxi/v1"
+// BackendType is the type of backend that is being implemented
+type BackendType uint32
+
+// These are the types of backends that can be derived from
+// logical.Backend
+const (
+ TypeUnknown BackendType = 0 // This is also the zero-value for BackendType
+ TypeLogical BackendType = 1
+ TypeCredential BackendType = 2
+)
+
+// Stringer implementation
+func (b BackendType) String() string {
+ switch b {
+ case TypeLogical:
+ return "secret"
+ case TypeCredential:
+ return "auth"
+ }
+
+ return "unknown"
+}
+
// Backend interface must be implemented to be "mountable" at
// a given path. Requests flow through a router which has various mount
// points that flow to a logical backend. The logic of each backend is flexible,
@@ -27,6 +50,11 @@ type Backend interface {
// information, such as globally configured default and max lease TTLs.
System() SystemView
+ // Logger provides an interface to access the underlying logger. This
+ // is useful when a struct embeds a Backend-implemented struct that
+ // contains a private instance of logger.
+ Logger() log.Logger
+
// HandleExistenceCheck is used to handle a request and generate a response
// indicating whether the given path exists or not; this is used to
// understand whether the request must have a Create or Update capability
@@ -47,6 +75,16 @@ type Backend interface {
// to the backend. The backend can use this to clear any caches or reset
// internal state as needed.
InvalidateKey(key string)
+
+ // Setup is used to set up the backend based on the provided backend
+ // configuration.
+ Setup(*BackendConfig) error
+
+ // Type returns the BackendType for the particular backend
+ Type() BackendType
+
+ // RegisterLicense performs backend license registration
+ RegisterLicense(interface{}) error
}
// BackendConfig is provided to the factory to initialize the backend
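
Note the asymmetry in the Stringer above: TypeLogical renders as "secret" and
TypeCredential as "auth", matching how the two kinds of backends are mounted.
A self-contained sketch replicating just that mapping outside the vault
packages:

    package main

    import "fmt"

    type BackendType uint32

    const (
        TypeUnknown    BackendType = 0 // zero value
        TypeLogical    BackendType = 1
        TypeCredential BackendType = 2
    )

    func (b BackendType) String() string {
        switch b {
        case TypeLogical:
            return "secret"
        case TypeCredential:
            return "auth"
        }
        return "unknown"
    }

    func main() {
        for _, t := range []BackendType{TypeUnknown, TypeLogical, TypeCredential} {
            fmt.Printf("%d -> %s\n", uint32(t), t)
        }
    }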
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend.go
new file mode 100644
index 0000000..081922c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/backend.go
@@ -0,0 +1,24 @@
+package plugin
+
+import (
+ "net/rpc"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/logical"
+)
+
+// BackendPlugin is the plugin.Plugin implementation
+type BackendPlugin struct {
+ Factory func(*logical.BackendConfig) (logical.Backend, error)
+ metadataMode bool
+}
+
+// Server gets called on plugin.Serve()
+func (b *BackendPlugin) Server(broker *plugin.MuxBroker) (interface{}, error) {
+ return &backendPluginServer{factory: b.Factory, broker: broker}, nil
+}
+
+// Client gets called on plugin.NewClient()
+func (b BackendPlugin) Client(broker *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+ return &backendPluginClient{client: c, broker: broker, metadataMode: b.metadataMode}, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go
new file mode 100644
index 0000000..cc2d83b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go
@@ -0,0 +1,285 @@
+package plugin
+
+import (
+ "errors"
+ "net/rpc"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/logical"
+ log "github.com/mgutz/logxi/v1"
+)
+
+var (
+ ErrClientInMetadataMode = errors.New("plugin client can not perform action while in metadata mode")
+)
+
+// backendPluginClient implements logical.Backend and is the
+// go-plugin client.
+type backendPluginClient struct {
+ broker *plugin.MuxBroker
+ client *rpc.Client
+ metadataMode bool
+
+ system logical.SystemView
+ logger log.Logger
+}
+
+// HandleRequestArgs is the args for HandleRequest method.
+type HandleRequestArgs struct {
+ StorageID uint32
+ Request *logical.Request
+}
+
+// HandleRequestReply is the reply for HandleRequest method.
+type HandleRequestReply struct {
+ Response *logical.Response
+ Error *plugin.BasicError
+}
+
+// SpecialPathsReply is the reply for SpecialPaths method.
+type SpecialPathsReply struct {
+ Paths *logical.Paths
+}
+
+// SystemReply is the reply for System method.
+type SystemReply struct {
+ SystemView logical.SystemView
+ Error *plugin.BasicError
+}
+
+// HandleExistenceCheckArgs is the args for HandleExistenceCheck method.
+type HandleExistenceCheckArgs struct {
+ StorageID uint32
+ Request *logical.Request
+}
+
+// HandleExistenceCheckReply is the reply for HandleExistenceCheck method.
+type HandleExistenceCheckReply struct {
+ CheckFound bool
+ Exists bool
+ Error *plugin.BasicError
+}
+
+// SetupArgs is the args for Setup method.
+type SetupArgs struct {
+ StorageID uint32
+ LoggerID uint32
+ SysViewID uint32
+ Config map[string]string
+}
+
+// SetupReply is the reply for Setup method.
+type SetupReply struct {
+ Error *plugin.BasicError
+}
+
+// TypeReply is the reply for the Type method.
+type TypeReply struct {
+ Type logical.BackendType
+}
+
+// RegisterLicenseArgs is the args for the RegisterLicense method.
+type RegisterLicenseArgs struct {
+ License interface{}
+}
+
+// RegisterLicenseReply is the reply for the RegisterLicense method.
+type RegisterLicenseReply struct {
+ Error *plugin.BasicError
+}
+
+func (b *backendPluginClient) HandleRequest(req *logical.Request) (*logical.Response, error) {
+ if b.metadataMode {
+ return nil, ErrClientInMetadataMode
+ }
+
+ // Do not send the storage, since go-plugin cannot serialize
+ // interfaces. The server will pick up the storage from the shim.
+ req.Storage = nil
+ args := &HandleRequestArgs{
+ Request: req,
+ }
+ var reply HandleRequestReply
+
+ if req.Connection != nil {
+ oldConnState := req.Connection.ConnState
+ req.Connection.ConnState = nil
+ defer func() {
+ req.Connection.ConnState = oldConnState
+ }()
+ }
+
+ err := b.client.Call("Plugin.HandleRequest", args, &reply)
+ if err != nil {
+ return nil, err
+ }
+ if reply.Error != nil {
+ if reply.Error.Error() == logical.ErrUnsupportedOperation.Error() {
+ return nil, logical.ErrUnsupportedOperation
+ }
+ return nil, reply.Error
+ }
+
+ return reply.Response, nil
+}
+
+func (b *backendPluginClient) SpecialPaths() *logical.Paths {
+ var reply SpecialPathsReply
+ err := b.client.Call("Plugin.SpecialPaths", new(interface{}), &reply)
+ if err != nil {
+ return nil
+ }
+
+ return reply.Paths
+}
+
+// System returns vault's system view. The backend client stores the view during
+// Setup, so there is no need to shim the system just to get it back.
+func (b *backendPluginClient) System() logical.SystemView {
+ return b.system
+}
+
+// Logger returns vault's logger. The backend client stores the logger during
+// Setup, so there is no need to shim the logger just to get it back.
+func (b *backendPluginClient) Logger() log.Logger {
+ return b.logger
+}
+
+func (b *backendPluginClient) HandleExistenceCheck(req *logical.Request) (bool, bool, error) {
+ if b.metadataMode {
+ return false, false, ErrClientInMetadataMode
+ }
+
+ // Do not send the storage, since go-plugin cannot serialize
+ // interfaces. The server will pick up the storage from the shim.
+ req.Storage = nil
+ args := &HandleExistenceCheckArgs{
+ Request: req,
+ }
+ var reply HandleExistenceCheckReply
+
+ if req.Connection != nil {
+ oldConnState := req.Connection.ConnState
+ req.Connection.ConnState = nil
+ defer func() {
+ req.Connection.ConnState = oldConnState
+ }()
+ }
+
+ err := b.client.Call("Plugin.HandleExistenceCheck", args, &reply)
+ if err != nil {
+ return false, false, err
+ }
+ if reply.Error != nil {
+ // THINKING: Should this be a switch on all error types?
+ if reply.Error.Error() == logical.ErrUnsupportedPath.Error() {
+ return false, false, logical.ErrUnsupportedPath
+ }
+ return false, false, reply.Error
+ }
+
+ return reply.CheckFound, reply.Exists, nil
+}
+
+func (b *backendPluginClient) Cleanup() {
+ b.client.Call("Plugin.Cleanup", new(interface{}), &struct{}{})
+}
+
+func (b *backendPluginClient) Initialize() error {
+ if b.metadataMode {
+ return ErrClientInMetadataMode
+ }
+ err := b.client.Call("Plugin.Initialize", new(interface{}), &struct{}{})
+ return err
+}
+
+func (b *backendPluginClient) InvalidateKey(key string) {
+ if b.metadataMode {
+ return
+ }
+ b.client.Call("Plugin.InvalidateKey", key, &struct{}{})
+}
+
+func (b *backendPluginClient) Setup(config *logical.BackendConfig) error {
+ // Shim logical.Storage
+ storageImpl := config.StorageView
+ if b.metadataMode {
+ storageImpl = &NOOPStorage{}
+ }
+ storageID := b.broker.NextId()
+ go b.broker.AcceptAndServe(storageID, &StorageServer{
+ impl: storageImpl,
+ })
+
+ // Shim log.Logger
+ loggerImpl := config.Logger
+ if b.metadataMode {
+ loggerImpl = log.NullLog
+ }
+ loggerID := b.broker.NextId()
+ go b.broker.AcceptAndServe(loggerID, &LoggerServer{
+ logger: loggerImpl,
+ })
+
+ // Shim logical.SystemView
+ sysViewImpl := config.System
+ if b.metadataMode {
+ sysViewImpl = &logical.StaticSystemView{}
+ }
+ sysViewID := b.broker.NextId()
+ go b.broker.AcceptAndServe(sysViewID, &SystemViewServer{
+ impl: sysViewImpl,
+ })
+
+ args := &SetupArgs{
+ StorageID: storageID,
+ LoggerID: loggerID,
+ SysViewID: sysViewID,
+ Config: config.Config,
+ }
+ var reply SetupReply
+
+ err := b.client.Call("Plugin.Setup", args, &reply)
+ if err != nil {
+ return err
+ }
+ if reply.Error != nil {
+ return reply.Error
+ }
+
+ // Set system and logger for getter methods
+ b.system = config.System
+ b.logger = config.Logger
+
+ return nil
+}
+
+func (b *backendPluginClient) Type() logical.BackendType {
+ var reply TypeReply
+ err := b.client.Call("Plugin.Type", new(interface{}), &reply)
+ if err != nil {
+ return logical.TypeUnknown
+ }
+
+ return logical.BackendType(reply.Type)
+}
+
+func (b *backendPluginClient) RegisterLicense(license interface{}) error {
+ if b.metadataMode {
+ return ErrClientInMetadataMode
+ }
+
+ var reply RegisterLicenseReply
+ args := RegisterLicenseArgs{
+ License: license,
+ }
+ err := b.client.Call("Plugin.RegisterLicense", args, &reply)
+ if err != nil {
+ return err
+ }
+ if reply.Error != nil {
+ return reply.Error
+ }
+
+ return nil
+}
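
All of the shims in this file follow the same net/rpc convention: exported
Args/Reply structs, methods of the form Method(args, *reply) error, and
backend errors carried inside the reply as *plugin.BasicError so they remain
distinguishable from transport errors. A minimal standalone net/rpc pair
showing that shape (no go-plugin involved; names are illustrative):

    package main

    import (
        "fmt"
        "net"
        "net/rpc"
    )

    type EchoArgs struct{ Msg string }
    type EchoReply struct{ Msg string }

    type Server struct{}

    // Echo satisfies the net/rpc method requirements: exported, two
    // exported argument types, the second a pointer, returning error.
    func (s *Server) Echo(args *EchoArgs, reply *EchoReply) error {
        reply.Msg = args.Msg
        return nil
    }

    func main() {
        srv := rpc.NewServer()
        srv.RegisterName("Plugin", &Server{})

        cConn, sConn := net.Pipe()
        go srv.ServeConn(sConn)

        client := rpc.NewClient(cConn)
        defer client.Close()

        var reply EchoReply
        if err := client.Call("Plugin.Echo", &EchoArgs{Msg: "hello"}, &reply); err != nil {
            panic(err)
        }
        fmt.Println(reply.Msg) // hello
    }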
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go
new file mode 100644
index 0000000..47045b1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go
@@ -0,0 +1,187 @@
+package plugin
+
+import (
+ "errors"
+ "net/rpc"
+ "os"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+var (
+ ErrServerInMetadataMode = errors.New("plugin server can not perform action while in metadata mode")
+)
+
+// backendPluginServer is the RPC server that backendPluginClient talks to;
+// its methods conform to the requirements of net/rpc.
+type backendPluginServer struct {
+ broker *plugin.MuxBroker
+ backend logical.Backend
+ factory func(*logical.BackendConfig) (logical.Backend, error)
+
+ loggerClient *rpc.Client
+ sysViewClient *rpc.Client
+ storageClient *rpc.Client
+}
+
+func inMetadataMode() bool {
+ return os.Getenv(pluginutil.PluginMetadaModeEnv) == "true"
+}
+
+func (b *backendPluginServer) HandleRequest(args *HandleRequestArgs, reply *HandleRequestReply) error {
+ if inMetadataMode() {
+ return ErrServerInMetadataMode
+ }
+
+ storage := &StorageClient{client: b.storageClient}
+ args.Request.Storage = storage
+
+ resp, err := b.backend.HandleRequest(args.Request)
+ *reply = HandleRequestReply{
+ Response: resp,
+ Error: plugin.NewBasicError(err),
+ }
+
+ return nil
+}
+
+func (b *backendPluginServer) SpecialPaths(_ interface{}, reply *SpecialPathsReply) error {
+ *reply = SpecialPathsReply{
+ Paths: b.backend.SpecialPaths(),
+ }
+ return nil
+}
+
+func (b *backendPluginServer) HandleExistenceCheck(args *HandleExistenceCheckArgs, reply *HandleExistenceCheckReply) error {
+ if inMetadataMode() {
+ return ErrServerInMetadataMode
+ }
+
+ storage := &StorageClient{client: b.storageClient}
+ args.Request.Storage = storage
+
+ checkFound, exists, err := b.backend.HandleExistenceCheck(args.Request)
+ *reply = HandleExistenceCheckReply{
+ CheckFound: checkFound,
+ Exists: exists,
+ Error: plugin.NewBasicError(err),
+ }
+
+ return nil
+}
+
+func (b *backendPluginServer) Cleanup(_ interface{}, _ *struct{}) error {
+ b.backend.Cleanup()
+
+ // Close rpc clients
+ b.loggerClient.Close()
+ b.sysViewClient.Close()
+ b.storageClient.Close()
+ return nil
+}
+
+func (b *backendPluginServer) Initialize(_ interface{}, _ *struct{}) error {
+ if inMetadataMode() {
+ return ErrServerInMetadataMode
+ }
+
+ err := b.backend.Initialize()
+ return err
+}
+
+func (b *backendPluginServer) InvalidateKey(args string, _ *struct{}) error {
+ if inMetadataMode() {
+ return ErrServerInMetadataMode
+ }
+
+ b.backend.InvalidateKey(args)
+ return nil
+}
+
+// Setup dials into the plugin's broker to get a shimmed storage, logger, and
+// system view of the backend. This method also instantiates the underlying
+// backend through its factory func for the server side of the plugin.
+func (b *backendPluginServer) Setup(args *SetupArgs, reply *SetupReply) error {
+ // Dial for storage
+ storageConn, err := b.broker.Dial(args.StorageID)
+ if err != nil {
+ *reply = SetupReply{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+ }
+ rawStorageClient := rpc.NewClient(storageConn)
+ b.storageClient = rawStorageClient
+
+ storage := &StorageClient{client: rawStorageClient}
+
+ // Dial for logger
+ loggerConn, err := b.broker.Dial(args.LoggerID)
+ if err != nil {
+ *reply = SetupReply{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+ }
+ rawLoggerClient := rpc.NewClient(loggerConn)
+ b.loggerClient = rawLoggerClient
+
+ logger := &LoggerClient{client: rawLoggerClient}
+
+ // Dial for sys view
+ sysViewConn, err := b.broker.Dial(args.SysViewID)
+ if err != nil {
+ *reply = SetupReply{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+ }
+ rawSysViewClient := rpc.NewClient(sysViewConn)
+ b.sysViewClient = rawSysViewClient
+
+ sysView := &SystemViewClient{client: rawSysViewClient}
+
+ config := &logical.BackendConfig{
+ StorageView: storage,
+ Logger: logger,
+ System: sysView,
+ Config: args.Config,
+ }
+
+ // Call the underlying backend factory after shims have been created
+ // to set b.backend
+ backend, err := b.factory(config)
+ if err != nil {
+ *reply = SetupReply{
+ Error: plugin.NewBasicError(err),
+ }
+ }
+ b.backend = backend
+
+ return nil
+}
+
+func (b *backendPluginServer) Type(_ interface{}, reply *TypeReply) error {
+ *reply = TypeReply{
+ Type: b.backend.Type(),
+ }
+
+ return nil
+}
+
+func (b *backendPluginServer) RegisterLicense(args *RegisterLicenseArgs, reply *RegisterLicenseReply) error {
+ if inMetadataMode() {
+ return ErrServerInMetadataMode
+ }
+
+ err := b.backend.RegisterLicense(args.License)
+ if err != nil {
+ *reply = RegisterLicenseReply{
+ Error: plugin.NewBasicError(err),
+ }
+ }
+
+ return nil
+}
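
The metadata-mode gate used throughout this server is a per-call environment
check. A standalone sketch of the same guard pattern; the environment
variable name below is a stand-in, since the real one comes from a constant
in helper/pluginutil:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    var errMetadataMode = errors.New("operation not available in metadata mode")

    func inMetadataMode() bool {
        // Illustrative variable name only.
        return os.Getenv("EXAMPLE_PLUGIN_METADATA_MODE") == "true"
    }

    func handleRequest() error {
        if inMetadataMode() {
            return errMetadataMode
        }
        // ... perform the real work here ...
        return nil
    }

    func main() {
        os.Setenv("EXAMPLE_PLUGIN_METADATA_MODE", "true")
        fmt.Println(handleRequest()) // operation not available in metadata mode
    }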
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend_test.go
new file mode 100644
index 0000000..deb5b63
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/backend_test.go
@@ -0,0 +1,178 @@
+package plugin
+
+import (
+ "testing"
+ "time"
+
+ gplugin "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/plugin/mock"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestBackendPlugin_impl(t *testing.T) {
+ var _ gplugin.Plugin = new(BackendPlugin)
+ var _ logical.Backend = new(backendPluginClient)
+}
+
+func TestBackendPlugin_HandleRequest(t *testing.T) {
+ b, cleanup := testBackend(t)
+ defer cleanup()
+
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "kv/foo",
+ Data: map[string]interface{}{
+ "value": "bar",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Data["value"] != "bar" {
+ t.Fatalf("bad: %#v", resp)
+ }
+}
+
+func TestBackendPlugin_SpecialPaths(t *testing.T) {
+ b, cleanup := testBackend(t)
+ defer cleanup()
+
+ paths := b.SpecialPaths()
+ if paths == nil {
+ t.Fatal("SpecialPaths() returned nil")
+ }
+}
+
+func TestBackendPlugin_System(t *testing.T) {
+ b, cleanup := testBackend(t)
+ defer cleanup()
+
+ sys := b.System()
+ if sys == nil {
+ t.Fatal("System() returned nil")
+ }
+
+ actual := sys.DefaultLeaseTTL()
+ expected := 300 * time.Second
+
+ if actual != expected {
+ t.Fatalf("bad: %v, expected %v", actual, expected)
+ }
+}
+
+func TestBackendPlugin_Logger(t *testing.T) {
+ b, cleanup := testBackend(t)
+ defer cleanup()
+
+ logger := b.Logger()
+ if logger == nil {
+ t.Fatal("Logger() returned nil")
+ }
+}
+
+func TestBackendPlugin_HandleExistenceCheck(t *testing.T) {
+ b, cleanup := testBackend(t)
+ defer cleanup()
+
+ checkFound, exists, err := b.HandleExistenceCheck(&logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "kv/foo",
+ Data: map[string]interface{}{"value": "bar"},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !checkFound {
+ t.Fatal("existence check not found for path 'kv/foo")
+ }
+ if exists {
+ t.Fatal("existence check should have returned 'false' for 'kv/foo'")
+ }
+}
+
+func TestBackendPlugin_Cleanup(t *testing.T) {
+ b, cleanup := testBackend(t)
+ defer cleanup()
+
+ b.Cleanup()
+}
+
+func TestBackendPlugin_Initialize(t *testing.T) {
+ b, cleanup := testBackend(t)
+ defer cleanup()
+
+ err := b.Initialize()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackendPlugin_InvalidateKey(t *testing.T) {
+ b, cleanup := testBackend(t)
+ defer cleanup()
+
+ resp, err := b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "internal",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Data["value"] == "" {
+ t.Fatalf("bad: %#v, expected non-empty value", resp)
+ }
+
+ b.InvalidateKey("internal")
+
+ resp, err = b.HandleRequest(&logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "internal",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Data["value"] != "" {
+ t.Fatalf("bad: expected empty response data, got %#v", resp)
+ }
+}
+
+func TestBackendPlugin_Setup(t *testing.T) {
+ _, cleanup := testBackend(t)
+ defer cleanup()
+}
+
+func testBackend(t *testing.T) (logical.Backend, func()) {
+ // Create a mock provider
+ pluginMap := map[string]gplugin.Plugin{
+ "backend": &BackendPlugin{
+ Factory: mock.Factory,
+ },
+ }
+ client, _ := gplugin.TestPluginRPCConn(t, pluginMap)
+ cleanup := func() {
+ client.Close()
+ }
+
+ // Request the backend
+ raw, err := client.Dispense(BackendPluginName)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b := raw.(logical.Backend)
+
+ err = b.Setup(&logical.BackendConfig{
+ Logger: logformat.NewVaultLogger(log.LevelTrace),
+ System: &logical.StaticSystemView{
+ DefaultLeaseTTLVal: 300 * time.Second,
+ MaxLeaseTTLVal: 1800 * time.Second,
+ },
+ StorageView: &logical.InmemStorage{},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return b, cleanup
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/logger.go b/vendor/github.com/hashicorp/vault/logical/plugin/logger.go
new file mode 100644
index 0000000..ceb8947
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/logger.go
@@ -0,0 +1,205 @@
+package plugin
+
+import (
+ "net/rpc"
+
+ plugin "github.com/hashicorp/go-plugin"
+ log "github.com/mgutz/logxi/v1"
+)
+
+type LoggerClient struct {
+ client *rpc.Client
+}
+
+func (l *LoggerClient) Trace(msg string, args ...interface{}) {
+ cArgs := &LoggerArgs{
+ Msg: msg,
+ Args: args,
+ }
+ l.client.Call("Plugin.Trace", cArgs, &struct{}{})
+}
+
+func (l *LoggerClient) Debug(msg string, args ...interface{}) {
+ cArgs := &LoggerArgs{
+ Msg: msg,
+ Args: args,
+ }
+ l.client.Call("Plugin.Debug", cArgs, &struct{}{})
+}
+
+func (l *LoggerClient) Info(msg string, args ...interface{}) {
+ cArgs := &LoggerArgs{
+ Msg: msg,
+ Args: args,
+ }
+ l.client.Call("Plugin.Info", cArgs, &struct{}{})
+}
+func (l *LoggerClient) Warn(msg string, args ...interface{}) error {
+ var reply LoggerReply
+ cArgs := &LoggerArgs{
+ Msg: msg,
+ Args: args,
+ }
+ err := l.client.Call("Plugin.Warn", cArgs, &reply)
+ if err != nil {
+ return err
+ }
+ if reply.Error != nil {
+ return reply.Error
+ }
+
+ return nil
+}
+func (l *LoggerClient) Error(msg string, args ...interface{}) error {
+ var reply LoggerReply
+ cArgs := &LoggerArgs{
+ Msg: msg,
+ Args: args,
+ }
+ err := l.client.Call("Plugin.Error", cArgs, &reply)
+ if err != nil {
+ return err
+ }
+ if reply.Error != nil {
+ return reply.Error
+ }
+
+ return nil
+}
+
+func (l *LoggerClient) Fatal(msg string, args ...interface{}) {
+ // NOOP since it's not actually used within vault
+ return
+}
+
+func (l *LoggerClient) Log(level int, msg string, args []interface{}) {
+ cArgs := &LoggerArgs{
+ Level: level,
+ Msg: msg,
+ Args: args,
+ }
+ l.client.Call("Plugin.Log", cArgs, &struct{}{})
+}
+
+func (l *LoggerClient) SetLevel(level int) {
+ l.client.Call("Plugin.SetLevel", level, &struct{}{})
+}
+
+func (l *LoggerClient) IsTrace() bool {
+ var reply LoggerReply
+ l.client.Call("Plugin.IsTrace", new(interface{}), &reply)
+ return reply.IsTrue
+}
+func (l *LoggerClient) IsDebug() bool {
+ var reply LoggerReply
+ l.client.Call("Plugin.IsDebug", new(interface{}), &reply)
+ return reply.IsTrue
+}
+
+func (l *LoggerClient) IsInfo() bool {
+ var reply LoggerReply
+ l.client.Call("Plugin.IsInfo", new(interface{}), &reply)
+ return reply.IsTrue
+}
+
+func (l *LoggerClient) IsWarn() bool {
+ var reply LoggerReply
+ l.client.Call("Plugin.IsWarn", new(interface{}), &reply)
+ return reply.IsTrue
+}
+
+type LoggerServer struct {
+ logger log.Logger
+}
+
+func (l *LoggerServer) Trace(args *LoggerArgs, _ *struct{}) error {
+ l.logger.Trace(args.Msg, args.Args)
+ return nil
+}
+
+func (l *LoggerServer) Debug(args *LoggerArgs, _ *struct{}) error {
+ l.logger.Debug(args.Msg, args.Args)
+ return nil
+}
+
+func (l *LoggerServer) Info(args *LoggerArgs, _ *struct{}) error {
+ l.logger.Info(args.Msg, args.Args)
+ return nil
+}
+
+func (l *LoggerServer) Warn(args *LoggerArgs, reply *LoggerReply) error {
+ err := l.logger.Warn(args.Msg, args.Args)
+ if err != nil {
+ *reply = LoggerReply{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+ }
+ return nil
+}
+
+func (l *LoggerServer) Error(args *LoggerArgs, reply *LoggerReply) error {
+ err := l.logger.Error(args.Msg, args.Args)
+ if err != nil {
+ *reply = LoggerReply{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+ }
+ return nil
+}
+
+func (l *LoggerServer) Log(args *LoggerArgs, _ *struct{}) error {
+ l.logger.Log(args.Level, args.Msg, args.Args)
+ return nil
+}
+
+func (l *LoggerServer) SetLevel(args int, _ *struct{}) error {
+ l.logger.SetLevel(args)
+ return nil
+}
+
+func (l *LoggerServer) IsTrace(args interface{}, reply *LoggerReply) error {
+ result := l.logger.IsTrace()
+ *reply = LoggerReply{
+ IsTrue: result,
+ }
+ return nil
+}
+
+func (l *LoggerServer) IsDebug(args interface{}, reply *LoggerReply) error {
+ result := l.logger.IsDebug()
+ *reply = LoggerReply{
+ IsTrue: result,
+ }
+ return nil
+}
+
+func (l *LoggerServer) IsInfo(args interface{}, reply *LoggerReply) error {
+ result := l.logger.IsInfo()
+ *reply = LoggerReply{
+ IsTrue: result,
+ }
+ return nil
+}
+
+func (l *LoggerServer) IsWarn(args interface{}, reply *LoggerReply) error {
+ result := l.logger.IsWarn()
+ *reply = LoggerReply{
+ IsTrue: result,
+ }
+ return nil
+}
+
+type LoggerArgs struct {
+ Level int
+ Msg string
+ Args []interface{}
+}
+
+// LoggerReply contains the RPC reply. Not all fields may be used
+// for a particular RPC call.
+type LoggerReply struct {
+ IsTrue bool
+ Error *plugin.BasicError
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/logger_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/logger_test.go
new file mode 100644
index 0000000..10b389c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/logger_test.go
@@ -0,0 +1,163 @@
+package plugin
+
+import (
+ "bufio"
+ "bytes"
+ "io/ioutil"
+ "strings"
+ "testing"
+
+ plugin "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestLogger_impl(t *testing.T) {
+ var _ log.Logger = new(LoggerClient)
+}
+
+func TestLogger_levels(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ var buf bytes.Buffer
+ writer := bufio.NewWriter(&buf)
+
+ l := logformat.NewVaultLoggerWithWriter(writer, log.LevelTrace)
+
+ server.RegisterName("Plugin", &LoggerServer{
+ logger: l,
+ })
+
+ expected := "foobar"
+ testLogger := &LoggerClient{client: client}
+
+ // Test trace
+ testLogger.Trace(expected)
+ if err := writer.Flush(); err != nil {
+ t.Fatal(err)
+ }
+ result := buf.String()
+ buf.Reset()
+ if !strings.Contains(result, expected) {
+ t.Fatalf("expected log to contain %s, got %s", expected, result)
+ }
+
+ // Test debug
+ testLogger.Debug(expected)
+ if err := writer.Flush(); err != nil {
+ t.Fatal(err)
+ }
+ result = buf.String()
+ buf.Reset()
+ if !strings.Contains(result, expected) {
+ t.Fatalf("expected log to contain %s, got %s", expected, result)
+ }
+
+ // Test info
+ testLogger.Info(expected)
+ if err := writer.Flush(); err != nil {
+ t.Fatal(err)
+ }
+ result = buf.String()
+ buf.Reset()
+ if !strings.Contains(result, expected) {
+ t.Fatalf("expected log to contain %s, got %s", expected, result)
+ }
+
+ // Test warn
+ testLogger.Warn(expected)
+ if err := writer.Flush(); err != nil {
+ t.Fatal(err)
+ }
+ result = buf.String()
+ buf.Reset()
+ if !strings.Contains(result, expected) {
+ t.Fatalf("expected log to contain %s, got %s", expected, result)
+ }
+
+ // Test error
+ testLogger.Error(expected)
+ if err := writer.Flush(); err != nil {
+ t.Fatal(err)
+ }
+ result = buf.String()
+ buf.Reset()
+ if !strings.Contains(result, expected) {
+ t.Fatalf("expected log to contain %s, got %s", expected, result)
+ }
+
+ // Test fatal
+ testLogger.Fatal(expected)
+ if err := writer.Flush(); err != nil {
+ t.Fatal(err)
+ }
+ result = buf.String()
+ buf.Reset()
+ if result != "" {
+ t.Fatalf("expected log Fatal() to be no-op, got %s", result)
+ }
+}
+
+func TestLogger_isLevels(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ l := logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelAll)
+
+ server.RegisterName("Plugin", &LoggerServer{
+ logger: l,
+ })
+
+ testLogger := &LoggerClient{client: client}
+
+ if !testLogger.IsDebug() || !testLogger.IsInfo() || !testLogger.IsTrace() || !testLogger.IsWarn() {
+ t.Fatal("expected logger to return true for all logger level checks")
+ }
+}
+
+func TestLogger_log(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ var buf bytes.Buffer
+ writer := bufio.NewWriter(&buf)
+
+ l := logformat.NewVaultLoggerWithWriter(writer, log.LevelTrace)
+
+ server.RegisterName("Plugin", &LoggerServer{
+ logger: l,
+ })
+
+ expected := "foobar"
+ testLogger := &LoggerClient{client: client}
+
+ // Test Log at the info level
+ testLogger.Log(log.LevelInfo, expected, nil)
+ if err := writer.Flush(); err != nil {
+ t.Fatal(err)
+ }
+ result := buf.String()
+ if !strings.Contains(result, expected) {
+ t.Fatalf("expected log to contain %s, got %s", expected, result)
+ }
+
+}
+
+func TestLogger_setLevel(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ l := log.NewLogger(ioutil.Discard, "test-logger")
+
+ server.RegisterName("Plugin", &LoggerServer{
+ logger: l,
+ })
+
+ testLogger := &LoggerClient{client: client}
+ testLogger.SetLevel(log.LevelWarn)
+
+ if !testLogger.IsWarn() {
+ t.Fatal("expected logger to support warn level")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend.go
new file mode 100644
index 0000000..ac8c0ba
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend.go
@@ -0,0 +1,74 @@
+package mock
+
+import (
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// New returns a new backend as an interface. This func
+// is only necessary for builtin backend plugins.
+func New() (interface{}, error) {
+ return Backend(), nil
+}
+
+// Factory returns a new backend as logical.Backend.
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
+ b := Backend()
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// FactoryType is a wrapper func that allows the Factory func to specify
+// the backend type for the mock backend plugin instance.
+func FactoryType(backendType logical.BackendType) func(*logical.BackendConfig) (logical.Backend, error) {
+ return func(conf *logical.BackendConfig) (logical.Backend, error) {
+ b := Backend()
+ b.BackendType = backendType
+ if err := b.Setup(conf); err != nil {
+ return nil, err
+ }
+ return b, nil
+ }
+}
+
+// Backend returns a pointer to a private struct that embeds *framework.Backend.
+func Backend() *backend {
+ var b backend
+ b.Backend = &framework.Backend{
+ Help: "",
+ Paths: framework.PathAppend(
+ errorPaths(&b),
+ kvPaths(&b),
+ []*framework.Path{
+ pathInternal(&b),
+ pathSpecial(&b),
+ },
+ ),
+ PathsSpecial: &logical.Paths{
+ Unauthenticated: []string{
+ "special",
+ },
+ },
+ Secrets: []*framework.Secret{},
+ Invalidate: b.invalidate,
+ BackendType: logical.TypeLogical,
+ }
+ b.internal = "bar"
+ return &b
+}
+
+type backend struct {
+ *framework.Backend
+
+ // internal is used to test invalidate
+ internal string
+}
+
+func (b *backend) invalidate(key string) {
+ switch key {
+ case "internal":
+ b.internal = ""
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend_test.go
new file mode 100644
index 0000000..075911c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend_test.go
@@ -0,0 +1,11 @@
+package mock
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestBackend_impl(t *testing.T) {
+ var _ logical.Backend = new(backend)
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/mock-plugin/main.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/mock-plugin/main.go
new file mode 100644
index 0000000..b1b7fbd
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/mock-plugin/main.go
@@ -0,0 +1,31 @@
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/plugin"
+ "github.com/hashicorp/vault/logical/plugin/mock"
+)
+
+func main() {
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(os.Args[1:]) // Ignore command, strictly parse flags
+
+ tlsConfig := apiClientMeta.GetTLSConfig()
+ tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig)
+
+ factoryFunc := mock.FactoryType(logical.TypeLogical)
+
+ err := plugin.Serve(&plugin.ServeOpts{
+ BackendFactoryFunc: factoryFunc,
+ TLSProviderFunc: tlsProviderFunc,
+ })
+ if err != nil {
+ log.Println(err)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_errors.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_errors.go
new file mode 100644
index 0000000..00c4e3d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_errors.go
@@ -0,0 +1,32 @@
+package mock
+
+import (
+ "net/rpc"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// errorPaths is used to test RPC error handling in the plugin system. Both
+// paths below return rpc.ErrShutdown from their read callback.
+func errorPaths(b *backend) []*framework.Path {
+ return []*framework.Path{
+ &framework.Path{
+ Pattern: "errors/rpc",
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathErrorRPCRead,
+ },
+ },
+ &framework.Path{
+ Pattern: "errors/kill",
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathErrorRPCRead,
+ },
+ },
+ }
+}
+
+func (b *backend) pathErrorRPCRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return nil, rpc.ErrShutdown
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_internal.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_internal.go
new file mode 100644
index 0000000..92c4f8b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_internal.go
@@ -0,0 +1,41 @@
+package mock
+
+import (
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// pathInternal is used to test viewing internal backend values. In this case,
+// it is used to test the invalidate func.
+func pathInternal(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "internal",
+ Fields: map[string]*framework.FieldSchema{
+ "value": &framework.FieldSchema{Type: framework.TypeString},
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathInternalUpdate,
+ logical.ReadOperation: b.pathInternalRead,
+ },
+ }
+}
+
+func (b *backend) pathInternalUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ value := data.Get("value").(string)
+ b.internal = value
+ // Nothing to return on a successful update
+ return nil, nil
+
+}
+
+func (b *backend) pathInternalRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Return the secret
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "value": b.internal,
+ },
+ }, nil
+
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_kv.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_kv.go
new file mode 100644
index 0000000..badede2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_kv.go
@@ -0,0 +1,103 @@
+package mock
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// kvPaths is used to test CRUD and List operations. It is a simplified
+// version of the passthrough backend that only accepts string values.
+func kvPaths(b *backend) []*framework.Path {
+ return []*framework.Path{
+ &framework.Path{
+ Pattern: "kv/?",
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.pathKVList,
+ },
+ },
+ &framework.Path{
+ Pattern: "kv/" + framework.GenericNameRegex("key"),
+ Fields: map[string]*framework.FieldSchema{
+ "key": &framework.FieldSchema{Type: framework.TypeString},
+ "value": &framework.FieldSchema{Type: framework.TypeString},
+ },
+ ExistenceCheck: b.pathExistenceCheck,
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathKVRead,
+ logical.CreateOperation: b.pathKVCreateUpdate,
+ logical.UpdateOperation: b.pathKVCreateUpdate,
+ logical.DeleteOperation: b.pathKVDelete,
+ },
+ },
+ }
+}
+
+func (b *backend) pathExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) {
+ out, err := req.Storage.Get(req.Path)
+ if err != nil {
+ return false, fmt.Errorf("existence check failed: %v", err)
+ }
+
+ return out != nil, nil
+}
+
+func (b *backend) pathKVRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ entry, err := req.Storage.Get(req.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ if entry == nil {
+ return nil, nil
+ }
+
+ value := string(entry.Value)
+
+ // Return the secret
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "value": value,
+ },
+ }, nil
+}
+
+func (b *backend) pathKVCreateUpdate(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ value := data.Get("value").(string)
+
+ entry := &logical.StorageEntry{
+ Key: req.Path,
+ Value: []byte(value),
+ }
+
+ s := req.Storage
+ err := s.Put(entry)
+ if err != nil {
+ return nil, err
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "value": value,
+ },
+ }, nil
+}
+
+func (b *backend) pathKVDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ if err := req.Storage.Delete(req.Path); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *backend) pathKVList(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ vals, err := req.Storage.List("kv/")
+ if err != nil {
+ return nil, err
+ }
+ return logical.ListResponse(vals), nil
+}
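
The ExistenceCheck wired up above is what lets the framework route a write on
kv/<key> to CreateOperation when the entry is absent and to UpdateOperation
when it already exists. A simplified sketch of that dispatch decision, with
storage reduced to a map:

    package main

    import "fmt"

    // operationFor mirrors the framework's use of ExistenceCheck: route to
    // create when the entry does not exist, otherwise to update.
    func operationFor(storage map[string][]byte, path string) string {
        if _, exists := storage[path]; exists {
            return "update"
        }
        return "create"
    }

    func main() {
        storage := map[string][]byte{}
        fmt.Println(operationFor(storage, "kv/foo")) // create
        storage["kv/foo"] = []byte("bar")
        fmt.Println(operationFor(storage, "kv/foo")) // update
    }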
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_special.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_special.go
new file mode 100644
index 0000000..f695e20
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_special.go
@@ -0,0 +1,27 @@
+package mock
+
+import (
+ "github.com/hashicorp/vault/logical"
+ "github.com/hashicorp/vault/logical/framework"
+)
+
+// pathSpecial is used to test special paths.
+func pathSpecial(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "special",
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathSpecialRead,
+ },
+ }
+}
+
+func (b *backend) pathSpecialRead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Return the secret
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "data": "foo",
+ },
+ }, nil
+
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go b/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go
new file mode 100644
index 0000000..ede0622
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go
@@ -0,0 +1,119 @@
+package plugin
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "encoding/gob"
+ "fmt"
+ "time"
+
+ "sync"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/logical"
+ log "github.com/mgutz/logxi/v1"
+)
+
+// Register these types since we have to serialize and de-serialize tls.ConnectionState
+// over the wire as part of logical.Request.Connection.
+func init() {
+ gob.Register(rsa.PublicKey{})
+ gob.Register(ecdsa.PublicKey{})
+ gob.Register(time.Duration(0))
+}
+
+// BackendPluginClient is a wrapper around backendPluginClient
+// that also contains its plugin.Client instance. It's primarily
+// used to cleanly kill the client on Cleanup()
+type BackendPluginClient struct {
+ client *plugin.Client
+ sync.Mutex
+
+ *backendPluginClient
+}
+
+// Cleanup calls the RPC client's Cleanup() func and also calls
+// the go-plugin's client Kill() func
+func (b *BackendPluginClient) Cleanup() {
+ b.backendPluginClient.Cleanup()
+ b.client.Kill()
+}
+
+// NewBackend will return an instance of an RPC-based client implementation of the backend for
+// external plugins, or a concrete implementation of the backend if it is a builtin backend.
+// The backend is returned as a logical.Backend interface. The isMetadataMode param determines whether
+// the plugin should run in metadata mode.
+func NewBackend(pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger, isMetadataMode bool) (logical.Backend, error) {
+ // Look for plugin in the plugin catalog
+ pluginRunner, err := sys.LookupPlugin(pluginName)
+ if err != nil {
+ return nil, err
+ }
+
+ var backend logical.Backend
+ if pluginRunner.Builtin {
+ // Plugin is builtin so we can retrieve an instance of the interface
+ // from the pluginRunner. Then cast it to logical.Backend.
+ backendRaw, err := pluginRunner.BuiltinFactory()
+ if err != nil {
+ return nil, fmt.Errorf("error getting plugin type: %s", err)
+ }
+
+ var ok bool
+ backend, ok = backendRaw.(logical.Backend)
+ if !ok {
+ return nil, fmt.Errorf("unsuported backend type: %s", pluginName)
+ }
+
+ } else {
+ // create a backendPluginClient instance
+ backend, err = newPluginClient(sys, pluginRunner, logger, isMetadataMode)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return backend, nil
+}
+
+func newPluginClient(sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (logical.Backend, error) {
+ // pluginMap is the map of plugins we can dispense.
+ pluginMap := map[string]plugin.Plugin{
+ "backend": &BackendPlugin{
+ metadataMode: isMetadataMode,
+ },
+ }
+
+ var client *plugin.Client
+ var err error
+ if isMetadataMode {
+ client, err = pluginRunner.RunMetadataMode(sys, pluginMap, handshakeConfig, []string{}, logger)
+ } else {
+ client, err = pluginRunner.Run(sys, pluginMap, handshakeConfig, []string{}, logger)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Connect via RPC
+ rpcClient, err := client.Client()
+ if err != nil {
+ return nil, err
+ }
+
+ // Request the plugin
+ raw, err := rpcClient.Dispense("backend")
+ if err != nil {
+ return nil, err
+ }
+
+ // We should have a logical backend type now. This feels like a normal interface
+ // implementation but is in fact over an RPC connection.
+ backendRPC := raw.(*backendPluginClient)
+
+ return &BackendPluginClient{
+ client: client,
+ backendPluginClient: backendRPC,
+ }, nil
+}
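
The gob.Register calls in the init func above matter because
logical.Request.Connection can carry interface-typed values (for example,
public keys inside tls.ConnectionState), and gob refuses to encode a concrete
type behind an interface until that type has been registered. A tiny
standalone demonstration of the failure mode and the fix:

    package main

    import (
        "bytes"
        "encoding/gob"
        "fmt"
    )

    type Wrapper struct {
        Value interface{}
    }

    type Inner struct{ N int }

    func main() {
        var buf bytes.Buffer

        // Without registration, encoding an interface field holding Inner fails.
        err := gob.NewEncoder(&buf).Encode(Wrapper{Value: Inner{N: 1}})
        fmt.Println("before Register:", err)

        // After registering the concrete type, the same encode succeeds.
        gob.Register(Inner{})
        buf.Reset()
        err = gob.NewEncoder(&buf).Encode(Wrapper{Value: Inner{N: 1}})
        fmt.Println("after Register:", err)
    }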
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/serve.go b/vendor/github.com/hashicorp/vault/logical/plugin/serve.go
new file mode 100644
index 0000000..1d70b3a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/serve.go
@@ -0,0 +1,56 @@
+package plugin
+
+import (
+ "crypto/tls"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+// BackendPluginName is the name of the plugin that can be
+// dispensed from the plugin server.
+const BackendPluginName = "backend"
+
+type BackendFactoryFunc func(*logical.BackendConfig) (logical.Backend, error)
+type TLSProviderFunc func() (*tls.Config, error)
+
+type ServeOpts struct {
+ BackendFactoryFunc BackendFactoryFunc
+ TLSProviderFunc TLSProviderFunc
+}
+
+// Serve is a helper function used to serve a backend plugin. This
+// should be run on the plugin's main process.
+func Serve(opts *ServeOpts) error {
+ // pluginMap is the map of plugins we can dispense.
+ var pluginMap = map[string]plugin.Plugin{
+ "backend": &BackendPlugin{
+ Factory: opts.BackendFactoryFunc,
+ },
+ }
+
+ err := pluginutil.OptionallyEnableMlock()
+ if err != nil {
+ return err
+ }
+
+ // If FetchMetadata is true, run without TLSProvider
+ plugin.Serve(&plugin.ServeConfig{
+ HandshakeConfig: handshakeConfig,
+ Plugins: pluginMap,
+ TLSProvider: opts.TLSProviderFunc,
+ })
+
+ return nil
+}
+
+// handshakeConfig is used to do a basic handshake between a plugin and the
+// host. If the handshake fails, a user-friendly error is shown. This prevents
+// users from executing bad plugins or executing a plugin directly. It is a UX
+// feature, not a security feature.
+var handshakeConfig = plugin.HandshakeConfig{
+ ProtocolVersion: 2,
+ MagicCookieKey: "VAULT_BACKEND_PLUGIN",
+ MagicCookieValue: "6669da05-b1c8-4f49-97d9-c8e5bed98e20",
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/storage.go b/vendor/github.com/hashicorp/vault/logical/plugin/storage.go
new file mode 100644
index 0000000..99c21f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/storage.go
@@ -0,0 +1,139 @@
+package plugin
+
+import (
+ "net/rpc"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/logical"
+)
+
+// StorageClient is an implementation of logical.Storage that communicates
+// over RPC.
+type StorageClient struct {
+ client *rpc.Client
+}
+
+func (s *StorageClient) List(prefix string) ([]string, error) {
+ var reply StorageListReply
+ err := s.client.Call("Plugin.List", prefix, &reply)
+ if err != nil {
+ return reply.Keys, err
+ }
+ if reply.Error != nil {
+ return reply.Keys, reply.Error
+ }
+ return reply.Keys, nil
+}
+
+func (s *StorageClient) Get(key string) (*logical.StorageEntry, error) {
+ var reply StorageGetReply
+ err := s.client.Call("Plugin.Get", key, &reply)
+ if err != nil {
+ return nil, err
+ }
+ if reply.Error != nil {
+ return nil, reply.Error
+ }
+ return reply.StorageEntry, nil
+}
+
+func (s *StorageClient) Put(entry *logical.StorageEntry) error {
+ var reply StoragePutReply
+ err := s.client.Call("Plugin.Put", entry, &reply)
+ if err != nil {
+ return err
+ }
+ if reply.Error != nil {
+ return reply.Error
+ }
+ return nil
+}
+
+func (s *StorageClient) Delete(key string) error {
+ var reply StorageDeleteReply
+ err := s.client.Call("Plugin.Delete", key, &reply)
+ if err != nil {
+ return err
+ }
+ if reply.Error != nil {
+ return reply.Error
+ }
+ return nil
+}
+
+// StorageServer is a net/rpc compatible structure for serving a
+// logical.Storage implementation over RPC.
+type StorageServer struct {
+ impl logical.Storage
+}
+
+func (s *StorageServer) List(prefix string, reply *StorageListReply) error {
+ keys, err := s.impl.List(prefix)
+ *reply = StorageListReply{
+ Keys: keys,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *StorageServer) Get(key string, reply *StorageGetReply) error {
+ storageEntry, err := s.impl.Get(key)
+ *reply = StorageGetReply{
+ StorageEntry: storageEntry,
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *StorageServer) Put(entry *logical.StorageEntry, reply *StoragePutReply) error {
+ err := s.impl.Put(entry)
+ *reply = StoragePutReply{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+func (s *StorageServer) Delete(key string, reply *StorageDeleteReply) error {
+ err := s.impl.Delete(key)
+ *reply = StorageDeleteReply{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+}
+
+type StorageListReply struct {
+ Keys []string
+ Error *plugin.BasicError
+}
+
+type StorageGetReply struct {
+ StorageEntry *logical.StorageEntry
+ Error *plugin.BasicError
+}
+
+type StoragePutReply struct {
+ Error *plugin.BasicError
+}
+
+type StorageDeleteReply struct {
+ Error *plugin.BasicError
+}
+
+// NOOPStorage is used to deny access to the storage interface while running a
+// backend plugin in metadata mode.
+type NOOPStorage struct{}
+
+func (s *NOOPStorage) List(prefix string) ([]string, error) {
+ return []string{}, nil
+}
+
+func (s *NOOPStorage) Get(key string) (*logical.StorageEntry, error) {
+ return nil, nil
+}
+
+func (s *NOOPStorage) Put(entry *logical.StorageEntry) error {
+ return nil
+}
+
+func (s *NOOPStorage) Delete(key string) error {
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/storage_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/storage_test.go
new file mode 100644
index 0000000..9899a82
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/storage_test.go
@@ -0,0 +1,27 @@
+package plugin
+
+import (
+ "testing"
+
+ plugin "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/logical"
+)
+
+func TestStorage_impl(t *testing.T) {
+ var _ logical.Storage = new(StorageClient)
+}
+
+func TestStorage_operations(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ storage := &logical.InmemStorage{}
+
+ server.RegisterName("Plugin", &StorageServer{
+ impl: storage,
+ })
+
+ testStorage := &StorageClient{client: client}
+
+ logical.TestStorage(t, testStorage)
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/system.go b/vendor/github.com/hashicorp/vault/logical/plugin/system.go
new file mode 100644
index 0000000..16f67df
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/system.go
@@ -0,0 +1,247 @@
+package plugin
+
+import (
+ "net/rpc"
+ "time"
+
+ "fmt"
+
+ plugin "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/helper/wrapping"
+ "github.com/hashicorp/vault/logical"
+)
+
+type SystemViewClient struct {
+ client *rpc.Client
+}
+
+func (s *SystemViewClient) DefaultLeaseTTL() time.Duration {
+ var reply DefaultLeaseTTLReply
+ err := s.client.Call("Plugin.DefaultLeaseTTL", new(interface{}), &reply)
+ if err != nil {
+ return 0
+ }
+
+ return reply.DefaultLeaseTTL
+}
+
+func (s *SystemViewClient) MaxLeaseTTL() time.Duration {
+ var reply MaxLeaseTTLReply
+ err := s.client.Call("Plugin.MaxLeaseTTL", new(interface{}), &reply)
+ if err != nil {
+ return 0
+ }
+
+ return reply.MaxLeaseTTL
+}
+
+func (s *SystemViewClient) SudoPrivilege(path string, token string) bool {
+ var reply SudoPrivilegeReply
+ args := &SudoPrivilegeArgs{
+ Path: path,
+ Token: token,
+ }
+
+ err := s.client.Call("Plugin.SudoPrivilege", args, &reply)
+ if err != nil {
+ return false
+ }
+
+ return reply.Sudo
+}
+
+func (s *SystemViewClient) Tainted() bool {
+ var reply TaintedReply
+
+ err := s.client.Call("Plugin.Tainted", new(interface{}), &reply)
+ if err != nil {
+ return false
+ }
+
+ return reply.Tainted
+}
+
+func (s *SystemViewClient) CachingDisabled() bool {
+ var reply CachingDisabledReply
+
+ err := s.client.Call("Plugin.CachingDisabled", new(interface{}), &reply)
+ if err != nil {
+ return false
+ }
+
+ return reply.CachingDisabled
+}
+
+func (s *SystemViewClient) ReplicationState() consts.ReplicationState {
+ var reply ReplicationStateReply
+
+ err := s.client.Call("Plugin.ReplicationState", new(interface{}), &reply)
+ if err != nil {
+ return consts.ReplicationDisabled
+ }
+
+ return reply.ReplicationState
+}
+
+func (s *SystemViewClient) ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
+ var reply ResponseWrapDataReply
+ // Do not allow JWTs to be returned
+ args := &ResponseWrapDataArgs{
+ Data: data,
+ TTL: ttl,
+ JWT: false,
+ }
+
+ err := s.client.Call("Plugin.ResponseWrapData", args, &reply)
+ if err != nil {
+ return nil, err
+ }
+ if reply.Error != nil {
+ return nil, reply.Error
+ }
+
+ return reply.ResponseWrapInfo, nil
+}
+
+func (s *SystemViewClient) LookupPlugin(name string) (*pluginutil.PluginRunner, error) {
+ return nil, fmt.Errorf("cannot call LookupPlugin from a plugin backend")
+}
+
+func (s *SystemViewClient) MlockEnabled() bool {
+ var reply MlockEnabledReply
+ err := s.client.Call("Plugin.MlockEnabled", new(interface{}), &reply)
+ if err != nil {
+ return false
+ }
+
+ return reply.MlockEnabled
+}
+
+type SystemViewServer struct {
+ impl logical.SystemView
+}
+
+func (s *SystemViewServer) DefaultLeaseTTL(_ interface{}, reply *DefaultLeaseTTLReply) error {
+ ttl := s.impl.DefaultLeaseTTL()
+ *reply = DefaultLeaseTTLReply{
+ DefaultLeaseTTL: ttl,
+ }
+
+ return nil
+}
+
+func (s *SystemViewServer) MaxLeaseTTL(_ interface{}, reply *MaxLeaseTTLReply) error {
+ ttl := s.impl.MaxLeaseTTL()
+ *reply = MaxLeaseTTLReply{
+ MaxLeaseTTL: ttl,
+ }
+
+ return nil
+}
+
+func (s *SystemViewServer) SudoPrivilege(args *SudoPrivilegeArgs, reply *SudoPrivilegeReply) error {
+ sudo := s.impl.SudoPrivilege(args.Path, args.Token)
+ *reply = SudoPrivilegeReply{
+ Sudo: sudo,
+ }
+
+ return nil
+}
+
+func (s *SystemViewServer) Tainted(_ interface{}, reply *TaintedReply) error {
+ tainted := s.impl.Tainted()
+ *reply = TaintedReply{
+ Tainted: tainted,
+ }
+
+ return nil
+}
+
+func (s *SystemViewServer) CachingDisabled(_ interface{}, reply *CachingDisabledReply) error {
+ cachingDisabled := s.impl.CachingDisabled()
+ *reply = CachingDisabledReply{
+ CachingDisabled: cachingDisabled,
+ }
+
+ return nil
+}
+
+func (s *SystemViewServer) ReplicationState(_ interface{}, reply *ReplicationStateReply) error {
+ replicationState := s.impl.ReplicationState()
+ *reply = ReplicationStateReply{
+ ReplicationState: replicationState,
+ }
+
+ return nil
+}
+
+func (s *SystemViewServer) ResponseWrapData(args *ResponseWrapDataArgs, reply *ResponseWrapDataReply) error {
+ // Do not allow JWTs to be returned
+ info, err := s.impl.ResponseWrapData(args.Data, args.TTL, false)
+ if err != nil {
+ *reply = ResponseWrapDataReply{
+ Error: plugin.NewBasicError(err),
+ }
+ return nil
+ }
+ *reply = ResponseWrapDataReply{
+ ResponseWrapInfo: info,
+ }
+
+ return nil
+}
+
+func (s *SystemViewServer) MlockEnabled(_ interface{}, reply *MlockEnabledReply) error {
+ enabled := s.impl.MlockEnabled()
+ *reply = MlockEnabledReply{
+ MlockEnabled: enabled,
+ }
+
+ return nil
+}
+
+type DefaultLeaseTTLReply struct {
+ DefaultLeaseTTL time.Duration
+}
+
+type MaxLeaseTTLReply struct {
+ MaxLeaseTTL time.Duration
+}
+
+type SudoPrivilegeArgs struct {
+ Path string
+ Token string
+}
+
+type SudoPrivilegeReply struct {
+ Sudo bool
+}
+
+type TaintedReply struct {
+ Tainted bool
+}
+
+type CachingDisabledReply struct {
+ CachingDisabled bool
+}
+
+type ReplicationStateReply struct {
+ ReplicationState consts.ReplicationState
+}
+
+type ResponseWrapDataArgs struct {
+ Data map[string]interface{}
+ TTL time.Duration
+ JWT bool
+}
+
+type ResponseWrapDataReply struct {
+ ResponseWrapInfo *wrapping.ResponseWrapInfo
+ Error *plugin.BasicError
+}
+
+type MlockEnabledReply struct {
+ MlockEnabled bool
+}
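+
+// Wiring a SystemViewServer/SystemViewClient pair follows the same pattern
+// as the storage plugin; as a sketch (see system_test.go):
+//
+//     server.RegisterName("Plugin", &SystemViewServer{impl: sys})
+//     view := &SystemViewClient{client: client}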
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/system_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/system_test.go
new file mode 100644
index 0000000..57e386b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/logical/plugin/system_test.go
@@ -0,0 +1,174 @@
+package plugin
+
+import (
+ "testing"
+
+ "reflect"
+
+ plugin "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/logical"
+)
+
+func Test_impl(t *testing.T) {
+ var _ logical.SystemView = new(SystemViewClient)
+}
+
+func TestSystem_defaultLeaseTTL(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ sys := logical.TestSystemView()
+
+ server.RegisterName("Plugin", &SystemViewServer{
+ impl: sys,
+ })
+
+ testSystemView := &SystemViewClient{client: client}
+
+ expected := sys.DefaultLeaseTTL()
+ actual := testSystemView.DefaultLeaseTTL()
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("expected: %v, got: %v", expected, actual)
+ }
+}
+
+func TestSystem_maxLeaseTTL(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ sys := logical.TestSystemView()
+
+ server.RegisterName("Plugin", &SystemViewServer{
+ impl: sys,
+ })
+
+ testSystemView := &SystemViewClient{client: client}
+
+ expected := sys.MaxLeaseTTL()
+ actual := testSystemView.MaxLeaseTTL()
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("expected: %v, got: %v", expected, actual)
+ }
+}
+
+func TestSystem_sudoPrivilege(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ sys := logical.TestSystemView()
+ sys.SudoPrivilegeVal = true
+
+ server.RegisterName("Plugin", &SystemViewServer{
+ impl: sys,
+ })
+
+ testSystemView := &SystemViewClient{client: client}
+
+ expected := sys.SudoPrivilege("foo", "bar")
+ actual := testSystemView.SudoPrivilege("foo", "bar")
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("expected: %v, got: %v", expected, actual)
+ }
+}
+
+func TestSystem_tainted(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ sys := logical.TestSystemView()
+ sys.TaintedVal = true
+
+ server.RegisterName("Plugin", &SystemViewServer{
+ impl: sys,
+ })
+
+ testSystemView := &SystemViewClient{client: client}
+
+ expected := sys.Tainted()
+ actual := testSystemView.Tainted()
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("expected: %v, got: %v", expected, actual)
+ }
+}
+
+func TestSystem_cachingDisabled(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ sys := logical.TestSystemView()
+ sys.CachingDisabledVal = true
+
+ server.RegisterName("Plugin", &SystemViewServer{
+ impl: sys,
+ })
+
+ testSystemView := &SystemViewClient{client: client}
+
+ expected := sys.CachingDisabled()
+ actual := testSystemView.CachingDisabled()
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("expected: %v, got: %v", expected, actual)
+ }
+}
+
+func TestSystem_replicationState(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ sys := logical.TestSystemView()
+ sys.ReplicationStateVal = consts.ReplicationPerformancePrimary
+
+ server.RegisterName("Plugin", &SystemViewServer{
+ impl: sys,
+ })
+
+ testSystemView := &SystemViewClient{client: client}
+
+ expected := sys.ReplicationState()
+ actual := testSystemView.ReplicationState()
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("expected: %v, got: %v", expected, actual)
+ }
+}
+
+func TestSystem_responseWrapData(t *testing.T) {
+ t.SkipNow()
+}
+
+func TestSystem_lookupPlugin(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ sys := logical.TestSystemView()
+
+ server.RegisterName("Plugin", &SystemViewServer{
+ impl: sys,
+ })
+
+ testSystemView := &SystemViewClient{client: client}
+
+ if _, err := testSystemView.LookupPlugin("foo"); err == nil {
+ t.Fatal("LookPlugin(): expected error on due to unsupported call from plugin")
+ }
+}
+
+func TestSystem_mlockEnabled(t *testing.T) {
+ client, server := plugin.TestRPCConn(t)
+ defer client.Close()
+
+ sys := logical.TestSystemView()
+ sys.EnableMlock = true
+
+ server.RegisterName("Plugin", &SystemViewServer{
+ impl: sys,
+ })
+
+ testSystemView := &SystemViewClient{client: client}
+
+ expected := sys.MlockEnabled()
+ actual := testSystemView.MlockEnabled()
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("expected: %v, got: %v", expected, actual)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/request.go b/vendor/github.com/hashicorp/vault/logical/request.go
index c41b1dc..cee0f0c 100644
--- a/vendor/github.com/hashicorp/vault/logical/request.go
+++ b/vendor/github.com/hashicorp/vault/logical/request.go
@@ -174,12 +174,13 @@ type Operation string
const (
// The operations below are called per path
- CreateOperation Operation = "create"
- ReadOperation = "read"
- UpdateOperation = "update"
- DeleteOperation = "delete"
- ListOperation = "list"
- HelpOperation = "help"
+ CreateOperation Operation = "create"
+ ReadOperation = "read"
+ UpdateOperation = "update"
+ DeleteOperation = "delete"
+ ListOperation = "list"
+ HelpOperation = "help"
+ PersonaLookaheadOperation = "persona-lookahead"
// The operations below are called globally, the path is less relevant.
RevokeOperation Operation = "revoke"
diff --git a/vendor/github.com/hashicorp/vault/logical/response.go b/vendor/github.com/hashicorp/vault/logical/response.go
index ee6bfe1..6ee452b 100644
--- a/vendor/github.com/hashicorp/vault/logical/response.go
+++ b/vendor/github.com/hashicorp/vault/logical/response.go
@@ -2,11 +2,8 @@ package logical
import (
"errors"
- "fmt"
- "reflect"
- "time"
- "github.com/mitchellh/copystructure"
+ "github.com/hashicorp/vault/helper/wrapping"
)
const (
@@ -28,26 +25,6 @@ const (
HTTPStatusCode = "http_status_code"
)
-type ResponseWrapInfo struct {
- // Setting to non-zero specifies that the response should be wrapped.
- // Specifies the desired TTL of the wrapping token.
- TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
-
- // The token containing the wrapped response
- Token string `json:"token" structs:"token" mapstructure:"token"`
-
- // The creation time. This can be used with the TTL to figure out an
- // expected expiration.
- CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"cration_time"`
-
- // If the contained response is the output of a token creation call, the
- // created token's accessor will be accessible here
- WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor"`
-
- // The format to use. This doesn't get returned, it's only internal.
- Format string `json:"format" structs:"format" mapstructure:"format"`
-}
-
// Response is a struct that stores the response of a request.
// It is used to abstract the details of the higher level request protocol.
type Response struct {
@@ -72,85 +49,18 @@ type Response struct {
// Warnings allow operations or backends to return warnings in response
// to user actions without failing the action outright.
- // Making it private helps ensure that it is easy for various parts of
- // Vault (backend, core, etc.) to add warnings without accidentally
- // replacing what exists.
- warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"`
+ Warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"`
// Information for wrapping the response in a cubbyhole
- WrapInfo *ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`
-}
-
-func init() {
- copystructure.Copiers[reflect.TypeOf(Response{})] = func(v interface{}) (interface{}, error) {
- input := v.(Response)
- ret := Response{
- Redirect: input.Redirect,
- }
-
- if input.Secret != nil {
- retSec, err := copystructure.Copy(input.Secret)
- if err != nil {
- return nil, fmt.Errorf("error copying Secret: %v", err)
- }
- ret.Secret = retSec.(*Secret)
- }
-
- if input.Auth != nil {
- retAuth, err := copystructure.Copy(input.Auth)
- if err != nil {
- return nil, fmt.Errorf("error copying Auth: %v", err)
- }
- ret.Auth = retAuth.(*Auth)
- }
-
- if input.Data != nil {
- retData, err := copystructure.Copy(&input.Data)
- if err != nil {
- return nil, fmt.Errorf("error copying Data: %v", err)
- }
- ret.Data = *(retData.(*map[string]interface{}))
- }
-
- if input.Warnings() != nil {
- for _, warning := range input.Warnings() {
- ret.AddWarning(warning)
- }
- }
-
- if input.WrapInfo != nil {
- retWrapInfo, err := copystructure.Copy(input.WrapInfo)
- if err != nil {
- return nil, fmt.Errorf("error copying WrapInfo: %v", err)
- }
- ret.WrapInfo = retWrapInfo.(*ResponseWrapInfo)
- }
-
- return &ret, nil
- }
+ WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`
}
// AddWarning adds a warning into the response's warning list
func (r *Response) AddWarning(warning string) {
- if r.warnings == nil {
- r.warnings = make([]string, 0, 1)
+ if r.Warnings == nil {
+ r.Warnings = make([]string, 0, 1)
}
- r.warnings = append(r.warnings, warning)
-}
-
-// Warnings returns the list of warnings set on the response
-func (r *Response) Warnings() []string {
- return r.warnings
-}
-
-// ClearWarnings clears the response's warning list
-func (r *Response) ClearWarnings() {
- r.warnings = make([]string, 0, 1)
-}
-
-// Copies the warnings from the other response to this one
-func (r *Response) CloneWarnings(other *Response) {
- r.warnings = other.warnings
+ r.Warnings = append(r.Warnings, warning)
}
// IsError returns true if this response seems to indicate an error.
diff --git a/vendor/github.com/hashicorp/vault/logical/storage_inmem.go b/vendor/github.com/hashicorp/vault/logical/storage_inmem.go
index 64c6e2b..0112ae2 100644
--- a/vendor/github.com/hashicorp/vault/logical/storage_inmem.go
+++ b/vendor/github.com/hashicorp/vault/logical/storage_inmem.go
@@ -1,53 +1,90 @@
package logical
import (
+ "strings"
"sync"
- "github.com/hashicorp/vault/physical"
+ radix "github.com/armon/go-radix"
)
-// InmemStorage implements Storage and stores all data in memory.
+// InmemStorage implements Storage and stores all data in memory. It is
+// basically a straight copy of physical.Inmem, but it prevents backends from
+// having to load all of physical's dependencies (which are legion) just to
+// have some testing storage.
type InmemStorage struct {
- phys *physical.InmemBackend
-
+ sync.RWMutex
+ root *radix.Tree
once sync.Once
}
+func (s *InmemStorage) Get(key string) (*StorageEntry, error) {
+ s.once.Do(s.init)
+
+ s.RLock()
+ defer s.RUnlock()
+
+ if raw, ok := s.root.Get(key); ok {
+ se := raw.(*StorageEntry)
+ return &StorageEntry{
+ Key: se.Key,
+ Value: se.Value,
+ }, nil
+ }
+
+ return nil, nil
+}
+
+func (s *InmemStorage) Put(entry *StorageEntry) error {
+ s.once.Do(s.init)
+
+ s.Lock()
+ defer s.Unlock()
+
+ s.root.Insert(entry.Key, &StorageEntry{
+ Key: entry.Key,
+ Value: entry.Value,
+ })
+ return nil
+}
+
+func (s *InmemStorage) Delete(key string) error {
+ s.once.Do(s.init)
+
+ s.Lock()
+ defer s.Unlock()
+
+ s.root.Delete(key)
+ return nil
+}
+
func (s *InmemStorage) List(prefix string) ([]string, error) {
s.once.Do(s.init)
- return s.phys.List(prefix)
-}
+ s.RLock()
+ defer s.RUnlock()
-func (s *InmemStorage) Get(key string) (*StorageEntry, error) {
- s.once.Do(s.init)
- entry, err := s.phys.Get(key)
- if err != nil {
- return nil, err
+ var out []string
+ seen := make(map[string]interface{})
+ walkFn := func(s string, v interface{}) bool {
+ trimmed := strings.TrimPrefix(s, prefix)
+ sep := strings.Index(trimmed, "/")
+ if sep == -1 {
+ out = append(out, trimmed)
+ } else {
+ trimmed = trimmed[:sep+1]
+ if _, ok := seen[trimmed]; !ok {
+ out = append(out, trimmed)
+ seen[trimmed] = struct{}{}
+ }
+ }
+ return false
}
- if entry == nil {
- return nil, nil
- }
- return &StorageEntry{
- Key: entry.Key,
- Value: entry.Value,
- }, nil
-}
+ s.root.WalkPrefix(prefix, walkFn)
-func (s *InmemStorage) Put(entry *StorageEntry) error {
- s.once.Do(s.init)
- physEntry := &physical.Entry{
- Key: entry.Key,
- Value: entry.Value,
- }
- return s.phys.Put(physEntry)
-}
+ return out, nil
-func (s *InmemStorage) Delete(k string) error {
- s.once.Do(s.init)
- return s.phys.Delete(k)
}
func (s *InmemStorage) init() {
- s.phys = physical.NewInmem(nil)
+ s.root = radix.New()
}
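+
+// As an illustration of the List semantics above: with entries "foo/bar" and
+// "foo/baz/qux" stored, List("foo/") returns ["bar", "baz/"] -- the direct
+// children, plus a single trailing-slash entry per sub-"folder".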
diff --git a/vendor/github.com/hashicorp/vault/logical/system_view.go b/vendor/github.com/hashicorp/vault/logical/system_view.go
index d769397..64fc51c 100644
--- a/vendor/github.com/hashicorp/vault/logical/system_view.go
+++ b/vendor/github.com/hashicorp/vault/logical/system_view.go
@@ -1,9 +1,12 @@
package logical
import (
+ "errors"
"time"
"github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/helper/wrapping"
)
// SystemView exposes system configuration information in a safe way
@@ -37,6 +40,18 @@ type SystemView interface {
// ReplicationState indicates the state of cluster replication
ReplicationState() consts.ReplicationState
+
+ // ResponseWrapData wraps the given data in a cubbyhole and returns the
+ // token used to unwrap.
+ ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error)
+
+ // LookupPlugin looks into the plugin catalog for a plugin with the given
+ // name. Returns a PluginRunner or an error if a plugin can not be found.
+ LookupPlugin(string) (*pluginutil.PluginRunner, error)
+
+ // MlockEnabled returns the configuration setting for enabling mlock on
+ // plugins.
+ MlockEnabled() bool
}
type StaticSystemView struct {
@@ -46,6 +61,7 @@ type StaticSystemView struct {
TaintedVal bool
CachingDisabledVal bool
Primary bool
+ EnableMlock bool
ReplicationStateVal consts.ReplicationState
}
@@ -72,3 +88,15 @@ func (d StaticSystemView) CachingDisabled() bool {
func (d StaticSystemView) ReplicationState() consts.ReplicationState {
return d.ReplicationStateVal
}
+
+func (d StaticSystemView) ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
+ return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView")
+}
+
+func (d StaticSystemView) LookupPlugin(name string) (*pluginutil.PluginRunner, error) {
+ return nil, errors.New("LookupPlugin is not implemented in StaticSystemView")
+}
+
+func (d StaticSystemView) MlockEnabled() bool {
+ return d.EnableMlock
+}
diff --git a/vendor/github.com/hashicorp/vault/logical/testing/testing.go b/vendor/github.com/hashicorp/vault/logical/testing/testing.go
index b2072ea..ca52cdd 100644
--- a/vendor/github.com/hashicorp/vault/logical/testing/testing.go
+++ b/vendor/github.com/hashicorp/vault/logical/testing/testing.go
@@ -15,7 +15,7 @@ import (
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/physical/inmem"
"github.com/hashicorp/vault/vault"
)
@@ -136,8 +136,14 @@ func Test(tt TestT, c TestCase) {
// Create an in-memory Vault core
logger := logformat.NewVaultLogger(log.LevelTrace)
+ phys, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ tt.Fatal(err)
+ return
+ }
+
core, err := vault.NewCore(&vault.CoreConfig{
- Physical: physical.NewInmem(logger),
+ Physical: phys,
LogicalBackends: map[string]logical.Factory{
"test": func(conf *logical.BackendConfig) (logical.Backend, error) {
if c.Backend != nil {
diff --git a/vendor/github.com/hashicorp/vault/logical/translate_response.go b/vendor/github.com/hashicorp/vault/logical/translate_response.go
index 048adaf..d3d7271 100644
--- a/vendor/github.com/hashicorp/vault/logical/translate_response.go
+++ b/vendor/github.com/hashicorp/vault/logical/translate_response.go
@@ -14,7 +14,7 @@ import (
func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse {
httpResp := &HTTPResponse{
Data: input.Data,
- Warnings: input.Warnings(),
+ Warnings: input.Warnings,
}
if input.Secret != nil {
@@ -42,7 +42,7 @@ func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse {
func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response {
logicalResp := &Response{
Data: input.Data,
- warnings: input.Warnings,
+ Warnings: input.Warnings,
}
if input.LeaseID != "" {
@@ -91,6 +91,7 @@ type HTTPWrapInfo struct {
Token string `json:"token"`
TTL int `json:"ttl"`
CreationTime string `json:"creation_time"`
+ CreationPath string `json:"creation_path"`
WrappedAccessor string `json:"wrapped_accessor,omitempty"`
}
diff --git a/vendor/github.com/hashicorp/vault/meta/meta.go b/vendor/github.com/hashicorp/vault/meta/meta.go
index 0f5fef9..a81cbde 100644
--- a/vendor/github.com/hashicorp/vault/meta/meta.go
+++ b/vendor/github.com/hashicorp/vault/meta/meta.go
@@ -29,7 +29,7 @@ var (
-wrap-ttl="" Indicates that the response should be wrapped in a
cubbyhole token with the requested TTL. The response
can be fetched by calling the "sys/wrapping/unwrap"
- endpoint, passing in the wrappping token's ID. This
+ endpoint, passing in the wrapping token's ID. This
is a numeric string with an optional suffix
"s", "m", or "h"; if no suffix is specified it will
be parsed as seconds. May also be specified via
diff --git a/vendor/github.com/hashicorp/vault/physical/azure.go b/vendor/github.com/hashicorp/vault/physical/azure/azure.go
similarity index 62%
rename from vendor/github.com/hashicorp/vault/physical/azure.go
rename to vendor/github.com/hashicorp/vault/physical/azure/azure.go
index 4d5083e..f938ae4 100644
--- a/vendor/github.com/hashicorp/vault/physical/azure.go
+++ b/vendor/github.com/hashicorp/vault/physical/azure/azure.go
@@ -1,4 +1,4 @@
-package physical
+package azure
import (
"encoding/base64"
@@ -10,11 +10,14 @@ import (
"strings"
"time"
+ storage "github.com/Azure/azure-sdk-for-go/storage"
log "github.com/mgutz/logxi/v1"
- "github.com/Azure/azure-storage-go"
"github.com/armon/go-metrics"
"github.com/hashicorp/errwrap"
+ cleanhttp "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/physical"
)
// MaxBlobSize at this time
@@ -23,21 +26,19 @@ var MaxBlobSize = 1024 * 1024 * 4
// AzureBackend is a physical backend that stores data
// within an Azure blob container.
type AzureBackend struct {
- container string
- client storage.BlobStorageClient
+ container *storage.Container
logger log.Logger
- permitPool *PermitPool
+ permitPool *physical.PermitPool
}
-// newAzureBackend constructs an Azure backend using a pre-existing
+// NewAzureBackend constructs an Azure backend using a pre-existing
// container. Credentials can be provided to the backend, sourced
// from the environment or from the backend configuration.
-func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error) {
-
- container := os.Getenv("AZURE_BLOB_CONTAINER")
- if container == "" {
- container = conf["container"]
- if container == "" {
+func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+ name := os.Getenv("AZURE_BLOB_CONTAINER")
+ if name == "" {
+ name = conf["container"]
+ if name == "" {
return nil, fmt.Errorf("'container' must be set")
}
}
@@ -62,19 +63,15 @@ func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error)
if err != nil {
return nil, fmt.Errorf("failed to create Azure client: %v", err)
}
+ client.HTTPClient = cleanhttp.DefaultPooledClient()
- contObj := client.GetBlobService().GetContainerReference(container)
- created, err := contObj.CreateIfNotExists()
+ blobClient := client.GetBlobService()
+ container := blobClient.GetContainerReference(name)
+ _, err = container.CreateIfNotExists(&storage.CreateContainerOptions{
+ Access: storage.ContainerAccessTypePrivate,
+ })
if err != nil {
- return nil, fmt.Errorf("failed to upsert container: %v", err)
- }
- if created {
- err = contObj.SetPermissions(storage.ContainerPermissions{
- AccessType: storage.ContainerAccessTypePrivate,
- }, 0, "")
- if err != nil {
- return nil, fmt.Errorf("failed to set permissions on newly-created container: %v", err)
- }
+ return nil, fmt.Errorf("failed to create %q container: %v", name, err)
}
maxParStr, ok := conf["max_parallel"]
@@ -91,19 +88,18 @@ func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error)
a := &AzureBackend{
container: container,
- client: client.GetBlobService(),
logger: logger,
- permitPool: NewPermitPool(maxParInt),
+ permitPool: physical.NewPermitPool(maxParInt),
}
return a, nil
}
// Put is used to insert or update an entry
-func (a *AzureBackend) Put(entry *Entry) error {
+func (a *AzureBackend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"azure", "put"}, time.Now())
if len(entry.Value) >= MaxBlobSize {
- return fmt.Errorf("Value is bigger than the current supported limit of 4MBytes")
+ return fmt.Errorf("value is bigger than the current supported limit of 4MBytes")
}
blockID := base64.StdEncoding.EncodeToString([]byte("AAAA"))
@@ -113,34 +109,44 @@ func (a *AzureBackend) Put(entry *Entry) error {
a.permitPool.Acquire()
defer a.permitPool.Release()
- err := a.client.PutBlock(a.container, entry.Key, blockID, entry.Value)
+ blob := &storage.Blob{
+ Container: a.container,
+ Name: entry.Key,
+ }
+ if err := blob.PutBlock(blockID, entry.Value, nil); err != nil {
+ return err
+ }
- err = a.client.PutBlockList(a.container, entry.Key, blocks)
- return err
+ return blob.PutBlockList(blocks, nil)
}
// Get is used to fetch an entry
-func (a *AzureBackend) Get(key string) (*Entry, error) {
+func (a *AzureBackend) Get(key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"azure", "get"}, time.Now())
a.permitPool.Acquire()
defer a.permitPool.Release()
- exists, _ := a.client.BlobExists(a.container, key)
-
+ blob := &storage.Blob{
+ Container: a.container,
+ Name: key,
+ }
+ exists, err := blob.Exists()
+ if err != nil {
+ return nil, err
+ }
if !exists {
return nil, nil
}
- reader, err := a.client.GetBlob(a.container, key)
-
+ reader, err := blob.Get(nil)
if err != nil {
return nil, err
}
-
+ defer reader.Close()
data, err := ioutil.ReadAll(reader)
- ent := &Entry{
+ ent := &physical.Entry{
Key: key,
Value: data,
}
@@ -152,10 +158,15 @@ func (a *AzureBackend) Get(key string) (*Entry, error) {
func (a *AzureBackend) Delete(key string) error {
defer metrics.MeasureSince([]string{"azure", "delete"}, time.Now())
+ blob := &storage.Blob{
+ Container: a.container,
+ Name: key,
+ }
+
a.permitPool.Acquire()
defer a.permitPool.Release()
- _, err := a.client.DeleteBlobIfExists(a.container, key, nil)
+ _, err := blob.DeleteIfExists(nil)
return err
}
@@ -165,15 +176,13 @@ func (a *AzureBackend) List(prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"azure", "list"}, time.Now())
a.permitPool.Acquire()
- defer a.permitPool.Release()
-
- contObj := a.client.GetContainerReference(a.container)
- list, err := contObj.ListBlobs(storage.ListBlobsParameters{Prefix: prefix})
-
+ list, err := a.container.ListBlobs(storage.ListBlobsParameters{Prefix: prefix})
if err != nil {
// Break early.
+ a.permitPool.Release()
return nil, err
}
+ a.permitPool.Release()
keys := []string{}
for _, blob := range list.Blobs {
@@ -181,7 +190,7 @@ func (a *AzureBackend) List(prefix string) ([]string, error) {
if i := strings.Index(key, "/"); i == -1 {
keys = append(keys, key)
} else {
- keys = appendIfMissing(keys, key[:i+1])
+ keys = strutil.AppendIfMissing(keys, key[:i+1])
}
}
diff --git a/vendor/github.com/hashicorp/vault/physical/azure_test.go b/vendor/github.com/hashicorp/vault/physical/azure/azure_test.go
similarity index 53%
rename from vendor/github.com/hashicorp/vault/physical/azure_test.go
rename to vendor/github.com/hashicorp/vault/physical/azure/azure_test.go
index 135e658..eb0c510 100644
--- a/vendor/github.com/hashicorp/vault/physical/azure_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/azure/azure_test.go
@@ -1,4 +1,4 @@
-package physical
+package azure
import (
"fmt"
@@ -6,10 +6,12 @@ import (
"testing"
"time"
+ cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
- "github.com/Azure/azure-storage-go"
+ storage "github.com/Azure/azure-sdk-for-go/storage"
)
func TestAzureBackend(t *testing.T) {
@@ -22,27 +24,29 @@ func TestAzureBackend(t *testing.T) {
accountKey := os.Getenv("AZURE_ACCOUNT_KEY")
ts := time.Now().UnixNano()
- container := fmt.Sprintf("vault-test-%d", ts)
+ name := fmt.Sprintf("vault-test-%d", ts)
cleanupClient, _ := storage.NewBasicClient(accountName, accountKey)
+ cleanupClient.HTTPClient = cleanhttp.DefaultPooledClient()
logger := logformat.NewVaultLogger(log.LevelTrace)
- backend, err := NewBackend("azure", logger, map[string]string{
- "container": container,
+ backend, err := NewAzureBackend(map[string]string{
+ "container": name,
"accountName": accountName,
"accountKey": accountKey,
- })
+ }, logger)
defer func() {
- contObj := cleanupClient.GetBlobService().GetContainerReference(container)
- contObj.DeleteIfExists()
+ blobService := cleanupClient.GetBlobService()
+ container := blobService.GetContainerReference(name)
+ container.DeleteIfExists(nil)
}()
if err != nil {
t.Fatalf("err: %s", err)
}
- testBackend(t, backend)
- testBackend_ListPrefix(t, backend)
+ physical.ExerciseBackend(t, backend)
+ physical.ExerciseBackend_ListPrefix(t, backend)
}
diff --git a/vendor/github.com/hashicorp/vault/physical/cache.go b/vendor/github.com/hashicorp/vault/physical/cache.go
index f1b1365..fc44d09 100644
--- a/vendor/github.com/hashicorp/vault/physical/cache.go
+++ b/vendor/github.com/hashicorp/vault/physical/cache.go
@@ -1,7 +1,6 @@
package physical
import (
- "fmt"
"strings"
"github.com/hashicorp/golang-lru"
@@ -19,11 +18,16 @@ const (
// Vault are for policy objects so there is a large read reduction
// by using a simple write-through cache.
type Cache struct {
- backend Backend
- transactional Transactional
- lru *lru.TwoQueueCache
- locks []*locksutil.LockEntry
- logger log.Logger
+ backend Backend
+ lru *lru.TwoQueueCache
+ locks []*locksutil.LockEntry
+ logger log.Logger
+}
+
+// TransactionalCache is a Cache that wraps the physical that is transactional
+type TransactionalCache struct {
+ *Cache
+ Transactional
}
// NewCache returns a physical cache of the given size.
@@ -43,10 +47,14 @@ func NewCache(b Backend, size int, logger log.Logger) *Cache {
logger: logger,
}
- if txnl, ok := c.backend.(Transactional); ok {
- c.transactional = txnl
- }
+ return c
+}
+func NewTransactionalCache(b Backend, size int, logger log.Logger) *TransactionalCache {
+ c := &TransactionalCache{
+ Cache: NewCache(b, size, logger),
+ Transactional: b.(Transactional),
+ }
return c
}
@@ -128,18 +136,14 @@ func (c *Cache) List(prefix string) ([]string, error) {
return c.backend.List(prefix)
}
-func (c *Cache) Transaction(txns []TxnEntry) error {
- if c.transactional == nil {
- return fmt.Errorf("physical/cache: underlying backend does not support transactions")
- }
-
+func (c *TransactionalCache) Transaction(txns []TxnEntry) error {
// Lock the world
for _, lock := range c.locks {
lock.Lock()
defer lock.Unlock()
}
- if err := c.transactional.Transaction(txns); err != nil {
+ if err := c.Transactional.Transaction(txns); err != nil {
return err
}
diff --git a/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra.go b/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra.go
new file mode 100644
index 0000000..493e156
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra.go
@@ -0,0 +1,327 @@
+package cassandra
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+
+ "github.com/armon/go-metrics"
+ "github.com/gocql/gocql"
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/physical"
+)
+
+// CassandraBackend is a physical backend that stores data in Cassandra.
+type CassandraBackend struct {
+ sess *gocql.Session
+ table string
+
+ logger log.Logger
+}
+
+// NewCassandraBackend constructs a Cassandra backend using a pre-existing
+// keyspace and table.
+func NewCassandraBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+ splitArray := func(v string) []string {
+ return strings.FieldsFunc(v, func(r rune) bool {
+ return r == ','
+ })
+ }
+
+ var (
+ hosts = splitArray(conf["hosts"])
+ port = 9042
+ explicitPort = false
+ keyspace = conf["keyspace"]
+ table = conf["table"]
+ consistency = gocql.LocalQuorum
+ )
+
+ if len(hosts) == 0 {
+ hosts = []string{"localhost"}
+ }
+ for i, hp := range hosts {
+ h, ps, err := net.SplitHostPort(hp)
+ if err != nil {
+ continue
+ }
+ p, err := strconv.Atoi(ps)
+ if err != nil {
+ return nil, err
+ }
+
+ if explicitPort && p != port {
+ return nil, fmt.Errorf("all hosts must have the same port")
+ }
+ hosts[i], port = h, p
+ explicitPort = true
+ }
+
+ if keyspace == "" {
+ keyspace = "vault"
+ }
+ if table == "" {
+ table = "entries"
+ }
+ if cs, ok := conf["consistency"]; ok {
+ switch cs {
+ case "ANY":
+ consistency = gocql.Any
+ case "ONE":
+ consistency = gocql.One
+ case "TWO":
+ consistency = gocql.Two
+ case "THREE":
+ consistency = gocql.Three
+ case "QUORUM":
+ consistency = gocql.Quorum
+ case "ALL":
+ consistency = gocql.All
+ case "LOCAL_QUORUM":
+ consistency = gocql.LocalQuorum
+ case "EACH_QUORUM":
+ consistency = gocql.EachQuorum
+ case "LOCAL_ONE":
+ consistency = gocql.LocalOne
+ default:
+ return nil, fmt.Errorf("'consistency' must be one of {ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_QUORUM, EACH_QUORUM, LOCAL_ONE}")
+ }
+ }
+
+ connectStart := time.Now()
+ cluster := gocql.NewCluster(hosts...)
+ cluster.Port = port
+ cluster.Keyspace = keyspace
+
+ cluster.ProtoVersion = 2
+ if protoVersionStr, ok := conf["protocol_version"]; ok {
+ protoVersion, err := strconv.Atoi(protoVersionStr)
+ if err != nil {
+ return nil, fmt.Errorf("'protocol_version' must be an integer")
+ }
+ cluster.ProtoVersion = protoVersion
+ }
+
+ if username, ok := conf["username"]; ok {
+ if cluster.ProtoVersion < 2 {
+ return nil, fmt.Errorf("Authentication is not supported with protocol version < 2")
+ }
+ authenticator := gocql.PasswordAuthenticator{Username: username}
+ if password, ok := conf["password"]; ok {
+ authenticator.Password = password
+ }
+ cluster.Authenticator = authenticator
+ }
+
+ if connTimeoutStr, ok := conf["connection_timeout"]; ok {
+ connectionTimeout, err := strconv.Atoi(connTimeoutStr)
+ if err != nil {
+ return nil, fmt.Errorf("'connection_timeout' must be an integer")
+ }
+ cluster.Timeout = time.Duration(connectionTimeout) * time.Second
+ }
+
+ if err := setupCassandraTLS(conf, cluster); err != nil {
+ return nil, err
+ }
+
+ sess, err := cluster.CreateSession()
+ if err != nil {
+ return nil, err
+ }
+ metrics.MeasureSince([]string{"cassandra", "connect"}, connectStart)
+ sess.SetConsistency(consistency)
+
+ impl := &CassandraBackend{
+ sess: sess,
+ table: table,
+ logger: logger}
+ return impl, nil
+}
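+
+// A sketch of the configuration keys this constructor reads; every key is
+// optional and the values below are illustrative:
+//
+//     conf := map[string]string{
+//         "hosts":            "127.0.0.1:9042",
+//         "keyspace":         "vault",
+//         "table":            "entries",
+//         "consistency":      "LOCAL_QUORUM",
+//         "protocol_version": "3",
+//     }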
+
+func setupCassandraTLS(conf map[string]string, cluster *gocql.ClusterConfig) error {
+ tlsOnStr, ok := conf["tls"]
+ if !ok {
+ return nil
+ }
+
+ tlsOn, err := strconv.Atoi(tlsOnStr)
+ if err != nil {
+ return fmt.Errorf("'tls' must be an integer (0 or 1)")
+ }
+
+ if tlsOn == 0 {
+ return nil
+ }
+
+ var tlsConfig = &tls.Config{}
+ if pemBundlePath, ok := conf["pem_bundle_file"]; ok {
+ pemBundleData, err := ioutil.ReadFile(pemBundlePath)
+ if err != nil {
+ return fmt.Errorf("Error reading pem bundle from %s: %v", pemBundlePath, err)
+ }
+ pemBundle, err := certutil.ParsePEMBundle(string(pemBundleData))
+ if err != nil {
+ return fmt.Errorf("Error parsing 'pem_bundle': %v", err)
+ }
+ tlsConfig, err = pemBundle.GetTLSConfig(certutil.TLSClient)
+ if err != nil {
+ return err
+ }
+ } else {
+ if pemJSONPath, ok := conf["pem_json_file"]; ok {
+ pemJSONData, err := ioutil.ReadFile(pemJSONPath)
+ if err != nil {
+ return fmt.Errorf("Error reading json bundle from %s: %v", pemJSONPath, err)
+ }
+ pemJSON, err := certutil.ParsePKIJSON([]byte(pemJSONData))
+ if err != nil {
+ return err
+ }
+ tlsConfig, err = pemJSON.GetTLSConfig(certutil.TLSClient)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if tlsSkipVerifyStr, ok := conf["tls_skip_verify"]; ok {
+ tlsSkipVerify, err := strconv.Atoi(tlsSkipVerifyStr)
+ if err != nil {
+ return fmt.Errorf("'tls_skip_verify' must be an integer (0 or 1)")
+ }
+ // Any non-zero value enables InsecureSkipVerify.
+ tlsConfig.InsecureSkipVerify = tlsSkipVerify != 0
+ }
+
+ if tlsMinVersion, ok := conf["tls_min_version"]; ok {
+ switch tlsMinVersion {
+ case "tls10":
+ tlsConfig.MinVersion = tls.VersionTLS10
+ case "tls11":
+ tlsConfig.MinVersion = tls.VersionTLS11
+ case "tls12":
+ tlsConfig.MinVersion = tls.VersionTLS12
+ default:
+ return fmt.Errorf("'tls_min_version' must be one of `tls10`, `tls11` or `tls12`")
+ }
+ }
+
+ cluster.SslOpts = &gocql.SslOptions{
+ Config: tlsConfig.Clone()}
+ return nil
+}
+
+// bucketName sanitises a bucket name for Cassandra
+func (c *CassandraBackend) bucketName(name string) string {
+ if name == "" {
+ name = "."
+ }
+ return strings.TrimRight(name, "/")
+}
+
+// buckets returns all the prefix buckets the key should be stored at
+func (c *CassandraBackend) buckets(key string) []string {
+ vals := append([]string{""}, physical.Prefixes(key)...)
+ for i, v := range vals {
+ vals[i] = c.bucketName(v)
+ }
+ return vals
+}
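+
+// For example, buckets("a/b/c") returns [".", "a", "a/b"]: the root bucket
+// plus one bucket per parent prefix of the key (compare the expectations in
+// cassandra_test.go).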
+
+// bucket returns the most specific bucket for the key
+func (c *CassandraBackend) bucket(key string) string {
+ bs := c.buckets(key)
+ return bs[len(bs)-1]
+}
+
+// Put is used to insert or update an entry
+func (c *CassandraBackend) Put(entry *physical.Entry) error {
+ defer metrics.MeasureSince([]string{"cassandra", "put"}, time.Now())
+
+ // Execute inserts to each key prefix simultaneously
+ stmt := fmt.Sprintf(`INSERT INTO "%s" (bucket, key, value) VALUES (?, ?, ?)`, c.table)
+ buckets := c.buckets(entry.Key)
+ // Buffer the channel so an early error return below does not leak the
+ // remaining sender goroutines.
+ results := make(chan error, len(buckets))
+ for _, _bucket := range buckets {
+ go func(bucket string) {
+ results <- c.sess.Query(stmt, bucket, entry.Key, entry.Value).Exec()
+ }(_bucket)
+ }
+ for i := 0; i < len(buckets); i++ {
+ if err := <-results; err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Get is used to fetch an entry
+func (c *CassandraBackend) Get(key string) (*physical.Entry, error) {
+ defer metrics.MeasureSince([]string{"cassandra", "get"}, time.Now())
+
+ v := []byte(nil)
+ stmt := fmt.Sprintf(`SELECT value FROM "%s" WHERE bucket = ? AND key = ? LIMIT 1`, c.table)
+ q := c.sess.Query(stmt, c.bucket(key), key)
+ if err := q.Scan(&v); err != nil {
+ if err == gocql.ErrNotFound {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ return &physical.Entry{
+ Key: key,
+ Value: v,
+ }, nil
+}
+
+// Delete is used to permanently delete an entry
+func (c *CassandraBackend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"cassandra", "delete"}, time.Now())
+
+ stmt := fmt.Sprintf(`DELETE FROM "%s" WHERE bucket = ? AND key = ?`, c.table)
+ batch := gocql.NewBatch(gocql.LoggedBatch)
+ for _, bucket := range c.buckets(key) {
+ batch.Entries = append(batch.Entries, gocql.BatchEntry{
+ Stmt: stmt,
+ Args: []interface{}{bucket, key}})
+ }
+ return c.sess.ExecuteBatch(batch)
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (c *CassandraBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"cassandra", "list"}, time.Now())
+
+ stmt := fmt.Sprintf(`SELECT key FROM "%s" WHERE bucket = ?`, c.table)
+ q := c.sess.Query(stmt, c.bucketName(prefix))
+ iter := q.Iter()
+ k, keys := "", []string{}
+ for iter.Scan(&k) {
+ // Only return the next "component" (with a trailing slash if it has children)
+ k = strings.TrimPrefix(k, prefix)
+ if parts := strings.SplitN(k, "/", 2); len(parts) > 1 {
+ k = parts[0] + "/"
+ } else {
+ k = parts[0]
+ }
+
+ // Deduplicate; this works because the keys are sorted
+ if len(keys) > 0 && keys[len(keys)-1] == k {
+ continue
+ }
+ keys = append(keys, k)
+ }
+ return keys, iter.Close()
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra_test.go b/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra_test.go
new file mode 100644
index 0000000..1c9b1f1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra_test.go
@@ -0,0 +1,112 @@
+package cassandra
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/gocql/gocql"
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+ dockertest "gopkg.in/ory-am/dockertest.v3"
+)
+
+func TestCassandraBackend(t *testing.T) {
+ if testing.Short() {
+ t.Skipf("skipping in short mode")
+ }
+
+ cleanup, hosts := prepareCassandraTestContainer(t)
+ defer cleanup()
+
+ // Run vault tests
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ b, err := NewCassandraBackend(map[string]string{
+ "hosts": hosts,
+ "protocol_version": "3",
+ }, logger)
+
+ if err != nil {
+ t.Fatalf("Failed to create new backend: %v", err)
+ }
+
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
+}
+
+func TestCassandraBackendBuckets(t *testing.T) {
+ expectations := map[string][]string{
+ "": {"."},
+ "a": {"."},
+ "a/b": {".", "a"},
+ "a/b/c/d/e": {".", "a", "a/b", "a/b/c", "a/b/c/d"}}
+
+ b := &CassandraBackend{}
+ for input, expected := range expectations {
+ actual := b.buckets(input)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Errorf("bad: %v expected: %v", actual, expected)
+ }
+ }
+}
+
+func prepareCassandraTestContainer(t *testing.T) (func(), string) {
+ if os.Getenv("CASSANDRA_HOSTS") != "" {
+ return func() {}, os.Getenv("CASSANDRA_HOSTS")
+ }
+
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("cassandra: failed to connect to docker: %s", err)
+ }
+
+ resource, err := pool.Run("cassandra", "3.11", []string{"CASSANDRA_BROADCAST_ADDRESS=127.0.0.1"})
+ if err != nil {
+ t.Fatalf("cassandra: could not start container: %s", err)
+ }
+
+ cleanup := func() {
+ pool.Purge(resource)
+ }
+
+ setup := func() error {
+ cluster := gocql.NewCluster("127.0.0.1")
+ p, _ := strconv.Atoi(resource.GetPort("9042/tcp"))
+ cluster.Port = p
+ cluster.Timeout = 5 * time.Second
+ sess, err := cluster.CreateSession()
+ if err != nil {
+ return err
+ }
+ defer sess.Close()
+
+ // Create keyspace
+ q := sess.Query(`CREATE KEYSPACE "vault" WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };`)
+ if err := q.Exec(); err != nil {
+ t.Fatalf("could not create cassandra keyspace: %v", err)
+ }
+
+ // Create table
+ q = sess.Query(`CREATE TABLE "vault"."entries" (
+ bucket text,
+ key text,
+ value blob,
+ PRIMARY KEY (bucket, key)
+ ) WITH CLUSTERING ORDER BY (key ASC);`)
+ if err := q.Exec(); err != nil {
+ t.Fatalf("could not create cassandra table: %v", err)
+ }
+
+ return nil
+ }
+ if err = pool.Retry(setup); err != nil {
+ cleanup()
+ t.Fatalf("cassandra: could not setup container: %s", err)
+ }
+
+ return cleanup, fmt.Sprintf("127.0.0.1:%s", resource.GetPort("9042/tcp"))
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb.go b/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb.go
new file mode 100644
index 0000000..395c2da
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb.go
@@ -0,0 +1,237 @@
+package cockroachdb
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/armon/go-metrics"
+ "github.com/cockroachdb/cockroach-go/crdb"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+
+ // CockroachDB uses the Postgres SQL driver
+ _ "github.com/lib/pq"
+)
+
+// CockroachDBBackend is a physical backend that stores data
+// within a CockroachDB database.
+type CockroachDBBackend struct {
+ table string
+ client *sql.DB
+ rawStatements map[string]string
+ statements map[string]*sql.Stmt
+ logger log.Logger
+ permitPool *physical.PermitPool
+}
+
+// NewCockroachDBBackend constructs a CockroachDB backend using the given
+// API client, server address, credentials, and database.
+func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+ // Get the CockroachDB credentials to perform read/write operations.
+ connURL, ok := conf["connection_url"]
+ if !ok || connURL == "" {
+ return nil, fmt.Errorf("missing connection_url")
+ }
+
+ dbTable, ok := conf["table"]
+ if !ok {
+ dbTable = "vault_kv_store"
+ }
+
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ var err error
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("cockroachdb: max_parallel set", "max_parallel", maxParInt)
+ }
+ }
+
+ // Create CockroachDB handle for the database.
+ db, err := sql.Open("postgres", connURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to connect to cockroachdb: %v", err)
+ }
+
+ // Create the required table if it doesn't exist.
+ createQuery := "CREATE TABLE IF NOT EXISTS " + dbTable +
+ " (path STRING, value BYTES, PRIMARY KEY (path))"
+ if _, err := db.Exec(createQuery); err != nil {
+ return nil, fmt.Errorf("failed to create mysql table: %v", err)
+ }
+
+ // Setup the backend
+ c := &CockroachDBBackend{
+ table: dbTable,
+ client: db,
+ rawStatements: map[string]string{
+ "put": "INSERT INTO " + dbTable + " VALUES($1, $2)" +
+ " ON CONFLICT (path) DO " +
+ " UPDATE SET (path, value) = ($1, $2)",
+ "get": "SELECT value FROM " + dbTable + " WHERE path = $1",
+ "delete": "DELETE FROM " + dbTable + " WHERE path = $1",
+ "list": "SELECT path FROM " + dbTable + " WHERE path LIKE $1",
+ },
+ statements: make(map[string]*sql.Stmt),
+ logger: logger,
+ permitPool: physical.NewPermitPool(maxParInt),
+ }
+
+ // Prepare all the statements required
+ for name, query := range c.rawStatements {
+ if err := c.prepare(name, query); err != nil {
+ return nil, err
+ }
+ }
+ return c, nil
+}
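+
+// A usage sketch; "connection_url" is required, "table" and "max_parallel"
+// are optional (the URL below is illustrative):
+//
+//     b, err := NewCockroachDBBackend(map[string]string{
+//         "connection_url": "postgresql://root@localhost:26257/?sslmode=disable",
+//         "table":          "vault_kv_store",
+//     }, logger)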
+
+// prepare is a helper to prepare a query for future execution
+func (c *CockroachDBBackend) prepare(name, query string) error {
+ stmt, err := c.client.Prepare(query)
+ if err != nil {
+ return fmt.Errorf("failed to prepare '%s': %v", name, err)
+ }
+ c.statements[name] = stmt
+ return nil
+}
+
+// Put is used to insert or update an entry.
+func (c *CockroachDBBackend) Put(entry *physical.Entry) error {
+ defer metrics.MeasureSince([]string{"cockroachdb", "put"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ _, err := c.statements["put"].Exec(entry.Key, entry.Value)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Get is used to fetch an entry.
+func (c *CockroachDBBackend) Get(key string) (*physical.Entry, error) {
+ defer metrics.MeasureSince([]string{"cockroachdb", "get"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ var result []byte
+ err := c.statements["get"].QueryRow(key).Scan(&result)
+ if err == sql.ErrNoRows {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ ent := &physical.Entry{
+ Key: key,
+ Value: result,
+ }
+ return ent, nil
+}
+
+// Delete is used to permanently delete an entry
+func (c *CockroachDBBackend) Delete(key string) error {
+ defer metrics.MeasureSince([]string{"cockroachdb", "delete"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ _, err := c.statements["delete"].Exec(key)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (c *CockroachDBBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"cockroachdb", "list"}, time.Now())
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ likePrefix := prefix + "%"
+ rows, err := c.statements["list"].Query(likePrefix)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ var keys []string
+ for rows.Next() {
+ var key string
+ err = rows.Scan(&key)
+ if err != nil {
+ return nil, fmt.Errorf("failed to scan rows: %v", err)
+ }
+
+ key = strings.TrimPrefix(key, prefix)
+ if i := strings.Index(key, "/"); i == -1 {
+ // Add objects only from the current 'folder'
+ keys = append(keys, key)
+ } else {
+ // Add truncated 'folder' paths
+ keys = strutil.AppendIfMissing(keys, key[:i+1])
+ }
+ }
+
+ sort.Strings(keys)
+ return keys, nil
+}
+
+// Transaction is used to run multiple entries via a transaction
+func (c *CockroachDBBackend) Transaction(txns []physical.TxnEntry) error {
+ defer metrics.MeasureSince([]string{"cockroachdb", "transaction"}, time.Now())
+ if len(txns) == 0 {
+ return nil
+ }
+
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
+ return crdb.ExecuteTx(context.Background(), c.client, nil, func(tx *sql.Tx) error {
+ return c.transaction(tx, txns)
+ })
+}
+
+func (c *CockroachDBBackend) transaction(tx *sql.Tx, txns []physical.TxnEntry) error {
+ deleteStmt, err := tx.Prepare(c.rawStatements["delete"])
+ if err != nil {
+ return err
+ }
+ putStmt, err := tx.Prepare(c.rawStatements["put"])
+ if err != nil {
+ return err
+ }
+
+ for _, op := range txns {
+ switch op.Operation {
+ case physical.DeleteOperation:
+ _, err = deleteStmt.Exec(op.Entry.Key)
+ case physical.PutOperation:
+ _, err = putStmt.Exec(op.Entry.Key, op.Entry.Value)
+ default:
+ return fmt.Errorf("%q is not a supported transaction operation", op.Operation)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
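+
+// A hypothetical two-operation transaction as a usage sketch, assuming
+// physical.TxnEntry pairs an Operation with a *physical.Entry as used above:
+//
+//     err := c.Transaction([]physical.TxnEntry{
+//         {Operation: physical.PutOperation, Entry: &physical.Entry{Key: "foo", Value: []byte("bar")}},
+//         {Operation: physical.DeleteOperation, Entry: &physical.Entry{Key: "stale"}},
+//     })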
diff --git a/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb_test.go b/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb_test.go
new file mode 100644
index 0000000..35bcecf
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb_test.go
@@ -0,0 +1,103 @@
+package cockroachdb
+
+import (
+ "database/sql"
+ "fmt"
+ "os"
+ "testing"
+
+ dockertest "gopkg.in/ory-am/dockertest.v3"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+
+ _ "github.com/lib/pq"
+)
+
+func prepareCockroachDBTestContainer(t *testing.T) (cleanup func(), retURL, tableName string) {
+ tableName = os.Getenv("CR_TABLE")
+ if tableName == "" {
+ tableName = "vault_kv_store"
+ }
+ retURL = os.Getenv("CR_URL")
+ if retURL != "" {
+ return func() {}, retURL, tableName
+ }
+
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("Failed to connect to docker: %s", err)
+ }
+
+ dockerOptions := &dockertest.RunOptions{
+ Repository: "cockroachdb/cockroach",
+ Tag: "release-1.0",
+ Cmd: []string{"start", "--insecure"},
+ }
+ resource, err := pool.RunWithOptions(dockerOptions)
+ if err != nil {
+ t.Fatalf("Could not start local CockroachDB docker container: %s", err)
+ }
+
+ cleanup = func() {
+ err := pool.Purge(resource)
+ if err != nil {
+ t.Fatalf("Failed to cleanup local container: %s", err)
+ }
+ }
+
+ retURL = fmt.Sprintf("postgresql://root@localhost:%s/?sslmode=disable", resource.GetPort("26257/tcp"))
+ database := "database"
+ tableName = database + ".vault_kv"
+
+ // exponential backoff-retry
+ if err = pool.Retry(func() error {
+ var err error
+ db, err := sql.Open("postgres", retURL)
+ if err != nil {
+ return err
+ }
+ _, err = db.Exec("CREATE DATABASE database")
+ return err
+ }); err != nil {
+ cleanup()
+ t.Fatalf("Could not connect to docker: %s", err)
+ }
+ return cleanup, retURL, tableName
+}
+
+func TestCockroachDBBackend(t *testing.T) {
+ cleanup, connURL, table := prepareCockroachDBTestContainer(t)
+ defer cleanup()
+
+ // Run vault tests
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewCockroachDBBackend(map[string]string{
+ "connection_url": connURL,
+ "table": table,
+ }, logger)
+
+ if err != nil {
+ t.Fatalf("Failed to create new backend: %v", err)
+ }
+
+ defer func() {
+ truncate(t, b)
+ }()
+
+ physical.ExerciseBackend(t, b)
+ truncate(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
+ truncate(t, b)
+ physical.ExerciseTransactionalBackend(t, b)
+}
+
+func truncate(t *testing.T, b physical.Backend) {
+ crdb := b.(*CockroachDBBackend)
+ _, err := crdb.client.Exec("TRUNCATE TABLE " + crdb.table)
+ if err != nil {
+ t.Fatalf("Failed to drop table: %v", err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/consul.go b/vendor/github.com/hashicorp/vault/physical/consul/consul.go
similarity index 92%
rename from vendor/github.com/hashicorp/vault/physical/consul.go
rename to vendor/github.com/hashicorp/vault/physical/consul/consul.go
index 93aabf0..6c31466 100644
--- a/vendor/github.com/hashicorp/vault/physical/consul.go
+++ b/vendor/github.com/hashicorp/vault/physical/consul/consul.go
@@ -1,10 +1,11 @@
-package physical
+package consul
import (
"errors"
"fmt"
"io/ioutil"
"net"
+ "net/http"
"net/url"
"strconv"
"strings"
@@ -23,11 +24,11 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-cleanhttp"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/helper/tlsutil"
+ "github.com/hashicorp/vault/physical"
)
const (
@@ -72,7 +73,7 @@ type ConsulBackend struct {
logger log.Logger
client *api.Client
kv *api.KV
- permitPool *PermitPool
+ permitPool *physical.PermitPool
serviceLock sync.RWMutex
redirectHost string
redirectPort int64
@@ -86,9 +87,9 @@ type ConsulBackend struct {
notifySealedCh chan notifyEvent
}
-// newConsulBackend constructs a Consul backend using the given API client
+// NewConsulBackend constructs a Consul backend using the given API client
// and the prefix in the KV store.
-func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
// Get the path in Consul
path, ok := conf["path"]
if !ok {
@@ -160,9 +161,7 @@ func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error
// Configure the client
consulConf := api.DefaultConfig()
// Set MaxIdleConnsPerHost to the number of processes used in expiration.Restore
- tr := cleanhttp.DefaultPooledTransport()
- tr.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
- consulConf.HttpClient.Transport = tr
+ consulConf.Transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
if addr, ok := conf["address"]; ok {
consulConf.Address = addr
@@ -187,16 +186,14 @@ func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error
return nil, err
}
- transport := cleanhttp.DefaultPooledTransport()
- transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
- transport.TLSClientConfig = tlsClientConfig
- if err := http2.ConfigureTransport(transport); err != nil {
+ consulConf.Transport.TLSClientConfig = tlsClientConfig
+ if err := http2.ConfigureTransport(consulConf.Transport); err != nil {
return nil, err
}
- consulConf.HttpClient.Transport = transport
logger.Debug("physical/consul: configured TLS")
}
+ consulConf.HttpClient = &http.Client{Transport: consulConf.Transport}
client, err := api.NewClient(consulConf)
if err != nil {
return nil, errwrap.Wrapf("client setup failed: {{err}}", err)
@@ -231,7 +228,7 @@ func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error
logger: logger,
client: client,
kv: client.KV(),
- permitPool: NewPermitPool(maxParInt),
+ permitPool: physical.NewPermitPool(maxParInt),
serviceName: service,
serviceTags: strutil.ParseDedupLowercaseAndSortStrings(tags, ","),
checkTimeout: checkTimeout,
@@ -244,7 +241,14 @@ func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error
}
func setupTLSConfig(conf map[string]string) (*tls.Config, error) {
- serverName := strings.Split(conf["address"], ":")
+ serverName, _, err := net.SplitHostPort(conf["address"])
+ switch {
+ case err == nil:
+ case strings.Contains(err.Error(), "missing port"):
+ serverName = conf["address"]
+ default:
+ return nil, err
+ }
insecureSkipVerify := false
if _, ok := conf["tls_skip_verify"]; ok {
@@ -265,7 +269,7 @@ func setupTLSConfig(conf map[string]string) (*tls.Config, error) {
tlsClientConfig := &tls.Config{
MinVersion: tlsMinVersion,
InsecureSkipVerify: insecureSkipVerify,
- ServerName: serverName[0],
+ ServerName: serverName,
}
_, okCert := conf["tls_cert_file"]
@@ -299,7 +303,7 @@ func setupTLSConfig(conf map[string]string) (*tls.Config, error) {
}
// Used to run multiple entries via a transaction
-func (c *ConsulBackend) Transaction(txns []TxnEntry) error {
+func (c *ConsulBackend) Transaction(txns []physical.TxnEntry) error {
if len(txns) == 0 {
return nil
}
@@ -311,9 +315,9 @@ func (c *ConsulBackend) Transaction(txns []TxnEntry) error {
Key: c.path + op.Entry.Key,
}
switch op.Operation {
- case DeleteOperation:
+ case physical.DeleteOperation:
cop.Verb = api.KVDelete
- case PutOperation:
+ case physical.PutOperation:
cop.Verb = api.KVSet
cop.Value = op.Entry.Value
default:
@@ -343,7 +347,7 @@ func (c *ConsulBackend) Transaction(txns []TxnEntry) error {
}
// Put is used to insert or update an entry
-func (c *ConsulBackend) Put(entry *Entry) error {
+func (c *ConsulBackend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"consul", "put"}, time.Now())
c.permitPool.Acquire()
@@ -359,7 +363,7 @@ func (c *ConsulBackend) Put(entry *Entry) error {
}
// Get is used to fetch an entry
-func (c *ConsulBackend) Get(key string) (*Entry, error) {
+func (c *ConsulBackend) Get(key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"consul", "get"}, time.Now())
c.permitPool.Acquire()
@@ -379,7 +383,7 @@ func (c *ConsulBackend) Get(key string) (*Entry, error) {
if pair == nil {
return nil, nil
}
- ent := &Entry{
+ ent := &physical.Entry{
Key: key,
Value: pair.Value,
}
@@ -422,7 +426,7 @@ func (c *ConsulBackend) List(prefix string) ([]string, error) {
}
// LockWith is used for mutual exclusion based on the given key.
-func (c *ConsulBackend) LockWith(key, value string) (Lock, error) {
+func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) {
// Create the lock
opts := &api.LockOptions{
Key: c.path + key,
@@ -529,7 +533,7 @@ func (c *ConsulBackend) checkDuration() time.Duration {
return lib.DurationMinusBuffer(c.checkTimeout, checkMinBuffer, checkJitterFactor)
}
-func (c *ConsulBackend) RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc activeFunction, sealedFunc sealedFunction) (err error) {
+func (c *ConsulBackend) RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh physical.ShutdownChannel, redirectAddr string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) (err error) {
if err := c.setRedirectAddr(redirectAddr); err != nil {
return err
}
@@ -542,7 +546,7 @@ func (c *ConsulBackend) RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownC
return nil
}
-func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc activeFunction, sealedFunc sealedFunction) {
+func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh physical.ShutdownChannel, redirectAddr string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) {
// This defer statement should be executed last. So push it first.
defer waitGroup.Done()
@@ -659,7 +663,7 @@ func (c *ConsulBackend) serviceID() string {
// without any locks held and can be run concurrently, therefore no changes
// to ConsulBackend can be made in this method (i.e. a const receiver would
// give us compiler-enforced safety).
-func (c *ConsulBackend) reconcileConsul(registeredServiceID string, activeFunc activeFunction, sealedFunc sealedFunction) (serviceID string, err error) {
+func (c *ConsulBackend) reconcileConsul(registeredServiceID string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) (serviceID string, err error) {
// Query vault Core for its current state
active := activeFunc()
sealed := sealedFunc()
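
With the move into its own package the constructor is exported, and the transport tuning above (pooled `MaxIdleConnsPerHost`, optional TLS) happens inside it. A construction sketch, assuming the configuration keys the tests below exercise (`address`, `path`, `max_parallel`); the agent address is a placeholder:

```go
package main

import (
	stdlog "log"

	"github.com/hashicorp/vault/helper/logformat"
	"github.com/hashicorp/vault/physical"
	"github.com/hashicorp/vault/physical/consul"
	log "github.com/mgutz/logxi/v1"
)

func main() {
	logger := logformat.NewVaultLogger(log.LevelTrace)

	b, err := consul.NewConsulBackend(map[string]string{
		"address":      "127.0.0.1:8500",
		"path":         "vault/",
		"max_parallel": "128",
	}, logger)
	if err != nil {
		stdlog.Fatal(err)
	}

	if err := b.Put(&physical.Entry{Key: "core/example", Value: []byte("v")}); err != nil {
		stdlog.Fatal(err)
	}
}
```
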
diff --git a/vendor/github.com/hashicorp/vault/physical/consul_test.go b/vendor/github.com/hashicorp/vault/physical/consul/consul_test.go
similarity index 94%
rename from vendor/github.com/hashicorp/vault/physical/consul_test.go
rename to vendor/github.com/hashicorp/vault/physical/consul/consul_test.go
index 59b1294..4d3230c 100644
--- a/vendor/github.com/hashicorp/vault/physical/consul_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/consul/consul_test.go
@@ -1,4 +1,4 @@
-package physical
+package consul
import (
"fmt"
@@ -14,6 +14,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/physical"
dockertest "gopkg.in/ory-am/dockertest.v2"
)
@@ -37,7 +38,7 @@ func testConsulBackend(t *testing.T) *ConsulBackend {
func testConsulBackendConfig(t *testing.T, conf *consulConf) *ConsulBackend {
logger := logformat.NewVaultLogger(log.LevelTrace)
- be, err := newConsulBackend(*conf, logger)
+ be, err := NewConsulBackend(*conf, logger)
if err != nil {
t.Fatalf("Expected Consul to initialize: %v", err)
}
@@ -57,7 +58,7 @@ func testConsul_testConsulBackend(t *testing.T) {
}
}
-func testActiveFunc(activePct float64) activeFunction {
+func testActiveFunc(activePct float64) physical.ActiveFunction {
return func() bool {
var active bool
standbyProb := rand.Float64()
@@ -68,7 +69,7 @@ func testActiveFunc(activePct float64) activeFunction {
}
}
-func testSealedFunc(sealedPct float64) sealedFunction {
+func testSealedFunc(sealedPct float64) physical.SealedFunction {
return func() bool {
var sealed bool
unsealedProb := rand.Float64()
@@ -94,7 +95,7 @@ func TestConsul_ServiceTags(t *testing.T) {
}
logger := logformat.NewVaultLogger(log.LevelTrace)
- be, err := newConsulBackend(consulConfig, logger)
+ be, err := NewConsulBackend(consulConfig, logger)
if err != nil {
t.Fatal(err)
}
@@ -182,7 +183,7 @@ func TestConsul_newConsulBackend(t *testing.T) {
for _, test := range tests {
logger := logformat.NewVaultLogger(log.LevelTrace)
- be, err := newConsulBackend(test.consulConfig, logger)
+ be, err := NewConsulBackend(test.consulConfig, logger)
if test.fail {
if err == nil {
t.Fatalf(`Expected config "%s" to fail`, test.name)
@@ -206,7 +207,7 @@ func TestConsul_newConsulBackend(t *testing.T) {
}
}
- var shutdownCh ShutdownChannel
+ var shutdownCh physical.ShutdownChannel
waitGroup := &sync.WaitGroup{}
if err := c.RunServiceDiscovery(waitGroup, shutdownCh, test.redirectAddr, testActiveFunc(0.5), testSealedFunc(0.5)); err != nil {
t.Fatalf("bad: %v", err)
@@ -411,18 +412,18 @@ func TestConsulBackend(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("consul", logger, map[string]string{
+ b, err := NewConsulBackend(map[string]string{
"address": conf.Address,
"path": randPath,
"max_parallel": "256",
"token": conf.Token,
- })
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
- testBackend(t, b)
- testBackend_ListPrefix(t, b)
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
}
func TestConsulHABackend(t *testing.T) {
@@ -452,23 +453,23 @@ func TestConsulHABackend(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("consul", logger, map[string]string{
+ b, err := NewConsulBackend(map[string]string{
"address": conf.Address,
"path": randPath,
"max_parallel": "-1",
"token": conf.Token,
- })
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
- ha, ok := b.(HABackend)
+ ha, ok := b.(physical.HABackend)
if !ok {
t.Fatalf("consul does not implement HABackend")
}
- testHABackend(t, ha, ha)
+ physical.ExerciseHABackend(t, ha, ha)
- detect, ok := b.(RedirectDetect)
+ detect, ok := b.(physical.RedirectDetect)
if !ok {
t.Fatalf("consul does not implement RedirectDetect")
}
diff --git a/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb.go b/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb.go
new file mode 100644
index 0000000..e7f945f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb.go
@@ -0,0 +1,305 @@
+package couchdb
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/armon/go-metrics"
+ "github.com/hashicorp/errwrap"
+ cleanhttp "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+)
+
+// CouchDBBackend is a physical backend that stores entries in a CouchDB database
+type CouchDBBackend struct {
+ logger log.Logger
+ client *couchDBClient
+ permitPool *physical.PermitPool
+}
+
+type couchDBClient struct {
+ endpoint string
+ username string
+ password string
+ *http.Client
+}
+
+type couchDBListItem struct {
+ ID string `json:"id"`
+ Key string `json:"key"`
+ Value struct {
+ Revision string
+ } `json:"value"`
+}
+
+type couchDBList struct {
+ TotalRows int `json:"total_rows"`
+ Offset int `json:"offset"`
+ Rows []couchDBListItem `json:"rows"`
+}
+
+func (m *couchDBClient) rev(key string) (string, error) {
+ req, err := http.NewRequest("HEAD", fmt.Sprintf("%s/%s", m.endpoint, key), nil)
+ if err != nil {
+ return "", err
+ }
+ req.SetBasicAuth(m.username, m.password)
+
+ resp, err := m.Client.Do(req)
+ if err != nil {
+ return "", err
+ }
+ resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return "", nil
+ }
+ etag := resp.Header.Get("Etag")
+ if len(etag) < 2 {
+ return "", nil
+ }
+ return etag[1 : len(etag)-1], nil
+}
+
+func (m *couchDBClient) put(e couchDBEntry) error {
+ bs, err := json.Marshal(e)
+ if err != nil {
+ return err
+ }
+
+ req, err := http.NewRequest("PUT", fmt.Sprintf("%s/%s", m.endpoint, e.ID), bytes.NewReader(bs))
+ if err != nil {
+ return err
+ }
+ req.SetBasicAuth(m.username, m.password)
+ resp, err := m.Client.Do(req)
+ if err != nil {
+ return err
+ }
+ // Close the body so the pooled connection can be reused.
+ resp.Body.Close()
+
+ return nil
+}
+
+func (m *couchDBClient) get(key string) (*physical.Entry, error) {
+ req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", m.endpoint, url.PathEscape(key)), nil)
+ if err != nil {
+ return nil, err
+ }
+ req.SetBasicAuth(m.username, m.password)
+ resp, err := m.Client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotFound {
+ return nil, nil
+ } else if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("GET returned %s", resp.Status)
+ }
+ bs, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ entry := couchDBEntry{}
+ if err := json.Unmarshal(bs, &entry); err != nil {
+ return nil, err
+ }
+ return entry.Entry, nil
+}
+
+func (m *couchDBClient) list(prefix string) ([]couchDBListItem, error) {
+ req, err := http.NewRequest("GET", fmt.Sprintf("%s/_all_docs", m.endpoint), nil)
+ if err != nil {
+ return nil, err
+ }
+ req.SetBasicAuth(m.username, m.password)
+ values := req.URL.Query()
+ values.Set("skip", "0")
+ values.Set("limit", "100")
+ values.Set("include_docs", "false")
+ if prefix != "" {
+ values.Set("startkey", fmt.Sprintf("%q", prefix))
+ values.Set("endkey", fmt.Sprintf("%q", prefix+"{}"))
+ }
+ req.URL.RawQuery = values.Encode()
+
+ resp, err := m.Client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ results := couchDBList{}
+ if err := json.Unmarshal(data, &results); err != nil {
+ return nil, err
+ }
+
+ return results.Rows, nil
+}
+
+func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBackend, error) {
+ endpoint := os.Getenv("COUCHDB_ENDPOINT")
+ if endpoint == "" {
+ endpoint = conf["endpoint"]
+ }
+ if endpoint == "" {
+ return nil, fmt.Errorf("missing endpoint")
+ }
+
+ username := os.Getenv("COUCHDB_USERNAME")
+ if username == "" {
+ username = conf["username"]
+ }
+
+ password := os.Getenv("COUCHDB_PASSWORD")
+ if password == "" {
+ password = conf["password"]
+ }
+
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ var err error
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("couchdb: max_parallel set", "max_parallel", maxParInt)
+ }
+ }
+
+ return &CouchDBBackend{
+ client: &couchDBClient{
+ endpoint: endpoint,
+ username: username,
+ password: password,
+ Client: cleanhttp.DefaultPooledClient(),
+ },
+ logger: logger,
+ permitPool: physical.NewPermitPool(maxParInt),
+ }, nil
+}
+
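+// NewCouchDBBackend constructs a CouchDB backend from the endpoint and
+// credentials given in the configuration or the COUCHDB_* environment variables.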
+func NewCouchDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+ return buildCouchDBBackend(conf, logger)
+}
+
+type couchDBEntry struct {
+ Entry *physical.Entry `json:"entry"`
+ Rev string `json:"_rev,omitempty"`
+ ID string `json:"_id"`
+ Deleted *bool `json:"_deleted,omitempty"`
+}
+
+// Put is used to insert or update an entry
+func (m *CouchDBBackend) Put(entry *physical.Entry) error {
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
+ return m.PutInternal(entry)
+}
+
+// Get is used to fetch an entry
+func (m *CouchDBBackend) Get(key string) (*physical.Entry, error) {
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
+ return m.GetInternal(key)
+}
+
+// Delete is used to permanently delete an entry
+func (m *CouchDBBackend) Delete(key string) error {
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
+ return m.DeleteInternal(key)
+}
+
+// List is used to list all the keys under a given prefix
+func (m *CouchDBBackend) List(prefix string) ([]string, error) {
+ defer metrics.MeasureSince([]string{"couchdb", "list"}, time.Now())
+
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
+ items, err := m.client.list(prefix)
+ if err != nil {
+ return nil, err
+ }
+
+ var out []string
+ seen := make(map[string]struct{})
+ for _, result := range items {
+ trimmed := strings.TrimPrefix(result.ID, prefix)
+ sep := strings.Index(trimmed, "/")
+ if sep == -1 {
+ out = append(out, trimmed)
+ } else {
+ trimmed = trimmed[:sep+1]
+ if _, ok := seen[trimmed]; !ok {
+ out = append(out, trimmed)
+ seen[trimmed] = struct{}{}
+ }
+ }
+ }
+ return out, nil
+}
+
+// TransactionalCouchDBBackend creates a couchdb backend that forces all operations to happen
+// in serial
+type TransactionalCouchDBBackend struct {
+ CouchDBBackend
+}
+
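+// NewTransactionalCouchDBBackend constructs a CouchDB backend whose permit
+// pool is sized to one, forcing all operations to happen in serial.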
+func NewTransactionalCouchDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+ backend, err := buildCouchDBBackend(conf, logger)
+ if err != nil {
+ return nil, err
+ }
+ backend.permitPool = physical.NewPermitPool(1)
+
+ return &TransactionalCouchDBBackend{
+ CouchDBBackend: *backend,
+ }, nil
+}
+
+// GetInternal is used to fetch an entry
+func (m *CouchDBBackend) GetInternal(key string) (*physical.Entry, error) {
+ defer metrics.MeasureSince([]string{"couchdb", "get"}, time.Now())
+
+ return m.client.get(key)
+}
+
+// PutInternal is used to insert or update an entry
+func (m *CouchDBBackend) PutInternal(entry *physical.Entry) error {
+ defer metrics.MeasureSince([]string{"couchdb", "put"}, time.Now())
+
+ revision, _ := m.client.rev(url.PathEscape(entry.Key))
+
+ return m.client.put(couchDBEntry{
+ Entry: entry,
+ Rev: revision,
+ ID: url.PathEscape(entry.Key),
+ })
+}
+
+// DeleteInternal is used to permanently delete an entry
+func (m *CouchDBBackend) DeleteInternal(key string) error {
+ defer metrics.MeasureSince([]string{"couchdb", "delete"}, time.Now())
+
+ revision, _ := m.client.rev(url.PathEscape(key))
+ deleted := true
+ return m.client.put(couchDBEntry{
+ ID: url.PathEscape(key),
+ Rev: revision,
+ Deleted: &deleted,
+ })
+}
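
Updates and deletes in CouchDB are MVCC-guarded: every write must carry the document's current `_rev`, which `couchDBClient.rev` above recovers from the quoted `Etag` header of a `HEAD` request. A standalone sketch of that round trip against a plain CouchDB; the endpoint and database name are placeholders:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	base := "http://localhost:5984/vault-example"

	// Create a document; CouchDB assigns revision "1-...".
	putReq, _ := http.NewRequest("PUT", base+"/foo", strings.NewReader(`{"v":1}`))
	if resp, err := http.DefaultClient.Do(putReq); err == nil {
		resp.Body.Close()
	}

	// HEAD exposes the current revision as a quoted Etag header,
	// which is exactly what couchDBClient.rev strips.
	headReq, _ := http.NewRequest("HEAD", base+"/foo", nil)
	resp, err := http.DefaultClient.Do(headReq)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("current revision:", strings.Trim(resp.Header.Get("Etag"), `"`))

	// A later update or delete must send this value as _rev; a stale
	// revision is rejected with 409 Conflict.
}
```
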
diff --git a/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb_test.go b/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb_test.go
new file mode 100644
index 0000000..de4d05d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb_test.go
@@ -0,0 +1,132 @@
+package couchdb
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+ dockertest "gopkg.in/ory-am/dockertest.v3"
+)
+
+func TestCouchDBBackend(t *testing.T) {
+ cleanup, endpoint, username, password := prepareCouchdbDBTestContainer(t)
+ defer cleanup()
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewCouchDBBackend(map[string]string{
+ "endpoint": endpoint,
+ "username": username,
+ "password": password,
+ }, logger)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
+}
+
+func TestTransactionalCouchDBBackend(t *testing.T) {
+ cleanup, endpoint, username, password := prepareCouchdbDBTestContainer(t)
+ defer cleanup()
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewTransactionalCouchDBBackend(map[string]string{
+ "endpoint": endpoint,
+ "username": username,
+ "password": password,
+ }, logger)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
+}
+
+func prepareCouchdbDBTestContainer(t *testing.T) (cleanup func(), retAddress, username, password string) {
+ // If the environment variable is set, assume the caller wants to target
+ // a real CouchDB.
+ if os.Getenv("COUCHDB_ENDPOINT") != "" {
+ return func() {}, os.Getenv("COUCHDB_ENDPOINT"), os.Getenv("COUCHDB_USERNAME"), os.Getenv("COUCHDB_PASSWORD")
+ }
+
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("Failed to connect to docker: %s", err)
+ }
+
+ resource, err := pool.Run("couchdb", "1.6", []string{})
+ if err != nil {
+ t.Fatalf("Could not start local DynamoDB: %s", err)
+ }
+
+ retAddress = "http://localhost:" + resource.GetPort("5984/tcp")
+ cleanup = func() {
+ err := pool.Purge(resource)
+ if err != nil {
+ t.Fatalf("Failed to cleanup local DynamoDB: %s", err)
+ }
+ }
+
+ // exponential backoff-retry, because CouchDB may not be able to accept
+ // connections yet
+ if err := pool.Retry(func() error {
+ var err error
+ resp, err := http.Get(retAddress)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("Expected couchdb to return status code 200, got (%s) instead.", resp.Status)
+ }
+ return nil
+ }); err != nil {
+ t.Fatalf("Could not connect to docker: %s", err)
+ }
+
+ dbName := fmt.Sprintf("vault-test-%d", time.Now().Unix())
+ {
+ req, err := http.NewRequest("PUT", fmt.Sprintf("%s/%s", retAddress, dbName), nil)
+ if err != nil {
+ t.Fatalf("Could not create create database request: %q", err)
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ t.Fatalf("Could not create database: %q", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusCreated {
+ bs, _ := ioutil.ReadAll(resp.Body)
+ t.Fatalf("Failed to create database: %s %s\n", resp.Status, string(bs))
+ }
+ }
+ {
+ req, err := http.NewRequest("PUT", fmt.Sprintf("%s/_config/admins/admin", retAddress), strings.NewReader(`"admin"`))
+ if err != nil {
+ t.Fatalf("Could not create admin user request: %q", err)
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ t.Fatalf("Could not create admin user: %q", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ bs, _ := ioutil.ReadAll(resp.Body)
+ t.Fatalf("Failed to create admin user: %s %s\n", resp.Status, string(bs))
+ }
+ }
+
+ return cleanup, retAddress + "/" + dbName, "admin", "admin"
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/dynamodb.go b/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb.go
similarity index 87%
rename from vendor/github.com/hashicorp/vault/physical/dynamodb.go
rename to vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb.go
index 4c7cefb..c0b3f3e 100644
--- a/vendor/github.com/hashicorp/vault/physical/dynamodb.go
+++ b/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb.go
@@ -1,10 +1,11 @@
-package physical
+package dynamodb
import (
"fmt"
"math"
+ "net/http"
"os"
- "path/filepath"
+ pkgPath "path"
"sort"
"strconv"
"strings"
@@ -16,14 +17,15 @@ import (
"github.com/armon/go-metrics"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
"github.com/hashicorp/errwrap"
+ cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/awsutil"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/physical"
)
const (
@@ -75,7 +77,7 @@ type DynamoDBBackend struct {
recovery bool
logger log.Logger
haEnabled bool
- permitPool *PermitPool
+ permitPool *physical.PermitPool
}
// DynamoDBRecord is the representation of a vault entry in
@@ -109,9 +111,9 @@ type DynamoDBLockRecord struct {
Expires int64
}
-// newDynamoDBBackend constructs a DynamoDB backend. If the
+// NewDynamoDBBackend constructs a DynamoDB backend. If the
// configured DynamoDB table does not exist, it creates it.
-func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
table := os.Getenv("AWS_DYNAMODB_TABLE")
if table == "" {
table = conf["table"]
@@ -166,29 +168,37 @@ func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, err
if endpoint == "" {
endpoint = conf["endpoint"]
}
- region := os.Getenv("AWS_DEFAULT_REGION")
+ region := os.Getenv("AWS_REGION")
if region == "" {
- region = conf["region"]
+ region = os.Getenv("AWS_DEFAULT_REGION")
if region == "" {
- region = DefaultDynamoDBRegion
+ region = conf["region"]
+ if region == "" {
+ region = DefaultDynamoDBRegion
+ }
}
}
- creds := credentials.NewChainCredentials([]credentials.Provider{
- &credentials.StaticProvider{Value: credentials.Value{
- AccessKeyID: accessKey,
- SecretAccessKey: secretKey,
- SessionToken: sessionToken,
- }},
- &credentials.EnvProvider{},
- &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
- &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
- })
+ credsConfig := &awsutil.CredentialsConfig{
+ AccessKey: accessKey,
+ SecretKey: secretKey,
+ SessionToken: sessionToken,
+ }
+ creds, err := credsConfig.GenerateCredentialChain()
+ if err != nil {
+ return nil, err
+ }
+
+ pooledTransport := cleanhttp.DefaultPooledTransport()
+ pooledTransport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
awsConf := aws.NewConfig().
WithCredentials(creds).
WithRegion(region).
- WithEndpoint(endpoint)
+ WithEndpoint(endpoint).
+ WithHTTPClient(&http.Client{
+ Transport: pooledTransport,
+ })
client := dynamodb.New(session.New(awsConf))
if err := ensureTableExists(client, table, readCapacity, writeCapacity); err != nil {
@@ -222,7 +232,7 @@ func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, err
return &DynamoDBBackend{
table: table,
client: client,
- permitPool: NewPermitPool(maxParInt),
+ permitPool: physical.NewPermitPool(maxParInt),
recovery: recoveryModeBool,
haEnabled: haEnabledBool,
logger: logger,
@@ -230,7 +240,7 @@ func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, err
}
// Put is used to insert or update an entry
-func (d *DynamoDBBackend) Put(entry *Entry) error {
+func (d *DynamoDBBackend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"dynamodb", "put"}, time.Now())
record := DynamoDBRecord{
@@ -248,7 +258,7 @@ func (d *DynamoDBBackend) Put(entry *Entry) error {
},
}}
- for _, prefix := range prefixes(entry.Key) {
+ for _, prefix := range physical.Prefixes(entry.Key) {
record = DynamoDBRecord{
Path: recordPathForVaultKey(prefix),
Key: fmt.Sprintf("%s/", recordKeyForVaultKey(prefix)),
@@ -268,7 +278,7 @@ func (d *DynamoDBBackend) Put(entry *Entry) error {
}
// Get is used to fetch an entry
-func (d *DynamoDBBackend) Get(key string) (*Entry, error) {
+func (d *DynamoDBBackend) Get(key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"dynamodb", "get"}, time.Now())
d.permitPool.Acquire()
@@ -294,7 +304,7 @@ func (d *DynamoDBBackend) Get(key string) (*Entry, error) {
return nil, err
}
- return &Entry{
+ return &physical.Entry{
Key: vaultKey(record),
Value: record.Value,
}, nil
@@ -314,14 +324,14 @@ func (d *DynamoDBBackend) Delete(key string) error {
}}
// clean up now empty 'folders'
- prefixes := prefixes(key)
+ prefixes := physical.Prefixes(key)
sort.Sort(sort.Reverse(sort.StringSlice(prefixes)))
for _, prefix := range prefixes {
- items, err := d.List(prefix)
+ hasChildren, err := d.hasChildren(prefix)
if err != nil {
return err
}
- if len(items) == 1 {
+ if !hasChildren {
requests = append(requests, &dynamodb.WriteRequest{
DeleteRequest: &dynamodb.DeleteRequest{
Key: map[string]*dynamodb.AttributeValue{
@@ -378,15 +388,49 @@ func (d *DynamoDBBackend) List(prefix string) ([]string, error) {
return keys, nil
}
+// hasChildren returns true if there exist items below a certain path prefix.
+// To do so, the method fetches such items from DynamoDB. If there is more
+// than one item (the first being the "directory" item itself), there are
+// children.
+func (d *DynamoDBBackend) hasChildren(prefix string) (bool, error) {
+ prefix = strings.TrimSuffix(prefix, "/")
+ prefix = escapeEmptyPath(prefix)
+
+ queryInput := &dynamodb.QueryInput{
+ TableName: aws.String(d.table),
+ ConsistentRead: aws.Bool(true),
+ KeyConditions: map[string]*dynamodb.Condition{
+ "Path": {
+ ComparisonOperator: aws.String("EQ"),
+ AttributeValueList: []*dynamodb.AttributeValue{{
+ S: aws.String(prefix),
+ }},
+ },
+ },
+ // Avoid fetching too many items from DynamoDB for performance reasons.
+ // We need at least two because one is the directory item, all others
+ // are children.
+ Limit: aws.Int64(2),
+ }
+
+ d.permitPool.Acquire()
+ defer d.permitPool.Release()
+
+ out, err := d.client.Query(queryInput)
+ if err != nil {
+ return false, err
+ }
+ return len(out.Items) > 1, nil
+}
+
// LockWith is used for mutual exclusion based on the given key.
-func (d *DynamoDBBackend) LockWith(key, value string) (Lock, error) {
+func (d *DynamoDBBackend) LockWith(key, value string) (physical.Lock, error) {
identity, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
return &DynamoDBLock{
backend: d,
- key: filepath.Join(filepath.Dir(key), DynamoDBLockPrefix+filepath.Base(key)),
+ key: pkgPath.Join(pkgPath.Dir(key), DynamoDBLockPrefix+pkgPath.Base(key)),
value: value,
identity: identity,
recovery: d.recovery,
@@ -690,7 +734,7 @@ func ensureTableExists(client *dynamodb.DynamoDB, table string, readCapacity, wr
// its last component.
func recordPathForVaultKey(key string) string {
if strings.Contains(key, "/") {
- return filepath.Dir(key)
+ return pkgPath.Dir(key)
}
return DynamoDBEmptyPath
}
@@ -700,7 +744,7 @@ func recordPathForVaultKey(key string) string {
// property. This path equals the vault key's
// last component.
func recordKeyForVaultKey(key string) string {
- return filepath.Base(key)
+ return pkgPath.Base(key)
}
// vaultKey returns the vault key for a given record
@@ -711,7 +755,7 @@ func vaultKey(record *DynamoDBRecord) string {
if path == "" {
return record.Key
}
- return filepath.Join(record.Path, record.Key)
+ return pkgPath.Join(record.Path, record.Key)
}
// escapeEmptyPath is used to escape the root key's path
@@ -731,15 +775,3 @@ func unescapeEmptyPath(s string) string {
}
return s
}
-
-// prefixes returns all parent 'folders' for a given
-// vault key.
-// e.g. for 'foo/bar/baz', it returns ['foo', 'foo/bar']
-func prefixes(s string) []string {
- components := strings.Split(s, "/")
- result := []string{}
- for i := 1; i < len(components); i++ {
- result = append(result, strings.Join(components[:i], "/"))
- }
- return result
-}
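
The private `prefixes` helper removed here now lives in the parent package as `physical.Prefixes`. Assuming it keeps the contract documented above, a worked example:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/physical"
)

func main() {
	// All parent 'folders' of a vault key, shallowest first.
	fmt.Println(physical.Prefixes("foo/bar/baz"))
	// Output: [foo foo/bar]
}
```
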
diff --git a/vendor/github.com/hashicorp/vault/physical/dynamodb_test.go b/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb_test.go
similarity index 94%
rename from vendor/github.com/hashicorp/vault/physical/dynamodb_test.go
rename to vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb_test.go
index daac8c8..426f23f 100644
--- a/vendor/github.com/hashicorp/vault/physical/dynamodb_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb_test.go
@@ -1,4 +1,4 @@
-package physical
+package dynamodb
import (
"fmt"
@@ -9,6 +9,7 @@ import (
"time"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
dockertest "gopkg.in/ory-am/dockertest.v3"
@@ -49,20 +50,20 @@ func TestDynamoDBBackend(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("dynamodb", logger, map[string]string{
+ b, err := NewDynamoDBBackend(map[string]string{
"access_key": creds.AccessKeyID,
"secret_key": creds.SecretAccessKey,
"session_token": creds.SessionToken,
"table": table,
"region": region,
"endpoint": endpoint,
- })
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
- testBackend(t, b)
- testBackend_ListPrefix(t, b)
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
}
func TestDynamoDBHABackend(t *testing.T) {
@@ -95,30 +96,30 @@ func TestDynamoDBHABackend(t *testing.T) {
}()
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("dynamodb", logger, map[string]string{
+ b, err := NewDynamoDBBackend(map[string]string{
"access_key": creds.AccessKeyID,
"secret_key": creds.SecretAccessKey,
"session_token": creds.SessionToken,
"table": table,
"region": region,
"endpoint": endpoint,
- })
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
- ha, ok := b.(HABackend)
+ ha, ok := b.(physical.HABackend)
if !ok {
t.Fatalf("dynamodb does not implement HABackend")
}
- testHABackend(t, ha, ha)
+ physical.ExerciseHABackend(t, ha, ha)
testDynamoDBLockTTL(t, ha)
}
// Similar to physical.ExerciseHABackend, but using internal implementation details to
// trigger the lock failure scenario by setting the lock renew period for one
// of the locks to a higher value than the lock TTL.
-func testDynamoDBLockTTL(t *testing.T, ha HABackend) {
+func testDynamoDBLockTTL(t *testing.T, ha physical.HABackend) {
// Set much smaller lock times to speed up the test.
lockTTL := time.Second * 3
renewInterval := time.Second * 1
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd.go
similarity index 92%
rename from vendor/github.com/hashicorp/vault/physical/etcd.go
rename to vendor/github.com/hashicorp/vault/physical/etcd/etcd.go
index 01a928d..5d9c26d 100644
--- a/vendor/github.com/hashicorp/vault/physical/etcd.go
+++ b/vendor/github.com/hashicorp/vault/physical/etcd/etcd.go
@@ -1,4 +1,4 @@
-package physical
+package etcd
import (
"context"
@@ -10,6 +10,7 @@ import (
"github.com/coreos/etcd/client"
"github.com/coreos/go-semver/semver"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
)
@@ -22,11 +23,11 @@ var (
EtcdLockHeldError = errors.New("lock already held")
EtcdLockNotHeldError = errors.New("lock not held")
EtcdSemaphoreKeyRemovedError = errors.New("semaphore key removed before lock acquisition")
- EtcdVersionUnknow = errors.New("etcd: unknown API version")
+ EtcdVersionUnknown = errors.New("etcd: unknown API version")
)
-// newEtcdBackend constructs a etcd backend using a given machine address.
-func newEtcdBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+// NewEtcdBackend constructs an etcd backend using a given machine address.
+func NewEtcdBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
var (
apiVersion string
ok bool
@@ -75,7 +76,7 @@ func newEtcdBackend(conf map[string]string, logger log.Logger) (Backend, error)
}
return newEtcd3Backend(conf, logger)
default:
- return nil, EtcdVersionUnknow
+ return nil, EtcdVersionUnknown
}
}
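
`NewEtcdBackend` dispatches on the `etcd_api` configuration key, falling back to auto-detection when it is unset. A construction sketch pinning the v3 API, as the etcd3 test below does; the address is a placeholder:

```go
package main

import (
	stdlog "log"

	"github.com/hashicorp/vault/helper/logformat"
	"github.com/hashicorp/vault/physical/etcd"
	log "github.com/mgutz/logxi/v1"
)

func main() {
	logger := logformat.NewVaultLogger(log.LevelTrace)

	// "etcd_api": "3" routes to the etcd3 implementation; an unrecognized
	// value yields EtcdVersionUnknown.
	b, err := etcd.NewEtcdBackend(map[string]string{
		"address":  "http://127.0.0.1:2379",
		"path":     "vault/",
		"etcd_api": "3",
	}, logger)
	if err != nil {
		stdlog.Fatal(err)
	}
	_ = b
}
```
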
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd2.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd2.go
similarity index 97%
rename from vendor/github.com/hashicorp/vault/physical/etcd2.go
rename to vendor/github.com/hashicorp/vault/physical/etcd/etcd2.go
index 4ef4b08..4e08615 100644
--- a/vendor/github.com/hashicorp/vault/physical/etcd2.go
+++ b/vendor/github.com/hashicorp/vault/physical/etcd/etcd2.go
@@ -1,4 +1,4 @@
-package physical
+package etcd
import (
"context"
@@ -14,6 +14,7 @@ import (
metrics "github.com/armon/go-metrics"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/pkg/transport"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
)
@@ -49,12 +50,12 @@ const (
type Etcd2Backend struct {
path string
kAPI client.KeysAPI
- permitPool *PermitPool
+ permitPool *physical.PermitPool
logger log.Logger
haEnabled bool
}
-func newEtcd2Backend(conf map[string]string, logger log.Logger) (Backend, error) {
+func newEtcd2Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
// Get the etcd path from the configuration.
path, ok := conf["path"]
if !ok {
@@ -110,7 +111,7 @@ func newEtcd2Backend(conf map[string]string, logger log.Logger) (Backend, error)
return &Etcd2Backend{
path: path,
kAPI: kAPI,
- permitPool: NewPermitPool(DefaultParallelOperations),
+ permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
logger: logger,
haEnabled: haEnabledBool,
}, nil
@@ -169,7 +170,7 @@ func newEtcdV2Client(conf map[string]string) (client.Client, error) {
}
// Put is used to insert or update an entry.
-func (c *Etcd2Backend) Put(entry *Entry) error {
+func (c *Etcd2Backend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now())
value := base64.StdEncoding.EncodeToString(entry.Value)
@@ -181,7 +182,7 @@ func (c *Etcd2Backend) Put(entry *Entry) error {
}
// Get is used to fetch an entry.
-func (c *Etcd2Backend) Get(key string) (*Entry, error) {
+func (c *Etcd2Backend) Get(key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now())
c.permitPool.Acquire()
@@ -206,7 +207,7 @@ func (c *Etcd2Backend) Get(key string) (*Entry, error) {
}
// Construct and return a new entry.
- return &Entry{
+ return &physical.Entry{
Key: key,
Value: value,
}, nil
@@ -290,7 +291,7 @@ func (b *Etcd2Backend) nodePathLock(key string) string {
}
// LockWith is used for mutual exclusion based on the given key.
-func (c *Etcd2Backend) LockWith(key, value string) (Lock, error) {
+func (c *Etcd2Backend) LockWith(key, value string) (physical.Lock, error) {
return &Etcd2Lock{
kAPI: c.kAPI,
value: value,
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd3.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd3.go
similarity index 73%
rename from vendor/github.com/hashicorp/vault/physical/etcd3.go
rename to vendor/github.com/hashicorp/vault/physical/etcd/etcd3.go
index 6fecc73..04944e5 100644
--- a/vendor/github.com/hashicorp/vault/physical/etcd3.go
+++ b/vendor/github.com/hashicorp/vault/physical/etcd/etcd3.go
@@ -1,4 +1,4 @@
-package physical
+package etcd
import (
"errors"
@@ -11,10 +11,11 @@ import (
"time"
metrics "github.com/armon/go-metrics"
- "github.com/coreos/etcd/client"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
"github.com/coreos/etcd/pkg/transport"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
"golang.org/x/net/context"
)
@@ -27,16 +28,21 @@ type EtcdBackend struct {
path string
haEnabled bool
- permitPool *PermitPool
+ permitPool *physical.PermitPool
etcd *clientv3.Client
}
-// etcd default lease duration is 60s. set to 15s for faster recovery.
-const etcd3LockTimeoutInSeconds = 15
+const (
+ // etcd3 default lease duration is 60s. set to 15s for faster recovery.
+ etcd3LockTimeoutInSeconds = 15
+ // etcd3 default request timeout is set to 5s. It should be long enough
+ // for most cases, even with internal retry.
+ etcd3RequestTimeout = 5 * time.Second
+)
// newEtcd3Backend constructs an etcd3 backend.
-func newEtcd3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
+func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
// Get the etcd path from the configuration.
path, ok := conf["path"]
if !ok {
@@ -117,7 +123,7 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (Backend, error)
}
if sync {
- ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
err := etcd.Sync(ctx)
cancel()
if err != nil {
@@ -128,29 +134,33 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (Backend, error)
return &EtcdBackend{
path: path,
etcd: etcd,
- permitPool: NewPermitPool(DefaultParallelOperations),
+ permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
logger: logger,
haEnabled: haEnabledBool,
}, nil
}
-func (c *EtcdBackend) Put(entry *Entry) error {
+func (c *EtcdBackend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now())
c.permitPool.Acquire()
defer c.permitPool.Release()
- _, err := c.etcd.Put(context.Background(), path.Join(c.path, entry.Key), string(entry.Value))
+ ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
+ defer cancel()
+ _, err := c.etcd.Put(ctx, path.Join(c.path, entry.Key), string(entry.Value))
return err
}
-func (c *EtcdBackend) Get(key string) (*Entry, error) {
+func (c *EtcdBackend) Get(key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now())
c.permitPool.Acquire()
defer c.permitPool.Release()
- resp, err := c.etcd.Get(context.Background(), path.Join(c.path, key))
+ ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
+ defer cancel()
+ resp, err := c.etcd.Get(ctx, path.Join(c.path, key))
if err != nil {
return nil, err
}
@@ -161,7 +171,7 @@ func (c *EtcdBackend) Get(key string) (*Entry, error) {
if len(resp.Kvs) > 1 {
return nil, errors.New("unexpected number of keys from a get request")
}
- return &Entry{
+ return &physical.Entry{
Key: key,
Value: resp.Kvs[0].Value,
}, nil
@@ -173,7 +183,9 @@ func (c *EtcdBackend) Delete(key string) error {
c.permitPool.Acquire()
defer c.permitPool.Release()
- _, err := c.etcd.Delete(context.Background(), path.Join(c.path, key))
+ ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
+ defer cancel()
+ _, err := c.etcd.Delete(ctx, path.Join(c.path, key))
if err != nil {
return err
}
@@ -186,8 +198,10 @@ func (c *EtcdBackend) List(prefix string) ([]string, error) {
c.permitPool.Acquire()
defer c.permitPool.Release()
+ ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
+ defer cancel()
prefix = path.Join(c.path, prefix)
- resp, err := c.etcd.Get(context.Background(), prefix, clientv3.WithPrefix())
+ resp, err := c.etcd.Get(ctx, prefix, clientv3.WithPrefix())
if err != nil {
return nil, err
}
@@ -204,7 +218,7 @@ func (c *EtcdBackend) List(prefix string) ([]string, error) {
if i := strings.Index(key, "/"); i == -1 {
keys = append(keys, key)
} else if i != -1 {
- keys = appendIfMissing(keys, key[:i+1])
+ keys = strutil.AppendIfMissing(keys, key[:i+1])
}
}
return keys, nil
@@ -229,7 +243,7 @@ type EtcdLock struct {
}
// LockWith is used for mutual exclusion based on the given key.
-func (c *EtcdBackend) LockWith(key, value string) (Lock, error) {
+func (c *EtcdBackend) LockWith(key, value string) (physical.Lock, error) {
session, err := concurrency.NewSession(c.etcd, concurrency.WithTTL(etcd3LockTimeoutInSeconds))
if err != nil {
return nil, err
@@ -264,7 +278,10 @@ func (c *EtcdLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
}
return nil, err
}
- if _, err := c.etcd.Put(ctx, c.etcdMu.Key(), c.value, clientv3.WithLease(c.etcdSession.Lease())); err != nil {
+
+ pctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
+ defer cancel()
+ if _, err := c.etcd.Put(pctx, c.etcdMu.Key(), c.value, clientv3.WithLease(c.etcdSession.Lease())); err != nil {
return nil, err
}
@@ -281,11 +298,16 @@ func (c *EtcdLock) Unlock() error {
return EtcdLockNotHeldError
}
- return c.etcdMu.Unlock(context.Background())
+ ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
+ defer cancel()
+ return c.etcdMu.Unlock(ctx)
}
func (c *EtcdLock) Value() (bool, string, error) {
- resp, err := c.etcd.Get(context.Background(),
+ ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout)
+ defer cancel()
+
+ resp, err := c.etcd.Get(ctx,
c.prefix, clientv3.WithPrefix(),
clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortAscend))
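
Every etcd3 round trip above now runs under a bounded context instead of a bare `context.Background()`, so a hung cluster member surfaces as an error after `etcd3RequestTimeout` instead of holding a permit forever. The pattern in isolation (the 5s value mirrors the constant above):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// withRequestTimeout applies the same bounded-context pattern the diff
// threads through Put/Get/Delete/List and the lock methods.
func withRequestTimeout(f func(ctx context.Context) error) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return f(ctx)
}

func main() {
	err := withRequestTimeout(func(ctx context.Context) error {
		select {
		case <-time.After(10 * time.Second): // stand-in for a hung etcd call
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
	fmt.Println(err) // context deadline exceeded
}
```
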
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd3_test.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd3_test.go
similarity index 66%
rename from vendor/github.com/hashicorp/vault/physical/etcd3_test.go
rename to vendor/github.com/hashicorp/vault/physical/etcd/etcd3_test.go
index 0724091..fbd842d 100644
--- a/vendor/github.com/hashicorp/vault/physical/etcd3_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/etcd/etcd3_test.go
@@ -1,4 +1,4 @@
-package physical
+package etcd
import (
"fmt"
@@ -7,6 +7,7 @@ import (
"time"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
)
@@ -18,20 +19,20 @@ func TestEtcd3Backend(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("etcd", logger, map[string]string{
+ b, err := NewEtcdBackend(map[string]string{
"path": fmt.Sprintf("/vault-%d", time.Now().Unix()),
"etcd_api": "3",
- })
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
- testBackend(t, b)
- testBackend_ListPrefix(t, b)
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
- ha, ok := b.(HABackend)
+ ha, ok := b.(physical.HABackend)
if !ok {
t.Fatalf("etcd3 does not implement HABackend")
}
- testHABackend(t, ha, ha)
+ physical.ExerciseHABackend(t, ha, ha)
}
diff --git a/vendor/github.com/hashicorp/vault/physical/etcd_test.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd_test.go
similarity index 82%
rename from vendor/github.com/hashicorp/vault/physical/etcd_test.go
rename to vendor/github.com/hashicorp/vault/physical/etcd/etcd_test.go
index adddac2..d5c30bb 100644
--- a/vendor/github.com/hashicorp/vault/physical/etcd_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/etcd/etcd_test.go
@@ -1,4 +1,4 @@
-package physical
+package etcd
import (
"fmt"
@@ -7,6 +7,7 @@ import (
"time"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
"github.com/coreos/etcd/client"
@@ -52,19 +53,19 @@ func TestEtcdBackend(t *testing.T) {
// need to provide it explicitly.
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("etcd", logger, map[string]string{
+ b, err := NewEtcdBackend(map[string]string{
"path": randPath,
- })
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
- testBackend(t, b)
- testBackend_ListPrefix(t, b)
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
- ha, ok := b.(HABackend)
+ ha, ok := b.(physical.HABackend)
if !ok {
t.Fatalf("etcd does not implement HABackend")
}
- testHABackend(t, ha, ha)
+ physical.ExerciseHABackend(t, ha, ha)
}
diff --git a/vendor/github.com/hashicorp/vault/physical/file.go b/vendor/github.com/hashicorp/vault/physical/file/file.go
similarity index 74%
rename from vendor/github.com/hashicorp/vault/physical/file.go
rename to vendor/github.com/hashicorp/vault/physical/file/file.go
index d9c5225..df05dba 100644
--- a/vendor/github.com/hashicorp/vault/physical/file.go
+++ b/vendor/github.com/hashicorp/vault/physical/file/file.go
@@ -1,4 +1,4 @@
-package physical
+package file
import (
"encoding/json"
@@ -11,7 +11,9 @@ import (
log "github.com/mgutz/logxi/v1"
+ "github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/physical"
)
// FileBackend is a physical backend that stores data on disk
@@ -25,15 +27,15 @@ type FileBackend struct {
sync.RWMutex
path string
logger log.Logger
- permitPool *PermitPool
+ permitPool *physical.PermitPool
}
type TransactionalFileBackend struct {
FileBackend
}
-// newFileBackend constructs a FileBackend using the given directory
-func newFileBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+// NewFileBackend constructs a FileBackend using the given directory
+func NewFileBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
path, ok := conf["path"]
if !ok {
return nil, fmt.Errorf("'path' must be set")
@@ -42,11 +44,11 @@ func newFileBackend(conf map[string]string, logger log.Logger) (Backend, error)
return &FileBackend{
path: path,
logger: logger,
- permitPool: NewPermitPool(DefaultParallelOperations),
+ permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
}, nil
}
-func newTransactionalFileBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+func NewTransactionalFileBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
path, ok := conf["path"]
if !ok {
return nil, fmt.Errorf("'path' must be set")
@@ -57,7 +59,7 @@ func newTransactionalFileBackend(conf map[string]string, logger log.Logger) (Bac
FileBackend: FileBackend{
path: path,
logger: logger,
- permitPool: NewPermitPool(1),
+ permitPool: physical.NewPermitPool(1),
},
}, nil
}
@@ -77,6 +79,10 @@ func (b *FileBackend) DeleteInternal(path string) error {
return nil
}
+ if err := b.validatePath(path); err != nil {
+ return err
+ }
+
basePath, key := b.expandPath(path)
fullPath := filepath.Join(basePath, key)
@@ -99,6 +105,9 @@ func (b *FileBackend) cleanupLogicalPath(path string) error {
dir, err := os.Open(fullPath)
if err != nil {
+ if dir != nil {
+ dir.Close()
+ }
if os.IsNotExist(err) {
return nil
} else {
@@ -124,7 +133,7 @@ func (b *FileBackend) cleanupLogicalPath(path string) error {
return nil
}
-func (b *FileBackend) Get(k string) (*Entry, error) {
+func (b *FileBackend) Get(k string) (*physical.Entry, error) {
b.permitPool.Acquire()
defer b.permitPool.Release()
@@ -134,11 +143,18 @@ func (b *FileBackend) Get(k string) (*Entry, error) {
return b.GetInternal(k)
}
-func (b *FileBackend) GetInternal(k string) (*Entry, error) {
+func (b *FileBackend) GetInternal(k string) (*physical.Entry, error) {
+ if err := b.validatePath(k); err != nil {
+ return nil, err
+ }
+
path, key := b.expandPath(k)
path = filepath.Join(path, key)
f, err := os.Open(path)
+ if f != nil {
+ defer f.Close()
+ }
if err != nil {
if os.IsNotExist(err) {
return nil, nil
@@ -146,9 +162,8 @@ func (b *FileBackend) GetInternal(k string) (*Entry, error) {
return nil, err
}
- defer f.Close()
- var entry Entry
+ var entry physical.Entry
if err := jsonutil.DecodeJSONFromReader(f, &entry); err != nil {
return nil, err
}
@@ -156,7 +171,7 @@ func (b *FileBackend) GetInternal(k string) (*Entry, error) {
return &entry, nil
}
-func (b *FileBackend) Put(entry *Entry) error {
+func (b *FileBackend) Put(entry *physical.Entry) error {
b.permitPool.Acquire()
defer b.permitPool.Release()
@@ -166,7 +181,11 @@ func (b *FileBackend) Put(entry *Entry) error {
return b.PutInternal(entry)
}
-func (b *FileBackend) PutInternal(entry *Entry) error {
+func (b *FileBackend) PutInternal(entry *physical.Entry) error {
+ if err := b.validatePath(entry.Key); err != nil {
+ return err
+ }
+
path, key := b.expandPath(entry.Key)
// Make the parent tree
@@ -179,10 +198,12 @@ func (b *FileBackend) PutInternal(entry *Entry) error {
filepath.Join(path, key),
os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
0600)
+ if f != nil {
+ defer f.Close()
+ }
if err != nil {
return err
}
- defer f.Close()
enc := json.NewEncoder(f)
return enc.Encode(entry)
}
@@ -198,6 +219,10 @@ func (b *FileBackend) List(prefix string) ([]string, error) {
}
func (b *FileBackend) ListInternal(prefix string) ([]string, error) {
+ if err := b.validatePath(prefix); err != nil {
+ return nil, err
+ }
+
path := b.path
if prefix != "" {
path = filepath.Join(path, prefix)
@@ -205,6 +230,9 @@ func (b *FileBackend) ListInternal(prefix string) ([]string, error) {
// Read the directory contents
f, err := os.Open(path)
+ if f != nil {
+ defer f.Close()
+ }
if err != nil {
if os.IsNotExist(err) {
return nil, nil
@@ -212,7 +240,6 @@ func (b *FileBackend) ListInternal(prefix string) ([]string, error) {
return nil, err
}
- defer f.Close()
names, err := f.Readdirnames(-1)
if err != nil {
@@ -237,12 +264,21 @@ func (b *FileBackend) expandPath(k string) (string, string) {
return path, "_" + key
}
-func (b *TransactionalFileBackend) Transaction(txns []TxnEntry) error {
+func (b *FileBackend) validatePath(path string) error {
+ switch {
+ case strings.Contains(path, ".."):
+ return consts.ErrPathContainsParentReferences
+ }
+
+ return nil
+}
+
+func (b *TransactionalFileBackend) Transaction(txns []physical.TxnEntry) error {
b.permitPool.Acquire()
defer b.permitPool.Release()
b.Lock()
defer b.Unlock()
- return genericTransactionHandler(b, txns)
+ return physical.GenericTransactionHandler(b, txns)
}
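
`NewTransactionalFileBackend` sizes the permit pool to one, and `Transaction` additionally holds the backend-wide write lock while `physical.GenericTransactionHandler` replays the batch through the `*Internal` methods. A usage sketch, assuming the `physical.Transactional` interface from the parent package:

```go
package main

import (
	"io/ioutil"
	stdlog "log"

	"github.com/hashicorp/vault/helper/logformat"
	"github.com/hashicorp/vault/physical"
	"github.com/hashicorp/vault/physical/file"
	log "github.com/mgutz/logxi/v1"
)

func main() {
	dir, err := ioutil.TempDir("", "vault-file-example")
	if err != nil {
		stdlog.Fatal(err)
	}

	logger := logformat.NewVaultLogger(log.LevelTrace)
	b, err := file.NewTransactionalFileBackend(map[string]string{"path": dir}, logger)
	if err != nil {
		stdlog.Fatal(err)
	}

	// Replayed under the backend's write lock, serialized with all
	// other operations on this backend.
	txns := []physical.TxnEntry{
		{Operation: physical.PutOperation, Entry: &physical.Entry{Key: "foo", Value: []byte("bar")}},
	}
	if err := b.(physical.Transactional).Transaction(txns); err != nil {
		stdlog.Fatal(err)
	}
}
```
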
diff --git a/vendor/github.com/hashicorp/vault/physical/file_test.go b/vendor/github.com/hashicorp/vault/physical/file/file_test.go
similarity index 77%
rename from vendor/github.com/hashicorp/vault/physical/file_test.go
rename to vendor/github.com/hashicorp/vault/physical/file/file_test.go
index 9810f4b..6438e21 100644
--- a/vendor/github.com/hashicorp/vault/physical/file_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/file/file_test.go
@@ -1,4 +1,4 @@
-package physical
+package file
import (
"encoding/json"
@@ -9,6 +9,7 @@ import (
"testing"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
)
@@ -21,9 +22,9 @@ func TestFileBackend_Base64URLEncoding(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("file", logger, map[string]string{
+ b, err := NewFileBackend(map[string]string{
"path": backendPath,
- })
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
@@ -39,7 +40,7 @@ func TestFileBackend_Base64URLEncoding(t *testing.T) {
// Create a storage entry without base64 encoding the file name
rawFullPath := filepath.Join(backendPath, "_foo")
- e := &Entry{Key: "foo", Value: []byte("test")}
+ e := &physical.Entry{Key: "foo", Value: []byte("test")}
f, err := os.OpenFile(
rawFullPath,
os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
@@ -131,6 +132,30 @@ func TestFileBackend_Base64URLEncoding(t *testing.T) {
}
}
+func TestFileBackend_ValidatePath(t *testing.T) {
+ dir, err := ioutil.TempDir("", "vault")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer os.RemoveAll(dir)
+
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ b, err := NewFileBackend(map[string]string{
+ "path": dir,
+ }, logger)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := b.Delete("foo/bar/../zip"); err == nil {
+ t.Fatal("expected error")
+ }
+ if err := b.Delete("foo/bar/zip"); err != nil {
+ t.Fatal("did not expect error")
+ }
+}
+
func TestFileBackend(t *testing.T) {
dir, err := ioutil.TempDir("", "vault")
if err != nil {
@@ -140,13 +165,13 @@ func TestFileBackend(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("file", logger, map[string]string{
+ b, err := NewFileBackend(map[string]string{
"path": dir,
- })
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
- testBackend(t, b)
- testBackend_ListPrefix(t, b)
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
}
diff --git a/vendor/github.com/hashicorp/vault/physical/gcs.go b/vendor/github.com/hashicorp/vault/physical/gcs/gcs.go
similarity index 75%
rename from vendor/github.com/hashicorp/vault/physical/gcs.go
rename to vendor/github.com/hashicorp/vault/physical/gcs/gcs.go
index e4d4187..5e7fc78 100644
--- a/vendor/github.com/hashicorp/vault/physical/gcs.go
+++ b/vendor/github.com/hashicorp/vault/physical/gcs/gcs.go
@@ -1,4 +1,4 @@
-package physical
+package gcs
import (
"fmt"
@@ -10,6 +10,7 @@ import (
"time"
"github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
"cloud.google.com/go/storage"
@@ -24,15 +25,14 @@ import (
type GCSBackend struct {
bucketName string
client *storage.Client
- permitPool *PermitPool
+ permitPool *physical.PermitPool
logger log.Logger
}
-// newGCSBackend constructs a Google Cloud Storage backend using a pre-existing
+// NewGCSBackend constructs a Google Cloud Storage backend using a pre-existing
// bucket. Credentials can be provided to the backend, sourced
// from environment variables or a service account file
-func newGCSBackend(conf map[string]string, logger log.Logger) (Backend, error) {
-
+func NewGCSBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
bucketName := os.Getenv("GOOGLE_STORAGE_BUCKET")
if bucketName == "" {
@@ -42,26 +42,14 @@ func newGCSBackend(conf map[string]string, logger log.Logger) (Backend, error) {
}
}
- // path to service account JSON file
- credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
- if credentialsFile == "" {
- credentialsFile = conf["credentials_file"]
- if credentialsFile == "" {
- return nil, fmt.Errorf("env var GOOGLE_APPLICATION_CREDENTIALS or configuration parameter 'credentials_file' must be set")
- }
- }
-
- client, err := storage.NewClient(
- context.Background(),
- option.WithServiceAccountFile(credentialsFile),
- )
-
+ ctx := context.Background()
+ client, err := newGCSClient(ctx, conf, logger)
if err != nil {
- return nil, fmt.Errorf("error establishing storage client: '%v'", err)
+ return nil, errwrap.Wrapf("error establishing strorage client: {{err}}", err)
}
// check client connectivity by getting bucket attributes
- _, err = client.Bucket(bucketName).Attrs(context.Background())
+ _, err = client.Bucket(bucketName).Attrs(ctx)
if err != nil {
return nil, fmt.Errorf("unable to access bucket '%s': '%v'", bucketName, err)
}
@@ -81,15 +69,38 @@ func newGCSBackend(conf map[string]string, logger log.Logger) (Backend, error) {
g := GCSBackend{
bucketName: bucketName,
client: client,
- permitPool: NewPermitPool(maxParInt),
+ permitPool: physical.NewPermitPool(maxParInt),
logger: logger,
}
return &g, nil
}
+func newGCSClient(ctx context.Context, conf map[string]string, logger log.Logger) (*storage.Client, error) {
+ // if credentials_file is configured, try to use it
+ // else use application default credentials
+ credentialsFile, ok := conf["credentials_file"]
+ if ok {
+ client, err := storage.NewClient(
+ ctx,
+ option.WithServiceAccountFile(credentialsFile),
+ )
+
+ if err != nil {
+ return nil, fmt.Errorf("error with provided credentials: '%v'", err)
+ }
+ return client, nil
+ }
+
+ client, err := storage.NewClient(ctx)
+ if err != nil {
+ return nil, errwrap.Wrapf("error with application default credentials: {{err}}", err)
+ }
+ return client, nil
+}
+
// Put is used to insert or update an entry
-func (g *GCSBackend) Put(entry *Entry) error {
+func (g *GCSBackend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"gcs", "put"}, time.Now())
bucket := g.client.Bucket(g.bucketName)
@@ -105,7 +116,7 @@ func (g *GCSBackend) Put(entry *Entry) error {
}
// Get is used to fetch an entry
-func (g *GCSBackend) Get(key string) (*Entry, error) {
+func (g *GCSBackend) Get(key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"gcs", "get"}, time.Now())
bucket := g.client.Bucket(g.bucketName)
@@ -127,7 +138,7 @@ func (g *GCSBackend) Get(key string) (*Entry, error) {
return nil, fmt.Errorf("error reading object '%v': '%v'", key, err)
}
- ent := Entry{
+ ent := physical.Entry{
Key: key,
Value: value,
}
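
The extracted newGCSClient makes credentials_file optional: an explicit service-account file wins, otherwise the client falls back to Application Default Credentials. A compile-only sketch of that decision flow, using the same storage and option packages as the diff:

```go
package main

import (
	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
)

// newClient sketches the fallback: explicit credentials_file first,
// else Application Default Credentials (GOOGLE_APPLICATION_CREDENTIALS,
// gcloud configuration, or the GCE metadata server).
func newClient(ctx context.Context, conf map[string]string) (*storage.Client, error) {
	if credentialsFile, ok := conf["credentials_file"]; ok {
		return storage.NewClient(ctx, option.WithServiceAccountFile(credentialsFile))
	}
	return storage.NewClient(ctx)
}

func main() {
	if client, err := newClient(context.Background(), map[string]string{}); err == nil {
		client.Close()
	}
}
```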
diff --git a/vendor/github.com/hashicorp/vault/physical/gcs_test.go b/vendor/github.com/hashicorp/vault/physical/gcs/gcs_test.go
similarity index 80%
rename from vendor/github.com/hashicorp/vault/physical/gcs_test.go
rename to vendor/github.com/hashicorp/vault/physical/gcs/gcs_test.go
index 23c4d3a..dda6eed 100644
--- a/vendor/github.com/hashicorp/vault/physical/gcs_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/gcs/gcs_test.go
@@ -1,4 +1,4 @@
-package physical
+package gcs
import (
"fmt"
@@ -11,16 +11,12 @@ import (
"cloud.google.com/go/storage"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
-var ConsistencyDelays = delays{
- beforeList: 5 * time.Second,
- beforeGet: 0 * time.Second,
-}
-
func TestGCSBackend(t *testing.T) {
credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
@@ -54,7 +50,6 @@ func TestGCSBackend(t *testing.T) {
defer func() {
objects_it := bucket.Objects(context.Background(), nil)
- time.Sleep(ConsistencyDelays.beforeList)
// have to delete all objects before deleting bucket
for {
objAttrs, err := objects_it.Next()
@@ -70,8 +65,6 @@ func TestGCSBackend(t *testing.T) {
bucket.Object(objAttrs.Name).Delete(context.Background())
}
- // not a list operation, but google lists to make sure the bucket is empty on delete
- time.Sleep(ConsistencyDelays.beforeList)
err := bucket.Delete(context.Background())
if err != nil {
t.Fatalf("error deleting bucket '%s': '%v'", bucketName, err)
@@ -80,16 +73,16 @@ func TestGCSBackend(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("gcs", logger, map[string]string{
+ b, err := NewGCSBackend(map[string]string{
"bucket": bucketName,
"credentials_file": credentialsFile,
- })
+ }, logger)
if err != nil {
t.Fatalf("error creating google cloud storage backend: '%s'", err)
}
- testEventuallyConsistentBackend(t, b, ConsistencyDelays)
- testEventuallyConsistentBackend_ListPrefix(t, b, ConsistencyDelays)
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
}
diff --git a/vendor/github.com/hashicorp/vault/physical/cache_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/cache_test.go
similarity index 75%
rename from vendor/github.com/hashicorp/vault/physical/cache_test.go
rename to vendor/github.com/hashicorp/vault/physical/inmem/cache_test.go
index 151cf99..c771f03 100644
--- a/vendor/github.com/hashicorp/vault/physical/cache_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/inmem/cache_test.go
@@ -1,32 +1,39 @@
-package physical
+package inmem
import (
"testing"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
)
func TestCache(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- inm := NewInmem(logger)
- cache := NewCache(inm, 0, logger)
- testBackend(t, cache)
- testBackend_ListPrefix(t, cache)
+ inm, err := NewInmem(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cache := physical.NewCache(inm, 0, logger)
+ physical.ExerciseBackend(t, cache)
+ physical.ExerciseBackend_ListPrefix(t, cache)
}
func TestCache_Purge(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- inm := NewInmem(logger)
- cache := NewCache(inm, 0, logger)
+ inm, err := NewInmem(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cache := physical.NewCache(inm, 0, logger)
- ent := &Entry{
+ ent := &physical.Entry{
Key: "foo",
Value: []byte("bar"),
}
- err := cache.Put(ent)
+ err = cache.Put(ent)
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -59,21 +66,24 @@ func TestCache_Purge(t *testing.T) {
func TestCache_IgnoreCore(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- inm := NewInmem(logger)
- cache := NewCache(inm, 0, logger)
+ inm, err := NewInmem(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
- var ent *Entry
- var err error
+ cache := physical.NewCache(inm, 0, logger)
+
+ var ent *physical.Entry
// First try normal handling
- ent = &Entry{
+ ent = &physical.Entry{
Key: "foo",
Value: []byte("bar"),
}
if err := cache.Put(ent); err != nil {
t.Fatal(err)
}
- ent = &Entry{
+ ent = &physical.Entry{
Key: "foo",
Value: []byte("foobar"),
}
@@ -89,14 +99,14 @@ func TestCache_IgnoreCore(t *testing.T) {
}
// Now try core path
- ent = &Entry{
+ ent = &physical.Entry{
Key: "core/foo",
Value: []byte("bar"),
}
if err := cache.Put(ent); err != nil {
t.Fatal(err)
}
- ent = &Entry{
+ ent = &physical.Entry{
Key: "core/foo",
Value: []byte("foobar"),
}
@@ -112,7 +122,7 @@ func TestCache_IgnoreCore(t *testing.T) {
}
// Now make sure looked-up values aren't added
- ent = &Entry{
+ ent = &physical.Entry{
Key: "core/zip",
Value: []byte("zap"),
}
@@ -126,7 +136,7 @@ func TestCache_IgnoreCore(t *testing.T) {
if string(ent.Value) != "zap" {
t.Fatal("expected non-cached value")
}
- ent = &Entry{
+ ent = &physical.Entry{
Key: "core/zip",
Value: []byte("zipzap"),
}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem.go
similarity index 73%
rename from vendor/github.com/hashicorp/vault/physical/inmem.go
rename to vendor/github.com/hashicorp/vault/physical/inmem/inmem.go
index 47f18eb..d4f9201 100644
--- a/vendor/github.com/hashicorp/vault/physical/inmem.go
+++ b/vendor/github.com/hashicorp/vault/physical/inmem/inmem.go
@@ -1,9 +1,10 @@
-package physical
+package inmem
import (
"strings"
"sync"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
"github.com/armon/go-radix"
@@ -15,7 +16,7 @@ import (
type InmemBackend struct {
sync.RWMutex
root *radix.Tree
- permitPool *PermitPool
+ permitPool *physical.PermitPool
logger log.Logger
}
@@ -24,30 +25,30 @@ type TransactionalInmemBackend struct {
}
// NewInmem constructs a new in-memory backend
-func NewInmem(logger log.Logger) *InmemBackend {
+func NewInmem(_ map[string]string, logger log.Logger) (physical.Backend, error) {
in := &InmemBackend{
root: radix.New(),
- permitPool: NewPermitPool(DefaultParallelOperations),
+ permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
logger: logger,
}
- return in
+ return in, nil
}
// Basically for now just creates a permit pool of size 1 so only one operation
// can run at a time
-func NewTransactionalInmem(logger log.Logger) *TransactionalInmemBackend {
+func NewTransactionalInmem(_ map[string]string, logger log.Logger) (physical.Backend, error) {
in := &TransactionalInmemBackend{
InmemBackend: InmemBackend{
root: radix.New(),
- permitPool: NewPermitPool(1),
+ permitPool: physical.NewPermitPool(1),
logger: logger,
},
}
- return in
+ return in, nil
}
// Put is used to insert or update an entry
-func (i *InmemBackend) Put(entry *Entry) error {
+func (i *InmemBackend) Put(entry *physical.Entry) error {
i.permitPool.Acquire()
defer i.permitPool.Release()
@@ -57,13 +58,13 @@ func (i *InmemBackend) Put(entry *Entry) error {
return i.PutInternal(entry)
}
-func (i *InmemBackend) PutInternal(entry *Entry) error {
+func (i *InmemBackend) PutInternal(entry *physical.Entry) error {
i.root.Insert(entry.Key, entry)
return nil
}
// Get is used to fetch an entry
-func (i *InmemBackend) Get(key string) (*Entry, error) {
+func (i *InmemBackend) Get(key string) (*physical.Entry, error) {
i.permitPool.Acquire()
defer i.permitPool.Release()
@@ -73,9 +74,9 @@ func (i *InmemBackend) Get(key string) (*Entry, error) {
return i.GetInternal(key)
}
-func (i *InmemBackend) GetInternal(key string) (*Entry, error) {
+func (i *InmemBackend) GetInternal(key string) (*physical.Entry, error) {
if raw, ok := i.root.Get(key); ok {
- return raw.(*Entry), nil
+ return raw.(*physical.Entry), nil
}
return nil, nil
}
@@ -131,12 +132,12 @@ func (i *InmemBackend) ListInternal(prefix string) ([]string, error) {
}
// Implements the transaction interface
-func (t *TransactionalInmemBackend) Transaction(txns []TxnEntry) error {
+func (t *TransactionalInmemBackend) Transaction(txns []physical.TxnEntry) error {
t.permitPool.Acquire()
defer t.permitPool.Release()
t.Lock()
defer t.Unlock()
- return genericTransactionHandler(t, txns)
+ return physical.GenericTransactionHandler(t, txns)
}
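
With NewInmem and NewTransactionalInmem now matching physical.Factory (config map plus logger, returning (physical.Backend, error)), backend construction can be table-driven on the caller side. An illustrative sketch only; Vault's real registration moved out of the physical package and is not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/logformat"
	"github.com/hashicorp/vault/physical"
	"github.com/hashicorp/vault/physical/inmem"
	log "github.com/mgutz/logxi/v1"
)

// An illustrative caller-side factory table; the constructors slot in
// directly because they share the physical.Factory signature.
var factories = map[string]physical.Factory{
	"inmem":               inmem.NewInmem,
	"inmem_transactional": inmem.NewTransactionalInmem,
}

func newBackend(kind string, conf map[string]string, logger log.Logger) (physical.Backend, error) {
	f, ok := factories[kind]
	if !ok {
		return nil, fmt.Errorf("unknown physical backend type: %s", kind)
	}
	return f(conf, logger)
}

func main() {
	logger := logformat.NewVaultLogger(log.LevelTrace)
	b, err := newBackend("inmem", nil, logger)
	fmt.Println(b != nil, err) // true <nil>
}
```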
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem_ha.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go
similarity index 81%
rename from vendor/github.com/hashicorp/vault/physical/inmem_ha.go
rename to vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go
index bc691c5..d322da2 100644
--- a/vendor/github.com/hashicorp/vault/physical/inmem_ha.go
+++ b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go
@@ -1,14 +1,15 @@
-package physical
+package inmem
import (
"fmt"
"sync"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
)
type InmemHABackend struct {
- Backend
+ physical.Backend
locks map[string]string
l sync.Mutex
cond *sync.Cond
@@ -16,23 +17,31 @@ type InmemHABackend struct {
}
type TransactionalInmemHABackend struct {
- Transactional
+ physical.Transactional
InmemHABackend
}
// NewInmemHA constructs a new in-memory HA backend. This is only for testing.
-func NewInmemHA(logger log.Logger) *InmemHABackend {
+func NewInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) {
+ be, err := NewInmem(nil, logger)
+ if err != nil {
+ return nil, err
+ }
+
in := &InmemHABackend{
- Backend: NewInmem(logger),
+ Backend: be,
locks: make(map[string]string),
logger: logger,
}
in.cond = sync.NewCond(&in.l)
- return in
+ return in, nil
}
-func NewTransactionalInmemHA(logger log.Logger) *TransactionalInmemHABackend {
- transInmem := NewTransactionalInmem(logger)
+func NewTransactionalInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) {
+ transInmem, err := NewTransactionalInmem(nil, logger)
+ if err != nil {
+ return nil, err
+ }
inmemHA := InmemHABackend{
Backend: transInmem,
locks: make(map[string]string),
@@ -41,14 +50,14 @@ func NewTransactionalInmemHA(logger log.Logger) *TransactionalInmemHABackend {
in := &TransactionalInmemHABackend{
InmemHABackend: inmemHA,
- Transactional: transInmem,
+ Transactional: transInmem.(physical.Transactional),
}
in.cond = sync.NewCond(&in.l)
- return in
+ return in, nil
}
// LockWith is used for mutual exclusion based on the given key.
-func (i *InmemHABackend) LockWith(key, value string) (Lock, error) {
+func (i *InmemHABackend) LockWith(key, value string) (physical.Lock, error) {
l := &InmemLock{
in: i,
key: key,
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha_test.go
new file mode 100644
index 0000000..8288595
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha_test.go
@@ -0,0 +1,19 @@
+package inmem
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestInmemHA(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ inm, err := NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ physical.ExerciseHABackend(t, inm.(physical.HABackend), inm.(physical.HABackend))
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_test.go
new file mode 100644
index 0000000..998061b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_test.go
@@ -0,0 +1,20 @@
+package inmem
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestInmem(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ inm, err := NewInmem(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ physical.ExerciseBackend(t, inm)
+ physical.ExerciseBackend_ListPrefix(t, inm)
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/physical_view_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/physical_view_test.go
new file mode 100644
index 0000000..719642a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/inmem/physical_view_test.go
@@ -0,0 +1,120 @@
+package inmem
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
+ log "github.com/mgutz/logxi/v1"
+)
+
+func TestPhysicalView_impl(t *testing.T) {
+ var _ physical.Backend = new(physical.View)
+}
+
+func newInmemTestBackend() (physical.Backend, error) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ return NewInmem(nil, logger)
+}
+
+func TestPhysicalView_BadKeysKeys(t *testing.T) {
+ backend, err := newInmemTestBackend()
+ if err != nil {
+ t.Fatal(err)
+ }
+ view := physical.NewView(backend, "foo/")
+
+ _, err = view.List("../")
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ _, err = view.Get("../")
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ err = view.Delete("../foo")
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+
+ le := &physical.Entry{
+ Key: "../foo",
+ Value: []byte("test"),
+ }
+ err = view.Put(le)
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+}
+
+func TestPhysicalView(t *testing.T) {
+ backend, err := newInmemTestBackend()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ view := physical.NewView(backend, "foo/")
+
+ // Write a key outside of foo/
+ entry := &physical.Entry{Key: "test", Value: []byte("test")}
+ if err := backend.Put(entry); err != nil {
+ t.Fatalf("bad: %v", err)
+ }
+
+ // List should have no visibility
+ keys, err := view.List("")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(keys) != 0 {
+ t.Fatalf("bad: %v", err)
+ }
+
+ // Get should have no visibility
+ out, err := view.Get("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("bad: %v", out)
+ }
+
+ // Try to put the same entry via the view
+ if err := view.Put(entry); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check it is nested
+ entry, err = backend.Get("foo/test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if entry == nil {
+ t.Fatalf("missing nested foo/test")
+ }
+
+ // Delete nested
+ if err := view.Delete("test"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Check the nested key
+ entry, err = backend.Get("foo/test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if entry != nil {
+ t.Fatalf("nested foo/test should be gone")
+ }
+
+ // Check the non-nested key
+ entry, err = backend.Get("test")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if entry == nil {
+ t.Fatalf("root test missing")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/physical/transactions_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/transactions_test.go
similarity index 53%
rename from vendor/github.com/hashicorp/vault/physical/transactions_test.go
rename to vendor/github.com/hashicorp/vault/physical/inmem/transactions_test.go
index e365a95..5565fbe 100644
--- a/vendor/github.com/hashicorp/vault/physical/transactions_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/inmem/transactions_test.go
@@ -1,4 +1,4 @@
-package physical
+package inmem
import (
"fmt"
@@ -8,6 +8,7 @@ import (
radix "github.com/armon/go-radix"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
)
@@ -16,11 +17,11 @@ type faultyPseudo struct {
faultyPaths map[string]struct{}
}
-func (f *faultyPseudo) Get(key string) (*Entry, error) {
+func (f *faultyPseudo) Get(key string) (*physical.Entry, error) {
return f.underlying.Get(key)
}
-func (f *faultyPseudo) Put(entry *Entry) error {
+func (f *faultyPseudo) Put(entry *physical.Entry) error {
return f.underlying.Put(entry)
}
@@ -28,14 +29,14 @@ func (f *faultyPseudo) Delete(key string) error {
return f.underlying.Delete(key)
}
-func (f *faultyPseudo) GetInternal(key string) (*Entry, error) {
+func (f *faultyPseudo) GetInternal(key string) (*physical.Entry, error) {
if _, ok := f.faultyPaths[key]; ok {
return nil, fmt.Errorf("fault")
}
return f.underlying.GetInternal(key)
}
-func (f *faultyPseudo) PutInternal(entry *Entry) error {
+func (f *faultyPseudo) PutInternal(entry *physical.Entry) error {
if _, ok := f.faultyPaths[entry.Key]; ok {
return fmt.Errorf("fault")
}
@@ -53,21 +54,21 @@ func (f *faultyPseudo) List(prefix string) ([]string, error) {
return f.underlying.List(prefix)
}
-func (f *faultyPseudo) Transaction(txns []TxnEntry) error {
+func (f *faultyPseudo) Transaction(txns []physical.TxnEntry) error {
f.underlying.permitPool.Acquire()
defer f.underlying.permitPool.Release()
f.underlying.Lock()
defer f.underlying.Unlock()
- return genericTransactionHandler(f, txns)
+ return physical.GenericTransactionHandler(f, txns)
}
func newFaultyPseudo(logger log.Logger, faultyPaths []string) *faultyPseudo {
out := &faultyPseudo{
underlying: InmemBackend{
root: radix.New(),
- permitPool: NewPermitPool(1),
+ permitPool: physical.NewPermitPool(1),
logger: logger,
},
faultyPaths: make(map[string]struct{}, len(faultyPaths)),
@@ -81,68 +82,22 @@ func newFaultyPseudo(logger log.Logger, faultyPaths []string) *faultyPseudo {
func TestPseudo_Basic(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
p := newFaultyPseudo(logger, nil)
- testBackend(t, p)
- testBackend_ListPrefix(t, p)
+ physical.ExerciseBackend(t, p)
+ physical.ExerciseBackend_ListPrefix(t, p)
}
func TestPseudo_SuccessfulTransaction(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
p := newFaultyPseudo(logger, nil)
- txns := setupPseudo(p, t)
-
- if err := p.Transaction(txns); err != nil {
- t.Fatal(err)
- }
-
- keys, err := p.List("")
- if err != nil {
- t.Fatal(err)
- }
-
- expected := []string{"foo", "zip"}
-
- sort.Strings(keys)
- sort.Strings(expected)
- if !reflect.DeepEqual(keys, expected) {
- t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys)
- }
-
- entry, err := p.Get("foo")
- if err != nil {
- t.Fatal(err)
- }
- if entry == nil {
- t.Fatal("got nil entry")
- }
- if entry.Value == nil {
- t.Fatal("got nil value")
- }
- if string(entry.Value) != "bar3" {
- t.Fatal("updates did not apply correctly")
- }
-
- entry, err = p.Get("zip")
- if err != nil {
- t.Fatal(err)
- }
- if entry == nil {
- t.Fatal("got nil entry")
- }
- if entry.Value == nil {
- t.Fatal("got nil value")
- }
- if string(entry.Value) != "zap3" {
- t.Fatal("updates did not apply correctly")
- }
+ physical.ExerciseTransactionalBackend(t, p)
}
func TestPseudo_FailedTransaction(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
p := newFaultyPseudo(logger, []string{"zip"})
- txns := setupPseudo(p, t)
-
+ txns := physical.SetupTestingTransactions(t, p)
if err := p.Transaction(txns); err == nil {
t.Fatal("expected error during transaction")
}
@@ -188,67 +143,3 @@ func TestPseudo_FailedTransaction(t *testing.T) {
t.Fatal("values did not rollback correctly")
}
}
-
-func setupPseudo(p *faultyPseudo, t *testing.T) []TxnEntry {
- // Add a few keys so that we test rollback with deletion
- if err := p.Put(&Entry{
- Key: "foo",
- Value: []byte("bar"),
- }); err != nil {
- t.Fatal(err)
- }
- if err := p.Put(&Entry{
- Key: "zip",
- Value: []byte("zap"),
- }); err != nil {
- t.Fatal(err)
- }
- if err := p.Put(&Entry{
- Key: "deleteme",
- }); err != nil {
- t.Fatal(err)
- }
- if err := p.Put(&Entry{
- Key: "deleteme2",
- }); err != nil {
- t.Fatal(err)
- }
-
- txns := []TxnEntry{
- TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: "foo",
- Value: []byte("bar2"),
- },
- },
- TxnEntry{
- Operation: DeleteOperation,
- Entry: &Entry{
- Key: "deleteme",
- },
- },
- TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: "foo",
- Value: []byte("bar3"),
- },
- },
- TxnEntry{
- Operation: DeleteOperation,
- Entry: &Entry{
- Key: "deleteme2",
- },
- },
- TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: "zip",
- Value: []byte("zap3"),
- },
- },
- }
-
- return txns
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem_ha_test.go b/vendor/github.com/hashicorp/vault/physical/inmem_ha_test.go
deleted file mode 100644
index 102f85b..0000000
--- a/vendor/github.com/hashicorp/vault/physical/inmem_ha_test.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package physical
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestInmemHA(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- inm := NewInmemHA(logger)
- testHABackend(t, inm, inm)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem_test.go b/vendor/github.com/hashicorp/vault/physical/inmem_test.go
deleted file mode 100644
index 7c3c788..0000000
--- a/vendor/github.com/hashicorp/vault/physical/inmem_test.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package physical
-
-import (
- "testing"
-
- "github.com/hashicorp/vault/helper/logformat"
- log "github.com/mgutz/logxi/v1"
-)
-
-func TestInmem(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- inm := NewInmem(logger)
- testBackend(t, inm)
- testBackend_ListPrefix(t, inm)
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/latency.go b/vendor/github.com/hashicorp/vault/physical/latency.go
new file mode 100644
index 0000000..3253036
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/latency.go
@@ -0,0 +1,90 @@
+package physical
+
+import (
+ "math/rand"
+ "time"
+
+ log "github.com/mgutz/logxi/v1"
+)
+
+const (
+ // DefaultJitterPercent is used if NewLatencyInjector is given a jitter percent outside [0, 100]
+ DefaultJitterPercent = 20
+)
+
+// LatencyInjector is used to add latency into underlying physical requests
+type LatencyInjector struct {
+ backend Backend
+ latency time.Duration
+ jitterPercent int
+ random *rand.Rand
+}
+
+// TransactionalLatencyInjector is the transactional version of the latency
+// injector
+type TransactionalLatencyInjector struct {
+ *LatencyInjector
+ Transactional
+}
+
+// NewLatencyInjector returns a wrapped physical backend to simulate latency
+func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector {
+ if jitter < 0 || jitter > 100 {
+ jitter = DefaultJitterPercent
+ }
+ logger.Info("physical/latency: creating latency injector")
+
+ return &LatencyInjector{
+ backend: b,
+ latency: latency,
+ jitterPercent: jitter,
+ random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+ }
+}
+
+// NewTransactionalLatencyInjector creates a new transactional LatencyInjector
+func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector {
+ return &TransactionalLatencyInjector{
+ LatencyInjector: NewLatencyInjector(b, latency, jitter, logger),
+ Transactional: b.(Transactional),
+ }
+}
+
+func (l *LatencyInjector) addLatency() {
+ // Scale the base latency by a percentage drawn uniformly from [100-jitter, 100+jitter)
+ min := 100 - l.jitterPercent
+ max := 100 + l.jitterPercent
+ percent := l.random.Intn(max-min) + min
+ latencyDuration := time.Duration(int(l.latency) * percent / 100)
+ time.Sleep(latencyDuration)
+}
+
+// Put is a latent put request
+func (l *LatencyInjector) Put(entry *Entry) error {
+ l.addLatency()
+ return l.backend.Put(entry)
+}
+
+// Get is a latent get request
+func (l *LatencyInjector) Get(key string) (*Entry, error) {
+ l.addLatency()
+ return l.backend.Get(key)
+}
+
+// Delete is a latent delete request
+func (l *LatencyInjector) Delete(key string) error {
+ l.addLatency()
+ return l.backend.Delete(key)
+}
+
+// List is a latent list request
+func (l *LatencyInjector) List(prefix string) ([]string, error) {
+ l.addLatency()
+ return l.backend.List(prefix)
+}
+
+// Transaction is a latent transaction request
+func (l *TransactionalLatencyInjector) Transaction(txns []TxnEntry) error {
+ l.addLatency()
+ return l.Transactional.Transaction(txns)
+}
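
The jitter arithmetic in addLatency scales the configured latency by a percentage drawn uniformly from [100-jitter, 100+jitter). A worked example with the default 20% jitter and a 50ms base latency, so sleeps land in [40ms, 60ms):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	latency := 50 * time.Millisecond
	jitterPercent := 20 // DefaultJitterPercent
	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	// Same computation as LatencyInjector.addLatency.
	min := 100 - jitterPercent // 80
	max := 100 + jitterPercent // 120
	percent := r.Intn(max-min) + min
	sleep := time.Duration(int(latency) * percent / 100)
	fmt.Println(sleep) // somewhere in [40ms, 60ms)
}
```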
diff --git a/vendor/github.com/hashicorp/vault/physical/mssql.go b/vendor/github.com/hashicorp/vault/physical/mssql/mssql.go
similarity index 75%
rename from vendor/github.com/hashicorp/vault/physical/mssql.go
rename to vendor/github.com/hashicorp/vault/physical/mssql/mssql.go
index 25709a2..16228d6 100644
--- a/vendor/github.com/hashicorp/vault/physical/mssql.go
+++ b/vendor/github.com/hashicorp/vault/physical/mssql/mssql.go
@@ -1,25 +1,30 @@
-package physical
+package mssql
import (
"database/sql"
"fmt"
"sort"
+ "strconv"
"strings"
"time"
"github.com/armon/go-metrics"
_ "github.com/denisenkom/go-mssqldb"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
)
-type MsSQLBackend struct {
+type MSSQLBackend struct {
dbTable string
client *sql.DB
statements map[string]*sql.Stmt
logger log.Logger
+ permitPool *physical.PermitPool
}
-func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
username, ok := conf["username"]
if !ok {
username = ""
@@ -35,6 +40,21 @@ func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error)
return nil, fmt.Errorf("missing server")
}
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ var err error
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("mysql: max_parallel set", "max_parallel", maxParInt)
+ }
+ } else {
+ maxParInt = physical.DefaultParallelOperations
+ }
+
database, ok := conf["database"]
if !ok {
database = "Vault"
@@ -79,6 +99,8 @@ func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error)
return nil, fmt.Errorf("failed to connect to mssql: %v", err)
}
+ db.SetMaxOpenConns(maxParInt)
+
if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = '" + database + "') CREATE DATABASE " + database); err != nil {
return nil, fmt.Errorf("failed to create mssql database: %v", err)
}
@@ -110,11 +132,12 @@ func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error)
return nil, fmt.Errorf("failed to create mssql table: %v", err)
}
- m := &MsSQLBackend{
+ m := &MSSQLBackend{
dbTable: dbTable,
client: db,
statements: make(map[string]*sql.Stmt),
logger: logger,
+ permitPool: physical.NewPermitPool(maxParInt),
}
statements := map[string]string{
@@ -134,7 +157,7 @@ func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error)
return m, nil
}
-func (m *MsSQLBackend) prepare(name, query string) error {
+func (m *MSSQLBackend) prepare(name, query string) error {
stmt, err := m.client.Prepare(query)
if err != nil {
return fmt.Errorf("failed to prepare '%s': %v", name, err)
@@ -145,9 +168,12 @@ func (m *MsSQLBackend) prepare(name, query string) error {
return nil
}
-func (m *MsSQLBackend) Put(entry *Entry) error {
+func (m *MSSQLBackend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"mssql", "put"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
_, err := m.statements["put"].Exec(entry.Key, entry.Value, entry.Key, entry.Key, entry.Value)
if err != nil {
return err
@@ -156,9 +182,12 @@ func (m *MsSQLBackend) Put(entry *Entry) error {
return nil
}
-func (m *MsSQLBackend) Get(key string) (*Entry, error) {
+func (m *MSSQLBackend) Get(key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"mssql", "get"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
var result []byte
err := m.statements["get"].QueryRow(key).Scan(&result)
if err == sql.ErrNoRows {
@@ -169,7 +198,7 @@ func (m *MsSQLBackend) Get(key string) (*Entry, error) {
return nil, err
}
- ent := &Entry{
+ ent := &physical.Entry{
Key: key,
Value: result,
}
@@ -177,9 +206,12 @@ func (m *MsSQLBackend) Get(key string) (*Entry, error) {
return ent, nil
}
-func (m *MsSQLBackend) Delete(key string) error {
+func (m *MSSQLBackend) Delete(key string) error {
defer metrics.MeasureSince([]string{"mssql", "delete"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
_, err := m.statements["delete"].Exec(key)
if err != nil {
return err
@@ -188,12 +220,17 @@ func (m *MsSQLBackend) Delete(key string) error {
return nil
}
-func (m *MsSQLBackend) List(prefix string) ([]string, error) {
+func (m *MSSQLBackend) List(prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"mssql", "list"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
likePrefix := prefix + "%"
rows, err := m.statements["list"].Query(likePrefix)
-
+ if err != nil {
+ return nil, err
+ }
var keys []string
for rows.Next() {
var key string
@@ -206,7 +243,7 @@ func (m *MsSQLBackend) List(prefix string) ([]string, error) {
if i := strings.Index(key, "/"); i == -1 {
keys = append(keys, key)
} else if i != -1 {
- keys = appendIfMissing(keys, string(key[:i+1]))
+ keys = strutil.AppendIfMissing(keys, string(key[:i+1]))
}
}
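
The max_parallel handling added here (and to the MySQL and PostgreSQL backends below) parses one integer that then bounds both db.SetMaxOpenConns and the physical.PermitPool. A minimal replica of the parsing, with defaultParallel standing in for physical.DefaultParallelOperations:

```go
package main

import (
	"fmt"
	"strconv"
)

// defaultParallel stands in for physical.DefaultParallelOperations.
const defaultParallel = 128

func maxParallel(conf map[string]string) (int, error) {
	s, ok := conf["max_parallel"]
	if !ok {
		return defaultParallel, nil
	}
	n, err := strconv.Atoi(s)
	if err != nil {
		return 0, fmt.Errorf("failed parsing max_parallel parameter: %v", err)
	}
	return n, nil
}

func main() {
	n, _ := maxParallel(map[string]string{"max_parallel": "25"})
	fmt.Println(n) // 25; feeds both db.SetMaxOpenConns and physical.NewPermitPool
}
```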
diff --git a/vendor/github.com/hashicorp/vault/physical/mssql_test.go b/vendor/github.com/hashicorp/vault/physical/mssql/mssql_test.go
similarity index 77%
rename from vendor/github.com/hashicorp/vault/physical/mssql_test.go
rename to vendor/github.com/hashicorp/vault/physical/mssql/mssql_test.go
index 11f4684..7e1446e 100644
--- a/vendor/github.com/hashicorp/vault/physical/mssql_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/mssql/mssql_test.go
@@ -1,16 +1,17 @@
-package physical
+package mssql
import (
"os"
"testing"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
_ "github.com/denisenkom/go-mssqldb"
)
-func TestMsSQLBackend(t *testing.T) {
+func TestMSSQLBackend(t *testing.T) {
server := os.Getenv("MSSQL_SERVER")
if server == "" {
t.SkipNow()
@@ -32,27 +33,26 @@ func TestMsSQLBackend(t *testing.T) {
// Run vault tests
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("mssql", logger, map[string]string{
+ b, err := NewMSSQLBackend(map[string]string{
"server": server,
"database": database,
"table": table,
"username": username,
"password": password,
- })
+ }, logger)
if err != nil {
t.Fatalf("Failed to create new backend: %v", err)
}
defer func() {
- mssql := b.(*MsSQLBackend)
+ mssql := b.(*MSSQLBackend)
_, err := mssql.client.Exec("DROP TABLE " + mssql.dbTable)
if err != nil {
t.Fatalf("Failed to drop table: %v", err)
}
}()
- testBackend(t, b)
- testBackend_ListPrefix(t, b)
-
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
}
diff --git a/vendor/github.com/hashicorp/vault/physical/mysql.go b/vendor/github.com/hashicorp/vault/physical/mysql/mysql.go
similarity index 81%
rename from vendor/github.com/hashicorp/vault/physical/mysql.go
rename to vendor/github.com/hashicorp/vault/physical/mysql/mysql.go
index ce13514..87daa9a 100644
--- a/vendor/github.com/hashicorp/vault/physical/mysql.go
+++ b/vendor/github.com/hashicorp/vault/physical/mysql/mysql.go
@@ -1,4 +1,4 @@
-package physical
+package mysql
import (
"crypto/tls"
@@ -8,6 +8,7 @@ import (
"io/ioutil"
"net/url"
"sort"
+ "strconv"
"strings"
"time"
@@ -15,6 +16,9 @@ import (
"github.com/armon/go-metrics"
mysql "github.com/go-sql-driver/mysql"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/physical"
)
// Unreserved tls key
@@ -28,11 +32,14 @@ type MySQLBackend struct {
client *sql.DB
statements map[string]*sql.Stmt
logger log.Logger
+ permitPool *physical.PermitPool
}
-// newMySQLBackend constructs a MySQL backend using the given API client and
+// NewMySQLBackend constructs a MySQL backend using the given API client and
// server address, and credentials for accessing the MySQL database.
-func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+ var err error
+
// Get the MySQL credentials to perform read/write operations.
username, ok := conf["username"]
if !ok || username == "" {
@@ -60,6 +67,20 @@ func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error)
}
dbTable := database + "." + table
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("mysql: max_parallel set", "max_parallel", maxParInt)
+ }
+ } else {
+ maxParInt = physical.DefaultParallelOperations
+ }
+
dsnParams := url.Values{}
tlsCaFile, ok := conf["tls_ca_file"]
if ok {
@@ -77,6 +98,8 @@ func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error)
return nil, fmt.Errorf("failed to connect to mysql: %v", err)
}
+ db.SetMaxOpenConns(maxParInt)
+
// Create the required database if it doesn't exist.
if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS " + database); err != nil {
return nil, fmt.Errorf("failed to create mysql database: %v", err)
@@ -95,6 +118,7 @@ func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error)
client: db,
statements: make(map[string]*sql.Stmt),
logger: logger,
+ permitPool: physical.NewPermitPool(maxParInt),
}
// Prepare all the statements required
@@ -110,6 +134,7 @@ func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error)
return nil, err
}
}
+
return m, nil
}
@@ -124,9 +149,12 @@ func (m *MySQLBackend) prepare(name, query string) error {
}
// Put is used to insert or update an entry.
-func (m *MySQLBackend) Put(entry *Entry) error {
+func (m *MySQLBackend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"mysql", "put"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
_, err := m.statements["put"].Exec(entry.Key, entry.Value)
if err != nil {
return err
@@ -135,9 +163,12 @@ func (m *MySQLBackend) Put(entry *Entry) error {
}
// Get is used to fetch an entry.
-func (m *MySQLBackend) Get(key string) (*Entry, error) {
+func (m *MySQLBackend) Get(key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"mysql", "get"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
var result []byte
err := m.statements["get"].QueryRow(key).Scan(&result)
if err == sql.ErrNoRows {
@@ -147,7 +178,7 @@ func (m *MySQLBackend) Get(key string) (*Entry, error) {
return nil, err
}
- ent := &Entry{
+ ent := &physical.Entry{
Key: key,
Value: result,
}
@@ -158,6 +189,9 @@ func (m *MySQLBackend) Get(key string) (*Entry, error) {
func (m *MySQLBackend) Delete(key string) error {
defer metrics.MeasureSince([]string{"mysql", "delete"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
_, err := m.statements["delete"].Exec(key)
if err != nil {
return err
@@ -170,6 +204,9 @@ func (m *MySQLBackend) Delete(key string) error {
func (m *MySQLBackend) List(prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"mysql", "list"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
// Add the % wildcard to the prefix to do the prefix search
likePrefix := prefix + "%"
rows, err := m.statements["list"].Query(likePrefix)
@@ -191,7 +228,7 @@ func (m *MySQLBackend) List(prefix string) ([]string, error) {
keys = append(keys, key)
} else if i != -1 {
// Add truncated 'folder' paths
- keys = appendIfMissing(keys, string(key[:i+1]))
+ keys = strutil.AppendIfMissing(keys, string(key[:i+1]))
}
}
diff --git a/vendor/github.com/hashicorp/vault/physical/mysql_test.go b/vendor/github.com/hashicorp/vault/physical/mysql/mysql_test.go
similarity index 83%
rename from vendor/github.com/hashicorp/vault/physical/mysql_test.go
rename to vendor/github.com/hashicorp/vault/physical/mysql/mysql_test.go
index 1eabd9f..ecf8431 100644
--- a/vendor/github.com/hashicorp/vault/physical/mysql_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/mysql/mysql_test.go
@@ -1,10 +1,11 @@
-package physical
+package mysql
import (
"os"
"testing"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
_ "github.com/go-sql-driver/mysql"
@@ -32,13 +33,13 @@ func TestMySQLBackend(t *testing.T) {
// Run vault tests
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("mysql", logger, map[string]string{
+ b, err := NewMySQLBackend(map[string]string{
"address": address,
"database": database,
"table": table,
"username": username,
"password": password,
- })
+ }, logger)
if err != nil {
t.Fatalf("Failed to create new backend: %v", err)
@@ -52,7 +53,6 @@ func TestMySQLBackend(t *testing.T) {
}
}()
- testBackend(t, b)
- testBackend_ListPrefix(t, b)
-
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
}
diff --git a/vendor/github.com/hashicorp/vault/physical/physical.go b/vendor/github.com/hashicorp/vault/physical/physical.go
index b35d281..088a86b 100644
--- a/vendor/github.com/hashicorp/vault/physical/physical.go
+++ b/vendor/github.com/hashicorp/vault/physical/physical.go
@@ -1,7 +1,7 @@
package physical
import (
- "fmt"
+ "strings"
"sync"
log "github.com/mgutz/logxi/v1"
@@ -70,8 +70,8 @@ type RedirectDetect interface {
}
// Callback signatures for RunServiceDiscovery
-type activeFunction func() bool
-type sealedFunction func() bool
+type ActiveFunction func() bool
+type SealedFunction func() bool
// ServiceDiscovery is an optional interface that an HABackend can implement.
// If they do, the state of a backend is advertised to the service discovery
@@ -89,7 +89,7 @@ type ServiceDiscovery interface {
// Run executes any background service discovery tasks until the
// shutdown channel is closed.
- RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc activeFunction, sealedFunc sealedFunction) error
+ RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc ActiveFunction, sealedFunc SealedFunction) error
}
type Lock interface {
@@ -115,46 +115,6 @@ type Entry struct {
// Factory is the factory function to create a physical backend.
type Factory func(config map[string]string, logger log.Logger) (Backend, error)
-// NewBackend returns a new backend with the given type and configuration.
-// The backend is looked up in the builtinBackends variable.
-func NewBackend(t string, logger log.Logger, conf map[string]string) (Backend, error) {
- f, ok := builtinBackends[t]
- if !ok {
- return nil, fmt.Errorf("unknown physical backend type: %s", t)
- }
- return f(conf, logger)
-}
-
-// BuiltinBackends is the list of built-in physical backends that can
-// be used with NewBackend.
-var builtinBackends = map[string]Factory{
- "inmem": func(_ map[string]string, logger log.Logger) (Backend, error) {
- return NewInmem(logger), nil
- },
- "inmem_transactional": func(_ map[string]string, logger log.Logger) (Backend, error) {
- return NewTransactionalInmem(logger), nil
- },
- "inmem_ha": func(_ map[string]string, logger log.Logger) (Backend, error) {
- return NewInmemHA(logger), nil
- },
- "inmem_transactional_ha": func(_ map[string]string, logger log.Logger) (Backend, error) {
- return NewTransactionalInmemHA(logger), nil
- },
- "file_transactional": newTransactionalFileBackend,
- "consul": newConsulBackend,
- "zookeeper": newZookeeperBackend,
- "file": newFileBackend,
- "s3": newS3Backend,
- "azure": newAzureBackend,
- "dynamodb": newDynamoDBBackend,
- "etcd": newEtcdBackend,
- "mssql": newMsSQLBackend,
- "mysql": newMySQLBackend,
- "postgresql": newPostgreSQLBackend,
- "swift": newSwiftBackend,
- "gcs": newGCSBackend,
-}
-
// PermitPool is used to limit maximum outstanding requests
type PermitPool struct {
sem chan int
@@ -180,3 +140,15 @@ func (c *PermitPool) Acquire() {
func (c *PermitPool) Release() {
<-c.sem
}
+
+// Prefixes is a shared helper function that returns all parent 'folders' for a
+// given vault key.
+// e.g. for 'foo/bar/baz', it returns ['foo', 'foo/bar']
+func Prefixes(s string) []string {
+ components := strings.Split(s, "/")
+ result := []string{}
+ for i := 1; i < len(components); i++ {
+ result = append(result, strings.Join(components[:i], "/"))
+ }
+ return result
+}
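
The new Prefixes helper enumerates every parent 'folder' of a key, per its doc comment. Its behavior, inlined for a quick check:

```go
package main

import (
	"fmt"
	"strings"
)

// Prefixes, copied from the hunk above.
func Prefixes(s string) []string {
	components := strings.Split(s, "/")
	result := []string{}
	for i := 1; i < len(components); i++ {
		result = append(result, strings.Join(components[:i], "/"))
	}
	return result
}

func main() {
	fmt.Println(Prefixes("foo/bar/baz")) // [foo foo/bar]
	fmt.Println(Prefixes("foo"))         // []
}
```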
diff --git a/vendor/github.com/hashicorp/vault/physical/physical_view.go b/vendor/github.com/hashicorp/vault/physical/physical_view.go
new file mode 100644
index 0000000..38c16e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/physical/physical_view.go
@@ -0,0 +1,94 @@
+package physical
+
+import (
+ "errors"
+ "strings"
+)
+
+var (
+ ErrRelativePath = errors.New("relative paths not supported")
+)
+
+// View represents a prefixed view of a physical backend
+type View struct {
+ backend Backend
+ prefix string
+}
+
+// NewView takes an underlying physical backend and returns
+// a view of it that can only operate with the given prefix.
+func NewView(backend Backend, prefix string) *View {
+ return &View{
+ backend: backend,
+ prefix: prefix,
+ }
+}
+
+// List the contents of the prefixed view
+func (v *View) List(prefix string) ([]string, error) {
+ if err := v.sanityCheck(prefix); err != nil {
+ return nil, err
+ }
+ return v.backend.List(v.expandKey(prefix))
+}
+
+// Get the key of the prefixed view
+func (v *View) Get(key string) (*Entry, error) {
+ if err := v.sanityCheck(key); err != nil {
+ return nil, err
+ }
+ entry, err := v.backend.Get(v.expandKey(key))
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+ entry.Key = v.truncateKey(entry.Key)
+
+ return &Entry{
+ Key: entry.Key,
+ Value: entry.Value,
+ }, nil
+}
+
+// Put the entry into the prefix view
+func (v *View) Put(entry *Entry) error {
+ if err := v.sanityCheck(entry.Key); err != nil {
+ return err
+ }
+
+ nested := &Entry{
+ Key: v.expandKey(entry.Key),
+ Value: entry.Value,
+ }
+ return v.backend.Put(nested)
+}
+
+// Delete the entry from the prefix view
+func (v *View) Delete(key string) error {
+ if err := v.sanityCheck(key); err != nil {
+ return err
+ }
+ return v.backend.Delete(v.expandKey(key))
+}
+
+// sanityCheck is used to perform a sanity check on a key
+func (v *View) sanityCheck(key string) error {
+ if strings.Contains(key, "..") {
+ return ErrRelativePath
+ }
+ return nil
+}
+
+// expandKey is used to expand to the full key path with the prefix
+func (v *View) expandKey(suffix string) string {
+ return v.prefix + suffix
+}
+
+// truncateKey is used to remove the prefix of the key
+func (v *View) truncateKey(full string) string {
+ return strings.TrimPrefix(full, v.prefix)
+}
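
A View is purely a key-mapping shim: expandKey prepends the prefix on writes, truncateKey strips it on reads, and sanityCheck refuses '..' escapes. The mapping in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

// The two key transforms from physical.View, extracted for illustration.
type view struct{ prefix string }

func (v view) expandKey(suffix string) string { return v.prefix + suffix }
func (v view) truncateKey(full string) string { return strings.TrimPrefix(full, v.prefix) }

func main() {
	v := view{prefix: "foo/"}
	fmt.Println(v.expandKey("test"))       // foo/test
	fmt.Println(v.truncateKey("foo/test")) // test
}
```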
diff --git a/vendor/github.com/hashicorp/vault/physical/postgresql.go b/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql.go
similarity index 77%
rename from vendor/github.com/hashicorp/vault/physical/postgresql.go
rename to vendor/github.com/hashicorp/vault/physical/postgresql/postgresql.go
index 2b11d48..cb35782 100644
--- a/vendor/github.com/hashicorp/vault/physical/postgresql.go
+++ b/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql.go
@@ -1,11 +1,14 @@
-package physical
+package postgresql
import (
"database/sql"
"fmt"
+ "strconv"
"strings"
"time"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
"github.com/armon/go-metrics"
@@ -22,11 +25,12 @@ type PostgreSQLBackend struct {
delete_query string
list_query string
logger log.Logger
+ permitPool *physical.PermitPool
}
-// newPostgreSQLBackend constructs a PostgreSQL backend using the given
+// NewPostgreSQLBackend constructs a PostgreSQL backend using the given
// API client, server address, credentials, and database.
-func newPostgreSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
// Get the PostgreSQL credentials to perform read/write operations.
connURL, ok := conf["connection_url"]
if !ok || connURL == "" {
@@ -39,11 +43,27 @@ func newPostgreSQLBackend(conf map[string]string, logger log.Logger) (Backend, e
}
quoted_table := pq.QuoteIdentifier(unquoted_table)
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ var err error
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("postgres: max_parallel set", "max_parallel", maxParInt)
+ }
+ } else {
+ maxParInt = physical.DefaultParallelOperations
+ }
+
// Create PostgreSQL handle for the database.
db, err := sql.Open("postgres", connURL)
if err != nil {
return nil, fmt.Errorf("failed to connect to postgres: %v", err)
}
+ db.SetMaxOpenConns(maxParInt)
// Determine if we should use an upsert function (versions < 9.5)
var upsert_required bool
@@ -72,8 +92,9 @@ func newPostgreSQLBackend(conf map[string]string, logger log.Logger) (Backend, e
delete_query: "DELETE FROM " + quoted_table + " WHERE path = $1 AND key = $2",
list_query: "SELECT key FROM " + quoted_table + " WHERE path = $1" +
"UNION SELECT DISTINCT substring(substr(path, length($1)+1) from '^.*?/') FROM " +
- quoted_table + " WHERE parent_path LIKE concat($1, '%')",
- logger: logger,
+ quoted_table + " WHERE parent_path LIKE $1 || '%'",
+ logger: logger,
+ permitPool: physical.NewPermitPool(maxParInt),
}
return m, nil
@@ -104,9 +125,12 @@ func (m *PostgreSQLBackend) splitKey(fullPath string) (string, string, string) {
}
// Put is used to insert or update an entry.
-func (m *PostgreSQLBackend) Put(entry *Entry) error {
+func (m *PostgreSQLBackend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"postgres", "put"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
parentPath, path, key := m.splitKey(entry.Key)
_, err := m.client.Exec(m.put_query, parentPath, path, key, entry.Value)
@@ -117,9 +141,12 @@ func (m *PostgreSQLBackend) Put(entry *Entry) error {
}
// Get is used to fetch an entry.
-func (m *PostgreSQLBackend) Get(fullPath string) (*Entry, error) {
+func (m *PostgreSQLBackend) Get(fullPath string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"postgres", "get"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
_, path, key := m.splitKey(fullPath)
var result []byte
@@ -131,7 +158,7 @@ func (m *PostgreSQLBackend) Get(fullPath string) (*Entry, error) {
return nil, err
}
- ent := &Entry{
+ ent := &physical.Entry{
Key: key,
Value: result,
}
@@ -142,6 +169,9 @@ func (m *PostgreSQLBackend) Get(fullPath string) (*Entry, error) {
func (m *PostgreSQLBackend) Delete(fullPath string) error {
defer metrics.MeasureSince([]string{"postgres", "delete"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
_, path, key := m.splitKey(fullPath)
_, err := m.client.Exec(m.delete_query, path, key)
@@ -156,6 +186,9 @@ func (m *PostgreSQLBackend) Delete(fullPath string) error {
func (m *PostgreSQLBackend) List(prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"postgres", "list"}, time.Now())
+ m.permitPool.Acquire()
+ defer m.permitPool.Release()
+
rows, err := m.client.Query(m.list_query, "/"+prefix)
if err != nil {
return nil, err
diff --git a/vendor/github.com/hashicorp/vault/physical/postgresql_test.go b/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql_test.go
similarity index 78%
rename from vendor/github.com/hashicorp/vault/physical/postgresql_test.go
rename to vendor/github.com/hashicorp/vault/physical/postgresql/postgresql_test.go
index 5cdaaa0..940d0e2 100644
--- a/vendor/github.com/hashicorp/vault/physical/postgresql_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql_test.go
@@ -1,10 +1,11 @@
-package physical
+package postgresql
import (
"os"
"testing"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
_ "github.com/lib/pq"
@@ -24,11 +25,10 @@ func TestPostgreSQLBackend(t *testing.T) {
// Run vault tests
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("postgresql", logger, map[string]string{
+ b, err := NewPostgreSQLBackend(map[string]string{
"connection_url": connURL,
"table": table,
- })
-
+ }, logger)
if err != nil {
t.Fatalf("Failed to create new backend: %v", err)
}
@@ -41,7 +41,6 @@ func TestPostgreSQLBackend(t *testing.T) {
}
}()
- testBackend(t, b)
- testBackend_ListPrefix(t, b)
-
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
}
diff --git a/vendor/github.com/hashicorp/vault/physical/s3.go b/vendor/github.com/hashicorp/vault/physical/s3/s3.go
similarity index 76%
rename from vendor/github.com/hashicorp/vault/physical/s3.go
rename to vendor/github.com/hashicorp/vault/physical/s3/s3.go
index 8271be7..7118e7d 100644
--- a/vendor/github.com/hashicorp/vault/physical/s3.go
+++ b/vendor/github.com/hashicorp/vault/physical/s3/s3.go
@@ -1,4 +1,4 @@
-package physical
+package s3
import (
"bytes"
@@ -22,6 +22,7 @@ import (
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/helper/awsutil"
"github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/physical"
)
// S3Backend is a physical backend that stores data
@@ -30,14 +31,13 @@ type S3Backend struct {
bucket string
client *s3.S3
logger log.Logger
- permitPool *PermitPool
+ permitPool *physical.PermitPool
}
-// newS3Backend constructs a S3 backend using a pre-existing
+// NewS3Backend constructs a S3 backend using a pre-existing
// bucket. Credentials can be provided to the backend, sourced
// from the environment, AWS credential files or by IAM role.
-func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
-
+func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
bucket := os.Getenv("AWS_S3_BUCKET")
if bucket == "" {
bucket = conf["bucket"]
@@ -62,11 +62,14 @@ func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
if endpoint == "" {
endpoint = conf["endpoint"]
}
- region := os.Getenv("AWS_DEFAULT_REGION")
+ region := os.Getenv("AWS_REGION")
if region == "" {
- region = conf["region"]
+ region = os.Getenv("AWS_DEFAULT_REGION")
if region == "" {
- region = "us-east-1"
+ region = conf["region"]
+ if region == "" {
+ region = "us-east-1"
+ }
}
}
@@ -92,9 +95,9 @@ func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
Region: aws.String(region),
}))
- _, err = s3conn.HeadBucket(&s3.HeadBucketInput{Bucket: &bucket})
+ _, err = s3conn.ListObjects(&s3.ListObjectsInput{Bucket: &bucket})
if err != nil {
- return nil, fmt.Errorf("unable to access bucket '%s': %v", bucket, err)
+ return nil, fmt.Errorf("unable to access bucket '%s' in region %s: %v", bucket, region, err)
}
maxParStr, ok := conf["max_parallel"]
@@ -113,13 +116,13 @@ func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
client: s3conn,
bucket: bucket,
logger: logger,
- permitPool: NewPermitPool(maxParInt),
+ permitPool: physical.NewPermitPool(maxParInt),
}
return s, nil
}
// Put is used to insert or update an entry
-func (s *S3Backend) Put(entry *Entry) error {
+func (s *S3Backend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"s3", "put"}, time.Now())
s.permitPool.Acquire()
@@ -139,7 +142,7 @@ func (s *S3Backend) Put(entry *Entry) error {
}
// Get is used to fetch an entry
-func (s *S3Backend) Get(key string) (*Entry, error) {
+func (s *S3Backend) Get(key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"s3", "get"}, time.Now())
s.permitPool.Acquire()
@@ -169,7 +172,7 @@ func (s *S3Backend) Get(key string) (*Entry, error) {
return nil, err
}
- ent := &Entry{
+ ent := &physical.Entry{
Key: key,
Value: data,
}
@@ -205,23 +208,35 @@ func (s *S3Backend) List(prefix string) ([]string, error) {
defer s.permitPool.Release()
params := &s3.ListObjectsV2Input{
- Bucket: aws.String(s.bucket),
- Prefix: aws.String(prefix),
+ Bucket: aws.String(s.bucket),
+ Prefix: aws.String(prefix),
+ Delimiter: aws.String("/"),
}
keys := []string{}
err := s.client.ListObjectsV2Pages(params,
func(page *s3.ListObjectsV2Output, lastPage bool) bool {
- for _, key := range page.Contents {
- key := strings.TrimPrefix(*key.Key, prefix)
+ if page != nil {
+ // Add truncated 'folder' paths
+ for _, commonPrefix := range page.CommonPrefixes {
+ // Avoid panic
+ if commonPrefix == nil {
+ continue
+ }
- if i := strings.Index(key, "/"); i == -1 {
- // Add objects only from the current 'folder'
+ commonPrefix := strings.TrimPrefix(*commonPrefix.Prefix, prefix)
+ keys = append(keys, commonPrefix)
+ }
+ // Add objects only from the current 'folder'
+ for _, key := range page.Contents {
+ // Avoid panic
+ if key == nil {
+ continue
+ }
+
+ key := strings.TrimPrefix(*key.Key, prefix)
keys = append(keys, key)
- } else if i != -1 {
- // Add truncated 'folder' paths
- keys = appendIfMissing(keys, key[:i+1])
}
}
return true
@@ -235,12 +250,3 @@ func (s *S3Backend) List(prefix string) ([]string, error) {
return keys, nil
}
-
-func appendIfMissing(slice []string, i string) []string {
- for _, ele := range slice {
- if ele == i {
- return slice
- }
- }
- return append(slice, i)
-}
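
The rewritten S3 List pushes folder emulation to S3 itself: with Delimiter set to "/", deeper keys come back aggregated in CommonPrefixes rather than Contents, which is why the local appendIfMissing dedup could be dropped. A sketch of how one page folds into keys, with the page contents faked:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	prefix := "foo/"
	// Faked page standing in for one ListObjectsV2Pages callback invocation.
	page := &s3.ListObjectsV2Output{
		CommonPrefixes: []*s3.CommonPrefix{{Prefix: aws.String("foo/bar/")}},
		Contents:       []*s3.Object{{Key: aws.String("foo/zip")}},
	}

	keys := []string{}
	// Truncated 'folder' paths arrive pre-aggregated in CommonPrefixes.
	for _, cp := range page.CommonPrefixes {
		keys = append(keys, strings.TrimPrefix(*cp.Prefix, prefix))
	}
	// Objects in the current 'folder' arrive in Contents.
	for _, obj := range page.Contents {
		keys = append(keys, strings.TrimPrefix(*obj.Key, prefix))
	}
	fmt.Println(keys) // [bar/ zip]
}
```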
diff --git a/vendor/github.com/hashicorp/vault/physical/s3_test.go b/vendor/github.com/hashicorp/vault/physical/s3/s3_test.go
similarity index 75%
rename from vendor/github.com/hashicorp/vault/physical/s3_test.go
rename to vendor/github.com/hashicorp/vault/physical/s3/s3_test.go
index 8fdb882..dbe4c93 100644
--- a/vendor/github.com/hashicorp/vault/physical/s3_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/s3/s3_test.go
@@ -1,4 +1,4 @@
-package physical
+package s3
import (
"fmt"
@@ -7,23 +7,27 @@ import (
"testing"
"time"
+ "github.com/hashicorp/vault/helper/awsutil"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
"github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
func TestS3Backend(t *testing.T) {
- if os.Getenv("AWS_ACCESS_KEY_ID") == "" || os.Getenv("AWS_SECRET_ACCESS_KEY") == "" {
+ credsConfig := &awsutil.CredentialsConfig{}
+
+ credsChain, err := credsConfig.GenerateCredentialChain()
+ if err != nil {
t.SkipNow()
}
- creds, err := credentials.NewEnvCredentials().Get()
+ _, err = credsChain.Get()
if err != nil {
- t.Fatalf("err: %v", err)
+ t.SkipNow()
}
// If the variable is empty or doesn't exist, the default
@@ -36,7 +40,7 @@ func TestS3Backend(t *testing.T) {
}
s3conn := s3.New(session.New(&aws.Config{
- Credentials: credentials.NewEnvCredentials(),
+ Credentials: credsChain,
Endpoint: aws.String(endpoint),
Region: aws.String(region),
}))
@@ -77,17 +81,14 @@ func TestS3Backend(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("s3", logger, map[string]string{
- "access_key": creds.AccessKeyID,
- "secret_key": creds.SecretAccessKey,
- "session_token": creds.SessionToken,
- "bucket": bucket,
- })
+ // This uses the same logic to find the AWS credentials as we did at the beginning of the test
+ b, err := NewS3Backend(map[string]string{
+ "bucket": bucket,
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
- testBackend(t, b)
- testBackend_ListPrefix(t, b)
-
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
}
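// [editor's sketch, not part of the patch] The test now skips unless a full
// credential chain (env vars, shared config, instance profile, ...) resolves,
// instead of requiring the two env variables outright. Reduced to a standalone
// probe; the awsutil helpers are the same ones the test imports:
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/awsutil"
)

func main() {
	credsConfig := &awsutil.CredentialsConfig{}
	credsChain, err := credsConfig.GenerateCredentialChain()
	if err != nil {
		fmt.Println("no credential chain:", err)
		return
	}
	if _, err := credsChain.Get(); err != nil {
		fmt.Println("chain resolved no usable credentials:", err)
		return
	}
	fmt.Println("credentials resolved; the S3 test would run")
}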
diff --git a/vendor/github.com/hashicorp/vault/physical/swift.go b/vendor/github.com/hashicorp/vault/physical/swift/swift.go
similarity index 87%
rename from vendor/github.com/hashicorp/vault/physical/swift.go
rename to vendor/github.com/hashicorp/vault/physical/swift/swift.go
index 0ed4fe6..30d7e66 100644
--- a/vendor/github.com/hashicorp/vault/physical/swift.go
+++ b/vendor/github.com/hashicorp/vault/physical/swift/swift.go
@@ -1,4 +1,4 @@
-package physical
+package swift
import (
"fmt"
@@ -13,6 +13,8 @@ import (
"github.com/armon/go-metrics"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/physical"
"github.com/ncw/swift"
)
@@ -22,13 +24,14 @@ type SwiftBackend struct {
container string
client *swift.Connection
logger log.Logger
- permitPool *PermitPool
+ permitPool *physical.PermitPool
}
-// newSwiftBackend constructs a Swift backend using a pre-existing
+// NewSwiftBackend constructs a Swift backend using a pre-existing
// container. Credentials can be provided to the backend, sourced
// from the environment.
-func newSwiftBackend(conf map[string]string, logger log.Logger) (Backend, error) {
+func NewSwiftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+ var ok bool
username := os.Getenv("OS_USERNAME")
if username == "" {
@@ -60,11 +63,9 @@ func newSwiftBackend(conf map[string]string, logger log.Logger) (Backend, error)
}
project := os.Getenv("OS_PROJECT_NAME")
if project == "" {
- project = conf["project"]
-
- if project == "" {
+ if project, ok = conf["project"]; !ok {
// Check for KeyStone naming prior to V3
- project := os.Getenv("OS_TENANT_NAME")
+ project = os.Getenv("OS_TENANT_NAME")
if project == "" {
project = conf["tenant"]
}
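// [editor's sketch, not part of the patch] Besides being shorter, the hunk
// above fixes a shadowing bug: the old inner `project := os.Getenv(...)`
// declared a *new* variable, so the OS_TENANT_NAME/conf["tenant"] fallback
// never reached the outer `project`. A minimal illustration of that pitfall:
package main

import "fmt"

func main() {
	project := ""
	if project == "" {
		project := "from-tenant-fallback" // ':=' shadows the outer variable
		_ = project
	}
	fmt.Printf("%q\n", project) // prints "" -- the fallback value was lost
}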
@@ -116,13 +117,13 @@ func newSwiftBackend(conf map[string]string, logger log.Logger) (Backend, error)
client: &c,
container: container,
logger: logger,
- permitPool: NewPermitPool(maxParInt),
+ permitPool: physical.NewPermitPool(maxParInt),
}
return s, nil
}
// Put is used to insert or update an entry
-func (s *SwiftBackend) Put(entry *Entry) error {
+func (s *SwiftBackend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"swift", "put"}, time.Now())
s.permitPool.Acquire()
@@ -138,7 +139,7 @@ func (s *SwiftBackend) Put(entry *Entry) error {
}
// Get is used to fetch an entry
-func (s *SwiftBackend) Get(key string) (*Entry, error) {
+func (s *SwiftBackend) Get(key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"swift", "get"}, time.Now())
s.permitPool.Acquire()
@@ -161,7 +162,7 @@ func (s *SwiftBackend) Get(key string) (*Entry, error) {
if err != nil {
return nil, err
}
- ent := &Entry{
+ ent := &physical.Entry{
Key: key,
Value: data,
}
@@ -207,7 +208,7 @@ func (s *SwiftBackend) List(prefix string) ([]string, error) {
keys = append(keys, key)
} else if i != -1 {
// Add truncated 'folder' paths
- keys = appendIfMissing(keys, key[:i+1])
+ keys = strutil.AppendIfMissing(keys, key[:i+1])
}
}
diff --git a/vendor/github.com/hashicorp/vault/physical/swift_test.go b/vendor/github.com/hashicorp/vault/physical/swift/swift_test.go
similarity index 90%
rename from vendor/github.com/hashicorp/vault/physical/swift_test.go
rename to vendor/github.com/hashicorp/vault/physical/swift/swift_test.go
index 2da37f0..5aa2ec9 100644
--- a/vendor/github.com/hashicorp/vault/physical/swift_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/swift/swift_test.go
@@ -1,4 +1,4 @@
-package physical
+package swift
import (
"fmt"
@@ -10,6 +10,7 @@ import (
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
"github.com/ncw/swift"
)
@@ -66,7 +67,7 @@ func TestSwiftBackend(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("swift", logger, map[string]string{
+ b, err := NewSwiftBackend(map[string]string{
"username": username,
"password": password,
"container": container,
@@ -74,12 +75,11 @@ func TestSwiftBackend(t *testing.T) {
"project": project,
"domain": domain,
"project-domain": projectDomain,
- })
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
- testBackend(t, b)
- testBackend_ListPrefix(t, b)
-
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
}
diff --git a/vendor/github.com/hashicorp/vault/physical/physical_test.go b/vendor/github.com/hashicorp/vault/physical/testing.go
similarity index 52%
rename from vendor/github.com/hashicorp/vault/physical/physical_test.go
rename to vendor/github.com/hashicorp/vault/physical/testing.go
index de1b9cb..69f7167 100644
--- a/vendor/github.com/hashicorp/vault/physical/physical_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/testing.go
@@ -5,30 +5,9 @@ import (
"sort"
"testing"
"time"
-
- "github.com/hashicorp/vault/helper/logformat"
- log "github.com/mgutz/logxi/v1"
)
-func testNewBackend(t *testing.T) {
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- _, err := NewBackend("foobar", logger, nil)
- if err == nil {
- t.Fatalf("expected error")
- }
-
- b, err := NewBackend("inmem", logger, nil)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if b == nil {
- t.Fatalf("expected backend")
- }
-}
-
-func testBackend(t *testing.T, b Backend) {
+func ExerciseBackend(t *testing.T, b Backend) {
// Should be empty
keys, err := b.List("")
if err != nil {
@@ -216,7 +195,7 @@ func testBackend(t *testing.T, b Backend) {
}
}
-func testBackend_ListPrefix(t *testing.T, b Backend) {
+func ExerciseBackend_ListPrefix(t *testing.T, b Backend) {
e1 := &Entry{Key: "foo", Value: []byte("test")}
e2 := &Entry{Key: "foo/bar", Value: []byte("test")}
e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")}
@@ -286,7 +265,7 @@ func testBackend_ListPrefix(t *testing.T, b Backend) {
}
}
-func testHABackend(t *testing.T, b HABackend, b2 HABackend) {
+func ExerciseHABackend(t *testing.T, b HABackend, b2 HABackend) {
// Get the lock
lock, err := b.LockWith("foo", "bar")
if err != nil {
@@ -362,275 +341,120 @@ func testHABackend(t *testing.T, b HABackend, b2 HABackend) {
lock2.Unlock()
}
-type delays struct {
- beforeGet time.Duration
- beforeList time.Duration
-}
+func ExerciseTransactionalBackend(t *testing.T, b Backend) {
+ tb, ok := b.(Transactional)
+ if !ok {
+ t.Fatal("Not a transactional backend")
+ }
-func testEventuallyConsistentBackend(t *testing.T, b Backend, d delays) {
+ txns := SetupTestingTransactions(t, b)
+
+ if err := tb.Transaction(txns); err != nil {
+ t.Fatal(err)
+ }
- // no delay required: nothing written to bucket
- // Should be empty
keys, err := b.List("")
if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 0 {
- t.Fatalf("bad: %v", keys)
+ t.Fatal(err)
}
- // Delete should work if it does not exist
- err = b.Delete("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
+ expected := []string{"foo", "zip"}
- // no delay required: nothing written to bucket
- // Get should fail
- out, err := b.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Make an entry
- e := &Entry{Key: "foo", Value: []byte("test")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Get should work
- time.Sleep(d.beforeGet)
- out, err = b.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !reflect.DeepEqual(out, e) {
- t.Fatalf("bad: %v expected: %v", out, e)
- }
-
- // List should not be empty
- time.Sleep(d.beforeList)
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 1 {
- t.Fatalf("bad: %v", keys)
- }
- if keys[0] != "foo" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Delete should work
- err = b.Delete("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Should be empty
- time.Sleep(d.beforeList)
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 0 {
- t.Fatalf("bad: %v", keys)
- }
-
- // Get should fail
- time.Sleep(d.beforeGet)
- out, err = b.Get("foo")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if out != nil {
- t.Fatalf("bad: %v", out)
- }
-
- // Multiple Puts should work; GH-189
- e = &Entry{Key: "foo", Value: []byte("test")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- e = &Entry{Key: "foo", Value: []byte("test")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Make a nested entry
- e = &Entry{Key: "foo/bar", Value: []byte("baz")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- time.Sleep(d.beforeList)
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 2 {
- t.Fatalf("bad: %v", keys)
- }
sort.Strings(keys)
- if keys[0] != "foo" || keys[1] != "foo/" {
- t.Fatalf("bad: %v", keys)
+ sort.Strings(expected)
+ if !reflect.DeepEqual(keys, expected) {
+ t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys)
}
- // Delete with children should work
- err = b.Delete("foo")
+ entry, err := b.Get("foo")
if err != nil {
- t.Fatalf("err: %v", err)
+ t.Fatal(err)
+ }
+ if entry == nil {
+ t.Fatal("got nil entry")
+ }
+ if entry.Value == nil {
+ t.Fatal("got nil value")
+ }
+ if string(entry.Value) != "bar3" {
+ t.Fatal("updates did not apply correctly")
}
- // Get should return the child
- time.Sleep(d.beforeGet)
- out, err = b.Get("foo/bar")
+ entry, err = b.Get("zip")
if err != nil {
- t.Fatalf("err: %v", err)
+ t.Fatal(err)
}
- if out == nil {
- t.Fatalf("missing child")
+ if entry == nil {
+ t.Fatal("got nil entry")
}
-
- // Removal of nested secret should not leave artifacts
- e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
+ if entry.Value == nil {
+ t.Fatal("got nil value")
}
-
- err = b.Delete("foo/nested1/nested2/nested3")
- if err != nil {
- t.Fatalf("failed to remove nested secret: %v", err)
- }
-
- time.Sleep(d.beforeList)
- keys, err = b.List("foo/")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- if len(keys) != 1 {
- t.Fatalf("there should be only one key left after deleting nested "+
- "secret: %v", keys)
- }
-
- if keys[0] != "bar" {
- t.Fatalf("bad keys after deleting nested: %v", keys)
- }
-
- // Make a second nested entry to test prefix removal
- e = &Entry{Key: "foo/zip", Value: []byte("zap")}
- err = b.Put(e)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- // Delete should not remove the prefix
- err = b.Delete("foo/bar")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- time.Sleep(d.beforeList)
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 1 {
- t.Fatalf("bad: %v", keys)
- }
- if keys[0] != "foo/" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Delete should remove the prefix
- err = b.Delete("foo/zip")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- time.Sleep(d.beforeList)
- keys, err = b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 0 {
- t.Fatalf("bad: %v", keys)
+ if string(entry.Value) != "zap3" {
+ t.Fatal("updates did not apply correctly")
}
}
-func testEventuallyConsistentBackend_ListPrefix(t *testing.T, b Backend, d delays) {
- e1 := &Entry{Key: "foo", Value: []byte("test")}
- e2 := &Entry{Key: "foo/bar", Value: []byte("test")}
- e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")}
-
- err := b.Put(e1)
- if err != nil {
- t.Fatalf("err: %v", err)
+func SetupTestingTransactions(t *testing.T, b Backend) []TxnEntry {
+ // Add a few keys so that we test rollback with deletion
+ if err := b.Put(&Entry{
+ Key: "foo",
+ Value: []byte("bar"),
+ }); err != nil {
+ t.Fatal(err)
}
- err = b.Put(e2)
- if err != nil {
- t.Fatalf("err: %v", err)
+ if err := b.Put(&Entry{
+ Key: "zip",
+ Value: []byte("zap"),
+ }); err != nil {
+ t.Fatal(err)
}
- err = b.Put(e3)
- if err != nil {
- t.Fatalf("err: %v", err)
+ if err := b.Put(&Entry{
+ Key: "deleteme",
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put(&Entry{
+ Key: "deleteme2",
+ }); err != nil {
+ t.Fatal(err)
}
- // Scan the root
- time.Sleep(d.beforeList)
- keys, err := b.List("")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 2 {
- t.Fatalf("bad: %v", keys)
- }
- sort.Strings(keys)
- if keys[0] != "foo" {
- t.Fatalf("bad: %v", keys)
- }
- if keys[1] != "foo/" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Scan foo/
- time.Sleep(d.beforeList)
- keys, err = b.List("foo/")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 2 {
- t.Fatalf("bad: %v", keys)
- }
- sort.Strings(keys)
- if keys[0] != "bar" {
- t.Fatalf("bad: %v", keys)
- }
- if keys[1] != "bar/" {
- t.Fatalf("bad: %v", keys)
- }
-
- // Scan foo/bar/
- time.Sleep(d.beforeList)
- keys, err = b.List("foo/bar/")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- sort.Strings(keys)
- if len(keys) != 1 {
- t.Fatalf("bad: %v", keys)
- }
- if keys[0] != "baz" {
- t.Fatalf("bad: %v", keys)
+ txns := []TxnEntry{
+ TxnEntry{
+ Operation: PutOperation,
+ Entry: &Entry{
+ Key: "foo",
+ Value: []byte("bar2"),
+ },
+ },
+ TxnEntry{
+ Operation: DeleteOperation,
+ Entry: &Entry{
+ Key: "deleteme",
+ },
+ },
+ TxnEntry{
+ Operation: PutOperation,
+ Entry: &Entry{
+ Key: "foo",
+ Value: []byte("bar3"),
+ },
+ },
+ TxnEntry{
+ Operation: DeleteOperation,
+ Entry: &Entry{
+ Key: "deleteme2",
+ },
+ },
+ TxnEntry{
+ Operation: PutOperation,
+ Entry: &Entry{
+ Key: "zip",
+ Value: []byte("zap3"),
+ },
+ },
}
+ return txns
}
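// [editor's sketch, not part of the patch] The rename from testBackend* to
// ExerciseBackend* exports these helpers so that the per-backend packages
// split out above can share them. A backend's own test now looks roughly like
// this; newMyBackend is a hypothetical constructor standing in for
// NewS3Backend, NewSwiftBackend, NewZooKeeperBackend, etc.:
func TestMyBackend(t *testing.T) {
	b := newMyBackend(t) // hypothetical: returns a physical.Backend
	physical.ExerciseBackend(t, b)
	physical.ExerciseBackend_ListPrefix(t, b)
	if _, ok := b.(physical.Transactional); ok {
		physical.ExerciseTransactionalBackend(t, b)
	}
}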
diff --git a/vendor/github.com/hashicorp/vault/physical/transactions.go b/vendor/github.com/hashicorp/vault/physical/transactions.go
index b9ddffa..f8668d2 100644
--- a/vendor/github.com/hashicorp/vault/physical/transactions.go
+++ b/vendor/github.com/hashicorp/vault/physical/transactions.go
@@ -27,7 +27,7 @@ type PseudoTransactional interface {
}
// Implements the transaction interface
-func genericTransactionHandler(t PseudoTransactional, txns []TxnEntry) (retErr error) {
+func GenericTransactionHandler(t PseudoTransactional, txns []TxnEntry) (retErr error) {
rollbackStack := make([]TxnEntry, 0, len(txns))
var dirty bool
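// [editor's sketch, not part of the patch] Exporting the handler lets the
// backends that now live outside this package satisfy physical.Transactional
// by delegating, provided they implement physical.PseudoTransactional.
// MyBackend is hypothetical:
func (b *MyBackend) Transaction(txns []physical.TxnEntry) error {
	return physical.GenericTransactionHandler(b, txns)
}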
diff --git a/vendor/github.com/hashicorp/vault/physical/zookeeper.go b/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper.go
similarity index 84%
rename from vendor/github.com/hashicorp/vault/physical/zookeeper.go
rename to vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper.go
index 6bc9061..8ecc0d6 100644
--- a/vendor/github.com/hashicorp/vault/physical/zookeeper.go
+++ b/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper.go
@@ -1,4 +1,4 @@
-package physical
+package zookeeper
import (
"fmt"
@@ -8,6 +8,7 @@ import (
"sync"
"time"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
metrics "github.com/armon/go-metrics"
@@ -22,20 +23,20 @@ const (
ZKNodeFilePrefix = "_"
)
-// ZookeeperBackend is a physical backend that stores data at specific
-// prefix within Zookeeper. It is used in production situations as
+// ZooKeeperBackend is a physical backend that stores data at a specific
+// prefix within ZooKeeper. It is used in production situations as
// it allows Vault to run on multiple machines in a highly-available manner.
-type ZookeeperBackend struct {
+type ZooKeeperBackend struct {
path string
client *zk.Conn
acl []zk.ACL
logger log.Logger
}
-// newZookeeperBackend constructs a Zookeeper backend using the given API client
+// NewZooKeeperBackend constructs a ZooKeeper backend using the given API client
// and the prefix in the KV store.
-func newZookeeperBackend(conf map[string]string, logger log.Logger) (Backend, error) {
- // Get the path in Zookeeper
+func NewZooKeeperBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+ // Get the path in ZooKeeper
path, ok := conf["path"]
if !ok {
path = "vault/"
@@ -114,12 +115,12 @@ func newZookeeperBackend(conf map[string]string, logger log.Logger) (Backend, er
if useAddAuth {
err = client.AddAuth(schema, []byte(owner))
if err != nil {
- return nil, fmt.Errorf("Zookeeper rejected authentication information provided at auth_info: %v", err)
+ return nil, fmt.Errorf("ZooKeeper rejected authentication information provided at auth_info: %v", err)
}
}
// Setup the backend
- c := &ZookeeperBackend{
+ c := &ZooKeeperBackend{
path: path,
client: client,
acl: acl,
@@ -131,7 +132,7 @@ func newZookeeperBackend(conf map[string]string, logger log.Logger) (Backend, er
// ensurePath is used to create each node in the path hierarchy.
// We avoid calling this optimistically, and invoke it when we get
// an error during an operation
-func (c *ZookeeperBackend) ensurePath(path string, value []byte) error {
+func (c *ZooKeeperBackend) ensurePath(path string, value []byte) error {
nodes := strings.Split(path, "/")
fullPath := ""
for index, node := range nodes {
@@ -161,7 +162,7 @@ func (c *ZookeeperBackend) ensurePath(path string, value []byte) error {
// cleanupLogicalPath is used to remove all empty nodes, beginning with the deepest one,
// aborting on the first non-empty one, up to the top-level node.
-func (c *ZookeeperBackend) cleanupLogicalPath(path string) error {
+func (c *ZooKeeperBackend) cleanupLogicalPath(path string) error {
nodes := strings.Split(path, "/")
for i := len(nodes) - 1; i > 0; i-- {
fullPath := c.path + strings.Join(nodes[:i], "/")
@@ -192,12 +193,12 @@ func (c *ZookeeperBackend) cleanupLogicalPath(path string) error {
}
// nodePath returns a zk path based on the given key.
-func (c *ZookeeperBackend) nodePath(key string) string {
+func (c *ZooKeeperBackend) nodePath(key string) string {
return filepath.Join(c.path, filepath.Dir(key), ZKNodeFilePrefix+filepath.Base(key))
}
// Put is used to insert or update an entry
-func (c *ZookeeperBackend) Put(entry *Entry) error {
+func (c *ZooKeeperBackend) Put(entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"zookeeper", "put"}, time.Now())
// Attempt to set the full path
@@ -212,7 +213,7 @@ func (c *ZookeeperBackend) Put(entry *Entry) error {
}
// Get is used to fetch an entry
-func (c *ZookeeperBackend) Get(key string) (*Entry, error) {
+func (c *ZooKeeperBackend) Get(key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"zookeeper", "get"}, time.Now())
// Attempt to read the full path
@@ -231,7 +232,7 @@ func (c *ZookeeperBackend) Get(key string) (*Entry, error) {
if value == nil {
return nil, nil
}
- ent := &Entry{
+ ent := &physical.Entry{
Key: key,
Value: value,
}
@@ -239,7 +240,7 @@ func (c *ZookeeperBackend) Get(key string) (*Entry, error) {
}
// Delete is used to permanently delete an entry
-func (c *ZookeeperBackend) Delete(key string) error {
+func (c *ZooKeeperBackend) Delete(key string) error {
defer metrics.MeasureSince([]string{"zookeeper", "delete"}, time.Now())
if key == "" {
@@ -262,7 +263,7 @@ func (c *ZookeeperBackend) Delete(key string) error {
// List is used to list all the keys under a given
// prefix, up to the next prefix.
-func (c *ZookeeperBackend) List(prefix string) ([]string, error) {
+func (c *ZooKeeperBackend) List(prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"zookeeper", "list"}, time.Now())
// Query the children at the full path
@@ -289,8 +290,14 @@ func (c *ZookeeperBackend) List(prefix string) ([]string, error) {
// and append the slash which is what Vault depends on
// for iteration
if stat.DataLength > 0 && stat.NumChildren > 0 {
- msgFmt := "Node %q is both of data and leaf type ??"
- panic(fmt.Sprintf(msgFmt, childPath))
+ if childPath == c.nodePath("core/lock") {
+ // go-zookeeper Lock() breaks Vault semantics and creates a directory
+ // under the lock file; just treat it like the file Vault expects
+ children = append(children, key[1:])
+ } else {
+ msgFmt := "Node %q is both of data and leaf type ??"
+ panic(fmt.Sprintf(msgFmt, childPath))
+ }
} else if stat.DataLength == 0 {
// No, we cannot differentiate here on the number of children, as a node
// can have all its leaves removed and still be a node.
@@ -304,8 +311,8 @@ func (c *ZookeeperBackend) List(prefix string) ([]string, error) {
}
// LockWith is used for mutual exclusion based on the given key.
-func (c *ZookeeperBackend) LockWith(key, value string) (Lock, error) {
- l := &ZookeeperHALock{
+func (c *ZooKeeperBackend) LockWith(key, value string) (physical.Lock, error) {
+ l := &ZooKeeperHALock{
in: c,
key: key,
value: value,
@@ -315,13 +322,13 @@ func (c *ZookeeperBackend) LockWith(key, value string) (Lock, error) {
// HAEnabled indicates whether the HA functionality should be exposed.
// Currently always returns true.
-func (c *ZookeeperBackend) HAEnabled() bool {
+func (c *ZooKeeperBackend) HAEnabled() bool {
return true
}
-// ZookeeperHALock is a Zookeeper Lock implementation for the HABackend
-type ZookeeperHALock struct {
- in *ZookeeperBackend
+// ZooKeeperHALock is a ZooKeeper Lock implementation for the HABackend
+type ZooKeeperHALock struct {
+ in *ZooKeeperBackend
key string
value string
@@ -331,7 +338,7 @@ type ZookeeperHALock struct {
zkLock *zk.Lock
}
-func (i *ZookeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+func (i *ZooKeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
i.localLock.Lock()
defer i.localLock.Unlock()
if i.held {
@@ -373,7 +380,7 @@ func (i *ZookeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error)
return i.leaderCh, nil
}
-func (i *ZookeeperHALock) attemptLock(lockpath string, didLock chan struct{}, failLock chan error, releaseCh chan bool) {
+func (i *ZooKeeperHALock) attemptLock(lockpath string, didLock chan struct{}, failLock chan error, releaseCh chan bool) {
// Wait to acquire the lock in ZK
lock := zk.NewLock(i.in.client, lockpath, i.in.acl)
err := lock.Lock()
@@ -401,7 +408,7 @@ func (i *ZookeeperHALock) attemptLock(lockpath string, didLock chan struct{}, fa
}
}
-func (i *ZookeeperHALock) monitorLock(lockeventCh <-chan zk.Event, leaderCh chan struct{}) {
+func (i *ZooKeeperHALock) monitorLock(lockeventCh <-chan zk.Event, leaderCh chan struct{}) {
for {
select {
case event := <-lockeventCh:
@@ -426,7 +433,7 @@ func (i *ZookeeperHALock) monitorLock(lockeventCh <-chan zk.Event, leaderCh chan
}
}
-func (i *ZookeeperHALock) Unlock() error {
+func (i *ZooKeeperHALock) Unlock() error {
i.localLock.Lock()
defer i.localLock.Unlock()
if !i.held {
@@ -438,7 +445,7 @@ func (i *ZookeeperHALock) Unlock() error {
return nil
}
-func (i *ZookeeperHALock) Value() (bool, string, error) {
+func (i *ZooKeeperHALock) Value() (bool, string, error) {
lockpath := i.in.nodePath(i.key)
value, _, err := i.in.client.Get(lockpath)
return (value != nil), string(value), err
diff --git a/vendor/github.com/hashicorp/vault/physical/zookeeper_test.go b/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper_test.go
similarity index 80%
rename from vendor/github.com/hashicorp/vault/physical/zookeeper_test.go
rename to vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper_test.go
index b9969ae..a85c27c 100644
--- a/vendor/github.com/hashicorp/vault/physical/zookeeper_test.go
+++ b/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper_test.go
@@ -1,4 +1,4 @@
-package physical
+package zookeeper
import (
"fmt"
@@ -7,12 +7,13 @@ import (
"time"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
"github.com/samuel/go-zookeeper/zk"
)
-func TestZookeeperBackend(t *testing.T) {
+func TestZooKeeperBackend(t *testing.T) {
addr := os.Getenv("ZOOKEEPER_ADDR")
if addr == "" {
t.SkipNow()
@@ -45,19 +46,19 @@ func TestZookeeperBackend(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("zookeeper", logger, map[string]string{
+ b, err := NewZooKeeperBackend(map[string]string{
"address": addr + "," + addr,
"path": randPath,
- })
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
- testBackend(t, b)
- testBackend_ListPrefix(t, b)
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
}
-func TestZookeeperHABackend(t *testing.T) {
+func TestZooKeeperHABackend(t *testing.T) {
addr := os.Getenv("ZOOKEEPER_ADDR")
if addr == "" {
t.SkipNow()
@@ -85,17 +86,17 @@ func TestZookeeperHABackend(t *testing.T) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- b, err := NewBackend("zookeeper", logger, map[string]string{
+ b, err := NewZooKeeperBackend(map[string]string{
"address": addr + "," + addr,
"path": randPath,
- })
+ }, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
- ha, ok := b.(HABackend)
+ ha, ok := b.(physical.HABackend)
if !ok {
t.Fatalf("zookeeper does not implement HABackend")
}
- testHABackend(t, ha, ha)
+ physical.ExerciseHABackend(t, ha, ha)
}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra-database-plugin/main.go
new file mode 100644
index 0000000..f9bfdeb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra-database-plugin/main.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/plugins/database/cassandra"
+)
+
+func main() {
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(os.Args[1:])
+
+ err := cassandra.Run(apiClientMeta.GetTLSConfig())
+ if err != nil {
+ log.Println(err)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra.go
new file mode 100644
index 0000000..c0b5fd5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra.go
@@ -0,0 +1,177 @@
+package cassandra
+
+import (
+ "strings"
+ "time"
+
+ "github.com/gocql/gocql"
+ multierror "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/plugins"
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+ "github.com/hashicorp/vault/plugins/helper/database/credsutil"
+ "github.com/hashicorp/vault/plugins/helper/database/dbutil"
+)
+
+const (
+ defaultUserCreationCQL = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;`
+ defaultUserDeletionCQL = `DROP USER '{{username}}';`
+ cassandraTypeName = "cassandra"
+)
+
+// Cassandra is an implementation of Database interface
+type Cassandra struct {
+ connutil.ConnectionProducer
+ credsutil.CredentialsProducer
+}
+
+// New returns a new Cassandra instance
+func New() (interface{}, error) {
+ connProducer := &cassandraConnectionProducer{}
+ connProducer.Type = cassandraTypeName
+
+ credsProducer := &credsutil.SQLCredentialsProducer{
+ DisplayNameLen: 15,
+ RoleNameLen: 15,
+ UsernameLen: 100,
+ Separator: "_",
+ }
+
+ dbType := &Cassandra{
+ ConnectionProducer: connProducer,
+ CredentialsProducer: credsProducer,
+ }
+
+ return dbType, nil
+}
+
+// Run instantiates a Cassandra object, and runs the RPC server for the plugin
+func Run(apiTLSConfig *api.TLSConfig) error {
+ dbType, err := New()
+ if err != nil {
+ return err
+ }
+
+ plugins.Serve(dbType.(*Cassandra), apiTLSConfig)
+
+ return nil
+}
+
+// Type returns the TypeName for this backend
+func (c *Cassandra) Type() (string, error) {
+ return cassandraTypeName, nil
+}
+
+func (c *Cassandra) getConnection() (*gocql.Session, error) {
+ session, err := c.Connection()
+ if err != nil {
+ return nil, err
+ }
+
+ return session.(*gocql.Session), nil
+}
+
+// CreateUser generates the username/password on the underlying Cassandra secret backend as instructed by
+// the CreationStatement provided.
+func (c *Cassandra) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
+ // Grab the lock
+ c.Lock()
+ defer c.Unlock()
+
+ // Get the connection
+ session, err := c.getConnection()
+ if err != nil {
+ return "", "", err
+ }
+
+ creationCQL := statements.CreationStatements
+ if creationCQL == "" {
+ creationCQL = defaultUserCreationCQL
+ }
+ rollbackCQL := statements.RollbackStatements
+ if rollbackCQL == "" {
+ rollbackCQL = defaultUserDeletionCQL
+ }
+
+ username, err = c.GenerateUsername(usernameConfig)
+ if err != nil {
+ return "", "", err
+ }
+ username = strings.Replace(username, "-", "_", -1)
+ // Cassandra doesn't like uppercase usernames
+ username = strings.ToLower(username)
+
+ password, err = c.GeneratePassword()
+ if err != nil {
+ return "", "", err
+ }
+
+ // Execute each query
+ for _, query := range strutil.ParseArbitraryStringSlice(creationCQL, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ err = session.Query(dbutil.QueryHelper(query, map[string]string{
+ "username": username,
+ "password": password,
+ })).Exec()
+ if err != nil {
+ for _, query := range strutil.ParseArbitraryStringSlice(rollbackCQL, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ session.Query(dbutil.QueryHelper(query, map[string]string{
+ "username": username,
+ })).Exec()
+ }
+ return "", "", err
+ }
+ }
+
+ return username, password, nil
+}
+
+// RenewUser is not supported on Cassandra, so this is a no-op.
+func (c *Cassandra) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
+ // NOOP
+ return nil
+}
+
+// RevokeUser attempts to drop the specified user.
+func (c *Cassandra) RevokeUser(statements dbplugin.Statements, username string) error {
+ // Grab the lock
+ c.Lock()
+ defer c.Unlock()
+
+ session, err := c.getConnection()
+ if err != nil {
+ return err
+ }
+
+ revocationCQL := statements.RevocationStatements
+ if revocationCQL == "" {
+ revocationCQL = defaultUserDeletionCQL
+ }
+
+ var result *multierror.Error
+ for _, query := range strutil.ParseArbitraryStringSlice(revocationCQL, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ err := session.Query(dbutil.QueryHelper(query, map[string]string{
+ "username": username,
+ })).Exec()
+
+ result = multierror.Append(result, err)
+ }
+
+ return result.ErrorOrNil()
+}
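// [editor's sketch, not part of the patch] How the multi-statement handling in
// CreateUser/RevokeUser behaves: strutil.ParseArbitraryStringSlice splits the
// configured CQL on ";" and dbutil.QueryHelper substitutes {{username}} and
// {{password}} per statement. Standalone, with illustrative values:
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/vault/helper/strutil"
	"github.com/hashicorp/vault/plugins/helper/database/dbutil"
)

func main() {
	creationCQL := `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;
GRANT SELECT ON ALL KEYSPACES TO {{username}};`
	for _, query := range strutil.ParseArbitraryStringSlice(creationCQL, ";") {
		query = strings.TrimSpace(query)
		if len(query) == 0 {
			continue
		}
		// Each statement is templated and would be executed individually.
		fmt.Println(dbutil.QueryHelper(query, map[string]string{
			"username": "v_token_test_1234", // illustrative only
			"password": "generated-password",
		}))
	}
}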
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra_test.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra_test.go
new file mode 100644
index 0000000..0f4d330
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra_test.go
@@ -0,0 +1,277 @@
+package cassandra
+
+import (
+ "os"
+ "strconv"
+ "testing"
+ "time"
+
+ "fmt"
+
+ "github.com/gocql/gocql"
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ dockertest "gopkg.in/ory-am/dockertest.v3"
+)
+
+func prepareCassandraTestContainer(t *testing.T) (func(), string, int) {
+ if os.Getenv("CASSANDRA_HOST") != "" {
+ return func() {}, os.Getenv("CASSANDRA_HOST"), 0
+ }
+
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("Failed to connect to docker: %s", err)
+ }
+
+ cwd, _ := os.Getwd()
+ cassandraMountPath := fmt.Sprintf("%s/test-fixtures/:/etc/cassandra/", cwd)
+
+ ro := &dockertest.RunOptions{
+ Repository: "cassandra",
+ Tag: "latest",
+ Env: []string{"CASSANDRA_BROADCAST_ADDRESS=127.0.0.1"},
+ Mounts: []string{cassandraMountPath},
+ }
+ resource, err := pool.RunWithOptions(ro)
+ if err != nil {
+ t.Fatalf("Could not start local cassandra docker container: %s", err)
+ }
+
+ cleanup := func() {
+ err := pool.Purge(resource)
+ if err != nil {
+ t.Fatalf("Failed to cleanup local container: %s", err)
+ }
+ }
+
+ port, _ := strconv.Atoi(resource.GetPort("9042/tcp"))
+ address := fmt.Sprintf("127.0.0.1:%d", port)
+
+ // exponential backoff-retry
+ if err = pool.Retry(func() error {
+ clusterConfig := gocql.NewCluster(address)
+ clusterConfig.Authenticator = gocql.PasswordAuthenticator{
+ Username: "cassandra",
+ Password: "cassandra",
+ }
+ clusterConfig.ProtoVersion = 4
+ clusterConfig.Port = port
+
+ session, err := clusterConfig.CreateSession()
+ if err != nil {
+ return fmt.Errorf("error creating session: %s", err)
+ }
+ defer session.Close()
+ return nil
+ }); err != nil {
+ cleanup()
+ t.Fatalf("Could not connect to cassandra docker container: %s", err)
+ }
+ return cleanup, address, port
+}
+
+func TestCassandra_Initialize(t *testing.T) {
+ if os.Getenv("TRAVIS") != "true" {
+ t.SkipNow()
+ }
+ cleanup, address, port := prepareCassandraTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "hosts": address,
+ "port": port,
+ "username": "cassandra",
+ "password": "cassandra",
+ "protocol_version": 4,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*Cassandra)
+ connProducer := db.ConnectionProducer.(*cassandraConnectionProducer)
+
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !connProducer.Initialized {
+ t.Fatal("Database should be initalized")
+ }
+
+ err = db.Close()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // test that protocol_version can also be given as a string
+ connectionDetails = map[string]interface{}{
+ "hosts": address,
+ "port": strconv.Itoa(port),
+ "username": "cassandra",
+ "password": "cassandra",
+ "protocol_version": "4",
+ }
+
+ err = db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestCassandra_CreateUser(t *testing.T) {
+ if os.Getenv("TRAVIS") != "true" {
+ t.SkipNow()
+ }
+ cleanup, address, port := prepareCassandraTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "hosts": address,
+ "port": port,
+ "username": "cassandra",
+ "password": "cassandra",
+ "protocol_version": 4,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*Cassandra)
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testCassandraRole,
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, address, port, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+}
+
+func TestCassandra_RenewUser(t *testing.T) {
+ if os.Getenv("TRAVIS") != "true" {
+ t.SkipNow()
+ }
+ cleanup, address, port := prepareCassandraTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "hosts": address,
+ "port": port,
+ "username": "cassandra",
+ "password": "cassandra",
+ "protocol_version": 4,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*Cassandra)
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testCassandraRole,
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, address, port, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ err = db.RenewUser(statements, username, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestCassandra_RevokeUser(t *testing.T) {
+ if os.Getenv("TRAVIS") != "true" {
+ t.SkipNow()
+ }
+ cleanup, address, port := prepareCassandraTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "hosts": address,
+ "port": port,
+ "username": "cassandra",
+ "password": "cassandra",
+ "protocol_version": 4,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*Cassandra)
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testCassandraRole,
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, address, port, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ // Test default revoke statements
+ err = db.RevokeUser(statements, username)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, address, port, username, password); err == nil {
+ t.Fatal("Credentials were not revoked")
+ }
+}
+
+func testCredsExist(t testing.TB, address string, port int, username, password string) error {
+ clusterConfig := gocql.NewCluster(address)
+ clusterConfig.Authenticator = gocql.PasswordAuthenticator{
+ Username: username,
+ Password: password,
+ }
+ clusterConfig.ProtoVersion = 4
+ clusterConfig.Port = port
+
+ session, err := clusterConfig.CreateSession()
+ if err != nil {
+ return fmt.Errorf("error creating session: %s", err)
+ }
+ defer session.Close()
+ return nil
+}
+
+const testCassandraRole = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;
+GRANT ALL PERMISSIONS ON ALL KEYSPACES TO {{username}};`
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/connection_producer.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/connection_producer.go
new file mode 100644
index 0000000..44b0b7d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/connection_producer.go
@@ -0,0 +1,237 @@
+package cassandra
+
+import (
+ "crypto/tls"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/mitchellh/mapstructure"
+
+ "github.com/gocql/gocql"
+ "github.com/hashicorp/vault/helper/certutil"
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/helper/tlsutil"
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+)
+
+// cassandraConnectionProducer implements ConnectionProducer and provides an
+// interface for cassandra databases to make connections.
+type cassandraConnectionProducer struct {
+ Hosts string `json:"hosts" structs:"hosts" mapstructure:"hosts"`
+ Port int `json:"port" structs:"port" mapstructure:"port"`
+ Username string `json:"username" structs:"username" mapstructure:"username"`
+ Password string `json:"password" structs:"password" mapstructure:"password"`
+ TLS bool `json:"tls" structs:"tls" mapstructure:"tls"`
+ InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"`
+ ProtocolVersion int `json:"protocol_version" structs:"protocol_version" mapstructure:"protocol_version"`
+ ConnectTimeoutRaw interface{} `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"`
+ TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"`
+ Consistency string `json:"consistency" structs:"consistency" mapstructure:"consistency"`
+ PemBundle string `json:"pem_bundle" structs:"pem_bundle" mapstructure:"pem_bundle"`
+ PemJSON string `json:"pem_json" structs:"pem_json" mapstructure:"pem_json"`
+
+ connectTimeout time.Duration
+ certificate string
+ privateKey string
+ issuingCA string
+
+ Initialized bool
+ Type string
+ session *gocql.Session
+ sync.Mutex
+}
+
+func (c *cassandraConnectionProducer) Initialize(conf map[string]interface{}, verifyConnection bool) error {
+ c.Lock()
+ defer c.Unlock()
+
+ err := mapstructure.WeakDecode(conf, c)
+ if err != nil {
+ return err
+ }
+
+ if c.ConnectTimeoutRaw == nil {
+ c.ConnectTimeoutRaw = "0s"
+ }
+ c.connectTimeout, err = parseutil.ParseDurationSecond(c.ConnectTimeoutRaw)
+ if err != nil {
+ return fmt.Errorf("invalid connect_timeout: %s", err)
+ }
+
+ switch {
+ case len(c.Hosts) == 0:
+ return fmt.Errorf("hosts cannot be empty")
+ case len(c.Username) == 0:
+ return fmt.Errorf("username cannot be empty")
+ case len(c.Password) == 0:
+ return fmt.Errorf("password cannot be empty")
+ }
+
+ var certBundle *certutil.CertBundle
+ var parsedCertBundle *certutil.ParsedCertBundle
+ switch {
+ case len(c.PemJSON) != 0:
+ parsedCertBundle, err = certutil.ParsePKIJSON([]byte(c.PemJSON))
+ if err != nil {
+ return fmt.Errorf("could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: %s", err)
+ }
+ certBundle, err = parsedCertBundle.ToCertBundle()
+ if err != nil {
+ return fmt.Errorf("Error marshaling PEM information: %s", err)
+ }
+ c.certificate = certBundle.Certificate
+ c.privateKey = certBundle.PrivateKey
+ c.issuingCA = certBundle.IssuingCA
+ c.TLS = true
+
+ case len(c.PemBundle) != 0:
+ parsedCertBundle, err = certutil.ParsePEMBundle(c.PemBundle)
+ if err != nil {
+ return fmt.Errorf("Error parsing the given PEM information: %s", err)
+ }
+ certBundle, err = parsedCertBundle.ToCertBundle()
+ if err != nil {
+ return fmt.Errorf("Error marshaling PEM information: %s", err)
+ }
+ c.certificate = certBundle.Certificate
+ c.privateKey = certBundle.PrivateKey
+ c.issuingCA = certBundle.IssuingCA
+ c.TLS = true
+ }
+
+ // Set initialized to true at this point since all fields are set,
+ // and the connection can be established at a later time.
+ c.Initialized = true
+
+ if verifyConnection {
+ if _, err := c.Connection(); err != nil {
+ return fmt.Errorf("error verifying connection: %s", err)
+ }
+ }
+
+ return nil
+}
+
+func (c *cassandraConnectionProducer) Connection() (interface{}, error) {
+ if !c.Initialized {
+ return nil, connutil.ErrNotInitialized
+ }
+
+ // If we already have a DB, return it
+ if c.session != nil {
+ return c.session, nil
+ }
+
+ session, err := c.createSession()
+ if err != nil {
+ return nil, err
+ }
+
+ // Store the session in backend for reuse
+ c.session = session
+
+ return session, nil
+}
+
+func (c *cassandraConnectionProducer) Close() error {
+ // Grab the write lock
+ c.Lock()
+ defer c.Unlock()
+
+ if c.session != nil {
+ c.session.Close()
+ }
+
+ c.session = nil
+
+ return nil
+}
+
+func (c *cassandraConnectionProducer) createSession() (*gocql.Session, error) {
+ hosts := strings.Split(c.Hosts, ",")
+ clusterConfig := gocql.NewCluster(hosts...)
+ clusterConfig.Authenticator = gocql.PasswordAuthenticator{
+ Username: c.Username,
+ Password: c.Password,
+ }
+
+ if c.Port != 0 {
+ clusterConfig.Port = c.Port
+ }
+
+ clusterConfig.ProtoVersion = c.ProtocolVersion
+ if clusterConfig.ProtoVersion == 0 {
+ clusterConfig.ProtoVersion = 2
+ }
+
+ clusterConfig.Timeout = c.connectTimeout
+ if c.TLS {
+ var tlsConfig *tls.Config
+ if len(c.certificate) > 0 || len(c.issuingCA) > 0 {
+ if len(c.certificate) > 0 && len(c.privateKey) == 0 {
+ return nil, fmt.Errorf("found certificate for TLS authentication but no private key")
+ }
+
+ certBundle := &certutil.CertBundle{}
+ if len(c.certificate) > 0 {
+ certBundle.Certificate = c.certificate
+ certBundle.PrivateKey = c.privateKey
+ }
+ if len(c.issuingCA) > 0 {
+ certBundle.IssuingCA = c.issuingCA
+ }
+
+ parsedCertBundle, err := certBundle.ToParsedCertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate bundle: %s", err)
+ }
+
+ tlsConfig, err = parsedCertBundle.GetTLSConfig(certutil.TLSClient)
+ if err != nil || tlsConfig == nil {
+ return nil, fmt.Errorf("failed to get TLS configuration: tlsConfig:%#v err:%v", tlsConfig, err)
+ }
+ tlsConfig.InsecureSkipVerify = c.InsecureTLS
+
+ if c.TLSMinVersion != "" {
+ var ok bool
+ tlsConfig.MinVersion, ok = tlsutil.TLSLookup[c.TLSMinVersion]
+ if !ok {
+ return nil, fmt.Errorf("invalid 'tls_min_version' in config")
+ }
+ } else {
+ // MinVersion was not being set earlier. Reset it to
+ // zero to gracefully handle upgrades.
+ tlsConfig.MinVersion = 0
+ }
+ }
+
+ clusterConfig.SslOpts = &gocql.SslOptions{
+ Config: tlsConfig,
+ }
+ }
+
+ session, err := clusterConfig.CreateSession()
+ if err != nil {
+ return nil, fmt.Errorf("error creating session: %s", err)
+ }
+
+ // Set consistency
+ if c.Consistency != "" {
+ consistencyValue, err := gocql.ParseConsistencyWrapper(c.Consistency)
+ if err != nil {
+ return nil, err
+ }
+
+ session.SetConsistency(consistencyValue)
+ }
+
+ // Verify the info
+ err = session.Query(`LIST USERS`).Exec()
+ if err != nil {
+ return nil, fmt.Errorf("error validating connection info: %s", err)
+ }
+
+ return session, nil
+}
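// [editor's sketch, not part of the patch] A configuration map the producer
// above would accept; the keys come from its mapstructure tags, the values are
// illustrative. connect_timeout is parsed by parseutil.ParseDurationSecond, so
// "5s" or a bare number of seconds both work; consistency must satisfy
// gocql.ParseConsistencyWrapper; tls_min_version (when TLS is used) must be a
// tlsutil.TLSLookup key such as "tls12".
var conf = map[string]interface{}{
	"hosts":            "127.0.0.1",
	"port":             9042,
	"username":         "cassandra",
	"password":         "cassandra",
	"protocol_version": 4,
	"connect_timeout":  "5s",
	"consistency":      "LOCAL_QUORUM",
}
// db.Initialize(conf, true) would then verify connectivity by running `LIST USERS`.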
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/test-fixtures/cassandra.yaml b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/test-fixtures/cassandra.yaml
new file mode 100644
index 0000000..7c28d84
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/test-fixtures/cassandra.yaml
@@ -0,0 +1,1146 @@
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: 'Test Cluster'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting on the node's initial start;
+# on subsequent starts, this setting will apply even if initial_token is set.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+num_tokens: 256
+
+# Triggers automatic allocation of num_tokens tokens for this node. The allocation
+# algorithm attempts to choose tokens in a way that optimizes replicated load over
+# the nodes in the datacenter for the replication strategy used by the specified
+# keyspace.
+#
+# The load assigned to each node will be close to proportional to its number of
+# vnodes.
+#
+# Only supported with the Murmur3Partitioner.
+# allocate_tokens_for_keyspace: KEYSPACE
+
+# initial_token allows you to specify tokens manually. While you can use it with
+# vnodes (num_tokens > 1, above) -- in which case you should provide a
+# comma-separated list -- it's primarily used when adding nodes to legacy clusters
+# that do not have vnodes enabled.
+# initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+# May either be "true" or "false" to enable globally
+hinted_handoff_enabled: true
+
+# When hinted_handoff_enabled is true, a black list of data centers that will not
+# perform hinted handoff
+# hinted_handoff_disabled_datacenters:
+# - DC1
+# - DC2
+
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+max_hint_window_in_ms: 10800000 # 3 hours
+
+# Maximum throttle in KBs per second, per delivery thread. This will be
+# reduced proportionally to the number of nodes in the cluster. (If there
+# are two nodes in the cluster, each delivery thread will use the maximum
+# rate; if there are three, each will throttle to half of the maximum,
+# since we expect two nodes to be delivering hints simultaneously.)
+hinted_handoff_throttle_in_kb: 1024
+
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# Directory where Cassandra should store hints.
+# If not set, the default directory is $CASSANDRA_HOME/data/hints.
+# hints_directory: /var/lib/cassandra/hints
+
+# How often hints should be flushed from the internal buffers to disk.
+# Will *not* trigger fsync.
+hints_flush_period_in_ms: 10000
+
+# Maximum size for a single hints file, in megabytes.
+max_hints_file_size_in_mb: 128
+
+# Compression to apply to the hint files. If omitted, hints files
+# will be written uncompressed. LZ4, Snappy, and Deflate compressors
+# are supported.
+#hints_compression:
+# - class_name: LZ4Compressor
+# parameters:
+# -
+
+# Maximum throttle in KBs per second, total. This will be
+# reduced proportionally to the number of nodes in the cluster.
+batchlog_replay_throttle_in_kb: 1024
+
+# Authentication backend, implementing IAuthenticator; used to identify users
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+# PasswordAuthenticator}.
+#
+# - AllowAllAuthenticator performs no checks - set it to disable authentication.
+# - PasswordAuthenticator relies on username/password pairs to authenticate
+# users. It keeps usernames and hashed passwords in system_auth.credentials table.
+# Please increase system_auth keyspace replication factor if you use this authenticator.
+# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
+authenticator: PasswordAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+# increase system_auth keyspace replication factor if you use this authorizer.
+authorizer: CassandraAuthorizer
+
+# Part of the Authentication & Authorization backend, implementing IRoleManager; used
+# to maintain grants and memberships between roles.
+# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager,
+# which stores role information in the system_auth keyspace. Most functions of the
+# IRoleManager require an authenticated login, so unless the configured IAuthenticator
+# actually implements authentication, most of this functionality will be unavailable.
+#
+# - CassandraRoleManager stores role data in the system_auth keyspace. Please
+# increase system_auth keyspace replication factor if you use this role manager.
+role_manager: CassandraRoleManager
+
+# Validity period for roles cache (fetching granted roles can be an expensive
+# operation depending on the role manager, CassandraRoleManager is one example)
+# Granted roles are cached for authenticated sessions in AuthenticatedUser and
+# after the period specified here, become eligible for (async) reload.
+# Defaults to 2000, set to 0 to disable caching entirely.
+# Will be disabled automatically for AllowAllAuthenticator.
+roles_validity_in_ms: 2000
+
+# Refresh interval for roles cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If roles_validity_in_ms is non-zero, then this must be
+# non-zero as well.
+# Defaults to the same value as roles_validity_in_ms.
+# roles_update_interval_in_ms: 2000
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 2000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+permissions_validity_in_ms: 2000
+
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If permissions_validity_in_ms is non-zero, then this must be
+# also.
+# Defaults to the same value as permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 2000
+
+# Validity period for credentials cache. This cache is tightly coupled to
+# the provided PasswordAuthenticator implementation of IAuthenticator. If
+# another IAuthenticator implementation is configured, this cache will not
+# be automatically used and so the following settings will have no effect.
+# Please note, credentials are cached in their encrypted form, so while
+# activating this cache may reduce the number of queries made to the
+# underlying table, it may not bring a significant reduction in the
+# latency of individual authentication attempts.
+# Defaults to 2000, set to 0 to disable credentials caching.
+credentials_validity_in_ms: 2000
+
+# Refresh interval for credentials cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If credentials_validity_in_ms is non-zero, then this must be
+# non-zero as well.
+# Defaults to the same value as credentials_validity_in_ms.
+# credentials_update_interval_in_ms: 2000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster. You should leave this
+# alone for new clusters. The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Besides Murmur3Partitioner, partitioners included for backwards
+# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
+# OrderPreservingPartitioner.
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Directories where Cassandra should store data on disk. Cassandra
+# will spread data evenly across them, subject to the granularity of
+# the configured compaction strategy.
+# If not set, the default directory is $CASSANDRA_HOME/data/data.
+data_file_directories:
+ - /var/lib/cassandra/data
+
+# commit log. when running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
+commitlog_directory: /var/lib/cassandra/commitlog
+
+# Enable / disable CDC functionality on a per-node basis. This modifies the logic used
+# for write path allocation rejection (standard: never reject. cdc: reject Mutation
+# containing a CDC-enabled table if at space limit in cdc_raw_directory).
+cdc_enabled: false
+
+# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the
+# segment contains mutations for a CDC-enabled table. This should be placed on a
+# separate spindle than the data directories. If not set, the default directory is
+# $CASSANDRA_HOME/data/cdc_raw.
+# cdc_raw_directory: /var/lib/cassandra/cdc_raw
+
+# Policy for data disk failures:
+#
+# die
+# shut down gossip and client transports and kill the JVM for any fs errors or
+# single-sstable errors, so the node can be replaced.
+#
+# stop_paranoid
+# shut down gossip and client transports even for single-sstable errors,
+# kill the JVM for errors during startup.
+#
+# stop
+# shut down gossip and client transports, leaving the node effectively dead, but
+# can still be inspected via JMX, kill the JVM for errors during startup.
+#
+# best_effort
+# stop using the failed disk and respond to requests based on
+# remaining available sstables. This means you WILL see obsolete
+# data at CL.ONE!
+#
+# ignore
+# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# Policy for commit disk failures:
+#
+# die
+# shut down gossip and Thrift and kill the JVM, so the node can be replaced.
+#
+# stop
+# shut down gossip and Thrift, leaving the node effectively dead, but
+# can still be inspected via JMX.
+#
+# stop_commit
+# shutdown the commit log, letting writes collect but
+# continuing to service reads, as in pre-2.0.5 Cassandra
+#
+# ignore
+# ignore fatal errors and let the batches fail
+commit_failure_policy: stop
+
+# Maximum size of the native protocol prepared statement cache
+#
+# Valid values are either "auto" (omitting the value) or a value greater than 0.
+#
+# Note that specifying too large a value will result in long-running GCs and possibly
+# out-of-memory errors. Keep the value at a small fraction of the heap.
+#
+# If you constantly see "prepared statements discarded in the last minute because
+# cache limit reached" messages, the first step is to investigate the root cause
+# of these messages and check whether prepared statements are used correctly -
+# i.e. use bind markers for variable parts.
+#
+# Only change the default value if you really have more prepared statements than
+# fit in the cache. In most cases it is not necessary to change this value.
+# Constantly re-preparing statements is a performance penalty.
+#
+# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
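+# For example, with a hypothetical 4 GB heap, "auto" works out to
+# max(4096 MB / 256, 10 MB) = 16 MB.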
+prepared_statements_cache_size_mb:
+
+# Maximum size of the Thrift prepared statement cache
+#
+# If you do not use Thrift at all, it is safe to leave this value at "auto".
+#
+# See description of 'prepared_statements_cache_size_mb' above for more information.
+#
+# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
+thrift_prepared_statements_cache_size_mb:
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must contain the entire row,
+# so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
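+# For example, with a hypothetical 8 GB heap, "auto" works out to
+# min(8192 MB * 0.05, 100 MB) = min(~410 MB, 100 MB) = 100 MB.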
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Row cache implementation class name. Available implementations:
+#
+# org.apache.cassandra.cache.OHCProvider
+# Fully off-heap row cache implementation (default).
+#
+# org.apache.cassandra.cache.SerializingCacheProvider
+# This is the row cache implementation available
+# in previous releases of Cassandra.
+# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+
+# Maximum size of the row cache in memory.
+# Please note that OHC cache implementation requires some additional off-heap memory to manage
+# the map structures and some in-flight memory during operations before/after cache entries can be
+# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
+# Do not specify more memory than the system can afford in the worst usual situation and leave some
+# headroom for OS block level cache. Never allow your system to swap.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should save the row cache.
+# Caches are saved to saved_caches_directory as specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save.
+# Specify 0 (which is the default), meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# Maximum size of the counter cache in memory.
+#
+# Counter cache helps to reduce counter locks' contention for hot counter cells.
+# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
+# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
+# of the lock hold, helping with hot counter cell updates, but will not allow skipping
+# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
+# in memory, not the whole counter, so it's relatively cheap.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
+# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
+counter_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the counter cache (keys only). Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Default is 7200 or 2 hours.
+counter_cache_save_period: 7200
+
+# Number of keys from the counter cache to save
+# Disabled by default, meaning all keys are going to be saved
+# counter_cache_keys_to_save: 100
+
+# saved caches
+# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
+saved_caches_directory: /var/lib/cassandra/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+# Max mutation size is also configurable via max_mutation_size_in_kb setting in
+# cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024.
+#
+# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
+# be set to at least twice the size of max_mutation_size_in_kb / 1024
+#
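+# For example, with the default commitlog_segment_size_in_mb: 32, the
+# implied max_mutation_size_in_kb is 32 * 1024 / 2 = 16384. Conversely, an
+# explicit max_mutation_size_in_kb: 32768 would require
+# commitlog_segment_size_in_mb of at least 64.
+#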
+commitlog_segment_size_in_mb: 32
+
+# Compression to apply to the commit log. If omitted, the commit log
+# will be written uncompressed. LZ4, Snappy, and Deflate compressors
+# are supported.
+# commitlog_compression:
+# - class_name: LZ4Compressor
+# parameters:
+# -
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+# Ex: "<ip1>,<ip2>,<ip3>"
+ - seeds: "127.0.0.1"
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them. Same applies to
+# "concurrent_counter_writes", since counter writes read the current
+# values before incrementing and writing them back.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
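+#
+# For example, a hypothetical node with 2 data drives and 8 cores would use
+# concurrent_reads: 32 (16 * 2) and concurrent_writes: 64 (8 * 8).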
+concurrent_reads: 32
+concurrent_writes: 32
+concurrent_counter_writes: 32
+
+# For materialized view writes, as there is a read involved, this should
+# be limited by the lesser of concurrent reads or concurrent writes.
+concurrent_materialized_view_writes: 32
+
+# Maximum memory to use for sstable chunk cache and buffer pooling.
+# 32MB of this are reserved for pooling buffers, the rest is used as a
+# cache that holds uncompressed sstable chunks.
+# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap,
+# so is in addition to the memory allocated for heap. The cache also has on-heap
+# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size
+# if the default 64k chunk size is used).
+# Memory is only allocated when needed.
+# file_cache_size_in_mb: 512
+
+# Flag indicating whether to allocate on or off heap when the sstable buffer
+# pool is exhausted, that is when it has exceeded the maximum memory
+# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.
+
+# buffer_pool_use_heap_if_exhausted: true
+
+# The strategy for optimizing disk read
+# Possible values are:
+# ssd (for solid state disks, the default)
+# spinning (for spinning disks)
+# disk_optimization_strategy: ssd
+
+# Total permitted memory to use for memtables. Cassandra will stop
+# accepting writes when the limit is exceeded until a flush completes,
+# and will trigger a flush based on memtable_cleanup_threshold
+# If omitted, Cassandra will set both to 1/4 the size of the heap.
+# memtable_heap_space_in_mb: 2048
+# memtable_offheap_space_in_mb: 2048
+
+# Ratio of occupied non-flushing memtable size to total permitted size
+# that will trigger a flush of the largest memtable. Larger mct will
+# mean larger flushes and hence less compaction, but also less concurrent
+# flush activity which can make it difficult to keep your disks fed
+# under heavy write load.
+#
+# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
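+# For example, with a hypothetical memtable_flush_writers: 8, the default
+# threshold is 1 / (8 + 1) = 0.11, the value shown below.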
+# memtable_cleanup_threshold: 0.11
+
+# Specify the way Cassandra allocates and manages memtable memory.
+# Options are:
+#
+# heap_buffers
+# on heap nio buffers
+#
+# offheap_buffers
+# off heap (direct) nio buffers
+#
+# offheap_objects
+# off heap objects
+memtable_allocation_type: heap_buffers
+
+# Total space to use for commit logs on disk.
+#
+# If space gets above this value, Cassandra will flush every dirty CF
+# in the oldest segment and remove it. So a small total commitlog space
+# will tend to cause more flush activity on less-active columnfamilies.
+#
+# The default value is the smaller of 8192 and 1/4 of the total space
+# of the commitlog volume.
+#
+# commitlog_total_space_in_mb: 8192
+
+# This sets the number of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked.
+#
+# memtable_flush_writers defaults to one per data_file_directory.
+#
+# If your data directories are backed by SSD, you can increase this, but
+# avoid having memtable_flush_writers * data_file_directories > number of cores
+#memtable_flush_writers: 1
+
+# Total space to use for change-data-capture logs on disk.
+#
+# If space gets above this value, Cassandra will throw WriteTimeoutException
+# on Mutations including tables with CDC enabled. A CDCCompactor is responsible
+# for parsing the raw CDC logs and deleting them when parsing is completed.
+#
+# The default value is the min of 4096 MB and 1/8th of the total space
+# of the drive where cdc_raw_directory resides.
+# cdc_total_space_in_mb: 4096
+
+# When we hit our cdc_raw limit and the CDCCompactor is either running behind
+# or experiencing backpressure, we check at the following interval to see if any
+# new space for cdc-tracked tables has been made available. Defaults to 250ms.
+# cdc_free_space_check_interval_ms: 250
+
+# A fixed memory pool size in MB for SSTable index summaries. If left
+# empty, this will default to 5% of the heap size. If the memory usage of
+# all index summaries exceeds this limit, SSTables with low read rates will
+# shrink their index summaries in order to meet this limit. However, this
+# is a best-effort process. In extreme conditions Cassandra may need to use
+# more than this amount of memory.
+index_summary_capacity_in_mb:
+
+# How frequently index summaries should be resampled. This is done
+# periodically to redistribute memory from the fixed-size pool to sstables
+# proportional to their recent read rates. Setting to -1 will disable this
+# process, leaving existing index summaries at their current sampling level.
+index_summary_resize_interval_in_minutes: 60
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+ssl_storage_port: 7001
+
+# Address or interface to bind to and tell other Cassandra nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# Set listen_address OR listen_interface, not both.
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing _if_ the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting listen_address to 0.0.0.0 is always wrong.
+#
+listen_address: 172.17.0.5
+
+# Set listen_address OR listen_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# listen_interface: eth0
+
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+# listen_interface_prefer_ipv6: false
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+broadcast_address: 127.0.0.1
+
+# When using multiple physical network interfaces, set this
+# to true to listen on broadcast_address in addition to
+# the listen_address, allowing nodes to communicate in both
+# interfaces.
+# Ignore this property if the network configuration automatically
+# routes between the public and private networks such as EC2.
+# listen_on_broadcast_address: false
+
+# Internode authentication backend, implementing IInternodeAuthenticator;
+# used to allow/disallow connections from peer nodes.
+# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+native_transport_port: 9042
+# Enabling native transport encryption in client_encryption_options allows you to either use
+# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
+# standard native_transport_port.
+# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
+# for native_transport_port. Setting native_transport_port_ssl to a different value
+# from native_transport_port will use encryption for native_transport_port_ssl while
+# keeping native_transport_port unencrypted.
+# native_transport_port_ssl: 9142
+# The maximum threads for handling requests when the native transport is used.
+# This is similar to rpc_max_threads though the default differs slightly (and
+# there is no native_transport_min_threads, idle threads will always be stopped
+# after 30 seconds).
+# native_transport_max_threads: 128
+#
+# The maximum size of allowed frame. Frame (requests) larger than this will
+# be rejected as invalid. The default is 256MB. If you're changing this parameter,
+# you may want to adjust max_value_size_in_mb accordingly.
+# native_transport_max_frame_size_in_mb: 256
+
+# The maximum number of concurrent client connections.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections: -1
+
+# The maximum number of concurrent client connections per source ip.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections_per_ip: -1
+
+# Whether to start the thrift rpc server.
+start_rpc: false
+
+# The address or interface to bind the Thrift RPC service and native transport
+# server to.
+#
+# Set rpc_address OR rpc_interface, not both.
+#
+# Leaving rpc_address blank has the same effect as on listen_address
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
+# set broadcast_rpc_address to a value other than 0.0.0.0.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+rpc_address: 0.0.0.0
+
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# rpc_interface: eth1
+
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+# rpc_interface_prefer_ipv6: false
+
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
+# be set to 0.0.0.0. If left blank, this will be set to the value of
+# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+# be set.
+broadcast_rpc_address: 127.0.0.1
+
+# enable or disable keepalive on rpc/native connections
+rpc_keepalive: true
+
+# Cassandra provides two out-of-the-box options for the RPC Server:
+#
+# sync
+# One thread per thrift connection. For a very large number of clients, memory
+# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
+# per thread, and that will correspond to your use of virtual memory (but physical memory
+# may be limited depending on use of stack space).
+#
+# hsha
+# Stands for "half synchronous, half asynchronous." All thrift clients are handled
+# asynchronously using a small number of threads that does not vary with the number
+# of thrift clients (and thus scales well to many clients). The rpc requests are still
+# synchronous (one thread per active request). If hsha is selected then it is essential
+# that rpc_max_threads is changed from the default value of unlimited.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, you can provide your own RPC server by supplying the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when it is not set, it is defined by net.ipv4.tcp_wmem
+# See also:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and 'man tcp'
+# internode_send_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when it is not set, it is defined by net.ipv4.tcp_wmem
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum message length).
+thrift_framed_transport_size_in_mb: 15
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+#
+# - a smaller granularity means more index entries are generated
+# and looking up rows within the partition by collation column
+# is faster
+# - but, Cassandra will keep the collation index in memory for hot
+# rows (as part of the key cache), so a larger granularity means
+# you can cache more hot rows
+column_index_size_in_kb: 64
+
+# Per sstable indexed key cache entries (the collation index in memory
+# mentioned above) exceeding this size will not be held on heap.
+# This means that only partition information is held on heap and the
+# index entries are read from disk.
+#
+# Note that this size refers to the size of the
+# serialized index information and not the size of the partition.
+column_index_cache_size_in_kb: 2
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long-running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#concurrent_compactors: 1
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this account for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# When compacting, the replacement sstable(s) can be opened before they
+# are completely written, and used in place of the prior sstables for
+# any range that has been written. This helps to smoothly transfer reads
+# between the sstables, reducing page cache churn and keeping hot rows hot.
+sstable_preemptive_open_interval_in_mb: 50
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# Throttles all streaming file transfer between the datacenters,
+# this setting allows users to throttle inter dc stream throughput in addition
+# to throttling all network stream traffic as configured with
+# stream_throughput_outbound_megabits_per_sec
+# When unset, the default is 200 Mbps or 25 MB/s
+# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# How long the coordinator should wait for counter writes to complete
+counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, replicas will assume that requests
+# were forwarded to them instantly by the coordinator, which means that
+# under overload conditions we will waste that much extra time processing
+# already-timed-out requests.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Set socket timeout for streaming operation.
+# The stream session is failed if no data/ack is received by any of the participants
+# within that period, which means this should also be sufficient to stream a large
+# sstable or rebuild table indexes.
+# Default value is 86400000ms, which means stale streams timeout after 24 hours.
+# A value of zero means stream sockets should never time out.
+# streaming_socket_timeout_in_ms: 86400000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+#
+# - it teaches Cassandra enough about your network topology to route
+# requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Cassandra will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH
+# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss.
+# This means that if you start with the default SimpleSnitch, which
+# locates every node on "rack1" in "datacenter1", your only options
+# if you need to add another datacenter are GossipingPropertyFileSnitch
+# (and the older PFS). From there, if you want to migrate to an
+# incompatible snitch like Ec2Snitch you can do it by adding new nodes
+# under Ec2Snitch (which will locate them in a new "datacenter") and
+# decommissioning the old ones.
+#
+# Out of the box, Cassandra provides:
+#
+# SimpleSnitch:
+# Treats Strategy order as proximity. This can improve cache
+# locality when disabling read repair. Only appropriate for
+# single-datacenter deployments.
+#
+# GossipingPropertyFileSnitch
+# This should be your go-to snitch for production use. The rack
+# and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via
+# gossip. If cassandra-topology.properties exists, it is used as a
+# fallback, allowing migration from the PropertyFileSnitch.
+#
+# PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+#
+# Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+#
+# Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Cassandra will switch to the private IP after
+# establishing a connection.)
+#
+# RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's IP
+# address, respectively. Unless this happens to match your
+# deployment conventions, this is best used as an example of
+# writing a custom Snitch class and is provided in that spirit.
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+#
+# NoScheduler
+# Has no options
+#
+# RoundRobin
+# throttle_limit
+# The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# default_weight
+# default_weight is optional and allows for
+# overriding the default which is 1.
+# weights
+# Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# Enable or disable inter-node encryption
+# JVM defaults for supported SSL socket protocols and cipher suites can
+# be replaced using custom encryption options. This is not recommended
+# unless you have policies in place that dictate certain settings, or
+# need to disable vulnerable ciphers or protocols in case the JVM cannot
+# be updated.
+# FIPS compliant settings can be configured at JVM level and should not
+# involve changing encryption settings here:
+# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html
+# *NOTE* No custom encryption options are enabled at the moment
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+server_encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+ # require_client_auth: false
+ # require_endpoint_verification: false
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: false
+ # If enabled and optional is set to true, both encrypted and unencrypted connections are handled.
+ optional: false
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ # require_client_auth: false
+ # Set truststore and truststore_password if require_client_auth is true
+ # truststore: conf/.truststore
+ # truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# Can be:
+#
+# all
+# all traffic is compressed
+#
+# dc
+# traffic between different datacenters is compressed
+#
+# none
+# nothing is compressed.
+internode_compression: dc
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: false
+
+# TTL for different trace types used during logging of the repair process.
+tracetype_query_ttl: 86400
+tracetype_repair_ttl: 604800
+
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+# This threshold can be adjusted to minimize logging if necessary
+# gc_log_threshold_in_ms: 200
+
+# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at
+# INFO level
+
+# UDFs (user defined functions) are disabled by default.
+# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
+enable_user_defined_functions: false
+
+# Enables scripted UDFs (JavaScript UDFs).
+# Java UDFs are always enabled, if enable_user_defined_functions is true.
+# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
+# This option has no effect, if enable_user_defined_functions is false.
+enable_scripted_user_defined_functions: false
+
+# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
+# Lowering this value on Windows can provide much tighter latency and better throughput, however
+# some virtualized environments may see a negative performance impact from changing this setting
+# below their system default. The sysinternals 'clockres' tool can confirm your system's default
+# setting.
+windows_timer_interval: 1
+
+
+# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
+# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
+# the "key_alias" is the only key that will be used for encrypt operations; previously used keys
+# can still (and should!) be in the keystore and will be used on decrypt operations
+# (to handle the case of key rotation).
+#
+# It is strongly recommended to download and install Java Cryptography Extension (JCE)
+# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
+# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
+#
+# Currently, only the following file types are supported for transparent data encryption, although
+# more are coming in future cassandra releases: commitlog, hints
+transparent_data_encryption_options:
+ enabled: false
+ chunk_length_kb: 64
+ cipher: AES/CBC/PKCS5Padding
+ key_alias: testing:1
+ # CBC IV length for AES needs to be 16 bytes (which is also the default size)
+ # iv_length: 16
+ key_provider:
+ - class_name: org.apache.cassandra.security.JKSKeyProvider
+ parameters:
+ - keystore: conf/.keystore
+ keystore_password: cassandra
+ store_type: JCEKS
+ key_password: cassandra
+
+
+#####################
+# SAFETY THRESHOLDS #
+#####################
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
+
+# Log WARN on any batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
+# Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 50
+
+# Log WARN on any batch not of type LOGGED that spans more partitions than this limit
+unlogged_batch_across_partitions_warn_threshold: 10
+
+# Log a warning when compacting partitions larger than this value
+compaction_large_partition_warning_threshold_mb: 100
+
+# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
+# Adjust the threshold based on your application throughput requirement
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+gc_warn_threshold_in_ms: 1000
+
+# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
+# early. Any value size larger than this threshold will result in marking an SSTable
+# as corrupted.
+# max_value_size_in_mb: 256
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana-database-plugin/main.go
new file mode 100644
index 0000000..f995fe0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana-database-plugin/main.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/plugins/database/hana"
+)
+
+func main() {
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(os.Args[1:])
+
+ err := hana.Run(apiClientMeta.GetTLSConfig())
+ if err != nil {
+ log.Println(err)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana.go b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana.go
new file mode 100644
index 0000000..aa2b53d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana.go
@@ -0,0 +1,283 @@
+package hana
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+ "time"
+
+ _ "github.com/SAP/go-hdb/driver"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/plugins"
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+ "github.com/hashicorp/vault/plugins/helper/database/credsutil"
+ "github.com/hashicorp/vault/plugins/helper/database/dbutil"
+)
+
+const (
+ hanaTypeName = "hdb"
+)
+
+// HANA is an implementation of Database interface
+type HANA struct {
+ connutil.ConnectionProducer
+ credsutil.CredentialsProducer
+}
+
+// New implements builtinplugins.BuiltinFactory
+func New() (interface{}, error) {
+ connProducer := &connutil.SQLConnectionProducer{}
+ connProducer.Type = hanaTypeName
+
+ credsProducer := &credsutil.SQLCredentialsProducer{
+ DisplayNameLen: 32,
+ RoleNameLen: 20,
+ UsernameLen: 128,
+ Separator: "_",
+ }
+
+ dbType := &HANA{
+ ConnectionProducer: connProducer,
+ CredentialsProducer: credsProducer,
+ }
+
+ return dbType, nil
+}
+
+// Run instantiates a HANA object, and runs the RPC server for the plugin
+func Run(apiTLSConfig *api.TLSConfig) error {
+ dbType, err := New()
+ if err != nil {
+ return err
+ }
+
+ plugins.Serve(dbType.(*HANA), apiTLSConfig)
+
+ return nil
+}
+
+// Type returns the TypeName for this backend
+func (h *HANA) Type() (string, error) {
+ return hanaTypeName, nil
+}
+
+func (h *HANA) getConnection() (*sql.DB, error) {
+ db, err := h.Connection()
+ if err != nil {
+ return nil, err
+ }
+
+ return db.(*sql.DB), nil
+}
+
+// CreateUser generates the username/password on the underlying HANA secret backend
+// as instructed by the CreationStatement provided.
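+// A minimal creation statement template might look like the following
+// (this mirrors the {{name}}/{{password}}/{{expiration}} placeholders
+// substituted below; see testHANARole in hana_test.go):
+//
+//   CREATE USER {{name}} PASSWORD {{password}} VALID UNTIL '{{expiration}}';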
+func (h *HANA) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
+ // Grab the lock
+ h.Lock()
+ defer h.Unlock()
+
+ // Get the connection
+ db, err := h.getConnection()
+ if err != nil {
+ return "", "", err
+ }
+
+ if statements.CreationStatements == "" {
+ return "", "", dbutil.ErrEmptyCreationStatement
+ }
+
+ // Generate username
+ username, err = h.GenerateUsername(usernameConfig)
+ if err != nil {
+ return "", "", err
+ }
+
+ // HANA does not allow hyphens in usernames, and highly prefers capital letters
+ username = strings.Replace(username, "-", "_", -1)
+ username = strings.ToUpper(username)
+
+ // Generate password
+ password, err = h.GeneratePassword()
+ if err != nil {
+ return "", "", err
+ }
+ // Most HANA configurations have password constraints
+ // Prefix with A1a to satisfy these constraints. The user will be forced to change it upon login.
+ password = strings.Replace(password, "-", "_", -1)
+ password = "A1a" + password
+
+ // If expiration is in the role SQL, HANA will deactivate the user when time is up,
+ // regardless of whether vault is alive to revoke lease
+ expirationStr, err := h.GenerateExpiration(expiration)
+ if err != nil {
+ return "", "", err
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return "", "", err
+ }
+ defer tx.Rollback()
+
+ // Execute each query
+ for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
+ "name": username,
+ "password": password,
+ "expiration": expirationStr,
+ }))
+ if err != nil {
+ return "", "", err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return "", "", err
+ }
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ return "", "", err
+ }
+
+ return username, password, nil
+}
+
+// Renewing a HANA user just means altering the user's VALID UNTIL property
+func (h *HANA) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
+ // Get connection
+ db, err := h.getConnection()
+ if err != nil {
+ return err
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ // If expiration is in the role SQL, HANA will deactivate the user when time is up,
+ // regardless of whether vault is alive to revoke lease
+ expirationStr, err := h.GenerateExpiration(expiration)
+ if err != nil {
+ return err
+ }
+
+ // Renew user's valid until property field
+ stmt, err := tx.Prepare("ALTER USER " + username + " VALID UNTIL " + "'" + expirationStr + "'")
+ if err != nil {
+ return err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return err
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Revoking a HANA user will deactivate the user and try to perform a soft drop
+func (h *HANA) RevokeUser(statements dbplugin.Statements, username string) error {
+ // default revoke will be a soft drop on user
+ if statements.RevocationStatements == "" {
+ return h.revokeUserDefault(username)
+ }
+
+ // Get connection
+ db, err := h.getConnection()
+ if err != nil {
+ return err
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ // Execute each query
+ for _, query := range strutil.ParseArbitraryStringSlice(statements.RevocationStatements, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
+ "name": username,
+ }))
+ if err != nil {
+ return err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return err
+ }
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *HANA) revokeUserDefault(username string) error {
+ // Get connection
+ db, err := h.getConnection()
+ if err != nil {
+ return err
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ // Disable server login for user
+ disableStmt, err := tx.Prepare(fmt.Sprintf("ALTER USER %s DEACTIVATE USER NOW", username))
+ if err != nil {
+ return err
+ }
+ defer disableStmt.Close()
+ if _, err := disableStmt.Exec(); err != nil {
+ return err
+ }
+
+ // Invalidates current sessions and performs soft drop (drop if no dependencies)
+ // if hard drop is desired, custom revoke statements should be written for role
+ dropStmt, err := tx.Prepare(fmt.Sprintf("DROP USER %s RESTRICT", username))
+ if err != nil {
+ return err
+ }
+ defer dropStmt.Close()
+ if _, err := dropStmt.Exec(); err != nil {
+ return err
+ }
+
+ // Commit transaction
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana_test.go b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana_test.go
new file mode 100644
index 0000000..7cff7f1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana_test.go
@@ -0,0 +1,167 @@
+package hana
+
+import (
+ "database/sql"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+)
+
+func TestHANA_Initialize(t *testing.T) {
+ if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
+ t.SkipNow()
+ }
+ connURL := os.Getenv("HANA_URL")
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*HANA)
+
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
+ if !connProducer.Initialized {
+ t.Fatal("Database should be initialized")
+ }
+
+ err = db.Close()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+// this test will leave a lingering user on the system
+func TestHANA_CreateUser(t *testing.T) {
+ if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
+ t.SkipNow()
+ }
+ connURL := os.Getenv("HANA_URL")
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*HANA)
+
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test-test",
+ RoleName: "test-test",
+ }
+
+ // Test with no configured Creation Statement
+ _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Hour))
+ if err == nil {
+ t.Fatal("Expected error when no creation statement is provided")
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testHANARole,
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Hour))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+}
+
+func TestHANA_RevokeUser(t *testing.T) {
+ if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
+ t.SkipNow()
+ }
+ connURL := os.Getenv("HANA_URL")
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*HANA)
+
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testHANARole,
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test-test",
+ RoleName: "test-test",
+ }
+
+ // Test default revoke statements
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Hour))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ err = db.RevokeUser(statements, username)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if err := testCredsExist(t, connURL, username, password); err == nil {
+ t.Fatal("Credentials were not revoked")
+ }
+
+ // Test custom revoke statement
+ username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Hour))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ statements.RevocationStatements = testHANADrop
+ err = db.RevokeUser(statements, username)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if err := testCredsExist(t, connURL, username, password); err == nil {
+ t.Fatal("Credentials were not revoked")
+ }
+}
+
+func testCredsExist(t testing.TB, connURL, username, password string) error {
+ // Log in with the new creds
+ parts := strings.Split(connURL, "@")
+ connURL = fmt.Sprintf("hdb://%s:%s@%s", username, password, parts[1])
+ db, err := sql.Open("hdb", connURL)
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+ return db.Ping()
+}
+
+const testHANARole = `
+CREATE USER {{name}} PASSWORD {{password}} VALID UNTIL '{{expiration}}';`
+
+const testHANADrop = `
+DROP USER {{name}} CASCADE;`
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/connection_producer.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/connection_producer.go
new file mode 100644
index 0000000..f802dc3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/connection_producer.go
@@ -0,0 +1,167 @@
+package mongodb
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+ "github.com/mitchellh/mapstructure"
+
+ "gopkg.in/mgo.v2"
+)
+
+// mongoDBConnectionProducer implements ConnectionProducer and provides an
+// interface for databases to make connections.
+type mongoDBConnectionProducer struct {
+ ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"`
+
+ Initialized bool
+ Type string
+ session *mgo.Session
+ sync.Mutex
+}
+
+// Initialize parses connection configuration.
+func (c *mongoDBConnectionProducer) Initialize(conf map[string]interface{}, verifyConnection bool) error {
+ c.Lock()
+ defer c.Unlock()
+
+ err := mapstructure.WeakDecode(conf, c)
+ if err != nil {
+ return err
+ }
+
+ if len(c.ConnectionURL) == 0 {
+ return fmt.Errorf("connection_url cannot be empty")
+ }
+
+ // Set initialized to true at this point since all fields are set,
+ // and the connection can be established at a later time.
+ c.Initialized = true
+
+ if verifyConnection {
+ if _, err := c.Connection(); err != nil {
+ return fmt.Errorf("error verifying connection: %s", err)
+ }
+
+ if err := c.session.Ping(); err != nil {
+ return fmt.Errorf("error verifying connection: %s", err)
+ }
+ }
+
+ return nil
+}
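+
+// For example (hypothetical connection URL, shown for illustration only),
+// Initialize might be called as:
+//
+//   conf := map[string]interface{}{
+//       "connection_url": "mongodb://admin:secret@127.0.0.1:27017/admin",
+//   }
+//   err := producer.Initialize(conf, true)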
+
+// Connection creates a database connection.
+func (c *mongoDBConnectionProducer) Connection() (interface{}, error) {
+ if !c.Initialized {
+ return nil, connutil.ErrNotInitialized
+ }
+
+ if c.session != nil {
+ return c.session, nil
+ }
+
+ dialInfo, err := parseMongoURL(c.ConnectionURL)
+ if err != nil {
+ return nil, err
+ }
+
+ c.session, err = mgo.DialWithInfo(dialInfo)
+ if err != nil {
+ return nil, err
+ }
+ c.session.SetSyncTimeout(1 * time.Minute)
+ c.session.SetSocketTimeout(1 * time.Minute)
+
+ // Hand the established session back to the caller rather than nil,
+ // matching the doc comment above.
+ return c.session, nil
+}
+
+// Close terminates the database connection.
+func (c *mongoDBConnectionProducer) Close() error {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.session != nil {
+ c.session.Close()
+ }
+
+ c.session = nil
+
+ return nil
+}
+
+func parseMongoURL(rawURL string) (*mgo.DialInfo, error) {
+ parsedURL, err := url.Parse(rawURL)
+ if err != nil {
+ return nil, err
+ }
+
+ info := mgo.DialInfo{
+ Addrs: strings.Split(parsedURL.Host, ","),
+ Database: strings.TrimPrefix(parsedURL.Path, "/"),
+ Timeout: 10 * time.Second,
+ }
+
+ if parsedURL.User != nil {
+ info.Username = parsedURL.User.Username()
+ info.Password, _ = parsedURL.User.Password()
+ }
+
+ query := parsedURL.Query()
+ for key, values := range query {
+ var value string
+ if len(values) > 0 {
+ value = values[0]
+ }
+
+ switch key {
+ case "authSource":
+ info.Source = value
+ case "authMechanism":
+ info.Mechanism = value
+ case "gssapiServiceName":
+ info.Service = value
+ case "replicaSet":
+ info.ReplicaSetName = value
+ case "maxPoolSize":
+ poolLimit, err := strconv.Atoi(value)
+ if err != nil {
+ return nil, errors.New("bad value for maxPoolSize: " + value)
+ }
+ info.PoolLimit = poolLimit
+ case "ssl":
+ // Unfortunately, mgo doesn't support the ssl parameter in its MongoDB URI parsing logic, so we have to handle that
+ // ourselves. See https://github.com/go-mgo/mgo/issues/84
+ ssl, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, errors.New("bad value for ssl: " + value)
+ }
+ if ssl {
+ info.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
+ return tls.Dial("tcp", addr.String(), &tls.Config{})
+ }
+ }
+ case "connect":
+ if value == "direct" {
+ info.Direct = true
+ break
+ }
+ if value == "replicaSet" {
+ break
+ }
+ fallthrough
+ default:
+ return nil, errors.New("unsupported connection URL option: " + key + "=" + value)
+ }
+ }
+
+ return &info, nil
+}
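+
+// For illustration (host names and credentials assumed), a URL such as
+//
+//   mongodb://admin:pass@node1:27017,node2:27017/admin?replicaSet=rs0&maxPoolSize=10&ssl=true
+//
+// would parse to a DialInfo with Addrs [node1:27017 node2:27017], Database
+// "admin", Username "admin", Password "pass", ReplicaSetName "rs0", PoolLimit
+// 10, the default 10-second Timeout, and a TLS DialServer because ssl=true.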
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb-database-plugin/main.go
new file mode 100644
index 0000000..eedb0d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb-database-plugin/main.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/plugins/database/mongodb"
+)
+
+func main() {
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(os.Args[1:])
+
+ err := mongodb.Run(apiClientMeta.GetTLSConfig())
+ if err != nil {
+ log.Println(err)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb.go
new file mode 100644
index 0000000..52671da
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb.go
@@ -0,0 +1,204 @@
+package mongodb
+
+import (
+ "io"
+ "strings"
+ "time"
+
+ "encoding/json"
+
+ "fmt"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/plugins"
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+ "github.com/hashicorp/vault/plugins/helper/database/credsutil"
+ "github.com/hashicorp/vault/plugins/helper/database/dbutil"
+ "gopkg.in/mgo.v2"
+)
+
+const mongoDBTypeName = "mongodb"
+
+// MongoDB is an implementation of Database interface
+type MongoDB struct {
+ connutil.ConnectionProducer
+ credsutil.CredentialsProducer
+}
+
+// New returns a new MongoDB instance
+func New() (interface{}, error) {
+ connProducer := &mongoDBConnectionProducer{}
+ connProducer.Type = mongoDBTypeName
+
+ credsProducer := &credsutil.SQLCredentialsProducer{
+ DisplayNameLen: 15,
+ RoleNameLen: 15,
+ UsernameLen: 100,
+ Separator: "-",
+ }
+
+ dbType := &MongoDB{
+ ConnectionProducer: connProducer,
+ CredentialsProducer: credsProducer,
+ }
+ return dbType, nil
+}
+
+// Run instantiates a MongoDB object, and runs the RPC server for the plugin
+func Run(apiTLSConfig *api.TLSConfig) error {
+ dbType, err := New()
+ if err != nil {
+ return err
+ }
+
+ plugins.Serve(dbType.(*MongoDB), apiTLSConfig)
+
+ return nil
+}
+
+// Type returns the TypeName for this backend
+func (m *MongoDB) Type() (string, error) {
+ return mongoDBTypeName, nil
+}
+
+func (m *MongoDB) getConnection() (*mgo.Session, error) {
+ session, err := m.Connection()
+ if err != nil {
+ return nil, err
+ }
+
+ return session.(*mgo.Session), nil
+}
+
+// CreateUser generates the username/password on the underlying secret backend as instructed by
+// the CreationStatement provided. The creation statement is a JSON blob with a db value
+// and an array of role documents, each holding a role and an optional db value. This array is
+// normalized to the format specified in the MongoDB docs:
+// https://docs.mongodb.com/manual/reference/command/createUser/#dbcmd.createUser
+//
+// JSON Example:
+// { "db": "admin", "roles": [{ "role": "readWrite" }, {"role": "read", "db": "foo"}] }
+func (m *MongoDB) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
+ // Grab the lock
+ m.Lock()
+ defer m.Unlock()
+
+ if statements.CreationStatements == "" {
+ return "", "", dbutil.ErrEmptyCreationStatement
+ }
+
+ session, err := m.getConnection()
+ if err != nil {
+ return "", "", err
+ }
+
+ username, err = m.GenerateUsername(usernameConfig)
+ if err != nil {
+ return "", "", err
+ }
+
+ password, err = m.GeneratePassword()
+ if err != nil {
+ return "", "", err
+ }
+
+ // Unmarshal statements.CreationStatements into mongodbRoles
+ var mongoCS mongoDBStatement
+ err = json.Unmarshal([]byte(statements.CreationStatements), &mongoCS)
+ if err != nil {
+ return "", "", err
+ }
+
+ // Default to "admin" if no db provided
+ if mongoCS.DB == "" {
+ mongoCS.DB = "admin"
+ }
+
+ if len(mongoCS.Roles) == 0 {
+ return "", "", fmt.Errorf("roles array is required in creation statement")
+ }
+
+ createUserCmd := createUserCommand{
+ Username: username,
+ Password: password,
+ Roles: mongoCS.Roles.toStandardRolesArray(),
+ }
+
+ err = session.DB(mongoCS.DB).Run(createUserCmd, nil)
+ switch {
+ case err == nil:
+ case err == io.EOF, strings.Contains(err.Error(), "EOF"):
+ if err := m.ConnectionProducer.Close(); err != nil {
+ return "", "", errwrap.Wrapf("error closing EOF'd mongo connection: {{err}}", err)
+ }
+ session, err := m.getConnection()
+ if err != nil {
+ return "", "", err
+ }
+ err = session.DB(mongoCS.DB).Run(createUserCmd, nil)
+ if err != nil {
+ return "", "", err
+ }
+ default:
+ return "", "", err
+ }
+
+ return username, password, nil
+}
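+
+// As a rough sketch of what the code above sends: with the example creation
+// statement from the doc comment, the createUserCommand (see the bson tags in
+// util.go) marshals to approximately
+//
+//   { "createUser": "<username>", "pwd": "<password>",
+//     "roles": ["readWrite", { "role": "read", "db": "foo" }] }
+//
+// and is run against the database named by the statement's db field ("admin" above).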
+
+// RenewUser is not supported on MongoDB, so this is a no-op.
+func (m *MongoDB) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
+ // NOOP
+ return nil
+}
+
+// RevokeUser drops the specified user from the authentication database. If no
+// database is provided in the revocation statement, the default "admin"
+// authentication database is assumed.
+func (m *MongoDB) RevokeUser(statements dbplugin.Statements, username string) error {
+ session, err := m.getConnection()
+ if err != nil {
+ return err
+ }
+
+ // If no revocation statements provided, pass in empty JSON
+ revocationStatement := statements.RevocationStatements
+ if revocationStatement == "" {
+ revocationStatement = `{}`
+ }
+
+ // Unmarshal revocation statements into mongodbRoles
+ var mongoCS mongoDBStatement
+ err = json.Unmarshal([]byte(revocationStatement), &mongoCS)
+ if err != nil {
+ return err
+ }
+
+ db := mongoCS.DB
+ // If db is not specified, use the default authenticationDatabase "admin"
+ if db == "" {
+ db = "admin"
+ }
+
+ err = session.DB(db).RemoveUser(username)
+ switch {
+ case err == nil, err == mgo.ErrNotFound:
+ case err == io.EOF, strings.Contains(err.Error(), "EOF"):
+ if err := m.ConnectionProducer.Close(); err != nil {
+ return errwrap.Wrapf("error closing EOF'd mongo connection: {{err}}", err)
+ }
+ session, err := m.getConnection()
+ if err != nil {
+ return err
+ }
+ err = session.DB(db).RemoveUser(username)
+ if err != nil {
+ return err
+ }
+ default:
+ return err
+ }
+
+ return nil
+}
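+
+// Note that CreateUser and RevokeUser above each retry their command exactly
+// once on an EOF error, closing the cached session and re-dialing first, since
+// a pooled mgo session can go stale between calls.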
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb_test.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb_test.go
new file mode 100644
index 0000000..95f6e90
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb_test.go
@@ -0,0 +1,193 @@
+package mongodb
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ mgo "gopkg.in/mgo.v2"
+
+ "strings"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ dockertest "gopkg.in/ory-am/dockertest.v3"
+)
+
+const testMongoDBRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }`
+
+func prepareMongoDBTestContainer(t *testing.T) (cleanup func(), retURL string) {
+ if os.Getenv("MONGODB_URL") != "" {
+ return func() {}, os.Getenv("MONGODB_URL")
+ }
+
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("Failed to connect to docker: %s", err)
+ }
+
+ resource, err := pool.Run("mongo", "latest", []string{})
+ if err != nil {
+ t.Fatalf("Could not start local mongo docker container: %s", err)
+ }
+
+ cleanup = func() {
+ err := pool.Purge(resource)
+ if err != nil {
+ t.Fatalf("Failed to cleanup local container: %s", err)
+ }
+ }
+
+ retURL = fmt.Sprintf("mongodb://localhost:%s", resource.GetPort("27017/tcp"))
+
+ // exponential backoff-retry
+ if err = pool.Retry(func() error {
+ var err error
+ dialInfo, err := parseMongoURL(retURL)
+ if err != nil {
+ return err
+ }
+
+ session, err := mgo.DialWithInfo(dialInfo)
+ if err != nil {
+ return err
+ }
+ session.SetSyncTimeout(1 * time.Minute)
+ session.SetSocketTimeout(1 * time.Minute)
+ return session.Ping()
+ }); err != nil {
+ t.Fatalf("Could not connect to mongo docker container: %s", err)
+ }
+
+ return
+}
+
+func TestMongoDB_Initialize(t *testing.T) {
+ cleanup, connURL := prepareMongoDBTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, err := New()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ db := dbRaw.(*MongoDB)
+ connProducer := db.ConnectionProducer.(*mongoDBConnectionProducer)
+
+ err = db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !connProducer.Initialized {
+ t.Fatal("Database should be initialized")
+ }
+
+ err = db.Close()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestMongoDB_CreateUser(t *testing.T) {
+ cleanup, connURL := prepareMongoDBTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, err := New()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ db := dbRaw.(*MongoDB)
+ err = db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testMongoDBRole,
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+}
+
+func TestMongoDB_RevokeUser(t *testing.T) {
+ cleanup, connURL := prepareMongoDBTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, err := New()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ db := dbRaw.(*MongoDB)
+ err = db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testMongoDBRole,
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ // Test default revocation statement
+ err = db.RevokeUser(statements, username)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, connURL, username, password); err == nil {
+ t.Fatal("Credentials were not revoked")
+ }
+}
+
+func testCredsExist(t testing.TB, connURL, username, password string) error {
+ connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1)
+ dialInfo, err := parseMongoURL(connURL)
+ if err != nil {
+ return err
+ }
+
+ session, err := mgo.DialWithInfo(dialInfo)
+ if err != nil {
+ return err
+ }
+ session.SetSyncTimeout(1 * time.Minute)
+ session.SetSocketTimeout(1 * time.Minute)
+ return session.Ping()
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/util.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/util.go
new file mode 100644
index 0000000..9004a3c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/util.go
@@ -0,0 +1,39 @@
+package mongodb
+
+type createUserCommand struct {
+ Username string `bson:"createUser"`
+ Password string `bson:"pwd"`
+ Roles []interface{} `bson:"roles"`
+}
+type mongodbRole struct {
+ Role string `json:"role" bson:"role"`
+ DB string `json:"db" bson:"db"`
+}
+
+type mongodbRoles []mongodbRole
+
+type mongoDBStatement struct {
+ DB string `json:"db"`
+ Roles mongodbRoles `json:"roles"`
+}
+
+// Convert array of role documents like:
+//
+// [ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ]
+//
+// into a "standard" MongoDB roles array containing both strings and role documents:
+//
+// [ "readWrite", { "role": "readWrite", "db": "test" } ]
+//
+// MongoDB's createUser command accepts the latter.
+func (roles mongodbRoles) toStandardRolesArray() []interface{} {
+ var standardRolesArray []interface{}
+ for _, role := range roles {
+ if role.DB == "" {
+ standardRolesArray = append(standardRolesArray, role.Role)
+ } else {
+ standardRolesArray = append(standardRolesArray, role)
+ }
+ }
+ return standardRolesArray
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql-database-plugin/main.go
new file mode 100644
index 0000000..9201b48
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql-database-plugin/main.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/plugins/database/mssql"
+)
+
+func main() {
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(os.Args[1:])
+
+ err := mssql.Run(apiClientMeta.GetTLSConfig())
+ if err != nil {
+ log.Println(err)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql.go b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql.go
new file mode 100644
index 0000000..7b920c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql.go
@@ -0,0 +1,321 @@
+package mssql
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+ "time"
+
+ _ "github.com/denisenkom/go-mssqldb"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/plugins"
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+ "github.com/hashicorp/vault/plugins/helper/database/credsutil"
+ "github.com/hashicorp/vault/plugins/helper/database/dbutil"
+)
+
+const msSQLTypeName = "mssql"
+
+// MSSQL is an implementation of Database interface
+type MSSQL struct {
+ connutil.ConnectionProducer
+ credsutil.CredentialsProducer
+}
+
+func New() (interface{}, error) {
+ connProducer := &connutil.SQLConnectionProducer{}
+ connProducer.Type = msSQLTypeName
+
+ credsProducer := &credsutil.SQLCredentialsProducer{
+ DisplayNameLen: 20,
+ RoleNameLen: 20,
+ UsernameLen: 128,
+ Separator: "-",
+ }
+
+ dbType := &MSSQL{
+ ConnectionProducer: connProducer,
+ CredentialsProducer: credsProducer,
+ }
+
+ return dbType, nil
+}
+
+// Run instantiates a MSSQL object, and runs the RPC server for the plugin
+func Run(apiTLSConfig *api.TLSConfig) error {
+ dbType, err := New()
+ if err != nil {
+ return err
+ }
+
+ plugins.Serve(dbType.(*MSSQL), apiTLSConfig)
+
+ return nil
+}
+
+// Type returns the TypeName for this backend
+func (m *MSSQL) Type() (string, error) {
+ return msSQLTypeName, nil
+}
+
+func (m *MSSQL) getConnection() (*sql.DB, error) {
+ db, err := m.Connection()
+ if err != nil {
+ return nil, err
+ }
+
+ return db.(*sql.DB), nil
+}
+
+// CreateUser generates the username/password on the underlying MSSQL secret backend as instructed by
+// the CreationStatement provided.
+func (m *MSSQL) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
+ // Grab the lock
+ m.Lock()
+ defer m.Unlock()
+
+ // Get the connection
+ db, err := m.getConnection()
+ if err != nil {
+ return "", "", err
+ }
+
+ if statements.CreationStatements == "" {
+ return "", "", dbutil.ErrEmptyCreationStatement
+ }
+
+ username, err = m.GenerateUsername(usernameConfig)
+ if err != nil {
+ return "", "", err
+ }
+
+ password, err = m.GeneratePassword()
+ if err != nil {
+ return "", "", err
+ }
+
+ expirationStr, err := m.GenerateExpiration(expiration)
+ if err != nil {
+ return "", "", err
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return "", "", err
+ }
+ defer tx.Rollback()
+
+ // Execute each query
+ for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
+ "name": username,
+ "password": password,
+ "expiration": expirationStr,
+ }))
+ if err != nil {
+ return "", "", err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return "", "", err
+ }
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ return "", "", err
+ }
+
+ return username, password, nil
+}
+
+// RenewUser is not supported on MSSQL, so this is a no-op.
+func (m *MSSQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
+ // NOOP
+ return nil
+}
+
+// RevokeUser attempts to drop the specified user. It first attempts to disable the login,
+// then kills pending connections from that user, and finally drops the user and login from the
+// database instance.
+func (m *MSSQL) RevokeUser(statements dbplugin.Statements, username string) error {
+ if statements.RevocationStatements == "" {
+ return m.revokeUserDefault(username)
+ }
+
+ // Get connection
+ db, err := m.getConnection()
+ if err != nil {
+ return err
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ // Execute each query
+ for _, query := range strutil.ParseArbitraryStringSlice(statements.RevocationStatements, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
+ "name": username,
+ }))
+ if err != nil {
+ return err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return err
+ }
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *MSSQL) revokeUserDefault(username string) error {
+ // Get connection
+ db, err := m.getConnection()
+ if err != nil {
+ return err
+ }
+
+ // First disable server login
+ disableStmt, err := db.Prepare(fmt.Sprintf("ALTER LOGIN [%s] DISABLE;", username))
+ if err != nil {
+ return err
+ }
+ defer disableStmt.Close()
+ if _, err := disableStmt.Exec(); err != nil {
+ return err
+ }
+
+ // Query for sessions for the login so that we can kill any outstanding
+ // sessions. There cannot be any active sessions before we drop the logins.
+ // This isn't done in a transaction because even if we fail along the way,
+ // we want to remove as much access as possible
+ sessionStmt, err := db.Prepare(fmt.Sprintf(
+ "SELECT session_id FROM sys.dm_exec_sessions WHERE login_name = '%s';", username))
+ if err != nil {
+ return err
+ }
+ defer sessionStmt.Close()
+
+ sessionRows, err := sessionStmt.Query()
+ if err != nil {
+ return err
+ }
+ defer sessionRows.Close()
+
+ var revokeStmts []string
+ for sessionRows.Next() {
+ var sessionID int
+ err = sessionRows.Scan(&sessionID)
+ if err != nil {
+ return err
+ }
+ revokeStmts = append(revokeStmts, fmt.Sprintf("KILL %d;", sessionID))
+ }
+
+ // Query for database users using an undocumented stored procedure for now, since
+ // it is the easiest way to get this information;
+ // we need to drop the database users before we can drop the login and the role
+ // This isn't done in a transaction because even if we fail along the way,
+ // we want to remove as much access as possible
+ stmt, err := db.Prepare(fmt.Sprintf("EXEC master.dbo.sp_msloginmappings '%s';", username))
+ if err != nil {
+ return err
+ }
+ defer stmt.Close()
+
+ rows, err := stmt.Query()
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var loginName, dbName, qUsername string
+ var aliasName sql.NullString
+ err = rows.Scan(&loginName, &dbName, &qUsername, &aliasName)
+ if err != nil {
+ return err
+ }
+ revokeStmts = append(revokeStmts, fmt.Sprintf(dropUserSQL, dbName, username, username))
+ }
+
+ // we do not stop on error, as we want to remove as
+ // many permissions as possible right now
+ var lastStmtError error
+ for _, query := range revokeStmts {
+ stmt, err := db.Prepare(query)
+ if err != nil {
+ lastStmtError = err
+ continue
+ }
+ defer stmt.Close()
+ _, err = stmt.Exec()
+ if err != nil {
+ lastStmtError = err
+ }
+ }
+
+ // can't drop if not all database users are dropped
+ if rows.Err() != nil {
+ return fmt.Errorf("cound not generate sql statements for all rows: %s", rows.Err())
+ }
+ if lastStmtError != nil {
+ return fmt.Errorf("could not perform all sql statements: %s", lastStmtError)
+ }
+
+ // Drop this login
+ stmt, err = db.Prepare(fmt.Sprintf(dropLoginSQL, username, username))
+ if err != nil {
+ return err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+const dropUserSQL = `
+USE [%s]
+IF EXISTS
+ (SELECT name
+ FROM sys.database_principals
+ WHERE name = N'%s')
+BEGIN
+ DROP USER [%s]
+END
+`
+
+const dropLoginSQL = `
+IF EXISTS
+ (SELECT name
+ FROM master.sys.server_principals
+ WHERE name = N'%s')
+BEGIN
+ DROP LOGIN [%s]
+END
+`
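+
+// The %s placeholders above are filled via fmt.Sprintf in revokeUserDefault:
+// dropUserSQL receives (database name, username, username) and dropLoginSQL
+// receives (username, username).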
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql_test.go b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql_test.go
new file mode 100644
index 0000000..5a00890
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql_test.go
@@ -0,0 +1,188 @@
+package mssql
+
+import (
+ "database/sql"
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+)
+
+var (
+ testMSSQLImagePull sync.Once
+)
+
+func TestMSSQL_Initialize(t *testing.T) {
+ if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
+ return
+ }
+ connURL := os.Getenv("MSSQL_URL")
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*MSSQL)
+
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
+ if !connProducer.Initialized {
+ t.Fatal("Database should be initalized")
+ }
+
+ err = db.Close()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Test decoding a string value for max_open_connections
+ connectionDetails = map[string]interface{}{
+ "connection_url": connURL,
+ "max_open_connections": "5",
+ }
+
+ err = db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestMSSQL_CreateUser(t *testing.T) {
+ if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
+ return
+ }
+ connURL := os.Getenv("MSSQL_URL")
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*MSSQL)
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ // Test with no configured Creation Statement
+ _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute))
+ if err == nil {
+ t.Fatal("Expected error when no creation statement is provided")
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testMSSQLRole,
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+}
+
+func TestMSSQL_RevokeUser(t *testing.T) {
+ if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
+ return
+ }
+ connURL := os.Getenv("MSSQL_URL")
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*MSSQL)
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testMSSQLRole,
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ // Test default revoke statements
+ err = db.RevokeUser(statements, username)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err == nil {
+ t.Fatal("Credentials were not revoked")
+ }
+
+ username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ // Test custom revoke statement
+ statements.RevocationStatements = testMSSQLDrop
+ err = db.RevokeUser(statements, username)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err == nil {
+ t.Fatal("Credentials were not revoked")
+ }
+}
+
+func testCredsExist(t testing.TB, connURL, username, password string) error {
+ // Log in with the new creds
+ parts := strings.Split(connURL, "@")
+ connURL = fmt.Sprintf("sqlserver://%s:%s@%s", username, password, parts[1])
+ db, err := sql.Open("mssql", connURL)
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+ return db.Ping()
+}
+
+const testMSSQLRole = `
+CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';
+CREATE USER [{{name}}] FOR LOGIN [{{name}}];
+GRANT SELECT, INSERT, UPDATE, DELETE ON SCHEMA::dbo TO [{{name}}];`
+
+const testMSSQLDrop = `
+DROP USER [{{name}}];
+DROP LOGIN [{{name}}];
+`
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-database-plugin/main.go
new file mode 100644
index 0000000..917f1b3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-database-plugin/main.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/plugins/database/mysql"
+)
+
+func main() {
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(os.Args[1:])
+
+ err := mysql.Run(apiClientMeta.GetTLSConfig())
+ if err != nil {
+ log.Println(err)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-legacy-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-legacy-database-plugin/main.go
new file mode 100644
index 0000000..2b950e0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-legacy-database-plugin/main.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/plugins/database/mysql"
+)
+
+func main() {
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(os.Args[1:])
+
+ err := mysql.RunLegacy(apiClientMeta.GetTLSConfig())
+ if err != nil {
+ log.Println(err)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go
new file mode 100644
index 0000000..297941c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go
@@ -0,0 +1,218 @@
+package mysql
+
+import (
+ "database/sql"
+ "strings"
+ "time"
+
+ _ "github.com/go-sql-driver/mysql"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/plugins"
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+ "github.com/hashicorp/vault/plugins/helper/database/credsutil"
+ "github.com/hashicorp/vault/plugins/helper/database/dbutil"
+)
+
+const (
+ defaultMysqlRevocationStmts = `
+ REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%';
+ DROP USER '{{name}}'@'%'
+ `
+ mySQLTypeName = "mysql"
+)
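+
+// Note: the default statements revoke and drop '{{name}}'@'%', so they only
+// clean up users created at the wildcard host; roles whose creation statements
+// use another host would need matching custom revocation statements.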
+
+var (
+ MetadataLen int = 10
+ LegacyMetadataLen int = 4
+ UsernameLen int = 32
+ LegacyUsernameLen int = 16
+)
+
+type MySQL struct {
+ connutil.ConnectionProducer
+ credsutil.CredentialsProducer
+}
+
+// New implements builtinplugins.BuiltinFactory
+func New(displayNameLen, roleNameLen, usernameLen int) func() (interface{}, error) {
+ return func() (interface{}, error) {
+ connProducer := &connutil.SQLConnectionProducer{}
+ connProducer.Type = mySQLTypeName
+
+ credsProducer := &credsutil.SQLCredentialsProducer{
+ DisplayNameLen: displayNameLen,
+ RoleNameLen: roleNameLen,
+ UsernameLen: usernameLen,
+ Separator: "-",
+ }
+
+ dbType := &MySQL{
+ ConnectionProducer: connProducer,
+ CredentialsProducer: credsProducer,
+ }
+
+ return dbType, nil
+ }
+}
+
+// Run instantiates a MySQL object, and runs the RPC server for the plugin
+func Run(apiTLSConfig *api.TLSConfig) error {
+ return runCommon(false, apiTLSConfig)
+}
+
+// RunLegacy instantiates a MySQL object in legacy mode, and runs the RPC server for the plugin
+func RunLegacy(apiTLSConfig *api.TLSConfig) error {
+ return runCommon(true, apiTLSConfig)
+}
+
+func runCommon(legacy bool, apiTLSConfig *api.TLSConfig) error {
+ var f func() (interface{}, error)
+ if legacy {
+ f = New(credsutil.NoneLength, LegacyMetadataLen, LegacyUsernameLen)
+ } else {
+ f = New(MetadataLen, MetadataLen, UsernameLen)
+ }
+ dbType, err := f()
+ if err != nil {
+ return err
+ }
+
+ plugins.Serve(dbType.(*MySQL), apiTLSConfig)
+
+ return nil
+}
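+
+// The legacy mode exists because MySQL 5.6 and earlier limit usernames to 16
+// characters; RunLegacy therefore builds the plugin with LegacyUsernameLen (16)
+// and LegacyMetadataLen (4), while Run uses the newer 32-character limit.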
+
+func (m *MySQL) Type() (string, error) {
+ return mySQLTypeName, nil
+}
+
+func (m *MySQL) getConnection() (*sql.DB, error) {
+ db, err := m.Connection()
+ if err != nil {
+ return nil, err
+ }
+
+ return db.(*sql.DB), nil
+}
+
+func (m *MySQL) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
+ // Grab the lock
+ m.Lock()
+ defer m.Unlock()
+
+ // Get the connection
+ db, err := m.getConnection()
+ if err != nil {
+ return "", "", err
+ }
+
+ if statements.CreationStatements == "" {
+ return "", "", dbutil.ErrEmptyCreationStatement
+ }
+
+ username, err = m.GenerateUsername(usernameConfig)
+ if err != nil {
+ return "", "", err
+ }
+
+ password, err = m.GeneratePassword()
+ if err != nil {
+ return "", "", err
+ }
+
+ expirationStr, err := m.GenerateExpiration(expiration)
+ if err != nil {
+ return "", "", err
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return "", "", err
+ }
+ defer tx.Rollback()
+
+ // Execute each query
+ for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
+ "name": username,
+ "password": password,
+ "expiration": expirationStr,
+ }))
+ if err != nil {
+ return "", "", err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return "", "", err
+ }
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ return "", "", err
+ }
+
+ return username, password, nil
+}
+
+// NOOP
+func (m *MySQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
+ return nil
+}
+
+func (m *MySQL) RevokeUser(statements dbplugin.Statements, username string) error {
+ // Grab the lock
+ m.Lock()
+ defer m.Unlock()
+
+ // Get the connection
+ db, err := m.getConnection()
+ if err != nil {
+ return err
+ }
+
+ revocationStmts := statements.RevocationStatements
+ // Use a default SQL statement for revocation if one cannot be fetched from the role
+ if revocationStmts == "" {
+ revocationStmts = defaultMysqlRevocationStmts
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ for _, query := range strutil.ParseArbitraryStringSlice(revocationStmts, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ // This is not a prepared statement because not all commands are supported
+ // 1295: This command is not supported in the prepared statement protocol yet
+ // Reference https://mariadb.com/kb/en/mariadb/prepare-statement/
+ query = strings.Replace(query, "{{name}}", username, -1)
+ _, err = tx.Exec(query)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql_test.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql_test.go
new file mode 100644
index 0000000..851bd02
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql_test.go
@@ -0,0 +1,326 @@
+package mysql
+
+import (
+ "database/sql"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+ "github.com/hashicorp/vault/plugins/helper/database/credsutil"
+ dockertest "gopkg.in/ory-am/dockertest.v3"
+)
+
+func prepareMySQLTestContainer(t *testing.T) (cleanup func(), retURL string) {
+ if os.Getenv("MYSQL_URL") != "" {
+ return func() {}, os.Getenv("MYSQL_URL")
+ }
+
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("Failed to connect to docker: %s", err)
+ }
+
+ resource, err := pool.Run("mysql", "latest", []string{"MYSQL_ROOT_PASSWORD=secret"})
+ if err != nil {
+ t.Fatalf("Could not start local MySQL docker container: %s", err)
+ }
+
+ cleanup = func() {
+ err := pool.Purge(resource)
+ if err != nil {
+ t.Fatalf("Failed to cleanup local container: %s", err)
+ }
+ }
+
+ retURL = fmt.Sprintf("root:secret@(localhost:%s)/mysql?parseTime=true", resource.GetPort("3306/tcp"))
+
+ // exponential backoff-retry
+ if err = pool.Retry(func() error {
+ var err error
+ var db *sql.DB
+ db, err = sql.Open("mysql", retURL)
+ if err != nil {
+ return err
+ }
+ return db.Ping()
+ }); err != nil {
+ t.Fatalf("Could not connect to MySQL docker container: %s", err)
+ }
+
+ return
+}
+
+func prepareMySQLLegacyTestContainer(t *testing.T) (cleanup func(), retURL string) {
+ if os.Getenv("MYSQL_URL") != "" {
+ return func() {}, os.Getenv("MYSQL_URL")
+ }
+
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("Failed to connect to docker: %s", err)
+ }
+
+ // MySQL 5.6 is the last MySQL version to limit usernames to 16 characters.
+ resource, err := pool.Run("mysql", "5.6", []string{"MYSQL_ROOT_PASSWORD=secret"})
+ if err != nil {
+ t.Fatalf("Could not start local MySQL docker container: %s", err)
+ }
+
+ cleanup = func() {
+ err := pool.Purge(resource)
+ if err != nil {
+ t.Fatalf("Failed to cleanup local container: %s", err)
+ }
+ }
+
+ retURL = fmt.Sprintf("root:secret@(localhost:%s)/mysql?parseTime=true", resource.GetPort("3306/tcp"))
+
+ // exponential backoff-retry
+ if err = pool.Retry(func() error {
+ var err error
+ var db *sql.DB
+ db, err = sql.Open("mysql", retURL)
+ if err != nil {
+ return err
+ }
+ return db.Ping()
+ }); err != nil {
+ t.Fatalf("Could not connect to MySQL docker container: %s", err)
+ }
+
+ return
+}
+
+func TestMySQL_Initialize(t *testing.T) {
+ cleanup, connURL := prepareMySQLTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ f := New(MetadataLen, MetadataLen, UsernameLen)
+ dbRaw, _ := f()
+ db := dbRaw.(*MySQL)
+ connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
+
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !connProducer.Initialized {
+ t.Fatal("Database should be initalized")
+ }
+
+ err = db.Close()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Test decoding a string value for max_open_connections
+ connectionDetails = map[string]interface{}{
+ "connection_url": connURL,
+ "max_open_connections": "5",
+ }
+
+ err = db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestMySQL_CreateUser(t *testing.T) {
+ cleanup, connURL := prepareMySQLTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ f := New(MetadataLen, MetadataLen, UsernameLen)
+ dbRaw, _ := f()
+ db := dbRaw.(*MySQL)
+
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test-long-displayname",
+ RoleName: "test-long-rolename",
+ }
+
+ // Test with no configured Creation Statement
+ _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute))
+ if err == nil {
+ t.Fatal("Expected error when no creation statement is provided")
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testMySQLRoleWildCard,
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ // Test a second time to make sure usernames don't collide
+ username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+}
+
+func TestMySQL_CreateUser_Legacy(t *testing.T) {
+ cleanup, connURL := prepareMySQLLegacyTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ f := New(credsutil.NoneLength, LegacyMetadataLen, LegacyUsernameLen)
+ dbRaw, _ := f()
+ db := dbRaw.(*MySQL)
+
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test-long-displayname",
+ RoleName: "test-long-rolename",
+ }
+
+ // Test with no configured Creation Statement
+ _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute))
+ if err == nil {
+ t.Fatal("Expected error when no creation statement is provided")
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testMySQLRoleWildCard,
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ // Test a second time to make sure usernames don't collide
+ username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+}
+
+func TestMySQL_RevokeUser(t *testing.T) {
+ cleanup, connURL := prepareMySQLTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ f := New(MetadataLen, MetadataLen, UsernameLen)
+ dbRaw, _ := f()
+ db := dbRaw.(*MySQL)
+
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testMySQLRoleWildCard,
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ // Test default revoke statements
+ err = db.RevokeUser(statements, username)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err == nil {
+ t.Fatal("Credentials were not revoked")
+ }
+
+ statements.CreationStatements = testMySQLRoleWildCard
+ username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ // Test custom revoke statements
+ statements.RevocationStatements = testMySQLRevocationSQL
+ err = db.RevokeUser(statements, username)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err == nil {
+ t.Fatal("Credentials were not revoked")
+ }
+}
+
+func testCredsExist(t testing.TB, connURL, username, password string) error {
+ // Log in with the new creds
+ connURL = strings.Replace(connURL, "root:secret", fmt.Sprintf("%s:%s", username, password), 1)
+ db, err := sql.Open("mysql", connURL)
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+ return db.Ping()
+}
+
+const testMySQLRoleWildCard = `
+CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
+GRANT SELECT ON *.* TO '{{name}}'@'%';
+`
+const testMySQLRevocationSQL = `
+REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%';
+DROP USER '{{name}}'@'%';
+`
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql-database-plugin/main.go
new file mode 100644
index 0000000..a3b1789
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql-database-plugin/main.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/plugins/database/postgresql"
+)
+
+func main() {
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(os.Args[1:])
+
+ err := postgresql.Run(apiClientMeta.GetTLSConfig())
+ if err != nil {
+ log.Println(err)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go
new file mode 100644
index 0000000..93fa8a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go
@@ -0,0 +1,372 @@
+package postgresql
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/plugins"
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+ "github.com/hashicorp/vault/plugins/helper/database/credsutil"
+ "github.com/hashicorp/vault/plugins/helper/database/dbutil"
+ "github.com/lib/pq"
+ _ "github.com/lib/pq"
+)
+
+const (
+ postgreSQLTypeName string = "postgres"
+ defaultPostgresRenewSQL = `
+ALTER ROLE "{{name}}" VALID UNTIL '{{expiration}}';
+`
+)
+
+// New implements builtinplugins.BuiltinFactory
+func New() (interface{}, error) {
+ connProducer := &connutil.SQLConnectionProducer{}
+ connProducer.Type = postgreSQLTypeName
+
+ credsProducer := &credsutil.SQLCredentialsProducer{
+ DisplayNameLen: 8,
+ RoleNameLen: 8,
+ UsernameLen: 63,
+ Separator: "-",
+ }
+
+ dbType := &PostgreSQL{
+ ConnectionProducer: connProducer,
+ CredentialsProducer: credsProducer,
+ }
+
+ return dbType, nil
+}
+
+// Run instantiates a PostgreSQL object, and runs the RPC server for the plugin
+func Run(apiTLSConfig *api.TLSConfig) error {
+ dbType, err := New()
+ if err != nil {
+ return err
+ }
+
+ plugins.Serve(dbType.(*PostgreSQL), apiTLSConfig)
+
+ return nil
+}
+
+type PostgreSQL struct {
+ connutil.ConnectionProducer
+ credsutil.CredentialsProducer
+}
+
+func (p *PostgreSQL) Type() (string, error) {
+ return postgreSQLTypeName, nil
+}
+
+func (p *PostgreSQL) getConnection() (*sql.DB, error) {
+ db, err := p.Connection()
+ if err != nil {
+ return nil, err
+ }
+
+ return db.(*sql.DB), nil
+}
+
+func (p *PostgreSQL) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
+ if statements.CreationStatements == "" {
+ return "", "", dbutil.ErrEmptyCreationStatement
+ }
+
+ // Grab the lock
+ p.Lock()
+ defer p.Unlock()
+
+ username, err = p.GenerateUsername(usernameConfig)
+ if err != nil {
+ return "", "", err
+ }
+
+ password, err = p.GeneratePassword()
+ if err != nil {
+ return "", "", err
+ }
+
+ expirationStr, err := p.GenerateExpiration(expiration)
+ if err != nil {
+ return "", "", err
+ }
+
+ // Get the connection
+ db, err := p.getConnection()
+ if err != nil {
+ return "", "", err
+
+ }
+
+ // Start a transaction
+ tx, err := db.Begin()
+ if err != nil {
+ return "", "", err
+
+ }
+ defer func() {
+ tx.Rollback()
+ }()
+
+ // Execute each query
+ for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
+ "name": username,
+ "password": password,
+ "expiration": expirationStr,
+ }))
+ if err != nil {
+ return "", "", err
+
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return "", "", err
+
+ }
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ return "", "", err
+
+ }
+
+ return username, password, nil
+}
+
+func (p *PostgreSQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
+ p.Lock()
+ defer p.Unlock()
+
+ renewStmts := statements.RenewStatements
+ if renewStmts == "" {
+ renewStmts = defaultPostgresRenewSQL
+ }
+
+ db, err := p.getConnection()
+ if err != nil {
+ return err
+ }
+
+ tx, err := db.Begin()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ tx.Rollback()
+ }()
+
+ expirationStr, err := p.GenerateExpiration(expiration)
+ if err != nil {
+ return err
+ }
+
+ for _, query := range strutil.ParseArbitraryStringSlice(renewStmts, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+ stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
+ "name": username,
+ "expiration": expirationStr,
+ }))
+ if err != nil {
+ return err
+ }
+
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return err
+ }
+ }
+
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *PostgreSQL) RevokeUser(statements dbplugin.Statements, username string) error {
+ // Grab the lock
+ p.Lock()
+ defer p.Unlock()
+
+ if statements.RevocationStatements == "" {
+ return p.defaultRevokeUser(username)
+ }
+
+ return p.customRevokeUser(username, statements.RevocationStatements)
+}
+
+func (p *PostgreSQL) customRevokeUser(username, revocationStmts string) error {
+ db, err := p.getConnection()
+ if err != nil {
+ return err
+ }
+
+ tx, err := db.Begin()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ tx.Rollback()
+ }()
+
+ for _, query := range strutil.ParseArbitraryStringSlice(revocationStmts, ";") {
+ query = strings.TrimSpace(query)
+ if len(query) == 0 {
+ continue
+ }
+
+ stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
+ "name": username,
+ }))
+ if err != nil {
+ return err
+ }
+ defer stmt.Close()
+
+ if _, err := stmt.Exec(); err != nil {
+ return err
+ }
+ }
+
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *PostgreSQL) defaultRevokeUser(username string) error {
+ db, err := p.getConnection()
+ if err != nil {
+ return err
+ }
+
+ // Check if the role exists
+ var exists bool
+ err = db.QueryRow("SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists)
+ if err != nil && err != sql.ErrNoRows {
+ return err
+ }
+
+ if !exists {
+ return nil
+ }
+
+ // Query for permissions; we need to revoke permissions before we can drop
+ // the role
+ // This isn't done in a transaction because even if we fail along the way,
+ // we want to remove as much access as possible
+ stmt, err := db.Prepare("SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;")
+ if err != nil {
+ return err
+ }
+ defer stmt.Close()
+
+ rows, err := stmt.Query(username)
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+
+ const initialNumRevocations = 16
+ revocationStmts := make([]string, 0, initialNumRevocations)
+ for rows.Next() {
+ var schema string
+ err = rows.Scan(&schema)
+ if err != nil {
+ // keep going; remove as many permissions as possible right now
+ continue
+ }
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`,
+ pq.QuoteIdentifier(schema),
+ pq.QuoteIdentifier(username)))
+
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ `REVOKE USAGE ON SCHEMA %s FROM %s;`,
+ pq.QuoteIdentifier(schema),
+ pq.QuoteIdentifier(username)))
+ }
+
+ // for good measure, revoke all privileges and usage on schema public
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`,
+ pq.QuoteIdentifier(username)))
+
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ "REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM %s;",
+ pq.QuoteIdentifier(username)))
+
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ "REVOKE USAGE ON SCHEMA public FROM %s;",
+ pq.QuoteIdentifier(username)))
+
+ // get the current database name so we can issue a REVOKE CONNECT for
+ // this username
+ var dbname sql.NullString
+ if err := db.QueryRow("SELECT current_database();").Scan(&dbname); err != nil {
+ return err
+ }
+
+ if dbname.Valid {
+ revocationStmts = append(revocationStmts, fmt.Sprintf(
+ `REVOKE CONNECT ON DATABASE %s FROM %s;`,
+ pq.QuoteIdentifier(dbname.String),
+ pq.QuoteIdentifier(username)))
+ }
+
+ // again, here, we do not stop on error, as we want to remove as
+ // many permissions as possible right now
+ var lastStmtError error
+ for _, query := range revocationStmts {
+ stmt, err := db.Prepare(query)
+ if err != nil {
+ lastStmtError = err
+ continue
+ }
+ defer stmt.Close()
+ _, err = stmt.Exec()
+ if err != nil {
+ lastStmtError = err
+ }
+ }
+
+ // can't drop if not all privileges are revoked
+ if rows.Err() != nil {
+ return fmt.Errorf("could not generate revocation statements for all rows: %s", rows.Err())
+ }
+ if lastStmtError != nil {
+ return fmt.Errorf("could not perform all revocation statements: %s", lastStmtError)
+ }
+
+ // Drop this user
+ stmt, err = db.Prepare(fmt.Sprintf(
+ `DROP ROLE IF EXISTS %s;`, pq.QuoteIdentifier(username)))
+ if err != nil {
+ return err
+ }
+ defer stmt.Close()
+ if _, err := stmt.Exec(); err != nil {
+ return err
+ }
+
+ return nil
+}
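+
+// In summary, the default revocation path revokes, in order: table privileges
+// and USAGE on every schema where the role holds column grants, then table,
+// sequence and USAGE privileges on schema public, then CONNECT on the current
+// database, and only then drops the role itself.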
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql_test.go b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql_test.go
new file mode 100644
index 0000000..a74abb4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql_test.go
@@ -0,0 +1,363 @@
+package postgresql
+
+import (
+ "database/sql"
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/plugins/helper/database/connutil"
+ dockertest "gopkg.in/ory-am/dockertest.v3"
+)
+
+var (
+ testPostgresImagePull sync.Once
+)
+
+func preparePostgresTestContainer(t *testing.T) (cleanup func(), retURL string) {
+ if os.Getenv("PG_URL") != "" {
+ return func() {}, os.Getenv("PG_URL")
+ }
+
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("Failed to connect to docker: %s", err)
+ }
+
+ resource, err := pool.Run("postgres", "latest", []string{"POSTGRES_PASSWORD=secret", "POSTGRES_DB=database"})
+ if err != nil {
+ t.Fatalf("Could not start local PostgreSQL docker container: %s", err)
+ }
+
+ cleanup = func() {
+ err := pool.Purge(resource)
+ if err != nil {
+ t.Fatalf("Failed to cleanup local container: %s", err)
+ }
+ }
+
+ retURL = fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp"))
+
+ // exponential backoff-retry
+ if err = pool.Retry(func() error {
+ var err error
+ var db *sql.DB
+ db, err = sql.Open("postgres", retURL)
+ if err != nil {
+ return err
+ }
+ return db.Ping()
+ }); err != nil {
+ t.Fatalf("Could not connect to PostgreSQL docker container: %s", err)
+ }
+
+ return
+}
+
+func TestPostgreSQL_Initialize(t *testing.T) {
+ cleanup, connURL := preparePostgresTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ "max_open_connections": 5,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*PostgreSQL)
+
+ connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
+
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !connProducer.Initialized {
+ t.Fatal("Database should be initalized")
+ }
+
+ err = db.Close()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Test decoding a string value for max_open_connections
+ connectionDetails = map[string]interface{}{
+ "connection_url": connURL,
+ "max_open_connections": "5",
+ }
+
+ err = db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestPostgreSQL_CreateUser(t *testing.T) {
+ cleanup, connURL := preparePostgresTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*PostgreSQL)
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ // Test with no configured Creation Statement
+ _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute))
+ if err == nil {
+ t.Fatal("Expected error when no creation statement is provided")
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testPostgresRole,
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ statements.CreationStatements = testPostgresReadOnlyRole
+ username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+}
+
+func TestPostgreSQL_RenewUser(t *testing.T) {
+ cleanup, connURL := preparePostgresTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*PostgreSQL)
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testPostgresRole,
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ err = db.RenewUser(statements, username, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Sleep longer than the initial expiration time
+ time.Sleep(2 * time.Second)
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+ statements.RenewStatements = defaultPostgresRenewSQL
+ username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ err = db.RenewUser(statements, username, time.Now().Add(time.Minute))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Sleep longer than the initial expiration time
+ time.Sleep(2 * time.Second)
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+}
+
+func TestPostgreSQL_RevokeUser(t *testing.T) {
+ cleanup, connURL := preparePostgresTestContainer(t)
+ defer cleanup()
+
+ connectionDetails := map[string]interface{}{
+ "connection_url": connURL,
+ }
+
+ dbRaw, _ := New()
+ db := dbRaw.(*PostgreSQL)
+ err := db.Initialize(connectionDetails, true)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ statements := dbplugin.Statements{
+ CreationStatements: testPostgresRole,
+ }
+
+ usernameConfig := dbplugin.UsernameConfig{
+ DisplayName: "test",
+ RoleName: "test",
+ }
+
+ username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ // Test default revoke statements
+ err = db.RevokeUser(statements, username)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err == nil {
+ t.Fatal("Credentials were not revoked")
+ }
+
+ username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err = testCredsExist(t, connURL, username, password); err != nil {
+ t.Fatalf("Could not connect with new credentials: %s", err)
+ }
+
+ // Test custom revoke statements
+ statements.RevocationStatements = defaultPostgresRevocationSQL
+ err = db.RevokeUser(statements, username)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if err := testCredsExist(t, connURL, username, password); err == nil {
+ t.Fatal("Credentials were not revoked")
+ }
+}
+
+func testCredsExist(t testing.TB, connURL, username, password string) error {
+ // Log in with the new creds
+ connURL = strings.Replace(connURL, "postgres:secret", fmt.Sprintf("%s:%s", username, password), 1)
+ db, err := sql.Open("postgres", connURL)
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+ return db.Ping()
+}
+
+const testPostgresRole = `
+CREATE ROLE "{{name}}" WITH
+ LOGIN
+ PASSWORD '{{password}}'
+ VALID UNTIL '{{expiration}}';
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
+`
+
+const testPostgresReadOnlyRole = `
+CREATE ROLE "{{name}}" WITH
+ LOGIN
+ PASSWORD '{{password}}'
+ VALID UNTIL '{{expiration}}';
+GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";
+GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}";
+`
+
+const testPostgresBlockStatementRole = `
+DO $$
+BEGIN
+ IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
+ CREATE ROLE "foo-role";
+ CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
+ ALTER ROLE "foo-role" SET search_path = foo;
+ GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
+ GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
+ END IF;
+END
+$$
+
+CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';
+GRANT "foo-role" TO "{{name}}";
+ALTER ROLE "{{name}}" SET search_path = foo;
+GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";
+`
+
+var testPostgresBlockStatementRoleSlice = []string{
+ `
+DO $$
+BEGIN
+ IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
+ CREATE ROLE "foo-role";
+ CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
+ ALTER ROLE "foo-role" SET search_path = foo;
+ GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
+ GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
+ GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
+ END IF;
+END
+$$
+`,
+ `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`,
+ `GRANT "foo-role" TO "{{name}}";`,
+ `ALTER ROLE "{{name}}" SET search_path = foo;`,
+ `GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`,
+}
+
+const defaultPostgresRevocationSQL = `
+REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{name}}";
+REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{name}}";
+REVOKE USAGE ON SCHEMA public FROM "{{name}}";
+
+DROP ROLE IF EXISTS "{{name}}";
+`
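The PG_URL short-circuit in preparePostgresTestContainer means the suite can also run without Docker against an existing server. A sketch of that wiring (this TestMain and the URL are hypothetical, not part of the vendored code):

    // Hypothetical TestMain for this package: point the suite at an
    // already-running server instead of a dockertest container.
    func TestMain(m *testing.M) {
        if os.Getenv("PG_URL") == "" {
            _ = os.Setenv("PG_URL", "postgres://postgres:secret@localhost:5432/database?sslmode=disable")
        }
        os.Exit(m.Run())
    }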
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go
new file mode 100644
index 0000000..d36d571
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go
@@ -0,0 +1,21 @@
+package connutil
+
+import (
+ "errors"
+ "sync"
+)
+
+var (
+ ErrNotInitialized = errors.New("connection has not been initialized")
+)
+
+// ConnectionProducer can be used as an embedded interface in the Database
+// definition. It implements the methods dealing with individual database
+// connections and is used in all the builtin database types.
+type ConnectionProducer interface {
+ Close() error
+ Initialize(map[string]interface{}, bool) error
+ Connection() (interface{}, error)
+
+ sync.Locker
+}
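Any type that embeds a sync.Mutex (supplying the sync.Locker methods) and implements the three connection methods satisfies the interface. A minimal sketch, not part of the vendored code:

    // minimalProducer is a hypothetical, do-nothing ConnectionProducer
    // illustrating the contract; the embedded Mutex provides Lock/Unlock.
    type minimalProducer struct {
        initialized bool
        sync.Mutex
    }

    func (m *minimalProducer) Initialize(conf map[string]interface{}, verify bool) error {
        m.Lock()
        defer m.Unlock()
        m.initialized = true
        return nil
    }

    func (m *minimalProducer) Connection() (interface{}, error) {
        if !m.initialized {
            return nil, ErrNotInitialized
        }
        return struct{}{}, nil
    }

    func (m *minimalProducer) Close() error { return nil }

    var _ ConnectionProducer = (*minimalProducer)(nil) // compile-time check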
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go
new file mode 100644
index 0000000..c325cbc
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go
@@ -0,0 +1,138 @@
+package connutil
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/vault/helper/parseutil"
+ "github.com/mitchellh/mapstructure"
+)
+
+// SQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases
+type SQLConnectionProducer struct {
+ ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"`
+ MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"`
+ MaxIdleConnections int `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"`
+ MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" structs:"max_connection_lifetime" mapstructure:"max_connection_lifetime"`
+
+ Type string
+ maxConnectionLifetime time.Duration
+ Initialized bool
+ db *sql.DB
+ sync.Mutex
+}
+
+func (c *SQLConnectionProducer) Initialize(conf map[string]interface{}, verifyConnection bool) error {
+ c.Lock()
+ defer c.Unlock()
+
+ err := mapstructure.WeakDecode(conf, c)
+ if err != nil {
+ return err
+ }
+
+ if len(c.ConnectionURL) == 0 {
+ return fmt.Errorf("connection_url cannot be empty")
+ }
+
+ if c.MaxOpenConnections == 0 {
+ c.MaxOpenConnections = 2
+ }
+
+ if c.MaxIdleConnections == 0 {
+ c.MaxIdleConnections = c.MaxOpenConnections
+ }
+ if c.MaxIdleConnections > c.MaxOpenConnections {
+ c.MaxIdleConnections = c.MaxOpenConnections
+ }
+ if c.MaxConnectionLifetimeRaw == nil {
+ c.MaxConnectionLifetimeRaw = "0s"
+ }
+
+ c.maxConnectionLifetime, err = parseutil.ParseDurationSecond(c.MaxConnectionLifetimeRaw)
+ if err != nil {
+ return fmt.Errorf("invalid max_connection_lifetime: %s", err)
+ }
+
+ // Set initialized to true at this point since all fields are set,
+ // and the connection can be established at a later time.
+ c.Initialized = true
+
+ if verifyConnection {
+ if _, err := c.Connection(); err != nil {
+ return fmt.Errorf("error verifying connection: %s", err)
+ }
+
+ if err := c.db.Ping(); err != nil {
+ return fmt.Errorf("error verifying connection: %s", err)
+ }
+ }
+
+ return nil
+}
+
+func (c *SQLConnectionProducer) Connection() (interface{}, error) {
+ if !c.Initialized {
+ return nil, ErrNotInitialized
+ }
+
+ // If we already have a DB, test it and return
+ if c.db != nil {
+ if err := c.db.Ping(); err == nil {
+ return c.db, nil
+ }
+ // If the ping was unsuccessful, close it and ignore errors as we'll be
+ // re-establishing anyway
+ c.db.Close()
+ }
+
+ // For mssql backend, switch to sqlserver instead
+ dbType := c.Type
+ if c.Type == "mssql" {
+ dbType = "sqlserver"
+ }
+
+ // Otherwise, attempt to make connection
+ conn := c.ConnectionURL
+
+ // Ensure the timezone is set to UTC for all connections
+ if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") {
+ if strings.Contains(conn, "?") {
+ conn += "&timezone=utc"
+ } else {
+ conn += "?timezone=utc"
+ }
+ }
+
+ var err error
+ c.db, err = sql.Open(dbType, conn)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set some connection pool settings. We don't need much of this,
+ // since the request rate shouldn't be high.
+ c.db.SetMaxOpenConns(c.MaxOpenConnections)
+ c.db.SetMaxIdleConns(c.MaxIdleConnections)
+ c.db.SetConnMaxLifetime(c.maxConnectionLifetime)
+
+ return c.db, nil
+}
+
+// Close attempts to close the connection
+func (c *SQLConnectionProducer) Close() error {
+ // Grab the write lock
+ c.Lock()
+ defer c.Unlock()
+
+ if c.db != nil {
+ c.db.Close()
+ }
+
+ c.db = nil
+
+ return nil
+}
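Taken together, a sketch of driving SQLConnectionProducer directly (the URL is a placeholder; database/sql, log, and a lib/pq import are assumed, since sql.Open needs a registered "postgres" driver):

    p := &connutil.SQLConnectionProducer{Type: "postgres"}
    err := p.Initialize(map[string]interface{}{
        "connection_url":          "postgres://postgres:secret@localhost:5432/database?sslmode=disable",
        "max_open_connections":    "5",   // strings decode too, via mapstructure.WeakDecode
        "max_connection_lifetime": "30s", // parsed by parseutil.ParseDurationSecond
    }, true) // verifyConnection: open and ping immediately
    if err != nil {
        log.Fatal(err)
    }
    raw, err := p.Connection() // returns interface{}; here it wraps a *sql.DB
    if err != nil {
        log.Fatal(err)
    }
    db := raw.(*sql.DB)
    defer p.Close()
    _ = db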
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go
new file mode 100644
index 0000000..8ce3b5e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go
@@ -0,0 +1,87 @@
+package credsutil
+
+import (
+ "crypto/rand"
+ "time"
+
+ "fmt"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+)
+
+// CredentialsProducer can be used as an embedded interface in the Database
+// definition. It implements the methods for generating user information for a
+// particular database type and is used in all the builtin database types.
+type CredentialsProducer interface {
+ GenerateUsername(usernameConfig dbplugin.UsernameConfig) (string, error)
+ GeneratePassword() (string, error)
+ GenerateExpiration(ttl time.Time) (string, error)
+}
+
+const (
+ reqStr = `A1a-`
+ minStrLen = 10
+)
+
+// RandomAlphaNumeric returns a random string of characters [A-Za-z0-9-]
+// of the provided length. When prependA1a is true, the first four
+// characters are the fixed prefix "A1a-", which guarantees the result
+// meets common password character-class requirements. A minimum length
+// of 10 characters is enforced.
+func RandomAlphaNumeric(length int, prependA1a bool) (string, error) {
+ if length < minStrLen {
+ return "", fmt.Errorf("minimum length of %d is required", minStrLen)
+ }
+
+ var size int
+ var retBytes []byte
+ if prependA1a {
+ size = len(reqStr)
+ retBytes = make([]byte, length-size)
+ // Enforce alphanumeric requirements
+ retBytes = append([]byte(reqStr), retBytes...)
+ } else {
+ retBytes = make([]byte, length)
+ }
+
+ for size < length {
+ // Extend the len of the random byte slice to lower odds of having to
+ // re-roll.
+ c := length + len(reqStr)
+ bArr := make([]byte, c)
+ _, err := rand.Read(bArr)
+ if err != nil {
+ return "", err
+ }
+
+ for _, b := range bArr {
+ if size == length {
+ break
+ }
+
+ /**
+ * Each byte will be in [0, 256), but we only care about:
+ *
+ * [48, 57] 0-9
+ * [65, 90] A-Z
+ * [97, 122] a-z
+ *
+ * All allowed values are at most 122, which fits in seven bits, so the
+ * high bit of an acceptable byte is always zero. Halving each random
+ * byte (right bit shift of 1) therefore lowers the odds of a re-roll.
+ */
+
+ b = b >> 1
+ // Bitwise OR with 0x30 raises the minimum to 48, further reducing re-rolls
+ b |= 0x30
+
+ // The byte is any of 0-9 A-Z a-z
+ byteIsAllowable := (b >= 48 && b <= 57) || (b >= 65 && b <= 90) || (b >= 97 && b <= 122)
+ if byteIsAllowable {
+ retBytes[size] = b
+ size++
+ }
+ }
+ }
+
+ return string(retBytes), nil
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil_test.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil_test.go
new file mode 100644
index 0000000..e094719
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil_test.go
@@ -0,0 +1,40 @@
+package credsutil
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestRandomAlphaNumeric(t *testing.T) {
+ s, err := RandomAlphaNumeric(10, true)
+ if err != nil {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+ if len(s) != 10 {
+ t.Fatalf("Unexpected length of string, expected 10, got string: %s", s)
+ }
+
+ s, err = RandomAlphaNumeric(20, true)
+ if err != nil {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+ if len(s) != 20 {
+ t.Fatalf("Unexpected length of string, expected 20, got string: %s", s)
+ }
+
+ if !strings.Contains(s, reqStr) {
+ t.Fatalf("Expected %s to contain %s", s, reqStr)
+ }
+
+ s, err = RandomAlphaNumeric(20, false)
+ if err != nil {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+ if len(s) != 20 {
+ t.Fatalf("Unexpected length of string, expected 20, got string: %s", s)
+ }
+
+ if strings.Contains(s, reqStr) {
+ t.Fatalf("Expected %s not to contain %s", s, reqStr)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go
new file mode 100644
index 0000000..af9a746
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go
@@ -0,0 +1,72 @@
+package credsutil
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+)
+
+const (
+ NoneLength int = -1
+)
+
+// SQLCredentialsProducer implements CredentialsProducer and provides a generic credentials producer for most sql database types.
+type SQLCredentialsProducer struct {
+ DisplayNameLen int
+ RoleNameLen int
+ UsernameLen int
+ Separator string
+}
+
+func (scp *SQLCredentialsProducer) GenerateUsername(config dbplugin.UsernameConfig) (string, error) {
+ username := "v"
+
+ displayName := config.DisplayName
+ if scp.DisplayNameLen > 0 && len(displayName) > scp.DisplayNameLen {
+ displayName = displayName[:scp.DisplayNameLen]
+ } else if scp.DisplayNameLen == NoneLength {
+ displayName = ""
+ }
+
+ if len(displayName) > 0 {
+ username = fmt.Sprintf("%s%s%s", username, scp.Separator, displayName)
+ }
+
+ roleName := config.RoleName
+ if scp.RoleNameLen > 0 && len(roleName) > scp.RoleNameLen {
+ roleName = roleName[:scp.RoleNameLen]
+ } else if scp.RoleNameLen == NoneLength {
+ roleName = ""
+ }
+
+ if len(roleName) > 0 {
+ username = fmt.Sprintf("%s%s%s", username, scp.Separator, roleName)
+ }
+
+ userUUID, err := RandomAlphaNumeric(20, false)
+ if err != nil {
+ return "", err
+ }
+
+ username = fmt.Sprintf("%s%s%s", username, scp.Separator, userUUID)
+ username = fmt.Sprintf("%s%s%s", username, scp.Separator, fmt.Sprint(time.Now().UTC().Unix()))
+ if scp.UsernameLen > 0 && len(username) > scp.UsernameLen {
+ username = username[:scp.UsernameLen]
+ }
+
+ return username, nil
+}
+
+func (scp *SQLCredentialsProducer) GeneratePassword() (string, error) {
+ password, err := RandomAlphaNumeric(20, true)
+ if err != nil {
+ return "", err
+ }
+
+ return password, nil
+}
+
+func (scp *SQLCredentialsProducer) GenerateExpiration(ttl time.Time) (string, error) {
+ return ttl.Format("2006-01-02 15:04:05-0700"), nil
+}
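A sketch of the resulting username shape (the 63-character cap is an illustrative choice, e.g. for PostgreSQL identifier limits; it is not mandated by this file):

    scp := &credsutil.SQLCredentialsProducer{
        DisplayNameLen: 8,
        RoleNameLen:    8,
        UsernameLen:    63,
        Separator:      "-",
    }
    user, _ := scp.GenerateUsername(dbplugin.UsernameConfig{
        DisplayName: "token",
        RoleName:    "readonly",
    })
    // user looks like "v-token-readonly-<20 random chars>-<unix time>",
    // truncated to UsernameLen; pass NoneLength to drop a segment entirely.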
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go
new file mode 100644
index 0000000..e80273b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go
@@ -0,0 +1,20 @@
+package dbutil
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+var (
+ ErrEmptyCreationStatement = errors.New("empty creation statements")
+)
+
+// QueryHelper expands each "{{key}}" placeholder in tpl with its value from data.
+func QueryHelper(tpl string, data map[string]string) string {
+ for k, v := range data {
+ tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
+ }
+
+ return tpl
+}
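For example, applied to the creation-statement templates used throughout the database plugins:

    stmt := dbutil.QueryHelper(
        `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`,
        map[string]string{
            "name":       "v-token-readonly-abc123",
            "password":   "A1a-examplepassword",
            "expiration": "2006-01-02 15:04:05-0700",
        })
    // Every "{{key}}" occurrence is replaced; placeholders with no
    // entry in the map are left as-is.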
diff --git a/vendor/github.com/hashicorp/vault/plugins/serve.go b/vendor/github.com/hashicorp/vault/plugins/serve.go
new file mode 100644
index 0000000..a40fc5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/plugins/serve.go
@@ -0,0 +1,31 @@
+package plugins
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
+ "github.com/hashicorp/vault/helper/pluginutil"
+)
+
+// Serve is used to start a plugin's RPC server. It takes an interface that must
+// implement a known plugin interface to vault and an optional api.TLSConfig for
+// use during the initial unwrap request to vault. The api config is particularly
+// useful when vault is set up to require client cert checking.
+func Serve(plugin interface{}, tlsConfig *api.TLSConfig) {
+ tlsProvider := pluginutil.VaultPluginTLSProvider(tlsConfig)
+
+ err := pluginutil.OptionallyEnableMlock()
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ switch p := plugin.(type) {
+ case dbplugin.Database:
+ dbplugin.Serve(p, tlsProvider)
+ default:
+ fmt.Println("Unsupported plugin type")
+ }
+
+}
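A plugin binary, then, is just a main that hands its implementation to Serve. A sketch using the postgresql plugin above (the nil TLS config assumes client-cert checking is not enforced):

    package main

    import (
        "log"

        "github.com/hashicorp/vault/plugins"
        "github.com/hashicorp/vault/plugins/database/postgresql"
    )

    func main() {
        db, err := postgresql.New()
        if err != nil {
            log.Fatal(err)
        }
        // Serve dispatches on the concrete type; dbplugin.Database
        // implementations get a database RPC server.
        plugins.Serve(db, nil)
    }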
diff --git a/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile b/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile
index 7126b63..c5104f3 100644
--- a/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile
+++ b/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile
@@ -1,6 +1,6 @@
# Adapted from tcnksm/dockerfile-gox -- thanks!
-FROM debian:jessie
+FROM debian:stable
RUN apt-get update -y && apt-get install --no-install-recommends -y -q \
curl \
@@ -10,7 +10,7 @@ RUN apt-get update -y && apt-get install --no-install-recommends -y -q \
git mercurial bzr \
&& rm -rf /var/lib/apt/lists/*
-ENV GOVERSION 1.8.1
+ENV GOVERSION 1.9
RUN mkdir /goroot && mkdir /gopath
RUN curl https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz \
| tar xvzf - -C /goroot --strip-components=1
diff --git a/vendor/github.com/hashicorp/vault/scripts/gofmtcheck.sh b/vendor/github.com/hashicorp/vault/scripts/gofmtcheck.sh
new file mode 100755
index 0000000..574f4d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/scripts/gofmtcheck.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+echo "==> Checking that code complies with gofmt requirements..."
+
+gofmt_files=$(gofmt -l `find . -name '*.go' | grep -v vendor`)
+if [[ -n ${gofmt_files} ]]; then
+ echo 'gofmt needs running on the following files:'
+ echo "${gofmt_files}"
+ echo "You can use the command: \`make fmt\` to reformat code."
+ exit 1
+fi
diff --git a/vendor/github.com/hashicorp/vault/scripts/update_deps.sh b/vendor/github.com/hashicorp/vault/scripts/update_deps.sh
index 80241fc..82b85b0 100755
--- a/vendor/github.com/hashicorp/vault/scripts/update_deps.sh
+++ b/vendor/github.com/hashicorp/vault/scripts/update_deps.sh
@@ -32,4 +32,8 @@ govendor init
echo "Fetching deps, will take some time..."
govendor fetch +missing
+govendor remove github.com/Sirupsen/logrus
+cd vendor
+find -type f | grep '.go' | xargs sed -i -e 's/Sirupsen/sirupsen/'
+
echo "Done; to commit run \n\ncd ${GOPATH}/src/github.com/hashicorp/${TOOL}\n"
diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf b/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf
index ba0ee3a..cef4002 100644
--- a/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf
+++ b/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf
@@ -3,7 +3,7 @@
//-------------------------------------------------------------------
variable "download-url" {
- default = "https://releases.hashicorp.com/vault/0.7.0/vault_0.7.0_linux_amd64.zip"
+ default = "https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_linux_amd64.zip"
description = "URL to download Vault"
}
diff --git a/vendor/github.com/hashicorp/vault/vault/acl.go b/vendor/github.com/hashicorp/vault/vault/acl.go
index 550e0df..7360178 100644
--- a/vendor/github.com/hashicorp/vault/vault/acl.go
+++ b/vendor/github.com/hashicorp/vault/vault/acl.go
@@ -5,6 +5,7 @@ import (
"strings"
"github.com/armon/go-radix"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/logical"
)
@@ -51,7 +52,11 @@ func NewACL(policies []*Policy) (*ACL, error) {
// Check for an existing policy
raw, ok := tree.Get(pc.Prefix)
if !ok {
- tree.Insert(pc.Prefix, pc.Permissions)
+ clonedPerms, err := pc.Permissions.Clone()
+ if err != nil {
+ return nil, errwrap.Wrapf("error cloning ACL permissions: {{err}}", err)
+ }
+ tree.Insert(pc.Prefix, clonedPerms)
continue
}
@@ -66,15 +71,15 @@ func NewACL(policies []*Policy) (*ACL, error) {
case pc.Permissions.CapabilitiesBitmap&DenyCapabilityInt > 0:
// If this new policy explicitly denies, only save the deny value
- pc.Permissions.CapabilitiesBitmap = DenyCapabilityInt
- pc.Permissions.AllowedParameters = nil
- pc.Permissions.DeniedParameters = nil
+ existingPerms.CapabilitiesBitmap = DenyCapabilityInt
+ existingPerms.AllowedParameters = nil
+ existingPerms.DeniedParameters = nil
goto INSERT
default:
// Insert the capabilities in this new policy into the existing
// value
- pc.Permissions.CapabilitiesBitmap = existingPerms.CapabilitiesBitmap | pc.Permissions.CapabilitiesBitmap
+ existingPerms.CapabilitiesBitmap = existingPerms.CapabilitiesBitmap | pc.Permissions.CapabilitiesBitmap
}
// Note: In these stanzas, we're preferring minimum lifetimes. So
@@ -85,59 +90,58 @@ func NewACL(policies []*Policy) (*ACL, error) {
// If we have an existing max, and we either don't have a current
// max, or the current is greater than the previous, use the
// existing.
- if existingPerms.MaxWrappingTTL > 0 &&
- (pc.Permissions.MaxWrappingTTL == 0 ||
- existingPerms.MaxWrappingTTL < pc.Permissions.MaxWrappingTTL) {
- pc.Permissions.MaxWrappingTTL = existingPerms.MaxWrappingTTL
+ if pc.Permissions.MaxWrappingTTL > 0 &&
+ (existingPerms.MaxWrappingTTL == 0 ||
+ pc.Permissions.MaxWrappingTTL < existingPerms.MaxWrappingTTL) {
+ existingPerms.MaxWrappingTTL = pc.Permissions.MaxWrappingTTL
}
// If we have an existing min, and we either don't have a current
// min, or the current is greater than the previous, use the
// existing
- if existingPerms.MinWrappingTTL > 0 &&
- (pc.Permissions.MinWrappingTTL == 0 ||
- existingPerms.MinWrappingTTL < pc.Permissions.MinWrappingTTL) {
- pc.Permissions.MinWrappingTTL = existingPerms.MinWrappingTTL
+ if pc.Permissions.MinWrappingTTL > 0 &&
+ (existingPerms.MinWrappingTTL == 0 ||
+ pc.Permissions.MinWrappingTTL < existingPerms.MinWrappingTTL) {
+ existingPerms.MinWrappingTTL = pc.Permissions.MinWrappingTTL
}
- if len(existingPerms.AllowedParameters) > 0 {
- if pc.Permissions.AllowedParameters == nil {
- pc.Permissions.AllowedParameters = existingPerms.AllowedParameters
+ if len(pc.Permissions.AllowedParameters) > 0 {
+ if existingPerms.AllowedParameters == nil {
+ existingPerms.AllowedParameters = pc.Permissions.AllowedParameters
} else {
- for key, value := range existingPerms.AllowedParameters {
- pcValue, ok := pc.Permissions.AllowedParameters[key]
+ for key, value := range pc.Permissions.AllowedParameters {
+ pcValue, ok := existingPerms.AllowedParameters[key]
// If an empty array exist it should overwrite any other
// value.
if len(value) == 0 || (ok && len(pcValue) == 0) {
- pc.Permissions.AllowedParameters[key] = []interface{}{}
+ existingPerms.AllowedParameters[key] = []interface{}{}
} else {
// Merge the two maps, appending values on key conflict.
- pc.Permissions.AllowedParameters[key] = append(value, pc.Permissions.AllowedParameters[key]...)
+ existingPerms.AllowedParameters[key] = append(value, existingPerms.AllowedParameters[key]...)
}
}
}
}
- if len(existingPerms.DeniedParameters) > 0 {
- if pc.Permissions.DeniedParameters == nil {
- pc.Permissions.DeniedParameters = existingPerms.DeniedParameters
+ if len(pc.Permissions.DeniedParameters) > 0 {
+ if existingPerms.DeniedParameters == nil {
+ existingPerms.DeniedParameters = pc.Permissions.DeniedParameters
} else {
- for key, value := range existingPerms.DeniedParameters {
- pcValue, ok := pc.Permissions.DeniedParameters[key]
+ for key, value := range pc.Permissions.DeniedParameters {
+ pcValue, ok := existingPerms.DeniedParameters[key]
// If an empty array exist it should overwrite any other
// value.
if len(value) == 0 || (ok && len(pcValue) == 0) {
- pc.Permissions.DeniedParameters[key] = []interface{}{}
+ existingPerms.DeniedParameters[key] = []interface{}{}
} else {
// Merge the two maps, appending values on key conflict.
- pc.Permissions.DeniedParameters[key] = append(value, pc.Permissions.DeniedParameters[key]...)
+ existingPerms.DeniedParameters[key] = append(value, existingPerms.DeniedParameters[key]...)
}
}
}
}
INSERT:
-
- tree.Insert(pc.Prefix, pc.Permissions)
+ tree.Insert(pc.Prefix, existingPerms)
}
}
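The motivation for the Clone: previously the shared pc.Permissions from the parsed Policy was inserted into the radix tree and then mutated during merges, so concurrent NewACL calls over the same Policy raced on one struct. A hypothetical fragment illustrating the aliasing (treeA/treeB and the second tree are illustrative only):

    shared := pc.Permissions                       // pointer taken from the parsed Policy
    treeA.Insert(pc.Prefix, shared)
    treeB.Insert(pc.Prefix, shared)                // same pointer in a second ACL's tree
    shared.CapabilitiesBitmap |= DenyCapabilityInt // mutates both ACLs at once

    // Cloning before Insert, as NewACL now does, gives each ACL a private
    // copy to merge into; TestACL_CreationRace exercises exactly this path.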
diff --git a/vendor/github.com/hashicorp/vault/vault/acl_test.go b/vendor/github.com/hashicorp/vault/vault/acl_test.go
index 7eb45b8..638fed6 100644
--- a/vendor/github.com/hashicorp/vault/vault/acl_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/acl_test.go
@@ -2,6 +2,7 @@ package vault
import (
"reflect"
+ "sync"
"testing"
"time"
@@ -245,7 +246,7 @@ func TestACL_PolicyMerge(t *testing.T) {
{"allow/all1", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil},
{"deny/all", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}},
{"deny/all1", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}},
- {"value/merge", nil, nil, map[string][]interface{}{"test": []interface{}{1, 2, 3, 4}}, map[string][]interface{}{"test": []interface{}{1, 2, 3, 4}}},
+ {"value/merge", nil, nil, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}},
{"value/empty", nil, nil, map[string][]interface{}{"empty": []interface{}{}}, map[string][]interface{}{"empty": []interface{}{}}},
}
@@ -415,6 +416,35 @@ func TestACL_ValuePermissions(t *testing.T) {
}
}
+// NOTE: this test does not currently catch any races
+func TestACL_CreationRace(t *testing.T) {
+ policy, err := Parse(valuePermissionsPolicy)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ var wg sync.WaitGroup
+ stopTime := time.Now().Add(20 * time.Second)
+
+ for i := 0; i < 50; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ if time.Now().After(stopTime) {
+ return
+ }
+ _, err := NewACL([]*Policy{policy})
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+ }()
+ }
+
+ wg.Wait()
+}
+
var tokenCreationPolicy = `
name = "tokenCreation"
path "auth/token/create*" {
@@ -423,7 +453,7 @@ path "auth/token/create*" {
`
var aclPolicy = `
-name = "dev"
+name = "DeV"
path "dev/*" {
policy = "sudo"
}
@@ -452,7 +482,7 @@ path "foo/bar" {
`
var aclPolicy2 = `
-name = "ops"
+name = "OpS"
path "dev/hide/*" {
policy = "deny"
}
diff --git a/vendor/github.com/hashicorp/vault/vault/audit.go b/vendor/github.com/hashicorp/vault/vault/audit.go
index 9391843..fccf9aa 100644
--- a/vendor/github.com/hashicorp/vault/vault/audit.go
+++ b/vendor/github.com/hashicorp/vault/vault/audit.go
@@ -79,6 +79,13 @@ func (c *Core) enableAudit(entry *MountEntry) error {
}
entry.UUID = entryUUID
}
+ if entry.Accessor == "" {
+ accessor, err := c.generateMountAccessor("audit_" + entry.Type)
+ if err != nil {
+ return err
+ }
+ entry.Accessor = accessor
+ }
viewPath := auditBarrierPrefix + entry.UUID + "/"
view := NewBarrierView(c.barrier, viewPath)
@@ -201,6 +208,14 @@ func (c *Core) loadAudits() error {
entry.Table = c.audit.Type
needPersist = true
}
+ if entry.Accessor == "" {
+ accessor, err := c.generateMountAccessor("audit_" + entry.Type)
+ if err != nil {
+ return err
+ }
+ entry.Accessor = accessor
+ needPersist = true
+ }
}
if !needPersist {
@@ -368,17 +383,16 @@ func (c *Core) newAuditBackend(entry *MountEntry, view logical.Storage, conf map
if !ok {
return nil, fmt.Errorf("unknown backend type: %s", entry.Type)
}
- salter, err := salt.NewSalt(view, &salt.Config{
+ saltConfig := &salt.Config{
HMAC: sha256.New,
HMACType: "hmac-sha256",
- })
- if err != nil {
- return nil, fmt.Errorf("core: unable to generate salt: %v", err)
+ Location: salt.DefaultLocation,
}
be, err := f(&audit.BackendConfig{
- Salt: salter,
- Config: conf,
+ SaltView: view,
+ SaltConfig: saltConfig,
+ Config: conf,
})
if err != nil {
return nil, err
@@ -397,7 +411,7 @@ func (c *Core) newAuditBackend(entry *MountEntry, view logical.Storage, conf map
c.logger.Debug("audit: adding reload function", "path", entry.Path)
}
- c.reloadFuncs[key] = append(c.reloadFuncs[key], func(map[string]string) error {
+ c.reloadFuncs[key] = append(c.reloadFuncs[key], func(map[string]interface{}) error {
if c.logger.IsInfo() {
c.logger.Info("audit: reloading file audit backend", "path", entry.Path)
}
@@ -474,20 +488,29 @@ func (a *AuditBroker) GetHash(name string, input string) (string, error) {
return "", fmt.Errorf("unknown audit backend %s", name)
}
- return be.backend.GetHash(input), nil
+ return be.backend.GetHash(input)
}
// LogRequest is used to ensure all the audit backends have an opportunity to
// log the given request and that *at least one* succeeds.
-func (a *AuditBroker) LogRequest(auth *logical.Auth, req *logical.Request, headersConfig *AuditedHeadersConfig, outerErr error) (retErr error) {
+func (a *AuditBroker) LogRequest(auth *logical.Auth, req *logical.Request, headersConfig *AuditedHeadersConfig, outerErr error) (ret error) {
defer metrics.MeasureSince([]string{"audit", "log_request"}, time.Now())
a.RLock()
defer a.RUnlock()
+
+ var retErr *multierror.Error
+
defer func() {
if r := recover(); r != nil {
a.logger.Error("audit: panic during logging", "request_path", req.Path, "error", r)
retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log"))
}
+
+ ret = retErr.ErrorOrNil()
+
+ if ret != nil {
+ metrics.IncrCounter([]string{"audit", "log_request_failure"}, 1.0)
+ }
}()
// All logged requests must have an identifier
@@ -506,35 +529,49 @@ func (a *AuditBroker) LogRequest(auth *logical.Auth, req *logical.Request, heade
anyLogged := false
for name, be := range a.backends {
req.Headers = nil
- req.Headers = headersConfig.ApplyConfig(headers, be.backend.GetHash)
+ transHeaders, thErr := headersConfig.ApplyConfig(headers, be.backend.GetHash)
+ if thErr != nil {
+ a.logger.Error("audit: backend failed to include headers", "backend", name, "error", thErr)
+ continue
+ }
+ req.Headers = transHeaders
start := time.Now()
- err := be.backend.LogRequest(auth, req, outerErr)
+ lrErr := be.backend.LogRequest(auth, req, outerErr)
metrics.MeasureSince([]string{"audit", name, "log_request"}, start)
- if err != nil {
- a.logger.Error("audit: backend failed to log request", "backend", name, "error", err)
+ if lrErr != nil {
+ a.logger.Error("audit: backend failed to log request", "backend", name, "error", lrErr)
} else {
anyLogged = true
}
}
if !anyLogged && len(a.backends) > 0 {
retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the request"))
- return
}
- return nil
+
+ return retErr.ErrorOrNil()
}
// LogResponse is used to ensure all the audit backends have an opportunity to
// log the given response and that *at least one* succeeds.
func (a *AuditBroker) LogResponse(auth *logical.Auth, req *logical.Request,
- resp *logical.Response, headersConfig *AuditedHeadersConfig, err error) (reterr error) {
+ resp *logical.Response, headersConfig *AuditedHeadersConfig, err error) (ret error) {
defer metrics.MeasureSince([]string{"audit", "log_response"}, time.Now())
a.RLock()
defer a.RUnlock()
+
+ var retErr *multierror.Error
+
defer func() {
if r := recover(); r != nil {
a.logger.Error("audit: panic during logging", "request_path", req.Path, "error", r)
- reterr = fmt.Errorf("panic generating audit log")
+ retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log"))
+ }
+
+ ret = retErr.ErrorOrNil()
+
+ if ret != nil {
+ metrics.IncrCounter([]string{"audit", "log_response_failure"}, 1.0)
}
}()
@@ -547,19 +584,35 @@ func (a *AuditBroker) LogResponse(auth *logical.Auth, req *logical.Request,
anyLogged := false
for name, be := range a.backends {
req.Headers = nil
- req.Headers = headersConfig.ApplyConfig(headers, be.backend.GetHash)
+ transHeaders, thErr := headersConfig.ApplyConfig(headers, be.backend.GetHash)
+ if thErr != nil {
+ a.logger.Error("audit: backend failed to include headers", "backend", name, "error", thErr)
+ continue
+ }
+ req.Headers = transHeaders
start := time.Now()
- err := be.backend.LogResponse(auth, req, resp, err)
+ lrErr := be.backend.LogResponse(auth, req, resp, err)
metrics.MeasureSince([]string{"audit", name, "log_response"}, start)
- if err != nil {
- a.logger.Error("audit: backend failed to log response", "backend", name, "error", err)
+ if lrErr != nil {
+ a.logger.Error("audit: backend failed to log response", "backend", name, "error", lrErr)
} else {
anyLogged = true
}
}
if !anyLogged && len(a.backends) > 0 {
- return fmt.Errorf("no audit backend succeeded in logging the response")
+ retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the response"))
+ }
+
+ return retErr.ErrorOrNil()
+}
+
+func (a *AuditBroker) Invalidate(key string) {
+ // For now we ignore the key, as it would only apply to salts; we simply
+ // invalidate the cached salt on every backend.
+ a.Lock()
+ defer a.Unlock()
+ for _, be := range a.backends {
+ be.backend.Invalidate()
}
- return nil
}
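The new error handling in LogRequest and LogResponse follows one pattern: accumulate failures into a *multierror.Error, convert with ErrorOrNil inside a deferred closure so panics are folded in, and bump a failure metric when the final error is non-nil. Distilled as a sketch (logAll and its argument are hypothetical; fmt and github.com/hashicorp/go-multierror are assumed imports):

    func logAll(backends map[string]func() error) (ret error) {
        var retErr *multierror.Error
        defer func() {
            if r := recover(); r != nil {
                retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log"))
            }
            // ErrorOrNil collapses an empty accumulator to nil, so callers
            // see no error when every backend succeeded.
            ret = retErr.ErrorOrNil()
        }()

        anyLogged := false
        for _, logFn := range backends {
            if err := logFn(); err != nil {
                continue // one failing backend must not stop the rest
            }
            anyLogged = true
        }
        if !anyLogged && len(backends) > 0 {
            retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded"))
        }
        return retErr.ErrorOrNil()
    }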
diff --git a/vendor/github.com/hashicorp/vault/vault/audit_test.go b/vendor/github.com/hashicorp/vault/vault/audit_test.go
index 5e97da8..a91298d 100644
--- a/vendor/github.com/hashicorp/vault/vault/audit_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/audit_test.go
@@ -3,6 +3,8 @@ package vault
import (
"fmt"
"reflect"
+ "strings"
+ "sync"
"testing"
"time"
@@ -13,6 +15,7 @@ import (
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
log "github.com/mgutz/logxi/v1"
"github.com/mitchellh/copystructure"
@@ -31,6 +34,9 @@ type NoopAudit struct {
RespReq []*logical.Request
Resp []*logical.Response
RespErrs []error
+
+ salt *salt.Salt
+ saltMutex sync.RWMutex
}
func (n *NoopAudit) LogRequest(a *logical.Auth, r *logical.Request, err error) error {
@@ -49,14 +55,44 @@ func (n *NoopAudit) LogResponse(a *logical.Auth, r *logical.Request, re *logical
return n.RespErr
}
-func (n *NoopAudit) GetHash(data string) string {
- return n.Config.Salt.GetIdentifiedHMAC(data)
+func (n *NoopAudit) Salt() (*salt.Salt, error) {
+ n.saltMutex.RLock()
+ if n.salt != nil {
+ defer n.saltMutex.RUnlock()
+ return n.salt, nil
+ }
+ n.saltMutex.RUnlock()
+ n.saltMutex.Lock()
+ defer n.saltMutex.Unlock()
+ if n.salt != nil {
+ return n.salt, nil
+ }
+ salt, err := salt.NewSalt(n.Config.SaltView, n.Config.SaltConfig)
+ if err != nil {
+ return nil, err
+ }
+ n.salt = salt
+ return salt, nil
+}
+
+func (n *NoopAudit) GetHash(data string) (string, error) {
+ salt, err := n.Salt()
+ if err != nil {
+ return "", err
+ }
+ return salt.GetIdentifiedHMAC(data), nil
}
func (n *NoopAudit) Reload() error {
return nil
}
+func (n *NoopAudit) Invalidate() {
+ n.saltMutex.Lock()
+ defer n.saltMutex.Unlock()
+ n.salt = nil
+}
+
func TestCore_EnableAudit(t *testing.T) {
c, keys, _ := TestCoreUnsealed(t)
c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) {
@@ -184,16 +220,18 @@ func TestCore_EnableAudit_Local(t *testing.T) {
Type: auditTableType,
Entries: []*MountEntry{
&MountEntry{
- Table: auditTableType,
- Path: "noop/",
- Type: "noop",
- UUID: "abcd",
+ Table: auditTableType,
+ Path: "noop/",
+ Type: "noop",
+ UUID: "abcd",
+ Accessor: "noop-abcd",
},
&MountEntry{
- Table: auditTableType,
- Path: "noop2/",
- Type: "noop",
- UUID: "bcde",
+ Table: auditTableType,
+ Path: "noop2/",
+ Type: "noop",
+ UUID: "bcde",
+ Accessor: "noop-bcde",
},
},
}
@@ -508,7 +546,7 @@ func TestAuditBroker_LogResponse(t *testing.T) {
t.Fatalf("Bad: %#v", a.Resp[0])
}
if !reflect.DeepEqual(a.RespErrs[0], respErr) {
- t.Fatalf("Bad: %#v", a.RespErrs[0])
+ t.Fatalf("Expected\n%v\nGot\n%#v", respErr, a.RespErrs[0])
}
}
@@ -522,7 +560,7 @@ func TestAuditBroker_LogResponse(t *testing.T) {
// Should FAIL work with both failing backends
a2.RespErr = fmt.Errorf("failed")
err = b.LogResponse(auth, req, resp, headersConf, respErr)
- if err.Error() != "no audit backend succeeded in logging the response" {
+ if !strings.Contains(err.Error(), "no audit backend succeeded in logging the response") {
t.Fatalf("err: %v", err)
}
}
diff --git a/vendor/github.com/hashicorp/vault/vault/audited_headers.go b/vendor/github.com/hashicorp/vault/vault/audited_headers.go
index 781c035..1e1a11b 100644
--- a/vendor/github.com/hashicorp/vault/vault/audited_headers.go
+++ b/vendor/github.com/hashicorp/vault/vault/audited_headers.go
@@ -88,7 +88,7 @@ func (a *AuditedHeadersConfig) remove(header string) error {
// ApplyConfig returns a map of approved headers and their values, either
// hmac'ed or plaintext
-func (a *AuditedHeadersConfig) ApplyConfig(headers map[string][]string, hashFunc func(string) string) (result map[string][]string) {
+func (a *AuditedHeadersConfig) ApplyConfig(headers map[string][]string, hashFunc func(string) (string, error)) (result map[string][]string, retErr error) {
// Grab a read lock
a.RLock()
defer a.RUnlock()
@@ -110,7 +110,11 @@ func (a *AuditedHeadersConfig) ApplyConfig(headers map[string][]string, hashFunc
// Optionally hmac the values
if settings.HMAC {
for i, el := range hVals {
- hVals[i] = hashFunc(el)
+ hVal, err := hashFunc(el)
+ if err != nil {
+ return nil, err
+ }
+ hVals[i] = hVal
}
}
@@ -118,7 +122,7 @@ func (a *AuditedHeadersConfig) ApplyConfig(headers map[string][]string, hashFunc
}
}
- return
+ return result, nil
}
// Initialize the headers config by loading from the barrier view
diff --git a/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go b/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go
index 5e82ec7..93225cf 100644
--- a/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go
@@ -166,9 +166,12 @@ func TestAuditedHeadersConfig_ApplyConfig(t *testing.T) {
"Content-Type": []string{"json"},
}
- hashFunc := func(s string) string { return "hashed" }
+ hashFunc := func(s string) (string, error) { return "hashed", nil }
- result := conf.ApplyConfig(reqHeaders, hashFunc)
+ result, err := conf.ApplyConfig(reqHeaders, hashFunc)
+ if err != nil {
+ t.Fatal(err)
+ }
expected := map[string][]string{
"x-test-header": []string{"foo"},
@@ -214,7 +217,7 @@ func BenchmarkAuditedHeaderConfig_ApplyConfig(b *testing.B) {
b.Fatal(err)
}
- hashFunc := func(s string) string { return salter.GetIdentifiedHMAC(s) }
+ hashFunc := func(s string) (string, error) { return salter.GetIdentifiedHMAC(s), nil }
// Reset the timer since we did a lot above
b.ResetTimer()
diff --git a/vendor/github.com/hashicorp/vault/vault/auth.go b/vendor/github.com/hashicorp/vault/vault/auth.go
index 5a5e68b..5900449 100644
--- a/vendor/github.com/hashicorp/vault/vault/auth.go
+++ b/vendor/github.com/hashicorp/vault/vault/auth.go
@@ -5,6 +5,7 @@ import (
"fmt"
"strings"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/logical"
@@ -85,13 +86,23 @@ func (c *Core) enableCredential(entry *MountEntry) error {
}
entry.UUID = entryUUID
}
-
+ if entry.Accessor == "" {
+ accessor, err := c.generateMountAccessor("auth_" + entry.Type)
+ if err != nil {
+ return err
+ }
+ entry.Accessor = accessor
+ }
viewPath := credentialBarrierPrefix + entry.UUID + "/"
view := NewBarrierView(c.barrier, viewPath)
sysView := c.mountEntrySysView(entry)
+ conf := make(map[string]string)
+ if entry.Config.PluginName != "" {
+ conf["plugin_name"] = entry.Config.PluginName
+ }
// Create the new backend
- backend, err := c.newCredentialBackend(entry.Type, sysView, view, nil)
+ backend, err := c.newCredentialBackend(entry.Type, sysView, view, conf)
if err != nil {
return err
}
@@ -99,6 +110,12 @@ func (c *Core) enableCredential(entry *MountEntry) error {
return fmt.Errorf("nil backend returned from %q factory", entry.Type)
}
+ // Check for the correct backend type
+ backendType := backend.Type()
+ if entry.Type == "plugin" && backendType != logical.TypeCredential {
+ return fmt.Errorf("cannot mount '%s' of type '%s' as an auth backend", entry.Config.PluginName, backendType)
+ }
+
if err := backend.Initialize(); err != nil {
return err
}
@@ -125,7 +142,7 @@ func (c *Core) enableCredential(entry *MountEntry) error {
// disableCredential is used to disable an existing credential backend; the
// boolean indicates if it existed
-func (c *Core) disableCredential(path string) (bool, error) {
+func (c *Core) disableCredential(path string) error {
// Ensure we end the path in a slash
if !strings.HasSuffix(path, "/") {
path += "/"
@@ -133,29 +150,29 @@ func (c *Core) disableCredential(path string) (bool, error) {
// Ensure the token backend is not affected
if path == "token/" {
- return true, fmt.Errorf("token credential backend cannot be disabled")
+ return fmt.Errorf("token credential backend cannot be disabled")
}
// Store the view for this backend
fullPath := credentialRoutePrefix + path
view := c.router.MatchingStorageView(fullPath)
if view == nil {
- return false, fmt.Errorf("no matching backend %s", fullPath)
+ return fmt.Errorf("no matching backend %s", fullPath)
}
// Mark the entry as tainted
if err := c.taintCredEntry(path); err != nil {
- return true, err
+ return err
}
// Taint the router path to prevent routing
if err := c.router.Taint(fullPath); err != nil {
- return true, err
+ return err
}
// Revoke credentials from this path
if err := c.expiration.RevokePrefix(fullPath); err != nil {
- return true, err
+ return err
}
// Call cleanup function if it exists
@@ -166,24 +183,24 @@ func (c *Core) disableCredential(path string) (bool, error) {
// Unmount the backend
if err := c.router.Unmount(fullPath); err != nil {
- return true, err
+ return err
}
// Clear the data in the view
if view != nil {
if err := logical.ClearView(view); err != nil {
- return true, err
+ return err
}
}
// Remove the mount table entry
if err := c.removeCredEntry(path); err != nil {
- return true, err
+ return err
}
if c.logger.IsInfo() {
c.logger.Info("core: disabled credential backend", "path", path)
}
- return true, nil
+ return nil
}
// removeCredEntry is used to remove an entry in the auth table
@@ -283,13 +300,21 @@ func (c *Core) loadCredentials() error {
entry.Table = c.auth.Type
needPersist = true
}
+ if entry.Accessor == "" {
+ accessor, err := c.generateMountAccessor("auth_" + entry.Type)
+ if err != nil {
+ return err
+ }
+ entry.Accessor = accessor
+ needPersist = true
+ }
}
if !needPersist {
return nil
}
} else {
- c.auth = defaultAuthTable()
+ c.auth = c.defaultAuthTable()
}
if err := c.persistAuth(c.auth, false); err != nil {
@@ -373,7 +398,6 @@ func (c *Core) persistAuth(table *MountTable, localOnly bool) error {
// setupCredentials is invoked after we've loaded the auth table to
// initialize the credential backends and setup the router
func (c *Core) setupCredentials() error {
- var backend logical.Backend
var view *BarrierView
var err error
var persistNeeded bool
@@ -382,6 +406,7 @@ func (c *Core) setupCredentials() error {
defer c.authLock.Unlock()
for _, entry := range c.auth.Entries {
+ var backend logical.Backend
// Work around some problematic code that existed in master for a while
if strings.HasPrefix(entry.Path, credentialRoutePrefix) {
entry.Path = strings.TrimPrefix(entry.Path, credentialRoutePrefix)
@@ -392,21 +417,36 @@ func (c *Core) setupCredentials() error {
viewPath := credentialBarrierPrefix + entry.UUID + "/"
view = NewBarrierView(c.barrier, viewPath)
sysView := c.mountEntrySysView(entry)
+ conf := make(map[string]string)
+ if entry.Config.PluginName != "" {
+ conf["plugin_name"] = entry.Config.PluginName
+ }
// Initialize the backend
- backend, err = c.newCredentialBackend(entry.Type, sysView, view, nil)
+ backend, err = c.newCredentialBackend(entry.Type, sysView, view, conf)
if err != nil {
c.logger.Error("core: failed to create credential entry", "path", entry.Path, "error", err)
+ if errwrap.Contains(err, ErrPluginNotFound.Error()) && entry.Type == "plugin" {
+ // If we encounter an error instantiating the backend due to it being missing from the catalog,
+ // skip backend initialization but register the entry in the mount table to preserve storage
+ // and path.
+ goto ROUTER_MOUNT
+ }
return errLoadAuthFailed
}
if backend == nil {
return fmt.Errorf("nil backend returned from %q factory", entry.Type)
}
+ // Check for the correct backend type
+ if entry.Type == "plugin" && backend.Type() != logical.TypeCredential {
+ return fmt.Errorf("cannot mount '%s' of type '%s' as an auth backend", entry.Config.PluginName, backend.Type())
+ }
+
if err := backend.Initialize(); err != nil {
return err
}
-
+ ROUTER_MOUNT:
// Mount the backend
path := credentialRoutePrefix + entry.Path
err = c.router.Mount(backend, path, entry, view)
@@ -425,7 +465,7 @@ func (c *Core) setupCredentials() error {
c.tokenStore = backend.(*TokenStore)
// this is loaded *after* the normal mounts, including cubbyhole
- c.router.tokenStoreSalt = c.tokenStore.salt
+ c.router.tokenStoreSaltFunc = c.tokenStore.Salt
c.tokenStore.cubbyholeBackend = c.router.MatchingBackend("cubbyhole/").(*CubbyholeBackend)
}
}
@@ -485,7 +525,7 @@ func (c *Core) newCredentialBackend(
}
// defaultAuthTable creates a default auth table
-func defaultAuthTable() *MountTable {
+func (c *Core) defaultAuthTable() *MountTable {
table := &MountTable{
Type: credentialTableType,
}
@@ -493,12 +533,17 @@ func defaultAuthTable() *MountTable {
if err != nil {
panic(fmt.Sprintf("could not generate UUID for default auth table token entry: %v", err))
}
+ tokenAccessor, err := c.generateMountAccessor("auth_token")
+ if err != nil {
+ panic(fmt.Sprintf("could not generate accessor for default auth table token entry: %v", err))
+ }
tokenAuth := &MountEntry{
Table: credentialTableType,
Path: "token/",
Type: "token",
Description: "token based credentials",
UUID: tokenUUID,
+ Accessor: tokenAccessor,
}
table.Entries = append(table.Entries, tokenAuth)
return table
diff --git a/vendor/github.com/hashicorp/vault/vault/auth_test.go b/vendor/github.com/hashicorp/vault/vault/auth_test.go
index bc150e9..c81b264 100644
--- a/vendor/github.com/hashicorp/vault/vault/auth_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/auth_test.go
@@ -99,16 +99,18 @@ func TestCore_EnableCredential_Local(t *testing.T) {
Type: credentialTableType,
Entries: []*MountEntry{
&MountEntry{
- Table: credentialTableType,
- Path: "noop/",
- Type: "noop",
- UUID: "abcd",
+ Table: credentialTableType,
+ Path: "noop/",
+ Type: "noop",
+ UUID: "abcd",
+ Accessor: "noop-abcd",
},
&MountEntry{
- Table: credentialTableType,
- Path: "noop2/",
- Type: "noop",
- UUID: "bcde",
+ Table: credentialTableType,
+ Path: "noop2/",
+ Type: "noop",
+ UUID: "bcde",
+ Accessor: "noop-bcde",
},
},
}
@@ -215,9 +217,9 @@ func TestCore_DisableCredential(t *testing.T) {
return &NoopBackend{}, nil
}
- existed, err := c.disableCredential("foo")
- if existed || (err != nil && !strings.HasPrefix(err.Error(), "no matching backend")) {
- t.Fatalf("existed: %v; err: %v", existed, err)
+ err := c.disableCredential("foo")
+ if err != nil && !strings.HasPrefix(err.Error(), "no matching backend") {
+ t.Fatalf("err: %v", err)
}
me := &MountEntry{
@@ -230,9 +232,9 @@ func TestCore_DisableCredential(t *testing.T) {
t.Fatalf("err: %v", err)
}
- existed, err = c.disableCredential("foo")
- if !existed || err != nil {
- t.Fatalf("existed: %v; err: %v", existed, err)
+ err = c.disableCredential("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
}
match := c.router.MatchingMount("auth/foo/bar")
@@ -266,9 +268,9 @@ func TestCore_DisableCredential(t *testing.T) {
func TestCore_DisableCredential_Protected(t *testing.T) {
c, _, _ := TestCoreUnsealed(t)
- existed, err := c.disableCredential("token")
- if !existed || err.Error() != "token credential backend cannot be disabled" {
- t.Fatalf("existed: %v; err: %v", existed, err)
+ err := c.disableCredential("token")
+ if err.Error() != "token credential backend cannot be disabled" {
+ t.Fatalf("err: %v", err)
}
}
@@ -322,9 +324,9 @@ func TestCore_DisableCredential_Cleanup(t *testing.T) {
}
// Disable should cleanup
- existed, err := c.disableCredential("foo")
- if !existed || err != nil {
- t.Fatalf("existed: %v; err: %v", existed, err)
+ err = c.disableCredential("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
}
// Token should be revoked
@@ -347,7 +349,8 @@ func TestCore_DisableCredential_Cleanup(t *testing.T) {
}
func TestDefaultAuthTable(t *testing.T) {
- table := defaultAuthTable()
+ c, _, _ := TestCoreUnsealed(t)
+ table := c.defaultAuthTable()
verifyDefaultAuthTable(t, table)
}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go
index 7d575ce..ef0fe38 100644
--- a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go
@@ -7,6 +7,7 @@ import (
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/physical/inmem"
log "github.com/mgutz/logxi/v1"
)
@@ -16,8 +17,10 @@ var (
// mockBarrier returns a physical backend, security barrier, and master key
func mockBarrier(t testing.TB) (physical.Backend, SecurityBarrier, []byte) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -31,8 +34,10 @@ func mockBarrier(t testing.TB) (physical.Backend, SecurityBarrier, []byte) {
}
func TestAESGCMBarrier_Basic(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -41,8 +46,10 @@ func TestAESGCMBarrier_Basic(t *testing.T) {
}
func TestAESGCMBarrier_Rotate(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -51,8 +58,10 @@ func TestAESGCMBarrier_Rotate(t *testing.T) {
}
func TestAESGCMBarrier_Upgrade(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b1, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -65,8 +74,10 @@ func TestAESGCMBarrier_Upgrade(t *testing.T) {
}
func TestAESGCMBarrier_Upgrade_Rekey(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b1, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -79,8 +90,10 @@ func TestAESGCMBarrier_Upgrade_Rekey(t *testing.T) {
}
func TestAESGCMBarrier_Rekey(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -91,8 +104,10 @@ func TestAESGCMBarrier_Rekey(t *testing.T) {
// Test an upgrade from the old (0.1) barrier/init to the new
// core/keyring style
func TestAESGCMBarrier_BackwardsCompatible(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -171,8 +186,10 @@ func TestAESGCMBarrier_BackwardsCompatible(t *testing.T) {
// Verify data sent through is encrypted
func TestAESGCMBarrier_Confidential(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -209,8 +226,10 @@ func TestAESGCMBarrier_Confidential(t *testing.T) {
// Verify data sent through cannot be tampered with
func TestAESGCMBarrier_Integrity(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -245,8 +264,10 @@ func TestAESGCMBarrier_Integrity(t *testing.T) {
// Verify data sent through cannot be moved
func TestAESGCMBarrier_MoveIntegrityV1(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -275,6 +296,9 @@ func TestAESGCMBarrier_MoveIntegrityV1(t *testing.T) {
pe, _ := inm.Get("test")
pe.Key = "moved"
err = inm.Put(pe)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
// Read from the barrier
_, err = b.Get("moved")
@@ -284,8 +308,10 @@ func TestAESGCMBarrier_MoveIntegrityV1(t *testing.T) {
}
func TestAESGCMBarrier_MoveIntegrityV2(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -314,6 +340,9 @@ func TestAESGCMBarrier_MoveIntegrityV2(t *testing.T) {
pe, _ := inm.Get("test")
pe.Key = "moved"
err = inm.Put(pe)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
// Read from the barrier
_, err = b.Get("moved")
@@ -323,8 +352,10 @@ func TestAESGCMBarrier_MoveIntegrityV2(t *testing.T) {
}
func TestAESGCMBarrier_UpgradeV1toV2(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -376,8 +407,10 @@ func TestAESGCMBarrier_UpgradeV1toV2(t *testing.T) {
}
func TestEncrypt_Unique(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -404,8 +437,10 @@ func TestEncrypt_Unique(t *testing.T) {
}
func TestInitialize_KeyLength(t *testing.T) {
-
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
@@ -435,7 +470,10 @@ func TestInitialize_KeyLength(t *testing.T) {
}
func TestEncrypt_BarrierEncryptor(t *testing.T) {
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
b, err := NewAESGCMBarrier(inm)
if err != nil {
t.Fatalf("err: %v", err)
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_view.go b/vendor/github.com/hashicorp/vault/vault/barrier_view.go
index 0fa6f2d..3512aba 100644
--- a/vendor/github.com/hashicorp/vault/vault/barrier_view.go
+++ b/vendor/github.com/hashicorp/vault/vault/barrier_view.go
@@ -1,7 +1,7 @@
package vault
import (
- "fmt"
+ "errors"
"strings"
"github.com/hashicorp/vault/logical"
@@ -20,6 +20,10 @@ type BarrierView struct {
readonly bool
}
+var (
+ ErrRelativePath = errors.New("relative paths not supported")
+)
+
// NewBarrierView takes an underlying security barrier and returns
// a view of it that can only operate with the given prefix.
func NewBarrierView(barrier BarrierStorage, prefix string) *BarrierView {
@@ -32,7 +36,7 @@ func NewBarrierView(barrier BarrierStorage, prefix string) *BarrierView {
// sanityCheck is used to perform a sanity check on a key
func (v *BarrierView) sanityCheck(key string) error {
if strings.Contains(key, "..") {
- return fmt.Errorf("key cannot be relative path")
+ return ErrRelativePath
}
return nil
}
@@ -98,7 +102,6 @@ func (v *BarrierView) Delete(key string) error {
return logical.ErrReadOnly
}
-
return v.barrier.Delete(expandedKey)
}
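Note: swapping the one-off fmt.Errorf for the exported ErrRelativePath sentinel lets callers test for the condition by identity instead of string matching. A hedged sketch of a hypothetical caller (sanityCheck itself is unexported, so the error surfaces through the view's Get/Put/Delete methods):

    // view is a *vault.BarrierView from NewBarrierView
    err := view.Put(&logical.StorageEntry{Key: "foo/../bar", Value: []byte("x")})
    if err == vault.ErrRelativePath {
        // reject the key outright; retrying cannot help
    }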
diff --git a/vendor/github.com/hashicorp/vault/vault/cluster.go b/vendor/github.com/hashicorp/vault/vault/cluster.go
index dc9fb65..beca4b9 100644
--- a/vendor/github.com/hashicorp/vault/vault/cluster.go
+++ b/vendor/github.com/hashicorp/vault/vault/cluster.go
@@ -17,13 +17,8 @@ import (
"net/http"
"time"
- log "github.com/mgutz/logxi/v1"
-
- "golang.org/x/net/http2"
-
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/forwarding"
"github.com/hashicorp/vault/helper/jsonutil"
)
@@ -50,11 +45,6 @@ type clusterKeyParams struct {
D *big.Int `json:"d" structs:"d" mapstructure:"d"`
}
-type activeConnection struct {
- transport *http2.Transport
- clusterAddr string
-}
-
// Structure representing the storage entry that holds cluster information
type Cluster struct {
// Name of the cluster
@@ -292,21 +282,11 @@ func (c *Core) setupCluster() error {
return nil
}
-// SetClusterSetupFuncs sets the handler setup func
-func (c *Core) SetClusterSetupFuncs(handler func() (http.Handler, http.Handler)) {
- c.clusterHandlerSetupFunc = handler
-}
-
// startClusterListener starts cluster request listeners during postunseal. It
// is assumed that the state lock is held while this is run. Right now this
// only starts forwarding listeners; it's TBD whether other request types will
// be built in the same mechanism or started independently.
func (c *Core) startClusterListener() error {
- if c.clusterHandlerSetupFunc == nil {
- c.logger.Error("core: cluster handler setup function has not been set when trying to start listeners")
- return fmt.Errorf("cluster handler setup function has not been set")
- }
-
if c.clusterAddr == "" {
c.logger.Info("core: clustering disabled, not starting listeners")
return nil
@@ -418,7 +398,7 @@ func (c *Core) ClusterTLSConfig() (*tls.Config, error) {
//c.logger.Trace("core: performing server config lookup")
for _, v := range clientHello.SupportedProtos {
switch v {
- case "h2", "req_fw_sb-act_v1":
+ case "h2", requestForwardingALPN:
default:
return nil, fmt.Errorf("unknown ALPN proto %s", v)
}
@@ -434,6 +414,7 @@ func (c *Core) ClusterTLSConfig() (*tls.Config, error) {
RootCAs: caPool,
ClientCAs: caPool,
NextProtos: clientHello.SupportedProtos,
+ CipherSuites: c.clusterCipherSuites,
}
switch {
@@ -458,6 +439,7 @@ func (c *Core) ClusterTLSConfig() (*tls.Config, error) {
GetClientCertificate: clientLookup,
GetConfigForClient: serverConfigLookup,
MinVersion: tls.VersionTLS12,
+ CipherSuites: c.clusterCipherSuites,
}
var localCert bytes.Buffer
@@ -482,50 +464,6 @@ func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) {
c.clusterListenerAddrs = addrs
}
-// WrapHandlerForClustering takes in Vault's HTTP handler and returns a setup
-// function that returns both the original handler and one wrapped with cluster
-// methods
-func WrapHandlerForClustering(handler http.Handler, logger log.Logger) func() (http.Handler, http.Handler) {
- return func() (http.Handler, http.Handler) {
- // This mux handles cluster functions (right now, only forwarded requests)
- mux := http.NewServeMux()
- mux.HandleFunc("/cluster/local/forwarded-request", func(w http.ResponseWriter, req *http.Request) {
- //logger.Trace("forwarding: serving h2 forwarded request")
- freq, err := forwarding.ParseForwardedHTTPRequest(req)
- if err != nil {
- if logger != nil {
- logger.Error("http/forwarded-request-server: error parsing forwarded request", "error", err)
- }
-
- w.Header().Add("Content-Type", "application/json")
-
- // The response writer here is different from
- // the one set in Vault's HTTP handler.
- // Hence, set the Cache-Control explicitly.
- w.Header().Set("Cache-Control", "no-store")
-
- w.WriteHeader(http.StatusInternalServerError)
-
- type errorResponse struct {
- Errors []string
- }
- resp := &errorResponse{
- Errors: []string{
- err.Error(),
- },
- }
-
- enc := json.NewEncoder(w)
- enc.Encode(resp)
- return
- }
-
- // To avoid the risk of a forward loop in some pathological condition,
- // set the no-forward header
- freq.Header.Set(IntNoForwardingHeaderName, "true")
- handler.ServeHTTP(w, freq)
- })
-
- return handler, mux
- }
+func (c *Core) SetClusterHandler(handler http.Handler) {
+ c.clusterHandler = handler
}
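Note: the WrapHandlerForClustering indirection (a setup func returning both the original and a cluster-wrapped handler) is gone; the core now receives the HTTP handler directly, and forwarded-request handling moves into the request-forwarding code. The wiring change for callers, sketched under the assumption that handler is Vault's top-level http.Handler:

    // Before:
    // core.SetClusterSetupFuncs(vault.WrapHandlerForClustering(handler, logger))

    // After: the core just needs the handler itself.
    core.SetClusterHandler(handler)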
diff --git a/vendor/github.com/hashicorp/vault/vault/cluster_test.go b/vendor/github.com/hashicorp/vault/vault/cluster_test.go
index d3ee512..9bc5b69 100644
--- a/vendor/github.com/hashicorp/vault/vault/cluster_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/cluster_test.go
@@ -6,7 +6,6 @@ import (
"fmt"
"net"
"net/http"
- "os"
"testing"
"time"
@@ -14,6 +13,7 @@ import (
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/physical/inmem"
log "github.com/mgutz/logxi/v1"
)
@@ -44,9 +44,17 @@ func TestClusterHAFetching(t *testing.T) {
redirect := "http://127.0.0.1:8200"
+ inm, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ inmha, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
c, err := NewCore(&CoreConfig{
- Physical: physical.NewInmemHA(logger),
- HAPhysical: physical.NewInmemHA(logger),
+ Physical: inm,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: redirect,
DisableMlock: true,
})
@@ -86,12 +94,12 @@ func TestCluster_ListenForRequests(t *testing.T) {
// Make this nicer for tests
manualStepDownSleepPeriod = 5 * time.Second
- cores := TestCluster(t, []http.Handler{nil, nil, nil}, nil, false)
- for _, core := range cores {
- defer core.CloseListeners()
- }
-
- root := cores[0].Root
+ cluster := NewTestCluster(t, nil, &TestClusterOptions{
+ KeepStandbysSealed: true,
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+ cores := cluster.Cores
// Wait for core to become active
TestWaitActive(t, cores[0].Core)
@@ -116,16 +124,16 @@ func TestCluster_ListenForRequests(t *testing.T) {
t.Fatalf("%s not a TCP port", tcpAddr.String())
}
- conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", tcpAddr.IP.String(), tcpAddr.Port+10), tlsConfig)
+ conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", tcpAddr.IP.String(), tcpAddr.Port+105), tlsConfig)
if err != nil {
if expectFail {
- t.Logf("testing %s:%d unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+10)
+ t.Logf("testing %s:%d unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105)
continue
}
t.Fatalf("error: %v\nlisteners are\n%#v\n%#v\n", err, cores[0].Listeners[0], cores[0].Listeners[1])
}
if expectFail {
- t.Fatalf("testing %s:%d not unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+10)
+ t.Fatalf("testing %s:%d not unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105)
}
err = conn.Handshake()
if err != nil {
@@ -138,7 +146,7 @@ func TestCluster_ListenForRequests(t *testing.T) {
case connState.NegotiatedProtocol != "h2" || !connState.NegotiatedProtocolIsMutual:
t.Fatal("bad protocol negotiation")
}
- t.Logf("testing %s:%d successful", tcpAddr.IP.String(), tcpAddr.Port+10)
+ t.Logf("testing %s:%d successful", tcpAddr.IP.String(), tcpAddr.Port+105)
}
}
@@ -148,7 +156,7 @@ func TestCluster_ListenForRequests(t *testing.T) {
err := cores[0].StepDown(&logical.Request{
Operation: logical.UpdateOperation,
Path: "sys/step-down",
- ClientToken: root,
+ ClientToken: cluster.RootToken,
})
if err != nil {
t.Fatal(err)
@@ -163,7 +171,7 @@ func TestCluster_ListenForRequests(t *testing.T) {
time.Sleep(manualStepDownSleepPeriod)
checkListenersFunc(false)
- err = cores[0].Seal(root)
+ err = cores[0].Seal(cluster.RootToken)
if err != nil {
t.Fatal(err)
}
@@ -176,51 +184,39 @@ func TestCluster_ForwardRequests(t *testing.T) {
// Make this nicer for tests
manualStepDownSleepPeriod = 5 * time.Second
- testCluster_ForwardRequestsCommon(t, false)
- testCluster_ForwardRequestsCommon(t, true)
- os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "")
+ testCluster_ForwardRequestsCommon(t)
}
-func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) {
- if rpc {
- os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "1")
- } else {
- os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "")
- }
-
- handler1 := http.NewServeMux()
- handler1.HandleFunc("/core1", func(w http.ResponseWriter, req *http.Request) {
+func testCluster_ForwardRequestsCommon(t *testing.T) {
+ cluster := NewTestCluster(t, nil, nil)
+ cores := cluster.Cores
+ cores[0].Handler.(*http.ServeMux).HandleFunc("/core1", func(w http.ResponseWriter, req *http.Request) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(201)
w.Write([]byte("core1"))
})
- handler2 := http.NewServeMux()
- handler2.HandleFunc("/core2", func(w http.ResponseWriter, req *http.Request) {
+ cores[1].Handler.(*http.ServeMux).HandleFunc("/core2", func(w http.ResponseWriter, req *http.Request) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(202)
w.Write([]byte("core2"))
})
- handler3 := http.NewServeMux()
- handler3.HandleFunc("/core3", func(w http.ResponseWriter, req *http.Request) {
+ cores[2].Handler.(*http.ServeMux).HandleFunc("/core3", func(w http.ResponseWriter, req *http.Request) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(203)
w.Write([]byte("core3"))
})
+ cluster.Start()
+ defer cluster.Cleanup()
- cores := TestCluster(t, []http.Handler{handler1, handler2, handler3}, nil, true)
- for _, core := range cores {
- defer core.CloseListeners()
- }
-
- root := cores[0].Root
+ root := cluster.RootToken
// Wait for core to become active
TestWaitActive(t, cores[0].Core)
// Test forwarding a request. Since we're going directly from core to core
// with no fallback we know that if it worked, request handling is working
- testCluster_ForwardRequests(t, cores[1], "core1")
- testCluster_ForwardRequests(t, cores[2], "core1")
+ testCluster_ForwardRequests(t, cores[1], root, "core1")
+ testCluster_ForwardRequests(t, cores[2], root, "core1")
//
// Now we do a bunch of round-robining. The point is to make sure that as
@@ -245,8 +241,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) {
})
time.Sleep(clusterTestPausePeriod)
TestWaitActive(t, cores[1].Core)
- testCluster_ForwardRequests(t, cores[0], "core2")
- testCluster_ForwardRequests(t, cores[2], "core2")
+ testCluster_ForwardRequests(t, cores[0], root, "core2")
+ testCluster_ForwardRequests(t, cores[2], root, "core2")
// Ensure active core is cores[2] and test
err = cores[1].StepDown(&logical.Request{
@@ -265,8 +261,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) {
})
time.Sleep(clusterTestPausePeriod)
TestWaitActive(t, cores[2].Core)
- testCluster_ForwardRequests(t, cores[0], "core3")
- testCluster_ForwardRequests(t, cores[1], "core3")
+ testCluster_ForwardRequests(t, cores[0], root, "core3")
+ testCluster_ForwardRequests(t, cores[1], root, "core3")
// Ensure active core is cores[0] and test
err = cores[2].StepDown(&logical.Request{
@@ -285,8 +281,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) {
})
time.Sleep(clusterTestPausePeriod)
TestWaitActive(t, cores[0].Core)
- testCluster_ForwardRequests(t, cores[1], "core1")
- testCluster_ForwardRequests(t, cores[2], "core1")
+ testCluster_ForwardRequests(t, cores[1], root, "core1")
+ testCluster_ForwardRequests(t, cores[2], root, "core1")
// Ensure active core is cores[1] and test
err = cores[0].StepDown(&logical.Request{
@@ -305,8 +301,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) {
})
time.Sleep(clusterTestPausePeriod)
TestWaitActive(t, cores[1].Core)
- testCluster_ForwardRequests(t, cores[0], "core2")
- testCluster_ForwardRequests(t, cores[2], "core2")
+ testCluster_ForwardRequests(t, cores[0], root, "core2")
+ testCluster_ForwardRequests(t, cores[2], root, "core2")
// Ensure active core is cores[2] and test
err = cores[1].StepDown(&logical.Request{
@@ -325,11 +321,11 @@ func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) {
})
time.Sleep(clusterTestPausePeriod)
TestWaitActive(t, cores[2].Core)
- testCluster_ForwardRequests(t, cores[0], "core3")
- testCluster_ForwardRequests(t, cores[1], "core3")
+ testCluster_ForwardRequests(t, cores[0], root, "core3")
+ testCluster_ForwardRequests(t, cores[1], root, "core3")
}
-func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID string) {
+func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, rootToken, remoteCoreID string) {
standby, err := c.Standby()
if err != nil {
t.Fatal(err)
@@ -339,8 +335,8 @@ func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID
}
// We need to call Leader as that refreshes the connection info
- isLeader, _, err := c.Leader()
+ isLeader, _, _, err := c.Leader()
if err != nil {
t.Fatal(err)
}
if isLeader {
@@ -352,7 +349,7 @@ func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID
if err != nil {
t.Fatal(err)
}
- req.Header.Add("X-Vault-Token", c.Root)
+ req.Header.Add("X-Vault-Token", rootToken)
statusCode, header, respBytes, err := c.ForwardRequest(req)
if err != nil {
@@ -386,3 +383,37 @@ func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID
}
}
}
+
+func TestCluster_CustomCipherSuites(t *testing.T) {
+ cluster := NewTestCluster(t, &CoreConfig{
+ ClusterCipherSuites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
+ }, nil)
+ cluster.Start()
+ defer cluster.Cleanup()
+ core := cluster.Cores[0]
+
+ // Wait for core to become active
+ TestWaitActive(t, core.Core)
+
+ tlsConf, err := core.Core.ClusterTLSConfig()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", core.Listeners[0].Address.IP.String(), core.Listeners[0].Address.Port+105), tlsConf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer conn.Close()
+ err = conn.Handshake()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if conn.ConnectionState().CipherSuite != tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 {
+ var availCiphers string
+ for _, cipher := range core.clusterCipherSuites {
+ availCiphers += fmt.Sprintf("%x ", cipher)
+ }
+ t.Fatalf("got bad negotiated cipher %x, core-set suites are %s", conn.ConnectionState().CipherSuite, availCiphers)
+ }
+}
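Note: the new TestCluster_CustomCipherSuites exercises CoreConfig.ClusterCipherSuites, which NewCore parses with tlsutil.ParseCiphers (see the core.go hunk below). A minimal sketch of that parsing step, assuming ParseCiphers maps the comma-separated suite names to their crypto/tls uint16 constants:

    suites, err := tlsutil.ParseCiphers(
        "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA")
    if err != nil {
        // an unrecognized suite name was supplied
    }
    // suites is a []uint16, e.g. tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
    // ready to assign to tls.Config.CipherSuites.
    _ = suites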
diff --git a/vendor/github.com/hashicorp/vault/vault/core.go b/vendor/github.com/hashicorp/vault/vault/core.go
index 396a2bc..1259c03 100644
--- a/vendor/github.com/hashicorp/vault/vault/core.go
+++ b/vendor/github.com/hashicorp/vault/vault/core.go
@@ -10,6 +10,7 @@ import (
"net"
"net/http"
"net/url"
+ "path/filepath"
"sync"
"time"
@@ -28,9 +29,12 @@ import (
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/helper/mlock"
+ "github.com/hashicorp/vault/helper/reload"
+ "github.com/hashicorp/vault/helper/tlsutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/shamir"
+ cache "github.com/patrickmn/go-cache"
)
const (
@@ -50,6 +54,9 @@ const (
// HA lock if an error is encountered
lockRetryInterval = 10 * time.Second
+ // leaderCheckInterval is how often a standby checks for a new leader
+ leaderCheckInterval = 2500 * time.Millisecond
+
// keyRotateCheckInterval is how often a standby checks for a key
// rotation taking place.
keyRotateCheckInterval = 30 * time.Second
@@ -98,9 +105,6 @@ var (
LastRemoteWAL = lastRemoteWALImpl
)
-// ReloadFunc are functions that are called when a reload is requested.
-type ReloadFunc func(map[string]string) error
-
// NonFatalError is an error that can be returned during NewCore that should be
// displayed but not cause a program exit
type NonFatalError struct {
@@ -268,9 +272,9 @@ type Core struct {
cachingDisabled bool
// reloadFuncs is a map containing reload functions
- reloadFuncs map[string][]ReloadFunc
+ reloadFuncs map[string][]reload.ReloadFunc
- // reloadFuncsLock controlls access to the funcs
+ // reloadFuncsLock controls access to the funcs
reloadFuncsLock sync.RWMutex
// wrappingJWTKey is the key used for generating JWTs containing response
@@ -282,6 +286,8 @@ type Core struct {
//
// Name
clusterName string
+ // Specific cipher suites to use for clustering, if any
+ clusterCipherSuites []uint16
// Used to modify cluster parameters
clusterParamsLock sync.RWMutex
// The private key stored in the barrier used for establishing
@@ -293,8 +299,8 @@ type Core struct {
localClusterParsedCert *x509.Certificate
// The TCP addresses we should use for clustering
clusterListenerAddrs []*net.TCPAddr
- // The setup function that gives us the handler to use
- clusterHandlerSetupFunc func() (http.Handler, http.Handler)
+ // The handler to use for request forwarding
+ clusterHandler http.Handler
// Tracks whether cluster listeners are running, e.g. it's safe to send a
// shutdown down the channel
clusterListenersRunning bool
@@ -303,8 +309,6 @@ type Core struct {
// Shutdown success channel. We need this to be done serially to ensure
// that binds are removed before they might be reinstated.
clusterListenerShutdownSuccessCh chan struct{}
- // Connection info containing a client and a current active address
- requestForwardingConnection *activeConnection
// Write lock used to ensure that we don't have multiple connections adjust
// this value at the same time
requestForwardingConnectionLock sync.RWMutex
@@ -313,16 +317,25 @@ type Core struct {
clusterLeaderUUID string
// Most recent leader redirect addr
clusterLeaderRedirectAddr string
+ // Most recent leader cluster addr
+ clusterLeaderClusterAddr string
// Lock for the cluster leader values
clusterLeaderParamsLock sync.RWMutex
+ // Info on cluster members
+ clusterPeerClusterAddrsCache *cache.Cache
// The grpc Server that handles server RPC calls
rpcServer *grpc.Server
+ // The context for the client
+ rpcClientConnContext context.Context
// The function for canceling the client connection
rpcClientConnCancelFunc context.CancelFunc
// The grpc ClientConn for RPC calls
rpcClientConn *grpc.ClientConn
// The grpc forwarding client
- rpcForwardingClient RequestForwardingClient
+ rpcForwardingClient *forwardingClient
+
+ // CORS Information
+ corsConfig *CORSConfig
// replicationState keeps the current replication state cached for quick
// lookup
@@ -330,6 +343,22 @@ type Core struct {
// uiEnabled indicates whether Vault Web UI is enabled or not
uiEnabled bool
+
+ // rawEnabled indicates whether the Raw endpoint is enabled
+ rawEnabled bool
+
+ // pluginDirectory is the location vault will look for plugin binaries
+ pluginDirectory string
+
+ // pluginCatalog is used to manage plugin configurations
+ pluginCatalog *PluginCatalog
+
+ enableMlock bool
+
+ // This can be used to trigger operations to stop running when Vault is
+ // going to be shut down, stepped down, or sealed
+ requestContext context.Context
+ requestContextCancelFunc context.CancelFunc
}
// CoreConfig is used to parameterize a core
@@ -372,9 +401,16 @@ type CoreConfig struct {
ClusterName string `json:"cluster_name" structs:"cluster_name" mapstructure:"cluster_name"`
+ ClusterCipherSuites string `json:"cluster_cipher_suites" structs:"cluster_cipher_suites" mapstructure:"cluster_cipher_suites"`
+
EnableUI bool `json:"ui" structs:"ui" mapstructure:"ui"`
- ReloadFuncs *map[string][]ReloadFunc
+ // Enable the raw endpoint
+ EnableRaw bool `json:"enable_raw" structs:"enable_raw" mapstructure:"enable_raw"`
+
+ PluginDirectory string `json:"plugin_directory" structs:"plugin_directory" mapstructure:"plugin_directory"`
+
+ ReloadFuncs *map[string][]reload.ReloadFunc
ReloadFuncsLock *sync.RWMutex
}
@@ -430,11 +466,30 @@ func NewCore(conf *CoreConfig) (*Core, error) {
clusterName: conf.ClusterName,
clusterListenerShutdownCh: make(chan struct{}),
clusterListenerShutdownSuccessCh: make(chan struct{}),
+ clusterPeerClusterAddrsCache: cache.New(3*heartbeatInterval, time.Second),
+ enableMlock: !conf.DisableMlock,
+ rawEnabled: conf.EnableRaw,
}
+ if conf.ClusterCipherSuites != "" {
+ suites, err := tlsutil.ParseCiphers(conf.ClusterCipherSuites)
+ if err != nil {
+ return nil, errwrap.Wrapf("error parsing cluster cipher suites: {{err}}", err)
+ }
+ c.clusterCipherSuites = suites
+ }
+
+ // Give the CORS config a core reference; loadCORSConfig loads the rest.
+ c.corsConfig = &CORSConfig{core: c}
+
+ _, txnOK := conf.Physical.(physical.Transactional)
// Wrap the physical backend in a cache layer if enabled and not already wrapped
if _, isCache := conf.Physical.(*physical.Cache); !conf.DisableCache && !isCache {
- c.physical = physical.NewCache(conf.Physical, conf.CacheSize, conf.Logger)
+ if txnOK {
+ c.physical = physical.NewTransactionalCache(conf.Physical, conf.CacheSize, conf.Logger)
+ } else {
+ c.physical = physical.NewCache(conf.Physical, conf.CacheSize, conf.Logger)
+ }
}
if !conf.DisableMlock {
@@ -453,8 +508,15 @@ func NewCore(conf *CoreConfig) (*Core, error) {
}
}
- // Construct a new AES-GCM barrier
var err error
+ if conf.PluginDirectory != "" {
+ c.pluginDirectory, err = filepath.Abs(conf.PluginDirectory)
+ if err != nil {
+ return nil, fmt.Errorf("core setup failed, could not verify plugin directory: %v", err)
+ }
+ }
+
+ // Construct a new AES-GCM barrier
c.barrier, err = NewAESGCMBarrier(c.physical)
if err != nil {
return nil, fmt.Errorf("barrier setup failed: %v", err)
@@ -468,7 +530,7 @@ func NewCore(conf *CoreConfig) (*Core, error) {
// the caller can share state
conf.ReloadFuncsLock = &c.reloadFuncsLock
c.reloadFuncsLock.Lock()
- c.reloadFuncs = make(map[string][]ReloadFunc)
+ c.reloadFuncs = make(map[string][]reload.ReloadFunc)
c.reloadFuncsLock.Unlock()
conf.ReloadFuncs = &c.reloadFuncs
@@ -477,13 +539,17 @@ func NewCore(conf *CoreConfig) (*Core, error) {
for k, f := range conf.LogicalBackends {
logicalBackends[k] = f
}
- _, ok := logicalBackends["generic"]
+ _, ok := logicalBackends["kv"]
if !ok {
- logicalBackends["generic"] = PassthroughBackendFactory
+ logicalBackends["kv"] = PassthroughBackendFactory
}
logicalBackends["cubbyhole"] = CubbyholeBackendFactory
logicalBackends["system"] = func(config *logical.BackendConfig) (logical.Backend, error) {
- return NewSystemBackend(c, config)
+ b := NewSystemBackend(c)
+ if err := b.Setup(config); err != nil {
+ return nil, err
+ }
+ return b, nil
}
c.logicalBackends = logicalBackends
@@ -519,14 +585,27 @@ func NewCore(conf *CoreConfig) (*Core, error) {
// problem. It is only used to gracefully quit in the case of HA so that failover
// happens as quickly as possible.
func (c *Core) Shutdown() error {
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
- if c.sealed {
- return nil
+ c.stateLock.RLock()
+ // Tell any requests that know about this to stop
+ if c.requestContextCancelFunc != nil {
+ c.requestContextCancelFunc()
}
+ c.stateLock.RUnlock()
// Seal the Vault, causes a leader stepdown
- return c.sealInternal()
+ retChan := make(chan error)
+ go func() {
+ c.stateLock.Lock()
+ defer c.stateLock.Unlock()
+ retChan <- c.sealInternal()
+ }()
+
+ return <-retChan
+}
+
+// CORSConfig returns the current CORS configuration
+func (c *Core) CORSConfig() *CORSConfig {
+ return c.corsConfig
}
// LookupToken returns the properties of the token from the token store. This
@@ -637,24 +716,27 @@ func (c *Core) checkToken(req *logical.Request) (*logical.Auth, *TokenEntry, err
panic("unreachable code")
}
}
+ // Create the auth response
+ auth := &logical.Auth{
+ ClientToken: req.ClientToken,
+ Accessor: req.ClientTokenAccessor,
+ Policies: te.Policies,
+ Metadata: te.Meta,
+ DisplayName: te.DisplayName,
+ }
// Check the standard non-root ACLs. Return the token entry if it's not
// allowed so we can decrement the use count.
allowed, rootPrivs := acl.AllowOperation(req)
if !allowed {
- return nil, te, logical.ErrPermissionDenied
+ // Return auth for audit logging even if not allowed
+ return auth, te, logical.ErrPermissionDenied
}
if rootPath && !rootPrivs {
- return nil, te, logical.ErrPermissionDenied
+ // Return auth for audit logging even if not allowed
+ return auth, te, logical.ErrPermissionDenied
}
- // Create the auth response
- auth := &logical.Auth{
- ClientToken: req.ClientToken,
- Policies: te.Policies,
- Metadata: te.Meta,
- DisplayName: te.DisplayName,
- }
return auth, te, nil
}
@@ -673,49 +755,50 @@ func (c *Core) Standby() (bool, error) {
}
// Leader is used to get the current active leader
-func (c *Core) Leader() (isLeader bool, leaderAddr string, err error) {
+func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err error) {
c.stateLock.RLock()
defer c.stateLock.RUnlock()
// Check if sealed
if c.sealed {
- return false, "", consts.ErrSealed
+ return false, "", "", consts.ErrSealed
}
// Check if HA enabled
if c.ha == nil {
- return false, "", ErrHANotEnabled
+ return false, "", "", ErrHANotEnabled
}
// Check if we are the leader
if !c.standby {
- return true, c.redirectAddr, nil
+ return true, c.redirectAddr, c.clusterAddr, nil
}
// Initialize a lock
lock, err := c.ha.LockWith(coreLockPath, "read")
if err != nil {
- return false, "", err
+ return false, "", "", err
}
// Read the value
held, leaderUUID, err := lock.Value()
if err != nil {
- return false, "", err
+ return false, "", "", err
}
if !held {
- return false, "", nil
+ return false, "", "", nil
}
c.clusterLeaderParamsLock.RLock()
localLeaderUUID := c.clusterLeaderUUID
localRedirAddr := c.clusterLeaderRedirectAddr
+ localClusterAddr := c.clusterLeaderClusterAddr
c.clusterLeaderParamsLock.RUnlock()
// If the leader hasn't changed, return the cached value; nothing changes
// mid-leadership, and the barrier caches anyways
if leaderUUID == localLeaderUUID && localRedirAddr != "" {
- return false, localRedirAddr, nil
+ return false, localRedirAddr, localClusterAddr, nil
}
c.logger.Trace("core: found new active node information, refreshing")
@@ -725,16 +808,16 @@ func (c *Core) Leader() (isLeader bool, leaderAddr string, err error) {
// Validate base conditions again
if leaderUUID == c.clusterLeaderUUID && c.clusterLeaderRedirectAddr != "" {
- return false, localRedirAddr, nil
+ return false, localRedirAddr, localClusterAddr, nil
}
key := coreLeaderPrefix + leaderUUID
entry, err := c.barrier.Get(key)
if err != nil {
- return false, "", err
+ return false, "", "", err
}
if entry == nil {
- return false, "", nil
+ return false, "", "", nil
}
var oldAdv bool
@@ -754,23 +837,24 @@ func (c *Core) Leader() (isLeader bool, leaderAddr string, err error) {
// Ensure we are using current values
err = c.loadLocalClusterTLS(adv)
if err != nil {
- return false, "", err
+ return false, "", "", err
}
// This will ensure that we both have a connection at the ready and that
// the address is the current known value
err = c.refreshRequestForwardingConnection(adv.ClusterAddr)
if err != nil {
- return false, "", err
+ return false, "", "", err
}
}
// Don't set these until everything has been parsed successfully or we'll
// never try again
c.clusterLeaderRedirectAddr = adv.RedirectAddr
+ c.clusterLeaderClusterAddr = adv.ClusterAddr
c.clusterLeaderUUID = leaderUUID
- return false, adv.RedirectAddr, nil
+ return false, adv.RedirectAddr, adv.ClusterAddr, nil
}
// SecretProgress returns the number of keys provided so far
@@ -956,13 +1040,14 @@ func (c *Core) unsealInternal(masterKey []byte) (bool, error) {
func (c *Core) SealWithRequest(req *logical.Request) error {
defer metrics.MeasureSince([]string{"core", "seal-with-request"}, time.Now())
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
+ c.stateLock.RLock()
if c.sealed {
+ c.stateLock.RUnlock()
return nil
}
+ // This will unlock the read lock
return c.sealInitCommon(req)
}
@@ -971,10 +1056,10 @@ func (c *Core) SealWithRequest(req *logical.Request) error {
func (c *Core) Seal(token string) error {
defer metrics.MeasureSince([]string{"core", "seal"}, time.Now())
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
+ c.stateLock.RLock()
if c.sealed {
+ c.stateLock.RUnlock()
return nil
}
@@ -984,17 +1069,19 @@ func (c *Core) Seal(token string) error {
ClientToken: token,
}
+ // This will unlock the read lock
return c.sealInitCommon(req)
}
// sealInitCommon is common logic for Seal and SealWithRequest and is used to
// re-seal the Vault. This requires the Vault to be unsealed again to perform
-// any further operations.
+// any further operations. Note: this function will read-unlock the state lock.
func (c *Core) sealInitCommon(req *logical.Request) (retErr error) {
defer metrics.MeasureSince([]string{"core", "seal-internal"}, time.Now())
if req == nil {
retErr = multierror.Append(retErr, errors.New("nil request to seal"))
+ c.stateLock.RUnlock()
return retErr
}
@@ -1009,9 +1096,11 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) {
if c.standby {
c.logger.Error("core: vault cannot seal when in standby mode; please restart instead")
retErr = multierror.Append(retErr, errors.New("vault cannot seal when in standby mode; please restart instead"))
+ c.stateLock.RUnlock()
return retErr
}
retErr = multierror.Append(retErr, err)
+ c.stateLock.RUnlock()
return retErr
}
@@ -1026,6 +1115,7 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) {
if err := c.auditBroker.LogRequest(auth, req, c.auditedHeaders, nil); err != nil {
c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err)
retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
+ c.stateLock.RUnlock()
return retErr
}
@@ -1036,11 +1126,13 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) {
if err != nil {
c.logger.Error("core: failed to use token", "error", err)
retErr = multierror.Append(retErr, ErrInternalError)
+ c.stateLock.RUnlock()
return retErr
}
if te == nil {
// Token is no longer valid
retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
+ c.stateLock.RUnlock()
return retErr
}
if te.NumUses == -1 {
@@ -1059,19 +1151,36 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) {
allowed, rootPrivs := acl.AllowOperation(req)
if !allowed {
retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
+ c.stateLock.RUnlock()
return retErr
}
// We always require root privileges for this operation
if !rootPrivs {
retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
+ c.stateLock.RUnlock()
return retErr
}
+ // Tell any requests that know about this to stop
+ if c.requestContextCancelFunc != nil {
+ c.requestContextCancelFunc()
+ }
+
+ // Unlock from the request handling
+ c.stateLock.RUnlock()
+
// Seal the Vault
- err = c.sealInternal()
- if err != nil {
- retErr = multierror.Append(retErr, err)
+ retChan := make(chan error)
+ go func() {
+ c.stateLock.Lock()
+ defer c.stateLock.Unlock()
+ retChan <- c.sealInternal()
+ }()
+
+ funcErr := <-retChan
+ if funcErr != nil {
+ retErr = multierror.Append(retErr, funcErr)
}
return retErr
@@ -1086,8 +1195,8 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) {
return retErr
}
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
if c.sealed {
return nil
}
@@ -1165,7 +1274,11 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) {
// sealInternal is an internal method used to seal the vault. It does not do
// any authorization checking. The stateLock must be held prior to calling.
func (c *Core) sealInternal() error {
- // Enable that we are sealed to prevent furthur transactions
+ if c.sealed {
+ return nil
+ }
+
+ // Enable that we are sealed to prevent further transactions
c.sealed = true
c.logger.Debug("core: marked as sealed")
@@ -1187,6 +1300,8 @@ func (c *Core) sealInternal() error {
// Signal the standby goroutine to shutdown, wait for completion
close(c.standbyStopCh)
+ c.requestContext = nil
+
// Release the lock while we wait to avoid deadlocking
c.stateLock.Unlock()
<-c.standbyDoneCh
@@ -1224,6 +1339,8 @@ func (c *Core) postUnseal() (retErr error) {
defer func() {
if retErr != nil {
c.preSeal()
+ } else {
+ c.requestContext, c.requestContextCancelFunc = context.WithCancel(context.Background())
}
}()
c.logger.Info("core: post-unseal setup starting")
@@ -1250,16 +1367,19 @@ func (c *Core) postUnseal() (retErr error) {
if err := c.ensureWrappingKey(); err != nil {
return err
}
+ if err := c.setupPluginCatalog(); err != nil {
+ return err
+ }
if err := c.loadMounts(); err != nil {
return err
}
if err := c.setupMounts(); err != nil {
return err
}
- if err := c.startRollback(); err != nil {
+ if err := c.setupPolicyStore(); err != nil {
return err
}
- if err := c.setupPolicyStore(); err != nil {
+ if err := c.loadCORSConfig(); err != nil {
return err
}
if err := c.loadCredentials(); err != nil {
@@ -1268,6 +1388,9 @@ func (c *Core) postUnseal() (retErr error) {
if err := c.setupCredentials(); err != nil {
return err
}
+ if err := c.startRollback(); err != nil {
+ return err
+ }
if err := c.setupExpiration(); err != nil {
return err
}
@@ -1280,6 +1403,7 @@ func (c *Core) postUnseal() (retErr error) {
if err := c.setupAuditedHeadersConfig(); err != nil {
return err
}
+
if c.ha != nil {
if err := c.startClusterListener(); err != nil {
return err
@@ -1369,9 +1493,15 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
keyRotateDone := make(chan struct{})
keyRotateStop := make(chan struct{})
go c.periodicCheckKeyUpgrade(keyRotateDone, keyRotateStop)
+ // Monitor for new leadership
+ checkLeaderDone := make(chan struct{})
+ checkLeaderStop := make(chan struct{})
+ go c.periodicLeaderRefresh(checkLeaderDone, checkLeaderStop)
defer func() {
close(keyRotateStop)
<-keyRotateDone
+ close(checkLeaderStop)
+ <-checkLeaderDone
}()
for {
@@ -1382,11 +1512,6 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
default:
}
- // Clear forwarding clients
- c.requestForwardingConnectionLock.Lock()
- c.clearForwardingClients()
- c.requestForwardingConnectionLock.Unlock()
-
// Create a lock
uuid, err := uuid.GenerateUUID()
if err != nil {
@@ -1498,6 +1623,11 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
c.logger.Error("core: clearing leader advertisement failed", "error", err)
}
+ // Tell any requests that know about this to stop
+ if c.requestContextCancelFunc != nil {
+ c.requestContextCancelFunc()
+ }
+
// Attempt the pre-seal process
c.stateLock.Lock()
c.standby = true
@@ -1520,6 +1650,22 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
}
}
+// This checks the leader periodically to ensure that we switch RPC to a new
+// leader quickly. Leader() already contains logic to keep these checks cheap
+// and to avoid more traffic than needed, so we just call it and ignore the
+// result.
+func (c *Core) periodicLeaderRefresh(doneCh, stopCh chan struct{}) {
+ defer close(doneCh)
+ for {
+ select {
+ case <-time.After(leaderCheckInterval):
+ c.Leader()
+ case <-stopCh:
+ return
+ }
+ }
+}
+
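Note: periodicLeaderRefresh follows the same stop/done channel handshake as periodicCheckKeyUpgrade below: the owner closes the stop channel to request shutdown, and the goroutine closes the done channel (via its deferred close) to confirm it has exited. Reduced to a sketch:

    checkLeaderDone := make(chan struct{})
    checkLeaderStop := make(chan struct{})
    go c.periodicLeaderRefresh(checkLeaderDone, checkLeaderStop)

    // shutdown: signal the goroutine, then wait for it to drain
    close(checkLeaderStop)
    <-checkLeaderDone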
// periodicCheckKeyUpgrade is used to watch for key rotation events as a standby
func (c *Core) periodicCheckKeyUpgrade(doneCh, stopCh chan struct{}) {
defer close(doneCh)
@@ -1590,6 +1736,15 @@ func (c *Core) scheduleUpgradeCleanup() error {
// Schedule cleanup for all of them
time.AfterFunc(keyRotateGracePeriod, func() {
+ sealed, err := c.barrier.Sealed()
+ if err != nil {
+ c.logger.Warn("core: failed to check barrier status at upgrade cleanup time")
+ return
+ }
+ if sealed {
+ c.logger.Warn("core: barrier sealed at upgrade cleanup time")
+ return
+ }
for _, upgrade := range upgrades {
path := fmt.Sprintf("%s%s", keyringUpgradePrefix, upgrade)
if err := c.barrier.Delete(path); err != nil {
@@ -1743,11 +1898,9 @@ func (c *Core) emitMetrics(stopCh chan struct{}) {
}
func (c *Core) ReplicationState() consts.ReplicationState {
- var state consts.ReplicationState
- c.clusterParamsLock.RLock()
- state = c.replicationState
- c.clusterParamsLock.RUnlock()
- return state
+ c.stateLock.RLock()
+ defer c.stateLock.RUnlock()
+ return c.replicationState
}
func (c *Core) SealAccess() *SealAccess {
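Note: Seal, SealWithRequest, and Shutdown now share one pattern: authorization and state checks run under the state read lock, the read lock is dropped, and the actual seal runs in a goroutine that takes the write lock, with the result passed back over a channel. The recurring shape, reduced to a sketch:

    c.stateLock.RLock()
    // ... checks that only need read access ...
    c.stateLock.RUnlock() // must release before the writer below can proceed

    retChan := make(chan error)
    go func() {
        c.stateLock.Lock()
        defer c.stateLock.Unlock()
        retChan <- c.sealInternal()
    }()
    err := <-retChan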
diff --git a/vendor/github.com/hashicorp/vault/vault/core_test.go b/vendor/github.com/hashicorp/vault/vault/core_test.go
index ced18cd..b940254 100644
--- a/vendor/github.com/hashicorp/vault/vault/core_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/core_test.go
@@ -12,6 +12,7 @@ import (
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/physical/inmem"
log "github.com/mgutz/logxi/v1"
)
@@ -23,12 +24,17 @@ var (
func TestNewCore_badRedirectAddr(t *testing.T) {
logger = logformat.NewVaultLogger(log.LevelTrace)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+
conf := &CoreConfig{
RedirectAddr: "127.0.0.1:8200",
- Physical: physical.NewInmem(logger),
+ Physical: inm,
DisableMlock: true,
}
- _, err := NewCore(conf)
+ _, err = NewCore(conf)
if err == nil {
t.Fatal("should error")
}
@@ -974,12 +980,19 @@ func TestCore_Standby_Seal(t *testing.T) {
// Create the first core and initialize it
logger = logformat.NewVaultLogger(log.LevelTrace)
- inm := physical.NewInmem(logger)
- inmha := physical.NewInmemHA(logger)
+ inm, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ inmha, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+
redirectOriginal := "http://127.0.0.1:8200"
core, err := NewCore(&CoreConfig{
Physical: inm,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: redirectOriginal,
DisableMlock: true,
})
@@ -1006,7 +1019,7 @@ func TestCore_Standby_Seal(t *testing.T) {
TestWaitActive(t, core)
// Check the leader is local
- isLeader, advertise, err := core.Leader()
+ isLeader, advertise, _, err := core.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1021,7 +1034,7 @@ func TestCore_Standby_Seal(t *testing.T) {
redirectOriginal2 := "http://127.0.0.1:8500"
core2, err := NewCore(&CoreConfig{
Physical: inm,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: redirectOriginal2,
DisableMlock: true,
})
@@ -1053,7 +1066,7 @@ func TestCore_Standby_Seal(t *testing.T) {
}
// Check the leader is not local
- isLeader, advertise, err = core2.Leader()
+ isLeader, advertise, _, err = core2.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1085,12 +1098,19 @@ func TestCore_StepDown(t *testing.T) {
// Create the first core and initialize it
logger = logformat.NewVaultLogger(log.LevelTrace)
- inm := physical.NewInmem(logger)
- inmha := physical.NewInmemHA(logger)
+ inm, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ inmha, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+
redirectOriginal := "http://127.0.0.1:8200"
core, err := NewCore(&CoreConfig{
Physical: inm,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: redirectOriginal,
DisableMlock: true,
})
@@ -1117,7 +1137,7 @@ func TestCore_StepDown(t *testing.T) {
TestWaitActive(t, core)
// Check the leader is local
- isLeader, advertise, err := core.Leader()
+ isLeader, advertise, _, err := core.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1132,7 +1152,7 @@ func TestCore_StepDown(t *testing.T) {
redirectOriginal2 := "http://127.0.0.1:8500"
core2, err := NewCore(&CoreConfig{
Physical: inm,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: redirectOriginal2,
DisableMlock: true,
})
@@ -1164,7 +1184,7 @@ func TestCore_StepDown(t *testing.T) {
}
// Check the leader is not local
- isLeader, advertise, err = core2.Leader()
+ isLeader, advertise, _, err = core2.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1205,7 +1225,7 @@ func TestCore_StepDown(t *testing.T) {
}
// Check the leader is core2
- isLeader, advertise, err = core2.Leader()
+ isLeader, advertise, _, err = core2.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1217,7 +1237,7 @@ func TestCore_StepDown(t *testing.T) {
}
// Check the leader is not local
- isLeader, advertise, err = core.Leader()
+ isLeader, advertise, _, err = core.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1248,7 +1268,7 @@ func TestCore_StepDown(t *testing.T) {
}
// Check the leader is core1
- isLeader, advertise, err = core.Leader()
+ isLeader, advertise, _, err = core.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1260,7 +1280,7 @@ func TestCore_StepDown(t *testing.T) {
}
// Check the leader is not local
- isLeader, advertise, err = core2.Leader()
+ isLeader, advertise, _, err = core2.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1276,12 +1296,19 @@ func TestCore_CleanLeaderPrefix(t *testing.T) {
// Create the first core and initialize it
logger = logformat.NewVaultLogger(log.LevelTrace)
- inm := physical.NewInmem(logger)
- inmha := physical.NewInmemHA(logger)
+ inm, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ inmha, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+
redirectOriginal := "http://127.0.0.1:8200"
core, err := NewCore(&CoreConfig{
Physical: inm,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: redirectOriginal,
DisableMlock: true,
})
@@ -1335,7 +1362,7 @@ func TestCore_CleanLeaderPrefix(t *testing.T) {
}
// Check the leader is local
- isLeader, advertise, err := core.Leader()
+ isLeader, advertise, _, err := core.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1350,7 +1377,7 @@ func TestCore_CleanLeaderPrefix(t *testing.T) {
redirectOriginal2 := "http://127.0.0.1:8500"
core2, err := NewCore(&CoreConfig{
Physical: inm,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: redirectOriginal2,
DisableMlock: true,
})
@@ -1382,7 +1409,7 @@ func TestCore_CleanLeaderPrefix(t *testing.T) {
}
// Check the leader is not local
- isLeader, advertise, err = core2.Leader()
+ isLeader, advertise, _, err = core2.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1412,7 +1439,7 @@ func TestCore_CleanLeaderPrefix(t *testing.T) {
TestWaitActive(t, core2)
// Check the leader is local
- isLeader, advertise, err = core2.Leader()
+ isLeader, advertise, _, err = core2.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1438,14 +1465,27 @@ func TestCore_CleanLeaderPrefix(t *testing.T) {
func TestCore_Standby(t *testing.T) {
logger = logformat.NewVaultLogger(log.LevelTrace)
- inmha := physical.NewInmemHA(logger)
- testCore_Standby_Common(t, inmha, inmha)
+ inmha, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testCore_Standby_Common(t, inmha, inmha.(physical.HABackend))
}
func TestCore_Standby_SeparateHA(t *testing.T) {
logger = logformat.NewVaultLogger(log.LevelTrace)
- testCore_Standby_Common(t, physical.NewInmemHA(logger), physical.NewInmemHA(logger))
+ inmha, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ inmha2, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testCore_Standby_Common(t, inmha, inmha2.(physical.HABackend))
}
func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical.HABackend) {
@@ -1494,7 +1534,7 @@ func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical.
}
// Check the leader is local
- isLeader, advertise, err := core.Leader()
+ isLeader, advertise, _, err := core.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1547,7 +1587,7 @@ func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical.
}
// Check the leader is not local
- isLeader, advertise, err = core2.Leader()
+ isLeader, advertise, _, err = core2.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1593,7 +1633,7 @@ func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical.
}
// Check the leader is local
- isLeader, advertise, err = core2.Leader()
+ isLeader, advertise, _, err = core2.Leader()
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -1604,18 +1644,18 @@ func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical.
t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal2)
}
- if inm.(*physical.InmemHABackend) == inmha.(*physical.InmemHABackend) {
- lockSize := inm.(*physical.InmemHABackend).LockMapSize()
+ if inm.(*inmem.InmemHABackend) == inmha.(*inmem.InmemHABackend) {
+ lockSize := inm.(*inmem.InmemHABackend).LockMapSize()
if lockSize == 0 {
t.Fatalf("locks not used with only one HA backend")
}
} else {
- lockSize := inmha.(*physical.InmemHABackend).LockMapSize()
+ lockSize := inmha.(*inmem.InmemHABackend).LockMapSize()
if lockSize == 0 {
t.Fatalf("locks not used with expected HA backend")
}
- lockSize = inm.(*physical.InmemHABackend).LockMapSize()
+ lockSize = inm.(*inmem.InmemHABackend).LockMapSize()
if lockSize != 0 {
t.Fatalf("locks used with unexpected HA backend")
}
@@ -1793,6 +1833,19 @@ func TestCore_RenewSameLease(t *testing.T) {
if resp.Secret.LeaseID != original {
t.Fatalf("lease id changed: %s %s", original, resp.Secret.LeaseID)
}
+
+ // Renew the lease (alternate path)
+ req = logical.TestRequest(t, logical.UpdateOperation, "sys/leases/renew/"+resp.Secret.LeaseID)
+ req.ClientToken = root
+ resp, err = c.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Verify the lease did not change
+ if resp.Secret.LeaseID != original {
+ t.Fatalf("lease id changed: %s %s", original, resp.Secret.LeaseID)
+ }
}
// Renew of a token should not create a new lease
@@ -1937,7 +1990,7 @@ path "secret/*" {
}
// Renew the lease
- req = logical.TestRequest(t, logical.UpdateOperation, "sys/renew")
+ req = logical.TestRequest(t, logical.UpdateOperation, "sys/leases/renew")
req.Data = map[string]interface{}{
"lease_id": resp.Secret.LeaseID,
}
@@ -2002,12 +2055,19 @@ func TestCore_Standby_Rotate(t *testing.T) {
// Create the first core and initialize it
logger = logformat.NewVaultLogger(log.LevelTrace)
- inm := physical.NewInmem(logger)
- inmha := physical.NewInmemHA(logger)
+ inm, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ inmha, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+
redirectOriginal := "http://127.0.0.1:8200"
core, err := NewCore(&CoreConfig{
Physical: inm,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: redirectOriginal,
DisableMlock: true,
})
@@ -2028,7 +2088,7 @@ func TestCore_Standby_Rotate(t *testing.T) {
redirectOriginal2 := "http://127.0.0.1:8500"
core2, err := NewCore(&CoreConfig{
Physical: inm,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: redirectOriginal2,
DisableMlock: true,
})
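Note: every Leader() call site above gains a third return value, the active node's cluster address (used for request forwarding). Callers that only need the redirect address blank it, as in:

    isLeader, redirectAddr, clusterAddr, err := core.Leader()
    if err != nil {
        // consts.ErrSealed, ErrHANotEnabled, or a storage error
    }
    // clusterAddr is new in this revision; callers that only want the
    // redirect address pass _ in its place, as the tests above do.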
diff --git a/vendor/github.com/hashicorp/vault/vault/cors.go b/vendor/github.com/hashicorp/vault/vault/cors.go
new file mode 100644
index 0000000..f94f078
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/cors.go
@@ -0,0 +1,153 @@
+package vault
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+const (
+ CORSDisabled uint32 = iota
+ CORSEnabled
+)
+
+var StdAllowedHeaders = []string{
+ "Content-Type",
+ "X-Requested-With",
+ "X-Vault-AWS-IAM-Server-ID",
+ "X-Vault-MFA",
+ "X-Vault-No-Request-Forwarding",
+ "X-Vault-Token",
+ "X-Vault-Wrap-Format",
+ "X-Vault-Wrap-TTL",
+}
+
+// CORSConfig stores the state of the CORS configuration.
+type CORSConfig struct {
+ sync.RWMutex `json:"-"`
+ core *Core
+ Enabled uint32 `json:"enabled"`
+ AllowedOrigins []string `json:"allowed_origins,omitempty"`
+ AllowedHeaders []string `json:"allowed_headers,omitempty"`
+}
+
+func (c *Core) saveCORSConfig() error {
+ view := c.systemBarrierView.SubView("config/")
+
+ localConfig := &CORSConfig{
+ Enabled: atomic.LoadUint32(&c.corsConfig.Enabled),
+ }
+ c.corsConfig.RLock()
+ localConfig.AllowedOrigins = c.corsConfig.AllowedOrigins
+ localConfig.AllowedHeaders = c.corsConfig.AllowedHeaders
+ c.corsConfig.RUnlock()
+
+ entry, err := logical.StorageEntryJSON("cors", localConfig)
+ if err != nil {
+ return fmt.Errorf("failed to create CORS config entry: %v", err)
+ }
+
+ if err := view.Put(entry); err != nil {
+ return fmt.Errorf("failed to save CORS config: %v", err)
+ }
+
+ return nil
+}
+
+// This should only be called with the core state lock held for writing
+func (c *Core) loadCORSConfig() error {
+ view := c.systemBarrierView.SubView("config/")
+
+ // Load the config in
+ out, err := view.Get("cors")
+ if err != nil {
+ return fmt.Errorf("failed to read CORS config: %v", err)
+ }
+ if out == nil {
+ return nil
+ }
+
+ newConfig := new(CORSConfig)
+ err = out.DecodeJSON(newConfig)
+ if err != nil {
+ return err
+ }
+ newConfig.core = c
+
+ c.corsConfig = newConfig
+
+ return nil
+}
+
+// Enable takes either a '*' or a comma-separated list of URLs that can make
+// cross-origin requests to Vault.
+func (c *CORSConfig) Enable(urls []string, headers []string) error {
+ if len(urls) == 0 {
+ return errors.New("at least one origin or the wildcard must be provided.")
+ }
+
+ if strutil.StrListContains(urls, "*") && len(urls) > 1 {
+ return errors.New("to allow all origins the '*' must be the only value for allowed_origins")
+ }
+
+ c.Lock()
+ c.AllowedOrigins = urls
+
+ // Start with the standard headers that Vault accepts.
+ c.AllowedHeaders = append(c.AllowedHeaders, StdAllowedHeaders...)
+
+ // Allow the user to add additional headers to the list of
+ // headers allowed on cross-origin requests.
+ if len(headers) > 0 {
+ c.AllowedHeaders = append(c.AllowedHeaders, headers...)
+ }
+ c.Unlock()
+
+ atomic.StoreUint32(&c.Enabled, CORSEnabled)
+
+ return c.core.saveCORSConfig()
+}
+
+// IsEnabled returns whether CORS is currently enabled.
+func (c *CORSConfig) IsEnabled() bool {
+ return atomic.LoadUint32(&c.Enabled) == CORSEnabled
+}
+
+// Disable sets CORS to disabled and clears the allowed origins & headers.
+func (c *CORSConfig) Disable() error {
+ atomic.StoreUint32(&c.Enabled, CORSDisabled)
+ c.Lock()
+
+ c.AllowedOrigins = nil
+ c.AllowedHeaders = nil
+
+ c.Unlock()
+
+ return c.core.saveCORSConfig()
+}
+
+// IsValidOrigin determines if the origin of the request is allowed to make
+// cross-origin requests based on the CORSConfig.
+func (c *CORSConfig) IsValidOrigin(origin string) bool {
+ // If we aren't enabling CORS then all origins are valid
+ if !c.IsEnabled() {
+ return true
+ }
+
+ c.RLock()
+ defer c.RUnlock()
+
+ if len(c.AllowedOrigins) == 0 {
+ return false
+ }
+
+ if len(c.AllowedOrigins) == 1 && (c.AllowedOrigins)[0] == "*" {
+ return true
+ }
+
+ return strutil.StrListContains(c.AllowedOrigins, origin)
+}
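Note: a short usage sketch of the new CORS API above (the corsConfig field is exposed through Core.CORSConfig(); the origins and header below are illustrative):

    corsCfg := core.CORSConfig()

    // Enable CORS for a single origin; custom headers are appended to the
    // StdAllowedHeaders list.
    if err := corsCfg.Enable([]string{"https://ui.example.com"}, []string{"X-Custom-Header"}); err != nil {
        // persisting the config to the barrier failed
    }

    corsCfg.IsValidOrigin("https://ui.example.com") // true
    corsCfg.IsValidOrigin("https://other.example")  // false

    // Disable clears the allowed origins and headers and persists the change.
    if err := corsCfg.Disable(); err != nil {
        // persisting the config failed
    }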
diff --git a/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go b/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go
index 30b6a76..b5e477a 100644
--- a/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go
+++ b/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go
@@ -1,9 +1,14 @@
package vault
import (
+ "fmt"
"time"
+ "github.com/hashicorp/errwrap"
+
"github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/helper/wrapping"
"github.com/hashicorp/vault/logical"
)
@@ -79,11 +84,60 @@ func (d dynamicSystemView) CachingDisabled() bool {
return d.core.cachingDisabled || (d.mountEntry != nil && d.mountEntry.Config.ForceNoCache)
}
-// Checks if this is a primary Vault instance.
+// Checks if this is a primary Vault instance. Caller should hold the stateLock
+// in read mode.
func (d dynamicSystemView) ReplicationState() consts.ReplicationState {
- var state consts.ReplicationState
- d.core.clusterParamsLock.RLock()
- state = d.core.replicationState
- d.core.clusterParamsLock.RUnlock()
- return state
+ return d.core.replicationState
+}
+
+// ResponseWrapData wraps the given data in a cubbyhole and returns the
+// token used to unwrap.
+func (d dynamicSystemView) ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
+ req := &logical.Request{
+ Operation: logical.CreateOperation,
+ Path: "sys/wrapping/wrap",
+ }
+
+ resp := &logical.Response{
+ WrapInfo: &wrapping.ResponseWrapInfo{
+ TTL: ttl,
+ },
+ Data: data,
+ }
+
+ if jwt {
+ resp.WrapInfo.Format = "jwt"
+ }
+
+ _, err := d.core.wrapInCubbyhole(req, resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.WrapInfo, nil
+}
+
+// LookupPlugin looks for a plugin with the given name in the plugin catalog. It
+// returns a PluginRunner or an error if no plugin was found.
+func (d dynamicSystemView) LookupPlugin(name string) (*pluginutil.PluginRunner, error) {
+ if d.core == nil {
+ return nil, fmt.Errorf("system view core is nil")
+ }
+ if d.core.pluginCatalog == nil {
+ return nil, fmt.Errorf("system view core plugin catalog is nil")
+ }
+ r, err := d.core.pluginCatalog.Get(name)
+ if err != nil {
+ return nil, err
+ }
+ if r == nil {
+ return nil, errwrap.Wrapf(fmt.Sprintf("{{err}}: %s", name), ErrPluginNotFound)
+ }
+
+ return r, nil
+}
+
+// MlockEnabled returns the configuration setting for enabling mlock on plugins.
+func (d dynamicSystemView) MlockEnabled() bool {
+ return d.core.enableMlock
}
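Note: ResponseWrapData gives mounted backends (including plugins) a way to cubbyhole-wrap arbitrary data. A hedged sketch of a call, assuming d is the dynamicSystemView for a mount and that the returned ResponseWrapInfo carries the wrapping token:

    wrapInfo, err := d.ResponseWrapData(map[string]interface{}{
        "password": "temporary-secret", // illustrative payload
    }, 5*time.Minute, false /* jwt format */)
    if err != nil {
        // wrapping in the cubbyhole failed
    }
    _ = wrapInfo // wrapInfo.Token is the single-use token that unwraps the data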
diff --git a/vendor/github.com/hashicorp/vault/vault/expiration.go b/vendor/github.com/hashicorp/vault/vault/expiration.go
index f0f885e..628df8e 100644
--- a/vendor/github.com/hashicorp/vault/vault/expiration.go
+++ b/vendor/github.com/hashicorp/vault/vault/expiration.go
@@ -2,18 +2,23 @@ package vault
import (
"encoding/json"
+ "errors"
"fmt"
"path"
"strings"
"sync"
+ "sync/atomic"
"time"
"github.com/armon/go-metrics"
log "github.com/mgutz/logxi/v1"
+ "github.com/hashicorp/errwrap"
+ multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/locksutil"
"github.com/hashicorp/vault/logical"
)
@@ -34,9 +39,6 @@ const (
// revokeRetryBase is a baseline retry time
revokeRetryBase = 10 * time.Second
- // minRevokeDelay is used to prevent an instant revoke on restore
- minRevokeDelay = 5 * time.Second
-
// maxLeaseDuration is the default maximum lease duration
maxLeaseTTL = 32 * 24 * time.Hour
@@ -56,7 +58,16 @@ type ExpirationManager struct {
logger log.Logger
pending map[string]*time.Timer
- pendingLock sync.Mutex
+ pendingLock sync.RWMutex
+
+ tidyLock int32
+
+ restoreMode int32
+ restoreModeLock sync.RWMutex
+ restoreRequestLock sync.RWMutex
+ restoreLocks []*locksutil.LockEntry
+ restoreLoaded sync.Map
+ quitCh chan struct{}
}
// NewExpirationManager creates a new ExpirationManager that is backed
@@ -64,8 +75,8 @@ type ExpirationManager struct {
func NewExpirationManager(router *Router, view *BarrierView, ts *TokenStore, logger log.Logger) *ExpirationManager {
if logger == nil {
logger = log.New("expiration_manager")
-
}
+
exp := &ExpirationManager{
router: router,
idView: view.SubView(leaseViewPrefix),
@@ -73,6 +84,12 @@ func NewExpirationManager(router *Router, view *BarrierView, ts *TokenStore, log
tokenStore: ts,
logger: logger,
pending: make(map[string]*time.Timer),
+
+ // new instances of the expiration manager will go immediately into
+ // restore mode
+ restoreMode: 1,
+ restoreLocks: locksutil.CreateLocks(),
+ quitCh: make(chan struct{}),
}
return exp
}
@@ -94,9 +111,14 @@ func (c *Core) setupExpiration() error {
// Restore the existing state
c.logger.Info("expiration: restoring leases")
- if err := c.expiration.Restore(); err != nil {
- return fmt.Errorf("expiration state restore failed: %v", err)
+ errorFunc := func() {
+ c.logger.Error("expiration: shutting down")
+ if err := c.Shutdown(); err != nil {
+ c.logger.Error("expiration: error shutting down core", "error", err)
+ }
}
+ go c.expiration.Restore(errorFunc)
+
return nil
}
@@ -114,17 +136,165 @@ func (c *Core) stopExpiration() error {
return nil
}
+// lockLease takes out a lock for a given lease ID
+func (m *ExpirationManager) lockLease(leaseID string) {
+ locksutil.LockForKey(m.restoreLocks, leaseID).Lock()
+}
+
+// unlockLease unlocks a given lease ID
+func (m *ExpirationManager) unlockLease(leaseID string) {
+ locksutil.LockForKey(m.restoreLocks, leaseID).Unlock()
+}
+
+// inRestoreMode returns whether we are currently in restore mode
+func (m *ExpirationManager) inRestoreMode() bool {
+ return atomic.LoadInt32(&m.restoreMode) == 1
+}
+
+// Tidy cleans up the dangling storage entries for leases. It scans the storage
+// view to find all the available leases, checks whether the token embedded in
+// each one is empty or invalid, and revokes the lease in either case. A token
+// cache avoids multiple lookups of the same token ID. Invoking the API that
+// calls this is normally not required; it is only intended to clean up
+// storage corrupted by bugs.
+func (m *ExpirationManager) Tidy() error {
+ if m.inRestoreMode() {
+ return errors.New("cannot run tidy while restoring leases")
+ }
+
+ var tidyErrors *multierror.Error
+
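+ // The compare-and-swap on tidyLock acts as a non-blocking try-lock, so
+ // only one tidy operation can be in flight at a time.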
+ if !atomic.CompareAndSwapInt32(&m.tidyLock, 0, 1) {
+ m.logger.Warn("expiration: tidy operation on leases is already in progress")
+ return fmt.Errorf("tidy operation on leases is already in progress")
+ }
+
+ defer atomic.CompareAndSwapInt32(&m.tidyLock, 1, 0)
+
+ m.logger.Info("expiration: beginning tidy operation on leases")
+ defer m.logger.Info("expiration: finished tidy operation on leases")
+
+ // Cache token validity so that each token ID is looked up at most once
+ tokenCache := make(map[string]bool)
+ var countLease, revokedCount, deletedCountInvalidToken, deletedCountEmptyToken int64
+
+ tidyFunc := func(leaseID string) {
+ countLease++
+ if countLease%500 == 0 {
+ m.logger.Info("expiration: tidying leases", "progress", countLease)
+ }
+
+ le, err := m.loadEntry(leaseID)
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to load the lease ID %q: %v", leaseID, err))
+ return
+ }
+
+ if le == nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("nil entry for lease ID %q: %v", leaseID, err))
+ return
+ }
+
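+ // isValid and ok are declared before the first goto because Go does not
+ // allow jumping over new variable declarations.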
+ var isValid, ok bool
+ revokeLease := false
+ if le.ClientToken == "" {
+ m.logger.Trace("expiration: revoking lease which has an empty token", "lease_id", leaseID)
+ revokeLease = true
+ deletedCountEmptyToken++
+ goto REVOKE_CHECK
+ }
+
+ isValid, ok = tokenCache[le.ClientToken]
+ if !ok {
+ saltedID, err := m.tokenStore.SaltID(le.ClientToken)
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to lookup salt id: %v", err))
+ return
+ }
+ lock := locksutil.LockForKey(m.tokenStore.tokenLocks, le.ClientToken)
+ lock.RLock()
+ te, err := m.tokenStore.lookupSalted(saltedID, true)
+ lock.RUnlock()
+
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to lookup token: %v", err))
+ return
+ }
+
+ if te == nil {
+ m.logger.Trace("expiration: revoking lease which holds an invalid token", "lease_id", leaseID)
+ revokeLease = true
+ deletedCountInvalidToken++
+ tokenCache[le.ClientToken] = false
+ } else {
+ tokenCache[le.ClientToken] = true
+ }
+ goto REVOKE_CHECK
+ } else {
+ if isValid {
+ return
+ }
+
+ m.logger.Trace("expiration: revoking lease which contains an invalid token", "lease_id", leaseID)
+ revokeLease = true
+ deletedCountInvalidToken++
+ goto REVOKE_CHECK
+ }
+
+ REVOKE_CHECK:
+ if revokeLease {
+ // Force the revocation and skip going through the token store
+ // again
+ err = m.revokeCommon(leaseID, true, true)
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to revoke an invalid lease with ID %q: %v", leaseID, err))
+ return
+ }
+ revokedCount++
+ }
+ }
+
+ if err := logical.ScanView(m.idView, tidyFunc); err != nil {
+ return err
+ }
+
+ m.logger.Debug("expiration: number of leases scanned", "count", countLease)
+ m.logger.Debug("expiration: number of leases which had empty tokens", "count", deletedCountEmptyToken)
+ m.logger.Debug("expiration: number of leases which had invalid tokens", "count", deletedCountInvalidToken)
+ m.logger.Debug("expiration: number of leases successfully revoked", "count", revokedCount)
+
+ return tidyErrors.ErrorOrNil()
+}
+
// Restore is used to recover the lease states when starting.
// This is used after starting Vault.
-func (m *ExpirationManager) Restore() error {
- m.pendingLock.Lock()
- defer m.pendingLock.Unlock()
+func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) {
+ defer func() {
+ // Turn off restore mode. We can do this safely without the lock because
+ // if restore mode finished successfully, restore mode was already
+ // disabled with the lock. In an error state, this will allow the
+ // Stop() function to shut everything down.
+ atomic.StoreInt32(&m.restoreMode, 0)
+
+ switch {
+ case retErr == nil:
+ case errwrap.Contains(retErr, ErrBarrierSealed.Error()):
+ // Don't run error func because we're likely already shutting down
+ m.logger.Warn("expiration: barrier sealed while restoring leases, stopping lease loading")
+ retErr = nil
+ default:
+ m.logger.Error("expiration: error restoring leases", "error", retErr)
+ if errorFunc != nil {
+ errorFunc()
+ }
+ }
+ }()
// Accumulate existing leases
m.logger.Debug("expiration: collecting leases")
existing, err := logical.CollectKeys(m.idView)
if err != nil {
- return fmt.Errorf("failed to scan for leases: %v", err)
+ return errwrap.Wrapf("failed to scan for leases: {{err}}", err)
}
m.logger.Debug("expiration: leases collected", "num_existing", len(existing))
@@ -133,7 +303,7 @@ func (m *ExpirationManager) Restore() error {
quit := make(chan bool)
// Buffer these channels to prevent deadlocks
errs := make(chan error, len(existing))
- result := make(chan *leaseEntry, len(existing))
+ result := make(chan struct{}, len(existing))
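+ // result now only signals completion; each lease is restored inside processRestore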
// Use a wait group
wg := &sync.WaitGroup{}
@@ -152,18 +322,21 @@ func (m *ExpirationManager) Restore() error {
return
}
- le, err := m.loadEntry(leaseID)
+ err := m.processRestore(leaseID)
if err != nil {
errs <- err
continue
}
- // Write results out to the result channel
- result <- le
+ // Send message that lease is done
+ result <- struct{}{}
// quit early
case <-quit:
return
+
+ case <-m.quitCh:
+ return
}
}
}()
@@ -174,7 +347,7 @@ func (m *ExpirationManager) Restore() error {
go func() {
defer wg.Done()
for i, leaseID := range existing {
- if i%500 == 0 {
+ if i > 0 && i%500 == 0 {
m.logger.Trace("expiration: leases loading", "progress", i)
}
@@ -182,6 +355,9 @@ func (m *ExpirationManager) Restore() error {
case <-quit:
return
+ case <-m.quitCh:
+ return
+
default:
broker <- leaseID
}
@@ -191,49 +367,59 @@ func (m *ExpirationManager) Restore() error {
close(broker)
}()
- // Restore each key by pulling from the result chan
+ // Ensure all keys on the chan are processed
for i := 0; i < len(existing); i++ {
select {
case err := <-errs:
// Close all go routines
close(quit)
-
return err
- case le := <-result:
+ case <-m.quitCh:
+ close(quit)
+ return nil
- // If there is no entry, nothing to restore
- if le == nil {
- continue
- }
-
- // If there is no expiry time, don't do anything
- if le.ExpireTime.IsZero() {
- continue
- }
-
- // Determine the remaining time to expiration
- expires := le.ExpireTime.Sub(time.Now())
- if expires <= 0 {
- expires = minRevokeDelay
- }
-
- // Setup revocation timer
- m.pending[le.LeaseID] = time.AfterFunc(expires, func() {
- m.expireID(le.LeaseID)
- })
+ case <-result:
}
}
// Let all go routines finish
wg.Wait()
- if len(m.pending) > 0 {
- if m.logger.IsInfo() {
- m.logger.Info("expire: leases restored", "restored_lease_count", len(m.pending))
- }
+ m.restoreModeLock.Lock()
+ m.restoreLoaded = sync.Map{}
+ m.restoreLocks = nil
+ atomic.StoreInt32(&m.restoreMode, 0)
+ m.restoreModeLock.Unlock()
+
+ m.logger.Info("expiration: lease restore complete")
+ return nil
+}
+
+// processRestore takes a lease and restores it in the expiration manager if it has
+// not already been seen
+func (m *ExpirationManager) processRestore(leaseID string) error {
+ m.restoreRequestLock.RLock()
+ defer m.restoreRequestLock.RUnlock()
+
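+ // Double-checked locking: restoreLoaded is consulted before and after
+ // taking the per-lease lock, so concurrent callers load each lease once.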
+ // Check if the lease has been seen
+ if _, ok := m.restoreLoaded.Load(leaseID); ok {
+ return nil
}
+ m.lockLease(leaseID)
+ defer m.unlockLease(leaseID)
+
+ // Check again with the lease locked
+ if _, ok := m.restoreLoaded.Load(leaseID); ok {
+ return nil
+ }
+
+ // Load lease and restore expiration timer
+ _, err := m.loadEntryInternal(leaseID, true, false)
+ if err != nil {
+ return err
+ }
return nil
}
@@ -241,12 +427,26 @@ func (m *ExpirationManager) Restore() error {
// This must be called before sealing the view.
func (m *ExpirationManager) Stop() error {
// Stop all the pending expiration timers
+ m.logger.Debug("expiration: stop triggered")
+ defer m.logger.Debug("expiration: finished stopping")
+
m.pendingLock.Lock()
for _, timer := range m.pending {
timer.Stop()
}
m.pending = make(map[string]*time.Timer)
m.pendingLock.Unlock()
+
+ close(m.quitCh)
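+ // Wait for any in-flight restore to observe quitCh and leave restore
+ // mode before returning.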
+ for m.inRestoreMode() {
+ time.Sleep(10 * time.Millisecond)
+ }
+
return nil
}
@@ -261,6 +461,7 @@ func (m *ExpirationManager) Revoke(leaseID string) error {
// during revocation and still remove entries/index/lease timers
func (m *ExpirationManager) revokeCommon(leaseID string, force, skipToken bool) error {
defer metrics.MeasureSince([]string{"expire", "revoke-common"}, time.Now())
+
// Load the entry
le, err := m.loadEntry(leaseID)
if err != nil {
@@ -277,10 +478,10 @@ func (m *ExpirationManager) revokeCommon(leaseID string, force, skipToken bool)
if err := m.revokeEntry(le); err != nil {
if !force {
return err
- } else {
- if m.logger.IsWarn() {
- m.logger.Warn("revocation from the backend failed, but in force mode so ignoring", "error", err)
- }
+ }
+
+ if m.logger.IsWarn() {
+ m.logger.Warn("revocation from the backend failed, but in force mode so ignoring", "error", err)
}
}
}
@@ -330,6 +531,7 @@ func (m *ExpirationManager) RevokePrefix(prefix string) error {
// token store's revokeSalted function.
func (m *ExpirationManager) RevokeByToken(te *TokenEntry) error {
defer metrics.MeasureSince([]string{"expire", "revoke-by-token"}, time.Now())
+
// Lookup the leases
existing, err := m.lookupByToken(te.ID)
if err != nil {
@@ -338,14 +540,18 @@ func (m *ExpirationManager) RevokeByToken(te *TokenEntry) error {
// Revoke all the keys
for idx, leaseID := range existing {
- if err := m.Revoke(leaseID); err != nil {
+ if err := m.revokeCommon(leaseID, false, false); err != nil {
return fmt.Errorf("failed to revoke '%s' (%d / %d): %v",
leaseID, idx+1, len(existing), err)
}
}
if te.Path != "" {
- tokenLeaseID := path.Join(te.Path, m.tokenStore.SaltID(te.ID))
+ saltedID, err := m.tokenStore.SaltID(te.ID)
+ if err != nil {
+ return err
+ }
+ tokenLeaseID := path.Join(te.Path, saltedID)
// We want to skip the revokeEntry call as that will call back into
// revocation logic in the token store, which is what is running this
@@ -361,6 +567,11 @@ func (m *ExpirationManager) RevokeByToken(te *TokenEntry) error {
}
func (m *ExpirationManager) revokePrefixCommon(prefix string, force bool) error {
+ if m.inRestoreMode() {
+ m.restoreRequestLock.Lock()
+ defer m.restoreRequestLock.Unlock()
+ }
+
// Ensure there is a trailing slash
if !strings.HasSuffix(prefix, "/") {
prefix = prefix + "/"
@@ -388,6 +599,7 @@ func (m *ExpirationManager) revokePrefixCommon(prefix string, force bool) error
// and a renew interval. The increment may be ignored.
func (m *ExpirationManager) Renew(leaseID string, increment time.Duration) (*logical.Response, error) {
defer metrics.MeasureSince([]string{"expire", "renew"}, time.Now())
+
// Load the entry
le, err := m.loadEntry(leaseID)
if err != nil {
@@ -399,6 +611,13 @@ func (m *ExpirationManager) Renew(leaseID string, increment time.Duration) (*log
return nil, err
}
+ if le.Secret == nil {
+ if le.Auth != nil {
+ return logical.ErrorResponse("tokens cannot be renewed through this endpoint"), logical.ErrPermissionDenied
+ }
+ return logical.ErrorResponse("lease does not correspond to a secret"), nil
+ }
+
// Attempt to renew the entry
resp, err := m.renewEntry(le, increment)
if err != nil {
@@ -434,13 +653,57 @@ func (m *ExpirationManager) Renew(leaseID string, increment time.Duration) (*log
return resp, nil
}
+// RestoreSaltedTokenCheck verifies that the token is not expired while running
+// in restore mode. It returns true if we are not in restore mode, if the
+// lease has already been restored, or if the lease still has time left.
+func (m *ExpirationManager) RestoreSaltedTokenCheck(source string, saltedID string) (bool, error) {
+ defer metrics.MeasureSince([]string{"expire", "restore-token-check"}, time.Now())
+
+ // Return immediately if we are not in restore mode; the expiration
+ // manager has already been fully loaded in that case
+ if !m.inRestoreMode() {
+ return true, nil
+ }
+
+ m.restoreModeLock.RLock()
+ defer m.restoreModeLock.RUnlock()
+
+ // Check again after we obtain the lock
+ if !m.inRestoreMode() {
+ return true, nil
+ }
+
+ leaseID := path.Join(source, saltedID)
+
+ m.lockLease(leaseID)
+ defer m.unlockLease(leaseID)
+
+ le, err := m.loadEntryInternal(leaseID, true, true)
+ if err != nil {
+ return false, err
+ }
+ if le != nil && !le.ExpireTime.IsZero() {
+ expires := le.ExpireTime.Sub(time.Now())
+ if expires <= 0 {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
// RenewToken is used to renew a token which does not need to
// invoke a logical backend.
func (m *ExpirationManager) RenewToken(req *logical.Request, source string, token string,
increment time.Duration) (*logical.Response, error) {
defer metrics.MeasureSince([]string{"expire", "renew-token"}, time.Now())
+
// Compute the Lease ID
- leaseID := path.Join(source, m.tokenStore.SaltID(token))
+ saltedID, err := m.tokenStore.SaltID(token)
+ if err != nil {
+ return nil, err
+ }
+ leaseID := path.Join(source, saltedID)
// Load the entry
le, err := m.loadEntry(leaseID)
@@ -498,8 +761,13 @@ func (m *ExpirationManager) RenewToken(req *logical.Request, source string, toke
// Register is used to take a request and response with an associated
// lease. The secret gets assigned a LeaseID and the management of
// the lease is assumed by the expiration manager.
-func (m *ExpirationManager) Register(req *logical.Request, resp *logical.Response) (string, error) {
+func (m *ExpirationManager) Register(req *logical.Request, resp *logical.Response) (id string, retErr error) {
defer metrics.MeasureSince([]string{"expire", "register"}, time.Now())
+
+ if req.ClientToken == "" {
+ return "", fmt.Errorf("expiration: cannot register a lease with an empty client token")
+ }
+
// Ignore if there is no leased secret
if resp == nil || resp.Secret == nil {
return "", nil
@@ -515,8 +783,34 @@ func (m *ExpirationManager) Register(req *logical.Request, resp *logical.Respons
if err != nil {
return "", err
}
+
+ leaseID := path.Join(req.Path, leaseUUID)
+
+ defer func() {
+ // If there is an error we want to rollback as much as possible (note
+ // that errors here are ignored to do as much cleanup as we can). We
+ // want to revoke a generated secret (since an error means we may not
+ // be successfully tracking it), remove indexes, and delete the entry.
+ if retErr != nil {
+ revResp, err := m.router.Route(logical.RevokeRequest(req.Path, resp.Secret, resp.Data))
+ if err != nil {
+ retErr = multierror.Append(retErr, errwrap.Wrapf("an additional internal error was encountered revoking the newly-generated secret: {{err}}", err))
+ } else if revResp != nil && revResp.IsError() {
+ retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered revoking the newly-generated secret: {{err}}", revResp.Error()))
+ }
+
+ if err := m.deleteEntry(leaseID); err != nil {
+ retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered deleting any lease associated with the newly-generated secret: {{err}}", err))
+ }
+
+ if err := m.removeIndexByToken(req.ClientToken, leaseID); err != nil {
+ retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered removing lease indexes associated with the newly-generated secret: {{err}}", err))
+ }
+ }
+ }()
+
le := leaseEntry{
- LeaseID: path.Join(req.Path, leaseUUID),
+ LeaseID: leaseID,
ClientToken: req.ClientToken,
Path: req.Path,
Data: resp.Data,
@@ -548,9 +842,22 @@ func (m *ExpirationManager) Register(req *logical.Request, resp *logical.Respons
func (m *ExpirationManager) RegisterAuth(source string, auth *logical.Auth) error {
defer metrics.MeasureSince([]string{"expire", "register-auth"}, time.Now())
+ if auth.ClientToken == "" {
+ return fmt.Errorf("expiration: cannot register an auth lease with an empty token")
+ }
+
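+ // Reject paths containing ".." so a crafted source cannot escape the
+ // intended lease prefix.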
+ if strings.Contains(source, "..") {
+ return fmt.Errorf("expiration: %s", consts.ErrPathContainsParentReferences)
+ }
+
+ saltedID, err := m.tokenStore.SaltID(auth.ClientToken)
+ if err != nil {
+ return err
+ }
+
// Create a lease entry
le := leaseEntry{
- LeaseID: path.Join(source, m.tokenStore.SaltID(auth.ClientToken)),
+ LeaseID: path.Join(source, saltedID),
ClientToken: auth.ClientToken,
Auth: auth,
Path: source,
@@ -574,7 +881,11 @@ func (m *ExpirationManager) FetchLeaseTimesByToken(source, token string) (*lease
defer metrics.MeasureSince([]string{"expire", "fetch-lease-times-by-token"}, time.Now())
// Compute the Lease ID
- leaseID := path.Join(source, m.tokenStore.SaltID(token))
+ saltedID, err := m.tokenStore.SaltID(token)
+ if err != nil {
+ return nil, err
+ }
+ leaseID := path.Join(source, saltedID)
return m.FetchLeaseTimes(leaseID)
}
@@ -620,8 +931,19 @@ func (m *ExpirationManager) updatePending(le *leaseEntry, leaseTotal time.Durati
// Check for an existing timer
timer, ok := m.pending[le.LeaseID]
+ // If there is no expiry time, don't do anything
+ if le.ExpireTime.IsZero() {
+ // If the timer happens to exist, stop it and delete it from the
+ // pending timers.
+ if ok {
+ timer.Stop()
+ delete(m.pending, le.LeaseID)
+ }
+ return
+ }
+
// Create entry if it does not exist
- if !ok && leaseTotal > 0 {
+ if !ok {
timer := time.AfterFunc(leaseTotal, func() {
m.expireID(le.LeaseID)
})
@@ -629,17 +951,8 @@ func (m *ExpirationManager) updatePending(le *leaseEntry, leaseTotal time.Durati
return
}
- // Delete the timer if the expiration time is zero
- if ok && leaseTotal == 0 {
- timer.Stop()
- delete(m.pending, le.LeaseID)
- return
- }
-
// Extend the timer by the lease total
- if ok && leaseTotal > 0 {
- timer.Reset(leaseTotal)
- }
+ timer.Reset(leaseTotal)
}
// expireID is invoked when a given ID is expired
@@ -650,17 +963,23 @@ func (m *ExpirationManager) expireID(leaseID string) {
m.pendingLock.Unlock()
for attempt := uint(0); attempt < maxRevokeAttempts; attempt++ {
+ select {
+ case <-m.quitCh:
+ m.logger.Error("expiration: shutting down, not attempting further revocation of lease", "lease_id", leaseID)
+ return
+ default:
+ }
err := m.Revoke(leaseID)
if err == nil {
if m.logger.IsInfo() {
- m.logger.Info("expire: revoked lease", "lease_id", leaseID)
+ m.logger.Info("expiration: revoked lease", "lease_id", leaseID)
}
return
}
- m.logger.Error("expire: failed to revoke lease", "lease_id", leaseID, "error", err)
+ m.logger.Error("expiration: failed to revoke lease", "lease_id", leaseID, "error", err)
time.Sleep((1 << attempt) * revokeRetryBase)
}
- m.logger.Error("expire: maximum revoke attempts reached", "lease_id", leaseID)
+ m.logger.Error("expiration: maximum revoke attempts reached", "lease_id", leaseID)
}
// revokeEntry is used to attempt revocation of an internal entry
@@ -668,7 +987,7 @@ func (m *ExpirationManager) revokeEntry(le *leaseEntry) error {
// Revocation of login tokens is special since we can by-pass the
// backend and directly interact with the token store
if le.Auth != nil {
- if err := m.tokenStore.RevokeTree(le.Auth.ClientToken); err != nil {
+ if err := m.tokenStore.RevokeTree(le.ClientToken); err != nil {
return fmt.Errorf("failed to revoke token: %v", err)
}
@@ -722,6 +1041,24 @@ func (m *ExpirationManager) renewAuthEntry(req *logical.Request, le *leaseEntry,
// loadEntry is used to read a lease entry
func (m *ExpirationManager) loadEntry(leaseID string) (*leaseEntry, error) {
+ // Take out the lease locks after we ensure we are in restore mode
+ restoreMode := m.inRestoreMode()
+ if restoreMode {
+ m.restoreModeLock.RLock()
+ defer m.restoreModeLock.RUnlock()
+
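+ // Re-check under the read lock: restore may have completed between the
+ // first check and acquiring the lock.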
+ restoreMode = m.inRestoreMode()
+ if restoreMode {
+ m.lockLease(leaseID)
+ defer m.unlockLease(leaseID)
+ }
+ }
+ return m.loadEntryInternal(leaseID, restoreMode, true)
+}
+
+// loadEntryInternal is used when you need to load an entry but also need to
+// control the lifecycle of the restoreLock
+func (m *ExpirationManager) loadEntryInternal(leaseID string, restoreMode bool, checkRestored bool) (*leaseEntry, error) {
out, err := m.idView.Get(leaseID)
if err != nil {
return nil, fmt.Errorf("failed to read lease entry: %v", err)
@@ -733,6 +1070,24 @@ func (m *ExpirationManager) loadEntry(leaseID string) (*leaseEntry, error) {
if err != nil {
return nil, fmt.Errorf("failed to decode lease entry: %v", err)
}
+
+ if restoreMode {
+ if checkRestored {
+ // If we have already loaded this lease, we don't need to update on
+ // load. In the case of renewal and revocation, updatePending will be
+ // done after making the appropriate modifications to the lease.
+ if _, ok := m.restoreLoaded.Load(leaseID); ok {
+ return le, nil
+ }
+ }
+
+ // Update the cache of restored leases, either synchronously or through
+ // the lazy loaded restore process
+ m.restoreLoaded.Store(le.LeaseID, struct{}{})
+
+ // Setup revocation timer
+ m.updatePending(le, le.ExpireTime.Sub(time.Now()))
+ }
return le, nil
}
@@ -765,8 +1120,18 @@ func (m *ExpirationManager) deleteEntry(leaseID string) error {
// createIndexByToken creates a secondary index from the token to a lease entry
func (m *ExpirationManager) createIndexByToken(token, leaseID string) error {
+ saltedID, err := m.tokenStore.SaltID(token)
+ if err != nil {
+ return err
+ }
+
+ leaseSaltedID, err := m.tokenStore.SaltID(leaseID)
+ if err != nil {
+ return err
+ }
+
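+ // Both halves of the key are salted so that raw token values never
+ // appear in storage keys.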
ent := logical.StorageEntry{
- Key: m.tokenStore.SaltID(token) + "/" + m.tokenStore.SaltID(leaseID),
+ Key: saltedID + "/" + leaseSaltedID,
Value: []byte(leaseID),
}
if err := m.tokenView.Put(&ent); err != nil {
@@ -777,7 +1142,17 @@ func (m *ExpirationManager) createIndexByToken(token, leaseID string) error {
// indexByToken looks up the secondary index from the token to a lease entry
func (m *ExpirationManager) indexByToken(token, leaseID string) (*logical.StorageEntry, error) {
- key := m.tokenStore.SaltID(token) + "/" + m.tokenStore.SaltID(leaseID)
+ saltedID, err := m.tokenStore.SaltID(token)
+ if err != nil {
+ return nil, err
+ }
+
+ leaseSaltedID, err := m.tokenStore.SaltID(leaseID)
+ if err != nil {
+ return nil, err
+ }
+
+ key := saltedID + "/" + leaseSaltedID
entry, err := m.tokenView.Get(key)
if err != nil {
return nil, fmt.Errorf("failed to look up secondary index entry")
@@ -787,7 +1162,17 @@ func (m *ExpirationManager) indexByToken(token, leaseID string) (*logical.Storag
// removeIndexByToken removes the secondary index from the token to a lease entry
func (m *ExpirationManager) removeIndexByToken(token, leaseID string) error {
- key := m.tokenStore.SaltID(token) + "/" + m.tokenStore.SaltID(leaseID)
+ saltedID, err := m.tokenStore.SaltID(token)
+ if err != nil {
+ return err
+ }
+
+ leaseSaltedID, err := m.tokenStore.SaltID(leaseID)
+ if err != nil {
+ return err
+ }
+
+ key := saltedID + "/" + leaseSaltedID
if err := m.tokenView.Delete(key); err != nil {
return fmt.Errorf("failed to delete lease index entry: %v", err)
}
@@ -796,8 +1181,13 @@ func (m *ExpirationManager) removeIndexByToken(token, leaseID string) error {
// lookupByToken is used to look up all the lease IDs via the token index
func (m *ExpirationManager) lookupByToken(token string) ([]string, error) {
+ saltedID, err := m.tokenStore.SaltID(token)
+ if err != nil {
+ return nil, err
+ }
+
// Scan via the index for sub-leases
- prefix := m.tokenStore.SaltID(token) + "/"
+ prefix := saltedID + "/"
subKeys, err := m.tokenView.List(prefix)
if err != nil {
return nil, fmt.Errorf("failed to list leases: %v", err)
@@ -820,9 +1210,9 @@ func (m *ExpirationManager) lookupByToken(token string) ([]string, error) {
// emitMetrics is invoked periodically to emit statistics
func (m *ExpirationManager) emitMetrics() {
- m.pendingLock.Lock()
+ m.pendingLock.RLock()
num := len(m.pending)
- m.pendingLock.Unlock()
+ m.pendingLock.RUnlock()
metrics.SetGauge([]string{"expire", "num_leases"}, float32(num))
}
diff --git a/vendor/github.com/hashicorp/vault/vault/expiration_test.go b/vendor/github.com/hashicorp/vault/vault/expiration_test.go
index ced6b42..144bd16 100644
--- a/vendor/github.com/hashicorp/vault/vault/expiration_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/expiration_test.go
@@ -2,7 +2,6 @@ package vault
import (
"fmt"
- "os"
"reflect"
"sort"
"strings"
@@ -15,6 +14,7 @@ import (
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
"github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/physical/inmem"
log "github.com/mgutz/logxi/v1"
)
@@ -33,16 +33,230 @@ func mockBackendExpiration(t testing.TB, backend physical.Backend) (*Core, *Expi
return c, ts.expiration
}
+func TestExpiration_Tidy(t *testing.T) {
+ var err error
+
+ exp := mockExpiration(t)
+ if err := exp.Restore(nil); err != nil {
+ t.Fatal(err)
+ }
+
+ // Set up a count function to calculate number of leases
+ count := 0
+ countFunc := func(leaseID string) {
+ count++
+ }
+
+ // Scan the storage with the count func set
+ if err = logical.ScanView(exp.idView, countFunc); err != nil {
+ t.Fatal(err)
+ }
+
+ // Check that there are no leases to begin with
+ if count != 0 {
+ t.Fatalf("bad: lease count; expected:0 actual:%d", count)
+ }
+
+ // Create a lease entry without a client token in it
+ le := &leaseEntry{
+ LeaseID: "lease/with/no/client/token",
+ Path: "foo/bar",
+ }
+
+ // Persist the invalid lease entry
+ if err = exp.persistEntry(le); err != nil {
+ t.Fatalf("error persisting entry: %v", err)
+ }
+
+ count = 0
+ if err = logical.ScanView(exp.idView, countFunc); err != nil {
+ t.Fatal(err)
+ }
+
+ // Check that the storage was successful and that the count of leases is
+ // now 1
+ if count != 1 {
+ t.Fatalf("bad: lease count; expected:1 actual:%d", count)
+ }
+
+ // Run the tidy operation
+ err = exp.Tidy()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ count = 0
+ if err := logical.ScanView(exp.idView, countFunc); err != nil {
+ t.Fatal(err)
+ }
+
+ // After the tidy operation, the invalid lease entry should be gone
+ if count != 0 {
+ t.Fatalf("bad: lease count; expected:0 actual:%d", count)
+ }
+
+ // Set a revoked/invalid token in the lease entry
+ le.ClientToken = "invalidtoken"
+
+ // Persist the invalid lease entry
+ if err = exp.persistEntry(le); err != nil {
+ t.Fatalf("error persisting entry: %v", err)
+ }
+
+ count = 0
+ if err = logical.ScanView(exp.idView, countFunc); err != nil {
+ t.Fatal(err)
+ }
+
+ // Check that the storage was successful and that the count of leases is
+ // now 1
+ if count != 1 {
+ t.Fatalf("bad: lease count; expected:1 actual:%d", count)
+ }
+
+ // Run the tidy operation
+ err = exp.Tidy()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ count = 0
+ if err = logical.ScanView(exp.idView, countFunc); err != nil {
+ t.Fatal(err)
+ }
+
+ // After the tidy operation, the invalid lease entry should be gone
+ if count != 0 {
+ t.Fatalf("bad: lease count; expected:0 actual:%d", count)
+ }
+
+ // Attach an invalid token with 2 leases
+ if err = exp.persistEntry(le); err != nil {
+ t.Fatalf("error persisting entry: %v", err)
+ }
+
+ le.LeaseID = "another/invalid/lease"
+ if err = exp.persistEntry(le); err != nil {
+ t.Fatalf("error persisting entry: %v", err)
+ }
+
+ // Run the tidy operation
+ err = exp.Tidy()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ count = 0
+ if err = logical.ScanView(exp.idView, countFunc); err != nil {
+ t.Fatal(err)
+ }
+
+ // After the tidy operation, the invalid lease entries should be gone
+ if count != 0 {
+ t.Fatalf("bad: lease count; expected:0 actual:%d", count)
+ }
+
+ for i := 0; i < 1000; i++ {
+ req := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "invalid/lease/" + fmt.Sprintf("%d", i+1),
+ ClientToken: "invalidtoken",
+ }
+ resp := &logical.Response{
+ Secret: &logical.Secret{
+ LeaseOptions: logical.LeaseOptions{
+ TTL: 100 * time.Millisecond,
+ },
+ },
+ Data: map[string]interface{}{
+ "test_key": "test_value",
+ },
+ }
+ _, err := exp.Register(req, resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+
+ count = 0
+ if err = logical.ScanView(exp.idView, countFunc); err != nil {
+ t.Fatal(err)
+ }
+
+ // Check that there are 1000 leases now
+ if count != 1000 {
+ t.Fatalf("bad: lease count; expected:1000 actual:%d", count)
+ }
+
+ errCh1 := make(chan error)
+ errCh2 := make(chan error)
+
+ // Initiate tidy of the above 1000 invalid leases in quick succession. Only
+ // one tidy operation can be in flight at any time. One of these requests
+ // should error out.
+ go func() {
+ errCh1 <- exp.Tidy()
+ }()
+
+ go func() {
+ errCh2 <- exp.Tidy()
+ }()
+
+ var err1, err2 error
+
+ for i := 0; i < 2; i++ {
+ select {
+ case err1 = <-errCh1:
+ case err2 = <-errCh2:
+ }
+ }
+
+ if !(err1 != nil && err1.Error() == "tidy operation on leases is already in progress") &&
+ !(err2 != nil && err2.Error() == "tidy operation on leases is already in progress") {
+ t.Fatalf("expected at least one of err1 or err2 to be set; err1: %#v\n err2:%#v\n", err1, err2)
+ }
+
+ root, err := exp.tokenStore.rootToken()
+ if err != nil {
+ t.Fatal(err)
+ }
+ le.ClientToken = root.ID
+
+ // Attach a valid token with the leases
+ if err = exp.persistEntry(le); err != nil {
+ t.Fatalf("error persisting entry: %v", err)
+ }
+
+ // Run the tidy operation
+ err = exp.Tidy()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ count = 0
+ if err = logical.ScanView(exp.idView, countFunc); err != nil {
+ t.Fatal(err)
+ }
+
+ // After the tidy operation, the valid lease entry should be unaffected
+ if count != 1 {
+ t.Fatalf("bad: lease count; expected:1 actual:%d", count)
+ }
+}
+
+// To avoid pulling in deps for all users of the package, don't leave these
+// uncommented in the public tree
+/*
func BenchmarkExpiration_Restore_Etcd(b *testing.B) {
addr := os.Getenv("PHYSICAL_BACKEND_BENCHMARK_ADDR")
randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
logger := logformat.NewVaultLogger(log.LevelTrace)
- physicalBackend, err := physical.NewBackend("etcd", logger, map[string]string{
+ physicalBackend, err := physEtcd.NewEtcdBackend(map[string]string{
"address": addr,
"path": randPath,
"max_parallel": "256",
- })
+ }, logger)
if err != nil {
b.Fatalf("err: %s", err)
}
@@ -55,21 +269,26 @@ func BenchmarkExpiration_Restore_Consul(b *testing.B) {
randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
logger := logformat.NewVaultLogger(log.LevelTrace)
- physicalBackend, err := physical.NewBackend("consul", logger, map[string]string{
+ physicalBackend, err := physConsul.NewConsulBackend(map[string]string{
"address": addr,
"path": randPath,
"max_parallel": "256",
- })
+ }, logger)
if err != nil {
b.Fatalf("err: %s", err)
}
benchmarkExpirationBackend(b, physicalBackend, 10000) // 10,000 leases
}
+*/
func BenchmarkExpiration_Restore_InMem(b *testing.B) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- benchmarkExpirationBackend(b, physical.NewInmem(logger), 100000) // 100,000 Leases
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ b.Fatal(err)
+ }
+ benchmarkExpirationBackend(b, inm, 100000) // 100,000 Leases
}
func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend, numLeases int) {
@@ -80,7 +299,10 @@ func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend,
if err != nil {
b.Fatal(err)
}
- exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ b.Fatal(err)
+ }
// Register fake leases
for i := 0; i < numLeases; i++ {
@@ -90,8 +312,9 @@ func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend,
}
req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/" + pathUUID,
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/" + pathUUID,
+ ClientToken: "root",
}
resp := &logical.Response{
Secret: &logical.Secret{
@@ -118,7 +341,7 @@ func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend,
b.ResetTimer()
for i := 0; i < b.N; i++ {
- err = exp.Restore()
+ err = exp.Restore(nil)
// Restore
if err != nil {
b.Fatalf("err: %v", err)
@@ -136,7 +359,10 @@ func TestExpiration_Restore(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
paths := []string{
"prod/aws/foo",
@@ -145,8 +371,9 @@ func TestExpiration_Restore(t *testing.T) {
}
for _, path := range paths {
req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: path,
+ Operation: logical.ReadOperation,
+ Path: path,
+ ClientToken: "foobar",
}
resp := &logical.Response{
Secret: &logical.Secret{
@@ -172,7 +399,7 @@ func TestExpiration_Restore(t *testing.T) {
}
// Restore
- err = exp.Restore()
+ err = exp.Restore(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -200,8 +427,9 @@ func TestExpiration_Restore(t *testing.T) {
func TestExpiration_Register(t *testing.T) {
exp := mockExpiration(t)
req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ ClientToken: "foobar",
}
resp := &logical.Response{
Secret: &logical.Secret{
@@ -247,6 +475,11 @@ func TestExpiration_RegisterAuth(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
+
+ err = exp.RegisterAuth("auth/github/../login", auth)
+ if err == nil {
+ t.Fatal("expected error")
+ }
}
func TestExpiration_RegisterAuth_NoLease(t *testing.T) {
@@ -296,11 +529,15 @@ func TestExpiration_Revoke(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ ClientToken: "foobar",
}
resp := &logical.Response{
Secret: &logical.Secret{
@@ -338,11 +575,15 @@ func TestExpiration_RevokeOnExpire(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ ClientToken: "foobar",
}
resp := &logical.Response{
Secret: &logical.Secret{
@@ -391,7 +632,10 @@ func TestExpiration_RevokePrefix(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
paths := []string{
"prod/aws/foo",
@@ -400,8 +644,9 @@ func TestExpiration_RevokePrefix(t *testing.T) {
}
for _, path := range paths {
req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: path,
+ Operation: logical.ReadOperation,
+ Path: path,
+ ClientToken: "foobar",
}
resp := &logical.Response{
Secret: &logical.Secret{
@@ -455,7 +700,10 @@ func TestExpiration_RevokeByToken(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
paths := []string{
"prod/aws/foo",
@@ -585,11 +833,15 @@ func TestExpiration_Renew(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ ClientToken: "foobar",
}
resp := &logical.Response{
Secret: &logical.Secret{
@@ -651,11 +903,15 @@ func TestExpiration_Renew_NotRenewable(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ ClientToken: "foobar",
}
resp := &logical.Response{
Secret: &logical.Secret{
@@ -697,11 +953,15 @@ func TestExpiration_Renew_RevokeOnExpire(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
req := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "prod/aws/foo",
+ Operation: logical.ReadOperation,
+ Path: "prod/aws/foo",
+ ClientToken: "foobar",
}
resp := &logical.Response{
Secret: &logical.Secret{
@@ -769,7 +1029,10 @@ func TestExpiration_revokeEntry(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "foo/bar/", &MountEntry{Path: "foo/bar/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
le := &leaseEntry{
LeaseID: "foo/bar/1234",
@@ -796,13 +1059,10 @@ func TestExpiration_revokeEntry(t *testing.T) {
req := noop.Requests[0]
if req.Operation != logical.RevokeOperation {
- t.Fatalf("Bad: %v", req)
- }
- if req.Path != le.Path {
- t.Fatalf("Bad: %v", req)
+ t.Fatalf("bad: operation; req: %#v", req)
}
if !reflect.DeepEqual(req.Data, le.Data) {
- t.Fatalf("Bad: %v", req)
+ t.Fatalf("bad: data; req: %#v\n le: %#v\n", req, le)
}
}
@@ -900,7 +1160,10 @@ func TestExpiration_renewEntry(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "foo/bar/", &MountEntry{Path: "foo/bar/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
le := &leaseEntry{
LeaseID: "foo/bar/1234",
@@ -933,9 +1196,6 @@ func TestExpiration_renewEntry(t *testing.T) {
if req.Operation != logical.RenewOperation {
t.Fatalf("Bad: %v", req)
}
- if req.Path != le.Path {
- t.Fatalf("Bad: %v", req)
- }
if !reflect.DeepEqual(req.Data, le.Data) {
t.Fatalf("Bad: %v", req)
}
@@ -966,7 +1226,10 @@ func TestExpiration_renewAuthEntry(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "auth/foo/", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "auth/foo/", &MountEntry{Path: "auth/foo/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
le := &leaseEntry{
LeaseID: "auth/foo/1234",
@@ -1134,9 +1397,10 @@ func TestExpiration_RevokeForce(t *testing.T) {
core.logicalBackends["badrenew"] = badRenewFactory
me := &MountEntry{
- Table: mountTableType,
- Path: "badrenew/",
- Type: "badrenew",
+ Table: mountTableType,
+ Path: "badrenew/",
+ Type: "badrenew",
+ Accessor: "badrenewaccessor",
}
err := core.mount(me)
@@ -1207,5 +1471,10 @@ func badRenewFactory(conf *logical.BackendConfig) (logical.Backend, error) {
},
}
- return be.Setup(conf)
+ err := be.Setup(conf)
+ if err != nil {
+ return nil, err
+ }
+
+ return be, nil
}
diff --git a/vendor/github.com/hashicorp/vault/vault/init_test.go b/vendor/github.com/hashicorp/vault/vault/init_test.go
index 38d95e4..48581f7 100644
--- a/vendor/github.com/hashicorp/vault/vault/init_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/init_test.go
@@ -8,7 +8,7 @@ import (
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/physical/inmem"
)
func TestCore_Init(t *testing.T) {
@@ -25,12 +25,15 @@ func TestCore_Init(t *testing.T) {
func testCore_NewTestCore(t *testing.T, seal Seal) (*Core, *CoreConfig) {
logger := logformat.NewVaultLogger(log.LevelTrace)
- inm := physical.NewInmem(logger)
+ inm, err := inmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
conf := &CoreConfig{
Physical: inm,
DisableMlock: true,
LogicalBackends: map[string]logical.Factory{
- "generic": LeasedPassthroughBackendFactory,
+ "kv": LeasedPassthroughBackendFactory,
},
Seal: seal,
}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go
index 76353b0..cedb241 100644
--- a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go
+++ b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go
@@ -46,7 +46,7 @@ func CubbyholeBackendFactory(conf *logical.BackendConfig) (logical.Backend, erro
// CubbyholeBackend is used for storing secrets directly into the physical
// backend. The secrets are encrypted in the durable storage.
-// This differs from generic in that every token has its own private
+// This differs from kv in that every token has its own private
// storage view. The view is removed when the token expires.
type CubbyholeBackend struct {
*framework.Backend
@@ -185,7 +185,7 @@ The secrets are encrypted/decrypted by Vault: they are never stored
unencrypted in the backend and the backend never has an opportunity to
see the unencrypted value.
-This backend differs from the 'generic' backend in that it is namespaced
+This backend differs from the 'kv' backend in that it is namespaced
per-token. Tokens can only read and write their own values, with no
sharing possible (per-token cubbyholes). This can be useful for implementing
certain authentication workflows, as well as "scratch" areas for individual
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go b/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go
index eb52a3f..5fc013e 100644
--- a/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go
+++ b/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go
@@ -5,8 +5,8 @@ import (
"fmt"
"strings"
- "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
@@ -17,13 +17,13 @@ func PassthroughBackendFactory(conf *logical.BackendConfig) (logical.Backend, er
return LeaseSwitchedPassthroughBackend(conf, false)
}
-// PassthroughBackendWithLeasesFactory returns a PassthroughBackend
+// LeasedPassthroughBackendFactory returns a PassthroughBackend
// with leases switched on
func LeasedPassthroughBackendFactory(conf *logical.BackendConfig) (logical.Backend, error) {
return LeaseSwitchedPassthroughBackend(conf, true)
}
-// LeaseSwitchedPassthroughBackendFactory returns a PassthroughBackend
+// LeaseSwitchedPassthroughBackend returns a PassthroughBackend
// with leases switched on or off
func LeaseSwitchedPassthroughBackend(conf *logical.BackendConfig, leases bool) (logical.Backend, error) {
var b PassthroughBackend
@@ -53,7 +53,7 @@ func LeaseSwitchedPassthroughBackend(conf *logical.BackendConfig, leases bool) (
b.Backend.Secrets = []*framework.Secret{
&framework.Secret{
- Type: "generic",
+ Type: "kv",
Renew: b.handleRead,
Revoke: b.handleRevoke,
@@ -116,7 +116,7 @@ func (b *PassthroughBackend) handleRead(
var resp *logical.Response
if b.generateLeases {
// Generate the response
- resp = b.Secret("generic").Response(rawData, nil)
+ resp = b.Secret("kv").Response(rawData, nil)
resp.Secret.Renewable = false
} else {
resp = &logical.Response{
@@ -126,14 +126,13 @@ func (b *PassthroughBackend) handleRead(
}
// Check if there is a ttl key
- var ttl string
- ttl, _ = rawData["ttl"].(string)
- if len(ttl) == 0 {
- ttl, _ = rawData["lease"].(string)
- }
ttlDuration := b.System().DefaultLeaseTTL()
- if len(ttl) != 0 {
- dur, err := parseutil.ParseDurationSecond(ttl)
+ ttlRaw, ok := rawData["ttl"]
+ if !ok {
+ ttlRaw, ok = rawData["lease"]
+ }
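+ // parseutil.ParseDurationSecond handles string and numeric values
+ // (including json.Number), so the raw value is passed through unchanged.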
+ if ok {
+ dur, err := parseutil.ParseDurationSecond(ttlRaw)
if err == nil {
ttlDuration = dur
}
@@ -148,6 +147,10 @@ func (b *PassthroughBackend) handleRead(
return resp, nil
}
+func (b *PassthroughBackend) GeneratesLeases() bool {
+ return b.generateLeases
+}
+
func (b *PassthroughBackend) handleWrite(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
// Check that some fields are given
@@ -203,12 +206,8 @@ func (b *PassthroughBackend) handleList(
return logical.ListResponse(keys), nil
}
-func (b *PassthroughBackend) GeneratesLeases() bool {
- return b.generateLeases
-}
-
const passthroughHelp = `
-The generic backend reads and writes arbitrary secrets to the backend.
+The kv backend reads and writes arbitrary secrets to the backend.
The secrets are encrypted/decrypted by Vault: they are never stored
unencrypted in the backend and the backend never has an opportunity to
see the unencrypted value.
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go b/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go
index bd33d65..1ccda69 100644
--- a/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go
@@ -1,10 +1,12 @@
package vault
import (
+ "encoding/json"
"reflect"
"testing"
"time"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/logical"
)
@@ -49,10 +51,19 @@ func TestPassthroughBackend_Write(t *testing.T) {
}
func TestPassthroughBackend_Read(t *testing.T) {
- test := func(b logical.Backend, ttlType string, leased bool) {
+ test := func(b logical.Backend, ttlType string, ttl interface{}, leased bool) {
req := logical.TestRequest(t, logical.UpdateOperation, "foo")
req.Data["raw"] = "test"
- req.Data[ttlType] = "1h"
+ var reqTTL interface{}
+ switch v := ttl.(type) {
+ case int64, string:
+ reqTTL = v
+ default:
+ t.Fatal("unknown ttl type")
+ }
+ req.Data[ttlType] = reqTTL
storage := req.Storage
if _, err := b.HandleRequest(req); err != nil {
@@ -67,16 +78,34 @@ func TestPassthroughBackend_Read(t *testing.T) {
t.Fatalf("err: %v", err)
}
+ expectedTTL, err := parseutil.ParseDurationSecond(ttl)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // If an int is passed in, what comes back is a json.Number, which is
+ // aliased to a string; to keep the deep-equal comparison happy, convert
+ // it to an int64 when it is actually a number.
+ var respTTL interface{} = resp.Data[ttlType]
+ _, ok := respTTL.(json.Number)
+ if ok {
+ respTTL, err = respTTL.(json.Number).Int64()
+ if err != nil {
+ t.Fatal(err)
+ }
+ resp.Data[ttlType] = respTTL
+ }
+
expected := &logical.Response{
Secret: &logical.Secret{
LeaseOptions: logical.LeaseOptions{
Renewable: true,
- TTL: time.Hour,
+ TTL: expectedTTL,
},
},
Data: map[string]interface{}{
"raw": "test",
- ttlType: "1h",
+ ttlType: reqTTL,
},
}
@@ -86,15 +115,15 @@ func TestPassthroughBackend_Read(t *testing.T) {
resp.Secret.InternalData = nil
resp.Secret.LeaseID = ""
if !reflect.DeepEqual(resp, expected) {
- t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expected, resp)
+ t.Fatalf("bad response.\n\nexpected:\n%#v\n\nGot:\n%#v", expected, resp)
}
}
b := testPassthroughLeasedBackend()
- test(b, "lease", true)
- test(b, "ttl", true)
+ test(b, "lease", "1h", true)
+ test(b, "ttl", "5", true)
b = testPassthroughBackend()
- test(b, "lease", false)
- test(b, "ttl", false)
+ test(b, "lease", int64(10), false)
+ test(b, "ttl", "40s", false)
}
func TestPassthroughBackend_Delete(t *testing.T) {
@@ -168,10 +197,10 @@ func TestPassthroughBackend_List(t *testing.T) {
func TestPassthroughBackend_Revoke(t *testing.T) {
test := func(b logical.Backend) {
- req := logical.TestRequest(t, logical.RevokeOperation, "generic")
+ req := logical.TestRequest(t, logical.RevokeOperation, "kv")
req.Secret = &logical.Secret{
InternalData: map[string]interface{}{
- "secret_type": "generic",
+ "secret_type": "kv",
},
}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system.go b/vendor/github.com/hashicorp/vault/vault/logical_system.go
index 5fdf312..1593a1f 100644
--- a/vendor/github.com/hashicorp/vault/vault/logical_system.go
+++ b/vendor/github.com/hashicorp/vault/vault/logical_system.go
@@ -9,8 +9,10 @@ import (
"sync"
"time"
+ "github.com/fatih/structs"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/parseutil"
+ "github.com/hashicorp/vault/helper/wrapping"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
"github.com/mitchellh/mapstructure"
@@ -20,7 +22,7 @@ var (
// protectedPaths cannot be accessed via the raw APIs.
// This is both for security and to prevent disrupting Vault.
protectedPaths = []string{
- "core",
+ keyringPath,
}
replicationPaths = func(b *SystemBackend) []*framework.Path {
@@ -43,7 +45,7 @@ var (
}
)
-func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backend, error) {
+func NewSystemBackend(core *Core) *SystemBackend {
b := &SystemBackend{
Core: core,
}
@@ -57,18 +59,23 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
"remount",
"audit",
"audit/*",
+ "raw",
"raw/*",
"replication/primary/secondary-token",
"replication/reindex",
"rotate",
+ "config/cors",
"config/auditing/*",
+ "plugins/catalog/*",
"revoke-prefix/*",
+ "revoke-force/*",
"leases/revoke-prefix/*",
"leases/revoke-force/*",
"leases/lookup/*",
},
Unauthenticated: []string{
+ "wrapping/lookup",
"wrapping/pubkey",
"replication/status",
},
@@ -97,6 +104,34 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
HelpDescription: strings.TrimSpace(sysHelp["capabilities_accessor"][1]),
},
+ &framework.Path{
+ Pattern: "config/cors$",
+
+ Fields: map[string]*framework.FieldSchema{
+ "enable": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: "Enables or disables CORS headers on requests.",
+ },
+ "allowed_origins": &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: "A comma-separated string or array of strings indicating origins that may make cross-origin requests.",
+ },
+ "allowed_headers": &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: "A comma-separated string or array of strings indicating headers that are allowed on cross-origin requests.",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleCORSRead,
+ logical.UpdateOperation: b.handleCORSUpdate,
+ logical.DeleteOperation: b.handleCORSDelete,
+ },
+
+ HelpDescription: strings.TrimSpace(sysHelp["config/cors"][0]),
+ HelpSynopsis: strings.TrimSpace(sysHelp["config/cors"][1]),
+ },
+
&framework.Path{
Pattern: "capabilities$",
@@ -196,6 +231,10 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
Type: framework.TypeString,
Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]),
},
+ "description": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["auth_desc"][0]),
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.handleAuthTuneRead,
@@ -221,6 +260,10 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
Type: framework.TypeString,
Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]),
},
+ "description": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["auth_desc"][0]),
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -257,6 +300,10 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
Default: false,
Description: strings.TrimSpace(sysHelp["mount_local"][0]),
},
+ "plugin_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["mount_plugin_name"][0]),
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -421,6 +468,17 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
HelpDescription: strings.TrimSpace(sysHelp["revoke-prefix"][1]),
},
+ &framework.Path{
+ Pattern: "leases/tidy$",
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handleTidyLeases,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["tidy_leases"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["tidy_leases"][1]),
+ },
+
&framework.Path{
Pattern: "auth$",
@@ -448,11 +506,19 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
Type: framework.TypeString,
Description: strings.TrimSpace(sysHelp["auth_desc"][0]),
},
+ "config": &framework.FieldSchema{
+ Type: framework.TypeMap,
+ Description: strings.TrimSpace(sysHelp["auth_config"][0]),
+ },
"local": &framework.FieldSchema{
Type: framework.TypeBool,
Default: false,
Description: strings.TrimSpace(sysHelp["mount_local"][0]),
},
+ "plugin_name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["auth_plugin"][0]),
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -587,25 +653,6 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
HelpDescription: strings.TrimSpace(sysHelp["audit"][1]),
},
- &framework.Path{
- Pattern: "raw/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- "value": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleRawRead,
- logical.UpdateOperation: b.handleRawWrite,
- logical.DeleteOperation: b.handleRawDelete,
- },
- },
-
&framework.Path{
Pattern: "key-status$",
@@ -681,6 +728,7 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.handleWrappingLookup,
+ logical.ReadOperation: b.handleWrappingLookup,
},
HelpSynopsis: strings.TrimSpace(sysHelp["wraplookup"][0]),
@@ -736,27 +784,159 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen
HelpSynopsis: strings.TrimSpace(sysHelp["audited-headers"][0]),
HelpDescription: strings.TrimSpace(sysHelp["audited-headers"][1]),
},
+ &framework.Path{
+ Pattern: "plugins/catalog/?$",
+
+ Fields: map[string]*framework.FieldSchema{},
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ListOperation: b.handlePluginCatalogList,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]),
+ },
+ &framework.Path{
+ Pattern: "plugins/catalog/(?P.+)",
+
+ Fields: map[string]*framework.FieldSchema{
+ "name": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["plugin-catalog_name"][0]),
+ },
+ "sha256": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["plugin-catalog_sha-256"][0]),
+ },
+ "sha_256": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["plugin-catalog_sha-256"][0]),
+ },
+ "command": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["plugin-catalog_command"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handlePluginCatalogUpdate,
+ logical.DeleteOperation: b.handlePluginCatalogDelete,
+ logical.ReadOperation: b.handlePluginCatalogRead,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]),
+ },
+ &framework.Path{
+ Pattern: "plugins/reload/backend$",
+
+ Fields: map[string]*framework.FieldSchema{
+ "plugin": &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: strings.TrimSpace(sysHelp["plugin-backend-reload-plugin"][0]),
+ },
+ "mounts": &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: strings.TrimSpace(sysHelp["plugin-backend-reload-mounts"][0]),
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.handlePluginReloadUpdate,
+ },
+
+ HelpSynopsis: strings.TrimSpace(sysHelp["plugin-reload"][0]),
+ HelpDescription: strings.TrimSpace(sysHelp["plugin-reload"][1]),
+ },
},
}
b.Backend.Paths = append(b.Backend.Paths, replicationPaths(b)...)
+ if core.rawEnabled {
+ b.Backend.Paths = append(b.Backend.Paths, &framework.Path{
+ Pattern: "(raw/?$|raw/(?P.+))",
+
+ Fields: map[string]*framework.FieldSchema{
+ "path": &framework.FieldSchema{
+ Type: framework.TypeString,
+ },
+ "value": &framework.FieldSchema{
+ Type: framework.TypeString,
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.handleRawRead,
+ logical.UpdateOperation: b.handleRawWrite,
+ logical.DeleteOperation: b.handleRawDelete,
+ logical.ListOperation: b.handleRawList,
+ },
+ })
+ }
+
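
Once the raw endpoints are enabled on the server (a deployment-level setting, gated here by core.rawEnabled), the new list operation can be exercised from a client. A minimal sketch, assuming VAULT_ADDR and a root VAULT_TOKEN are set and that the listed subtree is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// LIST sys/raw/sys/policy is served by the handleRawList callback
	// registered above; the subtree name is only an example.
	secret, err := client.Logical().List("sys/raw/sys/policy")
	if err != nil {
		log.Fatal(err)
	}
	if secret != nil {
		fmt.Println(secret.Data["keys"])
	}
}
```
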
b.Backend.Invalidate = b.invalidate
- return b.Backend.Setup(config)
+ return b
}
// SystemBackend implements logical.Backend and is used to interact with
// the core of the system. This backend is hardcoded to exist at the "sys"
// prefix. Conceptually it is similar to procfs on Linux.
type SystemBackend struct {
- Core *Core
- Backend *framework.Backend
+ *framework.Backend
+ Core *Core
+}
+
+// handleCORSRead returns the current CORS configuration
+func (b *SystemBackend) handleCORSRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ corsConf := b.Core.corsConfig
+
+ enabled := corsConf.IsEnabled()
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "enabled": enabled,
+ },
+ }
+
+ if enabled {
+ corsConf.RLock()
+ resp.Data["allowed_origins"] = corsConf.AllowedOrigins
+ resp.Data["allowed_headers"] = corsConf.AllowedHeaders
+ corsConf.RUnlock()
+ }
+
+ return resp, nil
+}
+
+// handleCORSUpdate sets the list of origins that are allowed to make
+// cross-origin requests and sets the CORS enabled flag to true
+func (b *SystemBackend) handleCORSUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ origins := d.Get("allowed_origins").([]string)
+ headers := d.Get("allowed_headers").([]string)
+
+ return nil, b.Core.corsConfig.Enable(origins, headers)
+}
+
+// handleCORSDelete sets the CORS enabled flag to false and clears the list of
+// allowed origins & headers.
+func (b *SystemBackend) handleCORSDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ return nil, b.Core.corsConfig.Disable()
+}
+
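
A minimal sketch of driving the three CORS handlers above through the Go API client, assuming a reachable server and a sufficiently privileged token; the origin and header values are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Update: enable CORS for one origin plus one extra allowed header.
	_, err = client.Logical().Write("sys/config/cors", map[string]interface{}{
		"allowed_origins": "https://ui.example.com",
		"allowed_headers": "X-Custom-Header",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read: reports enabled, plus the origin/header lists while enabled.
	secret, err := client.Logical().Read("sys/config/cors")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["enabled"], secret.Data["allowed_origins"])

	// Delete: disables CORS and clears both lists.
	if _, err := client.Logical().Delete("sys/config/cors"); err != nil {
		log.Fatal(err)
	}
}
```
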
+func (b *SystemBackend) handleTidyLeases(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ err := b.Core.expiration.Tidy()
+ if err != nil {
+ b.Backend.Logger().Error("sys: failed to tidy leases", "error", err)
+ return handleError(err)
+ }
+ return nil, err
}
func (b *SystemBackend) invalidate(key string) {
if b.Core.logger.IsTrace() {
- b.Core.logger.Trace("sys: invaliding key", "key", key)
+ b.Core.logger.Trace("sys: invalidating key", "key", key)
}
switch {
case strings.HasPrefix(key, policySubPath):
@@ -768,6 +948,107 @@ func (b *SystemBackend) invalidate(key string) {
}
}
+func (b *SystemBackend) handlePluginCatalogList(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ plugins, err := b.Core.pluginCatalog.List()
+ if err != nil {
+ return nil, err
+ }
+
+ return logical.ListResponse(plugins), nil
+}
+
+func (b *SystemBackend) handlePluginCatalogUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ pluginName := d.Get("name").(string)
+ if pluginName == "" {
+ return logical.ErrorResponse("missing plugin name"), nil
+ }
+
+ sha256 := d.Get("sha256").(string)
+ if sha256 == "" {
+ sha256 = d.Get("sha_256").(string)
+ if sha256 == "" {
+ return logical.ErrorResponse("missing SHA-256 value"), nil
+ }
+ }
+
+ command := d.Get("command").(string)
+ if command == "" {
+ return logical.ErrorResponse("missing command value"), nil
+ }
+
+ sha256Bytes, err := hex.DecodeString(sha256)
+ if err != nil {
+ return logical.ErrorResponse("Could not decode SHA-256 value from Hex"), err
+ }
+
+ err = b.Core.pluginCatalog.Set(pluginName, command, sha256Bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
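
A sketch of registering and reading back a catalog entry via the API, assuming a root token; the plugin name and binary path are illustrative. Note that the handler accepts the SHA-256 sum under either "sha256" or "sha_256":

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Hash the plugin binary; the handler expects the sum hex-encoded.
	bin, err := ioutil.ReadFile("/etc/vault/plugins/my-plugin")
	if err != nil {
		log.Fatal(err)
	}
	sum := sha256.Sum256(bin)

	_, err = client.Logical().Write("sys/plugins/catalog/my-plugin", map[string]interface{}{
		"sha_256": hex.EncodeToString(sum[:]),
		"command": "my-plugin",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the entry back; handlePluginCatalogRead returns the stored
	// runner fields for the named plugin.
	secret, err := client.Logical().Read("sys/plugins/catalog/my-plugin")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data)
}
```
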
+func (b *SystemBackend) handlePluginCatalogRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ pluginName := d.Get("name").(string)
+ if pluginName == "" {
+ return logical.ErrorResponse("missing plugin name"), nil
+ }
+ plugin, err := b.Core.pluginCatalog.Get(pluginName)
+ if err != nil {
+ return nil, err
+ }
+ if plugin == nil {
+ return nil, nil
+ }
+
+ // Build a map of the plugin entry's data to return
+ data := structs.New(plugin).Map()
+
+ return &logical.Response{
+ Data: data,
+ }, nil
+}
+
+func (b *SystemBackend) handlePluginCatalogDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ pluginName := d.Get("name").(string)
+ if pluginName == "" {
+ return logical.ErrorResponse("missing plugin name"), nil
+ }
+ err := b.Core.pluginCatalog.Delete(pluginName)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (b *SystemBackend) handlePluginReloadUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ pluginName := d.Get("plugin").(string)
+ pluginMounts := d.Get("mounts").([]string)
+
+ if pluginName != "" && len(pluginMounts) > 0 {
+ return logical.ErrorResponse("plugin and mounts cannot be set at the same time"), nil
+ }
+ if pluginName == "" && len(pluginMounts) == 0 {
+ return logical.ErrorResponse("plugin or mounts must be provided"), nil
+ }
+
+ if pluginName != "" {
+ err := b.Core.reloadMatchingPlugin(pluginName)
+ if err != nil {
+ return nil, err
+ }
+ } else if len(pluginMounts) > 0 {
+ err := b.Core.reloadMatchingPluginMounts(pluginMounts)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+}
+
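
A sketch of triggering a reload through the API, with a hypothetical plugin name. Per the handler above, exactly one of "plugin" or "mounts" must be supplied:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Reload every mount backed by the named plugin; passing "mounts"
	// instead would reload only the listed paths, and the handler
	// rejects requests that set both parameters.
	_, err = client.Logical().Write("sys/plugins/reload/backend", map[string]interface{}{
		"plugin": "my-plugin",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
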
// handleAuditedHeaderUpdate creates or overwrites a header entry
func (b *SystemBackend) handleAuditedHeaderUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
header := d.Get("header").(string)
@@ -832,7 +1113,7 @@ func (b *SystemBackend) handleAuditedHeadersRead(req *logical.Request, d *framew
}, nil
}
-// handleCapabilitiesreturns the ACL capabilities of the token for a given path
+// handleCapabilities returns the ACL capabilities of the token for a given path
func (b *SystemBackend) handleCapabilities(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
token := d.Get("token").(string)
if token == "" {
@@ -850,15 +1131,15 @@ func (b *SystemBackend) handleCapabilities(req *logical.Request, d *framework.Fi
}, nil
}
-// handleCapabilitiesAccessor returns the ACL capabilities of the token associted
-// with the given accessor for a given path.
+// handleCapabilitiesAccessor returns the ACL capabilities of the
+ // token associated with the given accessor for a given path.
func (b *SystemBackend) handleCapabilitiesAccessor(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
accessor := d.Get("accessor").(string)
if accessor == "" {
return logical.ErrorResponse("missing accessor"), nil
}
- aEntry, err := b.Core.tokenStore.lookupByAccessor(accessor)
+ aEntry, err := b.Core.tokenStore.lookupByAccessor(accessor, false)
if err != nil {
return nil, err
}
@@ -962,17 +1243,17 @@ func (b *SystemBackend) handleMountTable(
}
for _, entry := range b.Core.mounts.Entries {
+ // Populate mount info
+ structConfig := structs.New(entry.Config).Map()
+ structConfig["default_lease_ttl"] = int64(structConfig["default_lease_ttl"].(time.Duration).Seconds())
+ structConfig["max_lease_ttl"] = int64(structConfig["max_lease_ttl"].(time.Duration).Seconds())
info := map[string]interface{}{
"type": entry.Type,
"description": entry.Description,
- "config": map[string]interface{}{
- "default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()),
- "max_lease_ttl": int64(entry.Config.MaxLeaseTTL.Seconds()),
- "force_no_cache": entry.Config.ForceNoCache,
- },
- "local": entry.Local,
+ "accessor": entry.Accessor,
+ "config": structConfig,
+ "local": entry.Local,
}
-
resp.Data[entry.Path] = info
}
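
With the accessor and the flattened config now included per entry, a client reading the mount table sees them alongside type and description. A minimal sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Read("sys/mounts")
	if err != nil {
		log.Fatal(err)
	}
	// Each entry now carries the generated accessor and a config map
	// with the lease TTLs rendered in seconds.
	for path, raw := range secret.Data {
		if info, ok := raw.(map[string]interface{}); ok {
			fmt.Println(path, info["accessor"], info["config"])
		}
	}
}
```
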
@@ -982,12 +1263,10 @@ func (b *SystemBackend) handleMountTable(
// handleMount is used to mount a new path
func (b *SystemBackend) handleMount(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.Core.clusterParamsLock.RLock()
repState := b.Core.replicationState
- b.Core.clusterParamsLock.RUnlock()
local := data.Get("local").(bool)
- if !local && repState == consts.ReplicationSecondary {
+ if !local && repState.HasState(consts.ReplicationPerformanceSecondary) {
return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
}
@@ -995,16 +1274,13 @@ func (b *SystemBackend) handleMount(
path := data.Get("path").(string)
logicalType := data.Get("type").(string)
description := data.Get("description").(string)
+ pluginName := data.Get("plugin_name").(string)
path = sanitizeMountPath(path)
var config MountConfig
+ var apiConfig APIMountConfig
- var apiConfig struct {
- DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
- ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
- }
configMap := data.Get("config").(map[string]interface{})
if configMap != nil && len(configMap) != 0 {
err := mapstructure.Decode(configMap, &apiConfig)
@@ -1053,6 +1329,21 @@ func (b *SystemBackend) handleMount(
logical.ErrInvalidRequest
}
+ // Only set the plugin name if the mount is of type "plugin", with
+ // apiConfig.PluginName taking precedence.
+ if logicalType == "plugin" {
+ switch {
+ case apiConfig.PluginName != "":
+ config.PluginName = apiConfig.PluginName
+ case pluginName != "":
+ config.PluginName = pluginName
+ default:
+ return logical.ErrorResponse(
+ "plugin_name must be provided for plugin backend"),
+ logical.ErrInvalidRequest
+ }
+ }
+
// Copy over the force no cache if set
if apiConfig.ForceNoCache {
config.ForceNoCache = true
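
The precedence switch above means plugin_name can arrive either as a top-level field or inside the config map, mirroring the alternation exercised by the integration tests later in this diff. A sketch of both styles, with hypothetical mount paths:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Style 1: top-level plugin_name field.
	_, err = client.Logical().Write("sys/mounts/mock-a", map[string]interface{}{
		"type":        "plugin",
		"plugin_name": "mock-plugin",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Style 2: plugin_name nested in config; per the switch above, the
	// config value takes precedence when both are supplied.
	_, err = client.Logical().Write("sys/mounts/mock-b", map[string]interface{}{
		"type": "plugin",
		"config": map[string]interface{}{
			"plugin_name": "mock-plugin",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
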
@@ -1097,25 +1388,25 @@ func handleError(
// handleUnmount is used to unmount a path
func (b *SystemBackend) handleUnmount(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.Core.clusterParamsLock.RLock()
+ path := data.Get("path").(string)
+ path = sanitizeMountPath(path)
+
repState := b.Core.replicationState
- b.Core.clusterParamsLock.RUnlock()
-
- suffix := strings.TrimPrefix(req.Path, "mounts/")
- if len(suffix) == 0 {
- return logical.ErrorResponse("path cannot be blank"), logical.ErrInvalidRequest
- }
-
- suffix = sanitizeMountPath(suffix)
-
- entry := b.Core.router.MatchingMountEntry(suffix)
- if entry != nil && !entry.Local && repState == consts.ReplicationSecondary {
+ entry := b.Core.router.MatchingMountEntry(path)
+ if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
return logical.ErrorResponse("cannot unmount a non-local mount on a replication secondary"), nil
}
+ // We return success when the mount does not exist, so as not to
+ // reveal whether or not the mount existed.
+ match := b.Core.router.MatchingMount(path)
+ if match == "" || path != match {
+ return nil, nil
+ }
+
// Attempt unmount
- if existed, err := b.Core.unmount(suffix); existed && err != nil {
- b.Backend.Logger().Error("sys: unmount failed", "path", suffix, "error", err)
+ if err := b.Core.unmount(path); err != nil {
+ b.Backend.Logger().Error("sys: unmount failed", "path", path, "error", err)
return handleError(err)
}
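
Note the design choice above: unmounting a path that is not mounted now yields success rather than an error, so callers cannot probe for mount existence. A sketch of the client side, with an illustrative path:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Issues DELETE sys/mounts/<path>; per the matching check above, a
	// nonexistent path succeeds silently.
	if err := client.Sys().Unmount("my-kv"); err != nil {
		log.Fatal(err)
	}
}
```
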
@@ -1125,9 +1416,7 @@ func (b *SystemBackend) handleUnmount(
// handleRemount is used to remount a path
func (b *SystemBackend) handleRemount(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.Core.clusterParamsLock.RLock()
repState := b.Core.replicationState
- b.Core.clusterParamsLock.RUnlock()
// Get the paths
fromPath := data.Get("from").(string)
@@ -1142,7 +1431,7 @@ func (b *SystemBackend) handleRemount(
toPath = sanitizeMountPath(toPath)
entry := b.Core.router.MatchingMountEntry(fromPath)
- if entry != nil && !entry.Local && repState == consts.ReplicationSecondary {
+ if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
return logical.ErrorResponse("cannot remount a non-local mount on a replication secondary"), nil
}
@@ -1238,9 +1527,7 @@ func (b *SystemBackend) handleMountTuneWrite(
// handleTuneWriteCommon is used to set config settings on a path
func (b *SystemBackend) handleTuneWriteCommon(
path string, data *framework.FieldData) (*logical.Response, error) {
- b.Core.clusterParamsLock.RLock()
repState := b.Core.replicationState
- b.Core.clusterParamsLock.RUnlock()
path = sanitizeMountPath(path)
@@ -1257,7 +1544,7 @@ func (b *SystemBackend) handleTuneWriteCommon(
b.Backend.Logger().Error("sys: tune failed: no mount entry found", "path", path)
return handleError(fmt.Errorf("sys: tune of path '%s' failed: no mount entry found", path))
}
- if mountEntry != nil && !mountEntry.Local && repState == consts.ReplicationSecondary {
+ if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
return logical.ErrorResponse("cannot tune a non-local mount on a replication secondary"), nil
}
@@ -1269,40 +1556,52 @@ func (b *SystemBackend) handleTuneWriteCommon(
lock = &b.Core.mountsLock
}
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Check again after grabbing the lock
+ mountEntry = b.Core.router.MatchingMountEntry(path)
+ if mountEntry == nil {
+ b.Backend.Logger().Error("sys: tune failed: no mount entry found", "path", path)
+ return handleError(fmt.Errorf("sys: tune of path '%s' failed: no mount entry found", path))
+ }
+ if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
+ return logical.ErrorResponse("cannot tune a non-local mount on a replication secondary"), nil
+ }
+
// Timing configuration parameters
{
- var newDefault, newMax *time.Duration
+ var newDefault, newMax time.Duration
defTTL := data.Get("default_lease_ttl").(string)
switch defTTL {
case "":
+ newDefault = mountEntry.Config.DefaultLeaseTTL
case "system":
- tmpDef := time.Duration(0)
- newDefault = &tmpDef
+ newDefault = time.Duration(0)
default:
tmpDef, err := parseutil.ParseDurationSecond(defTTL)
if err != nil {
return handleError(err)
}
- newDefault = &tmpDef
+ newDefault = tmpDef
}
maxTTL := data.Get("max_lease_ttl").(string)
switch maxTTL {
case "":
+ newMax = mountEntry.Config.MaxLeaseTTL
case "system":
- tmpMax := time.Duration(0)
- newMax = &tmpMax
+ newMax = time.Duration(0)
default:
tmpMax, err := parseutil.ParseDurationSecond(maxTTL)
if err != nil {
return handleError(err)
}
- newMax = &tmpMax
+ newMax = tmpMax
}
- if newDefault != nil || newMax != nil {
- lock.Lock()
- defer lock.Unlock()
+ if newDefault != mountEntry.Config.DefaultLeaseTTL ||
+ newMax != mountEntry.Config.MaxLeaseTTL {
if err := b.tuneMountTTLs(path, mountEntry, newDefault, newMax); err != nil {
b.Backend.Logger().Error("sys: tuning failed", "path", path, "error", err)
@@ -1311,6 +1610,28 @@ func (b *SystemBackend) handleTuneWriteCommon(
}
}
+ description := data.Get("description").(string)
+ if description != "" {
+ oldDesc := mountEntry.Description
+ mountEntry.Description = description
+
+ // Update the mount table
+ var err error
+ switch {
+ case strings.HasPrefix(path, "auth/"):
+ err = b.Core.persistAuth(b.Core.auth, mountEntry.Local)
+ default:
+ err = b.Core.persistMounts(b.Core.mounts, mountEntry.Local)
+ }
+ if err != nil {
+ mountEntry.Description = oldDesc
+ return handleError(err)
+ }
+ if b.Core.logger.IsInfo() {
+ b.Core.logger.Info("core: mount tuning of description successful", "path", path)
+ }
+ }
+
return nil, nil
}
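
A sketch of tuning a mount through the API, matching the semantics above: omitted TTL fields keep their current values, the literal "system" selects the system defaults, and a non-empty description is persisted to the mount table. The path and values are illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// An update on sys/mounts/<path>/tune reaches handleTuneWriteCommon.
	_, err = client.Logical().Write("sys/mounts/secret/tune", map[string]interface{}{
		"default_lease_ttl": "768h",
		"max_lease_ttl":     "8760h",
		"description":       "primary kv store",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
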
@@ -1461,6 +1782,7 @@ func (b *SystemBackend) handleAuthTable(
info := map[string]interface{}{
"type": entry.Type,
"description": entry.Description,
+ "accessor": entry.Accessor,
"config": map[string]interface{}{
"default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()),
"max_lease_ttl": int64(entry.Config.MaxLeaseTTL.Seconds()),
@@ -1475,12 +1797,10 @@ func (b *SystemBackend) handleAuthTable(
// handleEnableAuth is used to enable a new credential backend
func (b *SystemBackend) handleEnableAuth(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.Core.clusterParamsLock.RLock()
repState := b.Core.replicationState
- b.Core.clusterParamsLock.RUnlock()
local := data.Get("local").(bool)
- if !local && repState == consts.ReplicationSecondary {
+ if !local && repState.HasState(consts.ReplicationPerformanceSecondary) {
return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
}
@@ -1488,6 +1808,35 @@ func (b *SystemBackend) handleEnableAuth(
path := data.Get("path").(string)
logicalType := data.Get("type").(string)
description := data.Get("description").(string)
+ pluginName := data.Get("plugin_name").(string)
+
+ var config MountConfig
+ var apiConfig APIMountConfig
+
+ configMap := data.Get("config").(map[string]interface{})
+ if configMap != nil && len(configMap) != 0 {
+ err := mapstructure.Decode(configMap, &apiConfig)
+ if err != nil {
+ return logical.ErrorResponse(
+ "unable to convert given auth config information"),
+ logical.ErrInvalidRequest
+ }
+ }
+
+ // Only set the plugin name if the mount is of type "plugin", with
+ // apiConfig.PluginName taking precedence.
+ if logicalType == "plugin" {
+ switch {
+ case apiConfig.PluginName != "":
+ config.PluginName = apiConfig.PluginName
+ case pluginName != "":
+ config.PluginName = pluginName
+ default:
+ return logical.ErrorResponse(
+ "plugin_name must be provided for plugin backend"),
+ logical.ErrInvalidRequest
+ }
+ }
if logicalType == "" {
return logical.ErrorResponse(
@@ -1503,6 +1852,7 @@ func (b *SystemBackend) handleEnableAuth(
Path: path,
Type: logicalType,
Description: description,
+ Config: config,
Local: local,
}
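
A sketch of enabling a plugin-type credential backend over the API; as with regular mounts, plugin_name may be supplied top-level or nested under config. The mount path and plugin name are illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.Logical().Write("sys/auth/my-auth", map[string]interface{}{
		"type":        "plugin",
		"plugin_name": "mock-plugin",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
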
@@ -1517,16 +1867,26 @@ func (b *SystemBackend) handleEnableAuth(
// handleDisableAuth is used to disable a credential backend
func (b *SystemBackend) handleDisableAuth(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- suffix := strings.TrimPrefix(req.Path, "auth/")
- if len(suffix) == 0 {
- return logical.ErrorResponse("path cannot be blank"), logical.ErrInvalidRequest
+ path := data.Get("path").(string)
+ path = sanitizeMountPath(path)
+ fullPath := credentialRoutePrefix + path
+
+ repState := b.Core.replicationState
+ entry := b.Core.router.MatchingMountEntry(fullPath)
+ if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
+ return logical.ErrorResponse("cannot unmount a non-local mount on a replication secondary"), nil
}
- suffix = sanitizeMountPath(suffix)
+ // We return success when the mount does not exist, so as not to
+ // reveal whether or not the mount existed.
+ match := b.Core.router.MatchingMount(fullPath)
+ if match == "" || fullPath != match {
+ return nil, nil
+ }
// Attempt disable
- if existed, err := b.Core.disableCredential(suffix); existed && err != nil {
- b.Backend.Logger().Error("sys: disable auth mount failed", "path", suffix, "error", err)
+ if err := b.Core.disableCredential(path); err != nil {
+ b.Backend.Logger().Error("sys: disable auth mount failed", "path", path, "error", err)
return handleError(err)
}
return nil, nil
@@ -1564,7 +1924,7 @@ func (b *SystemBackend) handlePolicyRead(
return &logical.Response{
Data: map[string]interface{}{
- "name": name,
+ "name": policy.Name,
"rules": policy.Raw,
},
}, nil
@@ -1574,7 +1934,16 @@ func (b *SystemBackend) handlePolicyRead(
func (b *SystemBackend) handlePolicySet(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
- rules := data.Get("rules").(string)
+
+ rulesRaw, ok := data.GetOk("rules")
+ if !ok {
+ return logical.ErrorResponse("'rules' parameter not supplied"), nil
+ }
+
+ rules := rulesRaw.(string)
+ if rules == "" {
+ return logical.ErrorResponse("'rules' parameter empty"), nil
+ }
// Validate the rules parse
parse, err := Parse(rules)
@@ -1582,8 +1951,9 @@ func (b *SystemBackend) handlePolicySet(
return handleError(err)
}
- // Override the name
- parse.Name = strings.ToLower(name)
+ if name != "" {
+ parse.Name = name
+ }
// Update the policy
if err := b.Core.policyStore.SetPolicy(parse); err != nil {
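
handlePolicySet now rejects a missing or empty rules parameter instead of writing an empty policy. A sketch of a valid write using the Sys() helper, which posts to sys/policy/<name>; the policy name and rules are illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// A missing or empty rules string is now rejected by the handler.
	rules := `path "secret/*" { policy = "read" }`
	if err := client.Sys().PutPolicy("read-secrets", rules); err != nil {
		log.Fatal(err)
	}
}
```
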
@@ -1652,12 +2022,10 @@ func (b *SystemBackend) handleAuditHash(
// handleEnableAudit is used to enable a new audit backend
func (b *SystemBackend) handleEnableAudit(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.Core.clusterParamsLock.RLock()
repState := b.Core.replicationState
- b.Core.clusterParamsLock.RUnlock()
local := data.Get("local").(bool)
- if !local && repState == consts.ReplicationSecondary {
+ if !local && repState.HasState(consts.ReplicationPerformanceSecondary) {
return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
}
@@ -1779,6 +2147,29 @@ func (b *SystemBackend) handleRawDelete(
return nil, nil
}
+// handleRawList is used to list directly from the barrier
+func (b *SystemBackend) handleRawList(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ path := data.Get("path").(string)
+ if path != "" && !strings.HasSuffix(path, "/") {
+ path = path + "/"
+ }
+
+ // Prevent access to protected paths
+ for _, p := range protectedPaths {
+ if strings.HasPrefix(path, p) {
+ msg := fmt.Sprintf("cannot list '%s'", path)
+ return logical.ErrorResponse(msg), logical.ErrInvalidRequest
+ }
+ }
+
+ keys, err := b.Core.barrier.List(path)
+ if err != nil {
+ return handleError(err)
+ }
+ return logical.ListResponse(keys), nil
+}
+
// handleKeyStatus returns status information about the backend key
func (b *SystemBackend) handleKeyStatus(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
@@ -1800,10 +2191,8 @@ func (b *SystemBackend) handleKeyStatus(
// handleRotate is used to trigger a key rotation
func (b *SystemBackend) handleRotate(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.Core.clusterParamsLock.RLock()
repState := b.Core.replicationState
- b.Core.clusterParamsLock.RUnlock()
- if repState == consts.ReplicationSecondary {
+ if repState.HasState(consts.ReplicationPerformanceSecondary) {
return logical.ErrorResponse("cannot rotate on a replication secondary"), nil
}
@@ -1942,10 +2331,14 @@ func (b *SystemBackend) handleWrappingUnwrap(
func (b *SystemBackend) handleWrappingLookup(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // This ordering of lookups has been validated already in the wrapping
+ // validation func, we're just doing this for a safety check
token := data.Get("token").(string)
-
if token == "" {
- return logical.ErrorResponse("missing \"token\" value in input"), logical.ErrInvalidRequest
+ token = req.ClientToken
+ if token == "" {
+ return logical.ErrorResponse("missing \"token\" value in input"), logical.ErrInvalidRequest
+ }
}
cubbyReq := &logical.Request{
@@ -1969,6 +2362,7 @@ func (b *SystemBackend) handleWrappingLookup(
creationTTLRaw := cubbyResp.Data["creation_ttl"]
creationTime := cubbyResp.Data["creation_time"]
+ creationPath := cubbyResp.Data["creation_path"]
resp := &logical.Response{
Data: map[string]interface{}{},
@@ -1984,6 +2378,9 @@ func (b *SystemBackend) handleWrappingLookup(
// This was JSON marshaled so it's already a string in RFC3339 format
resp.Data["creation_time"] = cubbyResp.Data["creation_time"]
}
+ if creationPath != nil {
+ resp.Data["creation_path"] = cubbyResp.Data["creation_path"]
+ }
return resp, nil
}
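
With the read operation added to the lookup path and the fallback to req.ClientToken above, a caller can inspect a wrapping token either by posting it explicitly or by using it as the request token. A sketch of the explicit form, with a placeholder token value:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder; in practice this comes from a response-wrapped call.
	wrapToken := "00000000-0000-0000-0000-000000000000"

	// The response now includes creation_path alongside creation_ttl
	// and creation_time.
	secret, err := client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{
		"token": wrapToken,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["creation_ttl"], secret.Data["creation_path"])
}
```
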
@@ -2043,6 +2440,13 @@ func (b *SystemBackend) handleWrappingRewrap(
return nil, fmt.Errorf("error reading creation_ttl value from wrapping information: %v", err)
}
+ // Get creation_path to return as the response later
+ creationPathRaw := cubbyResp.Data["creation_path"]
+ if creationPathRaw == nil {
+ return nil, fmt.Errorf("creation_path value in wrapping information was nil")
+ }
+ creationPath := creationPathRaw.(string)
+
// Fetch the original response and return it as the data for the new response
cubbyReq = &logical.Request{
Operation: logical.ReadOperation,
@@ -2074,8 +2478,9 @@ func (b *SystemBackend) handleWrappingRewrap(
Data: map[string]interface{}{
"response": response,
},
- WrapInfo: &logical.ResponseWrapInfo{
- TTL: time.Duration(creationTTL),
+ WrapInfo: &wrapping.ResponseWrapInfo{
+ TTL: time.Duration(creationTTL),
+ CreationPath: creationPath,
},
}, nil
}
@@ -2100,6 +2505,21 @@ as well as perform core operations.
// sysHelp is all the help text for the sys backend.
var sysHelp = map[string][2]string{
+ "config/cors": {
+ "Configures or returns the current configuration of CORS settings.",
+ `
+This path responds to the following HTTP methods.
+
+ GET /
+ Returns the configuration of the CORS setting.
+
+ POST /
+ Sets the comma-separated list of origins that can make cross-origin requests.
+
+ DELETE /
+ Clears the CORS configuration and disables acceptance of CORS requests.
+ `,
+ },
"init": {
"Initializes or returns the initialization status of the Vault.",
`
@@ -2218,6 +2638,11 @@ and max_lease_ttl.`,
and is unaffected by replication.`,
},
+ "mount_plugin_name": {
+ `Name of the plugin to mount, as registered
+ in the plugin catalog.`,
+ },
+
"tune_default_lease_ttl": {
`The default lease TTL for this mount.`,
},
@@ -2354,6 +2779,15 @@ Example: you might have an OAuth backend for GitHub, and one for Google Apps.
"",
},
+ "auth_config": {
+ `Configuration for this mount, such as plugin_name.`,
+ },
+
+ "auth_plugin": {
+ `Name of the auth plugin to use, as registered in the plugin catalog.`,
+ "",
+ },
+
"policy-list": {
`List the configured access control policies.`,
`
@@ -2478,6 +2912,15 @@ Enable a new audit backend or disable an existing backend.
on a given path.`,
},
+ "tidy_leases": {
+ `This endpoint performs cleanup tasks that can be run if certain error
+conditions have occurred.`,
+ `This endpoint cleans up lease entries after certain error conditions.
+ Running it is usually not necessary, and is only required if upgrade notes
+ or support personnel suggest it.`,
+ },
+
"wrap": {
"Response-wraps an arbitrary JSON object.",
`Round trips the given input data into a response-wrapped token.`,
@@ -2524,7 +2967,38 @@ This path responds to the following HTTP methods.
"Lists the headers configured to be audited.",
`Returns a list of headers that have been configured to be audited.`,
},
+ "plugin-catalog": {
+ "Configures the plugins known to vault",
+ `
+This path responds to the following HTTP methods.
+ LIST /
+ Returns a list of names of configured plugins.
+ GET /
+ Retrieve the metadata for the named plugin.
+
+ PUT /
+ Add or update plugin.
+
+ DELETE /
+ Delete the plugin with the given name.
+ `,
+ },
+ "plugin-catalog_name": {
+ "The name of the plugin",
+ "",
+ },
+ "plugin-catalog_sha-256": {
+ `The SHA-256 sum of the executable used in the
+ command field. This should be hex encoded.`,
+ "",
+ },
+ "plugin-catalog_command": {
+ `The command used to start the plugin. The
+ executable defined in this command must exist in Vault's
+plugin directory.`,
+ "",
+ },
"leases": {
`View or list lease metadata.`,
`
@@ -2542,4 +3016,19 @@ This path responds to the following HTTP methods.
`The path to list leases under. Example: "aws/creds/deploy"`,
"",
},
+ "plugin-reload": {
+ "Reload mounts that use a particular backend plugin.",
+ `Reload mounts that use a particular backend plugin. Either the plugin name
+ or the desired plugin backend mounts must be provided, but not both. If the
+ plugin name is provided, all mounted paths that use that plugin backend
+ will be reloaded.`,
+ },
+ "plugin-backend-reload-plugin": {
+ `The name of the plugin to reload, as registered in the plugin catalog.`,
+ "",
+ },
+ "plugin-backend-reload-mounts": {
+ `The mount paths of the plugin backends to reload.`,
+ "",
+ },
}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go b/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go
index 809ebb9..929159e 100644
--- a/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go
+++ b/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go
@@ -7,61 +7,31 @@ import (
)
// tuneMount is used to set config on a mount point
-func (b *SystemBackend) tuneMountTTLs(path string, me *MountEntry, newDefault, newMax *time.Duration) error {
- meConfig := &me.Config
+func (b *SystemBackend) tuneMountTTLs(path string, me *MountEntry, newDefault, newMax time.Duration) error {
+ zero := time.Duration(0)
- if newDefault == nil && newMax == nil {
- return nil
- }
- if newDefault == nil && newMax != nil &&
- *newMax == meConfig.MaxLeaseTTL {
- return nil
- }
- if newMax == nil && newDefault != nil &&
- *newDefault == meConfig.DefaultLeaseTTL {
- return nil
- }
- if newMax != nil && newDefault != nil &&
- *newDefault == meConfig.DefaultLeaseTTL &&
- *newMax == meConfig.MaxLeaseTTL {
- return nil
- }
+ switch {
+ case newDefault == zero && newMax == zero:
+ // No checks needed
- if newMax != nil && newDefault != nil && *newMax < *newDefault {
- return fmt.Errorf("new backend max lease TTL of %d less than new backend default lease TTL of %d",
- int(newMax.Seconds()), int(newDefault.Seconds()))
- }
+ case newDefault == zero && newMax != zero:
+ // No default/max conflict, no checks needed
- if newMax != nil && newDefault == nil {
- if meConfig.DefaultLeaseTTL != 0 && *newMax < meConfig.DefaultLeaseTTL {
- return fmt.Errorf("new backend max lease TTL of %d less than backend default lease TTL of %d",
- int(newMax.Seconds()), int(meConfig.DefaultLeaseTTL.Seconds()))
+ case newDefault != zero && newMax == zero:
+ // No default/max conflict, no checks needed
+
+ case newDefault != zero && newMax != zero:
+ if newMax < newDefault {
+ return fmt.Errorf("backend max lease TTL of %d would be less than backend default lease TTL of %d",
+ int(newMax.Seconds()), int(newDefault.Seconds()))
}
}
- if newDefault != nil {
- if meConfig.MaxLeaseTTL == 0 {
- if newMax == nil && *newDefault > b.Core.maxLeaseTTL {
- return fmt.Errorf("new backend default lease TTL of %d greater than system max lease TTL of %d",
- int(newDefault.Seconds()), int(b.Core.maxLeaseTTL.Seconds()))
- }
- } else {
- if newMax == nil && *newDefault > meConfig.MaxLeaseTTL {
- return fmt.Errorf("new backend default lease TTL of %d greater than backend max lease TTL of %d",
- int(newDefault.Seconds()), int(meConfig.MaxLeaseTTL.Seconds()))
- }
- }
- }
+ origMax := me.Config.MaxLeaseTTL
+ origDefault := me.Config.DefaultLeaseTTL
- origMax := meConfig.MaxLeaseTTL
- origDefault := meConfig.DefaultLeaseTTL
-
- if newMax != nil {
- meConfig.MaxLeaseTTL = *newMax
- }
- if newDefault != nil {
- meConfig.DefaultLeaseTTL = *newDefault
- }
+ me.Config.MaxLeaseTTL = newMax
+ me.Config.DefaultLeaseTTL = newDefault
// Update the mount table
var err error
@@ -72,13 +42,12 @@ func (b *SystemBackend) tuneMountTTLs(path string, me *MountEntry, newDefault, n
err = b.Core.persistMounts(b.Core.mounts, me.Local)
}
if err != nil {
- meConfig.MaxLeaseTTL = origMax
- meConfig.DefaultLeaseTTL = origDefault
+ me.Config.MaxLeaseTTL = origMax
+ me.Config.DefaultLeaseTTL = origDefault
return fmt.Errorf("failed to update mount table, rolling back TTL changes")
}
-
if b.Core.logger.IsInfo() {
- b.Core.logger.Info("core: mount tuning successful", "path", path)
+ b.Core.logger.Info("core: mount tuning of leases successful", "path", path)
}
return nil
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_integ_test.go b/vendor/github.com/hashicorp/vault/vault/logical_system_integ_test.go
new file mode 100644
index 0000000..60eab6b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/logical_system_integ_test.go
@@ -0,0 +1,467 @@
+package vault_test
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/builtin/plugin"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/logical"
+ lplugin "github.com/hashicorp/vault/logical/plugin"
+ "github.com/hashicorp/vault/logical/plugin/mock"
+ "github.com/hashicorp/vault/vault"
+)
+
+func TestSystemBackend_Plugin_secret(t *testing.T) {
+ cluster := testSystemBackendMock(t, 1, 1, logical.TypeLogical)
+ defer cluster.Cleanup()
+
+ core := cluster.Cores[0]
+
+ // Make a request to lazy load the plugin
+ req := logical.TestRequest(t, logical.ReadOperation, "mock-0/internal")
+ req.ClientToken = core.Client.Token()
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: response should not be nil")
+ }
+
+ // Seal the cluster
+ cluster.EnsureCoresSealed(t)
+
+ // Unseal the cluster
+ barrierKeys := cluster.BarrierKeys
+ for _, core := range cluster.Cores {
+ for _, key := range barrierKeys {
+ _, err := core.Unseal(vault.TestKeyCopy(key))
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+ // Wait for active so post-unseal takes place
+ // If it fails, it means unseal process failed
+ vault.TestWaitActive(t, core.Core)
+ }
+}
+
+func TestSystemBackend_Plugin_auth(t *testing.T) {
+ cluster := testSystemBackendMock(t, 1, 1, logical.TypeCredential)
+ defer cluster.Cleanup()
+
+ core := cluster.Cores[0]
+
+ // Make a request to lazy load the plugin
+ req := logical.TestRequest(t, logical.ReadOperation, "auth/mock-0/internal")
+ req.ClientToken = core.Client.Token()
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: response should not be nil")
+ }
+
+ // Seal the cluster
+ cluster.EnsureCoresSealed(t)
+
+ // Unseal the cluster
+ barrierKeys := cluster.BarrierKeys
+ for _, core := range cluster.Cores {
+ for _, key := range barrierKeys {
+ _, err := core.Unseal(vault.TestKeyCopy(key))
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+ // Wait for active so post-unseal takes place
+ // If it fails, it means unseal process failed
+ vault.TestWaitActive(t, core.Core)
+ }
+}
+
+func TestSystemBackend_Plugin_MismatchType(t *testing.T) {
+ cluster := testSystemBackendMock(t, 1, 1, logical.TypeLogical)
+ defer cluster.Cleanup()
+
+ core := cluster.Cores[0]
+
+ // Replace the plugin with a credential backend
+ vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMainCredentials")
+
+ // Make a request to lazy load the now-credential plugin
+ // and expect an error
+ req := logical.TestRequest(t, logical.ReadOperation, "mock-0/internal")
+ req.ClientToken = core.Client.Token()
+ _, err := core.HandleRequest(req)
+ if err == nil {
+ t.Fatalf("expected error due to mismatch on error type: %s", err)
+ }
+
+ // Sleep a bit before cleanup is called
+ time.Sleep(1 * time.Second)
+}
+
+func TestSystemBackend_Plugin_CatalogRemoved(t *testing.T) {
+ t.Run("secret", func(t *testing.T) {
+ testPlugin_CatalogRemoved(t, logical.TypeLogical, false)
+ })
+
+ t.Run("auth", func(t *testing.T) {
+ testPlugin_CatalogRemoved(t, logical.TypeCredential, false)
+ })
+
+ t.Run("secret-mount-existing", func(t *testing.T) {
+ testPlugin_CatalogRemoved(t, logical.TypeLogical, true)
+ })
+
+ t.Run("auth-mount-existing", func(t *testing.T) {
+ testPlugin_CatalogRemoved(t, logical.TypeCredential, true)
+ })
+}
+
+func testPlugin_CatalogRemoved(t *testing.T, btype logical.BackendType, testMount bool) {
+ cluster := testSystemBackendMock(t, 1, 1, btype)
+ defer cluster.Cleanup()
+
+ core := cluster.Cores[0]
+
+ // Remove the plugin from the catalog
+ req := logical.TestRequest(t, logical.DeleteOperation, "sys/plugins/catalog/mock-plugin")
+ req.ClientToken = core.Client.Token()
+ resp, err := core.HandleRequest(req)
+ if err != nil || (resp != nil && resp.IsError()) {
+ t.Fatalf("err:%v resp:%#v", err, resp)
+ }
+
+ // Seal the cluster
+ cluster.EnsureCoresSealed(t)
+
+ // Unseal the cluster
+ barrierKeys := cluster.BarrierKeys
+ for _, core := range cluster.Cores {
+ for _, key := range barrierKeys {
+ _, err := core.Unseal(vault.TestKeyCopy(key))
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+ // Wait for active so post-unseal takes place
+ // If it fails, it means unseal process failed
+ vault.TestWaitActive(t, core.Core)
+ }
+
+ if testMount {
+ // Add plugin back to the catalog
+ vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMainLogical")
+
+ // Mount the plugin at the same path after plugin is re-added to the catalog
+ // and expect an error due to existing path.
+ var err error
+ switch btype {
+ case logical.TypeLogical:
+ _, err = core.Client.Logical().Write("sys/mounts/mock-0", map[string]interface{}{
+ "type": "plugin",
+ "config": map[string]interface{}{
+ "plugin_name": "mock-plugin",
+ },
+ })
+ case logical.TypeCredential:
+ _, err = core.Client.Logical().Write("sys/auth/mock-0", map[string]interface{}{
+ "type": "plugin",
+ "plugin_name": "mock-plugin",
+ })
+ }
+ if err == nil {
+ t.Fatal("expected error when mounting on existing path")
+ }
+ }
+}
+
+func TestSystemBackend_Plugin_autoReload(t *testing.T) {
+ cluster := testSystemBackendMock(t, 1, 1, logical.TypeLogical)
+ defer cluster.Cleanup()
+
+ core := cluster.Cores[0]
+
+ // Update internal value
+ req := logical.TestRequest(t, logical.UpdateOperation, "mock-0/internal")
+ req.ClientToken = core.Client.Token()
+ req.Data["value"] = "baz"
+ resp, err := core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ // Call errors/rpc endpoint to trigger reload
+ req = logical.TestRequest(t, logical.ReadOperation, "mock-0/errors/rpc")
+ req.ClientToken = core.Client.Token()
+ resp, err = core.HandleRequest(req)
+ if err == nil {
+ t.Fatalf("expected error from error/rpc request")
+ }
+
+ // Check internal value to make sure it's reset
+ req = logical.TestRequest(t, logical.ReadOperation, "mock-0/internal")
+ req.ClientToken = core.Client.Token()
+ resp, err = core.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: response should not be nil")
+ }
+ if resp.Data["value"].(string) == "baz" {
+ t.Fatal("did not expect backend internal value to be 'baz'")
+ }
+}
+
+func TestSystemBackend_Plugin_SealUnseal(t *testing.T) {
+ cluster := testSystemBackendMock(t, 1, 1, logical.TypeLogical)
+ defer cluster.Cleanup()
+
+ // Seal the cluster
+ cluster.EnsureCoresSealed(t)
+
+ // Unseal the cluster
+ barrierKeys := cluster.BarrierKeys
+ for _, core := range cluster.Cores {
+ for _, key := range barrierKeys {
+ _, err := core.Unseal(vault.TestKeyCopy(key))
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ sealed, err := core.Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
+ // Wait for active so post-unseal takes place
+ // If it fails, it means unseal process failed
+ vault.TestWaitActive(t, core.Core)
+ }
+}
+
+func TestSystemBackend_Plugin_reload(t *testing.T) {
+ data := map[string]interface{}{
+ "plugin": "mock-plugin",
+ }
+ t.Run("plugin", func(t *testing.T) { testSystemBackend_PluginReload(t, data) })
+
+ data = map[string]interface{}{
+ "mounts": "mock-0/,mock-1/",
+ }
+ t.Run("mounts", func(t *testing.T) { testSystemBackend_PluginReload(t, data) })
+}
+
+// Helper func to test different reload methods on plugin reload endpoint
+func testSystemBackend_PluginReload(t *testing.T, reqData map[string]interface{}) {
+ cluster := testSystemBackendMock(t, 1, 2, logical.TypeLogical)
+ defer cluster.Cleanup()
+
+ core := cluster.Cores[0]
+ client := core.Client
+
+ for i := 0; i < 2; i++ {
+ // Update internal value in the backend
+ resp, err := client.Logical().Write(fmt.Sprintf("mock-%d/internal", i), map[string]interface{}{
+ "value": "baz",
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+ }
+
+ // Perform plugin reload
+ resp, err := client.Logical().Write("sys/plugins/reload/backend", reqData)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ for i := 0; i < 2; i++ {
+ // Ensure the internal backend value is reset
+ resp, err := client.Logical().Read(fmt.Sprintf("mock-%d/internal", i))
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: response should not be nil")
+ }
+ if resp.Data["value"].(string) == "baz" {
+ t.Fatal("did not expect backend internal value to be 'baz'")
+ }
+ }
+}
+
+ // testSystemBackendMock returns a test cluster with the desired number
+ // of mounted mock plugin backends
+func testSystemBackendMock(t *testing.T, numCores, numMounts int, backendType logical.BackendType) *vault.TestCluster {
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "plugin": plugin.Factory,
+ },
+ CredentialBackends: map[string]logical.Factory{
+ "plugin": plugin.Factory,
+ },
+ }
+
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ KeepStandbysSealed: true,
+ NumCores: numCores,
+ })
+ cluster.Start()
+
+ core := cluster.Cores[0]
+ vault.TestWaitActive(t, core.Core)
+ client := core.Client
+
+ os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
+
+ switch backendType {
+ case logical.TypeLogical:
+ vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMainLogical")
+ for i := 0; i < numMounts; i++ {
+ // Alternate input styles for plugin_name on every other mount
+ options := map[string]interface{}{
+ "type": "plugin",
+ }
+ if (i+1)%2 == 0 {
+ options["config"] = map[string]interface{}{
+ "plugin_name": "mock-plugin",
+ }
+ } else {
+ options["plugin_name"] = "mock-plugin"
+ }
+ resp, err := client.Logical().Write(fmt.Sprintf("sys/mounts/mock-%d", i), options)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+ }
+ case logical.TypeCredential:
+ vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMainCredentials")
+ for i := 0; i < numMounts; i++ {
+ // Alternate input styles for plugin_name on every other mount
+ options := map[string]interface{}{
+ "type": "plugin",
+ }
+ if (i+1)%2 == 0 {
+ options["config"] = map[string]interface{}{
+ "plugin_name": "mock-plugin",
+ }
+ } else {
+ options["plugin_name"] = "mock-plugin"
+ }
+ resp, err := client.Logical().Write(fmt.Sprintf("sys/auth/mock-%d", i), options)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+ }
+ default:
+ t.Fatal("unknown backend type provided")
+ }
+
+ return cluster
+}
+
+func TestBackend_PluginMainLogical(t *testing.T) {
+ args := []string{}
+ if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadaModeEnv) != "true" {
+ return
+ }
+
+ caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
+ if caPEM == "" {
+ t.Fatal("CA cert not passed in")
+ }
+ args = append(args, fmt.Sprintf("--ca-cert=%s", caPEM))
+
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(args)
+ tlsConfig := apiClientMeta.GetTLSConfig()
+ tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig)
+
+ factoryFunc := mock.FactoryType(logical.TypeLogical)
+
+ err := lplugin.Serve(&lplugin.ServeOpts{
+ BackendFactoryFunc: factoryFunc,
+ TLSProviderFunc: tlsProviderFunc,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackend_PluginMainCredentials(t *testing.T) {
+ args := []string{}
+ if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadaModeEnv) != "true" {
+ return
+ }
+
+ caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
+ if caPEM == "" {
+ t.Fatal("CA cert not passed in")
+ }
+ args = append(args, fmt.Sprintf("--ca-cert=%s", caPEM))
+
+ apiClientMeta := &pluginutil.APIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(args)
+ tlsConfig := apiClientMeta.GetTLSConfig()
+ tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig)
+
+ factoryFunc := mock.FactoryType(logical.TypeCredential)
+
+ err := lplugin.Serve(&lplugin.ServeOpts{
+ BackendFactoryFunc: factoryFunc,
+ TLSProviderFunc: tlsProviderFunc,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_test.go b/vendor/github.com/hashicorp/vault/vault/logical_system_test.go
index 4f3f70f..3f9243b 100644
--- a/vendor/github.com/hashicorp/vault/vault/logical_system_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/logical_system_test.go
@@ -2,6 +2,11 @@ package vault
import (
"crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
"reflect"
"strings"
"testing"
@@ -9,6 +14,8 @@ import (
"github.com/fatih/structs"
"github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/helper/builtinplugins"
+ "github.com/hashicorp/vault/helper/pluginutil"
"github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
"github.com/mitchellh/mapstructure"
@@ -20,12 +27,16 @@ func TestSystemBackend_RootPaths(t *testing.T) {
"remount",
"audit",
"audit/*",
+ "raw",
"raw/*",
"replication/primary/secondary-token",
"replication/reindex",
"rotate",
+ "config/cors",
"config/auditing/*",
+ "plugins/catalog/*",
"revoke-prefix/*",
+ "revoke-force/*",
"leases/revoke-prefix/*",
"leases/revoke-force/*",
"leases/lookup/*",
@@ -38,6 +49,62 @@ func TestSystemBackend_RootPaths(t *testing.T) {
}
}
+func TestSystemConfigCORS(t *testing.T) {
+ b := testSystemBackend(t)
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, "")
+ b.(*SystemBackend).Core.systemBarrierView = view
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "config/cors")
+ req.Data["allowed_origins"] = "http://www.example.com"
+ req.Data["allowed_headers"] = "X-Custom-Header"
+ _, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := &logical.Response{
+ Data: map[string]interface{}{
+ "enabled": true,
+ "allowed_origins": []string{"http://www.example.com"},
+ "allowed_headers": append(StdAllowedHeaders, "X-Custom-Header"),
+ },
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "config/cors")
+ actual, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad: %#v", actual)
+ }
+
+ req = logical.TestRequest(t, logical.DeleteOperation, "config/cors")
+ _, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "config/cors")
+ actual, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ expected = &logical.Response{
+ Data: map[string]interface{}{
+ "enabled": false,
+ },
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("DELETE FAILED -- bad: %#v", actual)
+ }
+
+}
+
func TestSystemBackend_mounts(t *testing.T) {
b := testSystemBackend(t)
req := logical.TestRequest(t, logical.ReadOperation, "mounts")
@@ -50,8 +117,9 @@ func TestSystemBackend_mounts(t *testing.T) {
// copy what's given
exp := map[string]interface{}{
"secret/": map[string]interface{}{
- "type": "generic",
- "description": "generic secret storage",
+ "type": "kv",
+ "description": "key/value secret storage",
+ "accessor": resp.Data["secret/"].(map[string]interface{})["accessor"],
"config": map[string]interface{}{
"default_lease_ttl": resp.Data["secret/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
"max_lease_ttl": resp.Data["secret/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
@@ -62,6 +130,7 @@ func TestSystemBackend_mounts(t *testing.T) {
"sys/": map[string]interface{}{
"type": "system",
"description": "system endpoints used for control, policy and debugging",
+ "accessor": resp.Data["sys/"].(map[string]interface{})["accessor"],
"config": map[string]interface{}{
"default_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
"max_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
@@ -72,6 +141,7 @@ func TestSystemBackend_mounts(t *testing.T) {
"cubbyhole/": map[string]interface{}{
"description": "per-token private secret storage",
"type": "cubbyhole",
+ "accessor": resp.Data["cubbyhole/"].(map[string]interface{})["accessor"],
"config": map[string]interface{}{
"default_lease_ttl": resp.Data["cubbyhole/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
"max_lease_ttl": resp.Data["cubbyhole/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
@@ -89,7 +159,7 @@ func TestSystemBackend_mount(t *testing.T) {
b := testSystemBackend(t)
req := logical.TestRequest(t, logical.UpdateOperation, "mounts/prod/secret/")
- req.Data["type"] = "generic"
+ req.Data["type"] = "kv"
resp, err := b.HandleRequest(req)
if err != nil {
@@ -104,7 +174,7 @@ func TestSystemBackend_mount_force_no_cache(t *testing.T) {
core, b, _ := testCoreSystemBackend(t)
req := logical.TestRequest(t, logical.UpdateOperation, "mounts/prod/secret/")
- req.Data["type"] = "generic"
+ req.Data["type"] = "kv"
req.Data["config"] = map[string]interface{}{
"force_no_cache": true,
}
@@ -353,7 +423,7 @@ func TestSystemBackend_leases(t *testing.T) {
t.Fatalf("err: %v", err)
}
if resp.Data["renewable"] == nil || resp.Data["renewable"].(bool) {
- t.Fatal("generic leases are not renewable")
+ t.Fatal("kv leases are not renewable")
}
// Invalid lease
@@ -919,7 +989,8 @@ func TestSystemBackend_revokePrefixAuth(t *testing.T) {
MaxLeaseTTLVal: time.Hour * 24 * 32,
},
}
- b, err := NewSystemBackend(core, bc)
+ b := NewSystemBackend(core)
+ err := b.Backend.Setup(bc)
if err != nil {
t.Fatal(err)
}
@@ -982,7 +1053,8 @@ func TestSystemBackend_revokePrefixAuth_origUrl(t *testing.T) {
MaxLeaseTTLVal: time.Hour * 24 * 32,
},
}
- b, err := NewSystemBackend(core, bc)
+ b := NewSystemBackend(core)
+ err := b.Backend.Setup(bc)
if err != nil {
t.Fatal(err)
}
@@ -1048,6 +1120,7 @@ func TestSystemBackend_authTable(t *testing.T) {
"token/": map[string]interface{}{
"type": "token",
"description": "token based credentials",
+ "accessor": resp.Data["token/"].(map[string]interface{})["accessor"],
"config": map[string]interface{}{
"default_lease_ttl": int64(0),
"max_lease_ttl": int64(0),
@@ -1163,8 +1236,16 @@ func TestSystemBackend_policyCRUD(t *testing.T) {
// Read, and make sure that case has been normalized
req = logical.TestRequest(t, logical.ReadOperation, "policy/Foo")
resp, err = b.HandleRequest(req)
- if resp != nil {
- t.Fatalf("err: expected nil response, got %#v", *resp)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ exp = map[string]interface{}{
+ "name": "foo",
+ "rules": rules,
+ }
+ if !reflect.DeepEqual(resp.Data, exp) {
+ t.Fatalf("got: %#v expect: %#v", resp.Data, exp)
}
// List the policies
@@ -1246,13 +1327,11 @@ func TestSystemBackend_auditHash(t *testing.T) {
Key: "salt",
Value: []byte("foo"),
})
- var err error
- config.Salt, err = salt.NewSalt(view, &salt.Config{
+ config.SaltView = view
+ config.SaltConfig = &salt.Config{
HMAC: sha256.New,
HMACType: "hmac-sha256",
- })
- if err != nil {
- t.Fatalf("error getting new salt: %v", err)
+ Location: salt.DefaultLocation,
}
return &NoopAudit{
Config: config,
@@ -1369,7 +1448,7 @@ func TestSystemBackend_disableAudit(t *testing.T) {
}
func TestSystemBackend_rawRead_Protected(t *testing.T) {
- b := testSystemBackend(t)
+ b := testSystemBackendRaw(t)
req := logical.TestRequest(t, logical.ReadOperation, "raw/"+keyringPath)
_, err := b.HandleRequest(req)
@@ -1379,7 +1458,7 @@ func TestSystemBackend_rawRead_Protected(t *testing.T) {
}
func TestSystemBackend_rawWrite_Protected(t *testing.T) {
- b := testSystemBackend(t)
+ b := testSystemBackendRaw(t)
req := logical.TestRequest(t, logical.UpdateOperation, "raw/"+keyringPath)
_, err := b.HandleRequest(req)
@@ -1389,7 +1468,7 @@ func TestSystemBackend_rawWrite_Protected(t *testing.T) {
}
func TestSystemBackend_rawReadWrite(t *testing.T) {
- c, b, _ := testCoreSystemBackend(t)
+ c, b, _ := testCoreSystemBackendRaw(t)
req := logical.TestRequest(t, logical.UpdateOperation, "raw/sys/policy/test")
req.Data["value"] = `path "secret/" { policy = "read" }`
@@ -1425,7 +1504,7 @@ func TestSystemBackend_rawReadWrite(t *testing.T) {
}
func TestSystemBackend_rawDelete_Protected(t *testing.T) {
- b := testSystemBackend(t)
+ b := testSystemBackendRaw(t)
req := logical.TestRequest(t, logical.DeleteOperation, "raw/"+keyringPath)
_, err := b.HandleRequest(req)
@@ -1435,7 +1514,7 @@ func TestSystemBackend_rawDelete_Protected(t *testing.T) {
}
func TestSystemBackend_rawDelete(t *testing.T) {
- c, b, _ := testCoreSystemBackend(t)
+ c, b, _ := testCoreSystemBackendRaw(t)
// set the policy!
p := &Policy{Name: "test"}
@@ -1511,6 +1590,25 @@ func TestSystemBackend_rotate(t *testing.T) {
func testSystemBackend(t *testing.T) logical.Backend {
c, _, _ := TestCoreUnsealed(t)
+ return testSystemBackendInternal(t, c)
+}
+
+func testSystemBackendRaw(t *testing.T) logical.Backend {
+ c, _, _ := TestCoreUnsealedRaw(t)
+ return testSystemBackendInternal(t, c)
+}
+
+func testCoreSystemBackend(t *testing.T) (*Core, logical.Backend, string) {
+ c, _, root := TestCoreUnsealed(t)
+ return c, testSystemBackendInternal(t, c), root
+}
+
+func testCoreSystemBackendRaw(t *testing.T) (*Core, logical.Backend, string) {
+ c, _, root := TestCoreUnsealedRaw(t)
+ return c, testSystemBackendInternal(t, c), root
+}
+
+func testSystemBackendInternal(t *testing.T, c *Core) logical.Backend {
bc := &logical.BackendConfig{
Logger: c.logger,
System: logical.StaticSystemView{
@@ -1519,7 +1617,8 @@ func testSystemBackend(t *testing.T) logical.Backend {
},
}
- b, err := NewSystemBackend(c, bc)
+ b := NewSystemBackend(c)
+ err := b.Backend.Setup(bc)
if err != nil {
t.Fatal(err)
}
@@ -1527,19 +1626,88 @@ func testSystemBackend(t *testing.T) logical.Backend {
return b
}
-func testCoreSystemBackend(t *testing.T) (*Core, logical.Backend, string) {
- c, _, root := TestCoreUnsealed(t)
- bc := &logical.BackendConfig{
- Logger: c.logger,
- System: logical.StaticSystemView{
- DefaultLeaseTTLVal: time.Hour * 24,
- MaxLeaseTTLVal: time.Hour * 24 * 32,
- },
+func TestSystemBackend_PluginCatalog_CRUD(t *testing.T) {
+ c, b, _ := testCoreSystemBackend(t)
+ // Bootstrap the pluginCatalog
+ sym, err := filepath.EvalSymlinks(os.TempDir())
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+ c.pluginCatalog.directory = sym
+
+ req := logical.TestRequest(t, logical.ListOperation, "plugins/catalog/")
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
}
- b, err := NewSystemBackend(c, bc)
+ if len(resp.Data["keys"].([]string)) != len(builtinplugins.Keys()) {
+ t.Fatalf("Wrong number of plugins, got %d, expected %d", len(resp.Data["keys"].([]string)), len(builtinplugins.Keys()))
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "plugins/catalog/mysql-database-plugin")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ actualRespData := resp.Data
+
+ expectedBuiltin := &pluginutil.PluginRunner{
+ Name: "mysql-database-plugin",
+ Builtin: true,
+ }
+ expectedRespData := structs.New(expectedBuiltin).Map()
+
+ if !reflect.DeepEqual(actualRespData, expectedRespData) {
+ t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", actualRespData, expectedRespData)
+ }
+
+ // Set a plugin
+ file, err := ioutil.TempFile(os.TempDir(), "temp")
if err != nil {
t.Fatal(err)
}
- return c, b, root
+ defer file.Close()
+
+ command := fmt.Sprintf("%s --test", filepath.Base(file.Name()))
+ req = logical.TestRequest(t, logical.UpdateOperation, "plugins/catalog/test-plugin")
+ req.Data["sha_256"] = hex.EncodeToString([]byte{'1'})
+ req.Data["command"] = command
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "plugins/catalog/test-plugin")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ actual := resp.Data
+
+ expectedRunner := &pluginutil.PluginRunner{
+ Name: "test-plugin",
+ Command: filepath.Join(sym, filepath.Base(file.Name())),
+ Args: []string{"--test"},
+ Sha256: []byte{'1'},
+ Builtin: false,
+ }
+ expected := structs.New(expectedRunner).Map()
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", actual, expected)
+ }
+
+ // Delete plugin
+ req = logical.TestRequest(t, logical.DeleteOperation, "plugins/catalog/test-plugin")
+ resp, err = b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ req = logical.TestRequest(t, logical.ReadOperation, "plugins/catalog/test-plugin")
+ resp, err = b.HandleRequest(req)
+ if resp != nil || err != nil {
+ t.Fatalf("expected nil response, plugin not deleted correctly got resp: %v, err: %v", resp, err)
+ }
}
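An aside on the comparison style in the CRUD test above: the expected struct is flattened into a map with fatih/structs before being compared against `resp.Data`. A minimal standalone sketch of that flattening, using a small stand-in struct rather than vault's pluginutil.PluginRunner:

```go
package main

import (
	"fmt"

	"github.com/fatih/structs"
)

// pluginRunner is a stand-in for vault's pluginutil.PluginRunner, kept
// small to show just the flattening behaviour.
type pluginRunner struct {
	Name    string
	Builtin bool
}

func main() {
	// structs.New(...).Map() flattens exported fields into a
	// map[string]interface{}, which is what the tests compare against
	// resp.Data with reflect.DeepEqual.
	m := structs.New(&pluginRunner{Name: "mysql-database-plugin", Builtin: true}).Map()
	fmt.Printf("%#v\n", m)
}
```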
diff --git a/vendor/github.com/hashicorp/vault/vault/mount.go b/vendor/github.com/hashicorp/vault/vault/mount.go
index d428eee..41aece9 100644
--- a/vendor/github.com/hashicorp/vault/vault/mount.go
+++ b/vendor/github.com/hashicorp/vault/vault/mount.go
@@ -9,8 +9,11 @@ import (
"strings"
"time"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/logical"
)
@@ -60,9 +63,30 @@ var (
singletonMounts = []string{
"cubbyhole",
"system",
+ "token",
}
+
+ // mountAliases maps old backend names to new backend names, allowing us
+ // to move/rename backends but maintain backwards compatibility
+ mountAliases = map[string]string{"generic": "kv"}
)
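The mountAliases table above is consulted when a backend is instantiated, so existing "generic" mounts transparently resolve to the renamed "kv" backend (see the newLogicalBackend hunk later in this file). A minimal standalone sketch of that lookup; resolveBackendType is a hypothetical helper, and only the generic-to-kv mapping comes from the diff:

```go
package main

import "fmt"

// mountAliases mirrors the table introduced above.
var mountAliases = map[string]string{"generic": "kv"}

// resolveBackendType shows the lookup newLogicalBackend performs before
// consulting the backend registry.
func resolveBackendType(t string) string {
	if alias, ok := mountAliases[t]; ok {
		return alias
	}
	return t
}

func main() {
	fmt.Println(resolveBackendType("generic"))   // kv
	fmt.Println(resolveBackendType("cubbyhole")) // cubbyhole (no alias)
}
```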
+func (c *Core) generateMountAccessor(entryType string) (string, error) {
+ var accessor string
+ for {
+ randBytes, err := uuid.GenerateRandomBytes(4)
+ if err != nil {
+ return "", err
+ }
+ accessor = fmt.Sprintf("%s_%s", entryType, fmt.Sprintf("%08x", randBytes[0:4]))
+ if entry := c.router.MatchingMountByAccessor(accessor); entry == nil {
+ break
+ }
+ }
+
+ return accessor, nil
+}
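generateMountAccessor above produces identifiers of the shape `<type>_<8 hex chars>`, retrying until the router reports no existing mount with that accessor. A standalone sketch of the identifier shape only, with the collision check against the router elided:

```go
package main

import (
	"crypto/rand"
	"fmt"
)

// makeAccessor shows the identifier shape used above: the entry type,
// an underscore, and eight hex characters from four random bytes. The
// real generateMountAccessor also loops until the accessor does not
// collide with an existing mount; that check is elided here.
func makeAccessor(entryType string) (string, error) {
	b := make([]byte, 4)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return fmt.Sprintf("%s_%08x", entryType, b), nil
}

func main() {
	a, err := makeAccessor("kv")
	if err != nil {
		panic(err)
	}
	fmt.Println(a) // e.g. kv_3f1c9a02
}
```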
+
// MountTable is used to represent the internal mount table
type MountTable struct {
Type string `json:"type"`
@@ -136,6 +160,7 @@ type MountEntry struct {
Type string `json:"type"` // Logical backend Type
Description string `json:"description"` // User-provided description
UUID string `json:"uuid"` // Barrier view UUID
+ Accessor string `json:"accessor"` // Unique but more human-friendly ID. Does not change, not used for any sensitive things (like as a salt, which the UUID sometimes is).
Config MountConfig `json:"config"` // Configuration related to this mount (but not backend-derived)
Options map[string]string `json:"options"` // Backend options
Local bool `json:"local"` // Local mounts are not replicated or affected by replication
@@ -147,25 +172,15 @@ type MountConfig struct {
DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` // Override for global default
MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` // Override for global default
ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` // Override for global default
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
-// Returns a deep copy of the mount entry
-func (e *MountEntry) Clone() *MountEntry {
- optClone := make(map[string]string)
- for k, v := range e.Options {
- optClone[k] = v
- }
- return &MountEntry{
- Table: e.Table,
- Path: e.Path,
- Type: e.Type,
- Description: e.Description,
- UUID: e.UUID,
- Config: e.Config,
- Options: optClone,
- Local: e.Local,
- Tainted: e.Tainted,
- }
+// APIMountConfig is an embedded struct of api.MountConfigInput
+type APIMountConfig struct {
+ DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
// Mount is used to mount a new backend to the mount table.
@@ -205,11 +220,22 @@ func (c *Core) mount(entry *MountEntry) error {
}
entry.UUID = entryUUID
}
+ if entry.Accessor == "" {
+ accessor, err := c.generateMountAccessor(entry.Type)
+ if err != nil {
+ return err
+ }
+ entry.Accessor = accessor
+ }
viewPath := backendBarrierPrefix + entry.UUID + "/"
view := NewBarrierView(c.barrier, viewPath)
sysView := c.mountEntrySysView(entry)
+ conf := make(map[string]string)
+ if entry.Config.PluginName != "" {
+ conf["plugin_name"] = entry.Config.PluginName
+ }
- backend, err := c.newLogicalBackend(entry.Type, sysView, view, nil)
+ backend, err := c.newLogicalBackend(entry.Type, sysView, view, conf)
if err != nil {
return err
}
@@ -217,8 +243,14 @@ func (c *Core) mount(entry *MountEntry) error {
return fmt.Errorf("nil backend of type %q returned from creation function", entry.Type)
}
+ // Check for the correct backend type
+ backendType := backend.Type()
+ if entry.Type == "plugin" && backendType != logical.TypeLogical {
+ return fmt.Errorf("cannot mount '%s' of type '%s' as a logical backend", entry.Config.PluginName, backendType)
+ }
+
// Call initialize; this takes care of init tasks that must be run after
- // the ignore paths are collected
+ // the ignore paths are collected.
if err := backend.Initialize(); err != nil {
return err
}
@@ -243,7 +275,7 @@ func (c *Core) mount(entry *MountEntry) error {
// Unmount is used to unmount a path. The boolean indicates whether the mount
// was found.
-func (c *Core) unmount(path string) (bool, error) {
+func (c *Core) unmount(path string) error {
// Ensure we end the path in a slash
if !strings.HasSuffix(path, "/") {
path += "/"
@@ -252,14 +284,14 @@ func (c *Core) unmount(path string) (bool, error) {
// Prevent protected paths from being unmounted
for _, p := range protectedMounts {
if strings.HasPrefix(path, p) {
- return true, fmt.Errorf("cannot unmount '%s'", path)
+ return fmt.Errorf("cannot unmount '%s'", path)
}
}
// Verify exact match of the route
match := c.router.MatchingMount(path)
if match == "" || path != match {
- return false, fmt.Errorf("no matching mount")
+ return fmt.Errorf("no matching mount")
}
// Get the view for this backend
@@ -267,23 +299,23 @@ func (c *Core) unmount(path string) (bool, error) {
// Mark the entry as tainted
if err := c.taintMountEntry(path); err != nil {
- return true, err
+ return err
}
// Taint the router path to prevent routing. Note that in-flight requests
// are uncertain, right now.
if err := c.router.Taint(path); err != nil {
- return true, err
+ return err
}
// Invoke the rollback manager a final time
if err := c.rollback.Rollback(path); err != nil {
- return true, err
+ return err
}
// Revoke all the dynamic keys
if err := c.expiration.RevokePrefix(path); err != nil {
- return true, err
+ return err
}
// Call cleanup function if it exists
@@ -294,22 +326,22 @@ func (c *Core) unmount(path string) (bool, error) {
// Unmount the backend entirely
if err := c.router.Unmount(path); err != nil {
- return true, err
+ return err
}
// Clear the data in the view
if err := logical.ClearView(view); err != nil {
- return true, err
+ return err
}
// Remove the mount table entry
if err := c.removeMountEntry(path); err != nil {
- return true, err
+ return err
}
if c.logger.IsInfo() {
c.logger.Info("core: successfully unmounted", "path", path)
}
- return true, nil
+ return nil
}
// removeMountEntry is used to remove an entry from the mount table
@@ -501,7 +533,7 @@ func (c *Core) loadMounts() error {
needPersist = true
}
- for _, requiredMount := range requiredMountTable().Entries {
+ for _, requiredMount := range c.requiredMountTable().Entries {
foundRequired := false
for _, coreMount := range c.mounts.Entries {
if coreMount.Type == requiredMount.Type {
@@ -509,7 +541,14 @@ func (c *Core) loadMounts() error {
break
}
}
- if !foundRequired {
+ // In a replication scenario we will let sync invalidation take
+ // care of creating a new required mount that doesn't exist yet.
+ // This should only happen in the upgrade case where a new one is
+ // introduced on the primary; otherwise initial bootstrapping will
+ // ensure this comes over. If we upgrade first, we simply don't
+ // create the mount, so we won't conflict when we sync. If this is
+ // local (e.g. cubbyhole) we do still add it.
+ if !foundRequired && (!c.replicationState.HasState(consts.ReplicationPerformanceSecondary) || requiredMount.Local) {
c.mounts.Entries = append(c.mounts.Entries, requiredMount)
needPersist = true
}
@@ -525,6 +564,14 @@ func (c *Core) loadMounts() error {
entry.Table = c.mounts.Type
needPersist = true
}
+ if entry.Accessor == "" {
+ accessor, err := c.generateMountAccessor(entry.Type)
+ if err != nil {
+ return err
+ }
+ entry.Accessor = accessor
+ needPersist = true
+ }
}
// Done if we have restored the mount table and we don't need
@@ -534,7 +581,7 @@ func (c *Core) loadMounts() error {
}
} else {
// Create and persist the default mount table
- c.mounts = defaultMountTable()
+ c.mounts = c.defaultMountTable()
}
if err := c.persistMounts(c.mounts, false); err != nil {
@@ -621,11 +668,12 @@ func (c *Core) setupMounts() error {
c.mountsLock.Lock()
defer c.mountsLock.Unlock()
- var backend logical.Backend
var view *BarrierView
var err error
for _, entry := range c.mounts.Entries {
+ var backend logical.Backend
+
// Initialize the backend, special casing for system
barrierPath := backendBarrierPrefix + entry.UUID + "/"
if entry.Type == "system" {
@@ -635,17 +683,32 @@ func (c *Core) setupMounts() error {
// Create a barrier view using the UUID
view = NewBarrierView(c.barrier, barrierPath)
sysView := c.mountEntrySysView(entry)
- // Initialize the backend
+ // Set up conf to pass in plugin_name
+ conf := make(map[string]string)
+ if entry.Config.PluginName != "" {
+ conf["plugin_name"] = entry.Config.PluginName
+ }
// Create the new backend
- backend, err = c.newLogicalBackend(entry.Type, sysView, view, nil)
+ backend, err = c.newLogicalBackend(entry.Type, sysView, view, conf)
if err != nil {
c.logger.Error("core: failed to create mount entry", "path", entry.Path, "error", err)
+ if errwrap.Contains(err, ErrPluginNotFound.Error()) && entry.Type == "plugin" {
+ // If we encounter an error instantiating the backend due to it being missing from the catalog,
+ // skip backend initialization but register the entry to the mount table to preserve storage
+ // and path.
+ goto ROUTER_MOUNT
+ }
return errLoadMountsFailed
}
if backend == nil {
return fmt.Errorf("created mount entry of type %q is nil", entry.Type)
}
+ // Check for the correct backend type
+ if entry.Type == "plugin" && backend.Type() != logical.TypeLogical {
+ return fmt.Errorf("cannot mount '%s' of type '%s' as a logical backend", entry.Config.PluginName, backend.Type())
+ }
+
if err := backend.Initialize(); err != nil {
return err
}
@@ -658,16 +721,15 @@ func (c *Core) setupMounts() error {
ch.saltUUID = entry.UUID
ch.storageView = view
}
-
+ ROUTER_MOUNT:
// Mount the backend
err = c.router.Mount(backend, entry.Path, entry, view)
if err != nil {
c.logger.Error("core: failed to mount entry", "path", entry.Path, "error", err)
return errLoadMountsFailed
- } else {
- if c.logger.IsInfo() {
- c.logger.Info("core: successfully mounted backend", "type", entry.Type, "path", entry.Path)
- }
+ }
+ if c.logger.IsInfo() {
+ c.logger.Info("core: successfully mounted backend", "type", entry.Type, "path", entry.Path)
}
// Ensure the path is tainted if set in the mount table
@@ -702,6 +764,9 @@ func (c *Core) unloadMounts() error {
// newLogicalBackend is used to create and configure a new logical backend by name
func (c *Core) newLogicalBackend(t string, sysView logical.SystemView, view logical.Storage, conf map[string]string) (logical.Backend, error) {
+ if alias, ok := mountAliases[t]; ok {
+ t = alias
+ }
f, ok := c.logicalBackends[t]
if !ok {
return nil, fmt.Errorf("unknown backend type: %s", t)
@@ -735,29 +800,34 @@ func (c *Core) mountEntrySysView(entry *MountEntry) logical.SystemView {
}
// defaultMountTable creates a default mount table
-func defaultMountTable() *MountTable {
+func (c *Core) defaultMountTable() *MountTable {
table := &MountTable{
Type: mountTableType,
}
mountUUID, err := uuid.GenerateUUID()
if err != nil {
- panic(fmt.Sprintf("could not create default mount table UUID: %v", err))
+ panic(fmt.Sprintf("could not create default secret mount UUID: %v", err))
}
- genericMount := &MountEntry{
+ mountAccessor, err := c.generateMountAccessor("kv")
+ if err != nil {
+ panic(fmt.Sprintf("could not generate default secret mount accessor: %v", err))
+ }
+ kvMount := &MountEntry{
Table: mountTableType,
Path: "secret/",
- Type: "generic",
- Description: "generic secret storage",
+ Type: "kv",
+ Description: "key/value secret storage",
UUID: mountUUID,
+ Accessor: mountAccessor,
}
- table.Entries = append(table.Entries, genericMount)
- table.Entries = append(table.Entries, requiredMountTable().Entries...)
+ table.Entries = append(table.Entries, kvMount)
+ table.Entries = append(table.Entries, c.requiredMountTable().Entries...)
return table
}
// requiredMountTable() creates a mount table with entries required
// to be available
-func requiredMountTable() *MountTable {
+func (c *Core) requiredMountTable() *MountTable {
table := &MountTable{
Type: mountTableType,
}
@@ -765,12 +835,17 @@ func requiredMountTable() *MountTable {
if err != nil {
panic(fmt.Sprintf("could not create cubbyhole UUID: %v", err))
}
+ cubbyholeAccessor, err := c.generateMountAccessor("cubbyhole")
+ if err != nil {
+ panic(fmt.Sprintf("could not generate cubbyhole accessor: %v", err))
+ }
cubbyholeMount := &MountEntry{
Table: mountTableType,
Path: "cubbyhole/",
Type: "cubbyhole",
Description: "per-token private secret storage",
UUID: cubbyholeUUID,
+ Accessor: cubbyholeAccessor,
Local: true,
}
@@ -778,14 +853,48 @@ func requiredMountTable() *MountTable {
if err != nil {
panic(fmt.Sprintf("could not create sys UUID: %v", err))
}
+ sysAccessor, err := c.generateMountAccessor("system")
+ if err != nil {
+ panic(fmt.Sprintf("could not generate sys accessor: %v", err))
+ }
sysMount := &MountEntry{
Table: mountTableType,
Path: "sys/",
Type: "system",
Description: "system endpoints used for control, policy and debugging",
UUID: sysUUID,
+ Accessor: sysAccessor,
}
table.Entries = append(table.Entries, cubbyholeMount)
table.Entries = append(table.Entries, sysMount)
return table
}
+
+// singletonMountTables returns the tables that hold singleton mounts. Its
+// main use is replication: it lets us send over mount info (especially the
+// UUIDs of mounts, which are used for salts) for mounts that cannot be
+// handled through the normal path. After saving these values on the
+// secondary, we let normal sync invalidation do its thing. Because of its
+// use for replication, we exclude local mounts.
+func (c *Core) singletonMountTables() (mounts, auth *MountTable) {
+ mounts = &MountTable{}
+ auth = &MountTable{}
+
+ c.mountsLock.RLock()
+ for _, entry := range c.mounts.Entries {
+ if strutil.StrListContains(singletonMounts, entry.Type) && !entry.Local {
+ mounts.Entries = append(mounts.Entries, entry)
+ }
+ }
+ c.mountsLock.RUnlock()
+
+ c.authLock.RLock()
+ for _, entry := range c.auth.Entries {
+ if strutil.StrListContains(singletonMounts, entry.Type) && !entry.Local {
+ auth.Entries = append(auth.Entries, entry)
+ }
+ }
+ c.authLock.RUnlock()
+
+ return
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/mount_test.go b/vendor/github.com/hashicorp/vault/vault/mount_test.go
index 4e8d25f..cf24e18 100644
--- a/vendor/github.com/hashicorp/vault/vault/mount_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/mount_test.go
@@ -47,7 +47,7 @@ func TestCore_Mount(t *testing.T) {
me := &MountEntry{
Table: mountTableType,
Path: "foo",
- Type: "generic",
+ Type: "kv",
}
err := c.mount(me)
if err != nil {
@@ -93,16 +93,18 @@ func TestCore_Mount_Local(t *testing.T) {
Type: mountTableType,
Entries: []*MountEntry{
&MountEntry{
- Table: mountTableType,
- Path: "noop/",
- Type: "generic",
- UUID: "abcd",
+ Table: mountTableType,
+ Path: "noop/",
+ Type: "kv",
+ UUID: "abcd",
+ Accessor: "kv-abcd",
},
&MountEntry{
- Table: mountTableType,
- Path: "noop2/",
- Type: "generic",
- UUID: "bcde",
+ Table: mountTableType,
+ Path: "noop2/",
+ Type: "kv",
+ UUID: "bcde",
+ Accessor: "kv-bcde",
},
},
}
@@ -162,7 +164,7 @@ func TestCore_Mount_Local(t *testing.T) {
compEntries := c.mounts.Entries[:0]
// Filter out required mounts
for _, v := range c.mounts.Entries {
- if v.Type == "generic" {
+ if v.Type == "kv" {
compEntries = append(compEntries, v)
}
}
@@ -179,9 +181,9 @@ func TestCore_Mount_Local(t *testing.T) {
func TestCore_Unmount(t *testing.T) {
c, keys, _ := TestCoreUnsealed(t)
- existed, err := c.unmount("secret")
- if !existed || err != nil {
- t.Fatalf("existed: %v; err: %v", existed, err)
+ err := c.unmount("secret")
+ if err != nil {
+ t.Fatalf("err: %v", err)
}
match := c.router.MatchingMount("secret/foo")
@@ -270,8 +272,8 @@ func TestCore_Unmount_Cleanup(t *testing.T) {
}
// Unmount, this should cleanup
- if existed, err := c.unmount("test/"); !existed || err != nil {
- t.Fatalf("existed: %v; err: %v", existed, err)
+ if err := c.unmount("test/"); err != nil {
+ t.Fatalf("err: %v", err)
}
// Rollback should be invoked
@@ -426,7 +428,8 @@ func TestCore_Remount_Protected(t *testing.T) {
}
func TestDefaultMountTable(t *testing.T) {
- table := defaultMountTable()
+ c, _, _ := TestCoreUnsealed(t)
+ table := c.defaultMountTable()
verifyDefaultTable(t, table)
}
@@ -606,7 +609,7 @@ func verifyDefaultTable(t *testing.T, table *MountTable) {
if entry.Path != "secret/" {
t.Fatalf("bad: %v", entry)
}
- if entry.Type != "generic" {
+ if entry.Type != "kv" {
t.Fatalf("bad: %v", entry)
}
case 2:
@@ -628,3 +631,28 @@ func verifyDefaultTable(t *testing.T, table *MountTable) {
}
}
}
+
+func TestSingletonMountTableFunc(t *testing.T) {
+ c, _, _ := TestCoreUnsealed(t)
+
+ mounts, auth := c.singletonMountTables()
+
+ if len(mounts.Entries) != 1 {
+ t.Fatal("length of mounts is wrong")
+ }
+ for _, entry := range mounts.Entries {
+ switch entry.Type {
+ case "system":
+ default:
+ t.Fatalf("unknown type %s", entry.Type)
+ }
+ }
+
+ if len(auth.Entries) != 1 {
+ t.Fatal("length of auth is wrong")
+ }
+
+ if auth.Entries[0].Type != "token" {
+ t.Fatal("unexpected entry type for auth")
+ }
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go b/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go
new file mode 100644
index 0000000..3e2466f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go
@@ -0,0 +1,189 @@
+package vault
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/vault/helper/builtinplugins"
+ "github.com/hashicorp/vault/helper/consts"
+ "github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/pluginutil"
+ "github.com/hashicorp/vault/logical"
+)
+
+var (
+ pluginCatalogPath = "core/plugin-catalog/"
+ ErrDirectoryNotConfigured = errors.New("could not set plugin, plugin directory is not configured")
+ ErrPluginNotFound = errors.New("plugin not found in the catalog")
+)
+
+// PluginCatalog keeps a record of plugins known to Vault. External plugins
+// need to be registered with the catalog before they can be used in
+// backends. Builtin plugins are automatically detected and included in the
+// catalog.
+type PluginCatalog struct {
+ catalogView *BarrierView
+ directory string
+
+ lock sync.RWMutex
+}
+
+func (c *Core) setupPluginCatalog() error {
+ c.pluginCatalog = &PluginCatalog{
+ catalogView: NewBarrierView(c.barrier, pluginCatalogPath),
+ directory: c.pluginDirectory,
+ }
+
+ if c.logger.IsInfo() {
+ c.logger.Info("core: successfully setup plugin catalog", "plugin-directory", c.pluginDirectory)
+ }
+
+ return nil
+}
+
+// Get retrieves a plugin with the specified name from the catalog. It first
+// looks for external plugins with this name and then looks for builtin plugins.
+// It returns a PluginRunner or an error if no plugin was found.
+func (c *PluginCatalog) Get(name string) (*pluginutil.PluginRunner, error) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+
+ // If the directory isn't set, only look for builtin plugins.
+ if c.directory != "" {
+ // Look for external plugins in the barrier
+ out, err := c.catalogView.Get(name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to retrieve plugin \"%s\": %v", name, err)
+ }
+ if out != nil {
+ entry := new(pluginutil.PluginRunner)
+ if err := jsonutil.DecodeJSON(out.Value, entry); err != nil {
+ return nil, fmt.Errorf("failed to decode plugin entry: %v", err)
+ }
+
+ // prepend the plugin directory to the command
+ entry.Command = filepath.Join(c.directory, entry.Command)
+
+ return entry, nil
+ }
+ }
+ // Look for builtin plugins
+ if factory, ok := builtinplugins.Get(name); ok {
+ return &pluginutil.PluginRunner{
+ Name: name,
+ Builtin: true,
+ BuiltinFactory: factory,
+ }, nil
+ }
+
+ return nil, nil
+}
+
+// Set registers a new external plugin with the catalog, or updates an existing
+// external plugin. It takes the name, command and SHA256 of the plugin.
+func (c *PluginCatalog) Set(name, command string, sha256 []byte) error {
+ if c.directory == "" {
+ return ErrDirectoryNotConfigured
+ }
+
+ switch {
+ case strings.Contains(name, ".."):
+ fallthrough
+ case strings.Contains(command, ".."):
+ return consts.ErrPathContainsParentReferences
+ }
+
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ parts := strings.Split(command, " ")
+
+ // Best effort check to make sure the command isn't breaking out of the
+ // configured plugin directory.
+ commandFull := filepath.Join(c.directory, parts[0])
+ sym, err := filepath.EvalSymlinks(commandFull)
+ if err != nil {
+ return fmt.Errorf("error while validating the command path: %v", err)
+ }
+ symAbs, err := filepath.Abs(filepath.Dir(sym))
+ if err != nil {
+ return fmt.Errorf("error while validating the command path: %v", err)
+ }
+
+ if symAbs != c.directory {
+ return errors.New("can not execute files outside of configured plugin directory")
+ }
+
+ entry := &pluginutil.PluginRunner{
+ Name: name,
+ Command: parts[0],
+ Args: parts[1:],
+ Sha256: sha256,
+ Builtin: false,
+ }
+
+ buf, err := json.Marshal(entry)
+ if err != nil {
+ return fmt.Errorf("failed to encode plugin entry: %v", err)
+ }
+
+ logicalEntry := logical.StorageEntry{
+ Key: name,
+ Value: buf,
+ }
+ if err := c.catalogView.Put(&logicalEntry); err != nil {
+ return fmt.Errorf("failed to persist plugin entry: %v", err)
+ }
+ return nil
+}
+
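The heart of Set is the best-effort containment check: the command is joined onto the plugin directory, symlinks are resolved, and the resulting parent directory must equal the configured directory exactly. A standalone sketch of that check, not the Vault implementation itself:

```go
package main

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// checkInDirectory mirrors the containment check in PluginCatalog.Set:
// join the command onto the plugin directory, resolve symlinks, and
// require that the resolved parent directory is exactly the configured
// directory.
func checkInDirectory(pluginDir, command string) error {
	full := filepath.Join(pluginDir, command)
	sym, err := filepath.EvalSymlinks(full)
	if err != nil {
		return fmt.Errorf("error while validating the command path: %v", err)
	}
	parent, err := filepath.Abs(filepath.Dir(sym))
	if err != nil {
		return fmt.Errorf("error while validating the command path: %v", err)
	}
	if parent != pluginDir {
		return errors.New("cannot execute files outside of the configured plugin directory")
	}
	return nil
}

func main() {
	// Create a throwaway "plugin" file inside a temp directory and verify
	// the check accepts it, then reject a path pointing elsewhere.
	dir, err := filepath.EvalSymlinks(os.TempDir())
	if err != nil {
		panic(err)
	}
	f, err := ioutil.TempFile(dir, "plugin")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()

	fmt.Println(checkInDirectory(dir, filepath.Base(f.Name()))) // <nil>
	fmt.Println(checkInDirectory(dir, "../somewhere/else"))     // error
}
```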
+// Delete is used to remove an external plugin from the catalog. Builtin plugins
+// cannot be deleted.
+func (c *PluginCatalog) Delete(name string) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ return c.catalogView.Delete(name)
+}
+
+// List returns a list of all the known plugin names. If an external and builtin
+// plugin share the same name, only one instance of the name will be returned.
+func (c *PluginCatalog) List() ([]string, error) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+
+ // Collect keys for external plugins in the barrier.
+ keys, err := logical.CollectKeys(c.catalogView)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the keys for builtin plugins
+ builtinKeys := builtinplugins.Keys()
+
+ // Use a map to de-duplicate the two lists
+ mapKeys := make(map[string]bool)
+
+ for _, plugin := range keys {
+ mapKeys[plugin] = true
+ }
+
+ for _, plugin := range builtinKeys {
+ mapKeys[plugin] = true
+ }
+
+ retList := make([]string, len(mapKeys))
+ i := 0
+ for k := range mapKeys {
+ retList[i] = k
+ i++
+ }
+ // sort for consistent ordering of builtin plugins
+ sort.Strings(retList)
+
+ return retList, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/plugin_catalog_test.go b/vendor/github.com/hashicorp/vault/vault/plugin_catalog_test.go
new file mode 100644
index 0000000..6cfacda
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/plugin_catalog_test.go
@@ -0,0 +1,176 @@
+package vault
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "testing"
+
+ "github.com/hashicorp/vault/helper/builtinplugins"
+ "github.com/hashicorp/vault/helper/pluginutil"
+)
+
+func TestPluginCatalog_CRUD(t *testing.T) {
+ core, _, _ := TestCoreUnsealed(t)
+
+ sym, err := filepath.EvalSymlinks(os.TempDir())
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+ core.pluginCatalog.directory = sym
+
+ // Get builtin plugin
+ p, err := core.pluginCatalog.Get("mysql-database-plugin")
+ if err != nil {
+ t.Fatalf("unexpected error %v", err)
+ }
+
+ expectedBuiltin := &pluginutil.PluginRunner{
+ Name: "mysql-database-plugin",
+ Builtin: true,
+ }
+ expectedBuiltin.BuiltinFactory, _ = builtinplugins.Get("mysql-database-plugin")
+
+ if &(p.BuiltinFactory) == &(expectedBuiltin.BuiltinFactory) {
+ t.Fatal("expected BuiltinFactory did not match actual")
+ }
+ expectedBuiltin.BuiltinFactory = nil
+ p.BuiltinFactory = nil
+ if !reflect.DeepEqual(p, expectedBuiltin) {
+ t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", p, expectedBuiltin)
+ }
+
+ // Set a plugin, test overwriting a builtin plugin
+ file, err := ioutil.TempFile(os.TempDir(), "temp")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer file.Close()
+
+ command := fmt.Sprintf("%s --test", filepath.Base(file.Name()))
+ err = core.pluginCatalog.Set("mysql-database-plugin", command, []byte{'1'})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Get the plugin
+ p, err = core.pluginCatalog.Get("mysql-database-plugin")
+ if err != nil {
+ t.Fatalf("unexpected error %v", err)
+ }
+
+ expected := &pluginutil.PluginRunner{
+ Name: "mysql-database-plugin",
+ Command: filepath.Join(sym, filepath.Base(file.Name())),
+ Args: []string{"--test"},
+ Sha256: []byte{'1'},
+ Builtin: false,
+ }
+
+ if !reflect.DeepEqual(p, expected) {
+ t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", p, expected)
+ }
+
+ // Delete the plugin
+ err = core.pluginCatalog.Delete("mysql-database-plugin")
+ if err != nil {
+ t.Fatalf("unexpected err: %v", err)
+ }
+
+ // Get builtin plugin
+ p, err = core.pluginCatalog.Get("mysql-database-plugin")
+ if err != nil {
+ t.Fatalf("unexpected error %v", err)
+ }
+
+ expectedBuiltin = &pluginutil.PluginRunner{
+ Name: "mysql-database-plugin",
+ Builtin: true,
+ }
+ expectedBuiltin.BuiltinFactory, _ = builtinplugins.Get("mysql-database-plugin")
+
+ if &(p.BuiltinFactory) == &(expectedBuiltin.BuiltinFactory) {
+ t.Fatal("expected BuiltinFactory did not match actual")
+ }
+ expectedBuiltin.BuiltinFactory = nil
+ p.BuiltinFactory = nil
+ if !reflect.DeepEqual(p, expectedBuiltin) {
+ t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", p, expectedBuiltin)
+ }
+
+}
+
+func TestPluginCatalog_List(t *testing.T) {
+ core, _, _ := TestCoreUnsealed(t)
+
+ sym, err := filepath.EvalSymlinks(os.TempDir())
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+ core.pluginCatalog.directory = sym
+
+ // Get builtin plugins and sort them
+ builtinKeys := builtinplugins.Keys()
+ sort.Strings(builtinKeys)
+
+ // List only builtin plugins
+ plugins, err := core.pluginCatalog.List()
+ if err != nil {
+ t.Fatalf("unexpected error %v", err)
+ }
+
+ if len(plugins) != len(builtinKeys) {
+ t.Fatalf("unexpected length of plugin list, expected %d, got %d", len(builtinKeys), len(plugins))
+ }
+
+ for i, p := range builtinKeys {
+ if !reflect.DeepEqual(plugins[i], p) {
+ t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", plugins[i], p)
+ }
+ }
+
+ // Set a plugin, test overwriting a builtin plugin
+ file, err := ioutil.TempFile(os.TempDir(), "temp")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer file.Close()
+
+ command := fmt.Sprintf("%s --test", filepath.Base(file.Name()))
+ err = core.pluginCatalog.Set("mysql-database-plugin", command, []byte{'1'})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Set another plugin
+ err = core.pluginCatalog.Set("aaaaaaa", command, []byte{'1'})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // List the plugins
+ plugins, err = core.pluginCatalog.List()
+ if err != nil {
+ t.Fatalf("unexpected error %v", err)
+ }
+
+ if len(plugins) != len(builtinKeys)+1 {
+ t.Fatalf("unexpected length of plugin list, expected %d, got %d", len(builtinKeys)+1, len(plugins))
+ }
+
+ // verify the first plugin is the one we just created.
+ if !reflect.DeepEqual(plugins[0], "aaaaaaa") {
+ t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", plugins[0], "aaaaaaa")
+ }
+
+ // verify the builtin plugins are correct
+ for i, p := range builtinKeys {
+ if !reflect.DeepEqual(plugins[i+1], p) {
+ t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", plugins[i+1], p)
+ }
+ }
+
+}
diff --git a/vendor/github.com/hashicorp/vault/vault/plugin_reload.go b/vendor/github.com/hashicorp/vault/vault/plugin_reload.go
new file mode 100644
index 0000000..eaff18b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/vault/plugin_reload.go
@@ -0,0 +1,125 @@
+package vault
+
+import (
+ "fmt"
+ "strings"
+
+ multierror "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/logical"
+)
+
+// reloadMatchingPluginMounts reloads the provided mounts, regardless of
+// plugin name, as long as the backend type is plugin.
+func (c *Core) reloadMatchingPluginMounts(mounts []string) error {
+ c.mountsLock.Lock()
+ defer c.mountsLock.Unlock()
+
+ var errors error
+ for _, mount := range mounts {
+ entry := c.router.MatchingMountEntry(mount)
+ if entry == nil {
+ errors = multierror.Append(errors, fmt.Errorf("cannot fetch mount entry on %s", mount))
+ continue
+ }
+
+ var isAuth bool
+ fullPath := c.router.MatchingMount(mount)
+ if strings.HasPrefix(fullPath, credentialRoutePrefix) {
+ isAuth = true
+ }
+
+ if entry.Type == "plugin" {
+ err := c.reloadPluginCommon(entry, isAuth)
+ if err != nil {
+ errors = multierror.Append(errors, fmt.Errorf("cannot reload plugin on %s: %v", mount, err))
+ continue
+ }
+ c.logger.Info("core: successfully reloaded plugin", "plugin", entry.Config.PluginName, "path", entry.Path)
+ }
+ }
+ return errors
+}
+
+// reloadMatchingPlugin reloads all mounted backends that use the plugin
+// pluginName (the name of the plugin as registered in the plugin catalog).
+func (c *Core) reloadMatchingPlugin(pluginName string) error {
+ c.mountsLock.Lock()
+ defer c.mountsLock.Unlock()
+
+ // Reload only the mount entries that match the plugin name
+ for _, entry := range c.mounts.Entries {
+ if entry.Config.PluginName == pluginName && entry.Type == "plugin" {
+ err := c.reloadPluginCommon(entry, false)
+ if err != nil {
+ return err
+ }
+ c.logger.Info("core: successfully reloaded plugin", "plugin", pluginName, "path", entry.Path)
+ }
+ }
+
+ // Reload only the auth mount entries that match the plugin name
+ for _, entry := range c.auth.Entries {
+ if entry.Config.PluginName == pluginName && entry.Type == "plugin" {
+ err := c.reloadPluginCommon(entry, true)
+ if err != nil {
+ return err
+ }
+ c.logger.Info("core: successfully reloaded plugin", "plugin", pluginName, "path", entry.Path)
+ }
+ }
+
+ return nil
+}
+
+// reloadPluginCommon is a generic method to reload a backend provided a
+// MountEntry. entry.Type should be checked by the caller to ensure that
+// it's a "plugin" type.
+func (c *Core) reloadPluginCommon(entry *MountEntry, isAuth bool) error {
+ path := entry.Path
+
+ // Fast-path out if the backend doesn't exist
+ raw, ok := c.router.root.Get(path)
+ if !ok {
+ return nil
+ }
+
+ // Call backend's Cleanup routine
+ re := raw.(*routeEntry)
+ re.backend.Cleanup()
+
+ view := re.storageView
+
+ sysView := c.mountEntrySysView(entry)
+ conf := make(map[string]string)
+ if entry.Config.PluginName != "" {
+ conf["plugin_name"] = entry.Config.PluginName
+ }
+
+ var backend logical.Backend
+ var err error
+ if !isAuth {
+ // Dispense a new backend
+ backend, err = c.newLogicalBackend(entry.Type, sysView, view, conf)
+ } else {
+ backend, err = c.newCredentialBackend(entry.Type, sysView, view, conf)
+ }
+ if err != nil {
+ return err
+ }
+ if backend == nil {
+ return fmt.Errorf("nil backend of type %q returned from creation function", entry.Type)
+ }
+
+ // Call initialize; this takes care of init tasks that must be run after
+ // the ignore paths are collected.
+ if err := backend.Initialize(); err != nil {
+ return err
+ }
+
+ // Set the backend back
+ re.backend = backend
+
+ return nil
+}
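reloadMatchingPlugin above walks both mount tables and reloads only entries whose type is "plugin" and whose configured plugin_name matches. A minimal sketch of that filtering, using a stripped-down stand-in for vault's MountEntry:

```go
package main

import "fmt"

// mountEntry is a stripped-down stand-in for vault's MountEntry, just
// enough to illustrate the filtering reloadMatchingPlugin performs.
type mountEntry struct {
	Path       string
	Type       string
	PluginName string
}

// matchingEntries returns the entries that reloadMatchingPlugin would
// reload for the given plugin name: type "plugin" and a matching
// plugin_name in the mount config.
func matchingEntries(entries []mountEntry, pluginName string) []mountEntry {
	var out []mountEntry
	for _, e := range entries {
		if e.Type == "plugin" && e.PluginName == pluginName {
			out = append(out, e)
		}
	}
	return out
}

func main() {
	entries := []mountEntry{
		{Path: "secret/", Type: "kv"},
		{Path: "mydb/", Type: "plugin", PluginName: "my-db-plugin"},
		{Path: "otherdb/", Type: "plugin", PluginName: "other-plugin"},
	}
	fmt.Println(matchingEntries(entries, "my-db-plugin")) // [{mydb/ plugin my-db-plugin}]
}
```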
diff --git a/vendor/github.com/hashicorp/vault/vault/policy.go b/vendor/github.com/hashicorp/vault/vault/policy.go
index c808c2a..79f3b56 100644
--- a/vendor/github.com/hashicorp/vault/vault/policy.go
+++ b/vendor/github.com/hashicorp/vault/vault/policy.go
@@ -11,6 +11,7 @@ import (
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/helper/parseutil"
+ "github.com/mitchellh/copystructure"
)
const (
@@ -84,6 +85,40 @@ type Permissions struct {
DeniedParameters map[string][]interface{}
}
+func (p *Permissions) Clone() (*Permissions, error) {
+ ret := &Permissions{
+ CapabilitiesBitmap: p.CapabilitiesBitmap,
+ MinWrappingTTL: p.MinWrappingTTL,
+ MaxWrappingTTL: p.MaxWrappingTTL,
+ }
+
+ switch {
+ case p.AllowedParameters == nil:
+ case len(p.AllowedParameters) == 0:
+ ret.AllowedParameters = make(map[string][]interface{})
+ default:
+ clonedAllowed, err := copystructure.Copy(p.AllowedParameters)
+ if err != nil {
+ return nil, err
+ }
+ ret.AllowedParameters = clonedAllowed.(map[string][]interface{})
+ }
+
+ switch {
+ case p.DeniedParameters == nil:
+ case len(p.DeniedParameters) == 0:
+ ret.DeniedParameters = make(map[string][]interface{})
+ default:
+ clonedDenied, err := copystructure.Copy(p.DeniedParameters)
+ if err != nil {
+ return nil, err
+ }
+ ret.DeniedParameters = clonedDenied.(map[string][]interface{})
+ }
+
+ return ret, nil
+}
+
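Clone deep-copies AllowedParameters and DeniedParameters with copystructure because the map values are slices: a plain assignment would share backing storage between policies, so mutating the "copy" would corrupt the original. A standalone sketch of the difference:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

func main() {
	orig := map[string][]interface{}{"ttl": {"1h"}}

	// A plain assignment shares the underlying slices.
	shallow := orig
	shallow["ttl"][0] = "24h"
	fmt.Println(orig["ttl"][0]) // 24h: the original was mutated

	// copystructure.Copy produces an independent deep copy.
	orig["ttl"][0] = "1h"
	deep, err := copystructure.Copy(orig)
	if err != nil {
		panic(err)
	}
	deepMap := deep.(map[string][]interface{})
	deepMap["ttl"][0] = "24h"
	fmt.Println(orig["ttl"][0]) // 1h: the original is untouched
}
```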
// Parse is used to parse the specified ACL rules into an
// intermediary set of policies, before being compiled into
// the ACL
diff --git a/vendor/github.com/hashicorp/vault/vault/policy_store.go b/vendor/github.com/hashicorp/vault/vault/policy_store.go
index 0768f76..986dcda 100644
--- a/vendor/github.com/hashicorp/vault/vault/policy_store.go
+++ b/vendor/github.com/hashicorp/vault/vault/policy_store.go
@@ -25,9 +25,7 @@ const (
responseWrappingPolicyName = "response-wrapping"
// responseWrappingPolicy is the policy that ensures cubbyhole response
- // wrapping can always succeed. Note that sys/wrapping/lookup isn't
- // contained here because using it would revoke the token anyways, so there
- // isn't much point.
+ // wrapping can always succeed.
responseWrappingPolicy = `
path "cubbyhole/response" {
capabilities = ["create", "read"]
@@ -147,7 +145,7 @@ func (c *Core) setupPolicyStore() error {
sysView := &dynamicSystemView{core: c}
c.policyStore = NewPolicyStore(view, sysView)
- if sysView.ReplicationState() == consts.ReplicationSecondary {
+ if c.replicationState.HasState(consts.ReplicationPerformanceSecondary) {
// Policies will sync from the primary
return nil
}
@@ -202,6 +200,8 @@ func (ps *PolicyStore) SetPolicy(p *Policy) error {
if p.Name == "" {
return fmt.Errorf("policy name missing")
}
+ // Policies are normalized to lower-case
+ p.Name = strings.ToLower(strings.TrimSpace(p.Name))
if strutil.StrListContains(immutablePolicies, p.Name) {
return fmt.Errorf("cannot update %s policy", p.Name)
}
@@ -232,6 +232,7 @@ func (ps *PolicyStore) setPolicyInternal(p *Policy) error {
// GetPolicy is used to fetch the named policy
func (ps *PolicyStore) GetPolicy(name string) (*Policy, error) {
defer metrics.MeasureSince([]string{"policy", "get_policy"}, time.Now())
+
if ps.lru != nil {
// Check for cached policy
if raw, ok := ps.lru.Get(name); ok {
@@ -239,6 +240,9 @@ func (ps *PolicyStore) GetPolicy(name string) (*Policy, error) {
}
}
+ // Policies are normalized to lower-case
+ name = strings.ToLower(strings.TrimSpace(name))
+
// Special case the root policy
if name == "root" {
p := &Policy{Name: "root"}
@@ -322,6 +326,9 @@ func (ps *PolicyStore) ListPolicies() ([]string, error) {
// DeletePolicy is used to delete the named policy
func (ps *PolicyStore) DeletePolicy(name string) error {
defer metrics.MeasureSince([]string{"policy", "delete_policy"}, time.Now())
+
+ // Policies are normalized to lower-case
+ name = strings.ToLower(strings.TrimSpace(name))
if strutil.StrListContains(immutablePolicies, name) {
return fmt.Errorf("cannot delete %s policy", name)
}
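With all three call sites above lower-casing and trimming the policy name, "Dev", "deV" and "dEv" address the same stored policy, which is exactly what the updated policy_store_test below exercises. A minimal sketch of the normalization, using the same strings.ToLower/TrimSpace combination:

```go
package main

import (
	"fmt"
	"strings"
)

// normalize mirrors the name handling applied in SetPolicy, GetPolicy
// and DeletePolicy above.
func normalize(name string) string {
	return strings.ToLower(strings.TrimSpace(name))
}

func main() {
	for _, n := range []string{"Dev", " deV", "dEv "} {
		fmt.Printf("%q -> %q\n", n, normalize(n))
	}
	// all three normalize to "dev"
}
```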
diff --git a/vendor/github.com/hashicorp/vault/vault/policy_store_test.go b/vendor/github.com/hashicorp/vault/vault/policy_store_test.go
index dafca34..97107f1 100644
--- a/vendor/github.com/hashicorp/vault/vault/policy_store_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/policy_store_test.go
@@ -61,7 +61,7 @@ func TestPolicyStore_CRUD(t *testing.T) {
func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) {
// Get should return nothing
- p, err := ps.GetPolicy("dev")
+ p, err := ps.GetPolicy("Dev")
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -70,7 +70,7 @@ func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) {
}
// Delete should be no-op
- err = ps.DeletePolicy("dev")
+ err = ps.DeletePolicy("deV")
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -92,7 +92,7 @@ func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) {
}
// Get should work
- p, err = ps.GetPolicy("dev")
+ p, err = ps.GetPolicy("dEv")
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -110,13 +110,13 @@ func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) {
}
// Delete should clear the entry
- err = ps.DeletePolicy("dev")
+ err = ps.DeletePolicy("Dev")
if err != nil {
t.Fatalf("err: %v", err)
}
// Get should fail
- p, err = ps.GetPolicy("dev")
+ p, err = ps.GetPolicy("deV")
if err != nil {
t.Fatalf("err: %v", err)
}
diff --git a/vendor/github.com/hashicorp/vault/vault/rekey_test.go b/vendor/github.com/hashicorp/vault/vault/rekey_test.go
index c463325..e6453ad 100644
--- a/vendor/github.com/hashicorp/vault/vault/rekey_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/rekey_test.go
@@ -9,6 +9,7 @@ import (
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/physical"
+ "github.com/hashicorp/vault/physical/inmem"
)
func TestCore_Rekey_Lifecycle(t *testing.T) {
@@ -372,12 +373,19 @@ func TestCore_Standby_Rekey(t *testing.T) {
// Create the first core and initialize it
logger := logformat.NewVaultLogger(log.LevelTrace)
- inm := physical.NewInmem(logger)
- inmha := physical.NewInmemHA(logger)
+ inm, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ inmha, err := inmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+
redirectOriginal := "http://127.0.0.1:8200"
core, err := NewCore(&CoreConfig{
Physical: inm,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: redirectOriginal,
DisableMlock: true,
DisableCache: true,
@@ -399,7 +407,7 @@ func TestCore_Standby_Rekey(t *testing.T) {
redirectOriginal2 := "http://127.0.0.1:8500"
core2, err := NewCore(&CoreConfig{
Physical: inm,
- HAPhysical: inmha,
+ HAPhysical: inmha.(physical.HABackend),
RedirectAddr: redirectOriginal2,
DisableMlock: true,
DisableCache: true,
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding.go
index 62cbf44..0433fec 100644
--- a/vendor/github.com/hashicorp/vault/vault/request_forwarding.go
+++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding.go
@@ -1,14 +1,13 @@
package vault
import (
- "bytes"
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/http"
"net/url"
- "os"
+ "runtime"
"sync"
"sync/atomic"
"time"
@@ -17,10 +16,13 @@ import (
"golang.org/x/net/context"
"golang.org/x/net/http2"
"google.golang.org/grpc"
+ "google.golang.org/grpc/keepalive"
)
const (
clusterListenerAcceptDeadline = 500 * time.Millisecond
+ heartbeatInterval = 30 * time.Second
+ requestForwardingALPN = "req_fw_sb-act_v1"
)
// Starts the listeners and servers necessary to handle forwarded requests
@@ -36,10 +38,6 @@ func (c *Core) startForwarding() error {
// Resolve locally to avoid races
ha := c.ha != nil
- // Get our base handler (for our RPC server) and our wrapped handler (for
- // straight HTTP/2 forwarding)
- baseHandler, wrappedHandler := c.clusterHandlerSetupFunc()
-
// Get our TLS config
tlsConfig, err := c.ClusterTLSConfig()
if err != nil {
@@ -48,7 +46,7 @@ func (c *Core) startForwarding() error {
}
// The server supports all of the possible protos
- tlsConfig.NextProtos = []string{"h2", "req_fw_sb-act_v1"}
+ tlsConfig.NextProtos = []string{"h2", requestForwardingALPN}
// Create our RPC server and register the request handler server
c.clusterParamsLock.Lock()
@@ -58,12 +56,16 @@ func (c *Core) startForwarding() error {
return nil
}
- c.rpcServer = grpc.NewServer()
+ c.rpcServer = grpc.NewServer(
+ grpc.KeepaliveParams(keepalive.ServerParameters{
+ Time: 2 * heartbeatInterval,
+ }),
+ )
- if ha {
+ if ha && c.clusterHandler != nil {
RegisterRequestForwardingServer(c.rpcServer, &forwardedRequestRPCServer{
core: c,
- handler: baseHandler,
+ handler: c.clusterHandler,
})
}
c.clusterParamsLock.Unlock()
@@ -143,24 +145,13 @@ func (c *Core) startForwarding() error {
}
switch tlsConn.ConnectionState().NegotiatedProtocol {
- case "h2":
+ case requestForwardingALPN:
if !ha {
conn.Close()
continue
}
- c.logger.Trace("core: got h2 connection")
- go fws.ServeConn(conn, &http2.ServeConnOpts{
- Handler: wrappedHandler,
- })
-
- case "req_fw_sb-act_v1":
- if !ha {
- conn.Close()
- continue
- }
-
- c.logger.Trace("core: got req_fw_sb-act_v1 connection")
+ c.logger.Trace("core: got request forwarding connection")
go fws.ServeConn(conn, &http2.ServeConnOpts{
Handler: c.rpcServer,
})
@@ -231,37 +222,31 @@ func (c *Core) refreshRequestForwardingConnection(clusterAddr string) error {
return err
}
- switch os.Getenv("VAULT_USE_GRPC_REQUEST_FORWARDING") {
- case "":
- // Set up normal HTTP forwarding handling
- tlsConfig, err := c.ClusterTLSConfig()
- if err != nil {
- c.logger.Error("core: error fetching cluster tls configuration when trying to create connection", "error", err)
- return err
- }
- tp := &http2.Transport{
- TLSClientConfig: tlsConfig,
- }
- c.requestForwardingConnection = &activeConnection{
- transport: tp,
- clusterAddr: clusterAddr,
- }
-
- default:
- // Set up grpc forwarding handling
- // It's not really insecure, but we have to dial manually to get the
- // ALPN header right. It's just "insecure" because GRPC isn't managing
- // the TLS state.
-
- ctx, cancelFunc := context.WithCancel(context.Background())
- c.rpcClientConnCancelFunc = cancelFunc
- c.rpcClientConn, err = grpc.DialContext(ctx, clusterURL.Host, grpc.WithDialer(c.getGRPCDialer("req_fw_sb-act_v1", "", nil)), grpc.WithInsecure())
- if err != nil {
- c.logger.Error("core: err setting up forwarding rpc client", "error", err)
- return err
- }
- c.rpcForwardingClient = NewRequestForwardingClient(c.rpcClientConn)
+ // Set up grpc forwarding handling
+ // It's not really insecure, but we have to dial manually to get the
+ // ALPN header right. It's just "insecure" because GRPC isn't managing
+ // the TLS state.
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ c.rpcClientConn, err = grpc.DialContext(ctx, clusterURL.Host,
+ grpc.WithDialer(c.getGRPCDialer(requestForwardingALPN, "", nil)),
+ grpc.WithInsecure(), // it's not, we handle it in the dialer
+ grpc.WithKeepaliveParams(keepalive.ClientParameters{
+ Time: 2 * heartbeatInterval,
+ }))
+ if err != nil {
+ cancelFunc()
+ c.logger.Error("core: err setting up forwarding rpc client", "error", err)
+ return err
}
+ c.rpcClientConnContext = ctx
+ c.rpcClientConnCancelFunc = cancelFunc
+ c.rpcForwardingClient = &forwardingClient{
+ RequestForwardingClient: NewRequestForwardingClient(c.rpcClientConn),
+ core: c,
+ echoTicker: time.NewTicker(heartbeatInterval),
+ echoContext: ctx,
+ }
+ c.rpcForwardingClient.startHeartbeat()
return nil
}
@@ -270,11 +255,6 @@ func (c *Core) clearForwardingClients() {
c.logger.Trace("core: clearing forwarding clients")
defer c.logger.Trace("core: done clearing forwarding clients")
- if c.requestForwardingConnection != nil {
- c.requestForwardingConnection.transport.CloseIdleConnections()
- c.requestForwardingConnection = nil
- }
-
if c.rpcClientConnCancelFunc != nil {
c.rpcClientConnCancelFunc()
c.rpcClientConnCancelFunc = nil
@@ -283,6 +263,8 @@ func (c *Core) clearForwardingClients() {
c.rpcClientConn.Close()
c.rpcClientConn = nil
}
+
+ c.rpcClientConnContext = nil
c.rpcForwardingClient = nil
}
@@ -292,70 +274,36 @@ func (c *Core) ForwardRequest(req *http.Request) (int, http.Header, []byte, erro
c.requestForwardingConnectionLock.RLock()
defer c.requestForwardingConnectionLock.RUnlock()
- switch os.Getenv("VAULT_USE_GRPC_REQUEST_FORWARDING") {
- case "":
- if c.requestForwardingConnection == nil {
- return 0, nil, nil, ErrCannotForward
- }
+ if c.rpcForwardingClient == nil {
+ return 0, nil, nil, ErrCannotForward
+ }
- if c.requestForwardingConnection.clusterAddr == "" {
- return 0, nil, nil, ErrCannotForward
- }
+ freq, err := forwarding.GenerateForwardedRequest(req)
+ if err != nil {
+ c.logger.Error("core: error creating forwarding RPC request", "error", err)
+ return 0, nil, nil, fmt.Errorf("error creating forwarding RPC request")
+ }
+ if freq == nil {
+ c.logger.Error("core: got nil forwarding RPC request")
+ return 0, nil, nil, fmt.Errorf("got nil forwarding RPC request")
+ }
+ resp, err := c.rpcForwardingClient.ForwardRequest(c.rpcClientConnContext, freq)
+ if err != nil {
+ c.logger.Error("core: error during forwarded RPC request", "error", err)
+ return 0, nil, nil, fmt.Errorf("error during forwarding RPC request")
+ }
- freq, err := forwarding.GenerateForwardedHTTPRequest(req, c.requestForwardingConnection.clusterAddr+"/cluster/local/forwarded-request")
- if err != nil {
- c.logger.Error("core/ForwardRequest: error creating forwarded request", "error", err)
- return 0, nil, nil, fmt.Errorf("error creating forwarding request")
- }
-
- //resp, err := c.requestForwardingConnection.Do(freq)
- resp, err := c.requestForwardingConnection.transport.RoundTrip(freq)
- if err != nil {
- return 0, nil, nil, err
- }
- defer resp.Body.Close()
-
- // Read the body into a buffer so we can write it back out to the
- // original requestor
- buf := bytes.NewBuffer(nil)
- _, err = buf.ReadFrom(resp.Body)
- if err != nil {
- return 0, nil, nil, err
- }
- return resp.StatusCode, resp.Header, buf.Bytes(), nil
-
- default:
- if c.rpcForwardingClient == nil {
- return 0, nil, nil, ErrCannotForward
- }
-
- freq, err := forwarding.GenerateForwardedRequest(req)
- if err != nil {
- c.logger.Error("core/ForwardRequest: error creating forwarding RPC request", "error", err)
- return 0, nil, nil, fmt.Errorf("error creating forwarding RPC request")
- }
- if freq == nil {
- c.logger.Error("core/ForwardRequest: got nil forwarding RPC request")
- return 0, nil, nil, fmt.Errorf("got nil forwarding RPC request")
- }
- resp, err := c.rpcForwardingClient.ForwardRequest(context.Background(), freq, grpc.FailFast(true))
- if err != nil {
- c.logger.Error("core/ForwardRequest: error during forwarded RPC request", "error", err)
- return 0, nil, nil, fmt.Errorf("error during forwarding RPC request")
- }
-
- var header http.Header
- if resp.HeaderEntries != nil {
- header = make(http.Header)
- for k, v := range resp.HeaderEntries {
- for _, j := range v.Values {
- header.Add(k, j)
- }
+ var header http.Header
+ if resp.HeaderEntries != nil {
+ header = make(http.Header)
+ for k, v := range resp.HeaderEntries {
+ for _, j := range v.Values {
+ header.Add(k, j)
}
}
-
- return int(resp.StatusCode), header, resp.Body, nil
}
+
+ return int(resp.StatusCode), header, resp.Body, nil
}
// getGRPCDialer is used to return a dialer that has the correct TLS
@@ -406,12 +354,23 @@ func (s *forwardedRequestRPCServer) ForwardRequest(ctx context.Context, freq *fo
// meets the interface requirements.
w := forwarding.NewRPCResponseWriter()
- s.handler.ServeHTTP(w, req)
+ resp := &forwarding.Response{}
- resp := &forwarding.Response{
- StatusCode: uint32(w.StatusCode()),
- Body: w.Body().Bytes(),
+ runRequest := func() {
+ defer func() {
+ // Logic here comes mostly from the Go source code
+ if err := recover(); err != nil {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ s.core.logger.Error("forwarding: panic serving request", "path", req.URL.Path, "error", err, "stacktrace", buf)
+ }
+ }()
+ s.handler.ServeHTTP(w, req)
}
+ runRequest()
+ resp.StatusCode = uint32(w.StatusCode())
+ resp.Body = w.Body().Bytes()
header := w.Header()
if header != nil {
@@ -425,3 +384,66 @@ func (s *forwardedRequestRPCServer) ForwardRequest(ctx context.Context, freq *fo
return resp, nil
}
+
+func (s *forwardedRequestRPCServer) Echo(ctx context.Context, in *EchoRequest) (*EchoReply, error) {
+ if in.ClusterAddr != "" {
+ s.core.clusterPeerClusterAddrsCache.Set(in.ClusterAddr, nil, 0)
+ }
+ return &EchoReply{
+ Message: "pong",
+ }, nil
+}
+
+type forwardingClient struct {
+ RequestForwardingClient
+
+ core *Core
+
+ echoTicker *time.Ticker
+ echoContext context.Context
+}
+
+// NOTE: we also take advantage of gRPC's keepalive bits, but as we send data
+// with these requests it's useful to keep this explicit heartbeat as well.
+func (c *forwardingClient) startHeartbeat() {
+ go func() {
+ tick := func() {
+ c.core.stateLock.RLock()
+ clusterAddr := c.core.clusterAddr
+ c.core.stateLock.RUnlock()
+
+ ctx, cancel := context.WithTimeout(c.echoContext, 2*time.Second)
+ resp, err := c.RequestForwardingClient.Echo(ctx, &EchoRequest{
+ Message: "ping",
+ ClusterAddr: clusterAddr,
+ })
+ cancel()
+ if err != nil {
+ c.core.logger.Debug("forwarding: error sending echo request to active node", "error", err)
+ return
+ }
+ if resp == nil {
+ c.core.logger.Debug("forwarding: empty echo response from active node")
+ return
+ }
+ if resp.Message != "pong" {
+ c.core.logger.Debug("forwarding: unexpected echo response from active node", "message", resp.Message)
+ return
+ }
+ c.core.logger.Trace("forwarding: successful heartbeat")
+ }
+
+ tick()
+
+ for {
+ select {
+ case <-c.echoContext.Done():
+ c.echoTicker.Stop()
+ c.core.logger.Trace("forwarding: stopping heartbeating")
+ return
+ case <-c.echoTicker.C:
+ tick()
+ }
+ }
+ }()
+}
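startHeartbeat above follows a common ticker-plus-context pattern: send one echo immediately, then repeat on the interval until the connection's context is cancelled. A generic standalone sketch of that pattern, with intervals shortened so it terminates quickly; the real code sends the Echo RPC where this prints:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// The connection context; cancelling it stops the heartbeat loop,
	// just as clearForwardingClients cancels rpcClientConnContext.
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	ticker := time.NewTicker(100 * time.Millisecond)
	tick := func() { fmt.Println("ping -> pong") }

	tick() // immediate first heartbeat

	for {
		select {
		case <-ctx.Done():
			ticker.Stop()
			fmt.Println("stopping heartbeat")
			return
		case <-ticker.C:
			tick()
		}
	}
}
```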
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go
index cae684d..add7bf3 100644
--- a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go
+++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
// source: request_forwarding_service.proto
-// DO NOT EDIT!
/*
Package vault is a generated protocol buffer package.
@@ -9,6 +8,8 @@ It is generated from these files:
request_forwarding_service.proto
It has these top-level messages:
+ EchoRequest
+ EchoReply
*/
package vault
@@ -33,6 +34,59 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+type EchoRequest struct {
+ Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"`
+ ClusterAddr string `protobuf:"bytes,2,opt,name=cluster_addr,json=clusterAddr" json:"cluster_addr,omitempty"`
+}
+
+func (m *EchoRequest) Reset() { *m = EchoRequest{} }
+func (m *EchoRequest) String() string { return proto.CompactTextString(m) }
+func (*EchoRequest) ProtoMessage() {}
+func (*EchoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *EchoRequest) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+func (m *EchoRequest) GetClusterAddr() string {
+ if m != nil {
+ return m.ClusterAddr
+ }
+ return ""
+}
+
+type EchoReply struct {
+ Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"`
+ ClusterAddrs []string `protobuf:"bytes,2,rep,name=cluster_addrs,json=clusterAddrs" json:"cluster_addrs,omitempty"`
+}
+
+func (m *EchoReply) Reset() { *m = EchoReply{} }
+func (m *EchoReply) String() string { return proto.CompactTextString(m) }
+func (*EchoReply) ProtoMessage() {}
+func (*EchoReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *EchoReply) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+func (m *EchoReply) GetClusterAddrs() []string {
+ if m != nil {
+ return m.ClusterAddrs
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*EchoRequest)(nil), "vault.EchoRequest")
+ proto.RegisterType((*EchoReply)(nil), "vault.EchoReply")
+}
+
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
@@ -45,6 +99,7 @@ const _ = grpc.SupportPackageIsVersion4
type RequestForwardingClient interface {
ForwardRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error)
+ Echo(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoReply, error)
}
type requestForwardingClient struct {
@@ -64,10 +119,20 @@ func (c *requestForwardingClient) ForwardRequest(ctx context.Context, in *forwar
return out, nil
}
+func (c *requestForwardingClient) Echo(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoReply, error) {
+ out := new(EchoReply)
+ err := grpc.Invoke(ctx, "/vault.RequestForwarding/Echo", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// Server API for RequestForwarding service
type RequestForwardingServer interface {
ForwardRequest(context.Context, *forwarding.Request) (*forwarding.Response, error)
+ Echo(context.Context, *EchoRequest) (*EchoReply, error)
}
func RegisterRequestForwardingServer(s *grpc.Server, srv RequestForwardingServer) {
@@ -92,6 +157,24 @@ func _RequestForwarding_ForwardRequest_Handler(srv interface{}, ctx context.Cont
return interceptor(ctx, in, info, handler)
}
+func _RequestForwarding_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(EchoRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(RequestForwardingServer).Echo(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vault.RequestForwarding/Echo",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(RequestForwardingServer).Echo(ctx, req.(*EchoRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _RequestForwarding_serviceDesc = grpc.ServiceDesc{
ServiceName: "vault.RequestForwarding",
HandlerType: (*RequestForwardingServer)(nil),
@@ -100,6 +183,10 @@ var _RequestForwarding_serviceDesc = grpc.ServiceDesc{
MethodName: "ForwardRequest",
Handler: _RequestForwarding_ForwardRequest_Handler,
},
+ {
+ MethodName: "Echo",
+ Handler: _RequestForwarding_Echo_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "request_forwarding_service.proto",
@@ -108,15 +195,21 @@ var _RequestForwarding_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("request_forwarding_service.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
- // 151 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x28, 0x4a, 0x2d, 0x2c,
- 0x4d, 0x2d, 0x2e, 0x89, 0x4f, 0xcb, 0x2f, 0x2a, 0x4f, 0x2c, 0x4a, 0xc9, 0xcc, 0x4b, 0x8f, 0x2f,
- 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4b,
- 0x2c, 0xcd, 0x29, 0x91, 0xb2, 0x48, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5,
- 0xcf, 0x48, 0x2c, 0xce, 0xc8, 0x4c, 0xce, 0x2f, 0x2a, 0xd0, 0x07, 0xcb, 0xe9, 0x67, 0xa4, 0xe6,
- 0x14, 0xa4, 0x16, 0xe9, 0x23, 0x8c, 0xd0, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0x86, 0x18, 0x60, 0x14,
- 0xc4, 0x25, 0x18, 0x04, 0xb1, 0xc4, 0x0d, 0xae, 0x40, 0xc8, 0x96, 0x8b, 0x0f, 0xca, 0x83, 0xca,
- 0x09, 0x09, 0xeb, 0x21, 0xf4, 0xeb, 0x41, 0x05, 0xa5, 0x44, 0x50, 0x05, 0x8b, 0x0b, 0xf2, 0xf3,
- 0x8a, 0x53, 0x95, 0x18, 0x92, 0xd8, 0xc0, 0x46, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x81,
- 0xce, 0x3f, 0x7f, 0xbf, 0x00, 0x00, 0x00,
+ // 254 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0x3d, 0x4f, 0xc3, 0x30,
+ 0x10, 0x86, 0xdb, 0xf2, 0xa5, 0xb8, 0x05, 0x81, 0x61, 0x88, 0x32, 0x85, 0xb0, 0x74, 0x72, 0x24,
+ 0x58, 0x58, 0x18, 0x18, 0x60, 0xe8, 0x98, 0x3f, 0x10, 0xb9, 0xf6, 0x11, 0x47, 0x72, 0x6b, 0x73,
+ 0xe7, 0x14, 0x65, 0xe5, 0x97, 0x23, 0x92, 0x94, 0xa6, 0x0b, 0xe3, 0xbd, 0x27, 0x3d, 0xf7, 0xdc,
+ 0xcb, 0x52, 0x84, 0xcf, 0x06, 0x28, 0x94, 0x1f, 0x0e, 0xbf, 0x24, 0xea, 0x7a, 0x5b, 0x95, 0x04,
+ 0xb8, 0xab, 0x15, 0x08, 0x8f, 0x2e, 0x38, 0x7e, 0xb6, 0x93, 0x8d, 0x0d, 0xc9, 0x73, 0x55, 0x07,
+ 0xd3, 0xac, 0x85, 0x72, 0x9b, 0xdc, 0x48, 0x32, 0xb5, 0x72, 0xe8, 0xf3, 0x6e, 0x97, 0x1b, 0xb0,
+ 0x1e, 0x30, 0x3f, 0x20, 0xf2, 0xd0, 0x7a, 0xa0, 0x1e, 0x90, 0xad, 0xd8, 0xfc, 0x4d, 0x19, 0x57,
+ 0xf4, 0x87, 0x78, 0xcc, 0x2e, 0x36, 0x40, 0x24, 0x2b, 0x88, 0xa7, 0xe9, 0x74, 0x19, 0x15, 0xfb,
+ 0x91, 0xdf, 0xb3, 0x85, 0xb2, 0x0d, 0x05, 0xc0, 0x52, 0x6a, 0x8d, 0xf1, 0xac, 0x5b, 0xcf, 0x87,
+ 0xec, 0x55, 0x6b, 0xcc, 0x56, 0x2c, 0xea, 0x59, 0xde, 0xb6, 0xff, 0x90, 0x1e, 0xd8, 0xe5, 0x98,
+ 0x44, 0xf1, 0x2c, 0x3d, 0x59, 0x46, 0xc5, 0x62, 0x84, 0xa2, 0xc7, 0xef, 0x29, 0xbb, 0x19, 0xa4,
+ 0xde, 0xff, 0xcc, 0xf9, 0x0b, 0xbb, 0x1a, 0xa6, 0xbd, 0xf0, 0xad, 0x38, 0x3c, 0x26, 0x86, 0x30,
+ 0xb9, 0x3b, 0x0e, 0xc9, 0xbb, 0x2d, 0x41, 0x36, 0xe1, 0x82, 0x9d, 0xfe, 0x0a, 0x72, 0x2e, 0xba,
+ 0x6a, 0xc4, 0xe8, 0xf3, 0xe4, 0xfa, 0x28, 0xf3, 0xb6, 0xcd, 0x26, 0xeb, 0xf3, 0xae, 0xa3, 0xa7,
+ 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x13, 0x7f, 0xc2, 0x88, 0x01, 0x00, 0x00,
}
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto
index 4ab32c1..0018bb4 100644
--- a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto
+++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto
@@ -4,6 +4,17 @@ import "github.com/hashicorp/vault/helper/forwarding/types.proto";
package vault;
+message EchoRequest {
+ string message = 1;
+ string cluster_addr = 2;
+}
+
+message EchoReply {
+ string message = 1;
+ repeated string cluster_addrs = 2;
+}
+
service RequestForwarding {
rpc ForwardRequest(forwarding.Request) returns (forwarding.Response) {}
+ rpc Echo(EchoRequest) returns (EchoReply) {}
}
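
The new Echo RPC gives standby nodes a heartbeat to the active node: the request carries the standby's cluster address, and the reply returns the set of cluster addresses the active node knows about. Below is a hedged sketch of driving it from the standby side, assuming an already-established TLS *grpc.ClientConn to the active node; the NewRequestForwardingClient constructor is standard gRPC-generated code that this hunk does not show.

package standby

import (
	"context"
	"time"

	"github.com/hashicorp/vault/vault"
	"google.golang.org/grpc"
)

// echoOnce sends one heartbeat over conn and returns the cluster
// addresses the active node reported back. conn and clusterAddr are
// caller-supplied assumptions for this sketch.
func echoOnce(conn *grpc.ClientConn, clusterAddr string) ([]string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	client := vault.NewRequestForwardingClient(conn)
	reply, err := client.Echo(ctx, &vault.EchoRequest{
		Message:     "ping",
		ClusterAddr: clusterAddr, // lets the active node record this standby's address
	})
	if err != nil {
		return nil, err
	}
	return reply.ClusterAddrs, nil
}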
diff --git a/vendor/github.com/hashicorp/vault/vault/request_handling.go b/vendor/github.com/hashicorp/vault/vault/request_handling.go
index ad37b5a..b003b3f 100644
--- a/vendor/github.com/hashicorp/vault/vault/request_handling.go
+++ b/vendor/github.com/hashicorp/vault/vault/request_handling.go
@@ -11,6 +11,7 @@ import (
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/policyutil"
"github.com/hashicorp/vault/helper/strutil"
+ "github.com/hashicorp/vault/helper/wrapping"
"github.com/hashicorp/vault/logical"
)
@@ -26,7 +27,7 @@ func (c *Core) HandleRequest(req *logical.Request) (resp *logical.Response, err
}
// Allowing writing to a path ending in / makes it extremely difficult to
- // understand user intent for the filesystem-like backends (generic,
+ // understand user intent for the filesystem-like backends (kv,
// cubbyhole) -- did they want a key named foo/ or did they want to write
// to a directory foo/ with no (or forgotten) key, or...? It also affects
// lookup, because paths ending in / are considered prefixes by some
@@ -76,8 +77,8 @@ func (c *Core) HandleRequest(req *logical.Request) (resp *logical.Response, err
} else {
wrappingResp := &logical.Response{
WrapInfo: resp.WrapInfo,
+ Warnings: resp.Warnings,
}
- wrappingResp.CloneWarnings(resp)
resp = wrappingResp
}
}
@@ -170,7 +171,10 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
if errType != nil {
retErr = multierror.Append(retErr, errType)
}
- return logical.ErrorResponse(ctErr.Error()), nil, retErr
+ if ctErr == ErrInternalError {
+ return nil, auth, retErr
+ }
+ return logical.ErrorResponse(ctErr.Error()), auth, retErr
}
// Attach the display name
@@ -188,7 +192,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
if resp != nil {
// If wrapping is used, use the shortest between the request and response
var wrapTTL time.Duration
- var wrapFormat string
+ var wrapFormat, creationPath string
// Ensure no wrap info information is set other than, possibly, the TTL
if resp.WrapInfo != nil {
@@ -196,6 +200,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
wrapTTL = resp.WrapInfo.TTL
}
wrapFormat = resp.WrapInfo.Format
+ creationPath = resp.WrapInfo.CreationPath
resp.WrapInfo = nil
}
@@ -216,16 +221,18 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
}
if wrapTTL > 0 {
- resp.WrapInfo = &logical.ResponseWrapInfo{
- TTL: wrapTTL,
- Format: wrapFormat,
+ resp.WrapInfo = &wrapping.ResponseWrapInfo{
+ TTL: wrapTTL,
+ Format: wrapFormat,
+ CreationPath: creationPath,
}
}
}
// If there is a secret, we must register it with the expiration manager.
// We exclude renewal of a lease, since it does not need to be re-registered
- if resp != nil && resp.Secret != nil && !strings.HasPrefix(req.Path, "sys/renew") {
+ if resp != nil && resp.Secret != nil && !strings.HasPrefix(req.Path, "sys/renew") &&
+ !strings.HasPrefix(req.Path, "sys/leases/renew") {
// Get the SystemView for the mount
sysView := c.router.MatchingSystemView(req.Path)
if sysView == nil {
@@ -245,12 +252,12 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
resp.Secret.TTL = maxTTL
}
- // Generic mounts should return the TTL but not register
+ // KV mounts should return the TTL but not register
// for a lease as this provides a massive slowdown
registerLease := true
matchingBackend := c.router.MatchingBackend(req.Path)
if matchingBackend == nil {
- c.logger.Error("core: unable to retrieve generic backend from router")
+ c.logger.Error("core: unable to retrieve kv backend from router")
retErr = multierror.Append(retErr, ErrInternalError)
return nil, auth, retErr
}
@@ -288,10 +295,11 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
if err != nil {
c.logger.Error("core: failed to look up token", "error", err)
retErr = multierror.Append(retErr, ErrInternalError)
- return nil, nil, retErr
+ return nil, auth, retErr
}
if err := c.expiration.RegisterAuth(te.Path, resp.Auth); err != nil {
+ c.tokenStore.Revoke(te.ID)
c.logger.Error("core: failed to register token lease", "request_path", req.Path, "error", err)
retErr = multierror.Append(retErr, ErrInternalError)
return nil, auth, retErr
@@ -335,7 +343,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log
if resp != nil {
// If wrapping is used, use the shortest between the request and response
var wrapTTL time.Duration
- var wrapFormat string
+ var wrapFormat, creationPath string
// Ensure no wrap info information is set other than, possibly, the TTL
if resp.WrapInfo != nil {
@@ -343,6 +351,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log
wrapTTL = resp.WrapInfo.TTL
}
wrapFormat = resp.WrapInfo.Format
+ creationPath = resp.WrapInfo.CreationPath
resp.WrapInfo = nil
}
@@ -361,9 +370,10 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log
}
if wrapTTL > 0 {
- resp.WrapInfo = &logical.ResponseWrapInfo{
- TTL: wrapTTL,
- Format: wrapFormat,
+ resp.WrapInfo = &wrapping.ResponseWrapInfo{
+ TTL: wrapTTL,
+ Format: wrapFormat,
+ CreationPath: creationPath,
}
}
}
@@ -439,6 +449,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log
// Register with the expiration manager
if err := c.expiration.RegisterAuth(te.Path, auth); err != nil {
+ c.tokenStore.Revoke(te.ID)
c.logger.Error("core: failed to register token lease", "request_path", req.Path, "error", err)
return nil, auth, ErrInternalError
}
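
Two behavioral changes above are easy to miss in the noise: a token whose lease registration fails is now revoked on the spot (in both handleRequest and handleLoginRequest) rather than being left behind without a lease, and response wrapping now preserves CreationPath when the wrap info is rebuilt. The creation path lets a consumer verify where a wrapping token was minted before unwrapping it. A hedged client-side sketch follows; the sys/wrapping/lookup endpoint and its creation_path response field are assumptions about the surrounding Vault API, not shown in this diff.

package wrapcheck

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

// verifyWrapOrigin rejects a wrapping token that was not created by the
// expected path. The endpoint name and response field are assumptions.
func verifyWrapOrigin(client *api.Client, wrapToken, expectedPath string) error {
	secret, err := client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{
		"token": wrapToken,
	})
	if err != nil {
		return err
	}
	p, _ := secret.Data["creation_path"].(string)
	if p != expectedPath {
		return fmt.Errorf("wrapping token created at %q, want %q", p, expectedPath)
	}
	return nil
}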
diff --git a/vendor/github.com/hashicorp/vault/vault/request_handling_test.go b/vendor/github.com/hashicorp/vault/vault/request_handling_test.go
index c966b04..5f148c8 100644
--- a/vendor/github.com/hashicorp/vault/vault/request_handling_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/request_handling_test.go
@@ -12,14 +12,14 @@ import (
func TestRequestHandling_Wrapping(t *testing.T) {
core, _, root := TestCoreUnsealed(t)
- core.logicalBackends["generic"] = PassthroughBackendFactory
+ core.logicalBackends["kv"] = PassthroughBackendFactory
meUUID, _ := uuid.GenerateUUID()
err := core.mount(&MountEntry{
Table: mountTableType,
UUID: meUUID,
Path: "wraptest",
- Type: "generic",
+ Type: "kv",
})
if err != nil {
t.Fatalf("err: %v", err)
diff --git a/vendor/github.com/hashicorp/vault/vault/rollback.go b/vendor/github.com/hashicorp/vault/vault/rollback.go
index 9ace6b3..1ee6d9f 100644
--- a/vendor/github.com/hashicorp/vault/vault/rollback.go
+++ b/vendor/github.com/hashicorp/vault/vault/rollback.go
@@ -113,8 +113,15 @@ func (m *RollbackManager) triggerRollbacks() {
for _, e := range backends {
path := e.Path
if e.Table == credentialTableType {
- path = "auth/" + path
+ path = credentialRoutePrefix + path
}
+
+ // When the mount is filtered, the backend will be nil
+ backend := m.router.MatchingBackend(path)
+ if backend == nil {
+ continue
+ }
+
m.inflightLock.RLock()
_, ok := m.inflight[path]
m.inflightLock.RUnlock()
diff --git a/vendor/github.com/hashicorp/vault/vault/rollback_test.go b/vendor/github.com/hashicorp/vault/vault/rollback_test.go
index 797993a..f050df7 100644
--- a/vendor/github.com/hashicorp/vault/vault/rollback_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/rollback_test.go
@@ -29,7 +29,7 @@ func mockRollback(t *testing.T) (*RollbackManager, *NoopBackend) {
if err != nil {
t.Fatal(err)
}
- if err := router.Mount(backend, "foo", &MountEntry{UUID: meUUID}, view); err != nil {
+ if err := router.Mount(backend, "foo", &MountEntry{UUID: meUUID, Accessor: "noopaccessor"}, view); err != nil {
t.Fatalf("err: %s", err)
}
diff --git a/vendor/github.com/hashicorp/vault/vault/router.go b/vendor/github.com/hashicorp/vault/vault/router.go
index 5a90dfa..f05e207 100644
--- a/vendor/github.com/hashicorp/vault/vault/router.go
+++ b/vendor/github.com/hashicorp/vault/vault/router.go
@@ -14,21 +14,25 @@ import (
// Router is used to do prefix based routing of a request to a logical backend
type Router struct {
- l sync.RWMutex
- root *radix.Tree
- tokenStoreSalt *salt.Salt
+ l sync.RWMutex
+ root *radix.Tree
+ mountUUIDCache *radix.Tree
+ mountAccessorCache *radix.Tree
+ tokenStoreSaltFunc func() (*salt.Salt, error)
// storagePrefix maps the prefix used for storage (ala the BarrierView)
// to the backend. This is used to map a key back into the backend that owns it.
- // For example, logical/uuid1/foobar -> secrets/ (generic backend) + foobar
+ // For example, logical/uuid1/foobar -> secrets/ (kv backend) + foobar
storagePrefix *radix.Tree
}
// NewRouter returns a new router
func NewRouter() *Router {
r := &Router{
- root: radix.New(),
- storagePrefix: radix.New(),
+ root: radix.New(),
+ storagePrefix: radix.New(),
+ mountUUIDCache: radix.New(),
+ mountAccessorCache: radix.New(),
}
return r
}
@@ -60,9 +64,12 @@ func (r *Router) Mount(backend logical.Backend, prefix string, mountEntry *Mount
}
// Build the paths
- paths := backend.SpecialPaths()
- if paths == nil {
- paths = new(logical.Paths)
+ paths := new(logical.Paths)
+ if backend != nil {
+ specialPaths := backend.SpecialPaths()
+ if specialPaths != nil {
+ paths = specialPaths
+ }
}
// Create a mount entry
@@ -74,8 +81,22 @@ func (r *Router) Mount(backend logical.Backend, prefix string, mountEntry *Mount
rootPaths: pathsToRadix(paths.Root),
loginPaths: pathsToRadix(paths.Unauthenticated),
}
+
+ switch {
+ case prefix == "":
+ return fmt.Errorf("missing prefix to be used for router entry; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
+ case storageView.prefix == "":
+ return fmt.Errorf("missing storage view prefix; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
+ case re.mountEntry.UUID == "":
+ return fmt.Errorf("missing mount identifier; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
+ case re.mountEntry.Accessor == "":
+ return fmt.Errorf("missing mount accessor; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
+ }
+
r.root.Insert(prefix, re)
r.storagePrefix.Insert(storageView.prefix, re)
+ r.mountUUIDCache.Insert(re.mountEntry.UUID, re.mountEntry)
+ r.mountAccessorCache.Insert(re.mountEntry.Accessor, re.mountEntry)
return nil
}
@@ -98,6 +119,9 @@ func (r *Router) Unmount(prefix string) error {
// Purge from the radix trees
r.root.Delete(prefix)
r.storagePrefix.Delete(re.storageView.prefix)
+ r.mountUUIDCache.Delete(re.mountEntry.UUID)
+ r.mountAccessorCache.Delete(re.mountEntry.Accessor)
+
return nil
}
@@ -141,6 +165,39 @@ func (r *Router) Untaint(path string) error {
return nil
}
+func (r *Router) MatchingMountByUUID(mountID string) *MountEntry {
+ if mountID == "" {
+ return nil
+ }
+
+ r.l.RLock()
+ defer r.l.RUnlock()
+
+ _, raw, ok := r.mountUUIDCache.LongestPrefix(mountID)
+ if !ok {
+ return nil
+ }
+
+ return raw.(*MountEntry)
+}
+
+// MatchingMountByAccessor returns the MountEntry by accessor lookup
+func (r *Router) MatchingMountByAccessor(mountAccessor string) *MountEntry {
+ if mountAccessor == "" {
+ return nil
+ }
+
+ r.l.RLock()
+ defer r.l.RUnlock()
+
+ _, raw, ok := r.mountAccessorCache.LongestPrefix(mountAccessor)
+ if !ok {
+ return nil
+ }
+
+ return raw.(*MountEntry)
+}
+
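
Note that both new lookups call LongestPrefix even though mount UUIDs and accessors are exact keys; since no stored key is a prefix of another, this behaves as an exact-match lookup while reusing the same radix trees as the path routes. A hedged, in-package usage sketch (the Router's caches are unexported, so a real caller lives in package vault; the accessor value is hypothetical):

// describeMount resolves which mount a given accessor belongs to,
// e.g. accessor "kv_6c6d8d1f" (hypothetical value).
func describeMount(r *Router, accessor string) (string, error) {
	entry := r.MatchingMountByAccessor(accessor)
	if entry == nil {
		return "", fmt.Errorf("no mount found for accessor %q", accessor)
	}
	return fmt.Sprintf("path=%q type=%q", entry.Path, entry.Type), nil
}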
// MatchingMount returns the mount prefix that would be used for a path
func (r *Router) MatchingMount(path string) string {
r.l.RLock()
@@ -152,7 +209,7 @@ func (r *Router) MatchingMount(path string) string {
return mount
}
-// MatchingView returns the view used for a path
+// MatchingStorageView returns the storageView used for a path
func (r *Router) MatchingStorageView(path string) *BarrierView {
r.l.RLock()
_, raw, ok := r.root.LongestPrefix(path)
@@ -174,7 +231,7 @@ func (r *Router) MatchingMountEntry(path string) *MountEntry {
return raw.(*routeEntry).mountEntry
}
-// MatchingMountEntry returns the MountEntry used for a path
+// MatchingBackend returns the backend used for a path
func (r *Router) MatchingBackend(path string) logical.Backend {
r.l.RLock()
_, raw, ok := r.root.LongestPrefix(path)
@@ -210,6 +267,12 @@ func (r *Router) MatchingStoragePrefix(path string) (string, string, bool) {
re := raw.(*routeEntry)
mountPath := re.mountEntry.Path
prefix := re.storageView.prefix
+
+ // Add back the prefix for credential backends
+ if strings.HasPrefix(path, credentialBarrierPrefix) {
+ mountPath = credentialRoutePrefix + mountPath
+ }
+
return mountPath, prefix, true
}
@@ -228,17 +291,19 @@ func (r *Router) RouteExistenceCheck(req *logical.Request) (bool, bool, error) {
func (r *Router) routeCommon(req *logical.Request, existenceCheck bool) (*logical.Response, bool, bool, error) {
// Find the mount point
r.l.RLock()
- mount, raw, ok := r.root.LongestPrefix(req.Path)
- if !ok {
+ adjustedPath := req.Path
+ mount, raw, ok := r.root.LongestPrefix(adjustedPath)
+ if !ok && !strings.HasSuffix(adjustedPath, "/") {
// Re-check for a backend by appending a slash. This lets "foo" mean
// "foo/" at the root level which is almost always what we want.
- req.Path += "/"
- mount, raw, ok = r.root.LongestPrefix(req.Path)
+ adjustedPath += "/"
+ mount, raw, ok = r.root.LongestPrefix(adjustedPath)
}
r.l.RUnlock()
if !ok {
return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath
}
+ req.Path = adjustedPath
defer metrics.MeasureSince([]string{"route", string(req.Operation),
strings.Replace(mount, "/", "-", -1)}, time.Now())
re := raw.(*routeEntry)
@@ -273,7 +338,11 @@ func (r *Router) routeCommon(req *logical.Request, existenceCheck bool) (*logica
case strings.HasPrefix(originalPath, "cubbyhole/"):
// In order for the token store to revoke later, we need to have the same
// salted ID, so we double-salt what's going to the cubbyhole backend
- req.ClientToken = re.SaltID(r.tokenStoreSalt.SaltID(req.ClientToken))
+ salt, err := r.tokenStoreSaltFunc()
+ if err != nil {
+ return nil, false, false, err
+ }
+ req.ClientToken = re.SaltID(salt.SaltID(req.ClientToken))
default:
req.ClientToken = re.SaltID(req.ClientToken)
}
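
The switch from a stored *salt.Salt to tokenStoreSaltFunc means the router no longer captures one salt instance at setup; it asks for the current salt on each cubbyhole request, so lazy creation and invalidation of the token store's salt are picked up automatically. A hypothetical wiring sketch: the actual assignment happens wherever the token store is constructed, which is outside this diff, and the Salt() accessor on the token store is an assumption here.

// Hypothetical wiring: resolve the token store's salt on demand rather
// than capturing one instance when the router is built.
c.router.tokenStoreSaltFunc = func() (*salt.Salt, error) {
	return c.tokenStore.Salt() // assumed lazy, cached accessor
}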
diff --git a/vendor/github.com/hashicorp/vault/vault/router_test.go b/vendor/github.com/hashicorp/vault/vault/router_test.go
index e5de72e..acf4fcc 100644
--- a/vendor/github.com/hashicorp/vault/vault/router_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/router_test.go
@@ -2,13 +2,17 @@ package vault
import (
"fmt"
+ "io/ioutil"
+ "reflect"
"strings"
"sync"
"testing"
"time"
"github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/logical"
+ log "github.com/mgutz/logxi/v1"
)
type NoopBackend struct {
@@ -62,10 +66,26 @@ func (n *NoopBackend) InvalidateKey(k string) {
n.Invalidations = append(n.Invalidations, k)
}
+func (n *NoopBackend) Setup(config *logical.BackendConfig) error {
+ return nil
+}
+
+func (n *NoopBackend) Logger() log.Logger {
+ return logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelOff)
+}
+
func (n *NoopBackend) Initialize() error {
return nil
}
+func (n *NoopBackend) Type() logical.BackendType {
+ return logical.TypeLogical
+}
+
+func (n *NoopBackend) RegisterLicense(license interface{}) error {
+ return nil
+}
+
func TestRouter_Mount(t *testing.T) {
r := NewRouter()
_, barrier, _ := mockBarrier(t)
@@ -75,8 +95,15 @@ func TestRouter_Mount(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+
+ mountEntry := &MountEntry{
+ Path: "prod/aws/",
+ UUID: meUUID,
+ Accessor: "awsaccessor",
+ }
+
n := &NoopBackend{}
- err = r.Mount(n, "prod/aws/", &MountEntry{Path: "prod/aws/", UUID: meUUID}, view)
+ err = r.Mount(n, "prod/aws/", mountEntry, view)
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -85,6 +112,7 @@ func TestRouter_Mount(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+
err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view)
if !strings.Contains(err.Error(), "cannot mount under existing mount") {
t.Fatalf("err: %v", err)
@@ -106,6 +134,11 @@ func TestRouter_Mount(t *testing.T) {
t.Fatalf("bad: %v", v)
}
+ mountEntryFetched := r.MatchingMountByUUID(mountEntry.UUID)
+ if mountEntryFetched == nil || !reflect.DeepEqual(mountEntry, mountEntryFetched) {
+ t.Fatalf("failed to fetch mount entry using its ID; expected: %#v\n actual: %#v\n", mountEntry, mountEntryFetched)
+ }
+
mount, prefix, ok := r.MatchingStoragePrefix("logical/foo")
if !ok {
t.Fatalf("missing storage prefix")
@@ -131,6 +164,84 @@ func TestRouter_Mount(t *testing.T) {
}
}
+func TestRouter_MountCredential(t *testing.T) {
+ r := NewRouter()
+ _, barrier, _ := mockBarrier(t)
+ view := NewBarrierView(barrier, credentialBarrierPrefix)
+
+ meUUID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mountEntry := &MountEntry{
+ Path: "aws",
+ UUID: meUUID,
+ Accessor: "awsaccessor",
+ }
+
+ n := &NoopBackend{}
+ err = r.Mount(n, "auth/aws/", mountEntry, view)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ meUUID, err = uuid.GenerateUUID()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = r.Mount(n, "auth/aws/", &MountEntry{UUID: meUUID}, view)
+ if !strings.Contains(err.Error(), "cannot mount under existing mount") {
+ t.Fatalf("err: %v", err)
+ }
+
+ if path := r.MatchingMount("auth/aws/foo"); path != "auth/aws/" {
+ t.Fatalf("bad: %s", path)
+ }
+
+ if v := r.MatchingStorageView("auth/aws/foo"); v != view {
+ t.Fatalf("bad: %v", v)
+ }
+
+ if path := r.MatchingMount("auth/stage/aws/foo"); path != "" {
+ t.Fatalf("bad: %s", path)
+ }
+
+ if v := r.MatchingStorageView("auth/stage/aws/foo"); v != nil {
+ t.Fatalf("bad: %v", v)
+ }
+
+ mountEntryFetched := r.MatchingMountByUUID(mountEntry.UUID)
+ if mountEntryFetched == nil || !reflect.DeepEqual(mountEntry, mountEntryFetched) {
+ t.Fatalf("failed to fetch mount entry using its ID; expected: %#v\n actual: %#v\n", mountEntry, mountEntryFetched)
+ }
+
+ mount, prefix, ok := r.MatchingStoragePrefix("auth/foo")
+ if !ok {
+ t.Fatalf("missing storage prefix")
+ }
+ if mount != "auth/aws" || prefix != credentialBarrierPrefix {
+ t.Fatalf("Bad: %v - %v", mount, prefix)
+ }
+
+ req := &logical.Request{
+ Path: "auth/aws/foo",
+ }
+ resp, err := r.Route(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ // Verify the path
+ if len(n.Paths) != 1 || n.Paths[0] != "foo" {
+ t.Fatalf("bad: %v", n.Paths)
+ }
+}
+
func TestRouter_Unmount(t *testing.T) {
r := NewRouter()
_, barrier, _ := mockBarrier(t)
@@ -141,7 +252,7 @@ func TestRouter_Unmount(t *testing.T) {
t.Fatal(err)
}
n := &NoopBackend{}
- err = r.Mount(n, "prod/aws/", &MountEntry{Path: "prod/aws/", UUID: meUUID}, view)
+ err = r.Mount(n, "prod/aws/", &MountEntry{Path: "prod/aws/", UUID: meUUID, Accessor: "awsaccessor"}, view)
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -174,7 +285,7 @@ func TestRouter_Remount(t *testing.T) {
t.Fatal(err)
}
n := &NoopBackend{}
- me := &MountEntry{Path: "prod/aws/", UUID: meUUID}
+ me := &MountEntry{Path: "prod/aws/", UUID: meUUID, Accessor: "awsaccessor"}
err = r.Mount(n, "prod/aws/", me, view)
if err != nil {
t.Fatalf("err: %v", err)
@@ -237,7 +348,7 @@ func TestRouter_RootPath(t *testing.T) {
"policy/*",
},
}
- err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID, Accessor: "awsaccessor"}, view)
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -279,7 +390,7 @@ func TestRouter_LoginPath(t *testing.T) {
"oauth/*",
},
}
- err = r.Mount(n, "auth/foo/", &MountEntry{UUID: meUUID}, view)
+ err = r.Mount(n, "auth/foo/", &MountEntry{UUID: meUUID, Accessor: "authfooaccessor"}, view)
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -314,7 +425,7 @@ func TestRouter_Taint(t *testing.T) {
t.Fatal(err)
}
n := &NoopBackend{}
- err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID, Accessor: "awsaccessor"}, view)
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -357,7 +468,7 @@ func TestRouter_Untaint(t *testing.T) {
t.Fatal(err)
}
n := &NoopBackend{}
- err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID, Accessor: "awsaccessor"}, view)
if err != nil {
t.Fatalf("err: %v", err)
}
diff --git a/vendor/github.com/hashicorp/vault/vault/seal_testing.go b/vendor/github.com/hashicorp/vault/vault/seal_testing.go
index f74b140..27271cf 100644
--- a/vendor/github.com/hashicorp/vault/vault/seal_testing.go
+++ b/vendor/github.com/hashicorp/vault/vault/seal_testing.go
@@ -107,7 +107,7 @@ func (d *TestSeal) SetRecoveryKey(key []byte) error {
func testCoreUnsealedWithConfigs(t *testing.T, barrierConf, recoveryConf *SealConfig) (*Core, [][]byte, [][]byte, string) {
seal := &TestSeal{}
- core := TestCoreWithSeal(t, seal)
+ core := TestCoreWithSeal(t, seal, false)
result, err := core.Initialize(&InitParams{
BarrierConfig: barrierConf,
RecoveryConfig: recoveryConf,
diff --git a/vendor/github.com/hashicorp/vault/vault/testing.go b/vendor/github.com/hashicorp/vault/vault/testing.go
index b567fe7..3e500c2 100644
--- a/vendor/github.com/hashicorp/vault/vault/testing.go
+++ b/vendor/github.com/hashicorp/vault/vault/testing.go
@@ -2,16 +2,26 @@ package vault
import (
"bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/base64"
"encoding/pem"
"fmt"
+ "io"
+ "io/ioutil"
+ "math/big"
+ mathrand "math/rand"
"net"
"net/http"
+ "os"
"os/exec"
- "testing"
+ "path/filepath"
+ "sync"
"time"
log "github.com/mgutz/logxi/v1"
@@ -25,10 +35,14 @@ import (
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/logformat"
+ "github.com/hashicorp/vault/helper/reload"
"github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
"github.com/hashicorp/vault/physical"
+ "github.com/mitchellh/go-testing-interface"
+
+ physInmem "github.com/hashicorp/vault/physical/inmem"
)
// This file contains a number of methods that are useful for unit
@@ -70,24 +84,37 @@ oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F
)
// TestCore returns a pure in-memory, uninitialized core for testing.
-func TestCore(t testing.TB) *Core {
- return TestCoreWithSeal(t, nil)
+func TestCore(t testing.T) *Core {
+ return TestCoreWithSeal(t, nil, false)
}
-// TestCoreNewSeal returns an in-memory, ininitialized core with the new seal
-// configuration.
-func TestCoreNewSeal(t testing.TB) *Core {
- return TestCoreWithSeal(t, &TestSeal{})
+// TestCoreRaw returns a pure in-memory, uninitialized core for testing. The raw
+// storage endpoints are enabled with this core.
+func TestCoreRaw(t testing.T) *Core {
+ return TestCoreWithSeal(t, nil, true)
+}
+
+// TestCoreNewSeal returns a pure in-memory, uninitialized core with
+// the new seal configuration.
+func TestCoreNewSeal(t testing.T) *Core {
+ return TestCoreWithSeal(t, &TestSeal{}, false)
}
// TestCoreWithSeal returns a pure in-memory, uninitialized core with the
// specified seal for testing.
-func TestCoreWithSeal(t testing.TB, testSeal Seal) *Core {
+func TestCoreWithSeal(t testing.T, testSeal Seal, enableRaw bool) *Core {
logger := logformat.NewVaultLogger(log.LevelTrace)
- physicalBackend := physical.NewInmem(logger)
+ physicalBackend, err := physInmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
conf := testCoreConfig(t, physicalBackend, logger)
+ if enableRaw {
+ conf.EnableRaw = true
+ }
+
if testSeal != nil {
conf.Seal = testSeal
}
@@ -100,7 +127,7 @@ func TestCoreWithSeal(t testing.TB, testSeal Seal) *Core {
return c
}
-func testCoreConfig(t testing.TB, physicalBackend physical.Backend, logger log.Logger) *CoreConfig {
+func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Logger) *CoreConfig {
noopAudits := map[string]audit.Factory{
"noop": func(config *audit.BackendConfig) (audit.Backend, error) {
view := &logical.InmemStorage{}
@@ -108,14 +135,11 @@ func testCoreConfig(t testing.TB, physicalBackend physical.Backend, logger log.L
Key: "salt",
Value: []byte("foo"),
})
- var err error
- config.Salt, err = salt.NewSalt(view, &salt.Config{
+ config.SaltConfig = &salt.Config{
HMAC: sha256.New,
HMACType: "hmac-sha256",
- })
- if err != nil {
- t.Fatalf("error getting new salt: %v", err)
}
+ config.SaltView = view
return &noopAudit{
Config: config,
}, nil
@@ -134,7 +158,7 @@ func testCoreConfig(t testing.TB, physicalBackend physical.Backend, logger log.L
for backendName, backendFactory := range noopBackends {
logicalBackends[backendName] = backendFactory
}
- logicalBackends["generic"] = LeasedPassthroughBackendFactory
+ logicalBackends["kv"] = LeasedPassthroughBackendFactory
for backendName, backendFactory := range testLogicalBackends {
logicalBackends[backendName] = backendFactory
}
@@ -153,13 +177,13 @@ func testCoreConfig(t testing.TB, physicalBackend physical.Backend, logger log.L
// TestCoreInit initializes the core with a single key, and returns
// the key that must be used to unseal the core and a root token.
-func TestCoreInit(t testing.TB, core *Core) ([][]byte, string) {
- return TestCoreInitClusterWrapperSetup(t, core, nil, func() (http.Handler, http.Handler) { return nil, nil })
+func TestCoreInit(t testing.T, core *Core) ([][]byte, string) {
+ return TestCoreInitClusterWrapperSetup(t, core, nil, nil)
}
-func TestCoreInitClusterWrapperSetup(t testing.TB, core *Core, clusterAddrs []*net.TCPAddr, handlerSetupFunc func() (http.Handler, http.Handler)) ([][]byte, string) {
+func TestCoreInitClusterWrapperSetup(t testing.T, core *Core, clusterAddrs []*net.TCPAddr, handler http.Handler) ([][]byte, string) {
core.SetClusterListenerAddrs(clusterAddrs)
- core.SetClusterSetupFuncs(handlerSetupFunc)
+ core.SetClusterHandler(handler)
result, err := core.Initialize(&InitParams{
BarrierConfig: &SealConfig{
SecretShares: 3,
@@ -177,14 +201,24 @@ func TestCoreInitClusterWrapperSetup(t testing.TB, core *Core, clusterAddrs []*n
}
func TestCoreUnseal(core *Core, key []byte) (bool, error) {
- core.SetClusterSetupFuncs(func() (http.Handler, http.Handler) { return nil, nil })
return core.Unseal(key)
}
// TestCoreUnsealed returns a pure in-memory core that is already
// initialized and unsealed.
-func TestCoreUnsealed(t testing.TB) (*Core, [][]byte, string) {
+func TestCoreUnsealed(t testing.T) (*Core, [][]byte, string) {
core := TestCore(t)
+ return testCoreUnsealed(t, core)
+}
+
+// TestCoreUnsealedRaw returns a pure in-memory core that is already
+// initialized, unsealed, and with raw endpoints enabled.
+func TestCoreUnsealedRaw(t testing.T) (*Core, [][]byte, string) {
+ core := TestCoreRaw(t)
+ return testCoreUnsealed(t, core)
+}
+
+func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) {
keys, token := TestCoreInit(t, core)
for _, key := range keys {
if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
@@ -203,7 +237,7 @@ func TestCoreUnsealed(t testing.TB) (*Core, [][]byte, string) {
return core, keys, token
}
-func TestCoreUnsealedBackend(t testing.TB, backend physical.Backend) (*Core, [][]byte, string) {
+func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]byte, string) {
logger := logformat.NewVaultLogger(log.LevelTrace)
conf := testCoreConfig(t, backend, logger)
conf.Seal = &TestSeal{}
@@ -231,7 +265,7 @@ func TestCoreUnsealedBackend(t testing.TB, backend physical.Backend) (*Core, [][
return core, keys, token
}
-func testTokenStore(t testing.TB, c *Core) *TokenStore {
+func testTokenStore(t testing.T, c *Core) *TokenStore {
me := &MountEntry{
Table: credentialTableType,
Path: "token/",
@@ -254,21 +288,23 @@ func testTokenStore(t testing.TB, c *Core) *TokenStore {
}
ts := tokenstore.(*TokenStore)
- router := NewRouter()
- router.Mount(ts, "auth/token/", &MountEntry{Table: credentialTableType, UUID: ""}, ts.view)
+ err = c.router.Unmount("auth/token/")
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = c.router.Mount(ts, "auth/token/", &MountEntry{Table: credentialTableType, UUID: "authtokenuuid", Path: "auth/token", Accessor: "authtokenaccessor"}, ts.view)
+ if err != nil {
+ t.Fatal(err)
+ }
- subview := c.systemBarrierView.SubView(expirationSubPath)
- logger := logformat.NewVaultLogger(log.LevelTrace)
-
- exp := NewExpirationManager(router, subview, ts, logger)
- ts.SetExpirationManager(exp)
+ ts.SetExpirationManager(c.expiration)
return ts
}
// TestCoreWithTokenStore returns an in-memory core that has a token store
// mounted, so that logical token functions can be used
-func TestCoreWithTokenStore(t testing.TB) (*Core, *TokenStore, [][]byte, string) {
+func TestCoreWithTokenStore(t testing.T) (*Core, *TokenStore, [][]byte, string) {
c, keys, root := TestCoreUnsealed(t)
ts := testTokenStore(t, c)
@@ -278,7 +314,7 @@ func TestCoreWithTokenStore(t testing.TB) (*Core, *TokenStore, [][]byte, string)
// TestCoreWithBackendTokenStore returns a core that has a token store
// mounted and used the provided physical backend, so that logical token
// functions can be used
-func TestCoreWithBackendTokenStore(t testing.TB, backend physical.Backend) (*Core, *TokenStore, [][]byte, string) {
+func TestCoreWithBackendTokenStore(t testing.T, backend physical.Backend) (*Core, *TokenStore, [][]byte, string) {
c, keys, root := TestCoreUnsealedBackend(t, backend)
ts := testTokenStore(t, c)
@@ -293,6 +329,51 @@ func TestKeyCopy(key []byte) []byte {
return result
}
+func TestDynamicSystemView(c *Core) *dynamicSystemView {
+ me := &MountEntry{
+ Config: MountConfig{
+ DefaultLeaseTTL: 24 * time.Hour,
+ MaxLeaseTTL: 2 * 24 * time.Hour,
+ },
+ }
+
+ return &dynamicSystemView{c, me}
+}
+
+func TestAddTestPlugin(t testing.T, c *Core, name, testFunc string) {
+ file, err := os.Open(os.Args[0])
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer file.Close()
+
+ hash := sha256.New()
+
+ _, err = io.Copy(hash, file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sum := hash.Sum(nil)
+
+ // Determine plugin directory path
+ fullPath, err := filepath.EvalSymlinks(os.Args[0])
+ if err != nil {
+ t.Fatal(err)
+ }
+ directoryPath := filepath.Dir(fullPath)
+
+ // Set core's plugin directory and plugin catalog directory
+ c.pluginDirectory = directoryPath
+ c.pluginCatalog.directory = directoryPath
+
+ command := fmt.Sprintf("%s --test.run=%s", filepath.Base(os.Args[0]), testFunc)
+ err = c.pluginCatalog.Set(name, command, sum)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
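
TestAddTestPlugin works by registering the currently running test binary as the plugin executable: it hashes os.Args[0], points the catalog at the binary's own directory, and records a command that re-executes the binary with --test.run pinned to one test function. A hedged usage sketch; TestBackend_PluginMain is a hypothetical function that runs the plugin's serve loop when invoked that way:

package mybackend_test

import (
	"testing"

	"github.com/hashicorp/vault/vault"
)

func TestMyBackend_Plugin(t *testing.T) {
	core, _, _ := vault.TestCoreUnsealed(t)
	vault.TestAddTestPlugin(t, core, "mock-backend", "TestBackend_PluginMain")
	// Mounting "mock-backend" now re-executes this test binary with
	// --test.run=TestBackend_PluginMain, which is expected to speak the
	// plugin protocol instead of running assertions.
}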
var testLogicalBackends = map[string]logical.Factory{}
// Starts the test server which responds to SSH authentication.
@@ -400,11 +481,17 @@ func AddTestLogicalBackend(name string, factory logical.Factory) error {
}
type noopAudit struct {
- Config *audit.BackendConfig
+ Config *audit.BackendConfig
+ salt *salt.Salt
+ saltMutex sync.RWMutex
}
-func (n *noopAudit) GetHash(data string) string {
- return n.Config.Salt.GetIdentifiedHMAC(data)
+func (n *noopAudit) GetHash(data string) (string, error) {
+ salt, err := n.Salt()
+ if err != nil {
+ return "", err
+ }
+ return salt.GetIdentifiedHMAC(data), nil
}
func (n *noopAudit) LogRequest(a *logical.Auth, r *logical.Request, e error) error {
@@ -419,6 +506,32 @@ func (n *noopAudit) Reload() error {
return nil
}
+func (n *noopAudit) Invalidate() {
+ n.saltMutex.Lock()
+ defer n.saltMutex.Unlock()
+ n.salt = nil
+}
+
+func (n *noopAudit) Salt() (*salt.Salt, error) {
+ n.saltMutex.RLock()
+ if n.salt != nil {
+ defer n.saltMutex.RUnlock()
+ return n.salt, nil
+ }
+ n.saltMutex.RUnlock()
+ n.saltMutex.Lock()
+ defer n.saltMutex.Unlock()
+ if n.salt != nil {
+ return n.salt, nil
+ }
+ salt, err := salt.NewSalt(n.Config.SaltView, n.Config.SaltConfig)
+ if err != nil {
+ return nil, err
+ }
+ n.salt = salt
+ return salt, nil
+}
+
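
Salt() above is a textbook double-checked lock: a read-locked fast path, then an upgrade to the write lock with a re-check, because another goroutine may have initialized the salt between the RUnlock and the Lock. The generic shape of the idiom, with Thing and build standing in for salt.Salt and salt.NewSalt:

package lazycache

import "sync"

type Thing struct{}

var (
	mu     sync.RWMutex
	cached *Thing
)

func build() (*Thing, error) { return &Thing{}, nil } // stand-in constructor

func get() (*Thing, error) {
	mu.RLock()
	if cached != nil {
		defer mu.RUnlock()
		return cached, nil
	}
	mu.RUnlock()

	mu.Lock()
	defer mu.Unlock()
	if cached != nil { // re-check: another goroutine may have won the race
		return cached, nil
	}
	t, err := build()
	if err != nil {
		return nil, err
	}
	cached = t
	return cached, nil
}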
type rawHTTP struct{}
func (n *rawHTTP) HandleRequest(req *logical.Request) (*logical.Response, error) {
@@ -446,6 +559,10 @@ func (n *rawHTTP) System() logical.SystemView {
}
}
+func (n *rawHTTP) Logger() log.Logger {
+ return logformat.NewVaultLogger(log.LevelTrace)
+}
+
func (n *rawHTTP) Cleanup() {
// noop
}
@@ -459,6 +576,19 @@ func (n *rawHTTP) InvalidateKey(string) {
// noop
}
+func (n *rawHTTP) Setup(config *logical.BackendConfig) error {
+ // noop
+ return nil
+}
+
+func (n *rawHTTP) Type() logical.BackendType {
+ return logical.TypeUnknown
+}
+
+func (n *rawHTTP) RegisterLicense(license interface{}) error {
+ return nil
+}
+
func GenerateRandBytes(length int) ([]byte, error) {
if length < 0 {
return nil, fmt.Errorf("length must be >= 0")
@@ -480,7 +610,8 @@ func GenerateRandBytes(length int) ([]byte, error) {
return buf, nil
}
-func TestWaitActive(t testing.TB, core *Core) {
+func TestWaitActive(t testing.T, core *Core) {
+ t.Helper()
start := time.Now()
var standby bool
var err error
@@ -498,6 +629,83 @@ func TestWaitActive(t testing.TB, core *Core) {
}
}
+type TestCluster struct {
+ BarrierKeys [][]byte
+ CACert *x509.Certificate
+ CACertBytes []byte
+ CACertPEM []byte
+ CACertPEMFile string
+ CAKey *ecdsa.PrivateKey
+ CAKeyPEM []byte
+ Cores []*TestClusterCore
+ ID string
+ RootToken string
+ RootCAs *x509.CertPool
+ TempDir string
+}
+
+func (c *TestCluster) Start() {
+ for _, core := range c.Cores {
+ if core.Server != nil {
+ for _, ln := range core.Listeners {
+ go core.Server.Serve(ln)
+ }
+ }
+ }
+}
+
+func (c *TestCluster) EnsureCoresSealed(t testing.T) {
+ t.Helper()
+ if err := c.ensureCoresSealed(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func (c *TestCluster) Cleanup() {
+ // Close listeners
+ for _, core := range c.Cores {
+ if core.Listeners != nil {
+ for _, ln := range core.Listeners {
+ ln.Close()
+ }
+ }
+ }
+
+ // Seal the cores
+ c.ensureCoresSealed()
+
+ // Remove any temp dir that exists
+ if c.TempDir != "" {
+ os.RemoveAll(c.TempDir)
+ }
+
+ // Give time to actually shut down/clean up before the next test
+ time.Sleep(time.Second)
+}
+
+func (c *TestCluster) ensureCoresSealed() error {
+ for _, core := range c.Cores {
+ if err := core.Shutdown(); err != nil {
+ return err
+ }
+ timeout := time.Now().Add(60 * time.Second)
+ for {
+ if time.Now().After(timeout) {
+ return fmt.Errorf("timeout waiting for core to seal")
+ }
+ sealed, err := core.Sealed()
+ if err != nil {
+ return err
+ }
+ if sealed {
+ break
+ }
+ time.Sleep(250 * time.Millisecond)
+ }
+ }
+ return nil
+}
+
type TestListener struct {
net.Listener
Address *net.TCPAddr
@@ -505,189 +713,283 @@ type TestListener struct {
type TestClusterCore struct {
*Core
- Listeners []*TestListener
- Root string
- BarrierKeys [][]byte
- CACertBytes []byte
- CACert *x509.Certificate
- TLSConfig *tls.Config
- ClusterID string
- Client *api.Client
+ Client *api.Client
+ Handler http.Handler
+ Listeners []*TestListener
+ ReloadFuncs *map[string][]reload.ReloadFunc
+ ReloadFuncsLock *sync.RWMutex
+ Server *http.Server
+ ServerCert *x509.Certificate
+ ServerCertBytes []byte
+ ServerCertPEM []byte
+ ServerKey *ecdsa.PrivateKey
+ ServerKeyPEM []byte
+ TLSConfig *tls.Config
}
-func (t *TestClusterCore) CloseListeners() {
- if t.Listeners != nil {
- for _, ln := range t.Listeners {
- ln.Close()
+type TestClusterOptions struct {
+ KeepStandbysSealed bool
+ SkipInit bool
+ HandlerFunc func(*Core) http.Handler
+ BaseListenAddress string
+ NumCores int
+}
+
+var DefaultNumCores = 3
+
+type certInfo struct {
+ cert *x509.Certificate
+ certPEM []byte
+ certBytes []byte
+ key *ecdsa.PrivateKey
+ keyPEM []byte
+}
+
+// NewTestCluster creates a new test cluster based on the provided core config
+// and test cluster options.
+func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
+ var numCores int
+ if opts == nil || opts.NumCores == 0 {
+ numCores = DefaultNumCores
+ } else {
+ numCores = opts.NumCores
+ }
+
+ certIPs := []net.IP{
+ net.IPv6loopback,
+ net.ParseIP("127.0.0.1"),
+ }
+ var baseAddr *net.TCPAddr
+ if opts != nil && opts.BaseListenAddress != "" {
+ var err error
+ baseAddr, err = net.ResolveTCPAddr("tcp", opts.BaseListenAddress)
+ if err != nil {
+ t.Fatal("could not parse given base IP")
}
- }
- // Give time to actually shut down/clean up before the next test
- time.Sleep(time.Second)
-}
-
-func TestCluster(t testing.TB, handlers []http.Handler, base *CoreConfig, unsealStandbys bool) []*TestClusterCore {
- if handlers == nil || len(handlers) != 3 {
- t.Fatal("handlers must be size 3")
+ certIPs = append(certIPs, baseAddr.IP)
}
- //
- // TLS setup
- //
- block, _ := pem.Decode([]byte(TestClusterCACert))
- if block == nil {
- t.Fatal("error decoding cluster CA cert")
+ var testCluster TestCluster
+ tempDir, err := ioutil.TempDir("", "vault-test-cluster-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ testCluster.TempDir = tempDir
+
+ caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ testCluster.CAKey = caKey
+ caCertTemplate := &x509.Certificate{
+ Subject: pkix.Name{
+ CommonName: "localhost",
+ },
+ DNSNames: []string{"localhost"},
+ IPAddresses: certIPs,
+ KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign),
+ SerialNumber: big.NewInt(mathrand.Int63()),
+ NotBefore: time.Now().Add(-30 * time.Second),
+ NotAfter: time.Now().Add(262980 * time.Hour),
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+ caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, caKey.Public(), caKey)
+ if err != nil {
+ t.Fatal(err)
}
- caBytes := block.Bytes
caCert, err := x509.ParseCertificate(caBytes)
if err != nil {
t.Fatal(err)
}
-
- serverCert, err := tls.X509KeyPair([]byte(TestClusterServerCert), []byte(TestClusterServerKey))
+ testCluster.CACert = caCert
+ testCluster.CACertBytes = caBytes
+ testCluster.RootCAs = x509.NewCertPool()
+ testCluster.RootCAs.AddCert(caCert)
+ caCertPEMBlock := &pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: caBytes,
+ }
+ testCluster.CACertPEM = pem.EncodeToMemory(caCertPEMBlock)
+ testCluster.CACertPEMFile = filepath.Join(testCluster.TempDir, "ca_cert.pem")
+ err = ioutil.WriteFile(testCluster.CACertPEMFile, testCluster.CACertPEM, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ marshaledCAKey, err := x509.MarshalECPrivateKey(caKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ caKeyPEMBlock := &pem.Block{
+ Type: "EC PRIVATE KEY",
+ Bytes: marshaledCAKey,
+ }
+ testCluster.CAKeyPEM = pem.EncodeToMemory(caKeyPEMBlock)
+ err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "ca_key.pem"), testCluster.CAKeyPEM, 0755)
if err != nil {
t.Fatal(err)
}
- rootCAs := x509.NewCertPool()
- rootCAs.AppendCertsFromPEM([]byte(TestClusterCACert))
- tlsConfig := &tls.Config{
- Certificates: []tls.Certificate{serverCert},
- RootCAs: rootCAs,
- ClientCAs: rootCAs,
- ClientAuth: tls.VerifyClientCertIfGiven,
- }
- tlsConfig.BuildNameToCertificate()
+ var certInfoSlice []*certInfo
- // Sanity checking
- block, _ = pem.Decode([]byte(TestClusterServerCert))
- if block == nil {
- t.Fatal(err)
- }
- parsedServerCert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- t.Fatal(err)
- }
- chains, err := parsedServerCert.Verify(x509.VerifyOptions{
- DNSName: "127.0.0.1",
- Roots: rootCAs,
- KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
- })
- if err != nil {
- t.Fatal(err)
- }
- if chains == nil || len(chains) == 0 {
- t.Fatal("no verified chains for server auth")
- }
- chains, err = parsedServerCert.Verify(x509.VerifyOptions{
- DNSName: "127.0.0.1",
- Roots: rootCAs,
- KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
- })
- if err != nil {
- t.Fatal(err)
- }
- if chains == nil || len(chains) == 0 {
- t.Fatal("no verified chains for chains auth")
- }
+ //
+ // Certs generation
+ //
+ for i := 0; i < numCores; i++ {
+ key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ certTemplate := &x509.Certificate{
+ Subject: pkix.Name{
+ CommonName: "localhost",
+ },
+ DNSNames: []string{"localhost"},
+ IPAddresses: certIPs,
+ ExtKeyUsage: []x509.ExtKeyUsage{
+ x509.ExtKeyUsageServerAuth,
+ x509.ExtKeyUsageClientAuth,
+ },
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
+ SerialNumber: big.NewInt(mathrand.Int63()),
+ NotBefore: time.Now().Add(-30 * time.Second),
+ NotAfter: time.Now().Add(262980 * time.Hour),
+ }
+ certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, caCert, key.Public(), caKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ certPEMBlock := &pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: certBytes,
+ }
+ certPEM := pem.EncodeToMemory(certPEMBlock)
+ marshaledKey, err := x509.MarshalECPrivateKey(key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ keyPEMBlock := &pem.Block{
+ Type: "EC PRIVATE KEY",
+ Bytes: marshaledKey,
+ }
+ keyPEM := pem.EncodeToMemory(keyPEMBlock)
- logger := logformat.NewVaultLogger(log.LevelTrace)
+ certInfoSlice = append(certInfoSlice, &certInfo{
+ cert: cert,
+ certPEM: certPEM,
+ certBytes: certBytes,
+ key: key,
+ keyPEM: keyPEM,
+ })
+ }
//
// Listener setup
//
- ln, err := net.ListenTCP("tcp", &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 0,
- })
- if err != nil {
- t.Fatal(err)
- }
- c1lns := []*TestListener{&TestListener{
- Listener: tls.NewListener(ln, tlsConfig),
- Address: ln.Addr().(*net.TCPAddr),
- },
- }
- ln, err = net.ListenTCP("tcp", &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 0,
- })
- if err != nil {
- t.Fatal(err)
- }
- c1lns = append(c1lns, &TestListener{
- Listener: tls.NewListener(ln, tlsConfig),
- Address: ln.Addr().(*net.TCPAddr),
- })
- server1 := &http.Server{
- Handler: handlers[0],
- }
- if err := http2.ConfigureServer(server1, nil); err != nil {
- t.Fatal(err)
- }
- for _, ln := range c1lns {
- go server1.Serve(ln)
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+ ports := make([]int, numCores)
+ if baseAddr != nil {
+ for i := 0; i < numCores; i++ {
+ ports[i] = baseAddr.Port + i
+ }
+ } else {
+ baseAddr = &net.TCPAddr{
+ IP: net.ParseIP("127.0.0.1"),
+ Port: 0,
+ }
}
- ln, err = net.ListenTCP("tcp", &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 0,
- })
- if err != nil {
- t.Fatal(err)
- }
- c2lns := []*TestListener{&TestListener{
- Listener: tls.NewListener(ln, tlsConfig),
- Address: ln.Addr().(*net.TCPAddr),
- },
- }
- server2 := &http.Server{
- Handler: handlers[1],
- }
- if err := http2.ConfigureServer(server2, nil); err != nil {
- t.Fatal(err)
- }
- for _, ln := range c2lns {
- go server2.Serve(ln)
+ listeners := [][]*TestListener{}
+ servers := []*http.Server{}
+ handlers := []http.Handler{}
+ tlsConfigs := []*tls.Config{}
+ certGetters := []*reload.CertificateGetter{}
+ for i := 0; i < numCores; i++ {
+ baseAddr.Port = ports[i]
+ ln, err := net.ListenTCP("tcp", baseAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ certFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_cert.pem", i+1, ln.Addr().(*net.TCPAddr).Port))
+ keyFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_key.pem", i+1, ln.Addr().(*net.TCPAddr).Port))
+ err = ioutil.WriteFile(certFile, certInfoSlice[i].certPEM, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ioutil.WriteFile(keyFile, certInfoSlice[i].keyPEM, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tlsCert, err := tls.X509KeyPair(certInfoSlice[i].certPEM, certInfoSlice[i].keyPEM)
+ if err != nil {
+ t.Fatal(err)
+ }
+ certGetter := reload.NewCertificateGetter(certFile, keyFile)
+ certGetters = append(certGetters, certGetter)
+ tlsConfig := &tls.Config{
+ Certificates: []tls.Certificate{tlsCert},
+ RootCAs: testCluster.RootCAs,
+ ClientCAs: testCluster.RootCAs,
+ ClientAuth: tls.VerifyClientCertIfGiven,
+ NextProtos: []string{"h2", "http/1.1"},
+ GetCertificate: certGetter.GetCertificate,
+ }
+ tlsConfig.BuildNameToCertificate()
+ tlsConfigs = append(tlsConfigs, tlsConfig)
+ lns := []*TestListener{&TestListener{
+ Listener: tls.NewListener(ln, tlsConfig),
+ Address: ln.Addr().(*net.TCPAddr),
+ },
+ }
+ listeners = append(listeners, lns)
+ var handler http.Handler = http.NewServeMux()
+ handlers = append(handlers, handler)
+ server := &http.Server{
+ Handler: handler,
+ }
+ servers = append(servers, server)
+ if err := http2.ConfigureServer(server, nil); err != nil {
+ t.Fatal(err)
+ }
}
- ln, err = net.ListenTCP("tcp", &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 0,
- })
- if err != nil {
- t.Fatal(err)
- }
- c3lns := []*TestListener{&TestListener{
- Listener: tls.NewListener(ln, tlsConfig),
- Address: ln.Addr().(*net.TCPAddr),
- },
- }
- server3 := &http.Server{
- Handler: handlers[2],
- }
- if err := http2.ConfigureServer(server3, nil); err != nil {
- t.Fatal(err)
- }
- for _, ln := range c3lns {
- go server3.Serve(ln)
- }
-
- // Create three cores with the same physical and different redirect/cluster addrs
+ // Create three cores with the same physical and different redirect/cluster
+ // addrs.
// N.B.: On OSX, instead of random ports, it assigns new ports to new
// listeners sequentially. Aside from being a bad idea in a security sense,
// it also broke tests that assumed it was OK to just use the port above
- // the redirect addr. This has now been changed to 10 ports above, but if
+ // the redirect addr. This has now been changed to 105 ports above, but if
// we ever do more than three nodes in a cluster it may need to be bumped.
+ // Note: it's 105 so that we don't conflict with a running Consul by
+ // default.
coreConfig := &CoreConfig{
LogicalBackends: make(map[string]logical.Factory),
CredentialBackends: make(map[string]logical.Factory),
AuditBackends: make(map[string]audit.Factory),
- RedirectAddr: fmt.Sprintf("https://127.0.0.1:%d", c1lns[0].Address.Port),
- ClusterAddr: fmt.Sprintf("https://127.0.0.1:%d", c1lns[0].Address.Port+10),
+ RedirectAddr: fmt.Sprintf("https://127.0.0.1:%d", listeners[0][0].Address.Port),
+ ClusterAddr: fmt.Sprintf("https://127.0.0.1:%d", listeners[0][0].Address.Port+105),
DisableMlock: true,
+ EnableUI: true,
}
if base != nil {
+ coreConfig.DisableCache = base.DisableCache
+ coreConfig.EnableUI = base.EnableUI
+ coreConfig.DefaultLeaseTTL = base.DefaultLeaseTTL
+ coreConfig.MaxLeaseTTL = base.MaxLeaseTTL
+ coreConfig.CacheSize = base.CacheSize
+ coreConfig.PluginDirectory = base.PluginDirectory
+ coreConfig.Seal = base.Seal
+ coreConfig.DevToken = base.DevToken
+
+ if !coreConfig.DisableMlock {
+ base.DisableMlock = false
+ }
+
if base.Physical != nil {
coreConfig.Physical = base.Physical
}
@@ -723,36 +1025,43 @@ func TestCluster(t testing.TB, handlers []http.Handler, base *CoreConfig, unseal
if base.Logger != nil {
coreConfig.Logger = base.Logger
}
+
+ coreConfig.ClusterCipherSuites = base.ClusterCipherSuites
+
+ coreConfig.DisableCache = base.DisableCache
+
+ coreConfig.DevToken = base.DevToken
}
if coreConfig.Physical == nil {
- coreConfig.Physical = physical.NewInmem(logger)
+ coreConfig.Physical, err = physInmem.NewInmem(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
}
if coreConfig.HAPhysical == nil {
- coreConfig.HAPhysical = physical.NewInmemHA(logger)
+ haPhys, err := physInmem.NewInmemHA(nil, logger)
+ if err != nil {
+ t.Fatal(err)
+ }
+ coreConfig.HAPhysical = haPhys.(physical.HABackend)
}
- c1, err := NewCore(coreConfig)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- coreConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", c2lns[0].Address.Port)
- if coreConfig.ClusterAddr != "" {
- coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", c2lns[0].Address.Port+10)
- }
- c2, err := NewCore(coreConfig)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- coreConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", c3lns[0].Address.Port)
- if coreConfig.ClusterAddr != "" {
- coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", c3lns[0].Address.Port+10)
- }
- c3, err := NewCore(coreConfig)
- if err != nil {
- t.Fatalf("err: %v", err)
+ cores := []*Core{}
+ for i := 0; i < numCores; i++ {
+ coreConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port)
+ if coreConfig.ClusterAddr != "" {
+ coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port+105)
+ }
+ c, err := NewCore(coreConfig)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ cores = append(cores, c)
+ if opts != nil && opts.HandlerFunc != nil {
+ handlers[i] = opts.HandlerFunc(c)
+ servers[i].Handler = handlers[i]
+ }
}
//
@@ -763,72 +1072,97 @@ func TestCluster(t testing.TB, handlers []http.Handler, base *CoreConfig, unseal
for i, ln := range lns {
ret[i] = &net.TCPAddr{
IP: ln.Address.IP,
- Port: ln.Address.Port + 10,
+ Port: ln.Address.Port + 105,
}
}
return ret
}
- c2.SetClusterListenerAddrs(clusterAddrGen(c2lns))
- c2.SetClusterSetupFuncs(WrapHandlerForClustering(handlers[1], logger))
- c3.SetClusterListenerAddrs(clusterAddrGen(c3lns))
- c3.SetClusterSetupFuncs(WrapHandlerForClustering(handlers[2], logger))
- keys, root := TestCoreInitClusterWrapperSetup(t, c1, clusterAddrGen(c1lns), WrapHandlerForClustering(handlers[0], logger))
- for _, key := range keys {
- if _, err := c1.Unseal(TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
+ if numCores > 1 {
+ for i := 1; i < numCores; i++ {
+ cores[i].SetClusterListenerAddrs(clusterAddrGen(listeners[i]))
+ cores[i].SetClusterHandler(handlers[i])
}
}
- // Verify unsealed
- sealed, err := c1.Sealed()
- if err != nil {
- t.Fatalf("err checking seal status: %s", err)
- }
- if sealed {
- t.Fatal("should not be sealed")
- }
+ if opts == nil || !opts.SkipInit {
+ keys, root := TestCoreInitClusterWrapperSetup(t, cores[0], clusterAddrGen(listeners[0]), handlers[0])
+ barrierKeys, _ := copystructure.Copy(keys)
+ testCluster.BarrierKeys = barrierKeys.([][]byte)
+ testCluster.RootToken = root
- TestWaitActive(t, c1)
-
- if unsealStandbys {
- for _, key := range keys {
- if _, err := c2.Unseal(TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
+ // Write root token and barrier keys
+ err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(root), 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var buf bytes.Buffer
+ for i, key := range testCluster.BarrierKeys {
+ buf.Write([]byte(base64.StdEncoding.EncodeToString(key)))
+ if i < len(testCluster.BarrierKeys)-1 {
+ buf.WriteRune('\n')
}
}
+ err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "barrier_keys"), buf.Bytes(), 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Unseal first core
for _, key := range keys {
- if _, err := c3.Unseal(TestKeyCopy(key)); err != nil {
+ if _, err := cores[0].Unseal(TestKeyCopy(key)); err != nil {
t.Fatalf("unseal err: %s", err)
}
}
- // Let them come fully up to standby
- time.Sleep(2 * time.Second)
+ // Verify unsealed
+ sealed, err := cores[0].Sealed()
+ if err != nil {
+ t.Fatalf("err checking seal status: %s", err)
+ }
+ if sealed {
+ t.Fatal("should not be sealed")
+ }
- // Ensure cluster connection info is populated
- isLeader, _, err := c2.Leader()
+ TestWaitActive(t, cores[0])
+
+ // Unseal other cores unless otherwise specified
+ if (opts == nil || !opts.KeepStandbysSealed) && numCores > 1 {
+ for i := 1; i < numCores; i++ {
+ for _, key := range keys {
+ if _, err := cores[i].Unseal(TestKeyCopy(key)); err != nil {
+ t.Fatalf("unseal err: %s", err)
+ }
+ }
+ }
+
+ // Let them come fully up to standby
+ time.Sleep(2 * time.Second)
+
+ // Ensure cluster connection info is populated.
+ // Other cores should not come up as leaders.
+ for i := 1; i < numCores; i++ {
+ isLeader, _, _, err := cores[i].Leader()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if isLeader {
+ t.Fatalf("core[%d] should not be leader", i)
+ }
+ }
+ }
+
+ //
+ // Set test cluster core(s) and test cluster
+ //
+ cluster, err := cores[0].Cluster()
if err != nil {
t.Fatal(err)
}
- if isLeader {
- t.Fatal("c2 should not be leader")
- }
- isLeader, _, err = c3.Leader()
- if err != nil {
- t.Fatal(err)
- }
- if isLeader {
- t.Fatal("c3 should not be leader")
- }
+ testCluster.ID = cluster.ID
}
- cluster, err := c1.Cluster()
- if err != nil {
- t.Fatal(err)
- }
-
- getAPIClient := func(port int) *api.Client {
+ getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client {
transport := cleanhttp.DefaultPooledTransport()
transport.TLSClientConfig = tlsConfig
client := &http.Client{
@@ -845,151 +1179,35 @@ func TestCluster(t testing.TB, handlers []http.Handler, base *CoreConfig, unseal
if err != nil {
t.Fatal(err)
}
- apiClient.SetToken(root)
+ if opts == nil || !opts.SkipInit {
+ apiClient.SetToken(testCluster.RootToken)
+ }
return apiClient
}
var ret []*TestClusterCore
- keyCopies, _ := copystructure.Copy(keys)
- ret = append(ret, &TestClusterCore{
- Core: c1,
- Listeners: c1lns,
- Root: root,
- BarrierKeys: keyCopies.([][]byte),
- CACertBytes: caBytes,
- CACert: caCert,
- TLSConfig: tlsConfig,
- ClusterID: cluster.ID,
- Client: getAPIClient(c1lns[0].Address.Port),
- })
+ for i := 0; i < numCores; i++ {
+ tcc := &TestClusterCore{
+ Core: cores[i],
+ ServerKey: certInfoSlice[i].key,
+ ServerKeyPEM: certInfoSlice[i].keyPEM,
+ ServerCert: certInfoSlice[i].cert,
+ ServerCertBytes: certInfoSlice[i].certBytes,
+ ServerCertPEM: certInfoSlice[i].certPEM,
+ Listeners: listeners[i],
+ Handler: handlers[i],
+ Server: servers[i],
+ TLSConfig: tlsConfigs[i],
+ Client: getAPIClient(listeners[i][0].Address.Port, tlsConfigs[i]),
+ }
+ tcc.ReloadFuncs = &cores[i].reloadFuncs
+ tcc.ReloadFuncsLock = &cores[i].reloadFuncsLock
+ tcc.ReloadFuncsLock.Lock()
+ (*tcc.ReloadFuncs)["listener|tcp"] = []reload.ReloadFunc{certGetters[i].Reload}
+ tcc.ReloadFuncsLock.Unlock()
+ ret = append(ret, tcc)
+ }
- keyCopies, _ = copystructure.Copy(keys)
- ret = append(ret, &TestClusterCore{
- Core: c2,
- Listeners: c2lns,
- Root: root,
- BarrierKeys: keyCopies.([][]byte),
- CACertBytes: caBytes,
- CACert: caCert,
- TLSConfig: tlsConfig,
- ClusterID: cluster.ID,
- Client: getAPIClient(c2lns[0].Address.Port),
- })
-
- keyCopies, _ = copystructure.Copy(keys)
- ret = append(ret, &TestClusterCore{
- Core: c3,
- Listeners: c3lns,
- Root: root,
- BarrierKeys: keyCopies.([][]byte),
- CACertBytes: caBytes,
- CACert: caCert,
- TLSConfig: tlsConfig,
- ClusterID: cluster.ID,
- Client: getAPIClient(c3lns[0].Address.Port),
- })
-
- return ret
+ testCluster.Cores = ret
+ return &testCluster
}
-
-const (
- TestClusterCACert = `-----BEGIN CERTIFICATE-----
-MIIDPjCCAiagAwIBAgIUfIKsF2VPT7sdFcKOHJH2Ii6K4MwwDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwNTQyWhgPMjA2
-NjA0MjAxNjA2MTJaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
-9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdS
-xz9hfymuJb+cN8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP
-67HDzVZhGBHlHTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xE
-JsHQPYS9ASe2eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUb
-cCcIZyk4QVFZ1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SY
-WrCONRw61A5Zwx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABo4GBMH8w
-DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOuKvPiU
-G06iHkRXAOeMiUdBfHFyMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRXAOeMiUdBfHFy
-MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
-AQBcN/UdAMzc7UjRdnIpZvO+5keBGhL/vjltnGM1dMWYHa60Y5oh7UIXF+P1RdNW
-n7g80lOyvkSR15/r1rDkqOK8/4oruXU31EcwGhDOC4hU6yMUy4ltV/nBoodHBXNh
-MfKiXeOstH1vdI6G0P6W93Bcww6RyV1KH6sT2dbETCw+iq2VN9CrruGIWzd67UT/
-spe/kYttr3UYVV3O9kqgffVVgVXg/JoRZ3J7Hy2UEXfh9UtWNanDlRuXaZgE9s/d
-CpA30CHpNXvKeyNeW2ktv+2nAbSpvNW+e6MecBCTBIoDSkgU8ShbrzmDKVwNN66Q
-5gn6KxUPBKHEtNzs5DgGM7nq
------END CERTIFICATE-----`
-
- TestClusterCAKey = `-----BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdSxz9hfymuJb+c
-N8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP67HDzVZhGBHl
-HTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xEJsHQPYS9ASe2
-eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUbcCcIZyk4QVFZ
-1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SYWrCONRw61A5Z
-wx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABAoIBAG3bCo7ljMQb6tel
-CAUjL5Ilqz5a9ebOsONABRYLOclq4ePbatxawdJF7/sSLwZxKkIJnZtvr2Hkubxg
-eOO8KC0YbVS9u39Rjc2QfobxHfsojpbWSuCJl+pvwinbkiUAUxXR7S/PtCPJKat/
-fGdYCiMQ/tqnynh4vR4+/d5o12c0KuuQ22/MdEf3GOadUamRXS1ET9iJWqla1pJW
-TmzrlkGAEnR5PPO2RMxbnZCYmj3dArxWAnB57W+bWYla0DstkDKtwg2j2ikNZpXB
-nkZJJpxR76IYD1GxfwftqAKxujKcyfqB0dIKCJ0UmfOkauNWjexroNLwaAOC3Nud
-XIxppAECgYEA1wJ9EH6A6CrSjdzUocF9LtQy1LCDHbdiQFHxM5/zZqIxraJZ8Gzh
-Q0d8JeOjwPdG4zL9pHcWS7+x64Wmfn0+Qfh6/47Vy3v90PIL0AeZYshrVZyJ/s6X
-YkgFK80KEuWtacqIZ1K2UJyCw81u/ynIl2doRsIbgkbNeN0opjmqVTMCgYEA3CkW
-2fETWK1LvmgKFjG1TjOotVRIOUfy4iN0kznPm6DK2PgTF5DX5RfktlmA8i8WPmB7
-YFOEdAWHf+RtoM/URa7EAGZncCWe6uggAcWqznTS619BJ63OmncpSWov5Byg90gJ
-48qIMY4wDjE85ypz1bmBc2Iph974dtWeDtB7dsECgYAyKZh4EquMfwEkq9LH8lZ8
-aHF7gbr1YeWAUB3QB49H8KtacTg+iYh8o97pEBUSXh6hvzHB/y6qeYzPAB16AUpX
-Jdu8Z9ylXsY2y2HKJRu6GjxAewcO9bAH8/mQ4INrKT6uIdx1Dq0OXZV8jR9KVLtB
-55RCfeLhIBesDR0Auw9sVQKBgB0xTZhkgP43LF35Ca1btgDClNJGdLUztx8JOIH1
-HnQyY/NVIaL0T8xO2MLdJ131pGts+68QI/YGbaslrOuv4yPCQrcS3RBfzKy1Ttkt
-TrLFhtoy7T7HqyeMOWtEq0kCCs3/PWB5EIoRoomfOcYlOOrUCDg2ge9EP4nyVVz9
-hAGBAoGBAJXw/ufevxpBJJMSyULmVWYr34GwLC1OhSE6AVVt9JkIYnc5L4xBKTHP
-QNKKJLmFmMsEqfxHUNWmpiHkm2E0p37Zehui3kywo+A4ybHPTua70ZWQfZhKxLUr
-PvJa8JmwiCM7kO8zjOv+edY1mMWrbjAZH1YUbfcTHmST7S8vp0F3
------END RSA PRIVATE KEY-----`
-
- TestClusterServerCert = `-----BEGIN CERTIFICATE-----
-MIIDtzCCAp+gAwIBAgIUBLqh6ctGWVDUxFhxJX7m6S/bnrcwDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwOTI2WhgPMjA2
-NjA0MjAxNTA5NTZaMBsxGTAXBgNVBAMTEGNlcnQubXl2YXVsdC5jb20wggEiMA0G
-CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDY3gPB29kkdbu0mPO6J0efagQhSiXB
-9OyDuLf5sMk6CVDWVWal5hISkyBmw/lXgF7qC2XFKivpJOrcGQd5Ep9otBqyJLzI
-b0IWdXuPIrVnXDwcdWr86ybX2iC42zKWfbXgjzGijeAVpl0UJLKBj+fk5q6NvkRL
-5FUL6TRV7Krn9mrmnrV9J5IqV15pTd9W2aVJ6IqWvIPCACtZKulqWn4707uy2X2W
-1Stq/5qnp1pDshiGk1VPyxCwQ6yw3iEcgecbYo3vQfhWcv7Q8LpSIM9ZYpXu6OmF
-+czqRZS9gERl+wipmmrN1MdYVrTuQem21C/PNZ4jo4XUk1SFx6JrcA+lAgMBAAGj
-gfUwgfIwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSe
-Cl9WV3BjGCwmS/KrDSLRjfwyqjAfBgNVHSMEGDAWgBTrirz4lBtOoh5EVwDnjIlH
-QXxxcjA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAKGH2h0dHA6Ly8xMjcuMC4w
-LjE6ODIwMC92MS9wa2kvY2EwIQYDVR0RBBowGIIQY2VydC5teXZhdWx0LmNvbYcE
-fwAAATAxBgNVHR8EKjAoMCagJKAihiBodHRwOi8vMTI3LjAuMC4xOjgyMDAvdjEv
-cGtpL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAWGholPN8buDYwKbUiDavbzjsxUIX
-lU4MxEqOHw7CD3qIYIauPboLvB9EldBQwhgOOy607Yvdg3rtyYwyBFwPhHo/hK3Z
-6mn4hc6TF2V+AUdHBvGzp2dbYLeo8noVoWbQ/lBulggwlIHNNF6+a3kALqsqk1Ch
-f/hzsjFnDhAlNcYFgG8TgfE2lE/FckvejPqBffo7Q3I+wVAw0buqiz5QL81NOT+D
-Y2S9LLKLRaCsWo9wRU1Az4Rhd7vK5SEMh16jJ82GyEODWPvuxOTI1MnzfnbWyLYe
-TTp6YBjGMVf1I6NEcWNur7U17uIOiQjMZ9krNvoMJ1A/cxCoZ98QHgcIPg==
------END CERTIFICATE-----`
-
- TestClusterServerKey = `-----BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEA2N4DwdvZJHW7tJjzuidHn2oEIUolwfTsg7i3+bDJOglQ1lVm
-peYSEpMgZsP5V4Be6gtlxSor6STq3BkHeRKfaLQasiS8yG9CFnV7jyK1Z1w8HHVq
-/Osm19oguNsyln214I8xoo3gFaZdFCSygY/n5Oaujb5ES+RVC+k0Veyq5/Zq5p61
-fSeSKldeaU3fVtmlSeiKlryDwgArWSrpalp+O9O7stl9ltUrav+ap6daQ7IYhpNV
-T8sQsEOssN4hHIHnG2KN70H4VnL+0PC6UiDPWWKV7ujphfnM6kWUvYBEZfsIqZpq
-zdTHWFa07kHpttQvzzWeI6OF1JNUhceia3APpQIDAQABAoIBAQCH3vEzr+3nreug
-RoPNCXcSJXXY9X+aeT0FeeGqClzIg7Wl03OwVOjVwl/2gqnhbIgK0oE8eiNwurR6
-mSPZcxV0oAJpwiKU4T/imlCDaReGXn86xUX2l82KRxthNdQH/VLKEmzij0jpx4Vh
-bWx5SBPdkbmjDKX1dmTiRYWIn/KjyNPvNvmtwdi8Qluhf4eJcNEUr2BtblnGOmfL
-FdSu+brPJozpoQ1QdDnbAQRgqnh7Shl0tT85whQi0uquqIj1gEOGVjmBvDDnL3GV
-WOENTKqsmIIoEzdZrql1pfmYTk7WNaD92bfpN128j8BF7RmAV4/DphH0pvK05y9m
-tmRhyHGxAoGBAOV2BBocsm6xup575VqmFN+EnIOiTn+haOvfdnVsyQHnth63fOQx
-PNtMpTPR1OMKGpJ13e2bV0IgcYRsRkScVkUtoa/17VIgqZXffnJJ0A/HT67uKBq3
-8o7RrtyK5N20otw0lZHyqOPhyCdpSsurDhNON1kPVJVYY4N1RiIxfut/AoGBAPHz
-HfsJ5ZkyELE9N/r4fce04lprxWH+mQGK0/PfjS9caXPhj/r5ZkVMvzWesF3mmnY8
-goE5S35TuTvV1+6rKGizwlCFAQlyXJiFpOryNWpLwCmDDSzLcm+sToAlML3tMgWU
-jM3dWHx3C93c3ft4rSWJaUYI9JbHsMzDW6Yh+GbbAoGBANIbKwxh5Hx5XwEJP2yu
-kIROYCYkMy6otHLujgBdmPyWl+suZjxoXWoMl2SIqR8vPD+Jj6mmyNJy9J6lqf3f
-DRuQ+fEuBZ1i7QWfvJ+XuN0JyovJ5Iz6jC58D1pAD+p2IX3y5FXcVQs8zVJRFjzB
-p0TEJOf2oqORaKWRd6ONoMKvAoGALKu6aVMWdQZtVov6/fdLIcgf0pn7Q3CCR2qe
-X3Ry2L+zKJYIw0mwvDLDSt8VqQCenB3n6nvtmFFU7ds5lvM67rnhsoQcAOaAehiS
-rl4xxoJd5Ewx7odRhZTGmZpEOYzFo4odxRSM9c30/u18fqV1Mm0AZtHYds4/sk6P
-aUj0V+kCgYBMpGrJk8RSez5g0XZ35HfpI4ENoWbiwB59FIpWsLl2LADEh29eC455
-t9Muq7MprBVBHQo11TMLLFxDIjkuMho/gcKgpYXCt0LfiNm8EZehvLJUXH+3WqUx
-we6ywrbFCs6LaxaOCtTiLsN+GbZCatITL0UJaeBmTAbiw0KQjUuZPQ==
------END RSA PRIVATE KEY-----`
-)
diff --git a/vendor/github.com/hashicorp/vault/vault/token_store.go b/vendor/github.com/hashicorp/vault/vault/token_store.go
index 46614ed..2708e48 100644
--- a/vendor/github.com/hashicorp/vault/vault/token_store.go
+++ b/vendor/github.com/hashicorp/vault/vault/token_store.go
@@ -3,13 +3,19 @@ package vault
import (
"encoding/json"
"fmt"
+ "sync"
+ "sync/atomic"
+
"regexp"
"strings"
"time"
+ log "github.com/mgutz/logxi/v1"
+
"github.com/armon/go-metrics"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/locksutil"
"github.com/hashicorp/vault/helper/parseutil"
@@ -79,7 +85,6 @@ type TokenStore struct {
*framework.Backend
view *BarrierView
- salt *salt.Salt
expiration *ExpirationManager
@@ -90,6 +95,14 @@ type TokenStore struct {
tokenLocks []*locksutil.LockEntry
cubbyholeDestroyer func(*TokenStore, string) error
+
+ logger log.Logger
+
+ saltLock sync.RWMutex
+ salt *salt.Salt
+ saltConfig *salt.Config
+
+ tidyLock int64
}
// NewTokenStore is used to construct a token store that is
@@ -102,14 +115,15 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
t := &TokenStore{
view: view,
cubbyholeDestroyer: destroyCubbyhole,
+ logger: c.logger,
+ tokenLocks: locksutil.CreateLocks(),
+ saltLock: sync.RWMutex{},
}
if c.policyStore != nil {
t.policyLookupFunc = c.policyStore.GetPolicy
}
- t.tokenLocks = locksutil.CreateLocks()
-
// Setup the framework endpoints
t.Backend = &framework.Backend{
AuthRenew: t.authRenew,
@@ -126,7 +140,7 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
lookupPrefix,
accessorPrefix,
parentPrefix,
- "salt",
+ salt.DefaultLocation,
},
},
@@ -469,18 +483,50 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
}
func (ts *TokenStore) Initialize() error {
- // Setup the salt
- salt, err := salt.NewSalt(ts.view, &salt.Config{
+ ts.saltLock.Lock()
+
+ // Setup the salt config
+ ts.saltConfig = &salt.Config{
HashFunc: salt.SHA1Hash,
- })
- if err != nil {
- return err
+ Location: salt.DefaultLocation,
}
- ts.salt = salt
+ ts.salt = nil
+ ts.saltLock.Unlock()
return nil
}
+func (ts *TokenStore) Invalidate(key string) {
+ ts.logger.Trace("token: invalidating key", "key", key)
+
+ switch key {
+ case tokenSubPath + salt.DefaultLocation:
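+		// Drop the cached salt; the next Salt() call lazily rebuilds it
+		// from the view.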
+ ts.saltLock.Lock()
+ ts.salt = nil
+ ts.saltLock.Unlock()
+ }
+}
+
+func (ts *TokenStore) Salt() (*salt.Salt, error) {
+ ts.saltLock.RLock()
+ if ts.salt != nil {
+ defer ts.saltLock.RUnlock()
+ return ts.salt, nil
+ }
+ ts.saltLock.RUnlock()
+ ts.saltLock.Lock()
+ defer ts.saltLock.Unlock()
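+	// Double-checked locking: another goroutine may have populated ts.salt
+	// between the RUnlock above and acquiring the write lock.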
+ if ts.salt != nil {
+ return ts.salt, nil
+ }
+ salt, err := salt.NewSalt(ts.view, ts.saltConfig)
+ if err != nil {
+ return nil, err
+ }
+ ts.salt = salt
+ return salt, nil
+}
+
// TokenEntry is used to represent a given token
type TokenEntry struct {
// ID of this entry, generally a random UUID
@@ -581,8 +627,13 @@ func (ts *TokenStore) SetExpirationManager(exp *ExpirationManager) {
}
// SaltID is used to apply a salt and hash to an ID to make sure its not reversible
-func (ts *TokenStore) SaltID(id string) string {
- return ts.salt.SaltID(id)
+func (ts *TokenStore) SaltID(id string) (string, error) {
+ s, err := ts.Salt()
+ if err != nil {
+ return "", err
+ }
+
+ return s.SaltID(id), nil
}
// RootToken is used to generate a new token with root privileges and no parent
@@ -610,7 +661,7 @@ func (ts *TokenStore) tokenStoreAccessorList(
ret := make([]string, 0, len(entries))
for _, entry := range entries {
- aEntry, err := ts.lookupBySaltedAccessor(entry)
+ aEntry, err := ts.lookupBySaltedAccessor(entry, false)
if err != nil {
resp.AddWarning("Found an accessor entry that could not be successfully decoded")
continue
@@ -641,7 +692,11 @@ func (ts *TokenStore) createAccessor(entry *TokenEntry) error {
entry.Accessor = accessorUUID
// Create index entry, mapping the accessor to the token ID
- path := accessorPrefix + ts.SaltID(entry.Accessor)
+ saltID, err := ts.SaltID(entry.Accessor)
+ if err != nil {
+ return err
+ }
+ path := accessorPrefix + saltID
aEntry := &accessorEntry{
TokenID: entry.ID,
@@ -672,9 +727,18 @@ func (ts *TokenStore) create(entry *TokenEntry) error {
entry.ID = entryUUID
}
+ saltedId, err := ts.SaltID(entry.ID)
+ if err != nil {
+ return err
+ }
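+	// Refuse to create a token whose ID already exists; the tainted lookup
+	// also matches entries that are mid-revocation.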
+ exist, _ := ts.lookupSalted(saltedId, true)
+ if exist != nil {
+ return fmt.Errorf("cannot create a token with a duplicate ID")
+ }
+
entry.Policies = policyutil.SanitizePolicies(entry.Policies, policyutil.DoNotAddDefaultPolicy)
- err := ts.createAccessor(entry)
+ err = ts.createAccessor(entry)
if err != nil {
return err
}
@@ -692,7 +756,10 @@ func (ts *TokenStore) store(entry *TokenEntry) error {
// storeCommon handles the actual storage of an entry, possibly generating
// secondary indexes
func (ts *TokenStore) storeCommon(entry *TokenEntry, writeSecondary bool) error {
- saltedId := ts.SaltID(entry.ID)
+ saltedId, err := ts.SaltID(entry.ID)
+ if err != nil {
+ return err
+ }
// Marshal the entry
enc, err := json.Marshal(entry)
@@ -716,7 +783,11 @@ func (ts *TokenStore) storeCommon(entry *TokenEntry, writeSecondary bool) error
}
// Create the index entry
- path := parentPrefix + ts.SaltID(entry.Parent) + "/" + saltedId
+ parentSaltedID, err := ts.SaltID(entry.Parent)
+ if err != nil {
+ return err
+ }
+ path := parentPrefix + parentSaltedID + "/" + saltedId
le := &logical.StorageEntry{Key: path}
if err := ts.view.Put(le); err != nil {
return fmt.Errorf("failed to persist entry: %v", err)
@@ -756,7 +827,12 @@ func (ts *TokenStore) UseToken(te *TokenEntry) (*TokenEntry, error) {
defer lock.Unlock()
// Call lookupSalted instead of Lookup to avoid deadlocking since Lookup grabs a read lock
- te, err := ts.lookupSalted(ts.SaltID(te.ID), false)
+ saltedID, err := ts.SaltID(te.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ te, err = ts.lookupSalted(saltedID, false)
if err != nil {
return nil, fmt.Errorf("failed to refresh entry: %v", err)
}
@@ -808,15 +884,19 @@ func (ts *TokenStore) Lookup(id string) (*TokenEntry, error) {
lock.RLock()
defer lock.RUnlock()
- return ts.lookupSalted(ts.SaltID(id), false)
+ saltedID, err := ts.SaltID(id)
+ if err != nil {
+ return nil, err
+ }
+ return ts.lookupSalted(saltedID, false)
}
// lookupSalted is used to find a token given its salted ID. If tainted is
// true, entries that are in some revocation state (currently, indicated by num
// uses < 0), the entry will be returned anyways
-func (ts *TokenStore) lookupSalted(saltedId string, tainted bool) (*TokenEntry, error) {
+func (ts *TokenStore) lookupSalted(saltedID string, tainted bool) (*TokenEntry, error) {
// Lookup token
- path := lookupPrefix + saltedId
+ path := lookupPrefix + saltedID
raw, err := ts.view.Get(path)
if err != nil {
return nil, fmt.Errorf("failed to read entry: %v", err)
@@ -838,6 +918,19 @@ func (ts *TokenStore) lookupSalted(saltedId string, tainted bool) (*TokenEntry,
return nil, nil
}
+ // If we are still restoring the expiration manager, we want to ensure the
+ // token is not expired
+ if ts.expiration == nil {
+ return nil, nil
+ }
+ check, err := ts.expiration.RestoreSaltedTokenCheck(entry.Path, saltedID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to check token in restore mode: %v", err)
+ }
+ if !check {
+ return nil, nil
+ }
+
persistNeeded := false
// Upgrade the deprecated fields
@@ -891,7 +984,11 @@ func (ts *TokenStore) Revoke(id string) error {
return fmt.Errorf("cannot revoke blank token")
}
- return ts.revokeSalted(ts.SaltID(id))
+ saltedID, err := ts.SaltID(id)
+ if err != nil {
+ return err
+ }
+ return ts.revokeSalted(saltedID)
}
// revokeSalted is used to invalidate a given salted token,
@@ -981,7 +1078,12 @@ func (ts *TokenStore) revokeSalted(saltedId string) (ret error) {
// Clear the secondary index if any
if entry.Parent != "" {
- path := parentPrefix + ts.SaltID(entry.Parent) + "/" + saltedId
+ parentSaltedID, err := ts.SaltID(entry.Parent)
+ if err != nil {
+ return err
+ }
+
+ path := parentPrefix + parentSaltedID + "/" + saltedId
if err = ts.view.Delete(path); err != nil {
return fmt.Errorf("failed to delete entry: %v", err)
}
@@ -989,7 +1091,12 @@ func (ts *TokenStore) revokeSalted(saltedId string) (ret error) {
// Clear the accessor index if any
if entry.Accessor != "" {
- path := accessorPrefix + ts.SaltID(entry.Accessor)
+ accessorSaltedID, err := ts.SaltID(entry.Accessor)
+ if err != nil {
+ return err
+ }
+
+ path := accessorPrefix + accessorSaltedID
if err = ts.view.Delete(path); err != nil {
return fmt.Errorf("failed to delete entry: %v", err)
}
@@ -1014,7 +1121,10 @@ func (ts *TokenStore) RevokeTree(id string) error {
}
// Get the salted ID
- saltedId := ts.SaltID(id)
+ saltedId, err := ts.SaltID(id)
+ if err != nil {
+ return err
+ }
// Nuke the entire tree recursively
if err := ts.revokeTreeSalted(saltedId); err != nil {
@@ -1064,11 +1174,15 @@ func (ts *TokenStore) handleCreateAgainstRole(
return ts.handleCreateCommon(req, d, false, roleEntry)
}
-func (ts *TokenStore) lookupByAccessor(accessor string) (accessorEntry, error) {
- return ts.lookupBySaltedAccessor(ts.SaltID(accessor))
+func (ts *TokenStore) lookupByAccessor(accessor string, tainted bool) (accessorEntry, error) {
+ saltedID, err := ts.SaltID(accessor)
+ if err != nil {
+ return accessorEntry{}, err
+ }
+ return ts.lookupBySaltedAccessor(saltedID, tainted)
}
-func (ts *TokenStore) lookupBySaltedAccessor(saltedAccessor string) (accessorEntry, error) {
+func (ts *TokenStore) lookupBySaltedAccessor(saltedAccessor string, tainted bool) (accessorEntry, error) {
entry, err := ts.view.Get(accessorPrefix + saltedAccessor)
var aEntry accessorEntry
@@ -1082,8 +1196,12 @@ func (ts *TokenStore) lookupBySaltedAccessor(saltedAccessor string) (accessorEnt
err = jsonutil.DecodeJSON(entry.Value, &aEntry)
// If we hit an error, assume it's a pre-struct straight token ID
if err != nil {
- aEntry.TokenID = string(entry.Value)
- te, err := ts.lookupSalted(ts.SaltID(aEntry.TokenID), false)
+ saltedID, err := ts.SaltID(string(entry.Value))
+ if err != nil {
+ return accessorEntry{}, err
+ }
+
+ te, err := ts.lookupSalted(saltedID, tainted)
if err != nil {
return accessorEntry{}, fmt.Errorf("failed to look up token using accessor index: %s", err)
}
@@ -1093,6 +1211,7 @@ func (ts *TokenStore) lookupBySaltedAccessor(saltedAccessor string) (accessorEnt
// on lookup is nil, not an error, so we keep that behavior here to be
// safe...the token ID is simply not filled in.
if te != nil {
+ aEntry.TokenID = te.ID
aEntry.AccessorID = te.Accessor
}
}
@@ -1103,49 +1222,80 @@ func (ts *TokenStore) lookupBySaltedAccessor(saltedAccessor string) (accessorEnt
// handleTidy handles the cleaning up of leaked accessor storage entries and
// cleaning up of leases that are associated to tokens that are expired.
func (ts *TokenStore) handleTidy(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ var tidyErrors *multierror.Error
+
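+	// tidyLock doubles as an atomic flag (0 = idle, 1 = running) so that
+	// only one tidy operation can run at a time.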
+ if !atomic.CompareAndSwapInt64(&ts.tidyLock, 0, 1) {
+ ts.logger.Warn("token: tidy operation on tokens is already in progress")
+ return nil, fmt.Errorf("tidy operation on tokens is already in progress")
+ }
+
+ defer atomic.CompareAndSwapInt64(&ts.tidyLock, 1, 0)
+
+ ts.logger.Info("token: beginning tidy operation on tokens")
+ defer ts.logger.Info("token: finished tidy operation on tokens")
+
// List out all the accessors
saltedAccessorList, err := ts.view.List(accessorPrefix)
if err != nil {
- return nil, fmt.Errorf("failed to fetch accessor entries: %v", err)
+ return nil, fmt.Errorf("failed to fetch accessor index entries: %v", err)
}
- var tidyErrors *multierror.Error
-
// First, clean up secondary index entries that are no longer valid
parentList, err := ts.view.List(parentPrefix)
if err != nil {
return nil, fmt.Errorf("failed to fetch secondary index entries: %v", err)
}
+ var countParentList, deletedCountParentList int64
+
// Scan through the secondary index entries; if there is an entry
// with the token's salt ID at the end, remove it
for _, parent := range parentList {
- children, err := ts.view.List(parentPrefix + parent + "/")
+ children, err := ts.view.List(parentPrefix + parent)
if err != nil {
- tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read child index entry: %v", err))
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read secondary index: %v", err))
continue
}
for _, child := range children {
+ countParentList++
+ if countParentList%500 == 0 {
+ ts.logger.Info("token: checking validity of tokens in secondary index list", "progress", countParentList)
+ }
+
// Look up tainted entries so we can be sure that if this isn't
- // found, it doesn't exist
+ // found, it doesn't exist. Doing the following without locking
+ // since appropriate locks cannot be held with salted token IDs.
te, _ := ts.lookupSalted(child, true)
if te == nil {
- err = ts.view.Delete(parentPrefix + parent + "/" + child)
+ index := parentPrefix + parent + child
+ ts.logger.Trace("token: deleting invalid secondary index", "index", index)
+ err = ts.view.Delete(index)
if err != nil {
- tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete secondary index entry: %v", err))
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete secondary index: %v", err))
}
+ deletedCountParentList++
}
}
}
+ var countAccessorList,
+ deletedCountAccessorEmptyToken,
+ deletedCountAccessorInvalidToken,
+ deletedCountInvalidTokenInAccessor int64
+
// For each of the accessor, see if the token ID associated with it is
// a valid one. If not, delete the leases associated with that token
// and delete the accessor as well.
for _, saltedAccessor := range saltedAccessorList {
- accessorEntry, err := ts.lookupBySaltedAccessor(saltedAccessor)
+ countAccessorList++
+ if countAccessorList%500 == 0 {
+ ts.logger.Info("token: checking if accessors contain valid tokens", "progress", countAccessorList)
+ }
+
+ accessorEntry, err := ts.lookupBySaltedAccessor(saltedAccessor, true)
if err != nil {
- tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read the accessor entry: %v", err))
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read the accessor index: %v", err))
continue
}
@@ -1153,25 +1303,43 @@ func (ts *TokenStore) handleTidy(req *logical.Request, data *framework.FieldData
// in it. If not, it is an invalid accessor entry and needs to
// be deleted.
if accessorEntry.TokenID == "" {
+ index := accessorPrefix + saltedAccessor
// If deletion of accessor fails, move on to the next
// item since this is just a best-effort operation
- err = ts.view.Delete(accessorPrefix + saltedAccessor)
+ err = ts.view.Delete(index)
if err != nil {
- tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete the accessor entry: %v", err))
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete the accessor index: %v", err))
continue
}
+ deletedCountAccessorEmptyToken++
}
- saltedId := ts.SaltID(accessorEntry.TokenID)
+ lock := locksutil.LockForKey(ts.tokenLocks, accessorEntry.TokenID)
+ lock.RLock()
// Look up tainted variants so we only find entries that truly don't
// exist
+ saltedId, err := ts.SaltID(accessorEntry.TokenID)
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read salt id: %v", err))
+ lock.RUnlock()
+ continue
+ }
te, err := ts.lookupSalted(saltedId, true)
+ if err != nil {
+ tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to lookup tainted ID: %v", err))
+ lock.RUnlock()
+ continue
+ }
+
+ lock.RUnlock()
// If token entry is not found assume that the token is not valid any
// more and conclude that accessor, leases, and secondary index entries
// for this token should not exist as well.
if te == nil {
+ ts.logger.Info("token: deleting token with nil entry", "salted_token", saltedId)
+
// RevokeByToken expects a '*TokenEntry'. For the
// purposes of tidying, it is sufficient if the token
// entry only has ID set.
@@ -1186,26 +1354,31 @@ func (ts *TokenStore) handleTidy(req *logical.Request, data *framework.FieldData
tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to revoke leases of expired token: %v", err))
continue
}
+ deletedCountInvalidTokenInAccessor++
+
+ index := accessorPrefix + saltedAccessor
// If deletion of accessor fails, move on to the next item since
// this is just a best-effort operation. We do this last so that on
// next run if something above failed we still have the accessor
// entry to try again.
- err = ts.view.Delete(accessorPrefix + saltedAccessor)
+ err = ts.view.Delete(index)
if err != nil {
tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete accessor entry: %v", err))
continue
}
+ deletedCountAccessorInvalidToken++
}
}
- // Later request handling code seems to check if the type is multierror so
- // if we haven't added any errors we need to just return a normal nil error
- if tidyErrors == nil {
- return nil, nil
- }
+ ts.logger.Debug("token: number of tokens scanned in parent index list", "count", countParentList)
+ ts.logger.Debug("token: number of tokens revoked in parent index list", "count", deletedCountParentList)
+ ts.logger.Debug("token: number of accessors scanned", "count", countAccessorList)
+ ts.logger.Debug("token: number of deleted accessors which had empty tokens", "count", deletedCountAccessorEmptyToken)
+ ts.logger.Debug("token: number of revoked tokens which were invalid but present in accessors", "count", deletedCountInvalidTokenInAccessor)
+ ts.logger.Debug("token: number of deleted accessors which had invalid tokens", "count", deletedCountAccessorInvalidToken)
- return nil, tidyErrors
+ return nil, tidyErrors.ErrorOrNil()
}
// handleUpdateLookupAccessor handles the auth/token/lookup-accessor path for returning
@@ -1221,7 +1394,7 @@ func (ts *TokenStore) handleUpdateLookupAccessor(req *logical.Request, data *fra
urlaccessor = true
}
- aEntry, err := ts.lookupByAccessor(accessor)
+ aEntry, err := ts.lookupByAccessor(accessor, false)
if err != nil {
return nil, err
}
@@ -1275,7 +1448,7 @@ func (ts *TokenStore) handleUpdateRevokeAccessor(req *logical.Request, data *fra
urlaccessor = true
}
- aEntry, err := ts.lookupByAccessor(accessor)
+ aEntry, err := ts.lookupByAccessor(accessor, true)
if err != nil {
return nil, err
}
@@ -1833,7 +2006,10 @@ func (ts *TokenStore) handleLookup(
defer lock.RUnlock()
// Lookup the token
- saltedId := ts.SaltID(id)
+ saltedId, err := ts.SaltID(id)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
out, err := ts.lookupSalted(saltedId, true)
if err != nil {
@@ -2194,6 +2370,10 @@ func (ts *TokenStore) tokenStoreRoleCreateUpdate(
entry.PathSuffix = data.Get("path_suffix").(string)
}
+ if strings.Contains(entry.PathSuffix, "..") {
+ return logical.ErrorResponse(fmt.Sprintf("error registering path suffix: %s", consts.ErrPathContainsParentReferences)), nil
+ }
+
allowedPoliciesStr, ok := data.GetOk("allowed_policies")
if ok {
entry.AllowedPolicies = policyutil.SanitizePolicies(strings.Split(allowedPoliciesStr.(string), ","), policyutil.DoNotAddDefaultPolicy)
diff --git a/vendor/github.com/hashicorp/vault/vault/token_store_test.go b/vendor/github.com/hashicorp/vault/vault/token_store_test.go
index 7a84fe7..ca4cbb2 100644
--- a/vendor/github.com/hashicorp/vault/vault/token_store_test.go
+++ b/vendor/github.com/hashicorp/vault/vault/token_store_test.go
@@ -3,6 +3,7 @@ package vault
import (
"encoding/json"
"fmt"
+ "path"
"reflect"
"sort"
"strings"
@@ -11,6 +12,7 @@ import (
"time"
"github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/helper/locksutil"
"github.com/hashicorp/vault/logical"
)
@@ -54,7 +56,10 @@ func TestTokenStore_TokenEntryUpgrade(t *testing.T) {
t.Fatal(err)
}
- saltedId := ts.SaltID(entry.ID)
+ saltedId, err := ts.SaltID(entry.ID)
+ if err != nil {
+ t.Fatal(err)
+ }
path := lookupPrefix + saltedId
le := &logical.StorageEntry{
Key: path,
@@ -240,7 +245,7 @@ func TestTokenStore_AccessorIndex(t *testing.T) {
t.Fatalf("bad: %#v", out)
}
- aEntry, err := ts.lookupByAccessor(out.Accessor)
+ aEntry, err := ts.lookupByAccessor(out.Accessor, false)
if err != nil {
t.Fatalf("err: %s", err)
}
@@ -294,7 +299,11 @@ func TestTokenStore_HandleRequest_ListAccessors(t *testing.T) {
}
// Revoke root to make the number of accessors match
- ts.revokeSalted(ts.SaltID(root))
+ salted, err := ts.SaltID(root)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ts.revokeSalted(salted)
req := logical.TestRequest(t, logical.ListOperation, "accessors")
@@ -312,20 +321,24 @@ func TestTokenStore_HandleRequest_ListAccessors(t *testing.T) {
if len(keys) != len(testKeys) {
t.Fatalf("wrong number of accessors found")
}
- if len(resp.Warnings()) != 0 {
- t.Fatalf("got warnings:\n%#v", resp.Warnings())
+ if len(resp.Warnings) != 0 {
+ t.Fatalf("got warnings:\n%#v", resp.Warnings)
}
// Test upgrade from old struct method of accessor storage (of token id)
for _, accessor := range keys {
- aEntry, err := ts.lookupByAccessor(accessor)
+ aEntry, err := ts.lookupByAccessor(accessor, false)
if err != nil {
t.Fatal(err)
}
if aEntry.TokenID == "" || aEntry.AccessorID == "" {
t.Fatalf("error, accessor entry looked up is empty, but no error thrown")
}
- path := accessorPrefix + ts.SaltID(accessor)
+ salted, err := ts.SaltID(accessor)
+ if err != nil {
+ t.Fatal(err)
+ }
+ path := accessorPrefix + salted
le := &logical.StorageEntry{Key: path, Value: []byte(aEntry.TokenID)}
if err := ts.view.Put(le); err != nil {
t.Fatalf("failed to persist accessor index entry: %v", err)
@@ -347,12 +360,12 @@ func TestTokenStore_HandleRequest_ListAccessors(t *testing.T) {
if len(keys) != len(testKeys) {
t.Fatalf("wrong number of accessors found")
}
- if len(resp.Warnings()) != 0 {
- t.Fatalf("got warnings:\n%#v", resp.Warnings())
+ if len(resp.Warnings) != 0 {
+ t.Fatalf("got warnings:\n%#v", resp.Warnings)
}
for _, accessor := range keys2 {
- aEntry, err := ts.lookupByAccessor(accessor)
+ aEntry, err := ts.lookupByAccessor(accessor, false)
if err != nil {
t.Fatal(err)
}
@@ -437,6 +450,8 @@ func TestTokenStore_CreateLookup(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
+ ts2.SetExpirationManager(c.expiration)
+
if err := ts2.Initialize(); err != nil {
t.Fatalf("err: %v", err)
}
@@ -465,6 +480,9 @@ func TestTokenStore_CreateLookup_ProvidedID(t *testing.T) {
if ent.ID != "foobarbaz" {
t.Fatalf("bad: ent.ID: expected:\"foobarbaz\"\n actual:%s", ent.ID)
}
+ if err := ts.create(ent); err == nil {
+ t.Fatal("expected error creating token with the same ID")
+ }
out, err := ts.Lookup(ent.ID)
if err != nil {
@@ -479,6 +497,8 @@ func TestTokenStore_CreateLookup_ProvidedID(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
+ ts2.SetExpirationManager(c.expiration)
+
if err := ts2.Initialize(); err != nil {
t.Fatalf("err: %v", err)
}
@@ -493,6 +513,73 @@ func TestTokenStore_CreateLookup_ProvidedID(t *testing.T) {
}
}
+func TestTokenStore_CreateLookup_ExpirationInRestoreMode(t *testing.T) {
+ _, ts, _, _ := TestCoreWithTokenStore(t)
+
+ ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
+ if err := ts.create(ent); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if ent.ID == "" {
+ t.Fatalf("missing ID")
+ }
+
+ // Replace the lease with a lease with an expire time in the past
+ saltedID, err := ts.SaltID(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Create a lease entry
+ leaseID := path.Join(ent.Path, saltedID)
+ le := &leaseEntry{
+ LeaseID: leaseID,
+ ClientToken: ent.ID,
+ Path: ent.Path,
+ IssueTime: time.Now(),
+ ExpireTime: time.Now().Add(1 * time.Hour),
+ }
+ if err := ts.expiration.persistEntry(le); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ out, err := ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if !reflect.DeepEqual(out, ent) {
+ t.Fatalf("bad: expected:%#v\nactual:%#v", ent, out)
+ }
+
+ // Set to expired lease time
+ le.ExpireTime = time.Now().Add(-1 * time.Hour)
+ if err := ts.expiration.persistEntry(le); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ err = ts.expiration.Stop()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Reset expiration manager to restore mode
+ ts.expiration.restoreModeLock.Lock()
+ ts.expiration.restoreMode = 1
+ ts.expiration.restoreLocks = locksutil.CreateLocks()
+ ts.expiration.quitCh = make(chan struct{})
+ ts.expiration.restoreModeLock.Unlock()
+
+ // Test that the token lookup does not return the token entry due to the
+ // expired lease
+ out, err = ts.Lookup(ent.ID)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if out != nil {
+ t.Fatalf("lease expired, no token expected: %#v", out)
+ }
+}
+
func TestTokenStore_UseToken(t *testing.T) {
_, ts, _, root := TestCoreWithTokenStore(t)
@@ -605,7 +692,10 @@ func TestTokenStore_Revoke_Leases(t *testing.T) {
// Mount a noop backend
noop := &NoopBackend{}
- ts.expiration.router.Mount(noop, "", &MountEntry{UUID: ""}, view)
+ err := ts.expiration.router.Mount(noop, "noop/", &MountEntry{UUID: "noopuuid", Accessor: "noopaccessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}}
if err := ts.create(ent); err != nil {
@@ -615,7 +705,7 @@ func TestTokenStore_Revoke_Leases(t *testing.T) {
// Register a lease
req := &logical.Request{
Operation: logical.ReadOperation,
- Path: "secret/foo",
+ Path: "noop/foo",
ClientToken: ent.ID,
}
resp := &logical.Response{
@@ -2347,7 +2437,7 @@ func TestTokenStore_RoleExplicitMaxTTL(t *testing.T) {
if err != nil {
t.Fatalf("expected an error")
}
- if len(resp.Warnings()) == 0 {
+ if len(resp.Warnings) == 0 {
t.Fatalf("expected a warning")
}
@@ -2513,9 +2603,14 @@ func TestTokenStore_RoleExplicitMaxTTL(t *testing.T) {
t.Fatalf("expected error")
}
+ time.Sleep(2 * time.Second)
+
req.Operation = logical.ReadOperation
req.Path = "auth/token/lookup-self"
resp, err = core.HandleRequest(req)
+ if resp != nil && err == nil {
+ t.Fatalf("expected error, response is %#v", *resp)
+ }
if err == nil {
t.Fatalf("expected error")
}
@@ -3098,7 +3193,10 @@ func TestTokenStore_RevokeUseCountToken(t *testing.T) {
}
tut := resp.Auth.ClientToken
- saltTut := ts.SaltID(tut)
+ saltTut, err := ts.SaltID(tut)
+ if err != nil {
+ t.Fatal(err)
+ }
te, err := ts.lookupSalted(saltTut, false)
if err != nil {
t.Fatal(err)
@@ -3293,7 +3391,10 @@ func TestTokenStore_HandleTidyCase1(t *testing.T) {
// cubbyhole and by not deleting its secondary index, its accessor and
// associated leases.
- saltedTut := ts.SaltID(tut)
+ saltedTut, err := ts.SaltID(tut)
+ if err != nil {
+ t.Fatal(err)
+ }
_, err = ts.lookupSalted(saltedTut, true)
if err != nil {
t.Fatalf("failed to lookup token: %v", err)
@@ -3363,7 +3464,10 @@ func TestTokenStore_TidyLeaseRevocation(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view)
+ err = exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID, Accessor: "awsaccessor"}, view)
+ if err != nil {
+ t.Fatal(err)
+ }
// Create new token
root, err := ts.rootToken()
@@ -3429,7 +3533,10 @@ func TestTokenStore_TidyLeaseRevocation(t *testing.T) {
}
// Now, delete the token entry. The leases should still exist.
- saltedTut := ts.SaltID(tut)
+ saltedTut, err := ts.SaltID(tut)
+ if err != nil {
+ t.Fatal(err)
+ }
te, err := ts.lookupSalted(saltedTut, true)
if err != nil {
t.Fatalf("failed to lookup token: %v", err)
diff --git a/vendor/github.com/hashicorp/vault/vault/wrapping.go b/vendor/github.com/hashicorp/vault/vault/wrapping.go
index 46409c3..5171593 100644
--- a/vendor/github.com/hashicorp/vault/vault/wrapping.go
+++ b/vendor/github.com/hashicorp/vault/vault/wrapping.go
@@ -115,6 +115,10 @@ func (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*l
resp.WrapInfo.Token = te.ID
resp.WrapInfo.CreationTime = creationTime
+ // If this is not a rewrap, store the request path as creation_path
+ if req.Path != "sys/wrapping/rewrap" {
+ resp.WrapInfo.CreationPath = req.Path
+ }
// This will only be non-nil if this response contains a token, so in that
// case put the accessor in the wrap info.
@@ -200,6 +204,12 @@ func (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*l
"creation_ttl": resp.WrapInfo.TTL,
"creation_time": creationTime,
}
+ // Store creation_path if not a rewrap
+ if req.Path != "sys/wrapping/rewrap" {
+ cubbyReq.Data["creation_path"] = req.Path
+ } else {
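+		// On rewrap, carry forward the creation path already recorded in
+		// the response wrap info.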
+ cubbyReq.Data["creation_path"] = resp.WrapInfo.CreationPath
+ }
cubbyResp, err = c.router.Route(cubbyReq)
if err != nil {
// Revoke since it's not yet being tracked for expiration
@@ -233,6 +243,7 @@ func (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*l
return nil, nil
}
+// ValidateWrappingToken checks whether a token is a wrapping token.
func (c *Core) ValidateWrappingToken(req *logical.Request) (bool, error) {
if req == nil {
return false, fmt.Errorf("invalid request")
diff --git a/vendor/github.com/hashicorp/vault/version/version_base.go b/vendor/github.com/hashicorp/vault/version/version_base.go
index bd1d2ca..07173e6 100644
--- a/vendor/github.com/hashicorp/vault/version/version_base.go
+++ b/vendor/github.com/hashicorp/vault/version/version_base.go
@@ -4,7 +4,7 @@ package version
func init() {
// The main version number that is being run at the moment.
- Version = "0.7.0"
+ Version = "0.8.3"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
diff --git a/vendor/github.com/hashicorp/vault/website/Gemfile b/vendor/github.com/hashicorp/vault/website/Gemfile
index 405a8c9..a2c4d15 100644
--- a/vendor/github.com/hashicorp/vault/website/Gemfile
+++ b/vendor/github.com/hashicorp/vault/website/Gemfile
@@ -1,3 +1,3 @@
source "https://rubygems.org"
-gem "middleman-hashicorp", "0.3.22"
+gem "middleman-hashicorp", "0.3.28"
diff --git a/vendor/github.com/hashicorp/vault/website/Gemfile.lock b/vendor/github.com/hashicorp/vault/website/Gemfile.lock
index 229218a..dea1709 100644
--- a/vendor/github.com/hashicorp/vault/website/Gemfile.lock
+++ b/vendor/github.com/hashicorp/vault/website/Gemfile.lock
@@ -6,7 +6,7 @@ GEM
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
- autoprefixer-rails (6.7.7.1)
+ autoprefixer-rails (7.1.1.2)
execjs
bootstrap-sass (3.3.7)
autoprefixer-rails (>= 5.2.1)
@@ -42,14 +42,15 @@ GEM
eventmachine (1.2.3)
execjs (2.7.0)
ffi (1.9.18)
- haml (4.0.7)
+ haml (5.0.1)
+ temple (>= 0.8.0)
tilt
hike (1.2.3)
hooks (0.4.1)
uber (~> 0.0.14)
http_parser.rb (0.6.0)
i18n (0.7.0)
- json (2.0.3)
+ json (2.1.0)
kramdown (1.13.2)
listen (3.0.8)
rb-fsevent (~> 0.9, >= 0.9.4)
@@ -77,7 +78,7 @@ GEM
rack (>= 1.4.5, < 2.0)
thor (>= 0.15.2, < 2.0)
tilt (~> 1.4.1, < 2.0)
- middleman-hashicorp (0.3.22)
+ middleman-hashicorp (0.3.28)
bootstrap-sass (~> 3.3)
builder (~> 3.2)
middleman (~> 3.4)
@@ -100,28 +101,28 @@ GEM
mime-types (3.1)
mime-types-data (~> 3.2015)
mime-types-data (3.2016.0521)
- mini_portile2 (2.1.0)
- minitest (5.10.1)
+ mini_portile2 (2.2.0)
+ minitest (5.10.2)
multi_json (1.12.1)
- nokogiri (1.7.1)
- mini_portile2 (~> 2.1.0)
+ nokogiri (1.8.0)
+ mini_portile2 (~> 2.2.0)
padrino-helpers (0.12.8.1)
i18n (~> 0.6, >= 0.6.7)
padrino-support (= 0.12.8.1)
tilt (~> 1.4.1)
padrino-support (0.12.8.1)
activesupport (>= 3.1)
- rack (1.6.5)
+ rack (1.6.8)
rack-livereload (0.3.16)
rack
rack-test (0.6.3)
rack (>= 1.0)
rb-fsevent (0.9.8)
- rb-inotify (0.9.8)
- ffi (>= 0.5.0)
+ rb-inotify (0.9.10)
+ ffi (>= 0.5.0, < 2)
redcarpet (3.4.0)
- rouge (2.0.7)
- sass (3.4.23)
+ rouge (2.1.1)
+ sass (3.4.24)
sprockets (2.12.4)
hike (~> 1.2)
multi_json (~> 1.0)
@@ -132,26 +133,27 @@ GEM
sprockets-sass (1.3.1)
sprockets (~> 2.0)
tilt (~> 1.1)
+ temple (0.8.0)
thor (0.19.4)
thread_safe (0.3.6)
tilt (1.4.1)
turbolinks (5.0.1)
turbolinks-source (~> 5)
- turbolinks-source (5.0.0)
+ turbolinks-source (5.0.3)
tzinfo (1.2.3)
thread_safe (~> 0.1)
uber (0.0.15)
uglifier (2.7.2)
execjs (>= 0.3.0)
json (>= 1.8.0)
- xpath (2.0.0)
+ xpath (2.1.0)
nokogiri (~> 1.3)
PLATFORMS
ruby
DEPENDENCIES
- middleman-hashicorp (= 0.3.22)
+ middleman-hashicorp (= 0.3.28)
BUNDLED WITH
- 1.14.6
+ 1.15.1
diff --git a/vendor/github.com/hashicorp/vault/website/Makefile b/vendor/github.com/hashicorp/vault/website/Makefile
index d7620d1..4d3d361 100644
--- a/vendor/github.com/hashicorp/vault/website/Makefile
+++ b/vendor/github.com/hashicorp/vault/website/Makefile
@@ -1,4 +1,4 @@
-VERSION?="0.3.22"
+VERSION?="0.3.28"
build:
@echo "==> Starting build in Docker..."
diff --git a/vendor/github.com/hashicorp/vault/website/config.rb b/vendor/github.com/hashicorp/vault/website/config.rb
index 1ca9c0c..a961753 100644
--- a/vendor/github.com/hashicorp/vault/website/config.rb
+++ b/vendor/github.com/hashicorp/vault/website/config.rb
@@ -2,7 +2,7 @@ set :base_url, "https://www.vaultproject.io/"
activate :hashicorp do |h|
h.name = "vault"
- h.version = "0.7.0"
+ h.version = "0.8.3"
h.github_slug = "hashicorp/vault"
h.website_root = "website"
end
diff --git a/vendor/github.com/hashicorp/vault/website/data/news.yml b/vendor/github.com/hashicorp/vault/website/data/news.yml
new file mode 100644
index 0000000..1958547
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/data/news.yml
@@ -0,0 +1,23 @@
+default_link_text: "Read more"
+posts:
+ -
+ title: "Vault 0.8.1 released"
+ body: >-
+ We are proud to announce the release of HashiCorp Vault 0.8.1. This
+ version includes Google Cloud IAM authentication, Oracle database
+ backends, self-reloading plugins, and much more!
+ link_url: "https://www.hashicorp.com/blog/vault-0-8-1/"
+ -
+ title: "Vault 0.8.0 released"
+ body: >-
+ We are proud to announce the release of HashiCorp Vault 0.8. This version
+ of Vault brings secure plugins, disaster recovery, mount filters for
+ replication, and MFA on paths.
+ link_url: "https://www.hashicorp.com/blog/vault-0-8/"
+ -
+ title: "Why New Relic uses Vault for secrets management"
+ body: >-
+ As New Relic's systems and infrastructure grew, they faced challenges with
+ securely storing and managing credentials. Vault provides them with a
+ consistent approach to manage secrets and credentials.
+ link_url: "https://www.hashicorp.com/blog/hashicorp-vault-helps-new-relic-manage-secrets-for-their-digital-intelligence-platform/"
diff --git a/vendor/github.com/hashicorp/vault/website/packer.json b/vendor/github.com/hashicorp/vault/website/packer.json
index 35de632..fd2618f 100644
--- a/vendor/github.com/hashicorp/vault/website/packer.json
+++ b/vendor/github.com/hashicorp/vault/website/packer.json
@@ -8,17 +8,14 @@
"builders": [
{
"type": "docker",
- "image": "hashicorp/middleman-hashicorp:0.3.22",
+ "image": "hashicorp/middleman-hashicorp:0.3.28",
"discard": "true",
- "run_command": ["-d", "-i", "-t", "{{ .Image }}", "/bin/sh"]
+ "volumes": {
+ "{{ pwd }}": "/website"
+ }
}
],
"provisioners": [
- {
- "type": "file",
- "source": ".",
- "destination": "/website"
- },
{
"type": "shell",
"environment_vars": [
@@ -30,7 +27,7 @@
"inline": [
"bundle check || bundle install",
"bundle exec middleman build",
- "/bin/sh ./scripts/deploy.sh"
+ "/bin/bash ./scripts/deploy.sh"
]
}
]
diff --git a/vendor/github.com/hashicorp/vault/website/redirects.txt b/vendor/github.com/hashicorp/vault/website/redirects.txt
new file mode 100644
index 0000000..fe5d039
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/redirects.txt
@@ -0,0 +1,100 @@
+#
+# REDIRECTS FILE
+#
+# This is a sample redirect file. Redirects allow individual projects to add
+# their own redirect rules in a declarative manner using Fastly edge
+# dictionaries.
+#
+# FORMAT
+#
+# Redirects are in the following format. There must be at least one space
+# between the original path and the new path, and there must be exactly two
+# entries per line.
+#
+# /original-path /new-path
+#
+# GLOB MATCHING
+#
+# Because of the way lookup tables work, there is no support for glob matching.
+# Fastly does not provide a way to iterate through the lookup table, so it is
+# not possible to run through the table and find anything that matches. As
+# such, URLs must match directly.
+#
+# More complex redirects are possible, but must be added directly to the
+# configuration. Please contact the release engineering team for assistance.
+#
+# DELETING
+#
+# Deleting items is not supported at this time. To delete an item, contact the
+# release engineering team and they will delete the dictionary item.
+#
+# MISC
+#
+# - Blank lines are ignored
+# - Comments are hash-style
+# - URLs are limited to 256 characters
+# - Items are case-sensitive (please use all lowercase)
+#
+
+/api/secret/generic/index.html /api/secret/kv/index.html
+/api/system/renew.html /api/system/leases.html
+/api/system/revoke.html /api/system/leases.html
+/api/system/revoke-force.html /api/system/leases.html
+/api/system/revoke-prefix.html /api/system/leases.html
+/docs/config/index.html /docs/configuration/index.html
+/docs/auth/aws-ec2.html /docs/auth/aws.html
+/docs/install/install.html /docs/install/index.html
+/docs/install/upgrade.html /guides/upgrading/index.html
+/docs/install/upgrade-to-0.5.html /guides/upgrading/upgrade-to-0.5.0.html
+/docs/install/upgrade-to-0.5.1.html /guides/upgrading/upgrade-to-0.5.1.html
+/docs/install/upgrade-to-0.6.html /guides/upgrading/upgrade-to-0.6.0.html
+/docs/install/upgrade-to-0.6.1.html /guides/upgrading/upgrade-to-0.6.1.html
+/docs/install/upgrade-to-0.6.2.html /guides/upgrading/upgrade-to-0.6.2.html
+/docs/http/sys-init.html /api/system/init.html
+/docs/http/sys-seal-status.html /api/system/seal-status.html
+/docs/http/sys-seal.html /api/system/seal.html
+/docs/http/sys-unseal.html /api/system/unseal.html
+/docs/http/sys-mounts.html /api/system/mounts.html
+/docs/http/sys-remount.html /api/system/remount.html
+/docs/http/sys-auth.html /api/system/auth.html
+/docs/http/sys-policy.html /api/system/policy.html
+/docs/http/sys-audit.html /api/system/audit.html
+/docs/http/sys-renew.html /api/system/leases.html
+/docs/http/sys-revoke.html /api/system/leases.html
+/docs/http/sys-revoke-prefix.html /api/system/leases.html
+/docs/http/sys-leader.html /api/system/leader.html
+/docs/http/sys-key-status.html /api/system/key-status.html
+/docs/http/sys-rekey.html /api/system/rekey.html
+/docs/http/sys-rotate.html /api/system/rotate.html
+/docs/http/sys-raw.html /api/system/raw.html
+/docs/http/sys-health.html /api/system/health.html
+/docs/guides/generate-root.html /guides/generate-root.html
+/docs/guides/index.html /guides/index.html
+/docs/guides/production.html /guides/production.html
+/docs/guides/replication.html /guides/replication.html
+/docs/guides/upgrading/index.html /guides/upgrading/index.html
+/docs/guides/upgrading/upgrade-to-0.5.0.html /guides/upgrading/upgrade-to-0.5.0.html
+/docs/guides/upgrading/upgrade-to-0.5.1.html /guides/upgrading/upgrade-to-0.5.1.html
+/docs/guides/upgrading/upgrade-to-0.6.0.html /guides/upgrading/upgrade-to-0.6.0.html
+/docs/guides/upgrading/upgrade-to-0.6.1.html /guides/upgrading/upgrade-to-0.6.1.html
+/docs/guides/upgrading/upgrade-to-0.6.2.html /guides/upgrading/upgrade-to-0.6.2.html
+/docs/guides/upgrading/upgrade-to-0.6.3.html /guides/upgrading/upgrade-to-0.6.3.html
+/docs/guides/upgrading/upgrade-to-0.6.4.html /guides/upgrading/upgrade-to-0.6.4.html
+/docs/guides/upgrading/upgrade-to-0.7.0.html /guides/upgrading/upgrade-to-0.7.0.html
+/docs/secrets/custom.html /docs/plugin/index.html
+/docs/secrets/generic/index.html /docs/secrets/kv/index.html
+/intro/getting-started/acl.html /intro/getting-started/policies.html
+
+/docs/vault-enterprise/index.html /docs/enterprise/index.html
+/docs/vault-enterprise/replication/index.html /docs/enterprise/replication/index.html
+/docs/vault-enterprise/hsm/index.html /docs/enterprise/hsm/index.html
+/docs/vault-enterprise/hsm/behavior.html /docs/enterprise/hsm/behavior.html
+/docs/vault-enterprise/hsm/configuration.html /docs/enterprise/hsm/configuration.html
+/docs/vault-enterprise/hsm/security.html /docs/enterprise/hsm/security.html
+/docs/vault-enterprise/ui/index.html /docs/enterprise/ui/index.html
+/docs/vault-enterprise/identity/index.html /docs/enterprise/identity/index.html
+/docs/vault-enterprise/mfa/index.html /docs/enterprise/mfa/index.html
+/docs/vault-enterprise/mfa/mfa-duo.html /docs/enterprise/mfa/mfa-duo.html
+/docs/vault-enterprise/mfa/mfa-okta.html /docs/enterprise/mfa/mfa-okta.html
+/docs/vault-enterprise/mfa/mfa-pingid.html /docs/enterprise/mfa/mfa-pingid.html
+/docs/vault-enterprise/mfa/mfa-totp.html /docs/enterprise/mfa/mfa-totp.html
diff --git a/vendor/github.com/hashicorp/vault/website/scripts/deploy.sh b/vendor/github.com/hashicorp/vault/website/scripts/deploy.sh
index 383ad8a..689ab1c 100755
--- a/vendor/github.com/hashicorp/vault/website/scripts/deploy.sh
+++ b/vendor/github.com/hashicorp/vault/website/scripts/deploy.sh
@@ -1,9 +1,10 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
PROJECT="vault"
PROJECT_URL="www.vaultproject.io"
FASTLY_SERVICE_ID="7GrxRJP3PVBuqQbyxYQ0MV"
+FASTLY_DICTIONARY_ID="4uTFhCUtoa1cV9DuXeC1Fo"
# Ensure the proper AWS environment variables are set
if [ -z "$AWS_ACCESS_KEY_ID" ]; then
@@ -93,6 +94,75 @@ if [ -z "$NO_UPLOAD" ]; then
modify "s3://hc-sites/$PROJECT/latest/"
fi
+# Add redirects if they exist
+if [ -z "$NO_REDIRECTS" ] && [ -f "./redirects.txt" ]; then
+ echo "Adding redirects..."
+ fields=()
+ while read -r line; do
+ [[ "$line" =~ ^#.* ]] && continue
+ [[ -z "$line" ]] && continue
+
+ # Read fields
+ IFS=" " read -ra parts <<<"$line"
+ fields+=("${parts[@]}")
+ done < "./redirects.txt"
+
+ # Check we have pairs
+ if [ $((${#fields[@]} % 2)) -ne 0 ]; then
+ echo "Bad redirects (not an even number)!"
+ exit 1
+ fi
+
+  # Check we don't have more than 1000 entries (the comparison below says
+  # 2000 because each redirect line contributes two fields).
+  if [ "${#fields[@]}" -gt 2000 ]; then
+ echo "More than 1000 entries!"
+ exit 1
+ fi
+
+ # Validations
+ for field in "${fields[@]}"; do
+ if [ "${#field}" -gt 256 ]; then
+ echo "'$field' is > 256 characters!"
+ exit 1
+ fi
+
+ if [ "${field:0:1}" != "/" ]; then
+ echo "'$field' does not start with /!"
+ exit 1
+ fi
+ done
+
+ # Build the payload for single-request updates.
+ jq_args=()
+ jq_query="."
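+  # Each redirect pair becomes an "upsert" item in the PATCH payload for the
+  # Fastly edge dictionary; --arg keeps keys and values safely quoted.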
+ for (( i=0; i<${#fields[@]}; i+=2 )); do
+ original="${fields[i]}"
+ redirect="${fields[i+1]}"
+ echo "Redirecting ${original} -> ${redirect}"
+ jq_args+=(--arg "key$((i/2))" "${original}")
+ jq_args+=(--arg "value$((i/2))" "${redirect}")
+ jq_query+="| .items |= (. + [{op: \"upsert\", item_key: \$key$((i/2)), item_value: \$value$((i/2))}])"
+ done
+
+ # Do not post empty items (the API gets sad)
+ if [ "${#jq_args[@]}" -ne 0 ]; then
+ json="$(jq "${jq_args[@]}" "${jq_query}" <<<'{"items": []}')"
+
+ # Post the JSON body
+ curl \
+ --fail \
+ --silent \
+ --output /dev/null \
+ --request "PATCH" \
+ --header "Fastly-Key: $FASTLY_API_KEY" \
+ --header "Content-type: application/json" \
+ --header "Accept: application/json" \
+      --data "$json" \
+ "https://api.fastly.com/service/$FASTLY_SERVICE_ID/dictionary/$FASTLY_DICTIONARY_ID/items"
+ fi
+fi
+
# Perform a purge of the surrogate key.
if [ -z "$NO_PURGE" ]; then
echo "Purging Fastly cache..."
@@ -118,8 +188,13 @@ if [ -z "$NO_WARM" ]; then
echo "wget --recursive --delete-after https://$PROJECT_URL/"
echo ""
wget \
- --recursive \
--delete-after \
- --quiet \
+ --level inf \
+ --no-directories \
+ --no-host-directories \
+ --no-verbose \
+ --page-requisites \
+ --recursive \
+ --spider \
"https://$PROJECT_URL/"
fi
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/app-id/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/app-id/index.html.md
new file mode 100644
index 0000000..4d509f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/app-id/index.html.md
@@ -0,0 +1,17 @@
+---
+layout: "api"
+page_title: "App ID Auth Backend - HTTP API"
+sidebar_current: "docs-http-auth-appid"
+description: |-
+ This is the API documentation for the Vault App ID authentication backend.
+---
+
+# App ID Auth Backend HTTP API (DEPRECATED)
+
+This is the API documentation for the Vault App ID authentication backend. For
+general information about the usage and operation of the App ID backend, please
+see the [Vault App ID backend documentation](/docs/auth/app-id.html).
+
+This documentation assumes the App ID backend is mounted at the `/auth/app-id`
+path in Vault. Since it is possible to mount auth backends at any location,
+please update your API calls accordingly.
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/approle/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/approle/index.html.md
new file mode 100644
index 0000000..2e10bee
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/approle/index.html.md
@@ -0,0 +1,632 @@
+---
+layout: "api"
+page_title: "AppRole Auth Backend - HTTP API"
+sidebar_current: "docs-http-auth-approle"
+description: |-
+ This is the API documentation for the Vault AppRole authentication backend.
+---
+
+# AppRole Auth Backend HTTP API
+
+This is the API documentation for the Vault AppRole authentication backend. For
+general information about the usage and operation of the AppRole backend, please
+see the [Vault AppRole backend documentation](/docs/auth/approle.html).
+
+This documentation assumes the AppRole backend is mounted at the `/auth/approle`
+path in Vault. Since it is possible to mount auth backends at any location,
+please update your API calls accordingly.
+
+## List Roles
+
+This endpoint returns a list of the existing AppRoles in the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/approle/role` | `200 application/json` |
+| `GET` | `/auth/approle/role?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/approle/role
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "keys": [
+ "dev",
+ "prod",
+ "test"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Create New AppRole
+
+Creates a new AppRole or updates an existing AppRole. This endpoint
+supports both `create` and `update` capabilities. There can be one or more
+constraints enabled on the role. It is required to have at least one of them
+enabled while creating or updating a role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/approle/role/:role_name` | `204 (empty body)` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+- `bind_secret_id` `(bool: true)` - Require `secret_id` to be presented when
+ logging in using this AppRole.
+- `bound_cidr_list` `(array: [])` - Comma-separated list of CIDR blocks; if set,
+ specifies blocks of IP addresses which can perform the login operation.
+- `policies` `(array: [])` - Comma-separated list of policies set on tokens
+ issued via this AppRole.
+- `secret_id_num_uses` `(integer: 0)` - Number of times any particular SecretID
+ can be used to fetch a token from this AppRole, after which the SecretID will
+ expire. A value of zero will allow unlimited uses.
+- `secret_id_ttl` `(string: "")` - Duration in either an integer number of
+ seconds (`3600`) or an integer time unit (`60m`) after which any SecretID
+ expires.
+- `token_num_uses` `(integer: 0)` - Number of times issued tokens can be used.
+ A value of 0 means unlimited uses.
+- `token_ttl` `(string: "")` - Duration in either an integer number of seconds
+ (`3600`) or an integer time unit (`60m`) to set as the TTL for issued tokens
+ and at renewal time.
+- `token_max_ttl` `(string: "")` - Duration in either an integer number of
+ seconds (`3600`) or an integer time unit (`60m`) after which the issued token
+ can no longer be renewed.
+- `period` `(string: "")` - Duration in either an integer number of seconds
+ (`3600`) or an integer time unit (`60m`). If set, the token generated using
+ this AppRole is a _periodic_ token; so long as it is renewed it never expires,
+ but the TTL set on the token at each renewal is fixed to the value specified
+ here. If this value is modified, the token will pick up the new value at its
+ next renewal.
+
+### Sample Payload
+
+```json
+{
+ "token_ttl": "10m",
+ "token_max_ttl": "15m",
+ "policies": [
+ "default"
+ ],
+ "period": 0,
+ "bind_secret_id": true
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/approle/role/application1
+```
+
+## Read AppRole
+
+Reads the properties of an existing AppRole.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/approle/role/:role_name` | `200 application/json` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/approle/role/application1
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "token_ttl": 1200,
+ "token_max_ttl": 1800,
+ "secret_id_ttl": 600,
+ "secret_id_num_uses": 40,
+ "policies": [
+ "default"
+ ],
+ "period": 0,
+ "bind_secret_id": true,
+ "bound_cidr_list": ""
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Delete AppRole
+
+Deletes an existing AppRole from the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/approle/role/:role_name` | `204 (empty body)` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/approle/role/application1
+```
+
+## Read AppRole Role ID
+
+Reads the RoleID of an existing AppRole.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/approle/role/:role_name/role-id` | `200 application/json` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/approle/role/application1/role-id
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "role_id": "e5a7b66e-5d08-da9c-7075-71984634b882"
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Update AppRole Role ID
+
+Updates the RoleID of an existing AppRole to a custom value.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/approle/role/:role_name/role-id` | `204 (empty body)` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+- `role_id` `(string: <required>)` - Value to be set as RoleID.
+
+### Sample Payload
+
+```json
+{
+ "role_id": "custom-role-id"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/approle/role/application1/role-id
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "role_id": "e5a7b66e-5d08-da9c-7075-71984634b882"
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Generate New Secret ID
+
+Generates and issues a new SecretID on an existing AppRole. Similar to
+tokens, the response will also contain a `secret_id_accessor` value which can
+be used to read the properties of the SecretID without divulging the SecretID
+itself, and also to delete the SecretID from the AppRole.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/approle/role/:role_name/secret-id` | `200 application/json` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+- `metadata` `(map: {})` - Metadata to be tied to the SecretID. This should be
+ a JSON-formatted string containing the metadata in key-value pairs. This
+ metadata will be set on tokens issued with this SecretID, and is logged in
+ audit logs _in plaintext_.
+- `cidr_list` `(string: "")` - Comma-separated list of CIDR blocks enforcing
+  secret IDs to be used from a specific set of IP addresses. If
+  `bound_cidr_list` is set on the role, then the list of CIDR blocks listed
+  here should be a subset of the CIDR blocks listed on the role.
+
+### Sample Payload
+
+```json
+{
+ "metadata": {
+ "tag1": "production"
+ }
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/approle/role/application1/secret-id
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
+ "secret_id": "841771dc-11c9-bbc7-bcac-6a3945a69cd9"
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
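+
+Since the SecretID is sensitive, it is common to deliver it inside a
+response-wrapping token rather than in plaintext. A minimal sketch, assuming
+standard Vault response wrapping via the `X-Vault-Wrap-TTL` header:
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --header "X-Vault-Wrap-TTL: 60s" \
+    --request POST \
+    https://vault.rocks/v1/auth/approle/role/application1/secret-id
+```
+
+The SecretID is then carried inside the returned `wrap_info` token, which the
+consuming application can unwrap exactly once via the `sys/wrapping/unwrap`
+endpoint.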
+
+## List Secret ID Accessors
+
+Lists the accessors of all the SecretIDs issued against the AppRole.
+This includes the accessors for "custom" SecretIDs as well.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/approle/role/:role_name/secret-id` | `200 application/json` |
+| `GET` | `/auth/approle/role/:role_name/secret-id?list=true` | `200 application/json` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/approle/role/application1/secret-id
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "keys": [
+ "ce102d2a-8253-c437-bf9a-aceed4241491",
+ "a1c8dee4-b869-e68d-3520-2040c1a0849a",
+ "be83b7e2-044c-7244-07e1-47560ca1c787",
+ "84896a0c-1347-aa90-a4f6-aca8b7558780",
+ "239b1328-6523-15e7-403a-a48038cdc45a"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Read AppRole Secret ID
+
+Reads out the properties of a SecretID.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/approle/role/:role_name/secret-id/lookup` | `200 application/json` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+- `secret_id` `(string: <required>)` - Secret ID attached to the role.
+
+### Sample Payload
+
+```json
+{
+ "secret_id": "84896a0c-1347-aa90-a4f6-aca8b7558780"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+    --data @payload.json \
+ https://vault.rocks/v1/auth/approle/role/application1/secret-id/lookup
+```
+
+## Destroy AppRole Secret ID
+
+Destroys an AppRole secret ID.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/approle/role/:role_name/secret-id/destroy` | `204 (empty body)` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+- `secret_id` `(string: <required>)` - Secret ID attached to the role.
+
+### Sample Payload
+
+```json
+{
+ "secret_id": "84896a0c-1347-aa90-a4f6-aca8b7558780"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+    --data @payload.json \
+ https://vault.rocks/v1/auth/approle/role/application1/secret-id/destroy
+```
+
+## Read AppRole Secret ID Accessor
+
+Reads out the properties of a SecretID by looking up its accessor.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/approle/role/:role_name/secret-id-accessor/lookup` | `200 application/json` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+- `secret_id_accessor` `(string: <required>)` - Secret ID accessor attached to the role.
+
+### Sample Payload
+
+```json
+{
+ "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+    --data @payload.json \
+ https://vault.rocks/v1/auth/approle/role/application1/secret-id-accessor/lookup
+```
+
+## Destroy AppRole Secret ID Accessor
+
+Destroys an AppRole secret ID by its accessor.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/approle/role/:role_name/secret-id-accessor/destroy` | `204 (empty body)` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+- `secret_id_accessor` `(string: <required>)` - Secret ID accessor attached to the role.
+
+### Sample Payload
+
+```json
+{
+ "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+    --data @payload.json \
+ https://vault.rocks/v1/auth/approle/role/application1/secret-id-accessor/destroy
+```
+
+## Create Custom AppRole Secret ID
+
+Assigns a "custom" SecretID against an existing AppRole. This is used in the
+"Push" model of operation.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/approle/role/:role_name/custom-secret-id` | `200 application/json` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - Name of the AppRole.
+- `secret_id` `(string: <required>)` - SecretID to be attached to the Role.
+- `metadata` `(map: {})` - Metadata to be tied to the SecretID. This should be
+ a JSON-formatted string containing the metadata in key-value pairs. This
+ metadata will be set on tokens issued with this SecretID, and is logged in
+ audit logs _in plaintext_.
+- `cidr_list` `(string: "")` - Comma-separated list of CIDR blocks enforcing
+  secret IDs to be used from a specific set of IP addresses. If
+  `bound_cidr_list` is set on the role, then the list of CIDR blocks listed
+  here should be a subset of the CIDR blocks listed on the role.
+
+### Sample Payload
+
+```json
+{
+ "secret-id": "testsecretid"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/approle/role/application1/custom-secret-id
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
+ "secret_id": "testsecretid"
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Login With AppRole
+
+Issues a Vault token based on the presented credentials. `role_id` is always
+required; if `bind_secret_id` is enabled (the default) on the AppRole,
+`secret_id` is required too. Any other bound authentication values on the
+AppRole (such as client IP CIDR) are also evaluated.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/approle/login` | `200 application/json` |
+
+### Parameters
+
+- `role_id` `(string: <required>)` - RoleID of the AppRole.
+- `secret_id` `(string: <required>)` - SecretID belonging to the AppRole.
+
+### Sample Payload
+
+```json
+{
+ "role_id": "59d6d1ca-47bb-4e7e-a40b-8be3bc5a0ba8",
+ "secret_id": "84896a0c-1347-aa90-a4f6-aca8b7558780"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/approle/login
+```
+
+### Sample Response
+
+```json
+{
+ "auth": {
+ "renewable": true,
+ "lease_duration": 1200,
+ "metadata": null,
+ "policies": [
+ "default"
+ ],
+ "accessor": "fd6c9a00-d2dc-3b11-0be5-af7ae0e1d374",
+ "client_token": "5b1a0318-679c-9c45-e5c6-d1b9a9035d49"
+ },
+ "warnings": null,
+ "wrap_info": null,
+ "data": null,
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
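+
+Putting the pieces together, a minimal end-to-end sketch of the "pull" flow,
+assuming the role `application1` from the examples above and that `jq` is
+available for extracting fields from the JSON responses:
+
+```
+# Fetch the RoleID (typically done once, by a trusted orchestrator)
+ROLE_ID=$(curl -s --header "X-Vault-Token: ..." \
+    https://vault.rocks/v1/auth/approle/role/application1/role-id \
+    | jq -r .data.role_id)
+
+# Generate a SecretID for this application instance
+SECRET_ID=$(curl -s --header "X-Vault-Token: ..." --request POST \
+    https://vault.rocks/v1/auth/approle/role/application1/secret-id \
+    | jq -r .data.secret_id)
+
+# Log in and extract the client token
+curl -s --request POST \
+    --data "{\"role_id\":\"$ROLE_ID\",\"secret_id\":\"$SECRET_ID\"}" \
+    https://vault.rocks/v1/auth/approle/login | jq -r .auth.client_token
+```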
+
+## Read, Update, or Delete AppRole Properties
+
+Updates the respective property in the existing AppRole. All of these
+parameters of the AppRole can be updated using the `/auth/approle/role/:role_name`
+endpoint directly. The endpoints for each field are provided separately
+so that specific fields can be delegated using Vault's ACL system.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET/POST/DELETE` | `/auth/approle/role/:role_name/policies` | `200/204` |
+| `GET/POST/DELETE` | `/auth/approle/role/:role_name/secret-id-num-uses` | `200/204` |
+| `GET/POST/DELETE` | `/auth/approle/role/:role_name/secret-id-ttl` | `200/204` |
+| `GET/POST/DELETE` | `/auth/approle/role/:role_name/token-ttl` | `200/204` |
+| `GET/POST/DELETE` | `/auth/approle/role/:role_name/token-max-ttl` | `200/204` |
+| `GET/POST/DELETE` | `/auth/approle/role/:role_name/bind-secret-id` | `200/204` |
+| `GET/POST/DELETE` | `/auth/approle/role/:role_name/bound-cidr-list` | `200/204` |
+| `GET/POST/DELETE` | `/auth/approle/role/:role_name/period` | `200/204` |
+
+Refer to the `/auth/approle/role/:role_name` endpoint.
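+
+As an illustration of the delegation use case, a hypothetical Vault ACL policy
+could grant an operator the ability to tune only the token TTL of a specific
+AppRole, without exposing the rest of the role definition (a sketch; the role
+name `application1` is just the example used throughout this page):
+
+```hcl
+# Allows reading and updating token_ttl for the role "application1",
+# and nothing else on that role.
+path "auth/approle/role/application1/token-ttl" {
+  capabilities = ["read", "update"]
+}
+```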
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/aws/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/aws/index.html.md
new file mode 100644
index 0000000..8ba4671
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/aws/index.html.md
@@ -0,0 +1,1264 @@
+---
+layout: "api"
+page_title: "AWS Auth Backend - HTTP API"
+sidebar_current: "docs-http-auth-aws"
+description: |-
+ This is the API documentation for the Vault AWS authentication backend.
+---
+
+# AWS Auth Backend HTTP API
+
+This is the API documentation for the Vault AWS authentication backend. For
+general information about the usage and operation of the AWS backend, please
+see the [Vault AWS backend documentation](/docs/auth/aws.html).
+
+This documentation assumes the AWS backend is mounted at the `/auth/aws`
+path in Vault. Since it is possible to mount auth backends at any location,
+please update your API calls accordingly.
+
+## Configure Client
+
+Configures the credentials required to perform API calls to AWS as well as
+custom endpoints to talk to AWS APIs. The instance identity document
+fetched from the PKCS#7 signature will provide the EC2 instance ID. The
+credentials configured using this endpoint will be used to query the status
+of the instances via DescribeInstances API. If static credentials are not
+provided using this endpoint, then the credentials will be retrieved from
+the environment variables `AWS_ACCESS_KEY`, `AWS_SECRET_KEY`, and
+`AWS_REGION`. If the credentials are still not found and if the
+backend is configured on an EC2 instance with metadata querying
+capabilities, the credentials are fetched automatically.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/aws/config/client` | `204 (empty body)` |
+
+### Parameters
+
+- `access_key` `(string: "")` - AWS Access key with permissions to query AWS
+ APIs. The permissions required depend on the specific configurations. If using
+ the `iam` auth method without inferencing, then no credentials are necessary.
+ If using the `ec2` auth method or using the `iam` auth method with
+ inferencing, then these credentials need access to `ec2:DescribeInstances`. If
+ additionally a `bound_iam_role` is specified, then these credentials also need
+  access to `iam:GetInstanceProfile`. If, however, an alternate STS
+ configuration is set for the target account, then the credentials must be
+ permissioned to call `sts:AssumeRole` on the configured role, and that role
+ must have the permissions described here.
+- `secret_key` `(string: "")` - AWS Secret key with permissions to query AWS
+ APIs.
+- `endpoint` `(string: "")` - URL to override the default generated endpoint for
+ making AWS EC2 API calls.
+- `iam_endpoint` `(string: "")` - URL to override the default generated endpoint
+ for making AWS IAM API calls.
+- `sts_endpoint` `(string: "")` - URL to override the default generated endpoint
+ for making AWS STS API calls.
+- `iam_server_id_header_value` `(string: "")` - The value to require in the
+ `X-Vault-AWS-IAM-Server-ID` header as part of GetCallerIdentity requests that
+ are used in the iam auth method. If not set, then no value is required or
+ validated. If set, clients must include an X-Vault-AWS-IAM-Server-ID header in
+ the headers of login requests, and further this header must be among the
+ signed headers validated by AWS. This is to protect against different types of
+ replay attacks, for example a signed request sent to a dev server being resent
+ to a production server. Consider setting this to the Vault server's DNS name.
+
+### Sample Payload
+
+```json
+{
+ "access_key": "VKIAJBRHKH6EVTTNXDHA",
+ "secret_key": "vCtSM8ZUEQ3mOFVlYPBQkf2sO6F/W7a5TVzrl3Oj"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/aws/config/client
+```
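+
+Alternatively, if static credentials should not be stored in Vault, a sketch of
+supplying them through the environment variables described above (these must be
+set in the environment of the Vault server process; the region value here is
+just an example):
+
+```
+$ export AWS_ACCESS_KEY="VKIAJBRHKH6EVTTNXDHA"
+$ export AWS_SECRET_KEY="vCtSM8ZUEQ3mOFVlYPBQkf2sO6F/W7a5TVzrl3Oj"
+$ export AWS_REGION="us-east-1"
+```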
+
+## Read Config
+
+Returns the previously configured AWS access credentials.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/aws/config/client` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/aws/config/client
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "secret_key": "vCtSM8ZUEQ3mOFVlYPBQkf2sO6F/W7a5TVzrl3Oj",
+ "access_key": "VKIAJBRHKH6EVTTNXDHA"
+ "endpoint" "",
+ "iam_endpoint" "",
+ "sts_endpoint" "",
+ "iam_server_id_header_value" "",
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Delete Config
+
+Deletes the previously configured AWS access credentials.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/aws/config/client` | `204 (empty body)` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/aws/config/client
+```
+
+## Create Certificate Configuration
+
+Registers an AWS public key to be used to verify the instance identity
+documents. While the PKCS#7 signature of the identity document has a DSA
+digest, the identity signature has an RSA digest, and hence the public key
+for each type differs. Indicate the type of the public key using the "type"
+parameter.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/aws/config/certificate/:cert_name` | `204 (empty body)` |
+
+### Parameters
+
+- `cert_name` `(string: <required>)` - Name of the certificate.
+- `aws_public_cert` `(string: <required>)` - AWS Public key required to verify
+ PKCS7 signature of the EC2 instance metadata.
+- `type` `(string: "pkcs7")` - Takes the value of either "pkcs7" or "identity",
+ indicating the type of document which can be verified using the given
+ certificate. The PKCS#7 document will have a DSA digest and the identity
+ signature will have an RSA signature, and accordingly the public certificates
+ to verify those also vary. Defaults to "pkcs7".
+
+### Sample Payload
+
+```json
+{
+ "aws_public_cert": "-----BEGIN CERTIFICATE-----\nMIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw\nFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD\nVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z\nODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u\nIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl\ncnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e\nih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3\nVyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P\nhviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j\nk+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U\nhhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF\nlRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf\nMNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW\nMXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw\nvSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw\n7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K\n-----END CERTIFICATE-----\n"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/aws/config/certificate/test-cert
+```
+
+## Read Certificate Configuration
+
+Returns the previously configured AWS public key.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/aws/config/certificate/:cert_name` | `200 application/json` |
+
+### Parameters
+
+- `cert_name` `(string: <required>)` - Name of the certificate.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/aws/config/certificate/test-cert
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "aws_public_cert": "-----BEGIN CERTIFICATE-----\nMIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw\nFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD\nVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z\nODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u\nIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl\ncnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e\nih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3\nVyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P\nhviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j\nk+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U\nhhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF\nlRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf\nMNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW\nMXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw\nvSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw\n7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K\n-----END CERTIFICATE-----\n"
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## List Certificate Configurations
+
+Lists all the AWS public certificates that are registered with the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/aws/config/certificates` | `200 application/json` |
+| `GET` | `/auth/aws/config/certificates?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/aws/config/certificates
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "keys": [
+ "cert1"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Create STS Role
+
+Allows the explicit association of STS roles to satellite AWS accounts
+(i.e. those which are not the account in which the Vault server is
+running.) Login attempts from EC2 instances running in these accounts will
+be verified using credentials obtained by assumption of these STS roles.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/aws/config/sts/:account_id` | `204 (empty body)` |
+
+### Parameters
+
+- `account_id` `(string: <required>)` - AWS account ID to be associated with
+  STS role. If set, Vault will use assumed credentials to verify any login
+  attempts from EC2 instances in this account.
+- `sts_role` `(string: <required>)` - AWS ARN for STS role to be assumed when
+  interacting with the account specified. The Vault server must have
+  permissions to assume this role.
+
+### Sample Payload
+
+```json
+{
+ "sts_role": "arn:aws:iam:111122223333:role/myRole"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/aws/config/sts/111122223333
+```
+
+## Read STS Role
+
+Returns the previously configured STS role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/aws/config/sts/:account_id` | `200 application/json` |
+
+### Parameters
+
+- `account_id` `(string: <required>)` - AWS account ID to be associated with
+ STS role. If set, Vault will use assumed credentials to verify any login
+ attempts from EC2 instances in this account.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/aws/config/sts/111122223333
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "sts_role ": "arn:aws:iam:111122223333:role/myRole"
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## List STS Roles
+
+Lists all the AWS Account IDs for which an STS role is registered.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/aws/config/sts` | `200 application/json` |
+| `GET` | `/auth/aws/config/sts?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/aws/config/sts
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "keys": [
+ "111122223333",
+ "999988887777"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Delete STS Role
+
+Deletes a previously configured AWS account/STS role association.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/aws/config/sts/:account_id` | `204 (empty body)` |
+
+### Parameters
+
+- `account_id` `(string: <required>)` - AWS account ID for which the STS role
+  association is to be deleted.
+
+### Sample Request
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request DELETE \
+    https://vault.rocks/v1/auth/aws/config/sts/111122223333
+```
+
+## Configure Identity Whitelist Tidy Operation
+
+Configures the periodic tidying operation of the whitelisted identity entries.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/aws/config/tidy/identity-whitelist` | `204 (empty body)` |
+
+### Parameters
+
+- `safety_buffer` `(string: "72h")` - The amount of extra time that must have
+  passed beyond the expiration of the whitelisted identity entry, before it is
+  removed from the backend storage. Defaults to 72h.
+- `disable_periodic_tidy` `(bool: false)` - If set to 'true', disables the
+ periodic tidying of the `identity-whitelist/` entries.
+
+### Sample Payload
+
+```json
+{
+ "safety_buffer": "48h"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/aws/config/tidy/identity-whitelist
+```
+
+## Read Identity Whitelist Tidy Settings
+
+Returns the previously configured periodic whitelist tidying settings.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET`    | `/auth/aws/config/tidy/identity-whitelist` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/aws/config/tidy/identity-whitelist
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "safety_buffer": 600,
+ "disable_periodic_tidy": false
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Delete Identity Whitelist Tidy Settings
+
+Deletes the previously configured periodic whitelist tidying settings.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/aws/config/tidy/identity-whitelist` | `204 (empty body)` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/aws/config/tidy/identity-whitelist
+```
+
+## Configure Role Tag Blacklist Tidy Operation
+
+Configures the periodic tidying operation of the blacklisted role tag entries.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/aws/config/tidy/roletag-blacklist` | `204 (empty body)` |
+
+### Parameters
+
+- `safety_buffer` `(string: "72h")` - The amount of extra time that must have
+ passed beyond the `roletag` expiration, before it is removed from the backend
+ storage. Defaults to 72h.
+- `disable_periodic_tidy` `(bool: false)` - If set to 'true', disables the
+ periodic tidying of the `roletag-blacklist/` entries.
+
+### Sample Payload
+
+```json
+{
+ "safety_buffer": "48h"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/aws/config/tidy/roletag-blacklist
+```
+
+## Read Role Tag Blacklist Tidy Settings
+
+Returns the previously configured periodic blacklist tidying settings.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET`    | `/auth/aws/config/tidy/roletag-blacklist` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/aws/config/tidy/roletag-blacklist
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "safety_buffer": 600,
+ "disable_periodic_tidy": false
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Delete Role Tag Blacklist Tidy Settings
+
+Deletes the previously configured periodic blacklist tidying settings.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/aws/config/tidy/roletag-blacklist` | `204 (empty body)` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/aws/config/tidy/roletag-blacklist
+```
+
+## Create Role
+
+Registers a role in the backend. Only those instances or principals using
+a role registered with this endpoint will be able to perform the login
+operation. Constraints can be specified on the role that are applied to the
+instances or principals attempting to log in. At least one constraint must be
+specified on the role. The available constraints depend on the `auth_type` of
+the role and, if the `auth_type` is `iam`, on whether inferencing is enabled.
+A role will not let you configure a constraint if it is not checked by the
+`auth_type` and inferencing configuration of that role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/aws/role/:role` | `204 (empty body)` |
+
+### Parameters
+
+- `role` `(string: <required>)` - Name of the role.
+- `auth_type` `(string: "iam")` - The auth type permitted for this role. Valid
+ choices are "ec2" or "iam". If no value is specified, then it will default to
+ "iam" (except for legacy `aws-ec2` auth types, for which it will default to
+ "ec2"). Only those bindings applicable to the auth type chosen will be allowed
+ to be configured on the role.
+- `bound_ami_id` `(string: "")` - If set, defines a constraint on the EC2
+  instances that they must be using the AMI ID specified by this parameter.
+  This constraint is checked by the ec2 auth method, and by the iam auth
+  method only when inferring an EC2 instance.
+- `bound_account_id` `(string: "")` - If set, defines a constraint on the EC2
+  instances that the account ID in their identity documents must match the one
+  specified by this parameter. This constraint is checked by the ec2 auth
+  method, and by the iam auth method only when inferring an EC2 instance.
+- `bound_region` `(string: "")` - If set, defines a constraint on the EC2
+  instances that the region in their identity documents must match the one
+  specified by this parameter. This constraint is checked by the ec2 auth
+  method, and by the iam auth method only when inferring an EC2 instance.
+- `bound_vpc_id` `(string: "")` - If set, defines a constraint on the EC2
+  instances to be associated with the VPC ID that matches the value specified
+  by this parameter. This constraint is checked by the ec2 auth method, and by
+  the iam auth method only when inferring an EC2 instance.
+- `bound_subnet_id` `(string: "")` - If set, defines a constraint on the EC2
+  instances to be associated with the subnet ID that matches the value
+  specified by this parameter. This constraint is checked by the ec2 auth
+  method, and by the iam auth method only when inferring an EC2 instance.
+- `bound_iam_role_arn` `(string: "")` - If set, defines a constraint on the
+  authenticating EC2 instance that it must match the IAM role ARN specified by
+  this parameter. The value is prefix-matched (as though it were a glob ending
+  in `*`). The configured IAM user or EC2 instance role must be allowed to
+  execute the `iam:GetInstanceProfile` action if this is specified. This
+  constraint is checked by the ec2 auth method, and by the iam auth method
+  only when inferring an EC2 instance.
+- `bound_iam_instance_profile_arn` `(string: "")` - If set, defines a
+  constraint on the EC2 instances to be associated with an IAM instance
+  profile ARN which has a prefix that matches the value specified by this
+  parameter. The value is prefix-matched (as though it were a glob ending in
+  `*`). This constraint is checked by the ec2 auth method, and by the iam auth
+  method only when inferring an EC2 instance.
+- `role_tag` `(string: "")` - If set, enables the role tags for this role. The
+  value set for this field should be the 'key' of the tag on the EC2 instance.
+  The 'value' of the tag should be generated using the `role/:role/tag`
+  endpoint. Defaults to an empty string, meaning that role tags are disabled.
+  This constraint is valid only with the ec2 auth method and is not allowed
+  when the auth_type is iam.
+- `bound_iam_principal_arn` `(string: "")` - Defines the IAM principal that
+  must be authenticated using the iam auth method. It should look like
+  "arn:aws:iam::123456789012:user/MyUserName" or
+  "arn:aws:iam::123456789012:role/MyRoleName". Wildcards are supported at the
+  end of the ARN, e.g., "arn:aws:iam::123456789012:\*" will match any IAM
+  principal in the AWS account 123456789012. This constraint is only checked by
+  the iam auth method.
+- `inferred_entity_type` `(string: "")` - When set, instructs Vault to turn on
+ inferencing. The only current valid value is "ec2\_instance" instructing Vault
+ to infer that the role comes from an EC2 instance in an IAM instance profile.
+ This only applies to the iam auth method. If you set this on an existing role
+ where it had not previously been set, tokens that had been created prior will
+ not be renewable; clients will need to get a new token.
+- `inferred_aws_region` `(string: "")` - When role inferencing is activated, the
+ region to search for the inferred entities (e.g., EC2 instances). Required if
+ role inferencing is activated. This only applies to the iam auth method.
+- `resolve_aws_unique_ids` `(bool: true)` - When set, resolves the
+ `bound_iam_principal_arn` to the
+ [AWS Unique ID](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-unique-ids)
+ for the bound principal ARN. This field is ignored when
+ `bound_iam_principal_arn` ends with a wildcard character.
+ This requires Vault to be able to call `iam:GetUser` or `iam:GetRole` on the
+ `bound_iam_principal_arn` that is being bound. Resolving to internal AWS IDs
+ more closely mimics the behavior of AWS services in that if an IAM user or
+ role is deleted and a new one is recreated with the same name, those new users
+ or roles won't get access to roles in Vault that were permissioned to the
+ prior principals of the same name. The default value for new roles is true,
+ while the default value for roles that existed prior to this option existing
+ is false (you can check the value for a given role using the GET method on the
+ role). Any authentication tokens created prior to this being supported won't
+ verify the unique ID upon token renewal. When this is changed from false to
+ true on an existing role, Vault will attempt to resolve the role's bound IAM
+ ARN to the unique ID and, if unable to do so, will fail to enable this option.
+  Changing this from `true` to `false` is not supported; if absolutely
+  necessary, you would need to delete the role and recreate it, explicitly
+  setting it to `false`. However, the instances in which you would want to do
+  this should be rare. If the role creation (or upgrading to use this) succeeds,
+  then Vault has already been able to resolve internal IDs, and it doesn't need
+ any further IAM permissions to authenticate users. If a role has been deleted
+ and recreated, and Vault has cached the old unique ID, you should just call
+ this endpoint specifying the same `bound_iam_principal_arn` and, as long as
+ Vault still has the necessary IAM permissions to resolve the unique ID, Vault
+ will update the unique ID. (If it does not have the necessary permissions to
+ resolve the unique ID, then it will fail to update.) If this option is set to
+ false, then you MUST leave out the path component in bound_iam_principal_arn
+ for **roles** only, but not IAM users. That is, if your IAM role ARN is of the
+ form `arn:aws:iam::123456789012:role/some/path/to/MyRoleName`, you **must**
+ specify a bound_iam_principal_arn of
+ `arn:aws:iam::123456789012:role/MyRoleName` for authentication to work.
+- `ttl` `(string: "")` - The TTL period of tokens issued using this role,
+ provided as "1h", where hour is the largest suffix.
+- `max_ttl` `(string: "")` - The maximum allowed lifetime of tokens issued using
+ this role.
+- `period` `(string: "")` - If set, indicates that the token generated using
+  this role should never expire. The token should be renewed within the duration
+  specified by this value. At each renewal, the token's TTL will be set to the
+  value of this parameter.
+- `policies` `(array: [])` - Policies to be set on tokens issued using this
+ role.
+- `allow_instance_migration` `(bool: false)` - If set, allows migration of the
+ underlying instance where the client resides. This keys off of pendingTime in
+ the metadata document, so essentially, this disables the client nonce check
+ whenever the instance is migrated to a new host and pendingTime is newer than
+ the previously-remembered time. Use with caution. This only applies to
+ authentications via the ec2 auth method.
+- `disallow_reauthentication` `(bool: false)` - If set, only allows a single
+ token to be granted per instance ID. In order to perform a fresh login, the
+  entry in the whitelist for the instance ID needs to be cleared using the
+  `auth/aws/identity-whitelist/<instance_id>` endpoint. Defaults to 'false'.
+ This only applies to authentications via the ec2 auth method.
+
+### Sample Payload
+
+```json
+{
+ "bound_ami_id": "ami-fce36987",
+ "role_tag": "",
+ "policies": [
+ "default",
+ "dev",
+ "prod"
+ ],
+ "max_ttl": 1800000,
+ "disallow_reauthentication": false,
+ "allow_instance_migration": false
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/aws/role/dev-role
+```
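+
+For comparison, a hypothetical payload for a role using the `iam` auth method,
+built from the parameters documented above (the principal ARN here is just an
+illustrative value):
+
+```json
+{
+  "auth_type": "iam",
+  "bound_iam_principal_arn": "arn:aws:iam::123456789012:role/MyRoleName",
+  "policies": [
+    "default",
+    "dev"
+  ],
+  "max_ttl": 1800000
+}
+```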
+
+## Read Role
+
+Returns the previously registered role configuration.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/aws/role/:role` | `200 application/json` |
+
+### Parameters
+
+- `role` `(string: <required>)` - Name of the role.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/aws/role/dev-role
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "bound_ami_id": "ami-fce36987",
+ "role_tag": "",
+ "policies": [
+ "default",
+ "dev",
+ "prod"
+ ],
+ "max_ttl": 1800000,
+ "disallow_reauthentication": false,
+ "allow_instance_migration": false
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## List Roles
+
+Lists all the roles that are registered with the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/aws/roles` | `200 application/json` |
+| `GET` | `/auth/aws/roles?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/aws/roles
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "keys": [
+ "dev-role",
+ "prod-role"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Delete Role
+
+Deletes the previously registered role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/aws/role/:role` | `204 (empty body)` |
+
+### Parameters
+
+- `role` `(string: <required>)` - Name of the role.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/aws/role/dev-role
+```
+
+## Create Role Tags
+
+Creates a role tag on the role, which helps in restricting the capabilities
+that are set on the role. Role tags are not tied to any specific ec2
+instance unless specified explicitly using the `instance_id` parameter. By
+default, role tags are designed to be used across all instances that
+satisfy the constraints on the role. Regardless of which instances have
+role tags on them, capabilities defined in a role tag must be a strict
+subset of the given role's capabilities. Note that, since adding and
+removing a tag is often a widely distributed privilege, care needs to be
+taken to ensure that instances carry the correct tags so that they cannot
+gain more privileges than were intended. If a role tag is
+changed, the capabilities inherited by the instance will be those defined
+on the new role tag. Since those must be a subset of the role
+capabilities, the role should never provide more capabilities than any
+given instance can be allowed to gain in a worst-case scenario.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/aws/role/:role/tag` | `200 application/json` |
+
+### Parameters
+
+- `role` `(string: <required>)` - Name of the role.
+- `policies` `(array: [])` - Policies to be associated with the tag. If set,
+ must be a subset of the role's policies. If set, but set to an empty value,
+ only the 'default' policy will be given to issued tokens.
+- `max_ttl` `(string: "")` - The maximum allowed lifetime of tokens issued using
+ this role.
+- `instance_id` `(string: "")` - Instance ID for which this tag is intended.
+  If set, the created tag can only be used by the instance with the given ID.
+- `allow_instance_migration` `(bool: false)` - If set, allows migration of the
+ underlying instance where the client resides. This keys off of pendingTime in
+ the metadata document, so essentially, this disables the client nonce check
+ whenever the instance is migrated to a new host and pendingTime is newer than
+ the previously-remembered time. Use with caution. Defaults to 'false'.
+- `disallow_reauthentication` `(bool: false)` - If set, only allows a single
+ token to be granted per instance ID. This can be cleared with the
+ auth/aws/identity-whitelist endpoint. Defaults to 'false'.
+
+### Sample Payload
+
+```json
+{
+ "policies": ["default", "prod"]
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/aws/role/dev-role/tag
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "tag_value": "v1:09Vp0qGuyB8=:r=dev-role:p=default,prod:d=false:t=300h0m0s:uPLKCQxqsefRhrp1qmVa1wsQVUXXJG8UZP/pJIdVyOI=",
+ "tag_key": "VaultRole"
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
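+
+The returned `tag_key`/`tag_value` pair is then attached to the EC2 instance as
+a normal tag. A sketch using the AWS CLI (assuming it is installed and has the
+`ec2:CreateTags` permission; the JSON form of `--tags` is used because the tag
+value itself contains commas):
+
+```
+$ aws ec2 create-tags \
+    --resources i-de0f1344 \
+    --tags '[{"Key":"VaultRole","Value":"v1:09Vp0qGuyB8=:r=dev-role:p=default,prod:d=false:t=300h0m0s:uPLKCQxqsefRhrp1qmVa1wsQVUXXJG8UZP/pJIdVyOI="}]'
+```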
+
+## Login
+
+Fetches a token. This endpoint verifies the PKCS#7 signature of the instance
+identity document or the signature of the signed GetCallerIdentity request.
+With the ec2 auth method, or when inferring an EC2 instance, it verifies that
+the instance is actually in a running state, and cross-checks the constraints
+defined on the role with which the login is being performed. With the ec2
+auth method, as an alternative to the PKCS#7 signature, the identity document
+along with its RSA digest can be supplied to this endpoint.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/aws/login` | `200 application/json` |
+
+### Parameters
+
+- `role` `(string: "")` - Name of the role against which the login is being
+ attempted. If `role` is not specified, then the login endpoint looks for a
+ role bearing the name of the AMI ID of the EC2 instance that is trying to
+ login if using the ec2 auth method, or the "friendly name" (i.e., role name or
+ username) of the IAM principal authenticated. If a matching role is not found,
+ login fails.
+- `identity` `(string: <required>)` - Base64 encoded EC2 instance identity
+  document. This needs to be supplied along with the `signature` parameter. If
+  using `curl` for fetching the identity document, consider using the option
+  `-w 0` while piping the output to the `base64` binary (see the sketch after
+  this list).
+- `signature` `(string: <required>)` - Base64 encoded SHA256 RSA signature of
+  the instance identity document. This needs to be supplied along with the
+  `identity` parameter when using the ec2 auth method.
+- `pkcs7` `(string: <required>)` - PKCS7 signature of the identity document with
+  all `\n` characters removed. Either this needs to be set *OR* both `identity`
+  and `signature` need to be set when using the ec2 auth method.
+- `nonce` `(string: "")` - The nonce to be used for subsequent login requests.
+  If this parameter is not specified at all and if reauthentication is allowed,
+  then the backend will generate a random nonce, attach it to the instance's
+  identity-whitelist entry, and return the nonce as part of auth metadata.
+  This value should be used with further login requests, to establish client
+  authenticity. Clients can choose to set a custom nonce if preferred, in which
+  case, it is recommended that clients provide a strong nonce. If a nonce is
+  provided but with an empty value, it indicates intent to disable
+  reauthentication. Note that, when the `disallow_reauthentication` option is
+  enabled on either the role or the role tag, the `nonce` holds no significance.
+  This is ignored unless using the ec2 auth method.
+- `iam_http_request_method` `(string: <required>)` - HTTP method used in the
+  signed request. Currently only POST is supported, but other methods may be
+  supported in the future. This is required when using the iam auth method.
+- `iam_request_url` `(string: <required>)` - Base64-encoded HTTP URL used in
+  the signed request. Most likely just `aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8=`
+  (base64-encoding of `https://sts.amazonaws.com/`) as most requests will
+  probably use POST with an empty URI. This is required when using the iam auth
+  method.
+- `iam_request_body` `(string: <required>)` - Base64-encoded body of the
+  signed request. Most likely
+  `QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==` which is the
+  base64 encoding of `Action=GetCallerIdentity&Version=2011-06-15` (see the
+  sketch after this list). This is required when using the iam auth method.
+- `iam_request_headers` `(string: <required>)` - Base64-encoded,
+  JSON-serialized representation of the sts:GetCallerIdentity HTTP request
+  headers. The JSON serialization assumes that each header key maps to either a
+  string value or an array of string values (though the length of that array
+  will probably only be one). If the `iam_server_id_header_value` is configured
+  in Vault for the aws auth mount, then the headers must include the
+  X-Vault-AWS-IAM-Server-ID header, its value must match the value configured,
+  and the header must be included in the signed headers. This is required when
+  using the iam auth method.
+
+
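+As referenced in the parameter list above, the ec2 auth method values can be
+fetched and encoded on the instance itself. A sketch, assuming the standard EC2
+metadata endpoint at `169.254.169.254` and a `base64` binary that supports
+`-w 0` (GNU coreutils):
+
+```
+# PKCS#7 signature, with newline characters removed
+PKCS7=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7 \
+    | tr -d '\n')
+
+# Alternatively, the identity document and its signature
+IDENTITY=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document \
+    | base64 -w 0)
+SIGNATURE=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/signature \
+    | tr -d '\n')
+
+# For the iam auth method, the request body value is the base64 encoding of
+# the GetCallerIdentity body:
+echo -n 'Action=GetCallerIdentity&Version=2011-06-15' | base64
+# QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==
+```
+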
+### Sample Payload
+
+```json
+{
+  "role": "dev-role",
+  "pkcs7": "MIICiTCCAfICCQD6m7oRw0uXOjANBgkqh...",
+  "nonce": "5defbf9e-a8f9-3063-bdfc-54b7a42a1f95"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/aws/login
+```
+
+### Sample Response
+
+```json
+{
+ "auth": {
+ "renewable": true,
+ "lease_duration": 1800000,
+ "metadata": {
+ "role_tag_max_ttl": "0",
+ "instance_id": "i-de0f1344"
+ "ami_id": "ami-fce36983"
+ "role": "dev-role",
+ "auth_type": "ec2"
+ },
+ "policies": [
+ "default",
+ "dev",
+ ],
+ "accessor": "20b89871-e6f2-1160-fb29-31c2f6d4645e",
+ "client_token": "c9368254-3f21-aded-8a6f-7c818e81b17a"
+ },
+ "warnings": null,
+ "data": null,
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Place Role Tags in Blacklist
+
+Places a valid role tag in a blacklist. This ensures that the role tag
+cannot be used by any instance to perform a login operation again. Note
+that if the role tag was previously used to perform a successful login,
+placing the tag in the blacklist does not invalidate the already issued
+token.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/aws/roletag-blacklist/:role_tag` | `204 (empty body)` |
+
+### Parameters
+
+- `role_tag` `(string: <required>)` - Role tag to be blacklisted. The tag can
+  be supplied as-is. In order to avoid any encoding problems, it can be base64
+  encoded.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ https://vault.rocks/v1/auth/aws/roletag-blacklist/djE6MDlWcDBxR3V5Qjg9OmE9YW1pLWZjZTNjNjk2OnA9ZGVmYXVsdCxwcm9kOmQ9ZmFsc2U6dD0zMDBoMG0wczp1UExLQ1F4cXNlZlJocnAxcW1WYTF3c1FWVVhYSkc4VVpQLwo=
+```
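+
+Since a raw tag value commonly contains characters like `=` and `/` that are
+awkward in a URL path, it can be base64 encoded first, as noted above. A
+sketch, using the tag value from the earlier examples and assuming GNU
+`base64`:
+
+```
+$ TAG='v1:09Vp0qGuyB8=:a=ami-fce3c696:p=default,prod:d=false:t=300h0m0s:uPLKCQxqsefRhrp1qmVa1wsQVUXXJG8UZP/'
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    https://vault.rocks/v1/auth/aws/roletag-blacklist/$(echo -n "$TAG" | base64 -w 0)
+```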
+
+## Read Role Tag Blacklist Information
+
+Returns the blacklist entry of a previously blacklisted role tag.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/aws/roletag-blacklist/:role_tag` | `200 application/json` |
+
+### Parameters
+
+- `role_tag` `(string: <required>)` - Role tag for which the blacklist entry
+  is to be read. The tag can be supplied as-is. In order to avoid any encoding
+  problems, it can be base64 encoded.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/aws/roletag-blacklist/djE6MDlWcDBxR3V5Qjg9OmE9YW1pLWZjZTNjNjk2OnA9ZGVmYXVsdCxwcm9kOmQ9ZmFsc2U6dD0zMDBoMG0wczp1UExLQ1F4cXNlZlJocnAxcW1WYTF3c1FWVVhYSkc4VVpQLwo=
+```
+
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "expiration_time": "2016-04-25T10:35:20.127058773-04:00",
+ "creation_time": "2016-04-12T22:35:01.178348124-04:00"
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## List Blacklist Tags
+
+Lists all the role tags that are blacklisted.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/aws/roletag-blacklist` | `200 application/json` |
+| `GET` | `/auth/aws/roletag-blacklist?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/aws/roletag-blacklist
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "keys": [
+ "v1:09Vp0qGuyB8=:a=ami-fce3c696:p=default,prod:d=false:t=300h0m0s:uPLKCQxqsefRhrp1qmVa1wsQVUXXJG8UZP/"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Delete Blacklist Tags
+
+Deletes a blacklisted role tag.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/aws/roletag-blacklist/:role_tag` | `204 (empty body)` |
+
+### Parameters
+
+- `role_tag` `(string: <required>)` - Role tag to be deleted from the
+  blacklist. The tag can be supplied as-is. In order to avoid any encoding
+  problems, it can be base64 encoded.
+
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/aws/roletag-blacklist/djE6MDlWcDBxR3V5Qjg9OmE9YW1pLWZjZTNjNjk2OnA9ZGVmYXVsdCxwcm9kOmQ9ZmFsc2U6dD0zMDBoMG0wczp1UExLQ1F4cXNlZlJocnAxcW1WYTF3c1FWVVhYSkc4VVpQLwo=
+```
+
+## Tidy Blacklist Tags
+
+Cleans up the entries in the blacklist based on the expiration time of each
+entry and the `safety_buffer`.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/aws/tidy/roletag-blacklist` | `204 (empty body)` |
+
+### Parameters
+
+- `safety_buffer` `(string: "72h")` - The amount of extra time that must have
+ passed beyond the `roletag` expiration, before it is removed from the backend
+ storage. Defaults to 72h.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ https://vault.rocks/v1/auth/aws/tidy/roletag-blacklist
+```
+
+## Read Identity Whitelist Information
+
+Returns an entry in the whitelist. An entry will be created/updated by every
+successful login.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/aws/identity-whitelist/:instance_id` | `200 application/json` |
+
+### Parameters
+
+- `instance_id` `(string: <required>)` - EC2 instance ID. A successful login
+  operation from an EC2 instance gets cached in this whitelist, keyed off of
+  the instance ID.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/aws/identity-whitelist/i-aab47d37
+```
+
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "pending_time": "2016-04-14T01:01:41Z",
+ "expiration_time": "2016-05-05 10:09:16.67077232 +0000 UTC",
+ "creation_time": "2016-04-14 14:09:16.67077232 +0000 UTC",
+ "client_nonce": "5defbf9e-a8f9-3063-bdfc-54b7a42a1f95",
+ "role": "dev-role"
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## List Identity Whitelist Entries
+
+Lists all the instance IDs that are in the whitelist of successful logins.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/aws/identity-whitelist` | `200 application/json` |
+| `GET` | `/auth/aws/identity-whitelist?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+    https://vault.rocks/v1/auth/aws/identity-whitelist
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "data": {
+ "keys": [
+ "i-aab47d37"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Delete Identity Whitelist Entries
+
+Deletes the cached entry of a successful login from an instance.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/aws/identity-whitelist/:instance_id` | `204 (empty body)` |
+
+### Parameters
+
+- `instance_id` `(string: <required>)` - EC2 instance ID. A successful login
+  operation from an EC2 instance gets cached in this whitelist, keyed off of
+  the instance ID.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/aws/identity-whitelist/i-aab47d37
+```
+
+## Tidy Identity Whitelist Entries
+
+Cleans up the entries in the whitelist based on the expiration time of each
+entry and the `safety_buffer`.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/aws/tidy/identity-whitelist` | `204 (empty body)` |
+
+### Parameters
+
+- `safety_buffer` `(string: "72h")` - The amount of extra time that must have
+  passed beyond the expiration of the whitelisted identity entry, before it is
+  removed from the backend storage. Defaults to 72h.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ https://vault.rocks/v1/auth/aws/tidy/identity-whitelist
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/cert/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/cert/index.html.md
new file mode 100644
index 0000000..0de838e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/cert/index.html.md
@@ -0,0 +1,330 @@
+---
+layout: "api"
+page_title: "TLS Certificate Auth Backend - HTTP API"
+sidebar_current: "docs-http-auth-cert"
+description: |-
+ This is the API documentation for the Vault TLS Certificate authentication
+ backend.
+---
+
+# TLS Certificate Auth Backend HTTP API
+
+This is the API documentation for the Vault TLS Certificate authentication
+backend. For general information about the usage and operation of the TLS
+Certificate backend, please see the [Vault TLS Certificate backend documentation](/docs/auth/cert.html).
+
+This documentation assumes the TLS Certificate backend is mounted at the
+`/auth/cert` path in Vault. Since it is possible to mount auth backends at any
+location, please update your API calls accordingly.
+
+## Create CA Certificate Role
+
+Sets a CA cert and associated parameters in a role name.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/cert/certs/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` - The name of the certificate role.
+- `certificate` `(string: <required>)` - The PEM-format CA certificate.
+- `allowed_names` `(string: "")` - Constrain the Common and Alternative Names
+  in the client certificate with a
+  [globbed pattern](https://github.com/ryanuber/go-glob/blob/master/README.md#example).
+  Value is a comma-separated list of patterns. Authentication requires at least
+  one Name matching at least one pattern. If not set, defaults to allowing all
+  names.
+- `policies` `(string: "")` - A comma-separated list of policies to set on tokens
+ issued when authenticating against this CA certificate.
+- `display_name` `(string: "")` - The `display_name` to set on tokens issued
+ when authenticating against this CA certificate. If not set, defaults to the
+ name of the role.
+- `ttl` `(string: "")` - The TTL period of the token, provided as a number of
+  seconds. If not provided, the token is valid for the mount or system
+  default TTL time, in that order.
+
+### Sample Payload
+
+```json
+{
+ "certificate": "-----BEGIN CERTIFICATE-----\nMIIEtzCCA5+.......ZRtAfQ6r\nwlW975rYa1ZqEdA=\n-----END CERTIFICATE-----",
+ "display_name": "test"
+}
+```
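+
+Certificates contain literal newlines, which must appear as `\n` escapes inside
+the JSON string. As a convenience sketch (not part of the API), `jq` can build
+such a payload from a PEM file on disk:
+
+```
+$ jq -Rs '{certificate: ., display_name: "test"}' ca.pem > payload.json
+```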
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+    --data @payload.json \
+ https://vault.rocks/v1/auth/cert/certs/test-ca
+```
+
+## Read CA Certificate Role
+
+Gets information associated with the named role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/cert/certs/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` - The name of the certificate role.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/cert/certs/test-ca
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": {
+ "certificate": "-----BEGIN CERTIFICATE-----\nMIIEtzCCA5+.......ZRtAfQ6r\nwlW975rYa1ZqEdA=\n-----END CERTIFICATE-----",
+ "display_name": "test",
+ "policies": "",
+ "allowed_names": "",
+ "ttl": 2764800
+ },
+ "warnings": null,
+ "auth": null
+}
+```
+
+## List Certificate Roles
+
+Lists configured certificate names.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/cert/certs` | `200 application/json` |
+| `GET` | `/auth/cert/certs?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+    https://vault.rocks/v1/auth/cert/certs
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "keys": [
+ "cert1",
+ "cert2"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Delete Certificate Role
+
+Deletes the named role and CA cert from the backend mount.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/cert/certs/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` - The name of the certificate role.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/cert/certs/cert1
+```
+
+## Create CRL
+
+Sets a named CRL.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/cert/crls/:name` | `204 (empty body)` |
+
+
+### Parameters
+
+- `name` `(string: <required>)` - The name of the CRL.
+- `crl` `(string: <required>)` - The PEM format CRL.
+
+### Sample Payload
+
+```json
+{
+ "crl": "-----BEGIN X509 CRL-----\n...\n-----END X509 CRL-----"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+    --data @payload.json \
+ https://vault.rocks/v1/auth/cert/crls/custom-crl
+```
+
+## Read CRL
+
+Gets information associated with the named CRL (currently, the serial
+numbers contained within). As the serials can be integers up to an
+arbitrary size, these are returned as strings.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/cert/crls/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` - The name of the CRL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/cert/crls/custom-crl
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "data": {
+ "serials": {
+ "13": {}
+ }
+ },
+ "lease_duration": 0,
+ "lease_id": "",
+ "renewable": false,
+ "warnings": null
+}
+```
+
+## Delete CRL
+
+Deletes the named CRL from the backend mount.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/cert/crls/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` - The name of the CRL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/cert/crls/cert1
+```
+
+## Configure TLS Certificate Backend
+
+Configuration options for the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/cert/config` | `204 (empty body)` |
+
+### Parameters
+
+- `disable_binding` `(boolean: false)` - If set, during renewal, skips the
+ matching of presented client identity with the client identity used during
+ login.
+
+### Sample Payload
+
+```json
+{
+ "disable_binding": true
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+    --data @payload.json \
+    https://vault.rocks/v1/auth/cert/config
+```
+
+## Login with TLS Certificate Backend
+
+Log in and fetch a token. If there is a valid chain to a CA configured in the
+backend and all role constraints are matched, a token will be issued.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/cert/login` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: "")` - Authenticate against only the named certificate role,
+ returning its policy list if successful. If not set, defaults to trying all
+ certificate roles and returning any one that matches.
+
+### Sample Payload
+
+```json
+{
+ "name": "cert1"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --request POST \
+    --data @payload.json \
+ https://vault.rocks/v1/auth/cert/login
+```
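+
+Note that authentication happens during the TLS handshake itself, so the client
+must also present its certificate and private key on the connection. With curl
+this is typically done via the `--cert` and `--key` flags (file paths below are
+placeholders):
+
+```
+$ curl \
+    --request POST \
+    --cert client.pem \
+    --key client-key.pem \
+    --data @payload.json \
+    https://vault.rocks/v1/auth/cert/login
+```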
+
+### Sample Response
+
+```json
+{
+ "auth": {
+ "client_token": "cf95f87d-f95b-47ff-b1f5-ba7bff850425",
+ "policies": [
+ "web",
+ "stage"
+ ],
+ "lease_duration": 3600,
+ "renewable": true,
+ }
+}
+```
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/gcp/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/gcp/index.html.md
new file mode 100644
index 0000000..cfedc24
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/gcp/index.html.md
@@ -0,0 +1,482 @@
+---
+layout: "api"
+page_title: "Google Cloud Platform Auth Plugin Backend - HTTP API"
+sidebar_current: "docs-http-auth-gcp"
+description: |-
+ This is the API documentation for the Vault GCP authentication
+ backend plugin.
+---
+
+# GCP Auth Plugin HTTP API
+
+This is the API documentation for the Vault GCP authentication backend
+plugin. To learn more about the usage and operation, see the
+[Vault GCP backend documentation](/docs/auth/gcp.html).
+
+This documentation assumes the plugin backend is mounted at the
+`/auth/gcp` path in Vault. Since it is possible to mount auth backends
+at any location, please update your API calls accordingly.
+
+## Configure
+
+Configures the credentials required for the plugin to perform API calls
+to GCP. These credentials will be used to query the status of IAM
+entities and get service account or other Google public certificates
+to confirm signed JWTs passed in during login.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/gcp/config` | `204 (empty body)` |
+
+### Parameters
+
+- `credentials` `(string: "")` - A marshaled JSON string that is the content
+ of a GCP credentials file. If you would rather specify a file, you can use
+  `credentials="@path/to/creds.json"`. The GCP permissions
+ Vault currently requires are:
+ - `iam.serviceAccounts.get`
+ - `iam.serviceAccountKeys.get`
+
+ If this value is not specified or if it is explicitly set to empty,
+ Vault will attempt to use [Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials)
+ for that server's machine.
+
+- `google_certs_endpoint` `(string: "")`: The Google OAuth2 endpoint from which
+  to obtain public certificates. This is used primarily for testing and should
+  generally not be set. If not set, defaults to the [Google public certs
+  endpoint](https://www.googleapis.com/oauth2/v3/certs).
+
+### Sample Payload
+
+```json
+{
+ "credentials": "{ \"type\": \"service_account\", \"project_id\": \"project-123456\",...}"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/gcp/config
+```
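+
+For reference, a credentials file of the kind expected by `credentials` can be
+generated with the gcloud CLI, assuming a suitable service account already
+exists and you are permitted to create keys for it (names below are
+placeholders):
+
+```
+$ gcloud iam service-accounts keys create creds.json \
+    --iam-account=vault-auth@project-123456.iam.gserviceaccount.com
+```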
+
+## Read Config
+
+Returns the previously configured config, including credentials.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/gcp/config` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/gcp/config
+```
+
+### Sample Response
+
+```json
+{
+ "data":{
+ "client_email":"serviceaccount1@project-123456.iam.gserviceaccount.com",
+ "client_id":"...",
+ "private_key":"-----BEGIN PRIVATE KEY-----...-----END PRIVATE KEY-----\n",
+ "private_key_id":"...",
+ "project_id":"project-123456",
+ "google_certs_url": ""
+ },
+ ...
+}
+
+```
+
+## Delete Config
+
+Deletes the previously configured GCP config and credentials.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/gcp/config` | `204 (empty body)` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/gcp/config
+```
+
+## Create Role
+
+Registers a role in the backend. Role types have specific entities
+that can perform login operations against this endpoint. Constraints specific
+to the role type must be set on the role. These are applied to the authenticated
+entities attempting to login.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/gcp/role/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` - Name of the role.
+- `type` `(string: <required>)` - The type of this role. Only the
+  restrictions applicable to this role type will be allowed to
+  be configured on the role (see below). Valid choices are: `iam`, `gce`.
+- `project_id` `(string: <required>)` - Only entities belonging to this
+  project can log in for this role.
+- `ttl` `(string: "")` - The TTL period of tokens issued using this role in
+  seconds.
+- `max_ttl` `(string: "")` - The maximum allowed lifetime, in seconds, of
+  tokens issued using this role.
+- `period` `(string: "")` - If set, indicates that the token generated using
+  this role should never expire. The token should be renewed within the duration
+  specified by this value. At each renewal, the token's TTL will be set to the
+  value of this parameter.
+- `policies` `(array: [])` - Policies to be set on tokens issued using this
+  role.
+- `bound_service_accounts` `(array: [])` - Required for `iam` roles.
+  A comma-separated list of service account emails or ids.
+  Defines the service accounts that login is restricted to. If set to `*`, all
+  service accounts are allowed (the role will still be bound by project). For
+  GCE instances, this is inferred from the service account used to issue the
+  instance's identity metadata token.
+
+**`iam`-only params**:
+
+- `max_jwt_exp` `(string: "")` - Optional, defaults to 900 (15min).
+ Number of seconds past the time of authentication that the login param JWT
+ must expire within. For example, if a user attempts to login with a token
+ that expires within an hour and this is set to 15 minutes, Vault will return
+ an error prompting the user to create a new signed JWT with a shorter `exp`.
+ The GCE metadata tokens currently do not allow the `exp` claim to be customized.
+
+- `allow_gce_inference` `(bool: true)` - A flag to determine if this role should
+ allow GCE instances to authenticate by inferring service accounts from the
+ GCE identity metadata token.
+
+- `service_accounts` `(array: [])` - Required for `iam` roles.
+ A comma-separated list of service account emails or ids.
+ Defines the service accounts that login is restricted to. If set to `*`, all
+ service accounts are allowed (role will still be bound by project).
+
+**`gce`-only params**:
+
+- `bound_zone` `(string: "")`: If set, determines the zone that a GCE instance must belong to.
+  If `bound_instance_group` is provided, it is assumed to be a zonal group and the group must belong to this zone.
+
+- `bound_region` `(string: "")`: If set, determines the region that a GCE instance must belong to.
+  If `bound_instance_group` is provided, it is assumed to be a regional group and the group must belong to this region.
+  **If `bound_zone` is provided, region will be ignored.**
+
+- `bound_instance_group` `(string: "")`: If set, determines the instance group that an authorized instance must belong to.
+  `bound_zone` or `bound_region` must also be set if `bound_instance_group` is set.
+
+- `bound_labels` `(array: [])`: A comma-separated list of Google Cloud Platform labels formatted as `$key:$value` strings that
+  must be set on authorized GCE instances. Because GCP labels are not currently ACL'd, we recommend that this be used in
+  conjunction with other restrictions.
+
+### Sample Payload
+
+Example `iam` Role:
+
+```json
+{
+ "type": "iam",
+ "project": "project-123456",
+ "policies": [
+ "default",
+ "dev",
+ "prod"
+ ],
+ "max_ttl": 1800000,
+ "max_jwt_exp": 10000,
+ "service_accounts": [
+ "dev-1@project-123456.iam.gserviceaccount.com",
+ "dev-2@project-123456.iam.gserviceaccount.com",
+ "123456789"
+ ],
+ "allow_instance_migration": false
+}
+```
+
+Example `gce` Role:
+
+```json
+{
+ "type": "gce",
+ "project": "project-123456",
+ "policies": [
+ "default",
+ "dev",
+ "prod"
+ ],
+ "max_ttl": 1800000,
+ "max_jwt_exp": 10000,
+ "service_accounts": [
+ "dev-1@project-123456.iam.gserviceaccount.com",
+ "dev-2@project-123456.iam.gserviceaccount.com",
+ "123456789"
+ ],
+ "allow_instance_migration": false
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/gcp/role/dev-role
+```
+
+## Edit Service Accounts For IAM Role
+
+Edit service accounts for an existing IAM role in the backend.
+This allows you to add or remove service accounts from the list of
+service accounts on the role.
+
+| Method | Path | Produces |
+| :------- | :---------------------------------------| :------------------|
+| `POST` | `/auth/gcp/role/:name/service-accounts` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` - Name of an existing `iam` role.
+  Returns an error if the role is not an `iam` role.
+- `add` `(array: [])` - List of service accounts to add to the role's
+  service accounts.
+- `remove` `(array: [])` - List of service accounts to remove from the
+  role's service accounts.
+
+### Sample Payload
+
+```json
+{
+ "add": [
+ "dev-1@project-123456.iam.gserviceaccount.com",
+ "123456789"
+ ],
+ "remove": [
+ "dev-2@project-123456.iam.gserviceaccount.com"
+ ]
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+    https://vault.rocks/v1/auth/gcp/role/dev-role/service-accounts
+```
+
+## Edit Labels For GCE Role
+
+Edit labels for an existing GCE role in the backend. This allows you to add or
+remove labels from the list of bound labels on the role.
+
+| Method | Path | Produces |
+| :------- | :---------------------------------------| :------------------|
+| `POST` | `/auth/gcp/role/:name/labels` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` - Name of an existing `gce` role. Returns an error if the role is not a `gce` role.
+- `add` `(array: [])` - List of `$key:$value` labels to add to the GCE role's bound labels.
+- `remove` `(array: [])` - List of label keys to remove from the role's bound labels.
+
+### Sample Payload
+
+```json
+{
+ "add": [
+ "foo:bar",
+ "env:dev",
+ "key:value"
+ ],
+ "remove": [
+ "keyInLabel1, keyInLabel2"
+ ]
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+    https://vault.rocks/v1/auth/gcp/role/dev-role/labels
+```
+
+## Read Role
+
+Returns the previously registered role configuration.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/gcp/role/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` - Name of the role.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/gcp/role/dev-role
+```
+
+### Sample Response
+
+```json
+{
+ "data":{
+ "max_jwt_exp": 900,
+ "max_ttl": 0,
+ "ttl":0,
+ "period": 0,
+ "policies":[
+ "default",
+ "dev",
+ "prod"
+ ],
+ "project_id":"project-123456",
+ "role_type":"iam",
+ "service_accounts": [
+ "dev-1@project-123456.iam.gserviceaccount.com",
+ "dev-2@project-123456.iam.gserviceaccount.com",
+ "123456789",
+ ]
+ },
+ ...
+}
+
+```
+
+## List Roles
+
+Lists all the roles that are registered with the plugin.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/gcp/roles` | `200 application/json` |
+| `GET` | `/auth/gcp/roles?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/gcp/roles
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "keys": [
+ "dev-role",
+ "prod-role"
+ ]
+ },
+ ...
+}
+```
+
+## Delete Role
+
+Deletes the previously registered role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/gcp/role/:role` | `204 (empty body)` |
+
+### Parameters
+
+- `role` `(string: <required>)` - Name of the role.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/gcp/role/dev-role
+```
+
+## Login
+
+Fetch a token. This endpoint takes a signed JSON Web Token (JWT) and
+a role name for some entity. It verifies the JWT signature to authenticate that
+entity and then authorizes the entity for the given role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/gcp/login` | `200 application/json` |
+
+### Parameters
+
+- `role` `(string: <required>)` - Name of the role against which the login is being
+  attempted.
+- `jwt` `(string: <required>)` - Signed [JSON Web Token](https://tools.ietf.org/html/rfc7519) (JWT).
+  For `iam`, this is a JWT generated using the IAM API method
+  [signJwt](https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/signJwt)
+  or a self-signed JWT. For `gce`, this is an [identity metadata token](https://cloud.google.com/compute/docs/instances/verifying-instance-identity#request_signature).
+
+
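+As an illustration (not part of the Vault API itself), an `iam`-type JWT can be
+produced with the IAM [signJwt](https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/signJwt)
+method. The project, service account, and claim values below are placeholders;
+the `aud` claim is expected to reference the Vault role as `vault/<role>`:
+
+```
+$ curl \
+    --header "Authorization: Bearer $(gcloud auth print-access-token)" \
+    --header "Content-Type: application/json" \
+    --data '{"payload": "{\"sub\": \"dev-1@project-123456.iam.gserviceaccount.com\", \"aud\": \"vault/dev-role\", \"exp\": 1507498460}"}' \
+    https://iam.googleapis.com/v1/projects/project-123456/serviceAccounts/dev-1@project-123456.iam.gserviceaccount.com:signJwt
+```
+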
+### Sample Payload
+
+```json
+{
+ "role": "dev-role",
+ "jwt": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..."
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/gcp/login
+```
+
+### Sample Response
+
+```json
+{
+ "auth":{
+ "client_token":"f33f8c72-924e-11f8-cb43-ac59d697597c",
+ "accessor":"0e9e354a-520f-df04-6867-ee81cae3d42d",
+ "policies":[
+ "default",
+ "dev",
+ "prod"
+ ],
+ "metadata":{
+ "role": "dev-role",
+ "service_account_email": "dev1@project-123456.iam.gserviceaccount.com",
+ "service_account_id": "111111111111111111111"
+ },
+ "lease_duration":2764800,
+ "renewable":true
+ },
+ ...
+}
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/github/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/github/index.html.md
new file mode 100644
index 0000000..8020c73
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/github/index.html.md
@@ -0,0 +1,139 @@
+---
+layout: "api"
+page_title: "Github Auth Backend - HTTP API"
+sidebar_current: "docs-http-auth-github"
+description: |-
+ This is the API documentation for the Vault Github authentication backend.
+---
+
+# Github Auth Backend HTTP API
+
+This is the API documentation for the Vault Github authentication backend. For
+general information about the usage and operation of the Github backend, please
+see the [Vault Github backend documentation](/docs/auth/github.html).
+
+This documentation assumes the Github backend is mounted at the `/auth/github`
+path in Vault. Since it is possible to mount auth backends at any location,
+please update your API calls accordingly.
+
+## Configure Backend
+
+Configures the connection parameters for GitHub. This path honors the
+distinction between the `create` and `update` capabilities inside ACL policies.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/github/config` | `204 (empty body)` |
+
+### Parameters
+
+- `organization` `(string: <required>)` - The organization users must be part
+  of.
+- `base_url` `(string: "")` - The API endpoint to use. Useful if you are running
+ GitHub Enterprise or an API-compatible authentication server.
+- `ttl` `(string: "")` - Duration after which authentication will be expired.
+- `max_ttl` `(string: "")` - Maximum duration after which authentication will
+ be expired.
+
+### Sample Payload
+
+```json
+{
+ "organization": "acme-org"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+    https://vault.rocks/v1/auth/github/config
+```
+
+## Read Configuration
+
+Reads the GitHub configuration.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/github/config` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/github/config
+```
+
+### Sample Response
+
+```json
+{
+ "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6",
+ "lease_id": "",
+ "lease_duration": 0,
+ "renewable": false,
+ "data": {
+ "organization": "acme-org",
+ "base_url": "",
+ "ttl": "",
+ "max_ttl": ""
+ },
+ "warnings": null
+}
+```
+
+## Login
+
+Login using GitHub access token.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/github/login` | `200 application/json` |
+
+### Parameters
+
+- `token` `(string: <required>)` - GitHub personal API token.
+
+### Sample Payload
+
+```json
+{
+ "token": "ABC123..."
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+    --request POST \
+    --data @payload.json \
+    https://vault.rocks/v1/auth/github/login
+```
+
+### Sample Response
+
+```json
+{
+  "lease_id": "",
+  "renewable": false,
+  "lease_duration": 0,
+  "data": null,
+  "warnings": null,
+  "auth": {
+    "client_token": "64d2a8f2-2a2f-5688-102b-e6088b76e344",
+    "accessor": "18bb8f89-826a-56ee-c65b-1736dc5ea27d",
+    "policies": ["default"],
+    "metadata": {
+      "username": "fred",
+      "org": "acme-org"
+    },
+    "lease_duration": 7200,
+    "renewable": true
+  }
+}
+```
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/index.html.md
new file mode 100644
index 0000000..b89d31c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/index.html.md
@@ -0,0 +1,19 @@
+---
+layout: "api"
+page_title: "HTTP API"
+sidebar_current: "docs-http-auth"
+description: |-
+ Each authentication backend publishes its own set of API paths and methods.
+ These endpoints are documented in this section.
+---
+
+# Authentication Backends
+
+Each authentication backend publishes its own set of API paths and methods.
+These endpoints are documented in this section. Authentication backends are
+mounted at a path, but the documentation will assume the default mount points
+for simplicity. If you are mounting at a different path, you should adjust your
+API calls accordingly.
+
+For the API documentation for a specific authentication backend, please choose
+an authentication backend from the navigation.
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/kubernetes/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/kubernetes/index.html.md
new file mode 100644
index 0000000..3963361
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/kubernetes/index.html.md
@@ -0,0 +1,293 @@
+---
+layout: "api"
+page_title: "Kubernetes Auth Plugin Backend - HTTP API"
+sidebar_current: "docs-http-auth-kubernetes"
+description: |-
+ This is the API documentation for the Vault Kubernetes authentication
+ backend plugin.
+---
+
+# Kubernetes Auth Plugin HTTP API
+
+This is the API documentation for the Vault Kubernetes authentication backend
+plugin. To learn more about the usage and operation, see the
+[Vault Kubernetes backend documentation](/docs/auth/kubernetes.html).
+
+This documentation assumes the backend is mounted at the
+`/auth/kubernetes` path in Vault. Since it is possible to mount auth backends
+at any location, please update your API calls accordingly.
+
+## Configure
+
+The Kubernetes Auth backend validates service account JWTs and verifies their
+existence with the Kubernetes TokenReview API. This endpoint configures the
+public key used to validate the JWT signature and the necessary information to
+access the Kubernetes API.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/kubernetes/config` | `204 (empty body)` |
+
+### Parameters
+
+- `pem_keys` `(array: <required>)` - List of PEM-formatted public keys or
+  certificates used to verify the signatures of Kubernetes service account
+  JWTs. If a certificate is given, its public key will be extracted.
+- `kubernetes_host` `(string: <required>)` - Host must be a host string, a
+  host:port pair, or a URL to the base of the Kubernetes API server.
+- `kubernetes_ca_cert` `(string: "")` - PEM-encoded CA cert for use by the TLS
+  client used to talk with the API.
+
+### Sample Payload
+
+```json
+{
+ "pem_keys": "-----BEGIN CERTIFICATE-----.....-----END CERTIFICATE-----",
+ "kubernetes_host": "https://192.168.99.100:8443",
+ "kubernetes_ca_cert": "-----BEGIN CERTIFICATE-----.....-----END CERTIFICATE-----"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/kubernetes/config
+```
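+
+As a convenience sketch (resource names are placeholders), the
+`kubernetes_ca_cert` value and the service account JWT used later for login can
+be extracted from a service account token secret with kubectl:
+
+```
+$ kubectl get secret vault-auth-token-pd21c \
+    -o jsonpath='{.data.ca\.crt}' | base64 --decode
+$ kubectl get secret vault-auth-token-pd21c \
+    -o jsonpath='{.data.token}' | base64 --decode
+```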
+
+## Read Config
+
+Returns the previously configured config, including credentials.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/kubernetes/config` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/kubernetes/config
+```
+
+### Sample Response
+
+```json
+{
+ "data":{
+ "pem_keys": "-----BEGIN CERTIFICATE-----.....-----END CERTIFICATE-----",
+ "kubernetes_host": "https://192.168.99.100:8443",
+ "kubernetes_ca_cert": "-----BEGIN CERTIFICATE-----.....-----END CERTIFICATE-----"
+ },
+ ...
+}
+
+```
+
+## Create Role
+
+Registers a role in the backend. Role types have specific entities
+that can perform login operations against this endpoint. Constraints specific
+to the role type must be set on the role. These are applied to the authenticated
+entities attempting to login.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/kubernetes/role/:name`| `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` - Name of the role.
+- `bound_service_account_names` `(array: <required>)` - List of service account
+  names able to access this role. If set to `*`, all names are allowed; both
+  this and `bound_service_account_namespaces` cannot be set to `*`.
+- `bound_service_account_namespaces` `(array: <required>)` - List of namespaces
+  allowed to access this role. If set to `*`, all namespaces are allowed; both
+  this and `bound_service_account_names` cannot be set to `*`.
+- `ttl` `(string: "")` - The TTL period of tokens issued using this role in
+ seconds.
+- `max_ttl` `(string: "")` - The maximum allowed lifetime of tokens
+ issued in seconds using this role.
+- `period` `(string: "")` - If set, indicates that the token generated using
+ this role should never expire. The token should be renewed within the duration
+ specified by this value. At each renewal, the token's TTL will be set to the
+ value of this parameter.
+- `policies` `(array: [])` - Policies to be set on tokens issued using this
+ role.
+
+### Sample Payload
+
+```json
+{
+ "bound_service_account_names": "vault-auth",
+ "bound_service_account_namespaces": "default",
+ "policies": [
+ "dev",
+ "prod"
+ ],
+ "max_ttl": 1800000,
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/kubernetes/role/dev-role
+```
+
+## Read Role
+
+Returns the previously registered role configuration.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/kubernetes/role/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` - Name of the role.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/kubernetes/role/dev-role
+```
+
+### Sample Response
+
+```json
+{
+ "data":{
+ "bound_service_account_names": "vault-auth",
+ "bound_service_account_namespaces": "default",
+ "max_ttl": 1800000,,
+ "ttl":0,
+ "period": 0,
+ "policies":[
+ "dev",
+ "prod"
+ ],
+ },
+ ...
+}
+
+```
+
+## List Roles
+
+Lists all the roles that are registered with the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/kubernetes/roles` | `200 application/json` |
+| `GET` | `/auth/kubernetes/roles?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/kubernetes/roles
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "keys": [
+ "dev-role",
+ "prod-role"
+ ]
+ },
+ ...
+}
+```
+
+## Delete Role
+
+Deletes the previously registered role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/kubernetes/role/:role`| `204 (empty body)` |
+
+### Parameters
+
+- `role` `(string: <required>)` - Name of the role.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/kubernetes/role/dev-role
+```
+
+## Login
+
+Fetch a token. This endpoint takes a signed JSON Web Token (JWT) and
+a role name for some entity. It verifies the JWT signature to authenticate that
+entity and then authorizes the entity for the given role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/kubernetes/login` | `200 application/json` |
+
+### Parameters
+
+- `role` `(string: <required>)` - Name of the role against which the login is being
+  attempted.
+- `jwt` `(string: <required>)` - Signed [JSON Web
+  Token](https://tools.ietf.org/html/rfc7519) (JWT) for authenticating a service
+  account.
+
+### Sample Payload
+
+```json
+{
+ "role": "dev-role",
+ "jwt": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..."
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/kubernetes/login
+```
+
+### Sample Response
+
+```json
+{
+ "auth": {
+ "client_token": "62b858f9-529c-6b26-e0b8-0457b6aacdb4",
+ "accessor": "afa306d0-be3d-c8d2-b0d7-2676e1c0d9b4",
+ "policies": [
+ "default"
+ ],
+ "metadata": {
+ "role": "test",
+ "service_account_name": "vault-auth",
+ "service_account_namespace": "default",
+ "service_account_secret_name": "vault-auth-token-pd21c",
+ "service_account_uid": "aa9aa8ff-98d0-11e7-9bb7-0800276d99bf"
+ },
+ "lease_duration": 2764800,
+ "renewable": true
+ }
+ ...
+}
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/ldap/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/ldap/index.html.md
new file mode 100644
index 0000000..b0ff4ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/ldap/index.html.md
@@ -0,0 +1,452 @@
+---
+layout: "api"
+page_title: "LDAP Auth Backend - HTTP API"
+sidebar_current: "docs-http-auth-ldap"
+description: |-
+ This is the API documentation for the Vault LDAP authentication backend.
+---
+
+# LDAP Auth Backend HTTP API
+
+This is the API documentation for the Vault LDAP authentication backend. For
+general information about the usage and operation of the LDAP backend, please
+see the [Vault LDAP backend documentation](/docs/auth/ldap.html).
+
+This documentation assumes the LDAP backend is mounted at the `/auth/ldap`
+path in Vault. Since it is possible to mount auth backends at any location,
+please update your API calls accordingly.
+
+## Configure LDAP Backend
+
+This endpoint configures the LDAP authentication backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/ldap/config` | `204 (empty body)` |
+
+### Parameters
+
+- `url` `(string: <required>)` – The LDAP server to connect to. Examples:
+ `ldap://ldap.myorg.com`, `ldaps://ldap.myorg.com:636`
+- `starttls` `(bool: false)` – If true, issues a `StartTLS` command after
+ establishing an unencrypted connection.
+- `tls_min_version` `(string: tls12)` – Minimum TLS version to use. Accepted
+ values are `tls10`, `tls11` or `tls12`.
+- `tls_max_version` `(string: tls12)` – Maximum TLS version to use. Accepted
+ values are `tls10`, `tls11` or `tls12`.
+- `insecure_tls` `(bool: false)` – If true, skips LDAP server SSL certificate
+ verification - insecure, use with caution!
+- `certificate` `(string: "")` – CA certificate to use when verifying LDAP server
+ certificate, must be x509 PEM encoded.
+- `binddn` `(string: "")` – Distinguished name of object to bind when performing
+ user search. Example: `cn=vault,ou=Users,dc=example,dc=com`
+- `bindpass` `(string: "")` – Password to use along with `binddn` when performing
+ user search.
+- `userdn` `(string: "")` – Base DN under which to perform user search. Example:
+ `ou=Users,dc=example,dc=com`
+- `userattr` `(string: "")` – Attribute on user attribute object matching the
+ username passed when authenticating. Examples: `sAMAccountName`, `cn`, `uid`
+- `discoverdn` `(bool: false)` – Use anonymous bind to discover the bind DN of a
+ user.
+- `deny_null_bind` `(bool: true)` – This option prevents users from bypassing
+ authentication when providing an empty password.
+- `upndomain` `(string: "")` – The userPrincipalDomain used to construct the UPN
+ string for the authenticating user. The constructed UPN will appear as
+ `[username]@UPNDomain`. Example: `example.com`, which will cause vault to bind
+ as `username@example.com`.
+- `groupfilter` `(string: "")` – Go template used when constructing the group
+ membership query. The template can access the following context variables:
+ \[`UserDN`, `Username`\]. The default is
+ `(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`,
+ which is compatible with several common directory schemas. To support
+ nested group resolution for Active Directory, instead use the following
+ query: `(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))`.
+- `groupdn` `(string: "")` – LDAP search base to use for group membership
+ search. This can be the root containing either groups or users. Example:
+ `ou=Groups,dc=example,dc=com`
+- `groupattr` `(string: "")` – LDAP attribute to follow on objects returned by
+ `groupfilter` in order to enumerate user group membership. Examples: for
+ groupfilter queries returning _group_ objects, use: `cn`. For queries
+ returning _user_ objects, use: `memberOf`. The default is `cn`.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/ldap/config
+```
+
+### Sample Payload
+
+```json
+{
+ "binddn": "cn=vault,ou=Users,dc=example,dc=com",
+ "deny_null_bind": true,
+ "discoverdn": false,
+ "groupattr": "cn",
+ "groupdn": "ou=Groups,dc=example,dc=com",
+ "groupfilter": "(\u0026(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))",
+ "insecure_tls": false,
+ "starttls": false,
+ "tls_max_version": "tls12",
+ "tls_min_version": "tls12",
+ "url": "ldaps://ldap.myorg.com:636",
+ "userattr": "samaccountname",
+ "userdn": "ou=Users,dc=example,dc=com"
+}
+```
+
+## Read LDAP Configuration
+
+This endpoint retrieves the LDAP configuration for the authentication backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/ldap/config` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/ldap/config
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "binddn": "cn=vault,ou=Users,dc=example,dc=com",
+ "bindpass": "",
+ "certificate": "",
+ "deny_null_bind": true,
+ "discoverdn": false,
+ "groupattr": "cn",
+ "groupdn": "ou=Groups,dc=example,dc=com",
+ "groupfilter": "(\u0026(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))",
+ "insecure_tls": false,
+ "starttls": false,
+ "tls_max_version": "tls12",
+ "tls_min_version": "tls12",
+ "upndomain": "",
+ "url": "ldaps://ldap.myorg.com:636",
+ "userattr": "samaccountname",
+ "userdn": "ou=Users,dc=example,dc=com"
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## List LDAP Groups
+
+This endpoint returns a list of existing groups in the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/ldap/groups` | `200 application/json` |
+| `GET` | `/auth/ldap/groups?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/ldap/groups
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "keys": [
+ "scientists",
+ "engineers"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Read LDAP Group
+
+This endpoint returns the policies associated with a LDAP group.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/ldap/groups/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – The name of the LDAP group
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/ldap/groups/admins
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "policies": "admin,default"
+ },
+ "renewable": false,
+ "lease_id": ""
+ "lease_duration": 0,
+ "warnings": null
+}
+```
+
+## Create/Update LDAP Group
+
+This endpoint creates or updates LDAP group policies.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/ldap/groups/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – The name of the LDAP group
+- `policies` `(string: "")` – Comma-separated list of policies associated with
+  the group.
+
+### Sample Payload
+
+```json
+{
+ "policies": "admin,default"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/ldap/groups/admins
+```
+
+## Delete LDAP Group
+
+This endpoint deletes the LDAP group and policy association.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/ldap/groups/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – The name of the LDAP group
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/ldap/groups/admins
+```
+
+## List LDAP Users
+
+This endpoint returns a list of existing users in the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/ldap/users` | `200 application/json` |
+| `GET` | `/auth/ldap/users?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/ldap/users
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "keys": [
+ "mitchellh",
+ "armon"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Read LDAP User
+
+This endpoint returns the policies associated with a LDAP user.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/ldap/users/:username` | `200 application/json` |
+
+### Parameters
+
+- `username` `(string: <required>)` – The username of the LDAP user
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/ldap/users/mitchellh
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "policies": "admin,default",
+ "groups": ""
+ },
+ "renewable": false,
+ "lease_id": ""
+ "lease_duration": 0,
+ "warnings": null
+}
+```
+
+## Create/Update LDAP User
+
+This endpoint creates or updates an LDAP user's policies and group associations.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/ldap/users/:username` | `204 (empty body)` |
+
+### Parameters
+
+- `username` `(string: <required>)` – The username of the LDAP user
+- `policies` `(string: "")` – Comma-separated list of policies associated with
+  the user.
+- `groups` `(string: "")` – Comma-separated list of groups associated with the
+  user.
+
+### Sample Payload
+
+```json
+{
+ "policies": "admin,default"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/ldap/users/mitchellh
+```
+
+## Delete LDAP User
+
+This endpoint deletes the LDAP user and policy association.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/ldap/users/:username` | `204 (empty body)` |
+
+### Parameters
+
+- `username` `(string: <required>)` – The username of the LDAP user
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/ldap/users/mitchellh
+```
+
+## Login with LDAP User
+
+This endpoint allows you to log in with LDAP credentials.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/ldap/login/:username` | `200 application/json` |
+
+### Parameters
+
+- `username` `(string: <required>)` – The username of the LDAP user
+- `password` `(string: <required>)` – The password for the LDAP user
+
+### Sample Payload
+
+```json
+{
+ "password": "MyPassword1"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/ldap/login/mitchellh
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": null,
+ "auth": {
+ "client_token": "c4f280f6-fdb2-18eb-89d3-589e2e834cdb",
+ "policies": [
+ "admins",
+ "default"
+ ],
+ "metadata": {
+ "username": "mitchellh"
+ },
+ "lease_duration": 0,
+ "renewable": false
+ }
+}
+```
+
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/okta/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/okta/index.html.md
new file mode 100644
index 0000000..cff51af
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/okta/index.html.md
@@ -0,0 +1,395 @@
+---
+layout: "api"
+page_title: "Okta Auth Backend - HTTP API"
+sidebar_current: "docs-http-auth-okta"
+description: |-
+ This is the API documentation for the Vault Okta authentication backend.
+---
+
+# Okta Auth Backend HTTP API
+
+This is the API documentation for the Vault Okta authentication backend. For
+general information about the usage and operation of the Okta backend, please
+see the [Vault Okta backend documentation](/docs/auth/okta.html).
+
+This documentation assumes the Okta backend is mounted at the `/auth/okta`
+path in Vault. Since it is possible to mount auth backends at any location,
+please update your API calls accordingly.
+
+## Create Configuration
+
+Configures the connection parameters for Okta. This path honors the
+distinction between the `create` and `update` capabilities inside ACL policies.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/okta/config` | `204 (empty body)` |
+
+### Parameters
+
+- `org_name` `(string: <required>)` - Name of the organization to be used in the
+ Okta API.
+- `api_token` `(string: "")` - Okta API token. This is required to query Okta
+ for user group membership. If this is not supplied only locally configured
+ groups will be enabled.
+- `base_url` `(string: "")` - If set, will be used as the base domain
+ for API requests. Examples are okta.com, oktapreview.com, and okta-emea.com.
+- `ttl` `(string: "")` - Duration after which authentication will be expired.
+- `max_ttl` `(string: "")` - Maximum duration after which authentication will
+ be expired.
+
+### Sample Payload
+
+```json
+{
+ "org_name": "example",
+ "api_token": "abc123"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/okta/config
+```
+
+## Read Configuration
+
+Reads the Okta configuration.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/okta/config` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/okta/config
+```
+
+### Sample Response
+
+```json
+{
+ "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6",
+ "lease_id": "",
+ "lease_duration": 0,
+ "renewable": false,
+ "data": {
+ "org_name": "example",
+ "api_token": "abc123",
+ "base_url": "okta.com",
+ "ttl": "",
+ "max_ttl": ""
+ },
+ "warnings": null
+}
+```
+
+## List Users
+
+List the users configured in the Okta backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/okta/users` | `200 application/json` |
+| `GET` | `/auth/okta/users?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/okta/users
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "keys": [
+ "fred",
+ "jane"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Register User
+
+Registers a new user and maps a set of policies to it.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/okta/users/:username` | `204 (empty body)` |
+
+### Parameters
+
+- `username` `(string: <required>)` - Name of the user.
+- `groups` `(string: "")` - Comma-separated list of groups associated with the
+ user.
+- `policies` `(string: "")` - Comma-separated list of policies associated with
+ the user.
+
+### Sample Payload
+
+```json
+{
+  "policies": "dev,prod"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/okta/users/fred
+```
+
+## Read User
+
+Reads the properties of an existing username.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/okta/users/:username` | `200 application/json` |
+
+### Parameters
+
+- `username` `(string: <required>)` - Username for this user.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/okta/users/test-user
+```
+
+### Sample Response
+
+```json
+{
+ "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6",
+ "lease_id": "",
+ "lease_duration": 0,
+ "renewable": false,
+ "data": {
+ "policies": "default,dev",
+ "groups": ""
+ },
+ "warnings": null
+}
+```
+
+## Delete User
+
+Deletes an existing username from the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/okta/users/:username` | `204 (empty body)` |
+
+### Parameters
+
+- `username` `(string: <required>)` - Username for this user.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/okta/users/test-user
+```
+
+## List Groups
+
+List the groups configured in the Okta backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/okta/groups` | `200 application/json` |
+| `GET` | `/auth/okta/groups?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/okta/groups
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "keys": [
+ "admins",
+ "dev-users"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Register Group
+
+Registers a new group and maps a set of policies to it.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/okta/groups/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` - The name of the group.
+- `policies` `(string: "")` - Comma-separated list of policies associated with
+ the group.
+
+### Sample Payload
+
+```json
+{
+  "policies": "dev,prod"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/okta/groups/admins
+```
+
+## Read Group
+
+Reads the properties of an existing group.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/okta/groups/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` - The name for the group.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/okta/groups/admins
+```
+
+### Sample Response
+
+```json
+{
+ "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6",
+ "lease_id": "",
+ "lease_duration": 0,
+ "renewable": false,
+ "data": {
+ "policies": "default,admin"
+ },
+ "warnings": null
+}
+```
+
+## Delete Group
+
+Deletes an existing group from the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/okta/groups/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` - The name for the group.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+    https://vault.rocks/v1/auth/okta/groups/admins
+```
+
+## Login
+
+Login with the username and password.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/okta/login/:username` | `200 application/json` |
+
+### Parameters
+
+- `username` `(string: <required>)` - Username for this user.
+- `password` `(string: <required>)` - Password for the authenticating user.
+
+### Sample Payload
+
+```json
+{
+ "password": "Password!"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/okta/login/fred
+```
+
+### Sample Response
+
+```json
+{
+  "lease_id": "",
+  "renewable": false,
+  "lease_duration": 0,
+  "data": null,
+  "warnings": null,
+  "auth": {
+    "client_token": "64d2a8f2-2a2f-5688-102b-e6088b76e344",
+    "accessor": "18bb8f89-826a-56ee-c65b-1736dc5ea27d",
+    "policies": ["default"],
+    "metadata": {
+      "username": "fred",
+      "policies": "default"
+    },
+    "lease_duration": 7200,
+    "renewable": true
+  }
+}
+```
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/radius/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/radius/index.html.md
new file mode 100644
index 0000000..9132e0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/radius/index.html.md
@@ -0,0 +1,237 @@
+---
+layout: "api"
+page_title: "RADIUS Auth Backend - HTTP API"
+sidebar_current: "docs-http-auth-radius"
+description: |-
+ This is the API documentation for the Vault RADIUS authentication backend.
+---
+
+# RADIUS Auth Backend HTTP API
+
+This is the API documentation for the Vault RADIUS authentication backend. For
+general information about the usage and operation of the RADIUS backend, please
+see the [Vault RADIUS backend documentation](/docs/auth/radius.html).
+
+This documentation assumes the RADIUS backend is mounted at the `/auth/radius`
+path in Vault. Since it is possible to mount auth backends at any location,
+please update your API calls accordingly.
+
+## Configure RADIUS
+
+Configures the connection parameters and shared secret used to communicate with
+RADIUS.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/radius/config` | `204 (empty body)` |
+
+### Parameters
+
+- `host` `(string: <required>)` - The RADIUS server to connect to. Examples:
+  `radius.myorg.com`, `127.0.0.1`
+- `port` `(integer: 1812)` - The UDP port on which the RADIUS server is
+  listening. Default is 1812.
+- `secret` `(string: <required>)` - The RADIUS shared secret.
+- `unregistered_user_policies` `(string: "")` - A comma-separated list of
+  policies to be granted to unregistered users.
+- `dial_timeout` `(integer: 10)` - Number of seconds to wait for a backend
+  connection before timing out. Default is 10.
+- `nas_port` `(integer: 10)` - The NAS-Port attribute of the RADIUS request.
+  Default is 10.
+
+### Sample Payload
+
+```json
+{
+ "host": "radius.myorg.com",
+ "port": 1812,
+ "secret": "mySecret"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/radius/config
+```
+
+## Register User
+
+Registers a new user and maps a set of policies to it. This path honors the
+distinction between the `create` and `update` capabilities inside ACL policies.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/radius/users/:username` | `204 (empty body)` |
+
+### Parameters
+
+- `username` `(string: <required>)` - Username for this user.
+- `policies` `(string: "")` - Comma-separated list of policies. If set to
+ empty string, only the `default` policy will be applicable to the user.
+
+### Sample Payload
+
+```json
+{
+  "policies": "dev,prod"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/radius/users/test-user
+```
+
+## Read User
+
+Reads the properties of an existing username.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/radius/users/:username` | `200 application/json` |
+
+### Parameters
+
+- `username` `(string: <required>)` - Username for this user.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/radius/users/test-user
+```
+
+### Sample Response
+
+```json
+{
+ "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6",
+ "lease_id": "",
+ "lease_duration": 0,
+ "renewable": false,
+ "data": {
+ "policies": "default,dev"
+ },
+ "warnings": null
+}
+```
+
+## Delete User
+
+Deletes an existing username from the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/radius/users/:username` | `204 (empty body)` |
+
+### Parameters
+
+- `username` `(string: <required>)` - Username for this user.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/radius/users/test-user
+```
+
+## List Users
+
+List the users registered with the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/radius/users` | `200 application/json` |
+| `GET` | `/auth/radius/users?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/radius/users
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "keys": [
+ "devuser",
+ "produser"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
+
+## Login
+
+Login with the username and password.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/radius/login` | `200 application/json` |
+| `POST` | `/auth/radius/login/:username` | `200 application/json` |
+
+### Parameters
+
+- `username` `(string: <required>)` - Username for this user.
+- `password` `(string: <required>)` - Password for the authenticating user.
+
+### Sample Payload
+
+```json
+{
+ "password": "Password!"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/radius/login/test-user
+```
+
+### Sample Response
+
+```json
+{
+  "lease_id": "",
+  "renewable": false,
+  "lease_duration": 0,
+  "data": null,
+  "warnings": null,
+  "auth": {
+    "client_token": "64d2a8f2-2a2f-5688-102b-e6088b76e344",
+    "accessor": "18bb8f89-826a-56ee-c65b-1736dc5ea27d",
+    "policies": ["default"],
+    "metadata": {
+      "username": "vishal"
+    },
+    "lease_duration": 7200,
+    "renewable": true
+  }
+}
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/token/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/token/index.html.md
new file mode 100644
index 0000000..10c88a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/token/index.html.md
@@ -0,0 +1,704 @@
+---
+layout: "api"
+page_title: "Token Auth Backend - HTTP API"
+sidebar_current: "docs-http-auth-token"
+description: |-
+ This is the API documentation for the Vault token authentication backend.
+---
+
+# Token Auth Backend HTTP API
+
+This is the API documentation for the Vault token authentication backend. For
+general information about the usage and operation of the token backend, please
+see the [Vault Token backend documentation](/docs/auth/token.html).
+
+## List Accessors
+
+This endpoint lists token accessors. This requires `sudo` capability, and access
+to it should be tightly controlled as the accessors can be used to revoke very
+large numbers of tokens and their associated leases at once.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/token/accessors` | `200 application/json` |
+| `GET` | `/auth/token/accessors?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/auth/token/accessors
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "warnings": null,
+ "wrap_info": null,
+ "data": {
+ "keys": [
+ "476ea048-ded5-4d07-eeea-938c6b4e43ec",
+ "bb00c093-b7d3-b0e9-69cc-c4d85081165b"
+ ]
+ },
+ "lease_duration": 0,
+ "renewable": false,
+ "lease_id": ""
+}
+```
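+
+Since accessors are the handle used for bulk revocation, a common pattern is to
+list them and revoke each one in turn. A minimal sketch, assuming `jq` is
+available and the calling token has the required capabilities:
+
+```
+$ curl --header "X-Vault-Token: ..." --request LIST \
+    https://vault.rocks/v1/auth/token/accessors | jq -r '.data.keys[]' | \
+    while read accessor; do
+      curl --header "X-Vault-Token: ..." --request POST \
+        --data "{\"accessor\": \"$accessor\"}" \
+        https://vault.rocks/v1/auth/token/revoke-accessor
+    done
+```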
+
+## Create Token
+
+Creates a new token. Certain options are only available when called by a
+root token. If used via the `/auth/token/create-orphan` endpoint, a root
+token is not required to create an orphan token (otherwise set with the
+`no_parent` option). If used with a role name in the path, the token will
+be created against the specified role name; this may override options set
+during this call.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/token/create` | `200 application/json` |
+| `POST` | `/auth/token/create-orphan` | `200 application/json` |
+| `POST` | `/auth/token/create/:role_name` | `200 application/json` |
+
+### Parameters
+
+- `id` `(string: "")` – The ID of the client token. Can only be specified by a
+ root token. Otherwise, the token ID is a randomly generated UUID.
+- `role_name` `(string: "")` – The name of the token role.
+- `policies` `(array: "")` – A list of policies for the token. This must be a
+ subset of the policies belonging to the token making the request, unless root.
+ If not specified, defaults to all the policies of the calling token.
+- `meta` `(map: {})` – A map of string to string valued metadata. This is
+ passed through to the audit backends.
+- `no_parent` `(bool: false)` - If true and set by a root caller, the token will
+ not have the parent token of the caller. This creates a token with no parent.
+- `no_default_policy` `(bool: false)` - If true the `default` policy will not be
+ contained in this token's policy set.
+- `renewable` `(bool: true)` - Set to `false` to disable the ability of the token
+ to be renewed past its initial TTL. Setting the value to `true` will allow
+ the token to be renewable up to the system/mount maximum TTL.
+- `lease` `(string: "")` - DEPRECATED; use `ttl` instead.
+- `ttl` `(string: "")` - The TTL period of the token, provided as "1h", where
+ hour is the largest suffix. If not provided, the token is valid for the
+ [default lease TTL](/docs/configuration/index.html), or indefinitely if the
+ root policy is used.
+- `explicit_max_ttl` `(string: "")` - If set, the token will have an explicit
+ max TTL set upon it. This maximum token TTL *cannot* be changed later, and
+ unlike with normal tokens, updates to the system/mount max TTL value will
+ have no effect at renewal time -- the token will never be able to be renewed
+ or used past the value set at issue time.
+- `display_name` `(string: "token")` - The display name of the token.
+- `num_uses` `(integer: 0)` - The maximum uses for the given token. This can be
+  used to create a one-time-use or limited-use token. A value of 0 means the
+  token has no limit on the number of uses.
+- `period` `(string: "")` - If specified, the token will be periodic; it will have
+ no maximum TTL (unless an "explicit-max-ttl" is also set) but every renewal
+ will use the given period. Requires a root/sudo token to use.
+
+### Sample Payload
+
+```json
+{
+ "policies": [
+ "web",
+ "stage"
+ ],
+  "meta": {
+ "user": "armon"
+ },
+ "ttl": "1h",
+ "renewable": true
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/token/create
+```
+
+### Sample Response
+
+```json
+{
+ "auth": {
+ "client_token": "ABCD",
+ "policies": [
+ "web",
+ "stage"
+ ],
+ "metadata": {
+ "user": "armon"
+ },
+ "lease_duration": 3600,
+    "renewable": true
+ }
+}
+```
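+
+To create an orphan token without a root token, the same payload can be sent to
+the `create-orphan` endpoint listed above; only the path changes:
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    --data @payload.json \
+    https://vault.rocks/v1/auth/token/create-orphan
+```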
+
+## Lookup a Token
+
+Returns information about the client token.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/token/lookup` | `200 application/json` |
+| `GET` | `/auth/token/lookup/:token` | `200 application/json` |
+
+### Parameters
+
+- `token` `(string: <required>)` - Token to lookup.
+
+### Sample Payload
+
+```json
+{
+ "token": "ClientToken"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/token/lookup
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "id": "ClientToken",
+ "policies": [
+ "web",
+ "stage"
+ ],
+ "path": "auth/github/login",
+ "meta": {
+ "user": "armon",
+ "organization": "hashicorp"
+ },
+ "display_name": "github-armon",
+    "num_uses": 0
+ }
+}
+```
+
+## Lookup a Token (Self)
+
+Returns information about the current client token.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/token/lookup-self` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/token/lookup-self
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "id": "ClientToken",
+ "policies": [
+ "web",
+ "stage"
+ ],
+ "path": "auth/github/login",
+ "meta": {
+ "user": "armon",
+ "organization": "hashicorp"
+ },
+ "display_name": "github-armon",
+    "num_uses": 0
+ }
+}
+```
+
+## Lookup a Token Accessor
+
+Returns information about the client token from the accessor.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/token/lookup-accessor` | `200 application/json` |
+| `GET` | `/auth/token/lookup-accessor/:accessor` | `200 application/json` |
+
+### Parameters
+
+- `accessor` `(string: <required>)` - Token accessor to lookup.
+
+### Sample Payload
+
+```json
+{
+ "accessor": "2c84f488-2133-4ced-87b0-570f93a76830"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/token/lookup-accessor
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": {
+ "creation_time": 1457533232,
+ "creation_ttl": 2764800,
+ "display_name": "token",
+ "meta": null,
+ "num_uses": 0,
+ "orphan": false,
+ "path": "auth/token/create",
+ "policies": [
+ "default",
+ "web"
+ ],
+ "ttl": 2591976
+ },
+ "warnings": null,
+ "auth": null
+}
+```
+
+## Renew a Token
+
+Renews a lease associated with a token. This is used to prevent the expiration
+of a token and its automatic revocation. Token renewal is possible only if
+there is a lease associated with the token.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/token/renew` | `200 application/json` |
+| `POST` | `/auth/token/renew/:token` | `200 application/json` |
+
+### Parameters
+
+- `token` `(string: <required>)` - Token to renew. This can be part of the URL
+ or the body.
+- `increment` `(string: "")` - An optional requested lease increment can be
+ provided. This increment may be ignored.
+
+### Sample Payload
+
+```json
+{
+ "token": "ClientToken"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/token/renew
+```
+
+### Sample Response
+
+```json
+{
+ "auth": {
+ "client_token": "ABCD",
+ "policies": [
+ "web",
+ "stage"
+ ],
+ "metadata": {
+ "user": "armon"
+ },
+ "lease_duration": 3600,
+    "renewable": true
+ }
+}
+```
+
+## Renew a Token (Self)
+
+Renews a lease associated with the calling token. This is used to prevent the
+expiration of a token and its automatic revocation. Token renewal is possible
+only if there is a lease associated with the token.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/token/renew-self` | `200 application/json` |
+
+### Parameters
+
+- `increment` `(string: "")` - An optional requested lease increment can be
+ provided. This increment may be ignored.
+
+### Sample Payload
+
+```json
+{
+ "increment": "1h"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/token/renew-self
+```
+
+### Sample Response
+
+```json
+{
+ "auth": {
+ "client_token": "ABCD",
+ "policies": [
+ "web",
+ "stage"
+ ],
+ "metadata": {
+ "user": "armon"
+ },
+ "lease_duration": 3600,
+    "renewable": true
+ }
+}
+```
+
+## Revoke a Token
+
+Revokes a token and all child tokens. When the token is revoked, all secrets
+generated with it are also revoked.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/token/revoke` | `204 (empty body)` |
+
+### Parameters
+
+- `token` `(string: <required>)` - Token to revoke.
+
+### Sample Payload
+
+```json
+{
+ "token": "ClientToken"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/token/revoke
+```
+
+## Revoke a Token (Self)
+
+Revokes the token used to call it and all child tokens. When the token is
+revoked, all dynamic secrets generated with it are also revoked.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/token/revoke-self` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ https://vault.rocks/v1/auth/token/revoke-self
+```
+
+## Revoke a Token Accessor
+
+Revokes the token associated with the accessor and all of its child tokens.
+This is meant for situations where there is no access to the token ID but there
+is a need to revoke the token and its children.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/token/revoke-accessor` | `204 (empty body)` |
+
+### Parameters
+
+- `accessor` `(string: <required>)` - Accessor of the token.
+
+### Sample Payload
+
+```json
+{
+ "accessor": "2c84f488-2133-4ced-87b0-570f93a76830"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/token/revoke-accessor
+```
+
+## Revoke Token and Orphan Children
+
+Revokes a token but not its child tokens. When the token is revoked, all
+secrets generated with it are also revoked. All child tokens are orphaned, but
+can be revoked subsequently using `/auth/token/revoke/`. This is a
+root-protected endpoint.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/token/revoke-orphan` | `204 (empty body)` |
+| `POST` | `/auth/token/revoke-orphan/:token` | `204 (empty body)` |
+
+### Parameters
+
+- `token` `(string: <required>)` - Token to revoke. This can be part of the URL
+ or the body.
+
+### Sample Payload
+
+```json
+{
+ "token": "ClientToken"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/token/revoke-orphan
+```
+
+## Read Token Role
+
+Fetches the named role configuration.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET`    | `/auth/token/roles/:role_name` | `200 application/json` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - The name of the token role.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/token/roles/nomad
+```
+
+### Sample Response
+
+```json
+{
+ "request_id": "075a19cd-4e56-a3ca-d956-7609819831ec",
+ "lease_id": "",
+ "lease_duration": 0,
+ "renewable": false,
+ "data": {
+ "allowed_policies": [
+ "dev"
+ ],
+ "disallowed_policies": [],
+ "explicit_max_ttl": 0,
+ "name": "nomad",
+ "orphan": false,
+ "path_suffix": "",
+ "period": 0,
+ "renewable": true
+ },
+ "warnings": null
+}
+```
+
+## List Token Roles
+
+List available token roles.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/token/roles` | `200 application/json` |
+| `GET` | `/auth/token/roles?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+    --request LIST \
+ https://vault.rocks/v1/auth/token/roles
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "keys": [
+ "role1",
+ "role2"
+ ]
+ }
+}
+```
+
+## Create/Update Token Role
+
+Creates (or replaces) the named role. Roles enforce specific behavior when
+creating tokens that allow token functionality that is otherwise not
+available or would require `sudo`/root privileges to access. Role
+parameters, when set, override any provided options to the `create`
+endpoints. The role name is also included in the token path, allowing all
+tokens created against a role to be revoked using the `sys/revoke-prefix`
+endpoint.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/token/roles/:role_name` | `204 (empty body)` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` – The name of the token role.
+- `allowed_policies` `(list: [])` – If set, tokens can be created with any
+ subset of the policies in this list, rather than the normal semantics of
+ tokens being a subset of the calling token's policies. The parameter is a
+ comma-delimited string of policy names. If at creation time
+ `no_default_policy` is not set and `"default"` is not contained in
+ `disallowed_policies`, the `"default"` policy will be added to the created
+ token automatically.
+- `disallowed_policies` `(list: [])` – If set, successful token creation via
+ this role will require that no policies in the given list are requested. The
+ parameter is a comma-delimited string of policy names. Adding `"default"` to
+ this list will prevent `"default"` from being added automatically to created
+ tokens.
+- `orphan` `(bool: true)` - If `true`, tokens created against this role will
+  be orphan tokens (they will have no parent). As such, they will not be
+  automatically revoked by the revocation of any other token.
+- `period` `(string: "")` - If specified, the token will be periodic; it will have
+ no maximum TTL (unless an "explicit-max-ttl" is also set) but every renewal
+ will use the given period. Requires a root/sudo token to use.
+- `renewable` `(bool: true)` - Set to `false` to disable the ability of the token
+ to be renewed past its initial TTL. Setting the value to `true` will allow
+ the token to be renewable up to the system/mount maximum TTL.
+- `explicit_max_ttl` `(string: "")` - If set, the token will have an explicit
+ max TTL set upon it. This maximum token TTL *cannot* be changed later, and
+ unlike with normal tokens, updates to the system/mount max TTL value will
+ have no effect at renewal time -- the token will never be able to be renewed
+ or used past the value set at issue time.
+- `path_suffix` `(string: "")` - If set, tokens created against this role will
+ have the given suffix as part of their path in addition to the role name. This
+ can be useful in certain scenarios, such as keeping the same role name in the
+ future but revoking all tokens created against it before some point in time.
+ The suffix can be changed, allowing new callers to have the new suffix as part
+ of their path, and then tokens with the old suffix can be revoked via
+ `sys/revoke-prefix`.
+
+### Sample Payload
+
+```json
+{
+  "allowed_policies": [
+    "dev"
+  ],
+  "name": "nomad",
+  "orphan": false,
+  "renewable": true
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+    --request POST \
+    --data @payload.json \
+ https://vault.rocks/v1/auth/token/roles/nomad
+```
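+
+Because the role name becomes part of the token path, all tokens issued against
+this role can later be revoked in a single call; a sketch using the
+`sys/revoke-prefix` endpoint mentioned above:
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    https://vault.rocks/v1/sys/revoke-prefix/auth/token/create/nomad
+```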
+
+## Delete Token Role
+
+This endpoint deletes the named token role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/token/roles/:role_name` | `204 (empty body)` |
+
+### Parameters
+
+- `role_name` `(string: <required>)` - The name of the token role.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/token/roles/admins
+```
+
+## Tidy Tokens
+
+Performs some maintenance tasks to clean up invalid entries that may remain
+in the token store. Generally, running this is not needed unless upgrade
+notes or support personnel suggest it. This may perform a lot of I/O to the
+storage backend, so it should be used sparingly.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/token/tidy` | `204 (empty body)` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ https://vault.rocks/v1/auth/token/tidy
+```
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/userpass/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/userpass/index.html.md
new file mode 100644
index 0000000..42a35d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/userpass/index.html.md
@@ -0,0 +1,254 @@
+---
+layout: "api"
+page_title: "Userpass Auth Backend - HTTP API"
+sidebar_current: "docs-http-auth-userpass"
+description: |-
+ This is the API documentation for the Vault username and password
+ authentication backend.
+---
+
+# Username & Password Auth Backend HTTP API
+
+This is the API documentation for the Vault Username & Password authentication backend. For
+general information about the usage and operation of the Username and Password backend, please
+see the [Vault Userpass backend documentation](/docs/auth/userpass.html).
+
+This documentation assumes the Username & Password backend is mounted at the `/auth/userpass`
+path in Vault. Since it is possible to mount auth backends at any location,
+please update your API calls accordingly.
+
+## Create/Update User
+
+Create a new user or update an existing user. This path honors the distinction between the `create` and `update` capabilities inside ACL policies.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/userpass/users/:username` | `204 (empty body)` |
+
+### Parameters
+
+- `username` `(string: <required>)` – The username for the user.
+- `password` `(string: <required>)` - The password for the user. Only required
+  when creating the user.
+- `policies` `(string: "")` – Comma-separated list of policies. If set to empty
+ string, only the `default` policy will be applicable to the user.
+- `ttl` `(string: "")` - The lease duration which decides login expiration.
+- `max_ttl` `(string: "")` - Maximum duration after which login should expire.
+
+### Sample Payload
+
+```json
+{
+ "password": "superSecretPassword",
+ "policies": "admin,default"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/userpass/users/mitchellh
+```
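+
+The `ttl` and `max_ttl` parameters accept duration strings. As an illustration,
+a user whose logins expire after an hour and can be renewed for at most a day
+might be created with a payload along these lines:
+
+```json
+{
+  "password": "superSecretPassword",
+  "policies": "admin,default",
+  "ttl": "1h",
+  "max_ttl": "24h"
+}
+```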
+
+## Read User
+
+Reads the properties of an existing username.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/auth/userpass/users/:username` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/auth/userpass/users/mitchellh
+```
+
+### Sample Response
+
+```json
+{
+ "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6",
+ "lease_id": "",
+ "lease_duration": 0,
+ "renewable": false,
+ "data": {
+ "max_ttl": 0,
+ "policies": "default,dev",
+ "ttl": 0
+ },
+ "warnings": null
+}
+```
+
+## Delete User
+
+This endpoint deletes the user from the backend.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/auth/userpass/users/:username` | `204 (empty body)` |
+
+### Parameters
+
+- `username` `(string: <required>)` - The username for the user.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/auth/userpass/users/mitchellh
+```
+
+## Update Password on User
+
+Update password for an existing user.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/userpass/users/:username/password` | `204 (empty body)` |
+
+### Parameters
+
+- `username` `(string: <required>)` – The username for the user.
+- `password` `(string: <required>)` - The password for the user.
+
+### Sample Payload
+
+```json
+{
+  "password": "superSecretPassword2"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/userpass/users/mitchellh/password
+```
+
+## Update Policies on User
+
+Update policies for an existing user.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/userpass/users/:username/policies` | `204 (empty body)` |
+
+### Parameters
+
+- `username` `(string: <required>)` – The username for the user.
+- `policies` `(string: "")` – Comma-separated list of policies. If set to empty
+  string, only the `default` policy will be applicable to the user.
+
+### Sample Payload
+
+```json
+{
+  "policies": "policy1,policy2"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/userpass/users/mitchellh/policies
+```
+
+## List Users
+
+List available userpass users.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/auth/userpass/users` | `200 application/json` |
+| `GET` | `/auth/userpass/users?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+    --request LIST \
+ https://vault.rocks/v1/auth/userpass/users
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "keys": [
+ "mitchellh",
+ "armon"
+ ]
+ }
+}
+```
+
+## Login
+
+Login with the username and password.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/auth/userpass/login/:username` | `200 application/json` |
+
+### Parameters
+
+- `username` `(string: <required>)` – The username for the user.
+- `password` `(string: <required>)` - The password for the user.
+
+### Sample Payload
+
+```json
+{
+  "password": "superSecretPassword2"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/auth/userpass/login/mitchellh
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": null,
+ "warnings": null,
+ "auth": {
+ "client_token": "64d2a8f2-2a2f-5688-102b-e6088b76e344",
+ "accessor": "18bb8f89-826a-56ee-c65b-1736dc5ea27d",
+ "policies": ["default"],
+ "metadata": {
+ "username": "mitchellh"
+ },
+ "lease_duration": 7200,
+ "renewable": true
+ }
+}
+```
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/index.html.md
index 719b471..144bb0c 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/index.html.md
@@ -52,7 +52,7 @@ via the `X-Vault-Token` header for future requests.
## Reading, Writing, and Listing Secrets
Different backends implement different APIs according to their functionality.
-The examples below are created with the `generic` backend, which acts like a
+The examples below are created with the `kv` backend, which acts like a
Key/Value store. Read the documentation for a particular backend for detailed
information on its API; this simply provides a general overview.
@@ -64,7 +64,7 @@ following URL:
```
This maps to `secret/foo` where `foo` is the key in the `secret/` mount, which
-is mounted by default on a fresh Vault install and is of type `generic`.
+is mounted by default on a fresh Vault install and is of type `kv`.
Here is an example of reading a secret using cURL:
@@ -76,7 +76,7 @@ $ curl \
```
You can list secrets as well. To do this, either issue a GET with the query
-parameter `list=true`, or you can use the LIST HTTP verb. For the `generic`
+parameter `list=true`, or you can use the LIST HTTP verb. For the `kv`
backend, listing is allowed on directories only, and returns the keys in the
given directory:
@@ -154,10 +154,11 @@ The following HTTP status codes are used throughout the API.
- `200` - Success with data.
- `204` - Success, no data returned.
-- `400` - Invalid request, missing or invalid data. See the
- "validation" section for more details on the error response.
+- `400` - Invalid request, missing or invalid data.
- `403` - Forbidden, your authentication details are either
- incorrect or you don't have access to this feature.
+ incorrect, you don't have access to this feature, or - if CORS is
+ enabled - you made a cross-origin request from an origin that is
+ not allowed to make such requests.
- `404` - Invalid path. This can both mean that the path truly
doesn't exist or that you don't have permission to view a
specific path. We use 404 in some cases to avoid state leakage.
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/libraries.html.md b/vendor/github.com/hashicorp/vault/website/source/api/libraries.html.md
index 3e35be5..acfaff2 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/libraries.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/libraries.html.md
@@ -37,6 +37,7 @@ These libraries are provided by the community.
### Ansible
+* [ansible-vault](https://github.com/jhaals/ansible-vault) lookup plugin without third-party dependencies.
* [Ansible Modules Hashivault](https://pypi.python.org/pypi/ansible-modules-hashivault)
```shell
@@ -84,7 +85,6 @@ $ cabal install vault-tool
### Java
* [Spring Vault](https://github.com/spring-projects/spring-vault)
-* [vault-java](https://github.com/jhaals/vault-java)
* [vault-java-driver](https://github.com/BetterCloud/vault-java-driver)
### Kotlin
@@ -119,6 +119,20 @@ $ composer require jippi/vault-php-sdk
$ composer require violuke/vault-php-sdk
```
+* [vault-php](https://github.com/CSharpRU/vault-php)
+
+```shell
+$ composer require csharpru/vault-php
+```
+
+### PowerShell
+
+* [Zyborg.Vault](https://github.com/zyborg/Zyborg.Vault)
+
+```PowerShell
+Install-Module Zyborg.Vault
+```
+
### Python
* [HVAC](https://github.com/ianunruh/hvac)
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/aws/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/aws/index.html.md
index 25dc268..5e9236c 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/aws/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/aws/index.html.md
@@ -23,13 +23,17 @@ are multiple ways to pass root IAM credentials to the Vault server, specified
below with the highest precedence first. If credentials already exist, this will
overwrite them.
+The official AWS SDK is used for sourcing credentials from env vars, shared
+files, or IAM/ECS instances.
+
- Static credentials provided to the API as a payload
- Credentials in the `AWS_ACCESS_KEY`, `AWS_SECRET_KEY`, and `AWS_REGION`
environment variables **on the server**
-- Querying the EC2 metadata service if the **Vault server** is on EC2 and has
- querying capabilities
+- Shared credentials files
+
+- Assigned IAM role or ECS task role credentials
At present, this endpoint does not confirm that the provided AWS credentials are
valid AWS credentials with proper permissions.
@@ -44,7 +48,9 @@ valid AWS credentials with proper permissions.
- `secret_key` `(string: )` – Specifies the AWS secret access key.
-- `region` `(string: )` – Specifies the AWS region.
+- `region` `(string: )` – Specifies the AWS region. If not set it
+ will use the `AWS_REGION` env var, `AWS_DEFAULT_REGION` env var, or
+ `us-east-1` in that order.
### Sample Payload
@@ -230,6 +236,7 @@ This endpoint lists all existing roles in the backend.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/aws/roles` | `200 application/json` |
+| `GET` | `/aws/roles?list=true` | `200 application/json` |
### Sample Request
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/cassandra/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/cassandra/index.html.md
index 9f9471c..5bfabb5 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/cassandra/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/cassandra/index.html.md
@@ -8,6 +8,11 @@ description: |-
# Cassandra Secret Backend HTTP API
+~> **Deprecation Note:** This backend is deprecated in favor of the
+combined databases backend added in v0.7.1. See the API documentation for
+the new implementation of this backend at
+[Cassandra Database Plugin HTTP API](/api/secret/databases/cassandra.html).
+
This is the API documentation for the Vault Cassandra secret backend. For
general information about the usage and operation of the Cassandra backend,
please see the
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/consul/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/consul/index.html.md
index b91e956..3d8a3d2 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/consul/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/consul/index.html.md
@@ -152,6 +152,7 @@ This endpoint lists all existing roles in the backend.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/consul/roles` | `200 application/json` |
+| `GET` | `/consul/roles?list=true` | `200 application/json` |
### Sample Request
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/cubbyhole/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/cubbyhole/index.html.md
index 903baaa..3178af0 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/cubbyhole/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/cubbyhole/index.html.md
@@ -60,7 +60,8 @@ not return a value. The values themselves are not accessible via this command.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
-| `List` | `/cubbyhole/:path` | `200 application/json` |
+| `LIST` | `/cubbyhole/:path` | `200 application/json` |
+| `GET` | `/cubbyhole/:path?list=true` | `200 application/json` |
### Parameters
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/cassandra.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/cassandra.html.md
new file mode 100644
index 0000000..5b60b27
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/cassandra.html.md
@@ -0,0 +1,132 @@
+---
+layout: "api"
+page_title: "Cassandra Database Plugin - HTTP API"
+sidebar_current: "docs-http-secret-databases-cassandra"
+description: |-
+ The Cassandra plugin for Vault's Database backend generates database credentials to access Cassandra servers.
+---
+
+# Cassandra Database Plugin HTTP API
+
+The Cassandra Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the Cassandra database.
+
+## Configure Connection
+
+In addition to the parameters defined by the [Database
+Backend](/api/secret/databases/index.html#configure-connection), this plugin
+has a number of parameters to further configure a connection.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/database/config/:name` | `204 (empty body)` |
+
+### Parameters
+- `hosts` `(string: <required>)` – Specifies a set of comma-separated Cassandra
+  hosts to connect to.
+
+- `port` `(int: 9042)` – Specifies the default port to use if none is provided
+ as part of the host URI. Defaults to Cassandra's default transport port, 9042.
+
+- `username` `(string: <required>)` – Specifies the username to use for
+ superuser access.
+
+- `password` `(string: <required>)` – Specifies the password corresponding to
+ the given username.
+
+- `tls` `(bool: true)` – Specifies whether to use TLS when connecting to
+ Cassandra.
+
+- `insecure_tls` `(bool: false)` – Specifies whether to skip verification of the
+ server certificate when using TLS.
+
+- `pem_bundle` `(string: "")` – Specifies concatenated PEM blocks containing a
+ certificate and private key; a certificate, private key, and issuing CA
+ certificate; or just a CA certificate.
+
+- `pem_json` `(string: "")` – Specifies JSON containing a certificate and
+ private key; a certificate, private key, and issuing CA certificate; or just a
+  CA certificate. For convenience, the format is the same as the output of the
+ `issue` command from the `pki` backend; see
+ [the pki documentation](/docs/secrets/pki/index.html).
+
+- `protocol_version` `(int: 2)` – Specifies the CQL protocol version to use.
+
+- `connect_timeout` `(string: "5s")` – Specifies the connection timeout to use.
+
+TLS works as follows:
+
+- If `tls` is set to true, the connection will use TLS; this happens
+ automatically if `pem_bundle`, `pem_json`, or `insecure_tls` is set
+
+- If `insecure_tls` is set to true, the connection will not perform verification
+ of the server certificate; this also sets `tls` to true
+
+- If only `issuing_ca` is set in `pem_json`, or the only certificate in
+ `pem_bundle` is a CA certificate, the given CA certificate will be used for
+ server certificate verification; otherwise the system CA certificates will be
+ used
+
+- If `certificate` and `private_key` are set in `pem_bundle` or `pem_json`,
+ client auth will be turned on for the connection
+
+`pem_bundle` should be a PEM-concatenated bundle of a private key + client
+certificate, an issuing CA certificate, or both. `pem_json` should contain the
+same information; for convenience, the JSON format is the same as that output by
+the issue command from the PKI backend.
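+
+As an illustration, a TLS-enabled configuration with a custom CA might combine
+these options as follows (the PEM content is a placeholder, not real
+certificate material):
+
+```json
+{
+  "plugin_name": "cassandra-database-plugin",
+  "allowed_roles": "readonly",
+  "hosts": "cassandra1.local",
+  "username": "user",
+  "password": "pass",
+  "tls": true,
+  "pem_bundle": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"
+}
+```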
+
+### Sample Payload
+
+```json
+{
+ "plugin_name": "cassandra-database-plugin",
+ "allowed_roles": "readonly",
+ "hosts": "cassandra1.local",
+ "username": "user",
+ "password": "pass"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+    https://vault.rocks/v1/database/config/cassandra
+```
+
+## Statements
+
+Statements are configured during role creation and are used by the plugin to
+determine what is sent to the database on user creation, renewal, and
+revocation. For more information on configuring roles see the [Role
+API](/api/secret/databases/index.html#create-role) in the Database Backend docs.
+
+### Parameters
+
+The following are the statements used by this plugin. If not mentioned in this
+list the plugin does not support that statement type.
+
+- `creation_statements` `(string: "")` – Specifies the database
+ statements executed to create and configure a user. Must be a
+ semicolon-separated string, a base64-encoded semicolon-separated string, a
+ serialized JSON string array, or a base64-encoded serialized JSON string
+ array. The '{{name}}' and '{{password}}' values will be substituted. If not
+  provided, defaults to a generic create user statement that creates a
+ non-superuser.
+
+- `revocation_statements` `(string: "")` – Specifies the database statements to
+ be executed to revoke a user. Must be a semicolon-separated string, a
+ base64-encoded semicolon-separated string, a serialized JSON string array, or
+ a base64-encoded serialized JSON string array. The '{{name}}' value will be
+  substituted. If not provided, defaults to a generic drop user statement.
+
+- `rollback_statements` `(string: "")` – Specifies the database statements to be
+ executed to rollback a create operation in the event of an error. Must be a
+ semicolon-separated string, a base64-encoded semicolon-separated string, a
+ serialized JSON string array, or a base64-encoded serialized JSON string
+ array. The '{{name}}' value will be substituted. If not provided, defaults to
+  a generic drop user statement.
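+
+For example, a role granting read-only access might pass CQL creation
+statements like the following sketch; the connection name and the exact
+statements and grants are illustrative:
+
+```json
+{
+  "db_name": "cassandra",
+  "creation_statements": "CREATE USER '{{name}}' WITH PASSWORD '{{password}}' NOSUPERUSER; GRANT SELECT ON ALL KEYSPACES TO {{name}};",
+  "default_ttl": "1h",
+  "max_ttl": "24h"
+}
+```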
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/hanadb.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/hanadb.html.md
new file mode 100644
index 0000000..e108815
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/hanadb.html.md
@@ -0,0 +1,87 @@
+---
+layout: "api"
+page_title: "HANA Database Plugin - HTTP API"
+sidebar_current: "docs-http-secret-databases-hana"
+description: |-
+ The HANA plugin for Vault's Database backend generates database credentials to access HANA servers.
+---
+
+# HANA Database Plugin HTTP API
+
+The HANA Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the HANA database.
+
+## Configure Connection
+
+In addition to the parameters defined by the [Database
+Backend](/api/secret/databases/index.html#configure-connection), this plugin
+has a number of parameters to further configure a connection.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/database/config/:name` | `204 (empty body)` |
+
+### Parameters
+- `connection_url` `(string: <required>)` - Specifies the HANA DSN.
+
+- `max_open_connections` `(int: 2)` - Specifies the maximum number of open
+ connections to the database.
+
+- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle
+  connections to the database. A value of zero uses the value of
+  `max_open_connections`, and a negative value disables idle connections. If
+  larger than `max_open_connections`, it will be reduced to be equal.
+
+- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of
+  time a connection may be reused. If <= 0s, connections are reused forever.
+
+### Sample Payload
+
+```json
+{
+ "plugin_name": "hana-database-plugin",
+ "allowed_roles": "readonly",
+ "connection_url": "hdb://username:password@localhost:1433",
+ "max_open_connections": 5,
+  "max_connection_lifetime": "5s"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/database/config/hana
+```
+
+## Statements
+
+Statements are configured during role creation and are used by the plugin to
+determine what is sent to the database on user creation, renewal, and
+revocation. For more information on configuring roles see the [Role
+API](/api/secret/databases/index.html#create-role) in the Database Backend docs.
+
+### Parameters
+
+The following are the statements used by this plugin. If not mentioned in this
+list the plugin does not support that statement type.
+
+- `creation_statements` `(string: <required>)` – Specifies the database
+ statements executed to create and configure a user. Must be a
+ semicolon-separated string, a base64-encoded semicolon-separated string, a
+ serialized JSON string array, or a base64-encoded serialized JSON string
+ array. The '{{name}}', '{{password}}', and '{{expiration}}' values will be
+ substituted.
+ - The expiration time will be HANA server time plus the role's `default_ttl`.
+ If `default_ttl` is 0 or not set, a SQL HdbError 438 will be returned.
+
+- `revocation_statements` `(string: "")` – Specifies the database statements to
+ be executed to revoke a user. Must be a semicolon-separated string, a
+ base64-encoded semicolon-separated string, a serialized JSON string array, or
+ a base64-encoded serialized JSON string array. The '{{name}}' value will be
+ substituted. If not provided, defaults to dropping the user only if they have
+ no dependent objects.
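+
+As an example, a role's creation statements might look like the following
+sketch (illustrative HANA SQL; note that the role's `default_ttl` must be
+non-zero so that a valid `{{expiration}}` can be substituted):
+
+```json
+{
+  "db_name": "hana",
+  "creation_statements": "CREATE USER {{name}} PASSWORD {{password}} VALID UNTIL '{{expiration}}';",
+  "default_ttl": "1h",
+  "max_ttl": "24h"
+}
+```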
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/index.html.md
new file mode 100644
index 0000000..55b8fcb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/index.html.md
@@ -0,0 +1,337 @@
+---
+layout: "api"
+page_title: "Databases - HTTP API"
+sidebar_current: "docs-http-secret-databases"
+description: |-
+ Top page for database secret backend information
+---
+
+# Database Secret Backend HTTP API
+
+This is the API documentation for the Vault Database secret backend. For
+general information about the usage and operation of the Database backend,
+please see the
+[Vault Database backend documentation](/docs/secrets/databases/index.html).
+
+This documentation assumes the Database backend is mounted at the
+`/database` path in Vault. Since it is possible to mount secret backends at
+any location, please update your API calls accordingly.
+
+## Configure Connection
+
+This endpoint configures the connection string used to communicate with the
+desired database. In addition to the parameters listed here, each Database
+plugin has additional, plugin-specific parameters for this endpoint. Please
+read the HTTP API documentation for the plugin you wish to configure to see
+the full list of additional parameters.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/database/config/:name` | `204 (empty body)` |
+
+### Parameters
+- `name` `(string: <required>)` – Specifies the name for this database
+ connection. This is specified as part of the URL.
+
+- `plugin_name` `(string: <required>)` - Specifies the name of the plugin to use
+ for this connection.
+
+- `verify_connection` `(bool: true)` – Specifies if the connection is verified
+ during initial configuration. Defaults to true.
+
+- `allowed_roles` `(slice: [])` - Array or comma-separated string of the roles
+  allowed to use this connection. Defaults to empty (no roles). If it contains
+  a `"*"`, any role can use this connection.
+
+### Sample Payload
+
+```json
+{
+ "plugin_name": "mysql-database-plugin",
+ "allowed_roles": "readonly",
+ "connection_url": "root:mysql@tcp(127.0.0.1:3306)/"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/database/config/mysql
+```
+
+## Read Connection
+
+This endpoint returns the configuration settings for a connection.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/database/config/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the connection to read.
+ This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request GET \
+ https://vault.rocks/v1/database/config/mysql
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "allowed_roles": [
+ "readonly"
+ ],
+ "connection_details": {
+      "connection_url": "root:mysql@tcp(127.0.0.1:3306)/"
+    },
+    "plugin_name": "mysql-database-plugin"
+  }
+}
+```
+
+## Delete Connection
+
+This endpoint deletes a connection.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/database/config/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the connection to delete.
+ This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/database/config/mysql
+```
+
+## Reset Connection
+
+This endpoint closes a connection and its underlying plugin and restarts it
+with the configuration stored in the barrier.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/database/reset/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the connection to reset.
+ This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ https://vault.rocks/v1/database/reset/mysql
+```
+
+## Create Role
+
+This endpoint creates or updates a role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/database/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to create. This
+ is specified as part of the URL.
+
+- `db_name` `(string: <required>)` - The name of the database connection to use
+ for this role.
+
+- `default_ttl` `(string/int: 0)` - Specifies the TTL for the leases
+ associated with this role. Accepts time suffixed strings ("1h") or an integer
+ number of seconds. Defaults to system/backend default TTL time.
+
+- `max_ttl` `(string/int: 0)` - Specifies the maximum TTL for the leases
+ associated with this role. Accepts time suffixed strings ("1h") or an integer
+ number of seconds. Defaults to system/backend default TTL time.
+
+- `creation_statements` `(string: <required>)` – Specifies the database
+ statements executed to create and configure a user. See the plugin's API page
+ for more information on support and formatting for this parameter.
+
+- `revocation_statements` `(string: "")` – Specifies the database statements to
+ be executed to revoke a user. See the plugin's API page for more information
+ on support and formatting for this parameter.
+
+- `rollback_statements` `(string: "")` – Specifies the database statements to be
+  executed to roll back a create operation in the event of an error. Not every
+ plugin type will support this functionality. See the plugin's API page for
+ more information on support and formatting for this parameter.
+
+- `renew_statements` `(string: "")` – Specifies the database statements to be
+ executed to renew a user. Not every plugin type will support this
+ functionality. See the plugin's API page for more information on support and
+ formatting for this parameter.
+
+### Sample Payload
+
+```json
+{
+ "db_name": "mysql",
+ "creation_statements": "CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';",
+ "default_ttl": "1h",
+ "max_ttl": "24h"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/database/roles/my-role
+```
+
+## Read Role
+
+This endpoint queries the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/database/roles/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to read. This
+ is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/database/roles/my-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "creation_statements": "CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"{{name}}\";",
+ "db_name": "mysql",
+ "default_ttl": 3600,
+ "max_ttl": 86400,
+ "renew_statements": "",
+ "revocation_statements": "",
+ "rollback_statements": ""
+  }
+}
+```
+
+## List Roles
+
+This endpoint returns a list of available roles. Only the role names are
+returned, not any values.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/database/roles` | `200 application/json` |
+| `GET` | `/database/roles?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/database/roles
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "data": {
+ "keys": ["dev", "prod"]
+ },
+ "lease_duration": 2764800,
+ "lease_id": "",
+ "renewable": false
+}
+```
+
+## Delete Role
+
+This endpoint deletes the role definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/database/roles/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to delete. This
+ is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/database/roles/my-role
+```
+
+## Generate Credentials
+
+This endpoint generates a new set of dynamic credentials based on the named
+role.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/database/creds/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the role to create
+ credentials against. This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/database/creds/my-role
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "username": "root-1430158508-126",
+ "password": "132ae3ef-5a64-7499-351e-bfe59f3a2a21"
+ }
+}
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mongodb.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mongodb.html.md
new file mode 100644
index 0000000..48a8ae2
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mongodb.html.md
@@ -0,0 +1,87 @@
+---
+layout: "api"
+page_title: "MongoDB Database Plugin - HTTP API"
+sidebar_current: "docs-http-secret-databases-mongodb"
+description: |-
+ The MongoDB plugin for Vault's Database backend generates database credentials to access MongoDB servers.
+---
+
+# MongoDB Database Plugin HTTP API
+
+The MongoDB Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the MongoDB database.
+
+## Configure Connection
+
+In addition to the parameters defined by the [Database
+Backend](/api/secret/databases/index.html#configure-connection), this plugin
+has a number of parameters to further configure a connection.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/database/config/:name` | `204 (empty body)` |
+
+### Parameters
+- `connection_url` `(string: <required>)` – Specifies the MongoDB standard connection string (URI).
+
+### Sample Payload
+
+```json
+{
+ "plugin_name": "mongodb-database-plugin",
+ "allowed_roles": "readonly",
+ "connection_url": "mongodb://admin:Password!@mongodb.acme.com:27017/admin?ssl=true"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/database/config/mongodb
+```
+
+## Statements
+
+Statements are configured during role creation and are used by the plugin to
+determine what is sent to the database on user creation, renewal, and
+revocation. For more information on configuring roles see the [Role
+API](/api/secret/databases/index.html#create-role) in the Database Backend docs.
+
+### Parameters
+
+The following are the statements used by this plugin. If not mentioned in this
+list the plugin does not support that statement type.
+
+- `creation_statements` `(string: <required>)` – Specifies the database
+  statements executed to create and configure a user. Must be a
+  serialized JSON object, or a base64-encoded serialized JSON object.
+  The object can optionally contain a "db" string for session connection,
+  and must contain a "roles" array. This array contains objects that hold
+  a "role", and an optional "db" value, and is similar to the BSON document that
+  is accepted by MongoDB's `roles` field. Vault will transform this array into
+  such a format. For more information regarding the `roles` field, refer to
+  [MongoDB's documentation](https://docs.mongodb.com/manual/reference/method/db.createUser/).
+
+- `revocation_statements` `(string: "")` – Specifies the database statements to
+ be executed to revoke a user. Must be a serialized JSON object, or a base64-encoded
+ serialized JSON object. The object can optionally contain a "db" string. If no
+ "db" value is provided, it defaults to the "admin" database.
+
+### Sample Creation Statement
+
+```json
+{
+ "db": "admin",
+ "roles": [
+ {
+ "role": "read",
+      "db": "foo"
+ }
+ ]
+}
+```
\ No newline at end of file
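+
+When embedded in a role definition, an object like the one above is passed as a
+serialized (or base64-encoded) string; a sketch of the corresponding role
+payload, with an illustrative connection name:
+
+```json
+{
+  "db_name": "mongodb",
+  "creation_statements": "{ \"db\": \"admin\", \"roles\": [{ \"role\": \"read\", \"db\": \"foo\" }] }",
+  "default_ttl": "1h",
+  "max_ttl": "24h"
+}
+```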
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mssql.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mssql.html.md
new file mode 100644
index 0000000..42d7546
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mssql.html.md
@@ -0,0 +1,83 @@
+---
+layout: "api"
+page_title: "MSSQL Database Plugin - HTTP API"
+sidebar_current: "docs-http-secret-databases-mssql"
+description: |-
+ The MSSQL plugin for Vault's Database backend generates database credentials to access MSSQL servers.
+---
+
+# MSSQL Database Plugin HTTP API
+
+The MSSQL Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the MSSQL database.
+
+## Configure Connection
+
+In addition to the parameters defined by the [Database
+Backend](/api/secret/databases/index.html#configure-connection), this plugin
+has a number of parameters to further configure a connection.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/database/config/:name` | `204 (empty body)` |
+
+### Parameters
+- `connection_url` `(string: <required>)` - Specifies the MSSQL DSN.
+
+- `max_open_connections` `(int: 2)` - Specifies the maximum number of open
+ connections to the database.
+
+- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle
+ connections to the database. A value of zero uses the value of
+ `max_open_connections`, and a negative value disables idle connections. If
+ this is larger than `max_open_connections`, it will be reduced to match.
+
+- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of
+ time a connection may be reused. If the value is `0s` or less, connections
+ are reused forever.
+
+### Sample Payload
+
+```json
+{
+ "plugin_name": "mssql-database-plugin",
+ "allowed_roles": "readonly",
+ "connection_url": "sqlserver://sa:yourStrong(!)Password@localhost:1433",
+ "max_open_connections": 5,
+ "max_connection_lifetime": "5s"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/database/config/mssql
+```
+
+## Statements
+
+Statements are configured during role creation and are used by the plugin to
+determine what is sent to the database on user creation, renewal, and
+revocation. For more information on configuring roles see the [Role
+API](/api/secret/databases/index.html#create-role) in the Database Backend docs.
+
+### Parameters
+
+The following are the statements used by this plugin. If a statement type is
+not mentioned in this list, the plugin does not support it.
+
+- `creation_statements` `(string: <required>)` – Specifies the database
+ statements executed to create and configure a user. Must be a
+ semicolon-separated string, a base64-encoded semicolon-separated string, a
+ serialized JSON string array, or a base64-encoded serialized JSON string
+ array. The '{{name}}' and '{{password}}' values will be substituted.
+
+- `revocation_statements` `(string: "")` – Specifies the database statements to
+ be executed to revoke a user. Must be a semicolon-separated string, a
+ base64-encoded semicolon-separated string, a serialized JSON string array, or
+ a base64-encoded serialized JSON string array. The '{{name}}' value will be
+ substituted. If not provided, this defaults to a generic drop user statement.
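+
+For context, these statements are attached to a role through the [Role
+API](/api/secret/databases/index.html#create-role). The payload below is an
+illustrative sketch: the `db_name`, `default_ttl`, and `max_ttl` fields belong
+to the Role API, and the exact T-SQL grants are assumptions to adapt to your
+environment:
+
+```json
+{
+ "db_name": "mssql",
+ "creation_statements": "CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}'; CREATE USER [{{name}}] FOR LOGIN [{{name}}];",
+ "default_ttl": "1h",
+ "max_ttl": "24h"
+}
+```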
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mysql-maria.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mysql-maria.html.md
new file mode 100644
index 0000000..0a64ab4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mysql-maria.html.md
@@ -0,0 +1,83 @@
+---
+layout: "api"
+page_title: "MySQL/MariaDB Database Plugin - HTTP API"
+sidebar_current: "docs-http-secret-databases-mysql-maria"
+description: |-
+ The MySQL/MariaDB plugin for Vault's Database backend generates database credentials to access MySQL and MariaDB servers.
+---
+
+# MySQL/MariaDB Database Plugin HTTP API
+
+The MySQL Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the MySQL database.
+
+## Configure Connection
+
+In addition to the parameters defined by the [Database
+Backend](/api/secret/databases/index.html#configure-connection), this plugin
+has a number of parameters to further configure a connection.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/database/config/:name` | `204 (empty body)` |
+
+### Parameters
+- `connection_url` `(string: <required>)` - Specifies the MySQL DSN.
+
+- `max_open_connections` `(int: 2)` - Specifies the maximum number of open
+ connections to the database.
+
+- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle
+ connections to the database. A value of zero uses the value of
+ `max_open_connections`, and a negative value disables idle connections. If
+ this is larger than `max_open_connections`, it will be reduced to match.
+
+- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of
+ time a connection may be reused. If the value is `0s` or less, connections
+ are reused forever.
+
+### Sample Payload
+
+```json
+{
+ "plugin_name": "mysql-database-plugin",
+ "allowed_roles": "readonly",
+ "connection_url": "root:mysql@tcp(127.0.0.1:3306)/",
+ "max_open_connections": 5,
+ "max_connection_lifetime": "5s"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/database/config/mysql
+```
+
+## Statements
+
+Statements are configured during role creation and are used by the plugin to
+determine what is sent to the database on user creation, renewal, and
+revocation. For more information on configuring roles see the [Role
+API](/api/secret/databases/index.html#create-role) in the Database Backend docs.
+
+### Parameters
+
+The following are the statements used by this plugin. If a statement type is
+not mentioned in this list, the plugin does not support it.
+
+- `creation_statements` `(string: <required>)` – Specifies the database
+ statements executed to create and configure a user. Must be a
+ semicolon-separated string, a base64-encoded semicolon-separated string, a
+ serialized JSON string array, or a base64-encoded serialized JSON string
+ array. The '{{name}}' and '{{password}}' values will be substituted.
+
+- `revocation_statements` `(string: "")` – Specifies the database statements to
+ be executed to revoke a user. Must be a semicolon-separated string, a
+ base64-encoded semicolon-separated string, a serialized JSON string array, or
+ a base64-encoded serialized JSON string array. The '{{name}}' value will be
+ substituted. If not provided, this defaults to a generic drop user statement.
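+
+For context, these statements are attached to a role through the [Role
+API](/api/secret/databases/index.html#create-role). The payload below is an
+illustrative sketch: the `db_name`, `default_ttl`, and `max_ttl` fields belong
+to the Role API, and the exact grants are assumptions to adapt to your
+environment:
+
+```json
+{
+ "db_name": "mysql",
+ "creation_statements": "CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; GRANT SELECT ON *.* TO '{{name}}'@'%';",
+ "default_ttl": "1h",
+ "max_ttl": "24h"
+}
+```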
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/oracle.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/oracle.html.md
new file mode 100644
index 0000000..5a6f543
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/oracle.html.md
@@ -0,0 +1,83 @@
+---
+layout: "api"
+page_title: "Oracle Database Plugin - HTTP API"
+sidebar_current: "docs-http-secret-databases-oracle"
+description: |-
+ The Oracle plugin for Vault's Database backend generates database credentials to access Oracle servers.
+---
+
+# Oracle Database Plugin HTTP API
+
+The Oracle Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the Oracle database.
+
+## Configure Connection
+
+In addition to the parameters defined by the [Database
+Backend](/api/secret/databases/index.html#configure-connection), this plugin
+has a number of parameters to further configure a connection.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/database/config/:name` | `204 (empty body)` |
+
+### Parameters
+- `connection_url` `(string: <required>)` - Specifies the Oracle DSN.
+
+- `max_open_connections` `(int: 2)` - Specifies the maximum number of open
+ connections to the database.
+
+- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle
+ connections to the database. A value of zero uses the value of
+ `max_open_connections`, and a negative value disables idle connections. If
+ this is larger than `max_open_connections`, it will be reduced to match.
+
+- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of
+ time a connection may be reused. If the value is `0s` or less, connections
+ are reused forever.
+
+### Sample Payload
+
+```json
+{
+ "plugin_name": "oracle-database-plugin",
+ "allowed_roles": "readonly",
+ "connection_url": "system/Oracle@localhost:1521/OraDoc.localhost",
+ "max_open_connections": 5,
+ "max_connection_lifetime": "5s"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/database/config/oracle
+```
+
+## Statements
+
+Statements are configured during role creation and are used by the plugin to
+determine what is sent to the database on user creation, renewal, and
+revocation. For more information on configuring roles see the [Role
+API](/api/secret/databases/index.html#create-role) in the Database Backend docs.
+
+### Parameters
+
+The following are the statements used by this plugin. If a statement type is
+not mentioned in this list, the plugin does not support it.
+
+- `creation_statements` `(string: <required>)` – Specifies the database
+ statements executed to create and configure a user. Must be a
+ semicolon-separated string, a base64-encoded semicolon-separated string, a
+ serialized JSON string array, or a base64-encoded serialized JSON string
+ array. The '{{name}}' and '{{password}}' values will be substituted.
+
+- `revocation_statements` `(string: "")` – Specifies the database statements to
+ be executed to revoke a user. Must be a semicolon-separated string, a
+ base64-encoded semicolon-separated string, a serialized JSON string array, or
+ a base64-encoded serialized JSON string array. The '{{name}}' value will be
+ substituted. If not provided, this defaults to a generic drop user statement.
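+
+For context, these statements are attached to a role through the [Role
+API](/api/secret/databases/index.html#create-role). The payload below is an
+illustrative sketch: the `db_name`, `default_ttl`, and `max_ttl` fields belong
+to the Role API, and the exact grants are assumptions to adapt to your
+environment:
+
+```json
+{
+ "db_name": "oracle",
+ "creation_statements": "CREATE USER {{name}} IDENTIFIED BY {{password}}; GRANT CONNECT TO {{name}}; GRANT CREATE SESSION TO {{name}};",
+ "default_ttl": "1h",
+ "max_ttl": "24h"
+}
+```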
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/postgresql.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/postgresql.html.md
new file mode 100644
index 0000000..bb58a52
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/postgresql.html.md
@@ -0,0 +1,98 @@
+---
+layout: "api"
+page_title: "PostgreSQL Database Plugin - HTTP API"
+sidebar_current: "docs-http-secret-databases-postgresql"
+description: |-
+ The PostgreSQL plugin for Vault's Database backend generates database credentials to access PostgreSQL servers.
+---
+
+# PostgreSQL Database Plugin HTTP API
+
+The PostgreSQL Database Plugin is one of the supported plugins for the Database
+backend. This plugin generates database credentials dynamically based on
+configured roles for the PostgreSQL database.
+
+## Configure Connection
+
+In addition to the parameters defined by the [Database
+Backend](/api/secret/databases/index.html#configure-connection), this plugin
+has a number of parameters to further configure a connection.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/database/config/:name` | `204 (empty body)` |
+
+### Parameters
+- `connection_url` `(string: <required>)` - Specifies the PostgreSQL DSN.
+
+- `max_open_connections` `(int: 2)` - Specifies the maximum number of open
+ connections to the database.
+
+- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle
+ connections to the database. A value of zero uses the value of
+ `max_open_connections`, and a negative value disables idle connections. If
+ this is larger than `max_open_connections`, it will be reduced to match.
+
+- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of
+ time a connection may be reused. If the value is `0s` or less, connections
+ are reused forever.
+
+### Sample Payload
+
+```json
+{
+ "plugin_name": "postgresql-database-plugin",
+ "allowed_roles": "readonly",
+ "connection_url": "postgresql://root:root@localhost:5432/postgres",
+ "max_open_connections": 5,
+ "max_connection_lifetime": "5s"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/database/config/postgresql
+```
+
+## Statements
+
+Statements are configured during role creation and are used by the plugin to
+determine what is sent to the database on user creation, renewal, and
+revocation. For more information on configuring roles see the [Role
+API](/api/secret/databases/index.html#create-role) in the Database Backend docs.
+
+### Parameters
+
+The following are the statements used by this plugin. If a statement type is
+not mentioned in this list, the plugin does not support it.
+
+- `creation_statements` `(string: <required>)` – Specifies the database
+ statements executed to create and configure a user. Must be a
+ semicolon-separated string, a base64-encoded semicolon-separated string, a
+ serialized JSON string array, or a base64-encoded serialized JSON string
+ array. The '{{name}}', '{{password}}' and '{{expiration}}' values will be
+ substituted.
+
+- `revocation_statements` `(string: "")` – Specifies the database statements to
+ be executed to revoke a user. Must be a semicolon-separated string, a
+ base64-encoded semicolon-separated string, a serialized JSON string array, or
+ a base64-encoded serialized JSON string array. The '{{name}}' value will be
+ substituted. If not provided, this defaults to a generic drop user statement.
+
+- `rollback_statements` `(string: "")` – Specifies the database statements to be
+ executed to roll back a create operation in the event of an error. Not every
+ plugin type will support this functionality. Must be a semicolon-separated
+ string, a base64-encoded semicolon-separated string, a serialized JSON string
+ array, or a base64-encoded serialized JSON string array. The '{{name}}' value
+ will be substituted.
+
+- `renew_statements` `(string: "")` – Specifies the database statements to be
+ executed to renew a user. Not every plugin type will support this
+ functionality. Must be a semicolon-separated string, a base64-encoded
+ semicolon-separated string, a serialized JSON string array, or a
+ base64-encoded serialized JSON string array. The '{{name}}' and
+ '{{expiration}}' values will be substituted.
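+
+For context, these statements are attached to a role through the [Role
+API](/api/secret/databases/index.html#create-role). The payload below is an
+illustrative sketch: the `db_name`, `default_ttl`, and `max_ttl` fields belong
+to the Role API, and the exact grants are assumptions to adapt to your
+environment. Note the `{{expiration}}` placeholder, which this plugin
+substitutes in creation statements:
+
+```json
+{
+ "db_name": "postgresql",
+ "creation_statements": "CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"{{name}}\";",
+ "default_ttl": "1h",
+ "max_ttl": "24h"
+}
+```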
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/identity/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/identity/index.html.md
new file mode 100644
index 0000000..df87993
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/identity/index.html.md
@@ -0,0 +1,421 @@
+---
+layout: "api"
+page_title: "Identity Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-identity"
+description: |-
+ This is the API documentation for the Vault Identity secret backend.
+---
+
+# Identity Secret Backend HTTP API
+
+This is the API documentation for the Vault Identity secret backend. For
+general information about the usage and operation of the Identity backend,
+please see the
+[Vault Identity backend documentation](/docs/secrets/identity/index.html).
+
+## Register Entity
+
+This endpoint creates or updates an Entity.
+
+| Method | Path | Produces |
+| :------- | :------------------ | :----------------------|
+| `POST` | `/identity/entity` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: entity-<UUID>)` – Name of the entity.
+
+- `metadata` `(list of strings: [])` – Metadata to be associated with the entity. Format should be a list of `key=value` pairs.
+
+- `policies` `(list of strings: [])` – Policies to be tied to the entity. Comma separated list of strings.
+
+### Sample Payload
+
+```json
+{
+ "metadata": ["organization=hashicorp", "team=vault"],
+ "policies": ["eng-dev", "infra-dev"]
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/identity/entity
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "id": "8d6a45e5-572f-8f13-d226-cd0d1ec57297",
+ "personas": null
+ }
+}
+```
+
+## Read Entity by ID
+
+This endpoint queries the entity by its identifier.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/identity/entity/id/:id` | `200 application/json` |
+
+### Parameters
+
+- `id` `(string: <required>)` – Specifies the identifier of the entity.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/identity/entity/id/8d6a45e5-572f-8f13-d226-cd0d1ec57297
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "bucket_key_hash": "177553e4c58987f4cc5d7e530136c642",
+ "creation_time": "2017-07-25T20:29:22.614756844Z",
+ "id": "8d6a45e5-572f-8f13-d226-cd0d1ec57297",
+ "last_update_time": "2017-07-25T20:29:22.614756844Z",
+ "metadata": {
+ "organization": "hashicorp",
+ "team": "vault"
+ },
+ "name": "entity-c323de27-2ad2-5ded-dbf3-0c7ef98bc613",
+ "personas": [],
+ "policies": [
+ "eng-dev",
+ "infra-dev"
+ ]
+ }
+}
+```
+
+## Update Entity by ID
+
+This endpoint is used to update an existing entity.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/identity/entity/id/:id` | `200 application/json` |
+
+### Parameters
+
+- `id` `(string: <required>)` – Specifies the identifier of the entity.
+
+- `name` `(string: entity-<UUID>)` – Name of the entity.
+
+- `metadata` `(list of strings: [])` – Metadata to be associated with the entity. Format should be a list of `key=value` pairs.
+
+- `policies` `(list of strings: [])` – Policies to be tied to the entity. Comma separated list of strings.
+
+
+### Sample Payload
+
+```json
+{
+ "name":"updatedEntityName",
+ "metadata": ["organization=hashi", "team=nomad"],
+ "policies": ["eng-developers", "infra-developers"]
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/identity/entity/id/8d6a45e5-572f-8f13-d226-cd0d1ec57297
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "id": "8d6a45e5-572f-8f13-d226-cd0d1ec57297",
+ "personas": null
+ }
+}
+```
+
+## Delete Entity by ID
+
+This endpoint deletes an entity and all its associated personas.
+
+| Method | Path | Produces |
+| :--------- | :-------------------------- | :----------------------|
+| `DELETE` | `/identity/entity/id/:id` | `204 (empty body)` |
+
+### Parameters
+
+- `id` `(string: <required>)` – Specifies the identifier of the entity.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/identity/entity/id/8d6a45e5-572f-8f13-d226-cd0d1ec57297
+```
+
+## List Entities by ID
+
+This endpoint returns a list of available entities by their identifiers.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/identity/entity/id` | `200 application/json` |
+| `GET` | `/identity/entity/id?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/identity/entity/id
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "keys": [
+ "02fe5a88-912b-6794-62ed-db873ef86a95",
+ "3bf81bc9-44df-8138-57f9-724a9ae36d04",
+ "627fba68-98c9-c012-71ba-bfb349585ce1",
+ "6c4c805b-b384-3d0e-4d51-44d349887b96",
+ "70a72feb-35d1-c775-0813-8efaa8b4b9b5",
+ "f1092a67-ce34-48fd-161d-c13a367bc1cd",
+ "faedd89a-0d82-c197-c8f9-93a3e6cf0cd0"
+ ]
+ }
+}
+```
+
+## Register Persona
+
+This endpoint creates a new persona and attaches it to the entity with the
+given identifier.
+
+| Method | Path | Produces |
+| :------- | :------------------ | :----------------------|
+| `POST` | `/identity/persona` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Name of the persona. The name should be the
+ identifier of the client in the authentication source. For example, if the
+ persona belongs to the userpass backend, the name should be a valid username
+ within the userpass backend. If the persona belongs to GitHub, it should be
+ the GitHub username.
+
+- `entity_id` `(string: <required>)` – Entity ID to which this persona belongs.
+
+- `mount_accessor` `(string: <required>)` – Accessor of the mount to which the
+ persona should belong.
+
+- `metadata` `(list of strings: [])` – Metadata to be associated with the persona. Format should be a list of `key=value` pairs.
+
+### Sample Payload
+
+```json
+{
+ "name": "testuser",
+ "metadata": ["group=san_francisco", "region=west"],
+ "entity_id": "404e57bc-a0b1-a80f-0a73-b6e92e8a52d3",
+ "mount_accessor": "auth_userpass_e50b1a44"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/identity/persona
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "entity_id": "404e57bc-a0b1-a80f-0a73-b6e92e8a52d3",
+ "id": "34982d3d-e3ce-5d8b-6e5f-b9bb34246c31"
+ }
+}
+```
+
+## Read Persona by ID
+
+This endpoint queries the persona by its identifier.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/identity/persona/id/:id` | `200 application/json` |
+
+### Parameters
+
+- `id` `(string: <required>)` – Specifies the identifier of the persona.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/identity/persona/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "creation_time": "2017-07-25T21:41:09.820717636Z",
+ "entity_id": "404e57bc-a0b1-a80f-0a73-b6e92e8a52d3",
+ "id": "34982d3d-e3ce-5d8b-6e5f-b9bb34246c31",
+ "last_update_time": "2017-07-25T21:41:09.820717636Z",
+ "metadata": {
+ "group": "san_francisco",
+ "region": "west"
+ },
+ "mount_accessor": "auth_userpass_e50b1a44",
+ "mount_path": "userpass/",
+ "mount_type": "userpass",
+ "name": "testuser"
+ }
+}
+```
+
+## Update Persona by ID
+
+This endpoint is used to update an existing persona.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/identity/persona/id/:id` | `200 application/json` |
+
+### Parameters
+
+- `id` `(string: <required>)` – Specifies the identifier of the persona.
+
+- `name` `(string: <required>)` – Name of the persona. The name should be the
+ identifier of the client in the authentication source. For example, if the
+ persona belongs to the userpass backend, the name should be a valid username
+ within the userpass backend. If the persona belongs to GitHub, it should be
+ the GitHub username.
+
+- `entity_id` `(string: <required>)` – Entity ID to which this persona belongs.
+
+- `mount_accessor` `(string: <required>)` – Accessor of the mount to which the
+ persona should belong.
+
+- `metadata` `(list of strings: [])` – Metadata to be associated with the
+ persona. Format should be a list of `key=value` pairs.
+
+### Sample Payload
+
+```json
+{
+ "name": "testuser",
+ "metadata": ["group=philadelphia", "region=east"],
+ "entity_id": "404e57bc-a0b1-a80f-0a73-b6e92e8a52d3",
+ "mount_accessor": "auth_userpass_e50b1a44"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/identity/persona/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "entity_id": "404e57bc-a0b1-a80f-0a73-b6e92e8a52d3",
+ "id": "34982d3d-e3ce-5d8b-6e5f-b9bb34246c31"
+ }
+}
+```
+
+## Delete Persona by ID
+
+This endpoint deletes a persona from its corresponding entity.
+
+| Method | Path | Produces |
+| :--------- | :-------------------------- | :----------------------|
+| `DELETE` | `/identity/persona/id/:id` | `204 (empty body)` |
+
+### Parameters
+
+- `id` `(string: <required>)` – Specifies the identifier of the persona.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/identity/persona/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31
+```
+
+## List Personas by ID
+
+This endpoint returns a list of available personas by their identifiers.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/identity/persona/id` | `200 application/json` |
+| `GET` | `/identity/persona/id?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/identity/persona/id
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "keys": [
+ "2e8217fa-8cb6-8aec-9e22-3196d74ca2ba",
+ "91ebe973-ec86-84db-3c7c-f760415326de",
+ "92308b08-4139-3ec6-7af2-8e98166b4e0c",
+ "a3b042e6-5cc1-d5a9-8874-d53a51954de2",
+ "d5844921-017f-e496-2a9a-23d4a2f3e8a3"
+ ]
+ }
+}
+```
+
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/generic/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/kv/index.html.md
similarity index 78%
rename from vendor/github.com/hashicorp/vault/website/source/api/secret/generic/index.html.md
rename to vendor/github.com/hashicorp/vault/website/source/api/secret/kv/index.html.md
index be00171..ffa784e 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/generic/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/kv/index.html.md
@@ -1,18 +1,18 @@
---
layout: "api"
-page_title: "Generic Secret Backend - HTTP API"
-sidebar_current: "docs-http-secret-generic"
+page_title: "Key/Value Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-kv"
description: |-
- This is the API documentation for the Vault Generic secret backend.
+ This is the API documentation for the Vault Key/Value secret backend.
---
-# Generic Secret Backend HTTP API
+# Key/Value Secret Backend HTTP API
-This is the API documentation for the Vault Generic secret backend. For general
-information about the usage and operation of the Generic backend, please see
-the [Vault Generic backend documentation](/docs/secrets/generic/index.html).
+This is the API documentation for the Vault Key/Value secret backend. For general
+information about the usage and operation of the Key/Value backend, please see
+the [Vault Key/Value backend documentation](/docs/secrets/kv/index.html).
-This documentation assumes the Generic backend is mounted at the `/secret`
+This documentation assumes the Key/Value backend is mounted at the `/secret`
path in Vault. Since it is possible to mount secret backends at any location,
please update your API calls accordingly.
@@ -51,6 +51,12 @@ $ curl \
}
```
+_Note_: the `lease_duration` field (which on the CLI shows as
+`refresh_interval`) is advisory. No lease is created. This is a way for writers
+to indicate how often a given value should be re-read by the client. See the
+[Vault Key/Value backend documentation](/docs/secrets/kv/index.html) for
+more details.
+
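+As an illustrative sketch (the `secret/my-app` path and the payload values are
+hypothetical), a writer sets this advisory interval by storing a `ttl` key
+alongside the data:
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data '{"api_key": "abc123", "ttl": "30m"}' \
+ https://vault.rocks/v1/secret/my-app
+```
+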
## List Secrets
This endpoint returns a list of key names at the specified location. Folders are
@@ -62,6 +68,7 @@ this command.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/secret/:path` | `200 application/json` |
+| `GET` | `/secret/:path?list=true` | `200 application/json` |
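+
+As an illustrative sketch (with a hypothetical `secret/my-app` path), the `GET`
+variant serves clients that cannot issue a custom `LIST` verb:
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/secret/my-app?list=true
+```
+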
### Parameters
@@ -114,8 +121,9 @@ policy granting the `update` capability.
- `:key` `(string: "")` – Specifies a key, paired with an associated value, to
be held at the given location. Multiple key/value pairs can be specified, and
- all will be returned on a read operation. A key called `ttl` will trigger some
- special behavior; see above for details.
+ all will be returned on a read operation. A key called `ttl` will trigger
+ some special behavior; see the [Vault Key/Value backend
+ documentation](/docs/secrets/kv/index.html) for details.
### Sample Payload
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/mongodb/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/mongodb/index.html.md
index e833b5c..763027e 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/mongodb/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/mongodb/index.html.md
@@ -8,6 +8,11 @@ description: |-
# MongoDB Secret Backend HTTP API
+~> **Deprecation Note:** This backend is deprecated in favor of the
+combined databases backend added in v0.7.1. See the API documentation for
+the new implementation of this backend at
+[MongoDB Database Plugin HTTP API](/api/secret/databases/mongodb.html).
+
This is the API documentation for the Vault MongoDB secret backend. For general
information about the usage and operation of the MongoDB backend, please see
the [Vault MongoDB backend documentation](/docs/secrets/mongodb/index.html).
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/mssql/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/mssql/index.html.md
index 678eea5..e340012 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/mssql/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/mssql/index.html.md
@@ -8,6 +8,11 @@ description: |-
# MSSQL Secret Backend HTTP API
+~> **Deprecation Note:** This backend is deprecated in favor of the
+combined databases backend added in v0.7.1. See the API documentation for
+the new implementation of this backend at
+[MSSQL Database Plugin HTTP API](/api/secret/databases/mssql.html).
+
This is the API documentation for the Vault MSSQL secret backend. For general
information about the usage and operation of the MSSQL backend, please see
the [Vault MSSQL backend documentation](/docs/secrets/mssql/index.html).
@@ -164,6 +169,7 @@ returned, not any values.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/mssql/roles` | `200 application/json` |
+| `GET` | `/mssql/roles?list=true` | `200 application/json` |
### Sample Request
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/mysql/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/mysql/index.html.md
index 1d4bb90..8f0d55a 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/mysql/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/mysql/index.html.md
@@ -8,6 +8,11 @@ description: |-
# MySQL Secret Backend HTTP API
+~> **Deprecation Note:** This backend is deprecated in favor of the
+combined databases backend added in v0.7.1. See the API documentation for
+the new implementation of this backend at
+[MySQL/MariaDB Database Plugin HTTP API](/api/secret/databases/mysql-maria.html).
+
This is the API documentation for the Vault MySQL secret backend. For general
information about the usage and operation of the MySQL backend, please see
the [Vault MySQL backend documentation](/docs/secrets/mysql/index.html).
@@ -185,6 +190,7 @@ returned, not any values.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/mysql/roles` | `200 application/json` |
+| `GET` | `/mysql/roles?list=true` | `200 application/json` |
### Sample Request
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/pki/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/pki/index.html.md
index 37eadb0..ade89d3 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/pki/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/pki/index.html.md
@@ -16,11 +16,42 @@ This documentation assumes the PKI backend is mounted at the `/pki` path in
Vault. Since it is possible to mount secret backends at any location, please
update your API calls accordingly.
+## Table of Contents
+
+* [Read CA Certificate](#read-ca-certificate)
+* [Read CA Certificate Chain](#read-ca-certificate-chain)
+* [Read Certificate](#read-certificate)
+* [List Certificates](#list-certificates)
+* [Submit CA Information](#submit-ca-information)
+* [Read CRL Configuration](#read-crl-configuration)
+* [Set CRL Configuration](#set-crl-configuration)
+* [Read URLs](#read-urls)
+* [Set URLs](#set-urls)
+* [Read CRL](#read-crl)
+* [Rotate CRLs](#rotate-crls)
+* [Generate Intermediate](#generate-intermediate)
+* [Set Signed Intermediate](#set-signed-intermediate)
+* [Generate Certificate](#generate-certificate)
+* [Revoke Certificate](#revoke-certificate)
+* [Create/Update Role](#create-update-role)
+* [Read Role](#read-role)
+* [List Roles](#list-roles)
+* [Delete Role](#delete-role)
+* [Generate Root](#generate-root)
+* [Delete Root](#delete-root)
+* [Sign Intermediate](#sign-intermediate)
+* [Sign Self-Issued](#sign-self-issued)
+* [Sign Certificate](#sign-certificate)
+* [Sign Verbatim](#sign-verbatim)
+* [Tidy](#tidy)
+
## Read CA Certificate
This endpoint retrieves the CA certificate *in raw DER-encoded form*. This is a
-bare endpoint that does not return a standard Vault data structure. If `/pem` is
-added to the endpoint, the CA certificate is returned in PEM format.
+bare endpoint that does not return a standard Vault data structure and cannot
+be read by the Vault CLI. If `/pem` is added to the endpoint, the CA
+certificate is returned in PEM format.
This is an unauthenticated endpoint.
@@ -45,7 +76,7 @@ $ curl \
This endpoint retrieves the CA certificate chain, including the CA _in PEM
format_. This is a bare endpoint that does not return a standard Vault data
-structure.
+structure and cannot be read by the Vault CLI.
This is an unauthenticated endpoint.
@@ -110,6 +141,7 @@ This endpoint returns a list of the current certificates by serial number only.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/pki/certs` | `200 application/json` |
+| `GET` | `/pki/certs?list=true` | `200 application/json` |
### Sample Request
@@ -432,8 +464,6 @@ $ curl \
https://vault.rocks/v1/pki/intermediate/generate/internal
```
-### Sample Response
-
```json
{
"lease_id": "",
@@ -805,6 +835,7 @@ returned, not any values.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/pki/roles` | `200 application/json` |
+| `GET` | `/pki/roles?list=true` | `200 application/json` |
### Sample Request
@@ -854,14 +885,18 @@ $ curl \
## Generate Root
-This endpoint generates a new self-signed CA certificate and private key. _This
-will overwrite any previously-existing private key and certificate._ If the path
-ends with `exported`, the private key will be returned in the response; if it is
-`internal` the private key will not be returned and *cannot be retrieved later*.
-Distribution points use the values set via `config/urls`.
+This endpoint generates a new self-signed CA certificate and private key. If
+the path ends with `exported`, the private key will be returned in the
+response; if it is `internal` the private key will not be returned and *cannot
+be retrieved later*. Distribution points use the values set via `config/urls`.
-As with other issued certificates, Vault will automatically revoke the generated
-root at the end of its lease period; the CA certificate will sign its own CRL.
+As with other issued certificates, Vault will automatically revoke the
+generated root at the end of its lease period; the CA certificate will sign its
+own CRL.
+
+As of Vault 0.8.1, if a CA cert/key already exists within the backend, this
+function will return a 204 and will not overwrite it. Previous versions of
+Vault would overwrite the existing cert/key with new values.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
@@ -912,6 +947,12 @@ root at the end of its lease period; the CA certificate will sign its own CRL.
Useful if the CN is not a hostname or email address, but is instead some
human-readable identifier.
+- `permitted_dns_domains` `(string: "")` – A comma-separated string (or string
+ array) containing DNS domains for which certificates are allowed to be issued
+ or signed by this CA certificate. Supports subdomains via a `.` in front of
+ the domain, as per
+ [RFC](https://tools.ietf.org/html/rfc5280#section-4.2.1.10).
+
### Sample Payload
```json
@@ -946,6 +987,26 @@ $ curl \
}
```
+## Delete Root
+
+This endpoint deletes the current CA key (the old CA certificate will still be
+accessible for reading until a new certificate/key are generated or uploaded).
+_This endpoint requires sudo/root privileges._
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/pki/root` | `204 (empty body)` |
+
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/pki/root
+```
+
## Sign Intermediate
This endpoint uses the configured CA certificate to issue a certificate with
@@ -974,7 +1035,8 @@ verbatim.
- `ttl` `(string: "")` – Specifies the requested Time To Live (after which the
certificate will be expired). This cannot be larger than the mount max (or, if
- not set, the system max).
+ not set, the system max). However, this can be after the expiration of the
+ signing CA.
- `format` `(string: "pem")` – Specifies the format for returned data. Can be
`pem`, `der`, or `pem_bundle`. If `der`, the output is base64 encoded. If
@@ -1001,13 +1063,18 @@ verbatim.
path; 3) Extensions requested in the CSR will be copied into the issued
certificate.
+- `permitted_dns_domains` `(string: "")` – A comma-separated string (or string
+ array) containing DNS domains for which certificates are allowed to be issued
+ or signed by this CA certificate. Supports subdomains via a `.` in front of
+ the domain, as per
+ [RFC](https://tools.ietf.org/html/rfc5280#section-4.2.1.10).
+
### Sample Payload
```json
{
"csr": "...",
"common_name": "example.com"
-
}
```
@@ -1037,6 +1104,65 @@ $ curl \
"auth": null
}
```
+## Sign Self-Issued
+
+This endpoint uses the configured CA certificate to sign a self-issued
+certificate (which will usually be a self-signed certificate as well).
+
+**_This is an extremely privileged endpoint_**. The given certificate will be
+signed as-is with only minimal validation performed (is it a CA cert, and is it
+actually self-issued). The only values that will be changed will be the
+authority key ID, the issuer DN, and, if set, any distribution points.
+
+This is generally only needed for root certificate rolling in cases where you
+don't want/can't get access to a CSR (such as if it's a root stored in Vault
+where the key is not exposed). If you don't know whether you need this
+endpoint, you most likely should be using a different endpoint (such as
+`sign-intermediate`).
+
+This endpoint requires `sudo` capability.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/pki/root/sign-self-issued` | `200 application/json` |
+
+### Parameters
+
+- `certificate` `(string: <required>)` – Specifies the PEM-encoded self-issued certificate.
+
+### Sample Payload
+
+```json
+{
+ "certificate": "..."
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/pki/root/sign-self-issued
+```
+
+### Sample Response
+
+```json
+{
+ "lease_id": "",
+ "renewable": false,
+ "lease_duration": 0,
+ "data": {
+ "certificate": "-----BEGIN CERTIFICATE-----\nMIIDzDCCAragAwIBAgIUOd0ukLcjH43TfTHFG9qE0FtlMVgwCwYJKoZIhvcNAQEL\n...\numkqeYeO30g1uYvDuWLXVA==\n-----END CERTIFICATE-----\n",
+ "issuing_ca": "-----BEGIN CERTIFICATE-----\nMIIDUTCCAjmgAwIBAgIJAKM+z4MSfw2mMA0GCSqGSIb3DQEBCwUAMBsxGTAXBgNV\n...\nG/7g4koczXLoUM3OQXd5Aq2cs4SS1vODrYmgbioFsQ3eDHd1fg==\n-----END CERTIFICATE-----\n"
+ },
+ "auth": null
+}
+```
+
## Sign Certificate
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/postgresql/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/postgresql/index.html.md
index 7c3e2b6..e974ffe 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/postgresql/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/postgresql/index.html.md
@@ -8,6 +8,11 @@ description: |-
# PostgreSQL Secret Backend HTTP API
+~> **Deprecation Note:** This backend is deprecated in favor of the
+combined databases backend added in v0.7.1. See the API documentation for
+the new implementation of this backend at
+[PostgreSQL Database Plugin HTTP API](/api/secret/databases/postgresql.html).
+
This is the API documentation for the Vault PostgreSQL secret backend. For
general information about the usage and operation of the PostgreSQL backend,
please see the
@@ -179,6 +184,7 @@ returned, not any values.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/postgresql/roles` | `200 application/json` |
+| `GET` | `/postgresql/roles?list=true` | `200 application/json` |
### Sample Request
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/rabbitmq/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/rabbitmq/index.html.md
index 3948928..e5dffb5 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/rabbitmq/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/rabbitmq/index.html.md
@@ -61,8 +61,7 @@ $ curl \
## Configure Lease
-This endpoint configures the lease settings for generated credentials. This is
-endpoint requires sudo privileges.
+This endpoint configures the lease settings for generated credentials.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
@@ -168,7 +167,7 @@ This endpoint deletes the role definition.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
-| `DELETE` | `/rabbitmq/roles/:namer` | `204 (empty body)` |
+| `DELETE` | `/rabbitmq/roles/:name` | `204 (empty body)` |
### Parameters
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/ssh/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/ssh/index.html.md
index 03133ad..37da8a4 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/ssh/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/ssh/index.html.md
@@ -144,7 +144,7 @@ This endpoint creates or updates a named role.
`allow_subdomains`.
- `key_option_specs` `(string: "")` – Specifies a comma separated option
- specification which will be prefixed to RSA keys in the remote host's
+ specification which will be prefixed to RSA keys in the remote host's
authorized_keys file. N.B.: Vault does not check this string for validity.
- `ttl` `(string: "")` – Specifies the Time To Live value provided as a string
@@ -195,6 +195,13 @@ This endpoint creates or updates a named role.
will always be the token display name. The key ID is logged by the SSH server
and can be useful for auditing.
+- `key_id_format` `(string: "")` – When supplied, this value specifies a custom
+ format for the key ID of a signed certificate. The following variables are
+ available for use: '{{token_display_name}}' - The display name of the token
+ used to make the request. '{{role_name}}' - The name of the role signing the
+ request. '{{public_key_hash}}' - A SHA256 checksum of the public key that is
+ being signed. For example: "custom-keyid-{{token_display_name}}".
+
### Sample Payload
```json
@@ -286,6 +293,7 @@ returned, not any values.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/ssh/roles` | `200 application/json` |
+| `GET` | `/ssh/roles?list=true` | `200 application/json` |
### Sample Request
@@ -612,7 +620,7 @@ overridden._
- `public_key` `(string: "")` – Specifies the public key part of the SSH CA key
pair; required if `generate_signing_key` is false.
-- `generate_signing_key` `(bool: false)` – Specifies if Vault should generate
+- `generate_signing_key` `(bool: true)` – Specifies if Vault should generate
the signing key pair internally. The generated public key will be returned so
you can add it to your configuration.
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/totp/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/totp/index.html.md
new file mode 100644
index 0000000..0ed35d6
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/totp/index.html.md
@@ -0,0 +1,273 @@
+---
+layout: "api"
+page_title: "TOTP Secret Backend - HTTP API"
+sidebar_current: "docs-http-secret-totp"
+description: |-
+ This is the API documentation for the Vault TOTP secret backend.
+---
+
+# TOTP Secret Backend HTTP API
+
+This is the API documentation for the Vault TOTP secret backend. For
+general information about the usage and operation of the TOTP backend,
+please see the
+[Vault TOTP backend documentation](/docs/secrets/totp/index.html).
+
+This documentation assumes the TOTP backend is mounted at the
+`/totp` path in Vault. Since it is possible to mount secret backends at
+any location, please update your API calls accordingly.
+
+## Create Key
+
+This endpoint creates or updates a key definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------------------------------------------------------------------------------- |
+| `POST` | `/totp/keys/:name` | if generating a key and exported is true: `200 application/json` else: `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the key to create. This is specified as part of the URL.
+
+- `generate` `(bool: false)` – Specifies if a key should be generated by Vault or if a key is being passed from another service.
+
+- `exported` `(bool: true)` – Specifies if a QR code and url are returned upon generating a key. Only used if generate is true.
+
+- `key_size` `(int: 20)` – Specifies the size in bytes of the Vault generated key. Only used if generate is true.
+
+- `url` `(string: "")` – Specifies the TOTP key url string that can be used to configure a key. Only used if generate is false.
+
+- `key` `(string: <required>)` – Specifies the master key used to generate a TOTP code. Only used if generate is false.
+
+- `issuer` `(string: "")` – Specifies the name of the key’s issuing organization.
+
+- `account_name` `(string: "")` – Specifies the name of the account associated with the key.
+
+- `period` `(int or duration format string: 30)` – Specifies the length of time in seconds used to generate a counter for the TOTP code calculation.
+
+- `algorithm` `(string: "SHA1")` – Specifies the hashing algorithm used to generate the TOTP code. Options include "SHA1", "SHA256" and "SHA512".
+
+- `digits` `(int: 6)` – Specifies the number of digits in the generated TOTP code. This value can be set to 6 or 8.
+
+- `skew` `(int: 1)` – Specifies the number of delay periods that are allowed when validating a TOTP code. This value can be either 0 or 1. Only used if generate is true.
+
+- `qr_size` `(int: 200)` – Specifies the pixel size of the square QR code when generating a new key. Only used if generate is true and exported is true. If this value is 0, a QR code will not be returned.
+
+### Sample Payload
+
+```json
+{
+ "url": "otpauth://totp/Google:test@gmail.com?secret=Y64VEVMBTSXCYIWRSHRNDZW62MPGVU2G&issuer=Google"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/totp/keys/my-key
+```
+
+### Sample Payload
+
+```json
+{
+ "generate": true,
+ "issuer": "Google",
+ "account_name": "test@gmail.com"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/totp/keys/my-key
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "barcode": "iVBORw0KGgoAAAANSUhEUgAAAMgAAADIEAAAAADYoy0BAAAGXklEQVR4nOyd4Y4iOQyEmRPv/8p7upX6BJm4XbbDbK30fT9GAtJJhpLjdhw3z1+/HmDEP396AvDO878/X1+9i1frWvu5Po/6Xz+P2kft1nFVa1f7z+YdjT/5PrEQMxDEDAQx4/n6orsGr6z9ZP1mviMbP/MBav/R6/U61Ud0vk8sxAwEMQNBzHju3lTvv6P2ajwS9Ve9zz+9pkfjRp+r/SjzwULMQBAzEMSMrQ/pUo0bouun7dW9LXVvrBq/TMBCzEAQMxDEjKM+JFqT17W4mu9Y+49eq/OL3r/GVX3CJ7KtWIgZCGIGgpix9SHTtXGa4476qfoa1adVc+HV/6/yfWIhZiCIGQhixpsP6Z4nulD3lqavV7q+Yvo6G7/zfWIhZiCIGQhixteJ/Rh1Da3e71d9RjRul2ocdeK7xELMQBAzEMSM3z6ku6dTrdOo1l9M6y5O7clVx5n4SCzEDAQxA0HMuN3L+qlavqj9itpePY+VtVdrHqfzeQULMQNBzEAQM97ikAv1vr/brltTeCp/svarcjLe2F1PnbohCGIGgphRqjG8mJ6PmtYMVnP363Vqv6d8qZrzf2AhfiCIGQhixm0c8n+jQ8+7+jZ4cY3PrlfHO/1Ml+45st18sRAzEMQMBDHjdxyixgPqs0lWsvvwqH00zrSO41R80p3XXXssxAwEMQNBzJCeuaieo6pedzGtb1/76fqgLH6ofg+dZ65gIWYgiBkIYsbbs9/V+/EVde1V+62eh1I/r/qIrs+Ixo2uYy/LGAQxA0HMeNvLilDX1OraXc2jVNtPzxJXr6v+HzuwEDMQxAwEMWNbp95d21WmzzBR6066e07dPMq0XoW9LEMQxAwEMUOqUz+1p9ONd07Xz586u6yifp/4EEMQxAwEMUPay7rIcthqTrx6v1/NTX+qZrIbF63v34GFmIEgZiCIGdvfU++e1a3GM2oOPjtvpfbfjS+qeZFJXgcLMQNBzEAQM6Tn9p7OLVdrFqP5TFF9ZXTdqfqTV7AQMxDEDAQx482HdPMPGdN8SjeHr6710zzJidrCB/kQTxDEDAQxY7uXdTGNC9S9pK6vqs6nWzdyej53PhELMQNBzEAQM0o59YtTz/xQfVO3jmOdl0rmE6f5ort5YSFmIIgZCGLGbU69eka3ep+v5sCzcbp5jZXMR0zr+aPPqVM3BkHMQBAzRs/tjejmwj9d05ihzq96nQr5EEMQxAwEMWPrQy6q9/fdevFTcVA0v+n5K7U/tf4lGhcfYgiCmIEgZtw+6+RCXUurvkKlepZ2vS5i+oyTaby0GxcLMQNBzEAQM0r5kKnv6K6xK9X4R13zu+eyJnXpazssxAwEMQNBzNj+fkg3nqjGK9laPz1vleXwq2v+p+vciUMMQRAzEMSM298xrOYDVqrtpmtzt59uHqc6v2zcBxbiB4KYgSBmbOvUV7q577VdOIliXqLr87p7Tere2YnrsRAzEMQMBDFj+zuGar3Gp+rNp3kUtR5lmj/Jxo/GvZsvFmIGgpiBIGbcPi/rW+MPPaeqOs407xL1E1E9lzWpg8FCzEAQMxDEDOk3qC66a7f6fsSn1uz18+o8P+GzsBAzEMQMBDFjm1Ov7L3s3p+2/6lcfoa6ZxaNm50DWyEOMQRBzEAQM7Zne6PX3XilW5M3zbd0c/3ZHpvqY6P+7j7HQsxAEDMQxIxRPqRaT6Kuzemkh7WJ3RrJbJxq7eOuPyzEDAQxA0HMKJ3t/XbxobW/Gmdka/PpPMxPgoWYgSBmIIgZ0m9QrXTP1mb9Ru2y+/hsD2xaM9jN5UfjEIf8RSCIGQhiRus3qLp7ONU6jK4vynxMdn10XdY+m4/SHxZiBoKYgSBm3MYhGdl9/qkzvN18ilpDqF6nxiPVGs3Xz7EQMxDEDAQx4/ZcVoR6fqobZ6h7Vtm81TVejZdWuvHNXXssxAwEMQNBzHju3pyujdO68Ky9Wm+h9qPGJVG/6nyU+WIhZiCIGQhixtaHdFF9hlqLeOrcVPcMQDeOmtTNYyFmIIgZCGLGUR/SPQs73QuL5tGtiVznlc1X/T8iXtthIWYgiBkIYsbWh3T3nNS1dXqe6tReW8S0Hr1b5/LAQvxAEDMQxIw3H9I9nzU9R6XGHdn41dx4d4+rGp9En7OX9ReAIGYgiBlff6IWG2KwEDP+DQAA//+TDHXGhqE4+AAAAABJRU5ErkJggg==",
+ "url": "otpauth://totp/Google:test@gmail.com?algorithm=SHA1&digits=6&issuer=Google&period=30&secret=HTXT7KJFVNAJUPYWQRWMNVQE5AF5YZI2"
+ }
+}
+```
+
+If a QR code is returned, it consists of base64-formatted PNG bytes. You can embed it in a web page by including the base64 string in an `img` tag with the prefix `data:image/png;base64`, as in this illustrative sketch (the encoded data is elided):
+
+```
+<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA...">
+```
+
+## Read Key
+
+This endpoint queries the key definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/totp/keys/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the key to read. This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/totp/keys/my-key
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "account_name": "test@gmail.com",
+ "algorithm": "SHA1",
+ "digits": 6,
+ "issuer": "Google",
+ "period": 30
+ }
+}
+```
+
+## List Keys
+
+This endpoint returns a list of available keys. Only the key names are
+returned, not any values.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `LIST` | `/totp/keys` | `200 application/json` |
+| `GET` | `/totp/keys?list=true` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request LIST \
+ https://vault.rocks/v1/totp/keys
+```
+
+### Sample Response
+
+```json
+{
+ "auth": null,
+ "data": {
+ "keys": ["my-key"]
+ },
+ "lease_duration": 0,
+ "lease_id": "",
+ "renewable": false
+}
+```
+
+## Delete Key
+
+This endpoint deletes the key definition.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/totp/keys/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the key to delete. This
+ is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/totp/keys/my-key
+```
+
+## Generate Code
+
+This endpoint generates a new time-based one-time use password based on the named
+key.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/totp/code/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the key to create
+ credentials against. This is specified as part of the URL.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/totp/code/my-key
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "code": "810920"
+ }
+}
+```
+
+## Validate Code
+
+This endpoint validates a time-based one-time use password generated from the named
+key.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `POST` | `/totp/code/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Specifies the name of the key used to generate the password. This is specified as part of the URL.
+
+- `code` `(string: <required>)` – Specifies the password you want to validate.
+
+### Sample Payload
+
+```json
+{
+ "code": "123802"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/totp/code/my-key
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "valid": true
+ }
+}
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/transit/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/transit/index.html.md
index 37c7a7a..9437b01 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/secret/transit/index.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/transit/index.html.md
@@ -48,8 +48,10 @@ values set here cannot be changed after key creation.
- `type` `(string: "aes256-gcm96")` – Specifies the type of key to create. The
currently-supported types are:
- - `aes256-gcm96` – AES-256 wrapped with GCM using a 12-byte nonce size (symmetric)
+ - `aes256-gcm96` – AES-256 wrapped with GCM using a 12-byte nonce size
+ (symmetric, supports derivation)
- `ecdsa-p256` – ECDSA using the P-256 elliptic curve (asymmetric)
+ - `ed25519` – ED25519 (asymmetric, supports derivation)
### Sample Payload
@@ -107,7 +109,8 @@ $ curl \
"keys": {
"1": 1442851412
},
- "min_decryption_version": 0,
+ "min_decryption_version": 1,
+ "min_encryption_version": 0,
"name": "foo",
"supports_encryption": true,
"supports_decryption": true,
@@ -125,6 +128,7 @@ actual keys themselves).
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/transit/keys` | `200 application/json` |
+| `GET` | `/transit/keys?list=true` | `200 application/json` |
### Sample Request
@@ -173,7 +177,7 @@ $ curl \
https://vault.rocks/v1/transit/keys/my-key
```
-#### Update Key Configuration
+## Update Key Configuration
This endpoint allows tuning configuration values for a given key. (These values
are returned during a read operation on the named key.)
@@ -189,8 +193,12 @@ are returned during a read operation on the named key.)
policy can prevent old copies of ciphertext from being decrypted, should they
fall into the wrong hands. For signatures, this value controls the minimum
version of signature that can be verified against. For HMACs, this controls
- the minimum version of a key allowed to be used as the key for the HMAC
- function.
+ the minimum version of a key allowed to be used as the key for verification.
+
+- `min_encryption_version` `(int: 0)` – Specifies the minimum version of the
+ key that can be used to encrypt plaintext, sign payloads, or generate HMACs.
+ Must be `0` (which will use the latest version) or a value greater than or
+ equal to `min_decryption_version`.
- `deletion_allowed` `(bool: false)`- Specifies if the key is allowed to be
deleted.
@@ -234,7 +242,7 @@ $ curl \
https://vault.rocks/v1/transit/keys/my-key/rotate
```
-## Read Key
+## Export Key
This endpoint returns the named key. The `keys` object shows the value of the
key for each version. If `version` is specified, the specific version will be
@@ -259,9 +267,9 @@ be valid.
- `name` `(string: <required>)` – Specifies the name of the key to read
information about. This is specified as part of the URL.
-- `version` `(int: "")` – Specifies the version of the key to read. If omitted,
+- `version` `(string: "")` – Specifies the version of the key to read. If omitted,
all versions of the key will be returned. This is specified as part of the
- URL.
+ URL. If the version is set to `latest`, the current key will be returned.
### Sample Request
@@ -310,6 +318,10 @@ the key does not exist, an error will be returned.
- `context` `(string: "")` – Specifies the **base64 encoded** context for key
derivation. This is required if key derivation is enabled for this key.
+- `key_version` `(int: 0)` – Specifies the version of the key to use for
+ encryption. If not set, uses the latest version. Must be greater than or
+ equal to the key's `min_encryption_version`, if set.
+
- `nonce` `(string: "")` – Specifies the **base64 encoded** nonce value. This
must be provided if convergent encryption is enabled for this key and the key
was generated with Vault 0.6.1. Not required for keys created in 0.6.2+. The
@@ -338,7 +350,7 @@ the key does not exist, an error will be returned.
- `type` `(string: "aes256-gcm96")` –This parameter is required when encryption
key is expected to be created. When performing an upsert operation, the type
of key to create. Currently, "aes256-gcm96" (symmetric) is the only type
- supported.
+ supported.
- `convergent_encryption` `(string: "")` – This parameter will only be used when
a key is expected to be created. Whether to support convergent encryption.
@@ -468,6 +480,10 @@ functionality to untrusted users or scripts.
- `context` `(string: "")` – Specifies the **base64 encoded** context for key
derivation. This is required if key derivation is enabled.
+- `key_version` `(int: 0)` – Specifies the version of the key to use for the
+ operation. If not set, uses the latest version. Must be greater than or equal
+ to the key's `min_encryption_version`, if set.
+
- `nonce` `(string: "")` – Specifies a base64 encoded nonce value used during
encryption. Must be provided if convergent encryption is enabled for this key
and the key was generated with Vault 0.6.1. Not required for keys created in
@@ -680,7 +696,7 @@ $ curl \
}
```
-## Generate HMAC with Key
+## Generate HMAC
This endpoint returns the digest of given data using the specified hash
algorithm and the named key. The key can be of any type supported by `transit`;
@@ -697,6 +713,10 @@ be used.
- `name` `(string: <required>)` – Specifies the name of the encryption key to
generate the HMAC against. This is specified as part of the URL.
+- `key_version` `(int: 0)` – Specifies the version of the key to use for the
+ operation. If not set, uses the latest version. Must be greater than or equal
+ to the key's `min_encryption_version`, if set.
+
- `algorithm` `(string: "sha2-256")` – Specifies the hash algorithm to use. This
can also be specified as part of the URL. Currently-supported algorithms are:
@@ -707,9 +727,6 @@ be used.
- `input` `(string: <required>)` – Specifies the **base64 encoded** input data.
-- `format` `(string: "hex")` – Specifies the output encoding. This can be either
- `hex` or `base64`.
-
### Sample Payload
```json
@@ -738,7 +755,7 @@ $ curl \
}
```
-## Sign Data with Key
+## Sign Data
This endpoint returns the cryptographic signature of the given data using the
named key and the specified hash algorithm. The key must be of a type that
@@ -751,10 +768,16 @@ supports signing.
### Parameters
- `name` `(string: <required>)` – Specifies the name of the encryption key to
- generate hmac against. This is specified as part of the URL.
+ use for signing. This is specified as part of the URL.
-- `algorithm` `(string: "sha2-256")` – Specifies the hash algorithm to use. This
- can also be specified as part of the URL. Currently-supported algorithms are:
+- `key_version` `(int: 0)` – Specifies the version of the key to use for
+ signing. If not set, uses the latest version. Must be greater than or equal
+ to the key's `min_encryption_version`, if set.
+
+- `algorithm` `(string: "sha2-256")` – Specifies the hash algorithm to use for
+ supporting key types (notably, not including `ed25519` which specifies its
+ own hash algorithm). This can also be specified as part of the URL.
+ Currently-supported algorithms are:
- `sha2-224`
- `sha2-256`
@@ -763,9 +786,6 @@ supports signing.
- `input` `(string: <required>)` – Specifies the **base64 encoded** input data.
-- `format` `(string: "hex")` – Specifies the output encoding. This can be either
- `hex` or `base64`.
-
### Sample Payload
```json
@@ -794,7 +814,7 @@ $ curl \
}
```
-## Verify Data with Key
+## Verify Signed Data
This endpoint returns whether the provided signature is valid for the given
data.
@@ -805,8 +825,8 @@ data.
### Parameters
-- `name` `(string: )` – Specifies the name of the encryption key to
- generate hmac against. This is specified as part of the URL.
+- `name` `(string: <required>)` – Specifies the name of the encryption key that
+ was used to generate the signature or HMAC.
- `algorithm` `(string: "sha2-256")` – Specifies the hash algorithm to use. This
can also be specified as part of the URL. Currently-supported algorithms are:
@@ -818,9 +838,6 @@ data.
- `input` `(string: <required>)` – Specifies the **base64 encoded** input data.
-- `format` `(string: "hex")` – Specifies the output encoding. This can be either
- `hex` or `base64`.
-
- `signature` `(string: "")` – Specifies the signature output from the
`/transit/sign` function. Either this must be supplied or `hmac` must be
supplied.
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/auth.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/auth.html.md
index eb9a61d..36ca664 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/system/auth.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/auth.html.md
@@ -74,10 +74,22 @@ For example, mounting the "foo" auth backend will make it accessible at
- `type` `(string: <required>)` – Specifies the name of the authentication
backend type, such as "github" or "token".
-Additionally, the following options are allowed in Vault open-source, but
+- `config` `(map: nil)` – Specifies configuration options for
+ this mount. These are the possible values:
+
+ - `plugin_name`
+
+ The plugin_name can be provided in the config map or as a top-level option,
+ with the former taking precedence.
+
+- `plugin_name` `(string: "")` – Specifies the name of the auth plugin to
+ use, based on the name in the plugin catalog. Applies only to plugin
+ backends.
+
+Additionally, the following options are allowed in Vault open-source, but
relevant functionality is only supported in Vault Enterprise:
-- `local` `(bool: false)` – Specifies if the auth backend is a local mount
+- `local` `(bool: false)` – Specifies if the auth backend is a local mount
only. Local mounts are not replicated nor (if a secondary) removed by
replication.
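+
+As a hedged illustration (the mount path `my-plugin` and the plugin name
+`my-auth-plugin` are placeholders, not values from this page), a payload that
+mounts a plugin auth backend and passes `plugin_name` via the `config` map
+might look like:
+
+```json
+{
+  "type": "plugin",
+  "config": {
+    "plugin_name": "my-auth-plugin"
+  }
+}
+```
+
+```
+$ curl \
+    --header "X-Vault-Token: ..." \
+    --request POST \
+    --data @payload.json \
+    https://vault.rocks/v1/sys/auth/my-plugin
+```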
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-accessor.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-accessor.html.md
index 70b4494..06a2bf3 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-accessor.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-accessor.html.md
@@ -44,7 +44,7 @@ for the given path.
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
- --data payload.json \
+ --data @payload.json \
https://vault.rocks/v1/sys/capabilities-accessor
```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-self.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-self.html.md
index 175c2b3..4adfb96 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-self.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-self.html.md
@@ -41,7 +41,7 @@ client token is the Vault token with which this API call is made.
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
- --data payload.json \
+ --data @payload.json \
https://vault.rocks/v1/sys/capabilities-self
```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/config-auditing.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/config-auditing.html.md
index 62a0fbf..83fac0a 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/system/config-auditing.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/config-auditing.html.md
@@ -50,7 +50,7 @@ This endpoint lists the information for the given request header.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
-| `POST` | `/sys/config/auditing/request-headers/:name` | `200 application/json` |
+| `GET` | `/sys/config/auditing/request-headers/:name` | `200 application/json` |
### Parameters
@@ -105,7 +105,7 @@ This endpoint enables auditing of a header.
$ curl \
--header "X-Vault-Token: ..." \
--request PUT \
- --data payload.json \
+ --data @payload.json \
https://vault.rocks/v1/sys/config/auditing/request-headers/my-header
```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/config-cors.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/config-cors.html.md
new file mode 100644
index 0000000..26c5b42
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/config-cors.html.md
@@ -0,0 +1,99 @@
+---
+layout: "api"
+page_title: "/sys/config/cors - HTTP API"
+sidebar_current: "docs-http-system-config-cors"
+description: |-
+ The '/sys/config/cors' endpoint configures how the Vault server responds to cross-origin requests.
+---
+
+# `/sys/config/cors`
+
+The `/sys/config/cors` endpoint is used to configure CORS settings.
+
+- **`sudo` required** – All CORS endpoints require `sudo` capability in
+ addition to any path-specific capabilities.
+
+## Read CORS Settings
+
+This endpoint returns the current CORS configuration.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `GET` | `/sys/config/cors` | `200 application/json` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ https://vault.rocks/v1/sys/config/cors
+```
+
+### Sample Response
+
+```json
+{
+ "enabled": true,
+ "allowed_origins": ["http://www.example.com"],
+ "allowed_headers": [
+ "Content-Type",
+ "X-Requested-With",
+ "X-Vault-AWS-IAM-Server-ID",
+ "X-Vault-No-Request-Forwarding",
+ "X-Vault-Token",
+ "X-Vault-Wrap-Format",
+ "X-Vault-Wrap-TTL",
+ ]
+}
+```
+
+## Configure CORS Settings
+
+This endpoint allows configuring the origins that are permitted to make
+cross-origin requests, as well as headers that are allowed on cross-origin requests.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `PUT` | `/sys/config/cors` | `204 (empty body)` |
+
+### Parameters
+
+- `allowed_origins` `(string or string array: <required>)` – A wildcard (`*`), comma-delimited string, or array of strings specifying the origins that are permitted to make cross-origin requests.
+
+- `allowed_headers` `(string or string array: "" or [])` – A comma-delimited string or array of strings specifying headers that are permitted to be on cross-origin requests. Headers set via this parameter will be appended to the list of headers that Vault allows by default.
+
+### Sample Payload
+
+```json
+{
+ "allowed_origins": "*",
+ "allowed_headers": "X-Custom-Header"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request PUT \
+ --data @payload.json \
+ https://vault.rocks/v1/sys/config/cors
+```
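+
+Since `allowed_origins` also accepts an array, an equivalent payload that
+whitelists specific origins (illustrative values, not from the upstream page)
+might be:
+
+```json
+{
+  "allowed_origins": ["https://ui.example.com", "https://ops.example.com"],
+  "allowed_headers": ["X-Custom-Header"]
+}
+```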
+
+## Delete CORS Settings
+
+This endpoint removes any CORS configuration.
+
+| Method | Path | Produces |
+| :------- | :--------------------------- | :--------------------- |
+| `DELETE` | `/sys/config/cors` | `204 (empty body)` |
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/sys/config/cors
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/generate-root.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/generate-root.html.md
index e6d71f3..54be70e 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/system/generate-root.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/generate-root.html.md
@@ -81,7 +81,7 @@ generation attempt can take place at a time. One (and only one) of `otp` or
```
$ curl \
--request PUT \
- --data payload.json \
+ --data @payload.json \
https://vault.rocks/v1/sys/generate-root/attempt
```
@@ -139,7 +139,7 @@ nonce must be provided with each call.
```json
{
"key": "acbd1234",
- "nonce": "ad235",
+ "nonce": "ad235"
}
```
@@ -148,7 +148,7 @@ nonce must be provided with each call.
```
$ curl \
--request PUT \
- --data payload.json \
+ --data @payload.json \
https://vault.rocks/v1/sys/generate-root/update
```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/init.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/init.html.md
index e9de30a..ad3b7e6 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/system/init.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/init.html.md
@@ -94,7 +94,7 @@ Additionally, the following options are only supported on Vault Pro/Enterprise:
```
$ curl \
--request PUT \
- --data payload.json \
+ --data @payload.json \
https://vault.rocks/v1/sys/init
```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/leader.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/leader.html.md
index 5e20ca9..358fffb 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/system/leader.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/leader.html.md
@@ -34,6 +34,7 @@ $ curl \
{
"ha_enabled": true,
"is_self": false,
- "leader_address": "https://127.0.0.1:8200/"
+ "leader_address": "https://127.0.0.1:8200/",
+ "leader_cluster_address": "https://127.0.0.1:8201/"
}
```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/leases.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/leases.html.md
index ec51664..7d20d76 100644
--- a/vendor/github.com/hashicorp/vault/website/source/api/system/leases.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/leases.html.md
@@ -62,6 +62,7 @@ This endpoint returns a list of lease ids.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/sys/leases/lookup/:prefix` | `200 application/json` |
+| `GET` | `/sys/leases/lookup/:prefix?list=true` | `200 application/json` |
### Sample Request
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-duo.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-duo.html.md
new file mode 100644
index 0000000..db081fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-duo.html.md
@@ -0,0 +1,119 @@
+---
+layout: "api"
+page_title: "/sys/mfa/method/duo - HTTP API"
+sidebar_current: "docs-http-system-mfa-duo"
+description: |-
+ The '/sys/mfa/method/duo' endpoint focuses on managing Duo MFA behaviors in Vault Enterprise.
+---
+
+## Configure Duo MFA Method
+
+This endpoint defines an MFA method of type Duo.
+
+| Method | Path | Produces |
+| :------- | :----------------------------- | :--------------------- |
+| `POST` | `/sys/mfa/method/duo/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Name of the MFA method.
+
+- `mount_accessor` `(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of the Personas associated with this mount as the username.
+
+- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{persona.name}}@example.com"`. If blank, the Persona's Name field will be used as-is. Currently-supported mappings (see the illustrative payload after the sample request below):
+ - persona.name: The name returned by the mount configured via the `mount_accessor` parameter
+ - entity.name: The name configured for the Entity
+ - persona.metadata.`<key>`: The value of the Persona's metadata parameter
+ - entity.metadata.`<key>`: The value of the Entity's metadata parameter
+
+- `secret_key` `(string)` - Secret key for Duo.
+
+- `integration_key` `(string)` - Integration key for Duo.
+
+- `api_hostname` `(string)` - API hostname for Duo.
+
+- `push_info` `(string)` - Push information for Duo.
+
+### Sample Payload
+
+```json
+{
+ "mount_accessor": "auth_userpass_1793464a",
+ "secret_key": "BIACEUEAXI20BNWTEYXT",
+ "integration_key":"8C7THtrIigh2rPZQMbguugt8IUftWhMRCOBzbuyz",
+ "api_hostname":"api-2b5c39f5.duosecurity.com"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/sys/mfa/method/duo/my_duo
+```
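+
+For illustration only (not an upstream sample), the same payload with a
+`username_format` template that maps the Persona name to an email-style Duo
+username might look like:
+
+```json
+{
+  "mount_accessor": "auth_userpass_1793464a",
+  "username_format": "{{persona.name}}@example.com",
+  "secret_key": "BIACEUEAXI20BNWTEYXT",
+  "integration_key": "8C7THtrIigh2rPZQMbguugt8IUftWhMRCOBzbuyz",
+  "api_hostname": "api-2b5c39f5.duosecurity.com"
+}
+```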
+
+## Read Duo MFA Method
+
+This endpoint queries the MFA configuration of Duo type for a given method
+name.
+
+| Method | Path | Produces |
+| :------- | :----------------------------- | :----------------------- |
+| `GET` | `/sys/mfa/method/duo/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Name of the MFA method.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request GET \
+ https://vault.rocks/v1/sys/mfa/method/duo/my_duo
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "api_hostname": "api-2b5c39f5.duosecurity.com",
+ "id": "0ad21b78-e9bb-64fa-88b8-1e38db217bde",
+ "integration_key": "BIACEUEAXI20BNWTEYXT",
+ "mount_accessor": "auth_userpass_1793464a",
+ "name": "my_duo",
+ "pushinfo": "",
+ "secret_key": "8C7THtrIigh2rPZQMbguugt8IUftWhMRCOBzbuyz",
+ "type": "duo",
+ "username_format": ""
+ }
+}
+```
+
+## Delete Duo MFA Method
+
+This endpoint deletes a Duo MFA method.
+
+| Method | Path | Produces |
+| :------- | :----------------------------- | :----------------------- |
+| `DELETE` | `/sys/mfa/method/duo/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` - Name of the MFA method.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/sys/mfa/method/duo/my_duo
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-okta.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-okta.html.md
new file mode 100644
index 0000000..1b82370
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-okta.html.md
@@ -0,0 +1,115 @@
+---
+layout: "api"
+page_title: "/sys/mfa/method/okta - HTTP API"
+sidebar_current: "docs-http-system-mfa-okta"
+description: |-
+ The '/sys/mfa/method/okta' endpoint focuses on managing Okta MFA behaviors in Vault Enterprise.
+---
+
+## Configure Okta MFA Method
+
+This endpoint defines an MFA method of type Okta.
+
+| Method | Path | Produces |
+| :------- | :----------------------------- | :--------------------- |
+| `POST` | `/sys/mfa/method/okta/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Name of the MFA method.
+
+- `mount_accessor` `(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of the Personas associated with this mount as the username.
+
+- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{persona.name}}@example.com"`. If blank, the Persona's Name field will be used as-is. Currently-supported mappings (see the illustrative payload after the sample request below):
+ - persona.name: The name returned by the mount configured via the `mount_accessor` parameter
+ - entity.name: The name configured for the Entity
+ - persona.metadata.`<key>`: The value of the Persona's metadata parameter
+ - entity.metadata.`<key>`: The value of the Entity's metadata parameter
+
+- `org_name` `(string)` - Name of the organization to be used in the Okta API.
+
+- `api_token` `(string)` - Okta API key.
+
+- `base_url` `(string)` - If set, will be used as the base domain for API requests. Examples are okta.com, oktapreview.com, and okta-emea.com.
+
+### Sample Payload
+
+```json
+{
+ "mount_accessor": "auth_userpass_1793464a",
+ "org_name": "dev-262778",
+ "api_token": "0081u7KrReNkzmABZJAP2oDyIXccveqx9vIOEyCZDC"
+}
+```
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request POST \
+ --data @payload.json \
+ https://vault.rocks/v1/sys/mfa/method/okta/my_okta
+```
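+
+As a hedged illustration (placeholder values, not an upstream sample), a
+variant that maps the Entity name via `username_format` and targets a preview
+Okta org via `base_url` might look like:
+
+```json
+{
+  "mount_accessor": "auth_userpass_1793464a",
+  "username_format": "{{entity.name}}",
+  "org_name": "dev-262778",
+  "api_token": "0081u7KrReNkzmABZJAP2oDyIXccveqx9vIOEyCZDC",
+  "base_url": "oktapreview.com"
+}
+```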
+
+## Read Okta MFA Method
+
+This endpoint queries the MFA configuration of Okta type for a given method
+name.
+
+| Method | Path | Produces |
+| :------- | :----------------------------- | :----------------------- |
+| `GET` | `/sys/mfa/method/okta/:name` | `200 application/json` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Name of the MFA method.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request GET \
+ https://vault.rocks/v1/sys/mfa/method/okta/my_okta
+```
+
+### Sample Response
+
+```json
+{
+ "data": {
+ "api_token": "0081u7KrReNkzmABZJAP2oDyIXccveqx9vIOEyCZDC",
+ "id": "e39f08a1-a42d-143d-5b87-15c61d89c15a",
+ "mount_accessor": "auth_userpass_1793464a",
+ "name": "my_okta",
+ "org_name": "dev-262778",
+ "production": true,
+ "type": "okta",
+ "username_format": ""
+ }
+}
+```
+
+## Delete Okta MFA Method
+
+This endpoint deletes an Okta MFA method.
+
+| Method | Path | Produces |
+| :------- | :----------------------------- | :----------------------- |
+| `DELETE` | `/sys/mfa/method/okta/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` - Name of the MFA method.
+
+### Sample Request
+
+```
+$ curl \
+ --header "X-Vault-Token: ..." \
+ --request DELETE \
+ https://vault.rocks/v1/sys/mfa/method/okta/my_okta
+```
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-pingid.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-pingid.html.md
new file mode 100644
index 0000000..a519f87
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-pingid.html.md
@@ -0,0 +1,111 @@
+---
+layout: "api"
+page_title: "/sys/mfa/method/pingid - HTTP API"
+sidebar_current: "docs-http-system-mfa-pingid"
+description: |-
+ The '/sys/mfa/method/pingid' endpoint focuses on managing PingID MFA behaviors in Vault Enterprise.
+---
+
+## Configure PingID MFA Method
+
+This endpoint defines an MFA method of type PingID.
+
+| Method | Path | Produces |
+| :------- | :----------------------------- | :--------------------- |
+| `POST` | `/sys/mfa/method/pingid/:name` | `204 (empty body)` |
+
+### Parameters
+
+- `name` `(string: <required>)` – Name of the MFA method.
+
+- `mount_accessor` `(string: <required>)` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of the Personas associated with this mount as the username.
+
+- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{persona.name}}@example.com"`. If blank, the Persona's Name field will be used as-is. Currently-supported mappings:
+ - persona.name: The name returned by the mount configured via the `mount_accessor` parameter
+ - entity.name: The name configured for the Entity
+ - persona.metadata.`<key>`: The value of the Persona's metadata parameter
+ - entity.metadata.`<key>`: The value of the Entity's metadata parameter