diff --git a/Gopkg.lock b/Gopkg.lock index fd3ab1d..9f1d1c1 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -4,18 +4,26 @@ [[projects]] name = "github.com/Luzifer/rconfig" packages = ["."] - revision = "c27bd3a64b5b19556914d9fec69922cf3852d585" - version = "v1.1.0" + revision = "7aef1d393c1e2d0758901853b59981c7adc67c7e" + version = "v1.2.0" [[projects]] name = "github.com/Sirupsen/logrus" packages = ["."] - revision = "3ec0642a7fb6488f65b06f9040adc67e3990296a" + revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e" + version = "v1.0.3" [[projects]] name = "github.com/fatih/structs" packages = ["."] - revision = "3fe2facc32a7fbde4b29c0f85604dc1dd22836d2" + revision = "a720dfa8df582c51dee1b36feabb906bde1588bd" + version = "v1.0" + +[[projects]] + branch = "master" + name = "github.com/golang/snappy" + packages = ["."] + revision = "553a641470496b2327abcac10b36396bd98e45c9" [[projects]] branch = "master" @@ -24,14 +32,16 @@ revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55" [[projects]] + branch = "master" name = "github.com/hashicorp/go-cleanhttp" packages = ["."] - revision = "ad28ea4487f05916463e2423a55166280e8254b5" + revision = "3573b8b52aa7b37b9358d966a898feb387f62437" [[projects]] + branch = "master" name = "github.com/hashicorp/go-multierror" packages = ["."] - revision = "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" + revision = "83588e72410abfbe4df460eeb6f30841ae47d4c4" [[projects]] branch = "master" @@ -40,59 +50,88 @@ revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" [[projects]] + branch = "master" name = "github.com/hashicorp/hcl" packages = [".","hcl/ast","hcl/parser","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"] - revision = "ef8133da8cda503718a74741312bf50821e6de79" + revision = "42e33e2d55a0ff1d6263f738896ea8c13571a8d0" [[projects]] name = "github.com/hashicorp/vault" - packages = ["api","helper/certutil","helper/compressutil","helper/errutil","helper/jsonutil"] - revision = 
"4490e93395fb70c3a25ade1fe88f363561a7d584" + packages = ["api","helper/certutil","helper/compressutil","helper/errutil","helper/jsonutil","helper/parseutil"] + revision = "6b29fb2b7f70ed538ee2b3c057335d706b6d4e36" + version = "v0.8.3" [[projects]] name = "github.com/mattn/go-runewidth" packages = ["."] - revision = "14207d285c6c197daabb5c9793d63e7af9ab2d50" + revision = "9e777a8366cce605130a531d2cd6363d07ad7317" + version = "v0.0.2" [[projects]] + branch = "master" name = "github.com/mitchellh/go-homedir" packages = ["."] - revision = "981ab348d865cf048eb7d17e78ac7192632d8415" + revision = "b8bc1bf767474819792c23f32d8286a45736f1c6" [[projects]] + branch = "master" name = "github.com/mitchellh/mapstructure" packages = ["."] - revision = "ca63d7c062ee3c9f34db231e352b60012b4fd0c1" + revision = "d0303fe809921458f417bcf828397a65db30a7e4" [[projects]] + branch = "master" name = "github.com/olekukonko/tablewriter" packages = ["."] - revision = "febf2d34b54a69ce7530036c7503b1c9fbfdf0bb" + revision = "a7a4c189eb47ed33ce7b35f2880070a0c82a67d4" [[projects]] + branch = "master" name = "github.com/sethgrid/pester" packages = ["."] - revision = "4f4c0a67b6496764028e1ab9fd8dfb630282ed2f" + revision = "0af5bab1e1ea2860c5aef8e77427bab011d774d8" [[projects]] name = "github.com/spf13/pflag" packages = ["."] - revision = "c7e63cf4530bcd3ba943729cee0efeff2ebea63f" + revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" + version = "v1.0.0" [[projects]] + branch = "master" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3" + +[[projects]] + branch = "master" name = "golang.org/x/net" packages = ["http2","http2/hpack","idna","lex/httplex"] - revision = "f09c4662a0bd6bd8943ac7b4931e185df9471da4" + revision = "a04bdaca5b32abe1c069418fb7088ae607de5bd0" [[projects]] + branch = "master" name = "golang.org/x/sys" - packages = ["unix"] - revision = "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9" + packages = ["unix","windows"] + 
revision = "ebfc5b4631820b793c9010c87fd8fef0f39eb082" [[projects]] + branch = "master" + name = "golang.org/x/text" + packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"] + revision = "825fc78a2fd6fa0a5447e300189e3219e05e1f25" + +[[projects]] + branch = "v2" + name = "gopkg.in/validator.v2" + packages = ["."] + revision = "460c83432a98c35224a6fe352acf8b23e067ad06" + +[[projects]] + branch = "v2" name = "gopkg.in/yaml.v2" packages = ["."] - revision = "31c299268d302dd0aa9a0dcf765a3d58971ac83f" + revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f" [solve-meta] analyzer-name = "dep" diff --git a/vendor/github.com/Luzifer/rconfig/.travis.yml b/vendor/github.com/Luzifer/rconfig/.travis.yml index 520bedf..b5c25ee 100644 --- a/vendor/github.com/Luzifer/rconfig/.travis.yml +++ b/vendor/github.com/Luzifer/rconfig/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - 1.4 - - 1.5 + - 1.6 + - 1.7 - tip script: go test -v -race -cover ./... 
diff --git a/vendor/github.com/Luzifer/rconfig/History.md b/vendor/github.com/Luzifer/rconfig/History.md index 8bc33a6..5adadd9 100644 --- a/vendor/github.com/Luzifer/rconfig/History.md +++ b/vendor/github.com/Luzifer/rconfig/History.md @@ -1,3 +1,7 @@ +# 1.2.0 / 2017-06-19 + + * Add ParseAndValidate method + # 1.1.0 / 2016-06-28 * Support time.Duration config parameters diff --git a/vendor/github.com/Luzifer/rconfig/README.md b/vendor/github.com/Luzifer/rconfig/README.md index 67fbf87..f42a664 100644 --- a/vendor/github.com/Luzifer/rconfig/README.md +++ b/vendor/github.com/Luzifer/rconfig/README.md @@ -29,34 +29,31 @@ go test -v -race -cover github.com/Luzifer/rconfig ## Usage -As a first step define a struct holding your configuration: +A very simple usecase is to just configure a struct inside the vars section of your `main.go` and to parse the commandline flags from the `main()` function: ```go -type config struct { - Username string `default:"unknown" flag:"user" description:"Your name"` - Details struct { - Age int `default:"25" flag:"age" env:"age" description:"Your age"` - } -} -``` +package main -Next create an instance of that struct and let `rconfig` fill that config: +import ( + "fmt" + "github.com/Luzifer/rconfig" +) -```go -var cfg config -func init() { - cfg = config{} - rconfig.Parse(&cfg) -} -``` +var ( + cfg = struct { + Username string `default:"unknown" flag:"user" description:"Your name"` + Details struct { + Age int `default:"25" flag:"age" env:"age" description:"Your age"` + } + }{} +) -You're ready to access your configuration: - -```go func main() { + rconfig.Parse(&cfg) + fmt.Printf("Hello %s, happy birthday for your %dth birthday.", - cfg.Username, - cfg.Details.Age) + cfg.Username, + cfg.Details.Age) } ``` @@ -72,18 +69,14 @@ The order of the directives (lower number = higher precedence): 1. 
`default` tag in the struct ```go -type config struct { +var cfg = struct { Username string `vardefault:"username" flag:"username" description:"Your username"` } -var cfg = config{} - -func init() { +func main() { rconfig.SetVariableDefaults(rconfig.VarDefaultsFromYAMLFile("~/.myapp.yml")) rconfig.Parse(&cfg) -} -func main() { fmt.Printf("Username = %s", cfg.Username) // Output: Username = luzifer } diff --git a/vendor/github.com/Luzifer/rconfig/config.go b/vendor/github.com/Luzifer/rconfig/config.go index dd37238..251909d 100644 --- a/vendor/github.com/Luzifer/rconfig/config.go +++ b/vendor/github.com/Luzifer/rconfig/config.go @@ -13,6 +13,7 @@ import ( "time" "github.com/spf13/pflag" + validator "gopkg.in/validator.v2" ) var ( @@ -45,6 +46,15 @@ func Parse(config interface{}) error { return parse(config, nil) } +// ParseAndValidate works exactly like Parse but implements an additional run of +// the go-validator package on the configuration struct. Therefore additonal struct +// tags are supported like described in the readme file of the go-validator package: +// +// https://github.com/go-validator/validator/tree/v2#usage +func ParseAndValidate(config interface{}) error { + return parseAndValidate(config, nil) +} + // Args returns the non-flag command-line arguments. 
func Args() []string { return fs.Args() @@ -65,6 +75,14 @@ func SetVariableDefaults(defaults map[string]string) { variableDefaults = defaults } +func parseAndValidate(in interface{}, args []string) error { + if err := parse(in, args); err != nil { + return err + } + + return validator.Validate(in) +} + func parse(in interface{}, args []string) error { if args == nil { args = os.Args diff --git a/vendor/github.com/Luzifer/rconfig/general_test.go b/vendor/github.com/Luzifer/rconfig/general_test.go index d9ff8fe..e7f29b7 100644 --- a/vendor/github.com/Luzifer/rconfig/general_test.go +++ b/vendor/github.com/Luzifer/rconfig/general_test.go @@ -15,6 +15,10 @@ var _ = Describe("Testing general parsing", func() { SadFlag string } + type tValidated struct { + Test string `flag:"test" default:"" validate:"nonzero"` + } + var ( err error args []string @@ -106,4 +110,19 @@ var _ = Describe("Testing general parsing", func() { }) }) + Context("making use of the validator package", func() { + var cfgValidated tValidated + + BeforeEach(func() { + cfgValidated = tValidated{} + args = []string{} + }) + + JustBeforeEach(func() { + err = parseAndValidate(&cfgValidated, args) + }) + + It("should have errored", func() { Expect(err).To(HaveOccurred()) }) + }) + }) diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml index dee4eb2..a23296a 100644 --- a/vendor/github.com/Sirupsen/logrus/.travis.yml +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -1,10 +1,15 @@ language: go go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 + - 1.6.x + - 1.7.x + - 1.8.x - tip +env: + - GOMAXPROCS=4 GORACE=halt_on_error=1 install: - - go get -t ./... -script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./... + - go get github.com/stretchr/testify/assert + - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2 + - go get golang.org/x/sys/unix + - go get golang.org/x/sys/windows +script: + - go test -race -v ./... 
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md index f2c2bc2..8236d8b 100644 --- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,50 @@ +# 1.0.3 + +* Replace example files with testable examples + +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + # 0.10.0 * feature: Add a test hook (#180) diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md index ab48929..4f5ce57 100644 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -1,11 +1,24 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) 
[![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** +the standard library logger. + +**Seeing weird case-sensitive problems?** It's in the past been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). + +**Are you interested in assisting in maintaining Logrus?** Currently I have a +lot of obligations, and I am unable to provide Logrus with the maintainership it +needs. If you'd like to help, please reach out to me at `simon at author's +username dot com`. Nicely color-coded in development (when a TTY is attached, otherwise just plain text): @@ -46,6 +59,12 @@ time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x20822 exit status 1 ``` +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. 
+ #### Example The simplest way to use Logrus is simply the package-level exported logger: @@ -54,7 +73,7 @@ The simplest way to use Logrus is simply the package-level exported logger: package main import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) func main() { @@ -65,7 +84,7 @@ func main() { ``` Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` and you'll now have the flexibility of Logrus. You can customize it all you want: @@ -74,15 +93,16 @@ package main import ( "os" - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) func init() { // Log as JSON instead of the default ASCII formatter. log.SetFormatter(&log.JSONFormatter{}) - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) // Only log the warning severity or above. log.SetLevel(log.WarnLevel) @@ -123,7 +143,8 @@ application, you can also create an instance of the `logrus` Logger: package main import ( - "github.com/Sirupsen/logrus" + "os" + "github.com/sirupsen/logrus" ) // Create a new instance of the logger. You can have any number of instances. @@ -132,7 +153,15 @@ var log = logrus.New() func main() { // The API for setting attributes is a little different than the package level // exported logger. See Godoc. 
- log.Out = os.Stderr + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } log.WithFields(logrus.Fields{ "animal": "walrus", @@ -143,7 +172,7 @@ func main() { #### Fields -Logrus encourages careful, structured logging though logging fields instead of +Logrus encourages careful, structured logging through logging fields instead of long, unparseable error messages. For example, instead of: `log.Fatalf("Failed to send event %s to topic %s with key %d")`, you should log the much more discoverable: @@ -165,6 +194,20 @@ In general, with Logrus using any of the `printf`-family functions should be seen as a hint you should add a field, however, you can still use the `printf`-family functions with Logrus. +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + #### Hooks You can add hooks for logging levels. For example to send errors to an exception @@ -176,9 +219,9 @@ Logrus comes with [built-in hooks](hooks/). 
Add those, or your custom hook, in ```go import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" "log/syslog" ) @@ -200,37 +243,52 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v | Hook | Description | | ----- | ----------- | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | | [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. 
| -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. 
| +| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | | [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | | [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| -| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. 
| +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | | [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | - +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. 
| +| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | +| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | +| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. 
| +| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | +| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | +| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | #### Level logging @@ -279,7 +337,7 @@ could do: ```go import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) init() { @@ -306,11 +364,15 @@ The built-in logging formatters are: without colors. * *Note:* to force colored output when there is no TTY, set the `ForceColors` field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). Third party logging formatters: +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can by parsed by Kubernetes and Google Container Engine. * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. 
@@ -356,6 +418,18 @@ srv := http.Server{ Each line written to that writer will be printed the usual way, using formatters and hooks. The level for those entries is `info`. +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + #### Rotation Log rotation is not provided with Logrus. Log rotation should be done by an @@ -367,6 +441,7 @@ entries. It should not be a feature of the application-level logger. | Tool | Description | | ---- | ----------- | |[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | #### Testing @@ -376,15 +451,24 @@ Logrus has a built in facility for asserting the presence of log messages. 
This * a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): ```go -logger, hook := NewNullLogger() -logger.Error("Hello error") +import( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "testing" +) -assert.Equal(1, len(hook.Entries)) -assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) -assert.Equal("Hello error", hook.LastEntry().Message) +func TestSomething(t*testing.T){ + logger, hook := test.NewNullLogger() + logger.Error("Helloerror") -hook.Reset() -assert.Nil(hook.LastEntry()) + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} ``` #### Fatal handlers @@ -403,7 +487,7 @@ logrus.RegisterExitHandler(handler) ... ``` -#### Thread safty +#### Thread safety By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go index b4c9e84..8af9063 100644 --- a/vendor/github.com/Sirupsen/logrus/alt_exit.go +++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go @@ -1,7 +1,7 @@ package logrus // The following code was sourced and modified from the -// https://bitbucket.org/tebeka/atexit package governed by the following license: +// https://github.com/tebeka/atexit package governed by the following license: // // Copyright (c) 2012 Miki Tebeka . 
// diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit_test.go b/vendor/github.com/Sirupsen/logrus/alt_exit_test.go index 022b778..a08b1a8 100644 --- a/vendor/github.com/Sirupsen/logrus/alt_exit_test.go +++ b/vendor/github.com/Sirupsen/logrus/alt_exit_test.go @@ -2,7 +2,10 @@ package logrus import ( "io/ioutil" + "log" + "os" "os/exec" + "path/filepath" "testing" "time" ) @@ -11,30 +14,36 @@ func TestRegister(t *testing.T) { current := len(handlers) RegisterExitHandler(func() {}) if len(handlers) != current+1 { - t.Fatalf("can't add handler") + t.Fatalf("expected %d handlers, got %d", current+1, len(handlers)) } } func TestHandler(t *testing.T) { - gofile := "/tmp/testprog.go" + tempDir, err := ioutil.TempDir("", "test_handler") + if err != nil { + log.Fatalf("can't create temp dir. %q", err) + } + defer os.RemoveAll(tempDir) + + gofile := filepath.Join(tempDir, "gofile.go") if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil { - t.Fatalf("can't create go file") + t.Fatalf("can't create go file. %q", err) } - outfile := "/tmp/testprog.out" + outfile := filepath.Join(tempDir, "outfile.out") arg := time.Now().UTC().String() - err := exec.Command("go", "run", gofile, outfile, arg).Run() + err = exec.Command("go", "run", gofile, outfile, arg).Run() if err == nil { t.Fatalf("completed normally, should have failed") } data, err := ioutil.ReadFile(outfile) if err != nil { - t.Fatalf("can't read output file %s", outfile) + t.Fatalf("can't read output file %s. %q", outfile, err) } if string(data) != arg { - t.Fatalf("bad data") + t.Fatalf("bad data. 
Expected %q, got %q", data, arg) } } @@ -44,7 +53,7 @@ var testprog = []byte(` package main import ( - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" "flag" "fmt" "io/ioutil" diff --git a/vendor/github.com/Sirupsen/logrus/appveyor.yml b/vendor/github.com/Sirupsen/logrus/appveyor.yml new file mode 100644 index 0000000..96c2ce1 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/appveyor.yml @@ -0,0 +1,14 @@ +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go index dddd5f8..da67aba 100644 --- a/vendor/github.com/Sirupsen/logrus/doc.go +++ b/vendor/github.com/Sirupsen/logrus/doc.go @@ -7,7 +7,7 @@ The simplest way to use Logrus is simply the package-level exported logger: package main import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) func main() { @@ -21,6 +21,6 @@ The simplest way to use Logrus is simply the package-level exported logger: Output: time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 -For a full guide visit https://github.com/Sirupsen/logrus +For a full guide visit https://github.com/sirupsen/logrus */ package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go index 4edbe7a..5bf582e 100644 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -35,6 +35,7 @@ type Entry struct { Time time.Time // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. 
Level Level // Message passed to Debug, Info, Warn, Error, Fatal or Panic @@ -126,7 +127,7 @@ func (entry Entry) log(level Level, msg string) { } func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.log(DebugLevel, fmt.Sprint(args...)) } } @@ -136,13 +137,13 @@ func (entry *Entry) Print(args ...interface{}) { } func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.log(InfoLevel, fmt.Sprint(args...)) } } func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.log(WarnLevel, fmt.Sprint(args...)) } } @@ -152,20 +153,20 @@ func (entry *Entry) Warning(args ...interface{}) { } func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.log(ErrorLevel, fmt.Sprint(args...)) } } func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.log(FatalLevel, fmt.Sprint(args...)) } Exit(1) } func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.log(PanicLevel, fmt.Sprint(args...)) } panic(fmt.Sprint(args...)) @@ -174,13 +175,13 @@ func (entry *Entry) Panic(args ...interface{}) { // Entry Printf family functions func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.Debug(fmt.Sprintf(format, args...)) } } func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.Info(fmt.Sprintf(format, args...)) } } @@ -190,7 +191,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) { } func (entry *Entry) Warnf(format 
string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.Warn(fmt.Sprintf(format, args...)) } } @@ -200,20 +201,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) { } func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.Error(fmt.Sprintf(format, args...)) } } func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.Fatal(fmt.Sprintf(format, args...)) } Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.Panic(fmt.Sprintf(format, args...)) } } @@ -221,13 +222,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) { // Entry Println family functions func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.Debug(entry.sprintlnn(args...)) } } func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.Info(entry.sprintlnn(args...)) } } @@ -237,7 +238,7 @@ func (entry *Entry) Println(args ...interface{}) { } func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.Warn(entry.sprintlnn(args...)) } } @@ -247,20 +248,20 @@ func (entry *Entry) Warningln(args ...interface{}) { } func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.Error(entry.sprintlnn(args...)) } } func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.Fatal(entry.sprintlnn(args...)) } Exit(1) } func (entry *Entry) 
Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.Panic(entry.sprintlnn(args...)) } } diff --git a/vendor/github.com/Sirupsen/logrus/example_basic_test.go b/vendor/github.com/Sirupsen/logrus/example_basic_test.go new file mode 100644 index 0000000..a2acf55 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/example_basic_test.go @@ -0,0 +1,69 @@ +package logrus_test + +import ( + "github.com/sirupsen/logrus" + "os" +) + +func Example_basic() { + var log = logrus.New() + log.Formatter = new(logrus.JSONFormatter) + log.Formatter = new(logrus.TextFormatter) //default + log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output + log.Level = logrus.DebugLevel + log.Out = os.Stdout + + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + defer func() { + err := recover() + if err != nil { + entry := err.(*logrus.Entry) + log.WithFields(logrus.Fields{ + "omg": true, + "err_animal": entry.Data["animal"], + "err_size": entry.Data["size"], + "err_level": entry.Level, + "err_message": entry.Message, + "number": 100, + }).Error("The ice breaks!") // or use Fatal() to force the process to exit with a nonzero code + } + }() + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "number": 8, + }).Debug("Started observing beach") + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "temperature": -4, + }).Debug("Temperature changes") + + log.WithFields(logrus.Fields{ + "animal": "orca", + "size": 9009, + }).Panic("It's over 9000!") + + // Output: + // level=debug msg="Started observing beach" 
animal=walrus number=8 + // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 + // level=warning msg="The group's number increased tremendously!" number=122 omg=true + // level=debug msg="Temperature changes" temperature=-4 + // level=panic msg="It's over 9000!" animal=orca size=9009 + // level=error msg="The ice breaks!" err_animal=orca err_level=panic err_message="It's over 9000!" err_size=9009 number=100 omg=true +} diff --git a/vendor/github.com/Sirupsen/logrus/example_hook_test.go b/vendor/github.com/Sirupsen/logrus/example_hook_test.go new file mode 100644 index 0000000..d4ddffc --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/example_hook_test.go @@ -0,0 +1,35 @@ +package logrus_test + +import ( + "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" + "os" +) + +func Example_hook() { + var log = logrus.New() + log.Formatter = new(logrus.TextFormatter) // default + log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output + log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) + log.Out = os.Stdout + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Error("The ice breaks!") + + // Output: + // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 + // level=warning msg="The group's number increased tremendously!" number=122 omg=true + // level=error msg="The ice breaks!" 
number=100 omg=true +} diff --git a/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go b/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go deleted file mode 100644 index a1623ec..0000000 --- a/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.JSONFormatter) - log.Formatter = new(logrus.TextFormatter) // default - log.Level = logrus.DebugLevel -} - -func main() { - defer func() { - err := recover() - if err != nil { - log.WithFields(logrus.Fields{ - "omg": true, - "err": err, - "number": 100, - }).Fatal("The ice breaks!") - } - }() - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "number": 8, - }).Debug("Started observing beach") - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "temperature": -4, - }).Debug("Temperature changes") - - log.WithFields(logrus.Fields{ - "animal": "orca", - "size": 9009, - }).Panic("It's over 9000!") -} diff --git a/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go deleted file mode 100644 index 3187f6d..0000000 --- a/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.TextFormatter) // default - log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) -} - -func main() { - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - 
"number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") -} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go index 9a0120a..013183e 100644 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -31,14 +31,14 @@ func SetFormatter(formatter Formatter) { func SetLevel(level Level) { std.mu.Lock() defer std.mu.Unlock() - std.Level = level + std.SetLevel(level) } // GetLevel returns the standard logger level. func GetLevel() Level { std.mu.Lock() defer std.mu.Unlock() - return std.Level + return std.level() } // AddHook adds a hook to the standard logger hooks. diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go index b5fbe93..b183ff5 100644 --- a/vendor/github.com/Sirupsen/logrus/formatter.go +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -2,7 +2,7 @@ package logrus import "time" -const DefaultTimestampFormat = time.RFC3339 +const defaultTimestampFormat = time.RFC3339 // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. 
It exposes all the fields, including the default ones: diff --git a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go index c6d290c..d948158 100644 --- a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go +++ b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go @@ -80,11 +80,14 @@ func BenchmarkLargeJSONFormatter(b *testing.B) { } func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + logger := New() + entry := &Entry{ Time: time.Time{}, Level: InfoLevel, Message: "message", Data: fields, + Logger: logger, } var d []byte var err error diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md index 066704b..1bbc0f7 100644 --- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md +++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md @@ -5,13 +5,13 @@ ```go import ( "log/syslog" - "github.com/Sirupsen/logrus" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + "github.com/sirupsen/logrus" + lSyslog "github.com/sirupsen/logrus/hooks/syslog" ) func main() { log := logrus.New() - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + hook, err := lSyslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") if err == nil { log.Hooks.Add(hook) @@ -24,16 +24,16 @@ If you want to connect to local syslog (Ex. 
"/dev/log" or "/var/run/syslog" or " ```go import ( "log/syslog" - "github.com/Sirupsen/logrus" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + "github.com/sirupsen/logrus" + lSyslog "github.com/sirupsen/logrus/hooks/syslog" ) func main() { log := logrus.New() - hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "") + hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "") if err == nil { log.Hooks.Add(hook) } } -``` \ No newline at end of file +``` diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go index a36e200..329ce0d 100644 --- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go +++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go @@ -1,12 +1,13 @@ // +build !windows,!nacl,!plan9 -package logrus_syslog +package syslog import ( "fmt" - "github.com/Sirupsen/logrus" "log/syslog" "os" + + "github.com/sirupsen/logrus" ) // SyslogHook to send logs via syslog. diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go index 42762dc..5ec3a44 100644 --- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go +++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go @@ -1,9 +1,10 @@ -package logrus_syslog +package syslog import ( - "github.com/Sirupsen/logrus" "log/syslog" "testing" + + "github.com/sirupsen/logrus" ) func TestLocalhostAddAndPrint(t *testing.T) { diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go index 0688125..62c4845 100644 --- a/vendor/github.com/Sirupsen/logrus/hooks/test/test.go +++ b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go @@ -1,17 +1,25 @@ +// The Test package is used for testing logrus. It is here for backwards +// compatibility from when logrus' organization was upper-case. 
Please use +// lower-case logrus and the `null` package instead of this one. package test import ( "io/ioutil" + "sync" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) -// test.Hook is a hook designed for dealing with logs in test scenarios. +// Hook is a hook designed for dealing with logs in test scenarios. type Hook struct { + // Entries is an array of all entries that have been received by this hook. + // For safe access, use the AllEntries() method, rather than reading this + // value directly. Entries []*logrus.Entry + mu sync.RWMutex } -// Installs a test hook for the global logger. +// NewGlobal installs a test hook for the global logger. func NewGlobal() *Hook { hook := new(Hook) @@ -21,7 +29,7 @@ func NewGlobal() *Hook { } -// Installs a test hook for a given local logger. +// NewLocal installs a test hook for a given local logger. func NewLocal(logger *logrus.Logger) *Hook { hook := new(Hook) @@ -31,7 +39,7 @@ func NewLocal(logger *logrus.Logger) *Hook { } -// Creates a discarding logger and installs the test hook. +// NewNullLogger creates a discarding logger and installs the test hook. func NewNullLogger() (*logrus.Logger, *Hook) { logger := logrus.New() @@ -42,6 +50,8 @@ func NewNullLogger() (*logrus.Logger, *Hook) { } func (t *Hook) Fire(e *logrus.Entry) error { + t.mu.Lock() + defer t.mu.Unlock() t.Entries = append(t.Entries, e) return nil } @@ -51,17 +61,35 @@ func (t *Hook) Levels() []logrus.Level { } // LastEntry returns the last entry that was logged or nil. -func (t *Hook) LastEntry() (l *logrus.Entry) { - - if i := len(t.Entries) - 1; i < 0 { +func (t *Hook) LastEntry() *logrus.Entry { + t.mu.RLock() + defer t.mu.RUnlock() + i := len(t.Entries) - 1 + if i < 0 { return nil - } else { - return t.Entries[i] } + // Make a copy, for safety + e := *t.Entries[i] + return &e +} +// AllEntries returns all entries that were logged. 
+func (t *Hook) AllEntries() []*logrus.Entry { + t.mu.RLock() + defer t.mu.RUnlock() + // Make a copy so the returned value won't race with future log requests + entries := make([]*logrus.Entry, len(t.Entries)) + for i, entry := range t.Entries { + // Make a copy, for safety + e := *entry + entries[i] = &e + } + return entries } // Reset removes all Entries from this test hook. func (t *Hook) Reset() { + t.mu.Lock() + defer t.mu.Unlock() t.Entries = make([]*logrus.Entry, 0) } diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go index d69455b..3f55cfe 100644 --- a/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go +++ b/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go @@ -3,7 +3,7 @@ package test import ( "testing" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go index 2ad6dc5..fb01c1b 100644 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -5,18 +5,54 @@ import ( "fmt" ) +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +// Default key names for the default fields +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json type JSONFormatter struct { // TimestampFormat sets the format used for marshaling timestamps. TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // FieldMap allows users to customize the names of keys for default fields. 
+ // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // }, + // } + FieldMap FieldMap } +// Format renders a single log entry func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data := make(Fields, len(entry.Data)+3) for k, v := range entry.Data { switch v := v.(type) { case error: // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 + // https://github.com/sirupsen/logrus/issues/137 data[k] = v.Error() default: data[k] = v @@ -26,12 +62,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { timestampFormat := f.TimestampFormat if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat + timestampFormat = defaultTimestampFormat } - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() serialized, err := json.Marshal(data) if err != nil { diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go index 1d70873..51093a7 100644 --- a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go +++ b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go @@ -3,7 +3,7 @@ package logrus import ( "encoding/json" "errors" - + "strings" "testing" ) @@ -118,3 +118,82 @@ func TestJSONEntryEndsWithNewline(t *testing.T) { t.Fatal("Expected JSON log entry to end with a newline") } } + +func TestJSONMessageKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyMsg: "message", + }, + } + + b, err := formatter.Format(&Entry{Message: "oh hai"}) + if err != nil { + t.Fatal("Unable to format entry: 
", err) + } + s := string(b) + if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) { + t.Fatal("Expected JSON to format message key") + } +} + +func TestJSONLevelKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyLevel: "somelevel", + }, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !strings.Contains(s, "somelevel") { + t.Fatal("Expected JSON to format level key") + } +} + +func TestJSONTimeKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyTime: "timeywimey", + }, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !strings.Contains(s, "timeywimey") { + t.Fatal("Expected JSON to format time key") + } +} + +func TestJSONDisableTimestamp(t *testing.T) { + formatter := &JSONFormatter{ + DisableTimestamp: true, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if strings.Contains(s, FieldKeyTime) { + t.Error("Did not prevent timestamp", s) + } +} + +func TestJSONEnableTimestamp(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !strings.Contains(s, FieldKeyTime) { + t.Error("Timestamp not present", s) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go index b769f3d..2acab05 100644 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -4,6 +4,7 @@ import ( "io" "os" "sync" + "sync/atomic" ) type Logger struct { @@ -24,7 +25,7 @@ type Logger struct { Formatter Formatter // The logging level the logger should log at. 
This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in + // logged. Level Level // Used to sync writing to the log. Locking is enabled by Default mu MutexWrap @@ -112,7 +113,7 @@ func (logger *Logger) WithError(err error) *Entry { } func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debugf(format, args...) logger.releaseEntry(entry) @@ -120,7 +121,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) { } func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Infof(format, args...) logger.releaseEntry(entry) @@ -134,7 +135,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnf(format, args...) logger.releaseEntry(entry) @@ -142,7 +143,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) { } func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnf(format, args...) logger.releaseEntry(entry) @@ -150,7 +151,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) { } func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Errorf(format, args...) 
logger.releaseEntry(entry) @@ -158,7 +159,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) { } func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatalf(format, args...) logger.releaseEntry(entry) @@ -167,7 +168,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) { } func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panicf(format, args...) logger.releaseEntry(entry) @@ -175,7 +176,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { } func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debug(args...) logger.releaseEntry(entry) @@ -183,7 +184,7 @@ func (logger *Logger) Debug(args ...interface{}) { } func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Info(args...) logger.releaseEntry(entry) @@ -197,7 +198,7 @@ func (logger *Logger) Print(args ...interface{}) { } func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warn(args...) logger.releaseEntry(entry) @@ -205,7 +206,7 @@ func (logger *Logger) Warn(args ...interface{}) { } func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warn(args...) logger.releaseEntry(entry) @@ -213,7 +214,7 @@ func (logger *Logger) Warning(args ...interface{}) { } func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Error(args...) 
logger.releaseEntry(entry) @@ -221,7 +222,7 @@ func (logger *Logger) Error(args ...interface{}) { } func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatal(args...) logger.releaseEntry(entry) @@ -230,7 +231,7 @@ func (logger *Logger) Fatal(args ...interface{}) { } func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panic(args...) logger.releaseEntry(entry) @@ -238,7 +239,7 @@ func (logger *Logger) Panic(args ...interface{}) { } func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debugln(args...) logger.releaseEntry(entry) @@ -246,7 +247,7 @@ func (logger *Logger) Debugln(args ...interface{}) { } func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Infoln(args...) logger.releaseEntry(entry) @@ -260,7 +261,7 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnln(args...) logger.releaseEntry(entry) @@ -268,7 +269,7 @@ func (logger *Logger) Warnln(args ...interface{}) { } func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnln(args...) logger.releaseEntry(entry) @@ -276,7 +277,7 @@ func (logger *Logger) Warningln(args ...interface{}) { } func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Errorln(args...) 
logger.releaseEntry(entry) @@ -284,7 +285,7 @@ func (logger *Logger) Errorln(args ...interface{}) { } func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatalln(args...) logger.releaseEntry(entry) @@ -293,7 +294,7 @@ func (logger *Logger) Fatalln(args ...interface{}) { } func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panicln(args...) logger.releaseEntry(entry) @@ -306,3 +307,11 @@ func (logger *Logger) Panicln(args ...interface{}) { func (logger *Logger) SetNoLock() { logger.mu.Disable() } + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go index e596691..dd38999 100644 --- a/vendor/github.com/Sirupsen/logrus/logrus.go +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -10,7 +10,7 @@ import ( type Fields map[string]interface{} // Level type -type Level uint8 +type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go index bfc4780..78cbc28 100644 --- a/vendor/github.com/Sirupsen/logrus/logrus_test.go +++ b/vendor/github.com/Sirupsen/logrus/logrus_test.go @@ -359,3 +359,28 @@ func TestLogrusInterface(t *testing.T) { e := logger.WithField("another", "value") fn(e) } + +// Implements io.Writer using channels for synchronization, so we can wait on +// the Entry.Writer goroutine to write in a non-racey way. This does assume that +// there is a single call to Logger.Out for each message. 
+type channelWriter chan []byte + +func (cw channelWriter) Write(p []byte) (int, error) { + cw <- p + return len(p), nil +} + +func TestEntryWriter(t *testing.T) { + cw := channelWriter(make(chan []byte, 1)) + log := New() + log.Out = cw + log.Formatter = new(JSONFormatter) + log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n")) + + bs := <-cw + var fields Fields + err := json.Unmarshal(bs, &fields) + assert.Nil(t, err) + assert.Equal(t, fields["foo"], "bar") + assert.Equal(t, fields["level"], "warning") +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go deleted file mode 100644 index 1960169..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build appengine - -package logrus - -// IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal() bool { - return true -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go index 5f6be4d..d7b3893 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go @@ -3,8 +3,8 @@ package logrus -import "syscall" +import "golang.org/x/sys/unix" -const ioctlReadTermios = syscall.TIOCGETA +const ioctlReadTermios = unix.TIOCGETA -type Termios syscall.Termios +type Termios unix.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go index 308160c..88d7298 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_linux.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go @@ -7,8 +7,8 @@ package logrus -import "syscall" +import "golang.org/x/sys/unix" -const ioctlReadTermios = syscall.TCGETS +const ioctlReadTermios = unix.TCGETS -type Termios syscall.Termios +type Termios unix.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go 
b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index 329038f..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,22 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd netbsd dragonfly -// +build !appengine - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stderr - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go deleted file mode 100644 index a3c6f6e..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build solaris,!appengine - -package logrus - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) - return err == nil -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 3727e8a..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build windows,!appengine - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stderr - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go index cce61f2..be412aa 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -3,10 +3,14 @@ package logrus import ( "bytes" "fmt" - "runtime" + "io" + "os" "sort" "strings" + "sync" "time" + + "golang.org/x/crypto/ssh/terminal" ) const ( @@ -14,24 +18,19 @@ const ( red = 31 green = 32 yellow = 33 - blue = 34 + blue = 36 gray = 37 ) var ( baseTimestamp time.Time - isTerminal bool ) func init() { baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) } +// TextFormatter formats logs into text type TextFormatter struct { // Set to true to bypass checking for a TTY before outputting colors. ForceColors bool @@ -54,11 +53,35 @@ type TextFormatter struct { // that log extremely frequently and don't use the JSON formatter this may not // be desired. 
DisableSorting bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + sync.Once } +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = f.checkIfTerminal(entry.Logger.Out) + } +} + +func (f *TextFormatter) checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return terminal.IsTerminal(int(v.Fd())) + default: + return false + } +} + +// Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { var b *bytes.Buffer - var keys []string = make([]string, 0, len(entry.Data)) + keys := make([]string, 0, len(entry.Data)) for k := range entry.Data { keys = append(keys, k) } @@ -74,12 +97,13 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { prefixFieldClashes(entry.Data) - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + f.Do(func() { f.init(entry) }) + + isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors timestampFormat := f.TimestampFormat if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat + timestampFormat = defaultTimestampFormat } if isColored { f.printColored(b, entry, keys, timestampFormat) @@ -115,23 +139,29 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin levelText := strings.ToUpper(entry.Level.String())[0:4] - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) } else { fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, 
entry.Time.Format(timestampFormat), entry.Message) } for _, k := range keys { v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) } } -func needsQuoting(text string) bool { +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } for _, ch := range text { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { + ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { return true } } @@ -139,27 +169,23 @@ func needsQuoting(text string) bool { } func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - + if b.Len() > 0 { + b.WriteByte(' ') + } b.WriteString(key) b.WriteByte('=') + f.appendValue(b, value) +} - switch value := value.(type) { - case string: - if !needsQuoting(value) { - b.WriteString(value) - } else { - fmt.Fprintf(b, "%q", value) - } - case error: - errmsg := value.Error() - if !needsQuoting(errmsg) { - b.WriteString(errmsg) - } else { - fmt.Fprintf(b, "%q", value) - } - default: - fmt.Fprint(b, value) +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) } - b.WriteByte(' ') + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } } diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go index e25a44f..d93b931 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go @@ -3,17 +3,38 @@ package logrus import ( "bytes" "errors" + "fmt" + "strings" "testing" "time" ) +func TestFormatting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + 
testCases := []struct { + value string + expected string + }{ + {`foo`, "time=\"0001-01-01T00:00:00Z\" level=panic test=foo\n"}, + } + + for _, tc := range testCases { + b, _ := tf.Format(WithField("test", tc.value)) + + if string(b) != tc.expected { + t.Errorf("formatting expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) + } + } +} + func TestQuoting(t *testing.T) { tf := &TextFormatter{DisableColors: true} checkQuoting := func(q bool, value interface{}) { b, _ := tf.Format(WithField("test", value)) idx := bytes.Index(b, ([]byte)("test=")) - cont := bytes.Contains(b[idx+5:], []byte{'"'}) + cont := bytes.Contains(b[idx+5:], []byte("\"")) if cont != q { if q { t.Errorf("quoting expected for: %#v", value) @@ -23,14 +44,67 @@ func TestQuoting(t *testing.T) { } } + checkQuoting(false, "") checkQuoting(false, "abcd") checkQuoting(false, "v1.0") checkQuoting(false, "1234567890") - checkQuoting(true, "/foobar") + checkQuoting(false, "/foobar") + checkQuoting(false, "foo_bar") + checkQuoting(false, "foo@bar") + checkQuoting(false, "foobar^") + checkQuoting(false, "+/-_^@f.oobar") + checkQuoting(true, "foobar$") + checkQuoting(true, "&foobar") checkQuoting(true, "x y") checkQuoting(true, "x,y") checkQuoting(false, errors.New("invalid")) checkQuoting(true, errors.New("invalid argument")) + + // Test for quoting empty fields. 
+ tf.QuoteEmptyFields = true + checkQuoting(true, "") + checkQuoting(false, "abcd") + checkQuoting(true, errors.New("invalid argument")) +} + +func TestEscaping(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + testCases := []struct { + value string + expected string + }{ + {`ba"r`, `ba\"r`}, + {`ba'r`, `ba'r`}, + } + + for _, tc := range testCases { + b, _ := tf.Format(WithField("test", tc.value)) + if !bytes.Contains(b, []byte(tc.expected)) { + t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) + } + } +} + +func TestEscaping_Interface(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + ts := time.Now() + + testCases := []struct { + value interface{} + expected string + }{ + {ts, fmt.Sprintf("\"%s\"", ts.String())}, + {errors.New("error: something went wrong"), "\"error: something went wrong\""}, + } + + for _, tc := range testCases { + b, _ := tf.Format(WithField("test", tc.value)) + if !bytes.Contains(b, []byte(tc.expected)) { + t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) + } + } } func TestTimestampFormat(t *testing.T) { @@ -39,10 +113,7 @@ func TestTimestampFormat(t *testing.T) { customStr, _ := customFormatter.Format(WithField("test", "test")) timeStart := bytes.Index(customStr, ([]byte)("time=")) timeEnd := bytes.Index(customStr, ([]byte)("level=")) - timeStr := customStr[timeStart+5 : timeEnd-1] - if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { - timeStr = timeStr[1 : len(timeStr)-1] - } + timeStr := customStr[timeStart+5+len("\"") : timeEnd-1-len("\"")] if format == "" { format = time.RFC3339 } @@ -57,5 +128,14 @@ func TestTimestampFormat(t *testing.T) { checkTimeStr("") } +func TestDisableTimestampWithColoredOutput(t *testing.T) { + tf := &TextFormatter{DisableTimestamp: true, ForceColors: true} + + b, _ := tf.Format(WithField("test", "test")) + if strings.Contains(string(b), "[0000]") { + t.Error("timestamp not 
expected when DisableTimestamp is true") + } +} + // TODO add tests for sorting etc., this requires a parser for the text // formatter output. diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go index f74d2aa..7bdebed 100644 --- a/vendor/github.com/Sirupsen/logrus/writer.go +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter { } func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + switch level { case DebugLevel: - printFunc = logger.Debug + printFunc = entry.Debug case InfoLevel: - printFunc = logger.Info + printFunc = entry.Info case WarnLevel: - printFunc = logger.Warn + printFunc = entry.Warn case ErrorLevel: - printFunc = logger.Error + printFunc = entry.Error case FatalLevel: - printFunc = logger.Fatal + printFunc = entry.Fatal case PanicLevel: - printFunc = logger.Panic + printFunc = entry.Panic default: - printFunc = logger.Print + printFunc = entry.Print } - go logger.writerScanner(reader, printFunc) + go entry.writerScanner(reader, printFunc) runtime.SetFinalizer(writer, writerFinalizer) return writer } -func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) for scanner.Scan() { printFunc(scanner.Text()) } if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) + entry.Errorf("Error while reading from Writer: %s", err) } reader.Close() } diff --git a/vendor/github.com/fatih/structs/.travis.yml b/vendor/github.com/fatih/structs/.travis.yml index 
845012b..cbf2ccc 100644 --- a/vendor/github.com/fatih/structs/.travis.yml +++ b/vendor/github.com/fatih/structs/.travis.yml @@ -1,6 +1,6 @@ language: go go: - - 1.6 + - 1.7.x - tip sudo: false before_install: diff --git a/vendor/github.com/fatih/structs/field.go b/vendor/github.com/fatih/structs/field.go index ad705f0..e697832 100644 --- a/vendor/github.com/fatih/structs/field.go +++ b/vendor/github.com/fatih/structs/field.go @@ -117,7 +117,16 @@ func (f *Field) Field(name string) *Field { // FieldOk returns the field from a nested struct. The boolean returns whether // the field was found (true) or not (false). func (f *Field) FieldOk(name string) (*Field, bool) { - v := strctVal(f.value.Interface()) + value := &f.value + // value must be settable so we need to make sure it holds the address of the + // variable and not a copy, so we can pass the pointer to strctVal instead of a + // copy (which is not assigned to any variable, hence not settable). + // see "https://blog.golang.org/laws-of-reflection#TOC_8." + if f.value.Kind() != reflect.Ptr { + a := f.value.Addr() + value = &a + } + v := strctVal(value.Interface()) t := v.Type() field, ok := t.FieldByName(name) diff --git a/vendor/github.com/fatih/structs/field_test.go b/vendor/github.com/fatih/structs/field_test.go index b77e951..de9dc3b 100644 --- a/vendor/github.com/fatih/structs/field_test.go +++ b/vendor/github.com/fatih/structs/field_test.go @@ -133,6 +133,20 @@ func TestField_Set(t *testing.T) { } } +func TestField_NotSettable(t *testing.T) { + a := map[int]Baz{ + 4: Baz{ + A: "value", + }, + } + + s := New(a[4]) + + if err := s.Field("A").Set("newValue"); err != errNotSettable { + t.Errorf("Trying to set non-settable field should error with %q. 
Got %q instead.", errNotSettable, err) + } +} + func TestField_Zero(t *testing.T) { s := newStruct() diff --git a/vendor/github.com/fatih/structs/structs.go b/vendor/github.com/fatih/structs/structs.go index 39eb083..be3816a 100644 --- a/vendor/github.com/fatih/structs/structs.go +++ b/vendor/github.com/fatih/structs/structs.go @@ -56,7 +56,7 @@ func New(s interface{}) *Struct { // in the output map. Example: // // // The FieldStruct's fields will be flattened into the output map. -// FieldStruct time.Time `structs:"flatten"` +// FieldStruct time.Time `structs:",flatten"` // // A tag value with the option of "omitnested" stops iterating further if the type // is a struct. Example: @@ -115,17 +115,17 @@ func (s *Struct) FillMap(out map[string]interface{}) { } } - if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { - // look out for embedded structs, and convert them to a - // map[string]interface{} too - n := New(val.Interface()) - n.TagName = s.TagName - m := n.Map() - isSubStruct = true - if len(m) == 0 { - finalVal = val.Interface() - } else { - finalVal = m + if !tagOpts.Has("omitnested") { + finalVal = s.nested(val) + + v := reflect.ValueOf(val.Interface()) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Map, reflect.Struct: + isSubStruct = true } } else { finalVal = val.Interface() @@ -431,7 +431,7 @@ func strctVal(s interface{}) reflect.Value { v := reflect.ValueOf(s) // if pointer get the underlying element≤ - if v.Kind() == reflect.Ptr { + for v.Kind() == reflect.Ptr { v = v.Elem() } @@ -505,3 +505,82 @@ func IsStruct(s interface{}) bool { func Name(s interface{}) string { return New(s).Name() } + +// nested retrieves recursively all types for the given value and returns the +// nested value. 
+func (s *Struct) nested(val reflect.Value) interface{} { + var finalVal interface{} + + v := reflect.ValueOf(val.Interface()) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + n := New(val.Interface()) + n.TagName = s.TagName + m := n.Map() + + // do not add the converted value if there are no exported fields, ie: + // time.Time + if len(m) == 0 { + finalVal = val.Interface() + } else { + finalVal = m + } + case reflect.Map: + // get the element type of the map + mapElem := val.Type() + switch val.Type().Kind() { + case reflect.Ptr, reflect.Array, reflect.Map, + reflect.Slice, reflect.Chan: + mapElem = val.Type().Elem() + if mapElem.Kind() == reflect.Ptr { + mapElem = mapElem.Elem() + } + } + + // only iterate over struct types, ie: map[string]StructType, + // map[string][]StructType, + if mapElem.Kind() == reflect.Struct || + (mapElem.Kind() == reflect.Slice && + mapElem.Elem().Kind() == reflect.Struct) { + m := make(map[string]interface{}, val.Len()) + for _, k := range val.MapKeys() { + m[k.String()] = s.nested(val.MapIndex(k)) + } + finalVal = m + break + } + + // TODO(arslan): should this be optional? + finalVal = val.Interface() + case reflect.Slice, reflect.Array: + if val.Type().Kind() == reflect.Interface { + finalVal = val.Interface() + break + } + + // TODO(arslan): should this be optional? + // do not iterate of non struct types, just pass the value. Ie: []int, + // []string, co... We only iterate further if it's a struct. 
+ // i.e []foo or []*foo + if val.Type().Elem().Kind() != reflect.Struct && + !(val.Type().Elem().Kind() == reflect.Ptr && + val.Type().Elem().Elem().Kind() == reflect.Struct) { + finalVal = val.Interface() + break + } + + slices := make([]interface{}, val.Len(), val.Len()) + for x := 0; x < val.Len(); x++ { + slices[x] = s.nested(val.Index(x)) + } + finalVal = slices + default: + finalVal = val.Interface() + } + + return finalVal +} diff --git a/vendor/github.com/fatih/structs/structs_example_test.go b/vendor/github.com/fatih/structs/structs_example_test.go index 32bb829..329c130 100644 --- a/vendor/github.com/fatih/structs/structs_example_test.go +++ b/vendor/github.com/fatih/structs/structs_example_test.go @@ -81,7 +81,7 @@ func ExampleMap_tags() { } -func ExampleMap_nested() { +func ExampleMap_omitNested() { // By default field with struct types are processed too. We can stop // processing them via "omitnested" tag option. type Server struct { diff --git a/vendor/github.com/fatih/structs/structs_test.go b/vendor/github.com/fatih/structs/structs_test.go index b1b05a1..8a18a07 100644 --- a/vendor/github.com/fatih/structs/structs_test.go +++ b/vendor/github.com/fatih/structs/structs_test.go @@ -268,6 +268,268 @@ func TestMap_Nested(t *testing.T) { } } +func TestMap_NestedMapWithStructValues(t *testing.T) { + type A struct { + Name string + } + + type B struct { + A map[string]*A + } + + a := &A{Name: "example"} + + b := &B{ + A: map[string]*A{ + "example_key": a, + }, + } + + m := Map(b) + + if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map { + t.Errorf("Map should return a map type, got: %v", typ) + } + + in, ok := m["A"].(map[string]interface{}) + if !ok { + t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["A"]) + } + + example := in["example_key"].(map[string]interface{}) + if name := example["Name"].(string); name != "example" { + t.Errorf("Map nested struct's name field should give example, got: %s", name) + } +} + +func 
TestMap_NestedMapWithStringValues(t *testing.T) { + type B struct { + Foo map[string]string + } + + type A struct { + B *B + } + + b := &B{ + Foo: map[string]string{ + "example_key": "example", + }, + } + + a := &A{B: b} + + m := Map(a) + + if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map { + t.Errorf("Map should return a map type, got: %v", typ) + } + + in, ok := m["B"].(map[string]interface{}) + if !ok { + t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"]) + } + + foo := in["Foo"].(map[string]string) + if name := foo["example_key"]; name != "example" { + t.Errorf("Map nested struct's name field should give example, got: %s", name) + } +} +func TestMap_NestedMapWithInterfaceValues(t *testing.T) { + type B struct { + Foo map[string]interface{} + } + + type A struct { + B *B + } + + b := &B{ + Foo: map[string]interface{}{ + "example_key": "example", + }, + } + + a := &A{B: b} + + m := Map(a) + + if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map { + t.Errorf("Map should return a map type, got: %v", typ) + } + + in, ok := m["B"].(map[string]interface{}) + if !ok { + t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"]) + } + + foo := in["Foo"].(map[string]interface{}) + if name := foo["example_key"]; name != "example" { + t.Errorf("Map nested struct's name field should give example, got: %s", name) + } +} + +func TestMap_NestedMapWithSliceIntValues(t *testing.T) { + type B struct { + Foo map[string][]int + } + + type A struct { + B *B + } + + b := &B{ + Foo: map[string][]int{ + "example_key": []int{80}, + }, + } + + a := &A{B: b} + + m := Map(a) + + if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map { + t.Errorf("Map should return a map type, got: %v", typ) + } + + in, ok := m["B"].(map[string]interface{}) + if !ok { + t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"]) + } + + foo := in["Foo"].(map[string][]int) + if name := foo["example_key"]; 
name[0] != 80 { + t.Errorf("Map nested struct's name field should give example, got: %s", name) + } +} + +func TestMap_NestedMapWithSliceStructValues(t *testing.T) { + type address struct { + Country string `structs:"country"` + } + + type B struct { + Foo map[string][]address + } + + type A struct { + B *B + } + + b := &B{ + Foo: map[string][]address{ + "example_key": []address{ + {Country: "Turkey"}, + }, + }, + } + + a := &A{B: b} + m := Map(a) + + if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map { + t.Errorf("Map should return a map type, got: %v", typ) + } + + in, ok := m["B"].(map[string]interface{}) + if !ok { + t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"]) + } + + foo := in["Foo"].(map[string]interface{}) + + addresses := foo["example_key"].([]interface{}) + + addr, ok := addresses[0].(map[string]interface{}) + if !ok { + t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"]) + } + + if _, exists := addr["country"]; !exists { + t.Errorf("Expecting country, but found Country") + } +} + +func TestMap_NestedSliceWithStructValues(t *testing.T) { + type address struct { + Country string `structs:"customCountryName"` + } + + type person struct { + Name string `structs:"name"` + Addresses []address `structs:"addresses"` + } + + p := person{ + Name: "test", + Addresses: []address{ + address{Country: "England"}, + address{Country: "Italy"}, + }, + } + mp := Map(p) + + mpAddresses := mp["addresses"].([]interface{}) + if _, exists := mpAddresses[0].(map[string]interface{})["Country"]; exists { + t.Errorf("Expecting customCountryName, but found Country") + } + + if _, exists := mpAddresses[0].(map[string]interface{})["customCountryName"]; !exists { + t.Errorf("customCountryName key not found") + } +} + +func TestMap_NestedSliceWithPointerOfStructValues(t *testing.T) { + type address struct { + Country string `structs:"customCountryName"` + } + + type person struct { + Name string 
`structs:"name"` + Addresses []*address `structs:"addresses"` + } + + p := person{ + Name: "test", + Addresses: []*address{ + &address{Country: "England"}, + &address{Country: "Italy"}, + }, + } + mp := Map(p) + + mpAddresses := mp["addresses"].([]interface{}) + if _, exists := mpAddresses[0].(map[string]interface{})["Country"]; exists { + t.Errorf("Expecting customCountryName, but found Country") + } + + if _, exists := mpAddresses[0].(map[string]interface{})["customCountryName"]; !exists { + t.Errorf("customCountryName key not found") + } +} + +func TestMap_NestedSliceWithIntValues(t *testing.T) { + type person struct { + Name string `structs:"name"` + Ports []int `structs:"ports"` + } + + p := person{ + Name: "test", + Ports: []int{80}, + } + m := Map(p) + + ports, ok := m["ports"].([]int) + if !ok { + t.Errorf("Nested type of map should be of type []int, have %T", m["ports"]) + } + + if ports[0] != 80 { + t.Errorf("Map nested struct's ports field should give 80, got: %v", ports) + } +} + func TestMap_Anonymous(t *testing.T) { type A struct { Name string @@ -1022,6 +1284,28 @@ func TestNestedNilPointer(t *testing.T) { _ = Map(personWithDogWithCollar) // Doesn't panic } +func TestSetValueOnNestedField(t *testing.T) { + type Base struct { + ID int + } + + type User struct { + Base + Name string + } + + u := User{} + s := New(&u) + f := s.Field("Base").Field("ID") + err := f.Set(10) + if err != nil { + t.Errorf("Error %v", err) + } + if f.Value().(int) != 10 { + t.Errorf("Value should be equal to 10, got %v", f.Value()) + } +} + type Person struct { Name string Age int @@ -1107,3 +1391,63 @@ func TestNonStringerTagWithStringOption(t *testing.T) { t.Errorf("Value for field Animal should not exist") } } + +func TestMap_InterfaceValue(t *testing.T) { + type TestStruct struct { + A interface{} + } + + expected := []byte("test value") + + a := TestStruct{A: expected} + s := Map(a) + if !reflect.DeepEqual(s["A"], expected) { + t.Errorf("Value does not match expected: %q 
!= %q", s["A"], expected) + } +} + +func TestPointer2Pointer(t *testing.T) { + defer func() { + err := recover() + if err != nil { + fmt.Printf("err %+v\n", err) + t.Error("Internal nil pointer should not panic") + } + }() + a := &Animal{ + Name: "Fluff", + Age: 4, + } + _ = Map(&a) + + b := &a + _ = Map(&b) + + c := &b + _ = Map(&c) +} + +func TestMap_InterfaceTypeWithMapValue(t *testing.T) { + type A struct { + Name string `structs:"name"` + Ip string `structs:"ip"` + Query string `structs:"query"` + Payload interface{} `structs:"payload"` + } + + a := A{ + Name: "test", + Ip: "127.0.0.1", + Query: "", + Payload: map[string]string{"test_param": "test_param"}, + } + + defer func() { + err := recover() + if err != nil { + t.Error("Converting Map with an interface{} type with map value should not panic") + } + }() + + _ = Map(a) +} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore new file mode 100644 index 0000000..042091d --- /dev/null +++ b/vendor/github.com/golang/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 0000000..bcfa195 --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. 
+ +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 0000000..931ae31 --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 0000000..6050c10 --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 0000000..cea1287 --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. 
+ +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. 
-tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp b/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp new file mode 100644 index 0000000..fc31f51 --- /dev/null +++ b/vendor/github.com/golang/snappy/cmd/snappytool/main.cpp @@ -0,0 +1,77 @@ +/* +To build the snappytool binary: 
+g++ main.cpp /usr/lib/libsnappy.a -o snappytool +or, if you have built the C++ snappy library from source: +g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool +after running "make" from your snappy checkout directory. +*/ + +#include +#include +#include +#include + +#include "snappy.h" + +#define N 1000000 + +char dst[N]; +char src[N]; + +int main(int argc, char** argv) { + // Parse args. + if (argc != 2) { + fprintf(stderr, "exactly one of -d or -e must be given\n"); + return 1; + } + bool decode = strcmp(argv[1], "-d") == 0; + bool encode = strcmp(argv[1], "-e") == 0; + if (decode == encode) { + fprintf(stderr, "exactly one of -d or -e must be given\n"); + return 1; + } + + // Read all of stdin into src[:s]. + size_t s = 0; + while (1) { + if (s == N) { + fprintf(stderr, "input too large\n"); + return 1; + } + ssize_t n = read(0, src+s, N-s); + if (n == 0) { + break; + } + if (n < 0) { + fprintf(stderr, "read error: %s\n", strerror(errno)); + // TODO: handle EAGAIN, EINTR? + return 1; + } + s += n; + } + + // Encode or decode src[:s] to dst[:d], and write to stdout. + size_t d = 0; + if (encode) { + if (N < snappy::MaxCompressedLength(s)) { + fprintf(stderr, "input too large after encoding\n"); + return 1; + } + snappy::RawCompress(src, s, dst, &d); + } else { + if (!snappy::GetUncompressedLength(src, s, &d)) { + fprintf(stderr, "could not get uncompressed length\n"); + return 1; + } + if (N < d) { + fprintf(stderr, "input too large after decoding\n"); + return 1; + } + if (!snappy::RawUncompress(src, s, dst)) { + fprintf(stderr, "input was not valid Snappy-compressed data\n"); + return 1; + } + } + write(1, dst, d); + return 0; +} diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 0000000..72efb03 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. 
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. 
+func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
+ n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 0000000..fcd192b --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 0000000..e6179f6 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. 
+ + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. 
+// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. 
+ // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. 
+ // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. 
+ CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 0000000..8c9f204 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. 
Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 0000000..8d393e9 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. 
That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. 
+func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. 
Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
+ n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 0000000..150d91b --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. 
+// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 0000000..adfd979 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. 
+// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. 
+ MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. 
The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. 
+TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! 
offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. 
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". 
+ ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 0000000..dbcae90 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. 
The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. 
+// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. 
+ // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/golden_test.go b/vendor/github.com/golang/snappy/golden_test.go new file mode 100644 index 0000000..e4496f9 --- /dev/null +++ b/vendor/github.com/golang/snappy/golden_test.go @@ -0,0 +1,1965 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +// extendMatchGoldenTestCases is the i and j arguments, and the returned value, +// for every extendMatch call issued when encoding the +// testdata/Mark.Twain-Tom.Sawyer.txt file. It is used to benchmark the +// extendMatch implementation. 
+// +// It was generated manually by adding some print statements to the (pure Go) +// extendMatch implementation: +// +// func extendMatch(src []byte, i, j int) int { +// i0, j0 := i, j +// for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { +// } +// println("{", i0, ",", j0, ",", j, "},") +// return j +// } +// +// and running "go test -test.run=EncodeGoldenInput -tags=noasm". +var extendMatchGoldenTestCases = []struct { + i, j, want int +}{ + {11, 61, 62}, + {80, 81, 82}, + {86, 87, 101}, + {85, 133, 149}, + {152, 153, 162}, + {133, 168, 193}, + {168, 207, 225}, + {81, 255, 275}, + {278, 279, 283}, + {306, 417, 417}, + {373, 428, 430}, + {389, 444, 447}, + {474, 510, 512}, + {465, 533, 533}, + {47, 547, 547}, + {307, 551, 554}, + {420, 582, 587}, + {309, 604, 604}, + {604, 625, 625}, + {538, 629, 629}, + {328, 640, 640}, + {573, 645, 645}, + {319, 657, 657}, + {30, 664, 664}, + {45, 679, 680}, + {621, 684, 684}, + {376, 700, 700}, + {33, 707, 708}, + {601, 733, 733}, + {334, 744, 745}, + {625, 758, 759}, + {382, 763, 763}, + {550, 769, 771}, + {533, 789, 789}, + {804, 813, 813}, + {342, 841, 842}, + {742, 847, 847}, + {74, 852, 852}, + {810, 864, 864}, + {758, 868, 869}, + {714, 883, 883}, + {582, 889, 891}, + {61, 934, 935}, + {894, 942, 942}, + {939, 949, 949}, + {785, 956, 957}, + {886, 978, 978}, + {792, 998, 998}, + {998, 1005, 1005}, + {572, 1032, 1032}, + {698, 1051, 1053}, + {599, 1067, 1069}, + {1056, 1079, 1079}, + {942, 1089, 1090}, + {831, 1094, 1096}, + {1088, 1100, 1103}, + {732, 1113, 1114}, + {1037, 1118, 1118}, + {872, 1128, 1130}, + {1079, 1140, 1142}, + {332, 1162, 1162}, + {207, 1168, 1186}, + {1189, 1190, 1225}, + {105, 1229, 1230}, + {79, 1256, 1257}, + {1190, 1261, 1283}, + {255, 1306, 1306}, + {1319, 1339, 1358}, + {364, 1370, 1370}, + {955, 1378, 1380}, + {122, 1403, 1403}, + {1325, 1407, 1419}, + {664, 1423, 1424}, + {941, 1461, 1463}, + {867, 1477, 1478}, + {757, 1488, 1489}, + {1140, 1499, 1499}, + {31, 1506, 1506}, + {1487, 
1510, 1512}, + {1089, 1520, 1521}, + {1467, 1525, 1529}, + {1394, 1537, 1537}, + {1499, 1541, 1541}, + {367, 1558, 1558}, + {1475, 1564, 1564}, + {1525, 1568, 1571}, + {1541, 1582, 1583}, + {864, 1587, 1588}, + {704, 1597, 1597}, + {336, 1602, 1602}, + {1383, 1613, 1613}, + {1498, 1617, 1618}, + {1051, 1623, 1625}, + {401, 1643, 1645}, + {1072, 1654, 1655}, + {1067, 1667, 1669}, + {699, 1673, 1674}, + {1587, 1683, 1684}, + {920, 1696, 1696}, + {1505, 1710, 1710}, + {1550, 1723, 1723}, + {996, 1727, 1727}, + {833, 1733, 1734}, + {1638, 1739, 1740}, + {1654, 1744, 1744}, + {753, 1761, 1761}, + {1548, 1773, 1773}, + {1568, 1777, 1780}, + {1683, 1793, 1794}, + {948, 1801, 1801}, + {1666, 1805, 1808}, + {1502, 1814, 1814}, + {1696, 1822, 1822}, + {502, 1836, 1837}, + {917, 1843, 1843}, + {1733, 1854, 1855}, + {970, 1859, 1859}, + {310, 1863, 1863}, + {657, 1872, 1872}, + {1005, 1876, 1876}, + {1662, 1880, 1880}, + {904, 1892, 1892}, + {1427, 1910, 1910}, + {1772, 1929, 1930}, + {1822, 1937, 1940}, + {1858, 1949, 1950}, + {1602, 1956, 1956}, + {1150, 1962, 1962}, + {1504, 1966, 1967}, + {51, 1971, 1971}, + {1605, 1979, 1979}, + {1458, 1983, 1988}, + {1536, 2001, 2006}, + {1373, 2014, 2018}, + {1494, 2025, 2025}, + {1667, 2029, 2031}, + {1592, 2035, 2035}, + {330, 2045, 2045}, + {1376, 2053, 2053}, + {1991, 2058, 2059}, + {1635, 2065, 2065}, + {1992, 2073, 2074}, + {2014, 2080, 2081}, + {1546, 2085, 2087}, + {59, 2099, 2099}, + {1996, 2106, 2106}, + {1836, 2110, 2110}, + {2068, 2114, 2114}, + {1338, 2122, 2122}, + {1562, 2128, 2130}, + {1934, 2134, 2134}, + {2114, 2141, 2142}, + {977, 2149, 2150}, + {956, 2154, 2155}, + {1407, 2162, 2162}, + {1773, 2166, 2166}, + {883, 2171, 2171}, + {623, 2175, 2178}, + {1520, 2191, 2192}, + {1162, 2200, 2200}, + {912, 2204, 2204}, + {733, 2208, 2208}, + {1777, 2212, 2215}, + {1532, 2219, 2219}, + {718, 2223, 2225}, + {2069, 2229, 2229}, + {2207, 2245, 2246}, + {1139, 2264, 2264}, + {677, 2274, 2274}, + {2099, 2279, 2279}, + {1863, 2283, 
2283}, + {1966, 2305, 2306}, + {2279, 2313, 2313}, + {1628, 2319, 2319}, + {755, 2329, 2329}, + {1461, 2334, 2334}, + {2117, 2340, 2340}, + {2313, 2349, 2349}, + {1859, 2353, 2353}, + {1048, 2362, 2362}, + {895, 2366, 2366}, + {2278, 2373, 2373}, + {1884, 2377, 2377}, + {1402, 2387, 2392}, + {700, 2398, 2398}, + {1971, 2402, 2402}, + {2009, 2419, 2419}, + {1441, 2426, 2428}, + {2208, 2432, 2432}, + {2038, 2436, 2436}, + {932, 2443, 2443}, + {1759, 2447, 2448}, + {744, 2452, 2452}, + {1875, 2458, 2458}, + {2405, 2468, 2468}, + {1596, 2472, 2473}, + {1953, 2480, 2482}, + {736, 2487, 2487}, + {1913, 2493, 2493}, + {774, 2497, 2497}, + {1484, 2506, 2508}, + {2432, 2512, 2512}, + {752, 2519, 2519}, + {2497, 2523, 2523}, + {2409, 2528, 2529}, + {2122, 2533, 2533}, + {2396, 2537, 2538}, + {2410, 2547, 2548}, + {1093, 2555, 2560}, + {551, 2564, 2565}, + {2268, 2569, 2569}, + {1362, 2580, 2580}, + {1916, 2584, 2585}, + {994, 2589, 2590}, + {1979, 2596, 2596}, + {1041, 2602, 2602}, + {2104, 2614, 2616}, + {2609, 2621, 2628}, + {2329, 2638, 2638}, + {2211, 2657, 2658}, + {2638, 2662, 2667}, + {2578, 2676, 2679}, + {2153, 2685, 2686}, + {2608, 2696, 2697}, + {598, 2712, 2712}, + {2620, 2719, 2720}, + {1888, 2724, 2728}, + {2709, 2732, 2732}, + {1365, 2739, 2739}, + {784, 2747, 2748}, + {424, 2753, 2753}, + {2204, 2759, 2759}, + {812, 2768, 2769}, + {2455, 2773, 2773}, + {1722, 2781, 2781}, + {1917, 2792, 2792}, + {2705, 2799, 2799}, + {2685, 2806, 2807}, + {2742, 2811, 2811}, + {1370, 2818, 2818}, + {2641, 2830, 2830}, + {2512, 2837, 2837}, + {2457, 2841, 2841}, + {2756, 2845, 2845}, + {2719, 2855, 2855}, + {1423, 2859, 2859}, + {2849, 2863, 2865}, + {1474, 2871, 2871}, + {1161, 2875, 2876}, + {2282, 2880, 2881}, + {2746, 2888, 2888}, + {1783, 2893, 2893}, + {2401, 2899, 2900}, + {2632, 2920, 2923}, + {2422, 2928, 2930}, + {2715, 2939, 2939}, + {2162, 2943, 2943}, + {2859, 2947, 2947}, + {1910, 2951, 2951}, + {1431, 2955, 2956}, + {1439, 2964, 2964}, + {2501, 2968, 2969}, + 
{2029, 2973, 2976}, + {689, 2983, 2984}, + {1658, 2988, 2988}, + {1031, 2996, 2996}, + {2149, 3001, 3002}, + {25, 3009, 3013}, + {2964, 3023, 3023}, + {953, 3027, 3028}, + {2359, 3036, 3036}, + {3023, 3049, 3049}, + {2880, 3055, 3056}, + {2973, 3076, 3077}, + {2874, 3090, 3090}, + {2871, 3094, 3094}, + {2532, 3100, 3100}, + {2938, 3107, 3108}, + {350, 3115, 3115}, + {2196, 3119, 3121}, + {1133, 3127, 3129}, + {1797, 3134, 3150}, + {3032, 3158, 3158}, + {3016, 3172, 3172}, + {2533, 3179, 3179}, + {3055, 3187, 3188}, + {1384, 3192, 3193}, + {2799, 3199, 3199}, + {2126, 3203, 3207}, + {2334, 3215, 3215}, + {2105, 3220, 3221}, + {3199, 3229, 3229}, + {2891, 3233, 3233}, + {855, 3240, 3240}, + {1852, 3253, 3256}, + {2140, 3263, 3263}, + {1682, 3268, 3270}, + {3243, 3274, 3274}, + {924, 3279, 3279}, + {2212, 3283, 3283}, + {2596, 3287, 3287}, + {2999, 3291, 3291}, + {2353, 3295, 3295}, + {2480, 3302, 3304}, + {1959, 3308, 3311}, + {3000, 3318, 3318}, + {845, 3330, 3330}, + {2283, 3334, 3334}, + {2519, 3342, 3342}, + {3325, 3346, 3348}, + {2397, 3353, 3354}, + {2763, 3358, 3358}, + {3198, 3363, 3364}, + {3211, 3368, 3372}, + {2950, 3376, 3377}, + {3245, 3388, 3391}, + {2264, 3398, 3398}, + {795, 3403, 3403}, + {3287, 3407, 3407}, + {3358, 3411, 3411}, + {3317, 3415, 3415}, + {3232, 3431, 3431}, + {2128, 3435, 3437}, + {3236, 3441, 3441}, + {3398, 3445, 3446}, + {2814, 3450, 3450}, + {3394, 3466, 3466}, + {2425, 3470, 3470}, + {3330, 3476, 3476}, + {1612, 3480, 3480}, + {1004, 3485, 3486}, + {2732, 3490, 3490}, + {1117, 3494, 3495}, + {629, 3501, 3501}, + {3087, 3514, 3514}, + {684, 3518, 3518}, + {3489, 3522, 3524}, + {1760, 3529, 3529}, + {617, 3537, 3537}, + {3431, 3541, 3541}, + {997, 3547, 3547}, + {882, 3552, 3553}, + {2419, 3558, 3558}, + {610, 3562, 3563}, + {1903, 3567, 3569}, + {3005, 3575, 3575}, + {3076, 3585, 3586}, + {3541, 3590, 3590}, + {3490, 3594, 3594}, + {1899, 3599, 3599}, + {3545, 3606, 3606}, + {3290, 3614, 3615}, + {2056, 3619, 3620}, + {3556, 3625, 
3625}, + {3294, 3632, 3633}, + {637, 3643, 3644}, + {3609, 3648, 3650}, + {3175, 3658, 3658}, + {3498, 3665, 3665}, + {1597, 3669, 3669}, + {1983, 3673, 3673}, + {3215, 3682, 3682}, + {3544, 3689, 3689}, + {3694, 3698, 3698}, + {3228, 3715, 3716}, + {2594, 3720, 3722}, + {3573, 3726, 3726}, + {2479, 3732, 3735}, + {3191, 3741, 3742}, + {1113, 3746, 3747}, + {2844, 3751, 3751}, + {3445, 3756, 3757}, + {3755, 3766, 3766}, + {3421, 3775, 3780}, + {3593, 3784, 3786}, + {3263, 3796, 3796}, + {3469, 3806, 3806}, + {2602, 3815, 3815}, + {723, 3819, 3821}, + {1608, 3826, 3826}, + {3334, 3830, 3830}, + {2198, 3835, 3835}, + {2635, 3840, 3840}, + {3702, 3852, 3853}, + {3406, 3858, 3859}, + {3681, 3867, 3870}, + {3407, 3880, 3880}, + {340, 3889, 3889}, + {3772, 3893, 3893}, + {593, 3897, 3897}, + {2563, 3914, 3916}, + {2981, 3929, 3929}, + {1835, 3933, 3934}, + {3906, 3951, 3951}, + {1459, 3958, 3958}, + {3889, 3974, 3974}, + {2188, 3982, 3982}, + {3220, 3986, 3987}, + {3585, 3991, 3993}, + {3712, 3997, 4001}, + {2805, 4007, 4007}, + {1879, 4012, 4013}, + {3618, 4018, 4018}, + {1145, 4031, 4032}, + {3901, 4037, 4037}, + {2772, 4046, 4047}, + {2802, 4053, 4054}, + {3299, 4058, 4058}, + {3725, 4066, 4066}, + {2271, 4070, 4070}, + {385, 4075, 4076}, + {3624, 4089, 4090}, + {3745, 4096, 4098}, + {1563, 4102, 4102}, + {4045, 4106, 4111}, + {3696, 4115, 4119}, + {3376, 4125, 4126}, + {1880, 4130, 4130}, + {2048, 4140, 4141}, + {2724, 4149, 4149}, + {1767, 4156, 4156}, + {2601, 4164, 4164}, + {2757, 4168, 4168}, + {3974, 4172, 4172}, + {3914, 4178, 4178}, + {516, 4185, 4185}, + {1032, 4189, 4190}, + {3462, 4197, 4198}, + {3805, 4202, 4203}, + {3910, 4207, 4212}, + {3075, 4221, 4221}, + {3756, 4225, 4226}, + {1872, 4236, 4237}, + {3844, 4241, 4241}, + {3991, 4245, 4249}, + {2203, 4258, 4258}, + {3903, 4267, 4268}, + {705, 4272, 4272}, + {1896, 4276, 4276}, + {1955, 4285, 4288}, + {3746, 4302, 4303}, + {2672, 4311, 4311}, + {3969, 4317, 4317}, + {3883, 4322, 4322}, + {1920, 4339, 
4340}, + {3527, 4344, 4346}, + {1160, 4358, 4358}, + {3648, 4364, 4366}, + {2711, 4387, 4387}, + {3619, 4391, 4392}, + {1944, 4396, 4396}, + {4369, 4400, 4400}, + {2736, 4404, 4407}, + {2546, 4411, 4412}, + {4390, 4422, 4422}, + {3610, 4426, 4427}, + {4058, 4431, 4431}, + {4374, 4435, 4435}, + {3463, 4445, 4446}, + {1813, 4452, 4452}, + {3669, 4456, 4456}, + {3830, 4460, 4460}, + {421, 4464, 4465}, + {1719, 4471, 4471}, + {3880, 4475, 4475}, + {1834, 4485, 4487}, + {3590, 4491, 4491}, + {442, 4496, 4497}, + {4435, 4501, 4501}, + {3814, 4509, 4509}, + {987, 4513, 4513}, + {4494, 4518, 4521}, + {3218, 4526, 4529}, + {4221, 4537, 4537}, + {2778, 4543, 4545}, + {4422, 4552, 4552}, + {4031, 4558, 4559}, + {4178, 4563, 4563}, + {3726, 4567, 4574}, + {4027, 4578, 4578}, + {4339, 4585, 4587}, + {3796, 4592, 4595}, + {543, 4600, 4613}, + {2855, 4620, 4621}, + {2795, 4627, 4627}, + {3440, 4631, 4632}, + {4279, 4636, 4639}, + {4245, 4643, 4645}, + {4516, 4649, 4650}, + {3133, 4654, 4654}, + {4042, 4658, 4659}, + {3422, 4663, 4663}, + {4046, 4667, 4668}, + {4267, 4672, 4672}, + {4004, 4676, 4677}, + {2490, 4682, 4682}, + {2451, 4697, 4697}, + {3027, 4705, 4705}, + {4028, 4717, 4717}, + {4460, 4721, 4721}, + {2471, 4725, 4727}, + {3090, 4735, 4735}, + {3192, 4739, 4740}, + {3835, 4760, 4760}, + {4540, 4764, 4764}, + {4007, 4772, 4774}, + {619, 4784, 4784}, + {3561, 4789, 4791}, + {3367, 4805, 4805}, + {4490, 4810, 4811}, + {2402, 4815, 4815}, + {3352, 4819, 4822}, + {2773, 4828, 4828}, + {4552, 4832, 4832}, + {2522, 4840, 4841}, + {316, 4847, 4852}, + {4715, 4858, 4858}, + {2959, 4862, 4862}, + {4858, 4868, 4869}, + {2134, 4873, 4873}, + {578, 4878, 4878}, + {4189, 4889, 4890}, + {2229, 4894, 4894}, + {4501, 4898, 4898}, + {2297, 4903, 4903}, + {2933, 4909, 4909}, + {3008, 4913, 4913}, + {3153, 4917, 4917}, + {4819, 4921, 4921}, + {4921, 4932, 4933}, + {4920, 4944, 4945}, + {4814, 4954, 4955}, + {576, 4966, 4966}, + {1854, 4970, 4971}, + {1374, 4975, 4976}, + {3307, 4980, 
4980}, + {974, 4984, 4988}, + {4721, 4992, 4992}, + {4898, 4996, 4996}, + {4475, 5006, 5006}, + {3819, 5012, 5012}, + {1948, 5019, 5021}, + {4954, 5027, 5029}, + {3740, 5038, 5040}, + {4763, 5044, 5045}, + {1936, 5051, 5051}, + {4844, 5055, 5060}, + {4215, 5069, 5072}, + {1146, 5076, 5076}, + {3845, 5082, 5082}, + {4865, 5090, 5090}, + {4624, 5094, 5094}, + {4815, 5098, 5098}, + {5006, 5105, 5105}, + {4980, 5109, 5109}, + {4795, 5113, 5115}, + {5043, 5119, 5121}, + {4782, 5129, 5129}, + {3826, 5139, 5139}, + {3876, 5156, 5156}, + {3111, 5167, 5171}, + {1470, 5177, 5177}, + {4431, 5181, 5181}, + {546, 5189, 5189}, + {4225, 5193, 5193}, + {1672, 5199, 5201}, + {4207, 5205, 5209}, + {4220, 5216, 5217}, + {4658, 5224, 5225}, + {3295, 5235, 5235}, + {2436, 5239, 5239}, + {2349, 5246, 5246}, + {2175, 5250, 5250}, + {5180, 5257, 5258}, + {3161, 5263, 5263}, + {5105, 5272, 5272}, + {3552, 5282, 5282}, + {4944, 5299, 5300}, + {4130, 5312, 5313}, + {902, 5323, 5323}, + {913, 5327, 5327}, + {2987, 5333, 5334}, + {5150, 5344, 5344}, + {5249, 5348, 5348}, + {1965, 5358, 5359}, + {5330, 5364, 5364}, + {2012, 5373, 5377}, + {712, 5384, 5386}, + {5235, 5390, 5390}, + {5044, 5398, 5399}, + {564, 5406, 5406}, + {39, 5410, 5410}, + {4642, 5422, 5425}, + {4421, 5437, 5438}, + {2347, 5449, 5449}, + {5333, 5453, 5454}, + {4136, 5458, 5459}, + {3793, 5468, 5468}, + {2243, 5480, 5480}, + {4889, 5492, 5493}, + {4295, 5504, 5504}, + {2785, 5511, 5511}, + {2377, 5518, 5518}, + {3662, 5525, 5525}, + {5097, 5529, 5530}, + {4781, 5537, 5538}, + {4697, 5547, 5548}, + {436, 5552, 5553}, + {5542, 5558, 5558}, + {3692, 5562, 5562}, + {2696, 5568, 5569}, + {4620, 5578, 5578}, + {2898, 5590, 5590}, + {5557, 5596, 5618}, + {2797, 5623, 5625}, + {2792, 5629, 5629}, + {5243, 5633, 5633}, + {5348, 5637, 5637}, + {5547, 5643, 5643}, + {4296, 5654, 5655}, + {5568, 5662, 5662}, + {3001, 5670, 5671}, + {3794, 5679, 5679}, + {4006, 5685, 5686}, + {4969, 5690, 5692}, + {687, 5704, 5704}, + {4563, 5708, 5708}, 
+ {1723, 5738, 5738}, + {649, 5742, 5742}, + {5163, 5748, 5755}, + {3907, 5759, 5759}, + {3074, 5764, 5764}, + {5326, 5771, 5771}, + {2951, 5776, 5776}, + {5181, 5780, 5780}, + {2614, 5785, 5788}, + {4709, 5794, 5794}, + {2784, 5799, 5799}, + {5518, 5803, 5803}, + {4155, 5812, 5815}, + {921, 5819, 5819}, + {5224, 5823, 5824}, + {2853, 5830, 5836}, + {5776, 5840, 5840}, + {2955, 5844, 5845}, + {5745, 5853, 5853}, + {3291, 5857, 5857}, + {2988, 5861, 5861}, + {2647, 5865, 5865}, + {5398, 5869, 5870}, + {1085, 5874, 5875}, + {4906, 5881, 5881}, + {802, 5886, 5886}, + {5119, 5890, 5893}, + {5802, 5899, 5900}, + {3415, 5904, 5904}, + {5629, 5908, 5908}, + {3714, 5912, 5914}, + {5558, 5921, 5921}, + {2710, 5927, 5928}, + {1094, 5932, 5934}, + {2653, 5940, 5941}, + {4735, 5954, 5954}, + {5861, 5958, 5958}, + {1040, 5971, 5971}, + {5514, 5977, 5977}, + {5048, 5981, 5982}, + {5953, 5992, 5993}, + {3751, 5997, 5997}, + {4991, 6001, 6002}, + {5885, 6006, 6007}, + {5529, 6011, 6012}, + {4974, 6019, 6020}, + {5857, 6024, 6024}, + {3483, 6032, 6032}, + {3594, 6036, 6036}, + {1997, 6040, 6040}, + {5997, 6044, 6047}, + {5197, 6051, 6051}, + {1764, 6055, 6055}, + {6050, 6059, 6059}, + {5239, 6063, 6063}, + {5049, 6067, 6067}, + {5957, 6073, 6074}, + {1022, 6078, 6078}, + {3414, 6083, 6084}, + {3809, 6090, 6090}, + {4562, 6095, 6096}, + {5878, 6104, 6104}, + {594, 6108, 6109}, + {3353, 6115, 6116}, + {4992, 6120, 6121}, + {2424, 6125, 6125}, + {4484, 6130, 6130}, + {3900, 6134, 6135}, + {5793, 6139, 6141}, + {3562, 6145, 6145}, + {1438, 6152, 6153}, + {6058, 6157, 6158}, + {4411, 6162, 6163}, + {4590, 6167, 6171}, + {4748, 6175, 6175}, + {5517, 6183, 6184}, + {6095, 6191, 6192}, + {1471, 6203, 6203}, + {2643, 6209, 6210}, + {450, 6220, 6220}, + {5266, 6226, 6226}, + {2576, 6233, 6233}, + {2607, 6239, 6240}, + {5164, 6244, 6251}, + {6054, 6255, 6255}, + {1789, 6260, 6261}, + {5250, 6265, 6265}, + {6062, 6273, 6278}, + {5990, 6282, 6282}, + {3283, 6286, 6286}, + {5436, 6290, 6290}, + 
{6059, 6294, 6294}, + {5668, 6298, 6300}, + {3072, 6324, 6329}, + {3132, 6338, 6339}, + {3246, 6343, 6344}, + {28, 6348, 6349}, + {1503, 6353, 6355}, + {6067, 6359, 6359}, + {3384, 6364, 6364}, + {545, 6375, 6376}, + {5803, 6380, 6380}, + {5522, 6384, 6385}, + {5908, 6389, 6389}, + {2796, 6393, 6396}, + {4831, 6403, 6404}, + {6388, 6412, 6412}, + {6005, 6417, 6420}, + {4450, 6430, 6430}, + {4050, 6435, 6435}, + {5372, 6441, 6441}, + {4378, 6447, 6447}, + {6199, 6452, 6452}, + {3026, 6456, 6456}, + {2642, 6460, 6462}, + {6392, 6470, 6470}, + {6459, 6474, 6474}, + {2829, 6487, 6488}, + {2942, 6499, 6504}, + {5069, 6508, 6511}, + {5341, 6515, 6516}, + {5853, 6521, 6525}, + {6104, 6531, 6531}, + {5759, 6535, 6538}, + {4672, 6542, 6543}, + {2443, 6550, 6550}, + {5109, 6554, 6554}, + {6494, 6558, 6560}, + {6006, 6570, 6572}, + {6424, 6576, 6580}, + {4693, 6591, 6592}, + {6439, 6596, 6597}, + {3179, 6601, 6601}, + {5299, 6606, 6607}, + {4148, 6612, 6613}, + {3774, 6617, 6617}, + {3537, 6623, 6624}, + {4975, 6628, 6629}, + {3848, 6636, 6636}, + {856, 6640, 6640}, + {5724, 6645, 6645}, + {6632, 6651, 6651}, + {4630, 6656, 6658}, + {1440, 6662, 6662}, + {4281, 6666, 6667}, + {4302, 6671, 6672}, + {2589, 6676, 6677}, + {5647, 6681, 6687}, + {6082, 6691, 6693}, + {6144, 6698, 6698}, + {6103, 6709, 6710}, + {3710, 6714, 6714}, + {4253, 6718, 6721}, + {2467, 6730, 6730}, + {4778, 6734, 6734}, + {6528, 6738, 6738}, + {4358, 6747, 6747}, + {5889, 6753, 6753}, + {5193, 6757, 6757}, + {5797, 6761, 6761}, + {3858, 6765, 6766}, + {5951, 6776, 6776}, + {6487, 6781, 6782}, + {3282, 6786, 6787}, + {4667, 6797, 6799}, + {1927, 6803, 6806}, + {6583, 6810, 6810}, + {4937, 6814, 6814}, + {6099, 6824, 6824}, + {4415, 6835, 6836}, + {6332, 6840, 6841}, + {5160, 6850, 6850}, + {4764, 6854, 6854}, + {6814, 6858, 6859}, + {3018, 6864, 6864}, + {6293, 6868, 6869}, + {6359, 6877, 6877}, + {3047, 6884, 6886}, + {5262, 6890, 6891}, + {5471, 6900, 6900}, + {3268, 6910, 6912}, + {1047, 6916, 6916}, + 
{5904, 6923, 6923}, + {5798, 6933, 6938}, + {4149, 6942, 6942}, + {1821, 6946, 6946}, + {3599, 6952, 6952}, + {6470, 6957, 6957}, + {5562, 6961, 6961}, + {6268, 6965, 6967}, + {6389, 6971, 6971}, + {6596, 6975, 6976}, + {6553, 6980, 6981}, + {6576, 6985, 6989}, + {1375, 6993, 6993}, + {652, 6998, 6998}, + {4876, 7002, 7003}, + {5768, 7011, 7013}, + {3973, 7017, 7017}, + {6802, 7025, 7025}, + {6955, 7034, 7036}, + {6974, 7040, 7040}, + {5944, 7044, 7044}, + {6992, 7048, 7054}, + {6872, 7059, 7059}, + {2943, 7063, 7063}, + {6923, 7067, 7067}, + {5094, 7071, 7071}, + {4873, 7075, 7075}, + {5819, 7079, 7079}, + {5945, 7085, 7085}, + {1540, 7090, 7091}, + {2090, 7095, 7095}, + {5024, 7104, 7105}, + {6900, 7109, 7109}, + {6024, 7113, 7114}, + {6000, 7118, 7120}, + {2187, 7124, 7125}, + {6760, 7129, 7130}, + {5898, 7134, 7136}, + {7032, 7144, 7144}, + {4271, 7148, 7148}, + {3706, 7152, 7152}, + {6970, 7156, 7157}, + {7088, 7161, 7163}, + {2718, 7168, 7169}, + {5674, 7175, 7175}, + {4631, 7182, 7182}, + {7070, 7188, 7189}, + {6220, 7196, 7196}, + {3458, 7201, 7202}, + {2041, 7211, 7212}, + {1454, 7216, 7216}, + {5199, 7225, 7227}, + {3529, 7234, 7234}, + {6890, 7238, 7238}, + {3815, 7242, 7243}, + {5490, 7250, 7253}, + {6554, 7257, 7263}, + {5890, 7267, 7269}, + {6877, 7273, 7273}, + {4877, 7277, 7277}, + {2502, 7285, 7285}, + {1483, 7289, 7295}, + {7210, 7304, 7308}, + {6845, 7313, 7316}, + {7219, 7320, 7320}, + {7001, 7325, 7329}, + {6853, 7333, 7334}, + {6120, 7338, 7338}, + {6606, 7342, 7343}, + {7020, 7348, 7350}, + {3509, 7354, 7354}, + {7133, 7359, 7363}, + {3434, 7371, 7374}, + {2787, 7384, 7384}, + {7044, 7388, 7388}, + {6960, 7394, 7395}, + {6676, 7399, 7400}, + {7161, 7404, 7404}, + {7285, 7417, 7418}, + {4558, 7425, 7426}, + {4828, 7430, 7430}, + {6063, 7436, 7436}, + {3597, 7442, 7442}, + {914, 7446, 7446}, + {7320, 7452, 7454}, + {7267, 7458, 7460}, + {5076, 7464, 7464}, + {7430, 7468, 7469}, + {6273, 7473, 7474}, + {7440, 7478, 7487}, + {7348, 7491, 7494}, + 
{1021, 7510, 7510}, + {7473, 7515, 7515}, + {2823, 7519, 7519}, + {6264, 7527, 7527}, + {7302, 7531, 7531}, + {7089, 7535, 7535}, + {7342, 7540, 7541}, + {3688, 7547, 7551}, + {3054, 7558, 7560}, + {4177, 7566, 7567}, + {6691, 7574, 7575}, + {7156, 7585, 7586}, + {7147, 7590, 7592}, + {7407, 7598, 7598}, + {7403, 7602, 7603}, + {6868, 7607, 7607}, + {6636, 7611, 7611}, + {4805, 7617, 7617}, + {5779, 7623, 7623}, + {7063, 7627, 7627}, + {5079, 7632, 7632}, + {7377, 7637, 7637}, + {7337, 7641, 7642}, + {6738, 7655, 7655}, + {7338, 7659, 7659}, + {6541, 7669, 7671}, + {595, 7675, 7675}, + {7658, 7679, 7680}, + {7647, 7685, 7686}, + {2477, 7690, 7690}, + {5823, 7694, 7694}, + {4156, 7699, 7699}, + {5931, 7703, 7706}, + {6854, 7712, 7712}, + {4931, 7718, 7718}, + {6979, 7722, 7722}, + {5085, 7727, 7727}, + {6965, 7732, 7732}, + {7201, 7736, 7737}, + {3639, 7741, 7743}, + {7534, 7749, 7749}, + {4292, 7753, 7753}, + {3427, 7759, 7763}, + {7273, 7767, 7767}, + {940, 7778, 7778}, + {4838, 7782, 7785}, + {4216, 7790, 7792}, + {922, 7800, 7801}, + {7256, 7810, 7811}, + {7789, 7815, 7819}, + {7225, 7823, 7825}, + {7531, 7829, 7829}, + {6997, 7833, 7833}, + {7757, 7837, 7838}, + {4129, 7842, 7842}, + {7333, 7848, 7849}, + {6776, 7855, 7855}, + {7527, 7859, 7859}, + {4370, 7863, 7863}, + {4512, 7868, 7868}, + {5679, 7880, 7880}, + {3162, 7884, 7885}, + {3933, 7892, 7894}, + {7804, 7899, 7902}, + {6363, 7906, 7907}, + {7848, 7911, 7912}, + {5584, 7917, 7921}, + {874, 7926, 7926}, + {3342, 7930, 7930}, + {4507, 7935, 7937}, + {3672, 7943, 7944}, + {7911, 7948, 7949}, + {6402, 7956, 7956}, + {7940, 7960, 7960}, + {7113, 7964, 7964}, + {1073, 7968, 7968}, + {7740, 7974, 7974}, + {7601, 7978, 7982}, + {6797, 7987, 7988}, + {3528, 7994, 7995}, + {5483, 7999, 7999}, + {5717, 8011, 8011}, + {5480, 8017, 8017}, + {7770, 8023, 8030}, + {2452, 8034, 8034}, + {5282, 8047, 8047}, + {7967, 8051, 8051}, + {1128, 8058, 8066}, + {6348, 8070, 8070}, + {8055, 8077, 8077}, + {7925, 8081, 8086}, + 
{6810, 8090, 8090}, + {5051, 8101, 8101}, + {4696, 8109, 8110}, + {5129, 8119, 8119}, + {4449, 8123, 8123}, + {7222, 8127, 8127}, + {4649, 8131, 8134}, + {7994, 8138, 8138}, + {5954, 8148, 8148}, + {475, 8152, 8153}, + {7906, 8157, 8157}, + {7458, 8164, 8166}, + {7632, 8171, 8173}, + {3874, 8177, 8183}, + {4391, 8187, 8187}, + {561, 8191, 8191}, + {2417, 8195, 8195}, + {2357, 8204, 8204}, + {2269, 8216, 8218}, + {3968, 8222, 8222}, + {2200, 8226, 8227}, + {3453, 8247, 8247}, + {2439, 8251, 8252}, + {7175, 8257, 8257}, + {976, 8262, 8264}, + {4953, 8273, 8273}, + {4219, 8278, 8278}, + {6, 8285, 8291}, + {5703, 8295, 8296}, + {5272, 8300, 8300}, + {8037, 8304, 8304}, + {8186, 8314, 8314}, + {8304, 8318, 8318}, + {8051, 8326, 8326}, + {8318, 8330, 8330}, + {2671, 8334, 8335}, + {2662, 8339, 8339}, + {8081, 8349, 8350}, + {3328, 8356, 8356}, + {2879, 8360, 8362}, + {8050, 8370, 8371}, + {8330, 8375, 8376}, + {8375, 8386, 8386}, + {4961, 8390, 8390}, + {1017, 8403, 8405}, + {3533, 8416, 8416}, + {4555, 8422, 8422}, + {6445, 8426, 8426}, + {8169, 8432, 8432}, + {990, 8436, 8436}, + {4102, 8440, 8440}, + {7398, 8444, 8446}, + {3480, 8450, 8450}, + {6324, 8462, 8462}, + {7948, 8466, 8467}, + {5950, 8471, 8471}, + {5189, 8476, 8476}, + {4026, 8490, 8490}, + {8374, 8494, 8495}, + {4682, 8501, 8501}, + {7387, 8506, 8506}, + {8164, 8510, 8515}, + {4079, 8524, 8524}, + {8360, 8529, 8531}, + {7446, 8540, 8543}, + {7971, 8547, 8548}, + {4311, 8552, 8552}, + {5204, 8556, 8557}, + {7968, 8562, 8562}, + {7847, 8571, 8573}, + {8547, 8577, 8577}, + {5320, 8581, 8581}, + {8556, 8585, 8586}, + {8504, 8590, 8590}, + {7669, 8602, 8604}, + {5874, 8608, 8609}, + {5828, 8613, 8613}, + {7998, 8617, 8617}, + {8519, 8625, 8625}, + {7250, 8637, 8637}, + {426, 8641, 8641}, + {8436, 8645, 8645}, + {5986, 8649, 8656}, + {8157, 8660, 8660}, + {7182, 8665, 8665}, + {8421, 8675, 8675}, + {8509, 8681, 8681}, + {5137, 8688, 8689}, + {8625, 8694, 8695}, + {5228, 8701, 8702}, + {6661, 8714, 8714}, + 
{1010, 8719, 8719}, + {6648, 8723, 8723}, + {3500, 8728, 8728}, + {2442, 8735, 8735}, + {8494, 8740, 8741}, + {8171, 8753, 8755}, + {7242, 8763, 8764}, + {4739, 8768, 8769}, + {7079, 8773, 8773}, + {8386, 8777, 8777}, + {8624, 8781, 8787}, + {661, 8791, 8794}, + {8631, 8801, 8801}, + {7753, 8805, 8805}, + {4783, 8809, 8810}, + {1673, 8814, 8815}, + {6623, 8819, 8819}, + {4404, 8823, 8823}, + {8089, 8827, 8828}, + {8773, 8832, 8832}, + {5394, 8836, 8836}, + {6231, 8841, 8843}, + {1015, 8852, 8853}, + {6873, 8857, 8857}, + {6289, 8865, 8865}, + {8577, 8869, 8869}, + {8114, 8873, 8875}, + {8534, 8883, 8883}, + {3007, 8887, 8888}, + {8827, 8892, 8893}, + {4788, 8897, 8900}, + {5698, 8906, 8907}, + {7690, 8911, 8911}, + {6643, 8919, 8919}, + {7206, 8923, 8924}, + {7866, 8929, 8931}, + {8880, 8942, 8942}, + {8630, 8951, 8952}, + {6027, 8958, 8958}, + {7749, 8966, 8967}, + {4932, 8972, 8973}, + {8892, 8980, 8981}, + {634, 9003, 9003}, + {8109, 9007, 9008}, + {8777, 9012, 9012}, + {3981, 9016, 9017}, + {5723, 9025, 9025}, + {7662, 9034, 9038}, + {8955, 9042, 9042}, + {8070, 9060, 9062}, + {8910, 9066, 9066}, + {5363, 9070, 9071}, + {7699, 9075, 9076}, + {8991, 9081, 9081}, + {6850, 9085, 9085}, + {5811, 9092, 9094}, + {9079, 9098, 9102}, + {6456, 9106, 9106}, + {2259, 9111, 9111}, + {4752, 9116, 9116}, + {9060, 9120, 9123}, + {8090, 9127, 9127}, + {5305, 9131, 9132}, + {8623, 9137, 9137}, + {7417, 9141, 9141}, + {6564, 9148, 9149}, + {9126, 9157, 9158}, + {4285, 9169, 9170}, + {8698, 9174, 9174}, + {8869, 9178, 9178}, + {2572, 9182, 9183}, + {6482, 9188, 9190}, + {9181, 9201, 9201}, + {2968, 9208, 9209}, + {2506, 9213, 9215}, + {9127, 9219, 9219}, + {7910, 9225, 9227}, + {5422, 9235, 9239}, + {8813, 9244, 9246}, + {9178, 9250, 9250}, + {8748, 9255, 9255}, + {7354, 9265, 9265}, + {7767, 9269, 9269}, + {7710, 9281, 9283}, + {8826, 9288, 9290}, + {861, 9295, 9295}, + {4482, 9301, 9301}, + {9264, 9305, 9306}, + {8805, 9310, 9310}, + {4995, 9314, 9314}, + {6730, 9318, 9318}, + 
{7457, 9328, 9328}, + {2547, 9335, 9336}, + {6298, 9340, 9343}, + {9305, 9353, 9354}, + {9269, 9358, 9358}, + {6338, 9370, 9370}, + {7289, 9376, 9379}, + {5780, 9383, 9383}, + {7607, 9387, 9387}, + {2065, 9392, 9392}, + {7238, 9396, 9396}, + {8856, 9400, 9400}, + {8069, 9412, 9413}, + {611, 9420, 9420}, + {7071, 9424, 9424}, + {3089, 9430, 9431}, + {7117, 9435, 9438}, + {1976, 9445, 9445}, + {6640, 9449, 9449}, + {5488, 9453, 9453}, + {8739, 9457, 9459}, + {5958, 9466, 9466}, + {7985, 9470, 9470}, + {8735, 9475, 9475}, + {5009, 9479, 9479}, + {8073, 9483, 9484}, + {2328, 9490, 9491}, + {9250, 9495, 9495}, + {4043, 9502, 9502}, + {7712, 9506, 9506}, + {9012, 9510, 9510}, + {9028, 9514, 9515}, + {2190, 9521, 9524}, + {9029, 9528, 9528}, + {9519, 9532, 9532}, + {9495, 9536, 9536}, + {8527, 9540, 9540}, + {2137, 9550, 9550}, + {8419, 9557, 9557}, + {9383, 9561, 9562}, + {8970, 9575, 9578}, + {8911, 9582, 9582}, + {7828, 9595, 9596}, + {6180, 9600, 9600}, + {8738, 9604, 9607}, + {7540, 9611, 9612}, + {9599, 9616, 9618}, + {9187, 9623, 9623}, + {9294, 9628, 9629}, + {4536, 9639, 9639}, + {3867, 9643, 9643}, + {6305, 9648, 9648}, + {1617, 9654, 9657}, + {5762, 9666, 9666}, + {8314, 9670, 9670}, + {9666, 9674, 9675}, + {9506, 9679, 9679}, + {9669, 9685, 9686}, + {9683, 9690, 9690}, + {8763, 9697, 9698}, + {7468, 9702, 9702}, + {460, 9707, 9707}, + {3115, 9712, 9712}, + {9424, 9716, 9717}, + {7359, 9721, 9724}, + {7547, 9728, 9729}, + {7151, 9733, 9738}, + {7627, 9742, 9742}, + {2822, 9747, 9747}, + {8247, 9751, 9753}, + {9550, 9758, 9758}, + {7585, 9762, 9763}, + {1002, 9767, 9767}, + {7168, 9772, 9773}, + {6941, 9777, 9780}, + {9728, 9784, 9786}, + {9770, 9792, 9796}, + {6411, 9801, 9802}, + {3689, 9806, 9808}, + {9575, 9814, 9816}, + {7025, 9820, 9821}, + {2776, 9826, 9826}, + {9806, 9830, 9830}, + {9820, 9834, 9835}, + {9800, 9839, 9847}, + {9834, 9851, 9852}, + {9829, 9856, 9862}, + {1400, 9866, 9866}, + {3197, 9870, 9871}, + {9851, 9875, 9876}, + {9742, 9883, 9884}, + 
{3362, 9888, 9889}, + {9883, 9893, 9893}, + {5711, 9899, 9910}, + {7806, 9915, 9915}, + {9120, 9919, 9919}, + {9715, 9925, 9934}, + {2580, 9938, 9938}, + {4907, 9942, 9944}, + {6239, 9953, 9954}, + {6961, 9963, 9963}, + {5295, 9967, 9968}, + {1915, 9972, 9973}, + {3426, 9983, 9985}, + {9875, 9994, 9995}, + {6942, 9999, 9999}, + {6621, 10005, 10005}, + {7589, 10010, 10012}, + {9286, 10020, 10020}, + {838, 10024, 10024}, + {9980, 10028, 10031}, + {9994, 10035, 10041}, + {2702, 10048, 10051}, + {2621, 10059, 10059}, + {10054, 10065, 10065}, + {8612, 10073, 10074}, + {7033, 10078, 10078}, + {916, 10082, 10082}, + {10035, 10086, 10087}, + {8613, 10097, 10097}, + {9919, 10107, 10108}, + {6133, 10114, 10115}, + {10059, 10119, 10119}, + {10065, 10126, 10127}, + {7732, 10131, 10131}, + {7155, 10135, 10136}, + {6728, 10140, 10140}, + {6162, 10144, 10145}, + {4724, 10150, 10150}, + {1665, 10154, 10154}, + {10126, 10163, 10163}, + {9783, 10168, 10168}, + {1715, 10172, 10173}, + {7152, 10177, 10182}, + {8760, 10187, 10187}, + {7829, 10191, 10191}, + {9679, 10196, 10196}, + {9369, 10201, 10201}, + {2928, 10206, 10208}, + {6951, 10214, 10217}, + {5633, 10221, 10221}, + {7199, 10225, 10225}, + {10118, 10230, 10231}, + {9999, 10235, 10236}, + {10045, 10240, 10249}, + {5565, 10256, 10256}, + {9866, 10261, 10261}, + {10163, 10268, 10268}, + {9869, 10272, 10272}, + {9789, 10276, 10283}, + {10235, 10287, 10288}, + {10214, 10298, 10299}, + {6971, 10303, 10303}, + {3346, 10307, 10307}, + {10185, 10311, 10312}, + {9993, 10318, 10320}, + {2779, 10332, 10334}, + {1726, 10338, 10338}, + {741, 10354, 10360}, + {10230, 10372, 10373}, + {10260, 10384, 10385}, + {10131, 10389, 10398}, + {6946, 10406, 10409}, + {10158, 10413, 10420}, + {10123, 10424, 10424}, + {6157, 10428, 10429}, + {4518, 10434, 10434}, + {9893, 10438, 10438}, + {9865, 10442, 10446}, + {7558, 10454, 10454}, + {10434, 10460, 10460}, + {10064, 10466, 10468}, + {2703, 10472, 10474}, + {9751, 10478, 10479}, + {6714, 10485, 10485}, 
+ {8020, 10490, 10490}, + {10303, 10494, 10494}, + {3521, 10499, 10500}, + {9281, 10513, 10515}, + {6028, 10519, 10523}, + {9387, 10527, 10527}, + {7614, 10531, 10531}, + {3611, 10536, 10536}, + {9162, 10540, 10540}, + {10081, 10546, 10547}, + {10034, 10560, 10562}, + {6726, 10567, 10571}, + {8237, 10575, 10575}, + {10438, 10579, 10583}, + {10140, 10587, 10587}, + {5784, 10592, 10592}, + {9819, 10597, 10600}, + {10567, 10604, 10608}, + {9335, 10613, 10613}, + {8300, 10617, 10617}, + {10575, 10621, 10621}, + {9678, 10625, 10626}, + {9962, 10632, 10633}, + {10535, 10637, 10638}, + {8199, 10642, 10642}, + {10372, 10647, 10648}, + {10637, 10656, 10657}, + {10579, 10667, 10668}, + {10465, 10677, 10680}, + {6702, 10684, 10685}, + {10073, 10691, 10692}, + {4505, 10696, 10697}, + {9042, 10701, 10701}, + {6460, 10705, 10706}, + {10010, 10714, 10716}, + {10656, 10720, 10722}, + {7282, 10727, 10729}, + {2327, 10733, 10733}, + {2491, 10740, 10741}, + {10704, 10748, 10750}, + {6465, 10754, 10754}, + {10647, 10758, 10759}, + {10424, 10763, 10763}, + {10748, 10776, 10776}, + {10546, 10780, 10781}, + {10758, 10785, 10786}, + {10287, 10790, 10797}, + {10785, 10801, 10807}, + {10240, 10811, 10826}, + {9509, 10830, 10830}, + {2579, 10836, 10838}, + {9801, 10843, 10845}, + {7555, 10849, 10850}, + {10776, 10860, 10865}, + {8023, 10869, 10869}, + {10046, 10876, 10884}, + {10253, 10888, 10892}, + {9941, 10897, 10897}, + {7898, 10901, 10905}, + {6725, 10909, 10913}, + {10757, 10921, 10923}, + {10160, 10931, 10931}, + {10916, 10935, 10942}, + {10261, 10946, 10946}, + {10318, 10952, 10954}, + {5911, 10959, 10961}, + {10801, 10965, 10966}, + {10946, 10970, 10977}, + {10592, 10982, 10984}, + {9913, 10988, 10990}, + {8510, 10994, 10996}, + {9419, 11000, 11001}, + {6765, 11006, 11007}, + {10725, 11011, 11011}, + {5537, 11017, 11019}, + {9208, 11024, 11025}, + {5850, 11030, 11030}, + {9610, 11034, 11036}, + {8846, 11041, 11047}, + {9697, 11051, 11051}, + {1622, 11055, 11058}, + {2370, 11062, 
11062}, + {8393, 11067, 11067}, + {9756, 11071, 11071}, + {10172, 11076, 11076}, + {27, 11081, 11081}, + {7357, 11087, 11092}, + {8151, 11104, 11106}, + {6115, 11110, 11110}, + {10667, 11114, 11115}, + {11099, 11121, 11123}, + {10705, 11127, 11127}, + {8938, 11131, 11131}, + {11114, 11135, 11136}, + {1390, 11140, 11141}, + {10964, 11146, 11148}, + {11140, 11152, 11155}, + {9813, 11159, 11166}, + {624, 11171, 11172}, + {3118, 11177, 11179}, + {11029, 11184, 11186}, + {10186, 11190, 11190}, + {10306, 11196, 11196}, + {8665, 11201, 11201}, + {7382, 11205, 11205}, + {1100, 11210, 11210}, + {2337, 11216, 11217}, + {1609, 11221, 11223}, + {5763, 11228, 11229}, + {5220, 11233, 11233}, + {11061, 11241, 11241}, + {10617, 11246, 11246}, + {11190, 11250, 11251}, + {10144, 11255, 11256}, + {11232, 11260, 11260}, + {857, 11264, 11265}, + {10994, 11269, 11271}, + {3879, 11280, 11281}, + {11184, 11287, 11289}, + {9611, 11293, 11295}, + {11250, 11299, 11299}, + {4495, 11304, 11304}, + {7574, 11308, 11309}, + {9814, 11315, 11317}, + {1713, 11321, 11324}, + {1905, 11328, 11328}, + {8745, 11335, 11340}, + {8883, 11351, 11351}, + {8119, 11358, 11358}, + {1842, 11363, 11364}, + {11237, 11368, 11368}, + {8814, 11373, 11374}, + {5684, 11378, 11378}, + {11011, 11382, 11382}, + {6520, 11389, 11389}, + {11183, 11393, 11396}, + {1790, 11404, 11404}, + {9536, 11408, 11408}, + {11298, 11418, 11419}, + {3929, 11425, 11425}, + {5588, 11429, 11429}, + {8476, 11436, 11436}, + {4096, 11440, 11442}, + {11084, 11446, 11454}, + {10603, 11458, 11463}, + {7332, 11472, 11474}, + {7611, 11483, 11486}, + {4836, 11490, 11491}, + {10024, 11495, 11495}, + {4917, 11501, 11506}, + {6486, 11510, 11512}, + {11269, 11516, 11518}, + {3603, 11522, 11525}, + {11126, 11535, 11535}, + {11418, 11539, 11541}, + {11408, 11545, 11545}, + {9021, 11549, 11552}, + {6745, 11557, 11557}, + {5118, 11561, 11564}, + {7590, 11568, 11569}, + {4426, 11573, 11578}, + {9790, 11582, 11583}, + {6447, 11587, 11587}, + {10229, 11591, 
11594}, + {10457, 11598, 11598}, + {10168, 11604, 11604}, + {10543, 11608, 11608}, + {7404, 11612, 11612}, + {11127, 11616, 11616}, + {3337, 11620, 11620}, + {11501, 11624, 11628}, + {4543, 11633, 11635}, + {8449, 11642, 11642}, + {4943, 11646, 11648}, + {10526, 11652, 11654}, + {11620, 11659, 11659}, + {8927, 11664, 11669}, + {532, 11673, 11673}, + {10513, 11677, 11679}, + {10428, 11683, 11683}, + {10999, 11689, 11690}, + {9469, 11695, 11695}, + {3606, 11699, 11699}, + {9560, 11708, 11709}, + {1564, 11714, 11714}, + {10527, 11718, 11718}, + {3071, 11723, 11726}, + {11590, 11731, 11732}, + {6605, 11737, 11737}, + {11624, 11741, 11745}, + {7822, 11749, 11752}, + {5269, 11757, 11758}, + {1339, 11767, 11767}, + {1363, 11771, 11773}, + {3704, 11777, 11777}, + {10952, 11781, 11783}, + {6764, 11793, 11795}, + {8675, 11800, 11800}, + {9963, 11804, 11804}, + {11573, 11808, 11809}, + {9548, 11813, 11813}, + {11591, 11817, 11818}, + {11446, 11822, 11822}, + {9224, 11828, 11828}, + {3158, 11836, 11836}, + {10830, 11840, 11840}, + {7234, 11846, 11846}, + {11299, 11850, 11850}, + {11544, 11854, 11855}, + {11498, 11859, 11859}, + {10993, 11865, 11868}, + {9720, 11872, 11878}, + {10489, 11882, 11890}, + {11712, 11898, 11904}, + {11516, 11908, 11910}, + {11568, 11914, 11915}, + {10177, 11919, 11924}, + {11363, 11928, 11929}, + {10494, 11933, 11933}, + {9870, 11937, 11938}, + {9427, 11942, 11942}, + {11481, 11949, 11949}, + {6030, 11955, 11957}, + {11718, 11961, 11961}, + {10531, 11965, 11983}, + {5126, 11987, 11987}, + {7515, 11991, 11991}, + {10646, 11996, 11997}, + {2947, 12001, 12001}, + {9582, 12009, 12010}, + {6202, 12017, 12018}, + {11714, 12022, 12022}, + {9235, 12033, 12037}, + {9721, 12041, 12044}, + {11932, 12051, 12052}, + {12040, 12056, 12056}, + {12051, 12060, 12060}, + {11601, 12066, 12066}, + {8426, 12070, 12070}, + {4053, 12077, 12077}, + {4262, 12081, 12081}, + {9761, 12086, 12088}, + {11582, 12092, 12093}, + {10965, 12097, 12098}, + {11803, 12103, 12104}, + 
{11933, 12108, 12109}, + {10688, 12117, 12117}, + {12107, 12125, 12126}, + {6774, 12130, 12132}, + {6286, 12137, 12137}, + {9543, 12141, 12141}, + {12097, 12145, 12146}, + {10790, 12150, 12150}, + {10125, 12154, 12156}, + {12125, 12164, 12164}, + {12064, 12168, 12172}, + {10811, 12178, 12188}, + {12092, 12192, 12193}, + {10058, 12197, 12198}, + {11611, 12211, 12212}, + {3459, 12216, 12216}, + {10291, 12225, 12228}, + {12191, 12232, 12234}, + {12145, 12238, 12238}, + {12001, 12242, 12250}, + {3840, 12255, 12255}, + {12216, 12259, 12259}, + {674, 12272, 12272}, + {12141, 12276, 12276}, + {10766, 12280, 12280}, + {11545, 12284, 12284}, + {6496, 12290, 12290}, + {11381, 12294, 12295}, + {603, 12302, 12303}, + {12276, 12308, 12308}, + {11850, 12313, 12314}, + {565, 12319, 12319}, + {9351, 12324, 12324}, + {11822, 12328, 12328}, + {2691, 12333, 12334}, + {11840, 12338, 12338}, + {11070, 12343, 12343}, + {9510, 12347, 12347}, + {11024, 12352, 12353}, + {7173, 12359, 12359}, + {517, 12363, 12363}, + {6311, 12367, 12368}, + {11367, 12372, 12373}, + {12008, 12377, 12377}, + {11372, 12382, 12384}, + {11358, 12391, 12392}, + {11382, 12396, 12396}, + {6882, 12400, 12401}, + {11246, 12405, 12405}, + {8359, 12409, 12412}, + {10154, 12418, 12418}, + {12016, 12425, 12426}, + {8972, 12434, 12435}, + {10478, 12439, 12440}, + {12395, 12449, 12449}, + {11612, 12454, 12454}, + {12347, 12458, 12458}, + {10700, 12466, 12467}, + {3637, 12471, 12476}, + {1042, 12480, 12481}, + {6747, 12488, 12488}, + {12396, 12492, 12493}, + {9420, 12497, 12497}, + {11285, 12501, 12510}, + {4470, 12515, 12515}, + {9374, 12519, 12519}, + {11293, 12528, 12528}, + {2058, 12534, 12535}, + {6521, 12539, 12539}, + {12492, 12543, 12543}, + {3043, 12547, 12547}, + {2982, 12551, 12553}, + {11030, 12557, 12563}, + {7636, 12568, 12568}, + {9639, 12572, 12572}, + {12543, 12576, 12576}, + {5989, 12580, 12583}, + {11051, 12587, 12587}, + {1061, 12592, 12594}, + {12313, 12599, 12601}, + {11846, 12605, 12605}, + {12576, 
12609, 12609}, + {11040, 12618, 12625}, + {12479, 12629, 12629}, + {6903, 12633, 12633}, + {12322, 12639, 12639}, + {12253, 12643, 12645}, + {5594, 12651, 12651}, + {12522, 12655, 12655}, + {11703, 12659, 12659}, + {1377, 12665, 12665}, + {8022, 12669, 12669}, + {12280, 12674, 12674}, + {9023, 12680, 12681}, + {12328, 12685, 12685}, + {3085, 12689, 12693}, + {4700, 12698, 12698}, + {10224, 12702, 12702}, + {8781, 12706, 12706}, + {1651, 12710, 12710}, + {12458, 12714, 12714}, + {12005, 12718, 12721}, + {11908, 12725, 12726}, + {8202, 12733, 12733}, + {11708, 12739, 12740}, + {12599, 12744, 12745}, + {12284, 12749, 12749}, + {5285, 12756, 12756}, + {12055, 12775, 12777}, + {6919, 12782, 12782}, + {12242, 12786, 12786}, + {12009, 12790, 12790}, + {9628, 12794, 12796}, + {11354, 12801, 12802}, + {10225, 12806, 12807}, + {579, 12813, 12813}, + {8935, 12817, 12822}, + {8753, 12827, 12829}, + {11006, 12835, 12835}, + {858, 12841, 12845}, + {476, 12849, 12849}, + {7667, 12854, 12854}, + {12760, 12860, 12871}, + {11677, 12875, 12877}, + {12714, 12881, 12881}, + {12731, 12885, 12890}, + {7108, 12894, 12896}, + {1165, 12900, 12900}, + {4021, 12906, 12906}, + {10829, 12910, 12911}, + {12331, 12915, 12915}, + {8887, 12919, 12921}, + {11639, 12925, 12925}, + {7964, 12929, 12929}, + {12528, 12937, 12937}, + {8148, 12941, 12941}, + {12770, 12948, 12950}, + {12609, 12954, 12954}, + {12685, 12958, 12958}, + {2803, 12962, 12962}, + {9561, 12966, 12966}, + {6671, 12972, 12973}, + {12056, 12977, 12977}, + {6380, 12981, 12981}, + {12048, 12985, 12985}, + {11961, 12989, 12993}, + {3368, 12997, 12999}, + {6634, 13004, 13004}, + {6775, 13009, 13010}, + {12136, 13014, 13019}, + {10341, 13023, 13023}, + {13002, 13027, 13027}, + {10587, 13031, 13031}, + {10307, 13035, 13035}, + {12736, 13039, 13039}, + {12744, 13043, 13044}, + {6175, 13048, 13048}, + {9702, 13053, 13054}, + {662, 13059, 13061}, + {12718, 13065, 13068}, + {12893, 13072, 13075}, + {8299, 13086, 13091}, + {12604, 13095, 13096}, 
+ {12848, 13100, 13101}, + {12749, 13105, 13105}, + {12526, 13109, 13114}, + {9173, 13122, 13122}, + {12769, 13128, 13128}, + {13038, 13132, 13132}, + {12725, 13136, 13137}, + {12639, 13146, 13146}, + {9711, 13150, 13151}, + {12137, 13155, 13155}, + {13039, 13159, 13159}, + {4681, 13163, 13164}, + {12954, 13168, 13168}, + {13158, 13175, 13176}, + {13105, 13180, 13180}, + {10754, 13184, 13184}, + {13167, 13188, 13188}, + {12658, 13192, 13192}, + {4294, 13199, 13200}, + {11682, 13204, 13205}, + {11695, 13209, 13209}, + {11076, 13214, 13214}, + {12232, 13218, 13218}, + {9399, 13223, 13224}, + {12880, 13228, 13229}, + {13048, 13234, 13234}, + {9701, 13238, 13239}, + {13209, 13243, 13243}, + {3658, 13248, 13248}, + {3698, 13252, 13254}, + {12237, 13260, 13260}, + {8872, 13266, 13266}, + {12957, 13272, 13273}, + {1393, 13281, 13281}, + {2013, 13285, 13288}, + {4244, 13296, 13299}, + {9428, 13303, 13303}, + {12702, 13307, 13307}, + {13078, 13311, 13311}, + {6071, 13315, 13315}, + {3061, 13319, 13319}, + {2051, 13324, 13324}, + {11560, 13328, 13331}, + {6584, 13336, 13336}, + {8482, 13340, 13340}, + {5331, 13344, 13344}, + {4171, 13348, 13348}, + {8501, 13352, 13352}, + {9219, 13356, 13356}, + {9473, 13360, 13363}, + {12881, 13367, 13367}, + {13065, 13371, 13375}, + {2979, 13379, 13384}, + {1518, 13388, 13388}, + {11177, 13392, 13392}, + {9457, 13398, 13398}, + {12293, 13407, 13410}, + {3697, 13414, 13417}, + {10338, 13425, 13425}, + {13367, 13429, 13429}, + {11074, 13433, 13437}, + {4201, 13441, 13443}, + {1812, 13447, 13448}, + {13360, 13452, 13456}, + {13188, 13463, 13463}, + {9732, 13470, 13470}, + {11332, 13477, 13477}, + {9918, 13487, 13487}, + {6337, 13497, 13497}, + {13429, 13501, 13501}, + {11413, 13505, 13505}, + {4685, 13512, 13513}, + {13136, 13517, 13519}, + {7416, 13528, 13530}, + {12929, 13534, 13534}, + {11110, 13539, 13539}, + {11521, 13543, 13543}, + {12825, 13553, 13553}, + {13447, 13557, 13558}, + {12299, 13562, 13563}, + {9003, 13570, 13570}, + {12500, 
13577, 13577}, + {13501, 13581, 13581}, + {9392, 13586, 13586}, + {12454, 13590, 13590}, + {6189, 13595, 13595}, + {13053, 13599, 13599}, + {11881, 13604, 13604}, + {13159, 13608, 13608}, + {4894, 13612, 13612}, + {13221, 13621, 13621}, + {8950, 13625, 13625}, + {13533, 13629, 13629}, + {9633, 13633, 13633}, + {7892, 13637, 13639}, + {13581, 13643, 13643}, + {13616, 13647, 13649}, + {12794, 13653, 13654}, + {8919, 13659, 13659}, + {9674, 13663, 13663}, + {13577, 13668, 13668}, + {12966, 13672, 13672}, + {12659, 13676, 13683}, + {6124, 13688, 13688}, + {9225, 13693, 13695}, + {11833, 13702, 13702}, + {12904, 13709, 13717}, + {13647, 13721, 13722}, + {11687, 13726, 13727}, + {12434, 13731, 13732}, + {12689, 13736, 13742}, + {13168, 13746, 13746}, + {6151, 13751, 13752}, + {11821, 13756, 13757}, + {6467, 13764, 13764}, + {5730, 13769, 13769}, + {5136, 13780, 13780}, + {724, 13784, 13785}, + {13517, 13789, 13791}, + {640, 13795, 13796}, + {7721, 13800, 13802}, + {11121, 13806, 13807}, + {5791, 13811, 13815}, + {12894, 13819, 13819}, + {11100, 13824, 13824}, + {7011, 13830, 13830}, + {7129, 13834, 13837}, + {13833, 13841, 13841}, + {11276, 13847, 13847}, + {13621, 13853, 13853}, + {13589, 13862, 13863}, + {12989, 13867, 13867}, + {12789, 13871, 13871}, + {1239, 13875, 13875}, + {4675, 13879, 13881}, + {4686, 13885, 13885}, + {707, 13889, 13889}, + {5449, 13897, 13898}, + {13867, 13902, 13903}, + {10613, 13908, 13908}, + {13789, 13912, 13914}, + {4451, 13918, 13919}, + {9200, 13924, 13924}, + {2011, 13930, 13930}, + {11433, 13934, 13936}, + {4695, 13942, 13943}, + {9435, 13948, 13951}, + {13688, 13955, 13957}, + {11694, 13961, 13962}, + {5712, 13966, 13966}, + {5991, 13970, 13972}, + {13477, 13976, 13976}, + {10213, 13987, 13987}, + {11839, 13991, 13993}, + {12272, 13997, 13997}, + {6206, 14001, 14001}, + {13179, 14006, 14007}, + {2939, 14011, 14011}, + {12972, 14016, 14017}, + {13918, 14021, 14022}, + {7436, 14026, 14027}, + {7678, 14032, 14034}, + {13586, 14040, 
14040}, + {13347, 14044, 14044}, + {13109, 14048, 14051}, + {9244, 14055, 14057}, + {13315, 14061, 14061}, + {13276, 14067, 14067}, + {11435, 14073, 14074}, + {13853, 14078, 14078}, + {13452, 14082, 14082}, + {14044, 14087, 14087}, + {4440, 14091, 14095}, + {4479, 14100, 14103}, + {9395, 14107, 14109}, + {6834, 14119, 14119}, + {10458, 14123, 14124}, + {1429, 14129, 14129}, + {8443, 14135, 14135}, + {10365, 14140, 14140}, + {5267, 14145, 14145}, + {11834, 14151, 14153}, +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 0000000..0cf5e37 --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,87 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the snappy block-based compression format. +// It aims for very high speeds and reasonable compression. +// +// The C++ snappy implementation is at https://github.com/google/snappy +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). 
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
+ maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/golang/snappy/snappy_test.go b/vendor/github.com/golang/snappy/snappy_test.go new file mode 100644 index 0000000..2712710 --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy_test.go @@ -0,0 +1,1353 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "bytes" + "encoding/binary" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" +) + +var ( + download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") + testdataDir = flag.String("testdataDir", "testdata", "Directory containing the test data") + benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data") +) + +// goEncoderShouldMatchCppEncoder is whether to test that the algorithm used by +// Go's encoder matches byte-for-byte what the C++ snappy encoder produces, on +// this GOARCH. There is more than one valid encoding of any given input, and +// there is more than one good algorithm along the frontier of trading off +// throughput for output size. 
Nonetheless, we presume that the C++ encoder's +// algorithm is a good one and has been tested on a wide range of inputs, so +// matching that exactly should mean that the Go encoder's algorithm is also +// good, without needing to gather our own corpus of test data. +// +// The exact algorithm used by the C++ code is potentially endian dependent, as +// it puns a byte pointer to a uint32 pointer to load, hash and compare 4 bytes +// at a time. The Go implementation is endian agnostic, in that its output is +// the same (as little-endian C++ code), regardless of the CPU's endianness. +// +// Thus, when comparing Go's output to C++ output generated beforehand, such as +// the "testdata/pi.txt.rawsnappy" file generated by C++ code on a little- +// endian system, we can run that test regardless of the runtime.GOARCH value. +// +// When comparing Go's output to dynamically generated C++ output, i.e. the +// result of fork/exec'ing a C++ program, we can run that test only on +// little-endian systems, because the C++ output might be different on +// big-endian systems. The runtime package doesn't export endianness per se, +// but we can restrict this match-C++ test to common little-endian systems. 
+const goEncoderShouldMatchCppEncoder = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm" + +func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) { + got := maxEncodedLenOfMaxBlockSize + want := MaxEncodedLen(maxBlockSize) + if got != want { + t.Fatalf("got %d, want %d", got, want) + } +} + +func cmp(a, b []byte) error { + if bytes.Equal(a, b) { + return nil + } + if len(a) != len(b) { + return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) + } + for i := range a { + if a[i] != b[i] { + return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) + } + } + return nil +} + +func roundtrip(b, ebuf, dbuf []byte) error { + d, err := Decode(dbuf, Encode(ebuf, b)) + if err != nil { + return fmt.Errorf("decoding error: %v", err) + } + if err := cmp(d, b); err != nil { + return fmt.Errorf("roundtrip mismatch: %v", err) + } + return nil +} + +func TestEmpty(t *testing.T) { + if err := roundtrip(nil, nil, nil); err != nil { + t.Fatal(err) + } +} + +func TestSmallCopy(t *testing.T) { + for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for i := 0; i < 32; i++ { + s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" + if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { + t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) + } + } + } + } +} + +func TestSmallRand(t *testing.T) { + rng := rand.New(rand.NewSource(1)) + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(rng.Intn(256)) + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestSmallRegular(t *testing.T) { + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(i%10 + 'a') + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestInvalidVarint(t *testing.T) { + testCases := []struct { + desc string + input 
string + }{{ + "invalid varint, final byte has continuation bit set", + "\xff", + }, { + "invalid varint, value overflows uint64", + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00", + }, { + // https://github.com/google/snappy/blob/master/format_description.txt + // says that "the stream starts with the uncompressed length [as a + // varint] (up to a maximum of 2^32 - 1)". + "valid varint (as uint64), but value overflows uint32", + "\x80\x80\x80\x80\x10", + }} + + for _, tc := range testCases { + input := []byte(tc.input) + if _, err := DecodedLen(input); err != ErrCorrupt { + t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err) + } + if _, err := Decode(nil, input); err != ErrCorrupt { + t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err) + } + } +} + +func TestDecode(t *testing.T) { + lit40Bytes := make([]byte, 40) + for i := range lit40Bytes { + lit40Bytes[i] = byte(i) + } + lit40 := string(lit40Bytes) + + testCases := []struct { + desc string + input string + want string + wantErr error + }{{ + `decodedLen=0; valid input`, + "\x00", + "", + nil, + }, { + `decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`, + "\x03" + "\x08\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`, + "\x02" + "\x08\xff\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`, + "\x03" + "\x08\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`, + "\x28" + "\x9c" + lit40, + lit40, + nil, + }, { + `decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`, + "\x01" + "\xf0", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`, + "\x03" + "\xf0\x02\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`, + "\x01" + "\xf4\x00", + "", + ErrCorrupt, + }, { + `decodedLen=3; 
tagLiteral, 2-byte length; length=3; valid input`, + "\x03" + "\xf4\x02\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`, + "\x01" + "\xf8\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`, + "\x03" + "\xf8\x02\x00\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`, + "\x01" + "\xfc\x00\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`, + "\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`, + "\x04" + "\xfc\x02\x00\x00\x00\xff", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`, + "\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`, + "\x04" + "\x01", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`, + "\x04" + "\x02\x00", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`, + "\x04" + "\x03\x00\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`, + "\x04" + "\x0cabcd", + "abcd", + nil, + }, { + `decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`, + "\x0d" + "\x0cabcd" + "\x15\x04", + "abcdabcdabcda", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`, + "\x08" + "\x0cabcd" + "\x01\x04", + "abcdabcd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`, + "\x08" + "\x0cabcd" + "\x01\x02", + "abcdcdcd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 
offset=1; valid input`, + "\x08" + "\x0cabcd" + "\x01\x01", + "abcddddd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`, + "\x08" + "\x0cabcd" + "\x01\x00", + "", + ErrCorrupt, + }, { + `decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`, + "\x09" + "\x0cabcd" + "\x01\x04", + "", + ErrCorrupt, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`, + "\x08" + "\x0cabcd" + "\x01\x05", + "", + ErrCorrupt, + }, { + `decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`, + "\x07" + "\x0cabcd" + "\x01\x04", + "", + ErrCorrupt, + }, { + `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`, + "\x06" + "\x0cabcd" + "\x06\x03\x00", + "abcdbc", + nil, + }, { + `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`, + "\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00", + "abcdbc", + nil, + }} + + const ( + // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are + // not present in either the input or the output. It is written to dBuf + // to check that Decode does not write bytes past the end of + // dBuf[:dLen]. + // + // The magic number 37 was chosen because it is prime. A more 'natural' + // number like 32 might lead to a false negative if, for example, a + // byte was incorrectly copied 4*8 bytes later. 
+ notPresentBase = 0xa0 + notPresentLen = 37 + ) + + var dBuf [100]byte +loop: + for i, tc := range testCases { + input := []byte(tc.input) + for _, x := range input { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input) + continue loop + } + } + + dLen, n := binary.Uvarint(input) + if n <= 0 { + t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc) + continue + } + if dLen > uint64(len(dBuf)) { + t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen) + continue + } + + for j := range dBuf { + dBuf[j] = byte(notPresentBase + j%notPresentLen) + } + g, gotErr := Decode(dBuf[:], input) + if got := string(g); got != tc.want || gotErr != tc.wantErr { + t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v", + i, tc.desc, got, gotErr, tc.want, tc.wantErr) + continue + } + for j, x := range dBuf { + if uint64(j) < dLen { + continue + } + if w := byte(notPresentBase + j%notPresentLen); x != w { + t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x", + i, tc.desc, j, x, w, dBuf) + continue loop + } + } + } +} + +func TestDecodeCopy4(t *testing.T) { + dots := strings.Repeat(".", 65536) + + input := strings.Join([]string{ + "\x89\x80\x04", // decodedLen = 65545. + "\x0cpqrs", // 4-byte literal "pqrs". + "\xf4\xff\xff" + dots, // 65536-byte literal dots. + "\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540. + }, "") + + gotBytes, err := Decode(nil, []byte(input)) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + want := "pqrs" + dots + "pqrs." + if len(got) != len(want) { + t.Fatalf("got %d bytes, want %d", len(got), len(want)) + } + if got != want { + for i := 0; i < len(got); i++ { + if g, w := got[i], want[i]; g != w { + t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w) + } + } + } +} + +// TestDecodeLengthOffset tests decoding an encoding of the form literal + +// copy-length-offset + literal. 
For example: "abcdefghijkl" + "efghij" + "AB". +func TestDecodeLengthOffset(t *testing.T) { + const ( + prefix = "abcdefghijklmnopqr" + suffix = "ABCDEFGHIJKLMNOPQR" + + // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are + // not present in either the input or the output. It is written to + // gotBuf to check that Decode does not write bytes past the end of + // gotBuf[:totalLen]. + // + // The magic number 37 was chosen because it is prime. A more 'natural' + // number like 32 might lead to a false negative if, for example, a + // byte was incorrectly copied 4*8 bytes later. + notPresentBase = 0xa0 + notPresentLen = 37 + ) + var gotBuf, wantBuf, inputBuf [128]byte + for length := 1; length <= 18; length++ { + for offset := 1; offset <= 18; offset++ { + loop: + for suffixLen := 0; suffixLen <= 18; suffixLen++ { + totalLen := len(prefix) + length + suffixLen + + inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen)) + inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1) + inputLen++ + inputLen += copy(inputBuf[inputLen:], prefix) + inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1) + inputBuf[inputLen+1] = byte(offset) + inputBuf[inputLen+2] = 0x00 + inputLen += 3 + if suffixLen > 0 { + inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1) + inputLen++ + inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen]) + } + input := inputBuf[:inputLen] + + for i := range gotBuf { + gotBuf[i] = byte(notPresentBase + i%notPresentLen) + } + got, err := Decode(gotBuf[:], input) + if err != nil { + t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err) + continue + } + + wantLen := 0 + wantLen += copy(wantBuf[wantLen:], prefix) + for i := 0; i < length; i++ { + wantBuf[wantLen] = wantBuf[wantLen-offset] + wantLen++ + } + wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen]) + want := wantBuf[:wantLen] + + for _, x := range input { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("length=%d, 
offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x", + length, offset, suffixLen, x, input) + continue loop + } + } + for i, x := range gotBuf { + if i < totalLen { + continue + } + if w := byte(notPresentBase + i%notPresentLen); x != w { + t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+ + "Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x", + length, offset, suffixLen, totalLen, i, x, w, gotBuf) + continue loop + } + } + for _, x := range want { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x", + length, offset, suffixLen, x, want) + continue loop + } + } + + if !bytes.Equal(got, want) { + t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x", + length, offset, suffixLen, input, got, want) + continue + } + } + } + } +} + +const ( + goldenText = "Mark.Twain-Tom.Sawyer.txt" + goldenCompressed = goldenText + ".rawsnappy" +) + +func TestDecodeGoldenInput(t *testing.T) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + got, err := Decode(nil, src) + if err != nil { + t.Fatalf("Decode: %v", err) + } + want, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if err := cmp(got, want); err != nil { + t.Fatal(err) + } +} + +func TestEncodeGoldenInput(t *testing.T) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + got := Encode(nil, src) + want, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if err := cmp(got, want); err != nil { + t.Fatal(err) + } +} + +func TestExtendMatchGoldenInput(t *testing.T) { + tDir := 
filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + for i, tc := range extendMatchGoldenTestCases { + got := extendMatch(src, tc.i, tc.j) + if got != tc.want { + t.Errorf("test #%d: i, j = %5d, %5d: got %5d (= j + %6d), want %5d (= j + %6d)", + i, tc.i, tc.j, got, got-tc.j, tc.want, tc.want-tc.j) + } + } +} + +func TestExtendMatch(t *testing.T) { + // ref is a simple, reference implementation of extendMatch. + ref := func(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j + } + + nums := []int{0, 1, 2, 7, 8, 9, 29, 30, 31, 32, 33, 34, 38, 39, 40} + for yIndex := 40; yIndex > 30; yIndex-- { + xxx := bytes.Repeat([]byte("x"), 40) + if yIndex < len(xxx) { + xxx[yIndex] = 'y' + } + for _, i := range nums { + for _, j := range nums { + if i >= j { + continue + } + got := extendMatch(xxx, i, j) + want := ref(xxx, i, j) + if got != want { + t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want) + } + } + } + } +} + +const snappytoolCmdName = "cmd/snappytool/snappytool" + +func skipTestSameEncodingAsCpp() (msg string) { + if !goEncoderShouldMatchCppEncoder { + return fmt.Sprintf("skipping testing that the encoding is byte-for-byte identical to C++: GOARCH=%s", runtime.GOARCH) + } + if _, err := os.Stat(snappytoolCmdName); err != nil { + return fmt.Sprintf("could not find snappytool: %v", err) + } + return "" +} + +func runTestSameEncodingAsCpp(src []byte) error { + got := Encode(nil, src) + + cmd := exec.Command(snappytoolCmdName, "-e") + cmd.Stdin = bytes.NewReader(src) + want, err := cmd.Output() + if err != nil { + return fmt.Errorf("could not run snappytool: %v", err) + } + return cmp(got, want) +} + +func TestSameEncodingAsCppShortCopies(t *testing.T) { + if msg := skipTestSameEncodingAsCpp(); msg != "" { + t.Skip(msg) + } + src := bytes.Repeat([]byte{'a'}, 20) + for i := 0; i <= len(src); i++ { + 
if err := runTestSameEncodingAsCpp(src[:i]); err != nil { + t.Errorf("i=%d: %v", i, err) + } + } +} + +func TestSameEncodingAsCppLongFiles(t *testing.T) { + if msg := skipTestSameEncodingAsCpp(); msg != "" { + t.Skip(msg) + } + bDir := filepath.FromSlash(*benchdataDir) + failed := false + for i, tf := range testFiles { + if err := downloadBenchmarkFiles(t, tf.filename); err != nil { + t.Fatalf("failed to download testdata: %s", err) + } + data := readFile(t, filepath.Join(bDir, tf.filename)) + if n := tf.sizeLimit; 0 < n && n < len(data) { + data = data[:n] + } + if err := runTestSameEncodingAsCpp(data); err != nil { + t.Errorf("i=%d: %v", i, err) + failed = true + } + } + if failed { + t.Errorf("was the snappytool program built against the C++ snappy library version " + + "d53de187 or later, commited on 2016-04-05? See " + + "https://github.com/google/snappy/commit/d53de18799418e113e44444252a39b12a0e4e0cc") + } +} + +// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm +// described in decode_amd64.s and its claim of a 10 byte overrun worst case. 
+func TestSlowForwardCopyOverrun(t *testing.T) { + const base = 100 + + for length := 1; length < 18; length++ { + for offset := 1; offset < 18; offset++ { + highWaterMark := base + d := base + l := length + o := offset + + // makeOffsetAtLeast8 + for o < 8 { + if end := d + 8; highWaterMark < end { + highWaterMark = end + } + l -= o + d += o + o += o + } + + // fixUpSlowForwardCopy + a := d + d += l + + // finishSlowForwardCopy + for l > 0 { + if end := a + 8; highWaterMark < end { + highWaterMark = end + } + a += 8 + l -= 8 + } + + dWant := base + length + overrun := highWaterMark - dWant + if d != dWant || overrun < 0 || 10 < overrun { + t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])", + length, offset, d, overrun, dWant) + } + } + } +} + +// TestEncodeNoiseThenRepeats encodes input for which the first half is very +// incompressible and the second half is very compressible. The encoded form's +// length should be closer to 50% of the original length than 100%. +func TestEncodeNoiseThenRepeats(t *testing.T) { + for _, origLen := range []int{256 * 1024, 2048 * 1024} { + src := make([]byte, origLen) + rng := rand.New(rand.NewSource(1)) + firstHalf, secondHalf := src[:origLen/2], src[origLen/2:] + for i := range firstHalf { + firstHalf[i] = uint8(rng.Intn(256)) + } + for i := range secondHalf { + secondHalf[i] = uint8(i >> 8) + } + dst := Encode(nil, src) + if got, want := len(dst), origLen*3/4; got >= want { + t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want) + } + } +} + +func TestFramingFormat(t *testing.T) { + // src is comprised of alternating 1e5-sized sequences of random + // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen + // because it is larger than maxBlockSize (64k). 
+ src := make([]byte, 1e6) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < 10; i++ { + if i%2 == 0 { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(rng.Intn(256)) + } + } else { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(i) + } + } + } + + buf := new(bytes.Buffer) + if _, err := NewWriter(buf).Write(src); err != nil { + t.Fatalf("Write: encoding: %v", err) + } + dst, err := ioutil.ReadAll(NewReader(buf)) + if err != nil { + t.Fatalf("ReadAll: decoding: %v", err) + } + if err := cmp(dst, src); err != nil { + t.Fatal(err) + } +} + +func TestWriterGoldenOutput(t *testing.T) { + buf := new(bytes.Buffer) + w := NewBufferedWriter(buf) + defer w.Close() + w.Write([]byte("abcd")) // Not compressible. + w.Flush() + w.Write(bytes.Repeat([]byte{'A'}, 150)) // Compressible. + w.Flush() + // The next chunk is also compressible, but a naive, greedy encoding of the + // overall length 67 copy as a length 64 copy (the longest expressible as a + // tagCopy1 or tagCopy2) plus a length 3 remainder would be two 3-byte + // tagCopy2 tags (6 bytes), since the minimum length for a tagCopy1 is 4 + // bytes. Instead, we could do it shorter, in 5 bytes: a 3-byte tagCopy2 + // (of length 60) and a 2-byte tagCopy1 (of length 7). + w.Write(bytes.Repeat([]byte{'B'}, 68)) + w.Write([]byte("efC")) // Not compressible. + w.Write(bytes.Repeat([]byte{'C'}, 20)) // Compressible. + w.Write(bytes.Repeat([]byte{'B'}, 20)) // Compressible. + w.Write([]byte("g")) // Not compressible. + w.Flush() + + got := buf.String() + want := strings.Join([]string{ + magicChunk, + "\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum). + "\x68\x10\xe6\xb6", // Checksum. + "\x61\x62\x63\x64", // Uncompressed payload: "abcd". + "\x00\x11\x00\x00", // Compressed chunk, 17 bytes long (including 4 byte checksum). + "\x5f\xeb\xf2\x10", // Checksum. + "\x96\x01", // Compressed payload: Uncompressed length (varint encoded): 150. 
+ "\x00\x41", // Compressed payload: tagLiteral, length=1, "A". + "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. + "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. + "\x52\x01\x00", // Compressed payload: tagCopy2, length=21, offset=1. + "\x00\x18\x00\x00", // Compressed chunk, 24 bytes long (including 4 byte checksum). + "\x30\x85\x69\xeb", // Checksum. + "\x70", // Compressed payload: Uncompressed length (varint encoded): 112. + "\x00\x42", // Compressed payload: tagLiteral, length=1, "B". + "\xee\x01\x00", // Compressed payload: tagCopy2, length=60, offset=1. + "\x0d\x01", // Compressed payload: tagCopy1, length=7, offset=1. + "\x08\x65\x66\x43", // Compressed payload: tagLiteral, length=3, "efC". + "\x4e\x01\x00", // Compressed payload: tagCopy2, length=20, offset=1. + "\x4e\x5a\x00", // Compressed payload: tagCopy2, length=20, offset=90. + "\x00\x67", // Compressed payload: tagLiteral, length=1, "g". + }, "") + if got != want { + t.Fatalf("\ngot: % x\nwant: % x", got, want) + } +} + +func TestEmitLiteral(t *testing.T) { + testCases := []struct { + length int + want string + }{ + {1, "\x00"}, + {2, "\x04"}, + {59, "\xe8"}, + {60, "\xec"}, + {61, "\xf0\x3c"}, + {62, "\xf0\x3d"}, + {254, "\xf0\xfd"}, + {255, "\xf0\xfe"}, + {256, "\xf0\xff"}, + {257, "\xf4\x00\x01"}, + {65534, "\xf4\xfd\xff"}, + {65535, "\xf4\xfe\xff"}, + {65536, "\xf4\xff\xff"}, + } + + dst := make([]byte, 70000) + nines := bytes.Repeat([]byte{0x99}, 65536) + for _, tc := range testCases { + lit := nines[:tc.length] + n := emitLiteral(dst, lit) + if !bytes.HasSuffix(dst[:n], lit) { + t.Errorf("length=%d: did not end with that many literal bytes", tc.length) + continue + } + got := string(dst[:n-tc.length]) + if got != tc.want { + t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want) + continue + } + } +} + +func TestEmitCopy(t *testing.T) { + testCases := []struct { + offset int + length int + want string + }{ + {8, 04, "\x01\x08"}, + {8, 
11, "\x1d\x08"}, + {8, 12, "\x2e\x08\x00"}, + {8, 13, "\x32\x08\x00"}, + {8, 59, "\xea\x08\x00"}, + {8, 60, "\xee\x08\x00"}, + {8, 61, "\xf2\x08\x00"}, + {8, 62, "\xf6\x08\x00"}, + {8, 63, "\xfa\x08\x00"}, + {8, 64, "\xfe\x08\x00"}, + {8, 65, "\xee\x08\x00\x05\x08"}, + {8, 66, "\xee\x08\x00\x09\x08"}, + {8, 67, "\xee\x08\x00\x0d\x08"}, + {8, 68, "\xfe\x08\x00\x01\x08"}, + {8, 69, "\xfe\x08\x00\x05\x08"}, + {8, 80, "\xfe\x08\x00\x3e\x08\x00"}, + + {256, 04, "\x21\x00"}, + {256, 11, "\x3d\x00"}, + {256, 12, "\x2e\x00\x01"}, + {256, 13, "\x32\x00\x01"}, + {256, 59, "\xea\x00\x01"}, + {256, 60, "\xee\x00\x01"}, + {256, 61, "\xf2\x00\x01"}, + {256, 62, "\xf6\x00\x01"}, + {256, 63, "\xfa\x00\x01"}, + {256, 64, "\xfe\x00\x01"}, + {256, 65, "\xee\x00\x01\x25\x00"}, + {256, 66, "\xee\x00\x01\x29\x00"}, + {256, 67, "\xee\x00\x01\x2d\x00"}, + {256, 68, "\xfe\x00\x01\x21\x00"}, + {256, 69, "\xfe\x00\x01\x25\x00"}, + {256, 80, "\xfe\x00\x01\x3e\x00\x01"}, + + {2048, 04, "\x0e\x00\x08"}, + {2048, 11, "\x2a\x00\x08"}, + {2048, 12, "\x2e\x00\x08"}, + {2048, 13, "\x32\x00\x08"}, + {2048, 59, "\xea\x00\x08"}, + {2048, 60, "\xee\x00\x08"}, + {2048, 61, "\xf2\x00\x08"}, + {2048, 62, "\xf6\x00\x08"}, + {2048, 63, "\xfa\x00\x08"}, + {2048, 64, "\xfe\x00\x08"}, + {2048, 65, "\xee\x00\x08\x12\x00\x08"}, + {2048, 66, "\xee\x00\x08\x16\x00\x08"}, + {2048, 67, "\xee\x00\x08\x1a\x00\x08"}, + {2048, 68, "\xfe\x00\x08\x0e\x00\x08"}, + {2048, 69, "\xfe\x00\x08\x12\x00\x08"}, + {2048, 80, "\xfe\x00\x08\x3e\x00\x08"}, + } + + dst := make([]byte, 1024) + for _, tc := range testCases { + n := emitCopy(dst, tc.offset, tc.length) + got := string(dst[:n]) + if got != tc.want { + t.Errorf("offset=%d, length=%d:\ngot % x\nwant % x", tc.offset, tc.length, got, tc.want) + } + } +} + +func TestNewBufferedWriter(t *testing.T) { + // Test all 32 possible sub-sequences of these 5 input slices. 
+ // + // Their lengths sum to 400,000, which is over 6 times the Writer ibuf + // capacity: 6 * maxBlockSize is 393,216. + inputs := [][]byte{ + bytes.Repeat([]byte{'a'}, 40000), + bytes.Repeat([]byte{'b'}, 150000), + bytes.Repeat([]byte{'c'}, 60000), + bytes.Repeat([]byte{'d'}, 120000), + bytes.Repeat([]byte{'e'}, 30000), + } +loop: + for i := 0; i < 1< 0; { + i := copy(x, src) + x = x[i:] + } + return dst +} + +func benchWords(b *testing.B, n int, decode bool) { + // Note: the file is OS-language dependent so the resulting values are not + // directly comparable for non-US-English OS installations. + data := expand(readFile(b, "/usr/share/dict/words"), n) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) } +func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) } +func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } +func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } +func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } +func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } +func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) } +func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) } +func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } +func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } +func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } +func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } + +func BenchmarkRandomEncode(b *testing.B) { + rng := rand.New(rand.NewSource(1)) + data := make([]byte, 1<<20) + for i := range data { + data[i] = uint8(rng.Intn(256)) + } + benchEncode(b, data) +} + +// testFiles' values are copied directly from +// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc +// The label field is unused in snappy-go. 
+var testFiles = []struct { + label string + filename string + sizeLimit int +}{ + {"html", "html", 0}, + {"urls", "urls.10K", 0}, + {"jpg", "fireworks.jpeg", 0}, + {"jpg_200", "fireworks.jpeg", 200}, + {"pdf", "paper-100k.pdf", 0}, + {"html4", "html_x_4", 0}, + {"txt1", "alice29.txt", 0}, + {"txt2", "asyoulik.txt", 0}, + {"txt3", "lcet10.txt", 0}, + {"txt4", "plrabn12.txt", 0}, + {"pb", "geo.protodata", 0}, + {"gaviota", "kppkn.gtb", 0}, +} + +const ( + // The benchmark data files are at this canonical URL. + benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" +) + +func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) { + bDir := filepath.FromSlash(*benchdataDir) + filename := filepath.Join(bDir, basename) + if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { + return nil + } + + if !*download { + b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b)) + } + // Download the official snappy C++ implementation reference test data + // files for benchmarking. 
+ if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) { + return fmt.Errorf("failed to create %s: %s", bDir, err) + } + + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("failed to create %s: %s", filename, err) + } + defer f.Close() + defer func() { + if errRet != nil { + os.Remove(filename) + } + }() + url := benchURL + basename + resp, err := http.Get(url) + if err != nil { + return fmt.Errorf("failed to download %s: %s", url, err) + } + defer resp.Body.Close() + if s := resp.StatusCode; s != http.StatusOK { + return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) + } + _, err = io.Copy(f, resp.Body) + if err != nil { + return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) + } + return nil +} + +func benchFile(b *testing.B, i int, decode bool) { + if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil { + b.Fatalf("failed to download testdata: %s", err) + } + bDir := filepath.FromSlash(*benchdataDir) + data := readFile(b, filepath.Join(bDir, testFiles[i].filename)) + if n := testFiles[i].sizeLimit; 0 < n && n < len(data) { + data = data[:n] + } + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +// Naming convention is kept similar to what snappy's C++ implementation uses. 
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } +func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } +func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } +func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } +func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } +func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } +func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } +func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } +func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } +func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } +func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } +func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } +func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } +func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } +func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } +func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } +func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } +func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } +func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } +func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } +func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } +func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } +func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } +func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } + +func BenchmarkExtendMatch(b *testing.B) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + b.Fatalf("ReadFile: %v", err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, tc := range extendMatchGoldenTestCases { + extendMatch(src, tc.i, tc.j) + } + } +} diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt 
b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt new file mode 100644 index 0000000..86a1875 --- /dev/null +++ b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt @@ -0,0 +1,396 @@ +Produced by David Widger. The previous edition was updated by Jose +Menendez. + + + + + + THE ADVENTURES OF TOM SAWYER + BY + MARK TWAIN + (Samuel Langhorne Clemens) + + + + + P R E F A C E + +MOST of the adventures recorded in this book really occurred; one or +two were experiences of my own, the rest those of boys who were +schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but +not from an individual--he is a combination of the characteristics of +three boys whom I knew, and therefore belongs to the composite order of +architecture. + +The odd superstitions touched upon were all prevalent among children +and slaves in the West at the period of this story--that is to say, +thirty or forty years ago. + +Although my book is intended mainly for the entertainment of boys and +girls, I hope it will not be shunned by men and women on that account, +for part of my plan has been to try to pleasantly remind adults of what +they once were themselves, and of how they felt and thought and talked, +and what queer enterprises they sometimes engaged in. + + THE AUTHOR. + +HARTFORD, 1876. + + + + T O M S A W Y E R + + + +CHAPTER I + +"TOM!" + +No answer. + +"TOM!" + +No answer. + +"What's gone with that boy, I wonder? You TOM!" + +No answer. + +The old lady pulled her spectacles down and looked over them about the +room; then she put them up and looked out under them. She seldom or +never looked THROUGH them for so small a thing as a boy; they were her +state pair, the pride of her heart, and were built for "style," not +service--she could have seen through a pair of stove-lids just as well. 
+She looked perplexed for a moment, and then said, not fiercely, but +still loud enough for the furniture to hear: + +"Well, I lay if I get hold of you I'll--" + +She did not finish, for by this time she was bending down and punching +under the bed with the broom, and so she needed breath to punctuate the +punches with. She resurrected nothing but the cat. + +"I never did see the beat of that boy!" + +She went to the open door and stood in it and looked out among the +tomato vines and "jimpson" weeds that constituted the garden. No Tom. +So she lifted up her voice at an angle calculated for distance and +shouted: + +"Y-o-u-u TOM!" + +There was a slight noise behind her and she turned just in time to +seize a small boy by the slack of his roundabout and arrest his flight. + +"There! I might 'a' thought of that closet. What you been doing in +there?" + +"Nothing." + +"Nothing! Look at your hands. And look at your mouth. What IS that +truck?" + +"I don't know, aunt." + +"Well, I know. It's jam--that's what it is. Forty times I've said if +you didn't let that jam alone I'd skin you. Hand me that switch." + +The switch hovered in the air--the peril was desperate-- + +"My! Look behind you, aunt!" + +The old lady whirled round, and snatched her skirts out of danger. The +lad fled on the instant, scrambled up the high board-fence, and +disappeared over it. + +His aunt Polly stood surprised a moment, and then broke into a gentle +laugh. + +"Hang the boy, can't I never learn anything? Ain't he played me tricks +enough like that for me to be looking out for him by this time? But old +fools is the biggest fools there is. Can't learn an old dog new tricks, +as the saying is. But my goodness, he never plays them alike, two days, +and how is a body to know what's coming? He 'pears to know just how +long he can torment me before I get my dander up, and he knows if he +can make out to put me off for a minute or make me laugh, it's all down +again and I can't hit him a lick. 
I ain't doing my duty by that boy, +and that's the Lord's truth, goodness knows. Spare the rod and spile +the child, as the Good Book says. I'm a laying up sin and suffering for +us both, I know. He's full of the Old Scratch, but laws-a-me! he's my +own dead sister's boy, poor thing, and I ain't got the heart to lash +him, somehow. Every time I let him off, my conscience does hurt me so, +and every time I hit him my old heart most breaks. Well-a-well, man +that is born of woman is of few days and full of trouble, as the +Scripture says, and I reckon it's so. He'll play hookey this evening, * +and [* Southwestern for "afternoon"] I'll just be obleeged to make him +work, to-morrow, to punish him. It's mighty hard to make him work +Saturdays, when all the boys is having holiday, but he hates work more +than he hates anything else, and I've GOT to do some of my duty by him, +or I'll be the ruination of the child." + +Tom did play hookey, and he had a very good time. He got back home +barely in season to help Jim, the small colored boy, saw next-day's +wood and split the kindlings before supper--at least he was there in +time to tell his adventures to Jim while Jim did three-fourths of the +work. Tom's younger brother (or rather half-brother) Sid was already +through with his part of the work (picking up chips), for he was a +quiet boy, and had no adventurous, troublesome ways. + +While Tom was eating his supper, and stealing sugar as opportunity +offered, Aunt Polly asked him questions that were full of guile, and +very deep--for she wanted to trap him into damaging revealments. Like +many other simple-hearted souls, it was her pet vanity to believe she +was endowed with a talent for dark and mysterious diplomacy, and she +loved to contemplate her most transparent devices as marvels of low +cunning. Said she: + +"Tom, it was middling warm in school, warn't it?" + +"Yes'm." + +"Powerful warm, warn't it?" + +"Yes'm." + +"Didn't you want to go in a-swimming, Tom?" 
+ +A bit of a scare shot through Tom--a touch of uncomfortable suspicion. +He searched Aunt Polly's face, but it told him nothing. So he said: + +"No'm--well, not very much." + +The old lady reached out her hand and felt Tom's shirt, and said: + +"But you ain't too warm now, though." And it flattered her to reflect +that she had discovered that the shirt was dry without anybody knowing +that that was what she had in her mind. But in spite of her, Tom knew +where the wind lay, now. So he forestalled what might be the next move: + +"Some of us pumped on our heads--mine's damp yet. See?" + +Aunt Polly was vexed to think she had overlooked that bit of +circumstantial evidence, and missed a trick. Then she had a new +inspiration: + +"Tom, you didn't have to undo your shirt collar where I sewed it, to +pump on your head, did you? Unbutton your jacket!" + +The trouble vanished out of Tom's face. He opened his jacket. His +shirt collar was securely sewed. + +"Bother! Well, go 'long with you. I'd made sure you'd played hookey +and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a +singed cat, as the saying is--better'n you look. THIS time." + +She was half sorry her sagacity had miscarried, and half glad that Tom +had stumbled into obedient conduct for once. + +But Sidney said: + +"Well, now, if I didn't think you sewed his collar with white thread, +but it's black." + +"Why, I did sew it with white! Tom!" + +But Tom did not wait for the rest. As he went out at the door he said: + +"Siddy, I'll lick you for that." + +In a safe place Tom examined two large needles which were thrust into +the lapels of his jacket, and had thread bound about them--one needle +carried white thread and the other black. He said: + +"She'd never noticed if it hadn't been for Sid. Confound it! sometimes +she sews it with white, and sometimes she sews it with black. I wish to +geeminy she'd stick to one or t'other--I can't keep the run of 'em. But +I bet you I'll lam Sid for that. 
I'll learn him!" + +He was not the Model Boy of the village. He knew the model boy very +well though--and loathed him. + +Within two minutes, or even less, he had forgotten all his troubles. +Not because his troubles were one whit less heavy and bitter to him +than a man's are to a man, but because a new and powerful interest bore +them down and drove them out of his mind for the time--just as men's +misfortunes are forgotten in the excitement of new enterprises. This +new interest was a valued novelty in whistling, which he had just +acquired from a negro, and he was suffering to practise it undisturbed. +It consisted in a peculiar bird-like turn, a sort of liquid warble, +produced by touching the tongue to the roof of the mouth at short +intervals in the midst of the music--the reader probably remembers how +to do it, if he has ever been a boy. Diligence and attention soon gave +him the knack of it, and he strode down the street with his mouth full +of harmony and his soul full of gratitude. He felt much as an +astronomer feels who has discovered a new planet--no doubt, as far as +strong, deep, unalloyed pleasure is concerned, the advantage was with +the boy, not the astronomer. + +The summer evenings were long. It was not dark, yet. Presently Tom +checked his whistle. A stranger was before him--a boy a shade larger +than himself. A new-comer of any age or either sex was an impressive +curiosity in the poor little shabby village of St. Petersburg. This boy +was well dressed, too--well dressed on a week-day. This was simply +astounding. His cap was a dainty thing, his close-buttoned blue cloth +roundabout was new and natty, and so were his pantaloons. He had shoes +on--and it was only Friday. He even wore a necktie, a bright bit of +ribbon. He had a citified air about him that ate into Tom's vitals. The +more Tom stared at the splendid marvel, the higher he turned up his +nose at his finery and the shabbier and shabbier his own outfit seemed +to him to grow. 
Neither boy spoke. If one moved, the other moved--but +only sidewise, in a circle; they kept face to face and eye to eye all +the time. Finally Tom said: + +"I can lick you!" + +"I'd like to see you try it." + +"Well, I can do it." + +"No you can't, either." + +"Yes I can." + +"No you can't." + +"I can." + +"You can't." + +"Can!" + +"Can't!" + +An uncomfortable pause. Then Tom said: + +"What's your name?" + +"'Tisn't any of your business, maybe." + +"Well I 'low I'll MAKE it my business." + +"Well why don't you?" + +"If you say much, I will." + +"Much--much--MUCH. There now." + +"Oh, you think you're mighty smart, DON'T you? I could lick you with +one hand tied behind me, if I wanted to." + +"Well why don't you DO it? You SAY you can do it." + +"Well I WILL, if you fool with me." + +"Oh yes--I've seen whole families in the same fix." + +"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!" + +"You can lump that hat if you don't like it. I dare you to knock it +off--and anybody that'll take a dare will suck eggs." + +"You're a liar!" + +"You're another." + +"You're a fighting liar and dasn't take it up." + +"Aw--take a walk!" + +"Say--if you give me much more of your sass I'll take and bounce a +rock off'n your head." + +"Oh, of COURSE you will." + +"Well I WILL." + +"Well why don't you DO it then? What do you keep SAYING you will for? +Why don't you DO it? It's because you're afraid." + +"I AIN'T afraid." + +"You are." + +"I ain't." + +"You are." + +Another pause, and more eying and sidling around each other. Presently +they were shoulder to shoulder. Tom said: + +"Get away from here!" + +"Go away yourself!" + +"I won't." + +"I won't either." + +So they stood, each with a foot placed at an angle as a brace, and +both shoving with might and main, and glowering at each other with +hate. But neither could get an advantage. 
After struggling till both +were hot and flushed, each relaxed his strain with watchful caution, +and Tom said: + +"You're a coward and a pup. I'll tell my big brother on you, and he +can thrash you with his little finger, and I'll make him do it, too." + +"What do I care for your big brother? I've got a brother that's bigger +than he is--and what's more, he can throw him over that fence, too." +[Both brothers were imaginary.] + +"That's a lie." + +"YOUR saying so don't make it so." + +Tom drew a line in the dust with his big toe, and said: + +"I dare you to step over that, and I'll lick you till you can't stand +up. Anybody that'll take a dare will steal sheep." + +The new boy stepped over promptly, and said: + +"Now you said you'd do it, now let's see you do it." + +"Don't you crowd me now; you better look out." + +"Well, you SAID you'd do it--why don't you do it?" + +"By jingo! for two cents I WILL do it." + +The new boy took two broad coppers out of his pocket and held them out +with derision. Tom struck them to the ground. In an instant both boys +were rolling and tumbling in the dirt, gripped together like cats; and +for the space of a minute they tugged and tore at each other's hair and +clothes, punched and scratched each other's nose, and covered +themselves with dust and glory. Presently the confusion took form, and +through the fog of battle Tom appeared, seated astride the new boy, and +pounding him with his fists. "Holler 'nuff!" said he. + +The boy only struggled to free himself. He was crying--mainly from rage. + +"Holler 'nuff!"--and the pounding went on. + +At last the stranger got out a smothered "'Nuff!" and Tom let him up +and said: + +"Now that'll learn you. Better look out who you're fooling with next +time." + +The new boy went off brushing the dust from his clothes, sobbing, +snuffling, and occasionally looking back and shaking his head and +threatening what he would do to Tom the "next time he caught him out." 
+To which Tom responded with jeers, and started off in high feather, and +as soon as his back was turned the new boy snatched up a stone, threw +it and hit him between the shoulders and then turned tail and ran like +an antelope. Tom chased the traitor home, and thus found out where he +lived. He then held a position at the gate for some time, daring the +enemy to come outside, but the enemy only made faces at him through the +window and declined. At last the enemy's mother appeared, and called +Tom a bad, vicious, vulgar child, and ordered him away. So he went +away; but he said he "'lowed" to "lay" for that boy. + +He got home pretty late that night, and when he climbed cautiously in +at the window, he uncovered an ambuscade, in the person of his aunt; +and when she saw the state his clothes were in her resolution to turn +his Saturday holiday into captivity at hard labor became adamantine in +its firmness. diff --git a/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy new file mode 100644 index 0000000..9c56d98 Binary files /dev/null and b/vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy differ diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go index f4596d8..7d8a57c 100644 --- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -3,11 +3,12 @@ package cleanhttp import ( "net" "net/http" + "runtime" "time" ) -// DefaultTransport returns a new http.Transport with the same default values -// as http.DefaultTransport, but with idle connections and keepalives disabled. +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. 
func DefaultTransport() *http.Transport { transport := DefaultPooledTransport() transport.DisableKeepAlives = true @@ -22,13 +23,15 @@ func DefaultTransport() *http.Transport { func DefaultPooledTransport() *http.Transport { transport := &http.Transport{ Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ + DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - DisableKeepAlives: false, - MaxIdleConnsPerHost: 1, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, } return transport } @@ -42,10 +45,10 @@ func DefaultClient() *http.Client { } } -// DefaultPooledClient returns a new http.Client with the same default values -// as http.Client, but with a shared Transport. Do not use this function -// for transient clients as it can leak file descriptors over time. Only use -// this for clients that will be re-used for the same host(s). +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. Only use this +// for clients that will be re-used for the same host(s). 
func DefaultPooledClient() *http.Client { return &http.Client{ Transport: DefaultPooledTransport(), diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml new file mode 100644 index 0000000..4b865d1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.6 + +branches: + only: + - master + +script: make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 0000000..b97cd6e --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... 
\ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md index e81be50..ead5830 100644 --- a/vendor/github.com/hashicorp/go-multierror/README.md +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -1,5 +1,11 @@ # go-multierror +[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: https://travis-ci.org/hashicorp/go-multierror +[godocs]: https://godoc.org/github.com/hashicorp/go-multierror + `go-multierror` is a package for Go that provides a mechanism for representing a list of `error` values as a single `error`. diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go index 00afa9b..775b6e7 100644 --- a/vendor/github.com/hashicorp/go-multierror/append.go +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -18,9 +18,13 @@ func Append(err error, errs ...error) *Error { for _, e := range errs { switch e := e.(type) { case *Error: - err.Errors = append(err.Errors, e.Errors...) + if e != nil { + err.Errors = append(err.Errors, e.Errors...) 
+ } default: - err.Errors = append(err.Errors, e) + if e != nil { + err.Errors = append(err.Errors, e) + } } } diff --git a/vendor/github.com/hashicorp/go-multierror/append_test.go b/vendor/github.com/hashicorp/go-multierror/append_test.go index dfa79e2..58ddafa 100644 --- a/vendor/github.com/hashicorp/go-multierror/append_test.go +++ b/vendor/github.com/hashicorp/go-multierror/append_test.go @@ -47,6 +47,24 @@ func TestAppend_NilError(t *testing.T) { } } +func TestAppend_NilErrorArg(t *testing.T) { + var err error + var nilErr *Error + result := Append(err, nilErr) + if len(result.Errors) != 0 { + t.Fatalf("wrong len: %d", len(result.Errors)) + } +} + +func TestAppend_NilErrorIfaceArg(t *testing.T) { + var err error + var nilErr error + result := Append(err, nilErr) + if len(result.Errors) != 0 { + t.Fatalf("wrong len: %d", len(result.Errors)) + } +} + func TestAppend_NonError(t *testing.T) { original := errors.New("foo") result := Append(original, errors.New("bar")) diff --git a/vendor/github.com/hashicorp/go-multierror/flatten_test.go b/vendor/github.com/hashicorp/go-multierror/flatten_test.go index 75218f1..9fbacad 100644 --- a/vendor/github.com/hashicorp/go-multierror/flatten_test.go +++ b/vendor/github.com/hashicorp/go-multierror/flatten_test.go @@ -26,7 +26,7 @@ func TestFlatten(t *testing.T) { } expected := strings.TrimSpace(` -3 error(s) occurred: +3 errors occurred: * one * two diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go index bb65a12..6c7a3cc 100644 --- a/vendor/github.com/hashicorp/go-multierror/format.go +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -12,12 +12,16 @@ type ErrorFormatFunc func([]error) string // ListFormatFunc is a basic formatter that outputs the number of errors // that occurred along with a bullet point list of the errors. 
func ListFormatFunc(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\n* %s", es[0]) + } + points := make([]string, len(es)) for i, err := range es { points[i] = fmt.Sprintf("* %s", err) } return fmt.Sprintf( - "%d error(s) occurred:\n\n%s", + "%d errors occurred:\n\n%s", len(es), strings.Join(points, "\n")) } diff --git a/vendor/github.com/hashicorp/go-multierror/format_test.go b/vendor/github.com/hashicorp/go-multierror/format_test.go index d7cee5d..3359e02 100644 --- a/vendor/github.com/hashicorp/go-multierror/format_test.go +++ b/vendor/github.com/hashicorp/go-multierror/format_test.go @@ -5,8 +5,23 @@ import ( "testing" ) -func TestListFormatFunc(t *testing.T) { - expected := `2 error(s) occurred: +func TestListFormatFuncSingle(t *testing.T) { + expected := `1 error occurred: + +* foo` + + errors := []error{ + errors.New("foo"), + } + + actual := ListFormatFunc(errors) + if actual != expected { + t.Fatalf("bad: %#v", actual) + } +} + +func TestListFormatFuncMultiple(t *testing.T) { + expected := `2 errors occurred: * foo * bar` diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go index 2ea0827..89b1422 100644 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -40,11 +40,11 @@ func (e *Error) GoString() string { } // WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementatin of the errwrap.Wrapper interface so that +// It is an implementation of the errwrap.Wrapper interface so that // multierror.Error can be used with that library. // // This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implementd only to +// than accessing the Errors field directly. It is implemented only to // satisfy the errwrap.Wrapper interface. 
func (e *Error) WrappedErrors() []error { return e.Errors diff --git a/vendor/github.com/hashicorp/go-multierror/multierror_test.go b/vendor/github.com/hashicorp/go-multierror/multierror_test.go index 3e78079..5567d1c 100644 --- a/vendor/github.com/hashicorp/go-multierror/multierror_test.go +++ b/vendor/github.com/hashicorp/go-multierror/multierror_test.go @@ -27,7 +27,7 @@ func TestErrorError_custom(t *testing.T) { } func TestErrorError_default(t *testing.T) { - expected := `2 error(s) occurred: + expected := `2 errors occurred: * foo * bar` diff --git a/vendor/github.com/hashicorp/go-multierror/scripts/deps.sh b/vendor/github.com/hashicorp/go-multierror/scripts/deps.sh new file mode 100755 index 0000000..1d2fcf9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/scripts/deps.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# +# This script updates dependencies using a temporary directory. This is required +# to avoid any auxillary dependencies that sneak into GOPATH. +set -e + +# Get the parent directory of where this script is. +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$(cd -P "$(dirname "$SOURCE")/.." && pwd)" + +# Change into that directory +cd "$DIR" + +# Get the name from the directory +NAME=${NAME:-"$(basename $(pwd))"} + +# Announce +echo "==> Updating dependencies..." + +echo "--> Making tmpdir..." +tmpdir=$(mktemp -d) +function cleanup { + rm -rf "${tmpdir}" +} +trap cleanup EXIT + +export GOPATH="${tmpdir}" +export PATH="${tmpdir}/bin:$PATH" + +mkdir -p "${tmpdir}/src/github.com/hashicorp" +pushd "${tmpdir}/src/github.com/hashicorp" &>/dev/null + +echo "--> Copying ${NAME}..." +cp -R "$DIR" "${tmpdir}/src/github.com/hashicorp/${NAME}" +pushd "${tmpdir}/src/github.com/hashicorp/${NAME}" &>/dev/null +rm -rf vendor/ + +echo "--> Installing dependency manager..." +go get -u github.com/kardianos/govendor +govendor init + +echo "--> Installing all dependencies (may take some time)..." 
+govendor fetch -v +outside + +echo "--> Vendoring..." +govendor add +external + +echo "--> Moving into place..." +vpath="${tmpdir}/src/github.com/hashicorp/${NAME}/vendor" +popd &>/dev/null +popd &>/dev/null +rm -rf vendor/ +cp -R "${vpath}" . diff --git a/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md b/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..2d7fc4b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,21 @@ +### HCL Template +```hcl +# Place your HCL configuration file here +``` + +### Expected behavior +What should have happened? + +### Actual behavior +What actually happened? + +### Steps to reproduce +1. +2. +3. + +### References +Are there any other GitHub issues (open or closed) that should +be linked here? For example: +- GH-1234 +- ... diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml index a785444..cb63a32 100644 --- a/vendor/github.com/hashicorp/hcl/.travis.yml +++ b/vendor/github.com/hashicorp/hcl/.travis.yml @@ -1,3 +1,13 @@ sudo: false + language: go -go: 1.7 + +go: + - 1.x + - tip + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml index 3c8cdf8..4db0b71 100644 --- a/vendor/github.com/hashicorp/hcl/appveyor.yml +++ b/vendor/github.com/hashicorp/hcl/appveyor.yml @@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\hashicorp\hcl environment: GOPATH: c:\gopath init: - - git config --global core.autocrlf true + - git config --global core.autocrlf false install: - cmd: >- echo %Path% diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go index c8a077d..b88f322 100644 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ b/vendor/github.com/hashicorp/hcl/decoder.go @@ -89,9 +89,9 @@ func (d *decoder) decode(name string, node ast.Node, result 
reflect.Value) error switch k.Kind() { case reflect.Bool: return d.decodeBool(name, node, result) - case reflect.Float64: + case reflect.Float32, reflect.Float64: return d.decodeFloat(name, node, result) - case reflect.Int: + case reflect.Int, reflect.Int32, reflect.Int64: return d.decodeInt(name, node, result) case reflect.Interface: // When we see an interface, we make our own thing @@ -137,13 +137,13 @@ func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) e func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: - if n.Token.Type == token.FLOAT { + if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { v, err := strconv.ParseFloat(n.Token.Text, 64) if err != nil { return err } - result.Set(reflect.ValueOf(v)) + result.Set(reflect.ValueOf(v).Convert(result.Type())) return nil } } @@ -164,7 +164,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er return err } - result.Set(reflect.ValueOf(int(v))) + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } return nil case token.STRING: v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) @@ -172,7 +176,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er return err } - result.Set(reflect.ValueOf(int(v))) + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } return nil } } diff --git a/vendor/github.com/hashicorp/hcl/decoder_test.go b/vendor/github.com/hashicorp/hcl/decoder_test.go index 5a8404c..8682f47 100644 --- a/vendor/github.com/hashicorp/hcl/decoder_test.go +++ b/vendor/github.com/hashicorp/hcl/decoder_test.go @@ -5,10 +5,10 @@ import ( "path/filepath" "reflect" "testing" + "time" "github.com/davecgh/go-spew/spew" "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/testhelper" ) func 
TestDecode_interface(t *testing.T) { @@ -64,7 +64,7 @@ func TestDecode_interface(t *testing.T) { "qux": "back\\slash", "bar": "new\nline", "qax": `slash\:colon`, - "nested": `${HH\:mm\:ss}`, + "nested": `${HH\\:mm\\:ss}`, "nestedquotes": `${"\"stringwrappedinquotes\""}`, }, }, @@ -73,6 +73,7 @@ func TestDecode_interface(t *testing.T) { false, map[string]interface{}{ "a": 1.02, + "b": 2, }, }, { @@ -82,9 +83,13 @@ func TestDecode_interface(t *testing.T) { }, { "multiline_literal.hcl", + true, + nil, + }, + { + "multiline_literal_with_hil.hcl", false, - map[string]interface{}{"multiline_literal": testhelper.Unix2dos(`hello - world`)}, + map[string]interface{}{"multiline_literal_with_hil": "${hello\n world}"}, }, { "multiline_no_marker.hcl", @@ -94,22 +99,22 @@ func TestDecode_interface(t *testing.T) { { "multiline.hcl", false, - map[string]interface{}{"foo": testhelper.Unix2dos("bar\nbaz\n")}, + map[string]interface{}{"foo": "bar\nbaz\n"}, }, { "multiline_indented.hcl", false, - map[string]interface{}{"foo": testhelper.Unix2dos(" bar\n baz\n")}, + map[string]interface{}{"foo": " bar\n baz\n"}, }, { "multiline_no_hanging_indent.hcl", false, - map[string]interface{}{"foo": testhelper.Unix2dos(" baz\n bar\n foo\n")}, + map[string]interface{}{"foo": " baz\n bar\n foo\n"}, }, { "multiline_no_eof.hcl", false, - map[string]interface{}{"foo": testhelper.Unix2dos("bar\nbaz\n"), "key": "value"}, + map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"}, }, { "multiline.json", @@ -201,6 +206,16 @@ func TestDecode_interface(t *testing.T) { }, }, }, + { + "list_of_lists.hcl", + false, + map[string]interface{}{ + "foo": []interface{}{ + []interface{}{"foo"}, + []interface{}{"bar"}, + }, + }, + }, { "list_of_maps.hcl", false, @@ -274,6 +289,14 @@ func TestDecode_interface(t *testing.T) { }, }, + { + "structure_list_empty.json", + false, + map[string]interface{}{ + "foo": []interface{}{}, + }, + }, + { "nested_block_comment.hcl", false, @@ -357,34 +380,72 @@ func 
TestDecode_interface(t *testing.T) { true, nil, }, + + { + "escape_backslash.hcl", + false, + map[string]interface{}{ + "output": []map[string]interface{}{ + map[string]interface{}{ + "one": `${replace(var.sub_domain, ".", "\\.")}`, + "two": `${replace(var.sub_domain, ".", "\\\\.")}`, + "many": `${replace(var.sub_domain, ".", "\\\\\\\\.")}`, + }, + }, + }, + }, + + { + "git_crypt.hcl", + true, + nil, + }, + + { + "object_with_bool.hcl", + false, + map[string]interface{}{ + "path": []map[string]interface{}{ + map[string]interface{}{ + "policy": "write", + "permissions": []map[string]interface{}{ + map[string]interface{}{ + "bool": []interface{}{false}, + }, + }, + }, + }, + }, + }, } for _, tc := range cases { - t.Logf("Testing: %s", tc.File) - d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File)) - if err != nil { - t.Fatalf("err: %s", err) - } + t.Run(tc.File, func(t *testing.T) { + d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File)) + if err != nil { + t.Fatalf("err: %s", err) + } - var out interface{} - err = Decode(&out, string(d)) - if (err != nil) != tc.Err { - t.Fatalf("Input: %s\n\nError: %s", tc.File, err) - } + var out interface{} + err = Decode(&out, string(d)) + if (err != nil) != tc.Err { + t.Fatalf("Input: %s\n\nError: %s", tc.File, err) + } - if !reflect.DeepEqual(out, tc.Out) { - t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out) - } + if !reflect.DeepEqual(out, tc.Out) { + t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out) + } - var v interface{} - err = Unmarshal(d, &v) - if (err != nil) != tc.Err { - t.Fatalf("Input: %s\n\nError: %s", tc.File, err) - } + var v interface{} + err = Unmarshal(d, &v) + if (err != nil) != tc.Err { + t.Fatalf("Input: %s\n\nError: %s", tc.File, err) + } - if !reflect.DeepEqual(v, tc.Out) { - t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out) - } + if !reflect.DeepEqual(v, tc.Out) { + t.Fatalf("Input: %s. 
Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out) + } + }) } } @@ -748,6 +809,59 @@ func TestDecode_intString(t *testing.T) { } } +func TestDecode_float32(t *testing.T) { + var value struct { + A float32 `hcl:"a"` + B float32 `hcl:"b"` + } + + err := Decode(&value, testReadFile(t, "float.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + if got, want := value.A, float32(1.02); got != want { + t.Fatalf("wrong result %#v; want %#v", got, want) + } + if got, want := value.B, float32(2); got != want { + t.Fatalf("wrong result %#v; want %#v", got, want) + } +} + +func TestDecode_float64(t *testing.T) { + var value struct { + A float64 `hcl:"a"` + B float64 `hcl:"b"` + } + + err := Decode(&value, testReadFile(t, "float.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + if got, want := value.A, float64(1.02); got != want { + t.Fatalf("wrong result %#v; want %#v", got, want) + } + if got, want := value.B, float64(2); got != want { + t.Fatalf("wrong result %#v; want %#v", got, want) + } +} + +func TestDecode_intStringAliased(t *testing.T) { + var value struct { + Count time.Duration + } + + err := Decode(&value, testReadFile(t, "basic_int_string.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + if value.Count != time.Duration(3) { + t.Fatalf("bad: %#v", value.Count) + } +} + func TestDecode_Node(t *testing.T) { // given var value struct { diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go index ea3734f..6e5ef65 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -156,7 +156,8 @@ func (o *ObjectKey) Pos() token.Pos { type LiteralType struct { Token token.Token - // associated line comment, only when used in a list + // comment types, only used when in a list + LeadComment *CommentGroup LineComment *CommentGroup } diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go 
b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go index 85e536d..2380d71 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go @@ -58,7 +58,7 @@ func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts res, err := printer.Format(src) if err != nil { - return err + return fmt.Errorf("In %s: %s", filename, err) } if !bytes.Equal(src, res) { diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go index f46ed4c..098e1bc 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -3,6 +3,7 @@ package parser import ( + "bytes" "errors" "fmt" "strings" @@ -36,6 +37,11 @@ func newParser(src []byte) *Parser { // Parse returns the fully parsed source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { + // normalize all line endings + // since the scanner and output only work with "\n" line endings, we may + // end up with dangling "\r" characters in the parsed data. + src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) + p := newParser(src) return p.Parse() } @@ -50,7 +56,7 @@ func (p *Parser) Parse() (*ast.File, error) { scerr = &PosError{Pos: pos, Err: errors.New(msg)} } - f.Node, err = p.objectList() + f.Node, err = p.objectList(false) if scerr != nil { return nil, scerr } @@ -62,11 +68,23 @@ func (p *Parser) Parse() (*ast.File, error) { return f, nil } -func (p *Parser) objectList() (*ast.ObjectList, error) { +// objectList parses a list of items within an object (generally k/v pairs). +// The parameter" obj" tells this whether to we are within an object (braces: +// '{', '}') or just at the top level. If we're within an object, we end +// at an RBRACE. 
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList")) node := &ast.ObjectList{} for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + n, err := p.objectItem() if err == errEofToken { break // we are finished @@ -179,9 +197,12 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) { keyStr = append(keyStr, k.Token.Text) } - return nil, fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")) + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")), + } } // do a look-ahead for line comment @@ -244,7 +265,10 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount++ keys = append(keys, &ast.ObjectKey{Token: p.tok}) case token.ILLEGAL: - fmt.Println("illegal") + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } default: return keys, &PosError{ Pos: p.tok.Pos, @@ -288,7 +312,7 @@ func (p *Parser) objectType() (*ast.ObjectType, error) { Lbrace: p.tok.Pos, } - l, err := p.objectList() + l, err := p.objectList(true) // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's an syntax error and we just return it. 
@@ -296,9 +320,12 @@ func (p *Parser) objectType() (*ast.ObjectType, error) { return nil, err } - // If there is no error, we should be at a RBRACE to end the object - if p.tok.Type != token.RBRACE { - return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type) + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), + } } o.List = l @@ -331,12 +358,18 @@ func (p *Parser) listType() (*ast.ListType, error) { } } switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: node, err := p.literalType() if err != nil { return nil, err } + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + l.Add(node) needComma = true case token.COMMA: @@ -367,12 +400,16 @@ func (p *Parser) listType() (*ast.ListType, error) { } l.Add(node) needComma = true - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet case token.LBRACK: - // TODO(arslan) should we support nested lists? 
Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) case token.RBRACK: // finished l.Rbrack = p.tok.Pos diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go index 2756d06..2702122 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go @@ -59,12 +59,12 @@ func TestListType(t *testing.T) { []token.Type{token.NUMBER, token.STRING}, }, { - `foo = []`, - []token.Type{}, + `foo = [false]`, + []token.Type{token.BOOL}, }, { - `foo = ["123", 123]`, - []token.Type{token.STRING, token.NUMBER}, + `foo = []`, + []token.Type{}, }, { `foo = [1, @@ -152,6 +152,109 @@ func TestListOfMaps_requiresComma(t *testing.T) { } } +func TestListType_leadComment(t *testing.T) { + var literals = []struct { + src string + comment []string + }{ + { + `foo = [ + 1, + # bar + 2, + 3, + ]`, + []string{"", "# bar", ""}, + }, + } + + for _, l := range literals { + p := newParser([]byte(l.src)) + item, err := p.objectItem() + if err != nil { + t.Fatal(err) + } + + list, ok := item.Val.(*ast.ListType) + if !ok { + t.Fatalf("node should be of type LiteralType, got: %T", item.Val) + } + + if len(list.List) != len(l.comment) { + t.Fatalf("bad: %d", len(list.List)) + } + + for i, li := range list.List { + lt := li.(*ast.LiteralType) + comment := l.comment[i] + + if (lt.LeadComment == nil) != (comment == "") { + t.Fatalf("bad: %#v", lt) + } + + if comment == "" { + continue + } + + actual := lt.LeadComment.List[0].Text + if actual != comment { + t.Fatalf("bad: %q %q", actual, comment) + } + } + } +} + +func TestListType_lineComment(t *testing.T) { + var literals = []struct { + src string + comment []string + }{ 
+ { + `foo = [ + 1, + 2, # bar + 3, + ]`, + []string{"", "# bar", ""}, + }, + } + + for _, l := range literals { + p := newParser([]byte(l.src)) + item, err := p.objectItem() + if err != nil { + t.Fatal(err) + } + + list, ok := item.Val.(*ast.ListType) + if !ok { + t.Fatalf("node should be of type LiteralType, got: %T", item.Val) + } + + if len(list.List) != len(l.comment) { + t.Fatalf("bad: %d", len(list.List)) + } + + for i, li := range list.List { + lt := li.(*ast.LiteralType) + comment := l.comment[i] + + if (lt.LineComment == nil) != (comment == "") { + t.Fatalf("bad: %s", lt) + } + + if comment == "" { + continue + } + + actual := lt.LineComment.List[0].Text + if actual != comment { + t.Fatalf("bad: %q %q", actual, comment) + } + } + } +} + func TestObjectType(t *testing.T) { var literals = []struct { src string @@ -204,6 +307,8 @@ func TestObjectType(t *testing.T) { } for _, l := range literals { + t.Logf("Source: %s", l.src) + p := newParser([]byte(l.src)) // p.enableTrace = true item, err := p.objectItem() @@ -282,6 +387,30 @@ func TestObjectKey(t *testing.T) { } } +func TestCommentGroup(t *testing.T) { + var cases = []struct { + src string + groups int + }{ + {"# Hello\n# World", 1}, + {"# Hello\r\n# Windows", 1}, + } + + for _, tc := range cases { + t.Run(tc.src, func(t *testing.T) { + p := newParser([]byte(tc.src)) + file, err := p.Parse() + if err != nil { + t.Fatalf("parse error: %s", err) + } + + if len(file.Comments) != tc.groups { + t.Fatalf("bad: %#v", file.Comments) + } + }) + } +} + // Official HCL tests func TestParse(t *testing.T) { cases := []struct { @@ -296,6 +425,10 @@ func TestParse(t *testing.T) { "comment.hcl", false, }, + { + "comment_crlf.hcl", + false, + }, { "comment_lastline.hcl", false, @@ -336,6 +469,10 @@ func TestParse(t *testing.T) { "complex.hcl", false, }, + { + "complex_crlf.hcl", + false, + }, { "types.hcl", false, @@ -368,20 +505,38 @@ func TestParse(t *testing.T) { "object_key_without_value.hcl", true, }, + { + 
"object_key_assign_without_value.hcl", + true, + }, + { + "object_key_assign_without_value2.hcl", + true, + }, + { + "object_key_assign_without_value3.hcl", + true, + }, + { + "git_crypt.hcl", + true, + }, } const fixtureDir = "./test-fixtures" for _, tc := range cases { - d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name)) - if err != nil { - t.Fatalf("err: %s", err) - } + t.Run(tc.Name, func(t *testing.T) { + d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name)) + if err != nil { + t.Fatalf("err: %s", err) + } - _, err = Parse(d) - if (err != nil) != tc.Err { - t.Fatalf("Input: %s\n\nError: %s", tc.Name, err) - } + v, err := Parse(d) + if (err != nil) != tc.Err { + t.Fatalf("Input: %s\n\nError: %s\n\nAST: %#v", tc.Name, err, v) + } + }) } } diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment.hcl index 1ff7f29..e32be87 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment.hcl +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment.hcl @@ -1,15 +1,15 @@ -// Foo - -/* Bar */ - -/* -/* -Baz -*/ - -# Another - -# Multiple -# Lines - -foo = "bar" +// Foo + +/* Bar */ + +/* +/* +Baz +*/ + +# Another + +# Multiple +# Lines + +foo = "bar" diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment_crlf.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment_crlf.hcl new file mode 100644 index 0000000..1ff7f29 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment_crlf.hcl @@ -0,0 +1,15 @@ +// Foo + +/* Bar */ + +/* +/* +Baz +*/ + +# Another + +# Multiple +# Lines + +foo = "bar" diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/complex_crlf.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/complex_crlf.hcl new file mode 100644 index 0000000..9b071d1 --- /dev/null +++ 
b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/complex_crlf.hcl @@ -0,0 +1,42 @@ +variable "foo" { + default = "bar" + description = "bar" +} + +variable "groups" { } + +provider "aws" { + access_key = "foo" + secret_key = "bar" +} + +provider "do" { + api_key = "${var.foo}" +} + +resource "aws_security_group" "firewall" { + count = 5 +} + +resource aws_instance "web" { + ami = "${var.foo}" + security_groups = [ + "foo", + "${aws_security_group.firewall.foo}", + "${element(split(\",\", var.groups)}", + ] + network_interface = { + device_index = 0 + description = "Main network interface" + } +} + +resource "aws_instance" "db" { + security_groups = "${aws_security_group.firewall.*.id}" + VPC = "foo" + depends_on = ["aws_instance.web"] +} + +output "web_ip" { + value = "${aws_instance.web.private_ip}" +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/git_crypt.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/git_crypt.hcl new file mode 100644 index 0000000..f691948 Binary files /dev/null and b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/git_crypt.hcl differ diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value.hcl new file mode 100644 index 0000000..37a2c7a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value.hcl @@ -0,0 +1,3 @@ +foo { + bar = +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value2.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value2.hcl new file mode 100644 index 0000000..83ec5e6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value2.hcl @@ -0,0 +1,4 @@ +foo { + baz = 7 + bar = +} diff --git 
a/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value3.hcl b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value3.hcl new file mode 100644 index 0000000..21136d1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value3.hcl @@ -0,0 +1,4 @@ +foo { + bar = + baz = 7 +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go index 218b56a..c896d58 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go @@ -62,6 +62,14 @@ func (p *printer) collectComments(node ast.Node) { ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { switch t := nn.(type) { case *ast.LiteralType: + if t.LeadComment != nil { + for _, comment := range t.LeadComment.List { + if _, ok := standaloneComments[comment.Pos()]; ok { + delete(standaloneComments, comment.Pos()) + } + } + } + if t.LineComment != nil { for _, comment := range t.LineComment.List { if _, ok := standaloneComments[comment.Pos()]; ok { @@ -95,7 +103,6 @@ func (p *printer) collectComments(node ast.Node) { } sort.Sort(ByPosition(p.standaloneComments)) - } // output prints creates b printable HCL output and returns it. 
@@ -104,35 +111,60 @@ func (p *printer) output(n interface{}) []byte { switch t := n.(type) { case *ast.File: + // File doesn't trace so we add the tracing here + defer un(trace(p, "File")) return p.output(t.Node) case *ast.ObjectList: - var index int - var nextItem token.Pos - var commented bool - for { - // TODO(arslan): refactor below comment printing, we have the same in objectType - for _, c := range p.standaloneComments { - for _, comment := range c.List { - if index != len(t.Items) { - nextItem = t.Items[index].Pos() - } else { - nextItem = token.Pos{Offset: infinity, Line: infinity} - } + defer un(trace(p, "ObjectList")) + var index int + for { + // Determine the location of the next actual non-comment + // item. If we're at the end, the next item is at "infinity" + var nextItem token.Pos + if index != len(t.Items) { + nextItem = t.Items[index].Pos() + } else { + nextItem = token.Pos{Offset: infinity, Line: infinity} + } + + // Go through the standalone comments in the file and print out + // the comments that we should be for this object item. + for _, c := range p.standaloneComments { + // Go through all the comments in the group. The group + // should be printed together, not separated by double newlines. + printed := false + newlinePrinted := false + for _, comment := range c.List { + // We only care about comments after the previous item + // we've printed so that comments are printed in the + // correct locations (between two objects for example). + // And before the next item. if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { // if we hit the end add newlines so we can print the comment - if index == len(t.Items) { + // we don't do this if prev is invalid which means the + // beginning of the file since the first comment should + // be at the first line. + if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { buf.Write([]byte{newline, newline}) + newlinePrinted = true } + // Write the actual comment. 
buf.WriteString(comment.Text) - buf.WriteByte(newline) - if index != len(t.Items) { - buf.WriteByte(newline) - } + + // Set printed to true to note that we printed something + printed = true } } + + // If we're not at the last item, write a new line so + // that there is a newline separating this comment from + // the next object. + if printed && index != len(t.Items) { + buf.WriteByte(newline) + } } if index == len(t.Items) { @@ -140,8 +172,29 @@ func (p *printer) output(n interface{}) []byte { } buf.Write(p.output(t.Items[index])) - if !commented && index != len(t.Items)-1 { - buf.Write([]byte{newline, newline}) + if index != len(t.Items)-1 { + // Always write a newline to separate us from the next item + buf.WriteByte(newline) + + // Need to determine if we're going to separate the next item + // with a blank line. The logic here is simple, though there + // are a few conditions: + // + // 1. The next object is more than one line away anyways, + // so we need an empty line. + // + // 2. The next object is not a "single line" object, so + // we need an empty line. + // + // 3. This current object is not a single line object, + // so we need an empty line. 
+ current := t.Items[index] + next := t.Items[index+1] + if next.Pos().Line != t.Items[index].Pos().Line+1 || + !p.isSingleLineObject(next) || + !p.isSingleLineObject(current) { + buf.WriteByte(newline) + } } index++ } @@ -165,7 +218,8 @@ func (p *printer) output(n interface{}) []byte { func (p *printer) literalType(lit *ast.LiteralType) []byte { result := []byte(lit.Token.Text) - if lit.Token.Type == token.HEREDOC { + switch lit.Token.Type { + case token.HEREDOC: // Clear the trailing newline from heredocs if result[len(result)-1] == '\n' { result = result[:len(result)-1] @@ -173,6 +227,12 @@ func (p *printer) literalType(lit *ast.LiteralType) []byte { // Poison lines 2+ so that we don't indent them result = p.heredocIndent(result) + case token.STRING: + // If this is a multiline string, poison lines 2+ so we don't + // indent them. + if bytes.IndexRune(result, '\n') >= 0 { + result = p.heredocIndent(result) + } } return result @@ -226,17 +286,24 @@ func (p *printer) objectType(o *ast.ObjectType) []byte { var nextItem token.Pos var commented, newlinePrinted bool for { + // Determine the location of the next actual non-comment + // item. If we're at the end, the next item is the closing brace + if index != len(o.List.Items) { + nextItem = o.List.Items[index].Pos() + } else { + nextItem = o.Rbrace + } - // Print stand alone comments + // Go through the standalone comments in the file and print out + // the comments that we should be for this object item. for _, c := range p.standaloneComments { + printed := false + var lastCommentPos token.Pos for _, comment := range c.List { - // if we hit the end, last item should be the brace - if index != len(o.List.Items) { - nextItem = o.List.Items[index].Pos() - } else { - nextItem = o.Rbrace - } - + // We only care about comments after the previous item + // we've printed so that comments are printed in the + // correct locations (between two objects for example). + // And before the next item. 
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { // If there are standalone comments and the initial newline has not // been printed yet, do it now. @@ -251,11 +318,33 @@ func (p *printer) objectType(o *ast.ObjectType) []byte { buf.WriteByte(newline) } - buf.Write(p.indent([]byte(comment.Text))) + // Store this position + lastCommentPos = comment.Pos() + + // output the comment itself + buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) + + // Set printed to true to note that we printed something + printed = true + + /* + if index != len(o.List.Items) { + buf.WriteByte(newline) // do not print on the end + } + */ + } + } + + // Stuff to do if we had comments + if printed { + // Always write a newline + buf.WriteByte(newline) + + // If there is another item in the object and our comment + // didn't hug it directly, then make sure there is a blank + // line separating them. + if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { buf.WriteByte(newline) - if index != len(o.List.Items) { - buf.WriteByte(newline) // do not print on the end - } } } } @@ -435,16 +524,54 @@ func (p *printer) list(l *ast.ListType) []byte { } insertSpaceBeforeItem := false + lastHadLeadComment := false for i, item := range l.List { + // Keep track of whether this item is a heredoc since that has + // unique behavior. + heredoc := false + if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { + heredoc = true + } + if item.Pos().Line != l.Lbrack.Line { // multiline list, add newline before we add each item buf.WriteByte(newline) insertSpaceBeforeItem = false + + // If we have a lead comment, then we want to write that first + leadComment := false + if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { + leadComment = true + + // If this isn't the first item and the previous element + // didn't have a lead comment, then we need to add an extra + // newline to properly space things out. 
If it did have a + // lead comment previously then this would be done + // automatically. + if i > 0 && !lastHadLeadComment { + buf.WriteByte(newline) + } + + for _, comment := range lit.LeadComment.List { + buf.Write(p.indent([]byte(comment.Text))) + buf.WriteByte(newline) + } + } + // also indent each line val := p.output(item) curLen := len(val) buf.Write(p.indent(val)) - buf.WriteString(",") + + // if this item is a heredoc, then we output the comma on + // the next line. This is the only case this happens. + comma := []byte{','} + if heredoc { + buf.WriteByte(newline) + comma = p.indent(comma) + } + + buf.Write(comma) if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { // if the next item doesn't have any comments, do not align @@ -458,19 +585,51 @@ func (p *printer) list(l *ast.ListType) []byte { } } - if i == len(l.List)-1 { + lastItem := i == len(l.List)-1 + if lastItem { buf.WriteByte(newline) } + + if leadComment && !lastItem { + buf.WriteByte(newline) + } + + lastHadLeadComment = leadComment } else { if insertSpaceBeforeItem { buf.WriteByte(blank) insertSpaceBeforeItem = false } - buf.Write(p.output(item)) + + // Output the item itself + // also indent each line + val := p.output(item) + curLen := len(val) + buf.Write(val) + + // If this is a heredoc item we always have to output a newline + // so that it parses properly. + if heredoc { + buf.WriteByte(newline) + } + + // If this isn't the last element, write a comma. 
if i != len(l.List)-1 { buf.WriteString(",") insertSpaceBeforeItem = true } + + if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { + // if the next item doesn't have any comments, do not align + buf.WriteByte(blank) // align one space + for i := 0; i < longestLine-curLen; i++ { + buf.WriteByte(blank) + } + + for _, comment := range lit.LineComment.List { + buf.WriteString(comment.Text) + } + } } } @@ -547,6 +706,36 @@ func (p *printer) heredocIndent(buf []byte) []byte { return res } +// isSingleLineObject tells whether the given object item is a single +// line object such as "obj {}". +// +// A single line object: +// +// * has no lead comments (hence multi-line) +// * has no assignment +// * has no values in the stanza (within {}) +// +func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { + // If there is a lead comment, can't be one line + if val.LeadComment != nil { + return false + } + + // If there is assignment, we always break by line + if val.Assign.IsValid() { + return false + } + + // If it isn't an object type, then its not a single line object + ot, ok := val.Val.(*ast.ObjectType) + if !ok { + return false + } + + // If the object has no items, it is single line! 
+ return len(ot.List.Items) == 0 +} + func lines(txt string) int { endline := 1 for i := 0; i < len(txt); i++ { diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go index a296fc8..6617ab8 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go @@ -62,6 +62,5 @@ func Format(src []byte) ([]byte, error) { // Add trailing newline to result buf.WriteString("\n") - return buf.Bytes(), nil } diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go index abb22a3..5248259 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go @@ -1,8 +1,3 @@ -// +build !windows -// TODO(jen20): These need fixing on Windows but printer is not used right now -// and red CI is making it harder to process other bugs, so ignore until -// we get around to fixing them.package printer - package printer import ( @@ -31,18 +26,32 @@ type entry struct { var data = []entry{ {"complexhcl.input", "complexhcl.golden"}, {"list.input", "list.golden"}, + {"list_comment.input", "list_comment.golden"}, {"comment.input", "comment.golden"}, + {"comment_crlf.input", "comment.golden"}, {"comment_aligned.input", "comment_aligned.golden"}, + {"comment_array.input", "comment_array.golden"}, + {"comment_end_file.input", "comment_end_file.golden"}, + {"comment_multiline_indent.input", "comment_multiline_indent.golden"}, + {"comment_multiline_no_stanza.input", "comment_multiline_no_stanza.golden"}, + {"comment_multiline_stanza.input", "comment_multiline_stanza.golden"}, + {"comment_newline.input", "comment_newline.golden"}, + {"comment_object_multi.input", "comment_object_multi.golden"}, {"comment_standalone.input", "comment_standalone.golden"}, {"empty_block.input", "empty_block.golden"}, {"list_of_objects.input", 
"list_of_objects.golden"}, + {"multiline_string.input", "multiline_string.golden"}, + {"object_singleline.input", "object_singleline.golden"}, + {"object_with_heredoc.input", "object_with_heredoc.golden"}, } func TestFiles(t *testing.T) { for _, e := range data { source := filepath.Join(dataDir, e.source) golden := filepath.Join(dataDir, e.golden) - check(t, source, golden) + t.Run(e.source, func(t *testing.T) { + check(t, source, golden) + }) } } @@ -96,8 +105,8 @@ func diff(aname, bname string, a, b []byte) error { for i := 0; i < len(a) && i < len(b); i++ { ch := a[i] if ch != b[i] { - fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs)) - fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs)) + fmt.Fprintf(&buf, "\n%s:%d:%d: %q", aname, line, i-offs+1, lineAt(a, offs)) + fmt.Fprintf(&buf, "\n%s:%d:%d: %q", bname, line, i-offs+1, lineAt(b, offs)) fmt.Fprintf(&buf, "\n\n") break } @@ -124,7 +133,7 @@ func format(src []byte) ([]byte, error) { // make sure formatted output is syntactically correct if _, err := parser.Parse(formatted); err != nil { - return nil, fmt.Errorf("parse: %s\n%s", err, src) + return nil, fmt.Errorf("parse: %s\n%s", err, formatted) } return formatted, nil diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden new file mode 100644 index 0000000..e778eaf --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden @@ -0,0 +1,13 @@ +banana = [ + # I really want to comment this item in the array. 
+ "a", + + # This as well + "b", + + "c", # And C + "d", + + # And another + "e", +] diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input new file mode 100644 index 0000000..e778eaf --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input @@ -0,0 +1,13 @@ +banana = [ + # I really want to comment this item in the array. + "a", + + # This as well + "b", + + "c", # And C + "d", + + # And another + "e", +] diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input new file mode 100644 index 0000000..5d27206 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input @@ -0,0 +1,37 @@ +// A standalone comment is a comment which is not attached to any kind of node + + // This comes from Terraform, as a test +variable "foo" { + # Standalone comment should be still here + + default = "bar" + description = "bar" # yooo +} + +/* This is a multi line standalone +comment*/ + + +// fatih arslan +/* This is a developer test +account and a multine comment */ +developer = [ "fatih", "arslan"] // fatih arslan + +# One line here +numbers = [1,2] // another line here + + # Another comment +variable = { + description = "bar" # another yooo + foo { + # Nested standalone + + bar = "fatih" + } +} + + // lead comment +foo { + bar = "fatih" // line comment 2 +} // line comment 3 + diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden new file mode 100644 index 0000000..dbeae36 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden @@ -0,0 +1,6 @@ +resource "blah" "blah" {} + +// +// +// + diff --git 
a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input new file mode 100644 index 0000000..68c4c28 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input @@ -0,0 +1,5 @@ +resource "blah" "blah" {} + +// +// +// diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden new file mode 100644 index 0000000..74c4ccd --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden @@ -0,0 +1,12 @@ +resource "provider" "resource" { + /* + SPACE_SENSITIVE_CODE = < 0 { + s.err("unexpected null character (0x00)") + return eof + } + // debug // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) return ch @@ -474,7 +480,7 @@ func (s *Scanner) scanString() { // read character after quote ch := s.next() - if ch < 0 || ch == eof { + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { s.err("literal not terminated") return } diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go index b167811..4f2c9cb 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go @@ -363,7 +363,7 @@ func TestRealExample(t *testing.T) { provider "aws" { access_key = "foo" - secret_key = "bar" + secret_key = "${replace(var.foo, ".", "\\.")}" } resource "aws_security_group" "firewall" { @@ -416,7 +416,7 @@ EOF {token.STRING, `"foo"`}, {token.IDENT, `secret_key`}, {token.ASSIGN, `=`}, - {token.STRING, `"bar"`}, + {token.STRING, `"${replace(var.foo, ".", "\\.")}"`}, {token.RBRACE, `}`}, {token.IDENT, `resource`}, {token.STRING, `"aws_security_group"`}, @@ -476,6 +476,36 @@ EOF } +func TestScan_crlf(t 
*testing.T) { + complexHCL := "foo {\r\n bar = \"baz\"\r\n}\r\n" + + literals := []struct { + tokenType token.Type + literal string + }{ + {token.IDENT, `foo`}, + {token.LBRACE, `{`}, + {token.IDENT, `bar`}, + {token.ASSIGN, `=`}, + {token.STRING, `"baz"`}, + {token.RBRACE, `}`}, + {token.EOF, ``}, + } + + s := New([]byte(complexHCL)) + for _, l := range literals { + tok := s.Scan() + if l.tokenType != tok.Type { + t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String()) + } + + if l.literal != tok.Text { + t.Errorf("got:\n%+v\n%s\n want:\n%+v\n%s\n", []byte(tok.String()), tok, []byte(l.literal), l.literal) + } + } + +} + func TestError(t *testing.T) { testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) @@ -494,7 +524,8 @@ func TestError(t *testing.T) { testError(t, `"`, "1:2", "literal not terminated", token.STRING) testError(t, `"abc`, "1:5", "literal not terminated", token.STRING) - testError(t, `"abc`+"\n", "2:1", "literal not terminated", token.STRING) + testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING) + testError(t, `"${abc`+"\n", "2:1", "literal not terminated", token.STRING) testError(t, `/*/`, "1:4", "comment not terminated", token.COMMENT) testError(t, `/foo`, "1:1", "expected '/' for comment", token.COMMENT) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go index 956c899..5f981ea 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -27,6 +27,9 @@ func Unquote(s string) (t string, err error) { if quote != '"' { return "", ErrSyntax } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } // Is it trivial? Avoid allocation. 
if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { @@ -46,7 +49,7 @@ func Unquote(s string) (t string, err error) { for len(s) > 0 { // If we're starting a '${}' then let it through un-unquoted. // Specifically: we don't unquote any characters within the `${}` - // section, except for escaped backslashes, which we handle specifically. + // section. if s[0] == '$' && len(s) > 1 && s[1] == '{' { buf = append(buf, '$', '{') s = s[2:] @@ -61,16 +64,6 @@ func Unquote(s string) (t string, err error) { s = s[size:] - // We special case escaped backslashes in interpolations, converting - // them to their unescaped equivalents. - if r == '\\' { - q, _ := utf8.DecodeRuneInString(s) - switch q { - case '\\': - continue - } - } - n := utf8.EncodeRune(runeTmp[:], r) buf = append(buf, runeTmp[:n]...) @@ -94,6 +87,10 @@ func Unquote(s string) (t string, err error) { } } + if s[0] == '\n' { + return "", ErrSyntax + } + c, multibyte, ss, err := unquoteChar(s, quote) if err != nil { return "", err diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go index af2d848..65be375 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go @@ -39,7 +39,8 @@ var unquotetests = []unQuoteTest{ {`"${file("\"foo\"")}"`, `${file("\"foo\"")}`}, {`"echo ${var.region}${element(split(",",var.zones),0)}"`, `echo ${var.region}${element(split(",",var.zones),0)}`}, - {`"${HH\\:mm\\:ss}"`, `${HH\:mm\:ss}`}, + {`"${HH\\:mm\\:ss}"`, `${HH\\:mm\\:ss}`}, + {`"${\n}"`, `${\n}`}, } var misquoted = []string{ @@ -65,9 +66,12 @@ var misquoted = []string{ "`\"", `"\'"`, `'\"'`, + "\"\n\"", + "\"\\n\n\"", "'\n'", `"${"`, `"${foo{}"`, + "\"${foo}\n\"", } func TestUnquote(t *testing.T) { diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go b/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go index 932951c..e4b4af2 100644 --- 
a/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token_test.go @@ -51,6 +51,12 @@ func TestTokenValue(t *testing.T) { {Token{Type: STRING, Text: `"foo"`}, "foo"}, {Token{Type: STRING, Text: `"foo\nbar"`}, "foo\nbar"}, {Token{Type: STRING, Text: `"${file("foo")}"`}, `${file("foo")}`}, + { + Token{ + Type: STRING, + Text: `"${replace("foo", ".", "\\.")}"`, + }, + `${replace("foo", ".", "\\.")}`}, {Token{Type: HEREDOC, Text: "< + + + + +**Environment:** + + +* Vault Version: +* Operating System/Architecture: + +**Vault Config File:** + + +**Startup Log Output:** + + +**Expected Behavior:** + + +**Actual Behavior:** + + +**Steps to Reproduce:** + + +**Important Factoids:** + + +**References:** + diff --git a/vendor/github.com/hashicorp/vault/.gitignore b/vendor/github.com/hashicorp/vault/.gitignore index 89cb1b7..dbd3bc3 100644 --- a/vendor/github.com/hashicorp/vault/.gitignore +++ b/vendor/github.com/hashicorp/vault/.gitignore @@ -46,6 +46,7 @@ Vagrantfile .DS_Store .idea +.vscode dist/* diff --git a/vendor/github.com/hashicorp/vault/.hooks/pre-push b/vendor/github.com/hashicorp/vault/.hooks/pre-push new file mode 100755 index 0000000..ac56a48 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/.hooks/pre-push @@ -0,0 +1,14 @@ +#!/bin/sh + +remote="$1" + +if [ "$remote" = "enterprise" ]; then + exit 0 +fi + +if [ -f version/version_ent.go ]; then + echo "Found enterprise version file while pushing to oss remote" + exit 1 +fi + +exit 0 diff --git a/vendor/github.com/hashicorp/vault/.travis.yml b/vendor/github.com/hashicorp/vault/.travis.yml index e5ad1be..aa214be 100644 --- a/vendor/github.com/hashicorp/vault/.travis.yml +++ b/vendor/github.com/hashicorp/vault/.travis.yml @@ -7,7 +7,7 @@ services: - docker go: - - 1.8.1 + - 1.9 matrix: allow_failures: @@ -20,4 +20,5 @@ branches: script: - make bootstrap - - make test testrace + - travis_wait 75 make test + - travis_wait 75 make testrace diff --git 
a/vendor/github.com/hashicorp/vault/CHANGELOG.md b/vendor/github.com/hashicorp/vault/CHANGELOG.md index c721cdf..9ce00fe 100644 --- a/vendor/github.com/hashicorp/vault/CHANGELOG.md +++ b/vendor/github.com/hashicorp/vault/CHANGELOG.md @@ -1,4 +1,411 @@ -## 0.7.1 (Unreleased) +## 0.8.3 (September 19th, 2017) + +CHANGES: + + * Policy input/output standardization: For all built-in authentication + backends, policies can now be specified as a comma-delimited string or an + array if using JSON as API input; on read, policies will be returned as an + array; and the `default` policy will not be forcefully added to policies + saved in configurations. Please note that the `default` policy will continue + to be added to generated tokens, however, rather than backends adding + `default` to the given set of input policies (in some cases, and not in + others), the stored set will reflect the user-specified set. + * `sign-self-issued` modifies Issuer in generated certificates: In 0.8.2 the + endpoint would not modify the Issuer in the generated certificate, leaving + the output self-issued. Although theoretically valid, in practice crypto + stacks were unhappy validating paths containing such certs. As a result, + `sign-self-issued` now encodes the signing CA's Subject DN into the Issuer + DN of the generated certificate. + * `sys/raw` requires enabling: While the `sys/raw` endpoint can be extremely + useful in break-glass or support scenarios, it is also extremely dangerous. + As of now, a configuration file option `raw_storage_endpoint` must be set in + order to enable this API endpoint. Once set, the available functionality has + been enhanced slightly; it now supports listing and decrypting most of + Vault's core data structures, except for the encryption keyring itself. + * `generic` is now `kv`: To better reflect its actual use, the `generic` + backend is now `kv`. Using `generic` will still work for backwards + compatibility. 
+ +FEATURES: + + * **GCE Support for GCP Auth**: GCE instances can now authenticate to Vault + using machine credentials. + * **Support for Kubernetes Service Account Auth**: Kubernetes Service Accounts + can not authenticate to vault using JWT tokens. + +IMPROVEMENTS: + + * configuration: Provide a config option to store Vault server's process ID + (PID) in a file [GH-3321] + * mfa (Enterprise): Add the ability to use identity metadata in username format + * mfa/okta (Enterprise): Add support for configuring base_url for API calls + * secret/pki: `sign-intermediate` will now allow specifying a `ttl` value + longer than the signing CA certificate's NotAfter value. [GH-3325] + * sys/raw: Raw storage access is now disabled by default [GH-3329] + +BUG FIXES: + + * auth/okta: Fix regression that removed the ability to set base_url [GH-3313] + * core: Fix panic while loading leases at startup on ARM processors + [GH-3314] + * secret/pki: Fix `sign-self-issued` encoding the wrong subject public key + [GH-3325] + +## 0.8.2.1 (September 11th, 2017) (Enterprise Only) + +BUG FIXES: + + * Fix an issue upgrading to 0.8.2 for Enterprise customers. + +## 0.8.2 (September 5th, 2017) + +SECURITY: + +* In prior versions of Vault, if authenticating via AWS IAM and requesting a + periodic token, the period was not properly respected. This could lead to + tokens expiring unexpectedly, or a token lifetime being longer than expected. + Upon token renewal with Vault 0.8.2 the period will be properly enforced. + +DEPRECATIONS/CHANGES: + +* `vault ssh` users should supply `-mode` and `-role` to reduce the number of + API calls. A future version of Vault will mark these optional values are + required. Failure to supply `-mode` or `-role` will result in a warning. +* Vault plugins will first briefly run a restricted version of the plugin to + fetch metadata, and then lazy-load the plugin on first request to prevent + crash/deadlock of Vault during the unseal process. 
Plugins will need to be + built with the latest changes in order for them to run properly. + +FEATURES: + +* **Lazy Lease Loading**: On startup, Vault will now load leases from storage + in a lazy fashion (token checks and revocation/renewal requests still force + an immediate load). For larger installations this can significantly reduce + downtime when switching active nodes or bringing Vault up from cold start. +* **SSH CA Login with `vault ssh`**: `vault ssh` now supports the SSH CA + backend for authenticating to machines. It also supports remote host key + verification through the SSH CA backend, if enabled. +* **Signing of Self-Issued Certs in PKI**: The `pki` backend now supports + signing self-issued CA certs. This is useful when switching root CAs. + +IMPROVEMENTS: + + * audit/file: Allow specifying `stdout` as the `file_path` to log to standard + output [GH-3235] + * auth/aws: Allow wildcards in `bound_iam_principal_id` [GH-3213] + * auth/okta: Compare groups case-insensitively since Okta is only + case-preserving [GH-3240] + * auth/okta: Standarize Okta configuration APIs across backends [GH-3245] + * cli: Add subcommand autocompletion that can be enabled with + `vault -autocomplete-install` [GH-3223] + * cli: Add ability to handle wrapped responses when using `vault auth`. What + is output depends on the other given flags; see the help output for that + command for more information. 
[GH-3263] + * core: TLS cipher suites used for cluster behavior can now be set via + `cluster_cipher_suites` in configuration [GH-3228] + * core: The `plugin_name` can now either be specified directly as part of the + parameter or within the `config` object when mounting a secret or auth backend + via `sys/mounts/:path` or `sys/auth/:path` respectively [GH-3202] + * core: It is now possible to update the `description` of a mount when + mount-tuning, although this must be done through the HTTP layer [GH-3285] + * secret/databases/mongo: If an EOF is encountered, attempt reconnecting and + retrying the operation [GH-3269] + * secret/pki: TTLs can now be specified as a string or an integer number of + seconds [GH-3270] + * secret/pki: Self-issued certs can now be signed via + `pki/root/sign-self-issued` [GH-3274] + * storage/gcp: Use application default credentials if they exist [GH-3248] + +BUG FIXES: + + * auth/aws: Properly use role-set period values for IAM-derived token renewals + [GH-3220] + * auth/okta: Fix updating organization/ttl/max_ttl after initial setting + [GH-3236] + * core: Fix PROXY when underlying connection is TLS [GH-3195] + * core: Policy-related commands would sometimes fail to act case-insensitively + [GH-3210] + * storage/consul: Fix parsing TLS configuration when using a bare IPv6 address + [GH-3268] + * plugins: Lazy-load plugins to prevent crash/deadlock during unseal process. + [GH-3255] + * plugins: Skip mounting plugin-based secret and credential mounts when setting + up mounts if the plugin is no longer present in the catalog. [GH-3255] + +## 0.8.1 (August 16th, 2017) + +DEPRECATIONS/CHANGES: + + * PKI Root Generation: Calling `pki/root/generate` when a CA cert/key already + exists will now return a `204` instead of overwriting an existing root. If + you want to recreate the root, first run a delete operation on `pki/root` + (requires `sudo` capability), then generate it again. 
+ +FEATURES: + + * **Oracle Secret Backend**: There is now an external plugin to support leased + credentials for Oracle databases (distributed separately). + * **GCP IAM Auth Backend**: There is now an authentication backend that allows + using GCP IAM credentials to retrieve Vault tokens. This is available as + both a plugin and built-in to Vault. + * **PingID Push Support for Path-Baased MFA (Enterprise)**: PingID Push can + now be used for MFA with the new path-based MFA introduced in Vault + Enterprise 0.8. + * **Permitted DNS Domains Support in PKI**: The `pki` backend now supports + specifying permitted DNS domains for CA certificates, allowing you to + narrowly scope the set of domains for which a CA can issue or sign child + certificates. + * **Plugin Backend Reload Endpoint**: Plugin backends can now be triggered to + reload using the `sys/plugins/reload/backend` endpoint and providing either + the plugin name or the mounts to reload. + * **Self-Reloading Plugins**: The plugin system will now attempt to reload a + crashed or stopped plugin, once per request. 
+ +IMPROVEMENTS: + + * auth/approle: Allow array input for policies in addition to comma-delimited + strings [GH-3163] + * plugins: Send logs through Vault's logger rather than stdout [GH-3142] + * secret/pki: Add `pki/root` delete operation [GH-3165] + * secret/pki: Don't overwrite an existing root cert/key when calling generate + [GH-3165] + +BUG FIXES: + + * aws: Don't prefer a nil HTTP client over an existing one [GH-3159] + * core: If there is an error when checking for create/update existence, return + 500 instead of 400 [GH-3162] + * secret/database: Avoid creating usernames that are too long for legacy MySQL + [GH-3138] + +## 0.8.0 (August 9th, 2017) + +SECURITY: + + * We've added a note to the docs about the way the GitHub auth backend works + as it may not be readily apparent that GitHub personal access tokens, which + are used by the backend, can be used for unauthorized access if they are + stolen from third party services and access to Vault is public. + +DEPRECATIONS/CHANGES: + + * Database Plugin Backends: Passwords generated for these backends now + enforce stricter password requirements, as opposed to the previous behavior + of returning a randomized UUID. Passwords are of length 20, and have a `A1a-` + characters prepended to ensure stricter requirements. No regressions are + expected from this change. (For database backends that were previously + substituting underscores for hyphens in passwords, this will remain the + case.) + * Lease Endpoints: The endpoints `sys/renew`, `sys/revoke`, `sys/revoke-prefix`, + `sys/revoke-force` have been deprecated and relocated under `sys/leases`. + Additionally, the deprecated path `sys/revoke-force` now requires the `sudo` + capability. + * Response Wrapping Lookup Unauthenticated: The `sys/wrapping/lookup` endpoint + is now unauthenticated. This allows introspection of the wrapping info by + clients that only have the wrapping token without then invalidating the + token. 
Validation functions/checks are still performed on the token. + +FEATURES: + + * **Cassandra Storage**: Cassandra can now be used for Vault storage + * **CockroachDB Storage**: CockroachDB can now be used for Vault storage + * **CouchDB Storage**: CouchDB can now be used for Vault storage + * **SAP HANA Database Plugin**: The `databases` backend can now manage users + for SAP HANA databases + * **Plugin Backends**: Vault now supports running secret and auth backends as + plugins. Plugins can be mounted like normal backends and can be developed + independently from Vault. + * **PROXY Protocol Support** Vault listeners can now be configured to honor + PROXY protocol v1 information to allow passing real client IPs into Vault. A + list of authorized addresses (IPs or subnets) can be defined and + accept/reject behavior controlled. + * **Lease Lookup and Browsing in the Vault Enterprise UI**: Vault Enterprise UI + now supports lookup and listing of leases and the associated actions from the + `sys/leases` endpoints in the API. These are located in the new top level + navigation item "Leases". + * **Filtered Mounts for Performance Mode Replication**: Whitelists or + blacklists of mounts can be defined per-secondary to control which mounts + are actually replicated to that secondary. This can allow targeted + replication of specific sets of data to specific geolocations/datacenters. + * **Disaster Recovery Mode Replication (Enterprise Only)**: There is a new + replication mode, Disaster Recovery (DR), that performs full real-time + replication (including tokens and leases) to DR secondaries. DR secondaries + cannot handle client requests, but can be promoted to primary as needed for + failover. + * **Manage New Replication Features in the Vault Enterprise UI**: Support for + Replication features in Vault Enterprise UI has expanded to include new DR + Replication mode and management of Filtered Mounts in Performance Replication + mode. 
+ * **Vault Identity (Enterprise Only)**: Vault's new Identity system allows + correlation of users across tokens. At present this is only used for MFA, + but will be the foundation of many other features going forward. + * **Duo Push, Okta Push, and TOTP MFA For All Authenticated Paths (Enterprise + Only)**: A brand new MFA system built on top of Identity allows MFA + (currently Duo Push, Okta Push, and TOTP) for any authenticated path within + Vault. MFA methods can be configured centrally, and TOTP keys live within + the user's Identity information to allow using the same key across tokens. + Specific MFA method(s) required for any given path within Vault can be + specified in normal ACL path statements. + +IMPROVEMENTS: + + * api: Add client method for a secret renewer background process [GH-2886] + * api: Add `RenewTokenAsSelf` [GH-2886] + * api: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env + var or with a new API function [GH-2956] + * api/cli: Client will now attempt to look up SRV records for the given Vault + hostname [GH-3035] + * audit/socket: Enhance reconnection logic and don't require the connection to + be established at unseal time [GH-2934] + * audit/file: Opportunistically try re-opening the file on error [GH-2999] + * auth/approle: Add role name to token metadata [GH-2985] + * auth/okta: Allow specifying `ttl`/`max_ttl` inside the mount [GH-2915] + * cli: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env + var [GH-2956] + * command/auth: Add `-token-only` flag to `vault auth` that returns only the + token on stdout and does not store it via the token helper [GH-2855] + * core: CORS allowed origins can now be configured [GH-2021] + * core: Add metrics counters for audit log failures [GH-2863] + * cors: Allow setting allowed headers via the API instead of always using + wildcard [GH-3023] + * secret/ssh: Allow specifying the key ID format using template values for CA + type [GH-2888] + * server: Add 
`tls_client_ca_file` option for specifying a CA file to use for + client certificate verification when `tls_require_and_verify_client_cert` is + enabled [GH-3034] + * storage/cockroachdb: Add CockroachDB storage backend [GH-2713] + * storage/couchdb: Add CouchDB storage backend [GH-2880] + * storage/mssql: Add `max_parallel` [GH-3026] + * storage/postgresql: Add `max_parallel` [GH-3026] + * storage/postgresql: Improve listing speed [GH-2945] + * storage/s3: More efficient paging when an object has a lot of subobjects + [GH-2780] + * sys/wrapping: Make `sys/wrapping/lookup` unauthenticated [GH-3084] + * sys/wrapping: Wrapped tokens now store the original request path of the data + [GH-3100] + * telemetry: Add support for DogStatsD [GH-2490] + +BUG FIXES: + + * api/health: Don't treat standby `429` codes as an error [GH-2850] + * api/leases: Fix lease lookup returning lease properties at the top level + * audit: Fix panic when audit logging a read operation on an asymmetric + `transit` key [GH-2958] + * auth/approle: Fix panic when secret and cidr list not provided in role + [GH-3075] + * auth/aws: Look up proper account ID on token renew [GH-3012] + * auth/aws: Store IAM header in all cases when it changes [GH-3004] + * auth/ldap: Verify given certificate is PEM encoded instead of failing + silently [GH-3016] + * auth/token: Don't allow using the same token ID twice when manually + specifying [GH-2916] + * cli: Fix issue with parsing keys that start with special characters [GH-2998] + * core: Relocated `sys/leases/renew` returns same payload as original + `sys/leases` endpoint [GH-2891] + * secret/ssh: Fix panic when signing with incorrect key type [GH-3072] + * secret/totp: Ensure codes can only be used once. This makes some automated + workflows harder but complies with the RFC. 
[GH-2908] + * secret/transit: Fix locking when creating a key with unsupported options + [GH-2974] + +## 0.7.3 (June 7th, 2017) + +SECURITY: + + * Cert auth backend now checks validity of individual certificates: In + previous versions of Vault, validity (e.g. expiration) of individual leaf + certificates added for authentication was not checked. This was done to make + it easier for administrators to control lifecycles of individual + certificates added to the backend, e.g. the authentication material being + checked was access to that specific certificate's private key rather than + all private keys signed by a CA. However, this behavior is often unexpected + and as a result can lead to insecure deployments, so we are now validating + these certificates as well. + * App-ID path salting was skipped in 0.7.1/0.7.2: A regression in 0.7.1/0.7.2 + caused the HMACing of any App-ID information stored in paths (including + actual app-IDs and user-IDs) to be unsalted and written as-is from the API. + In 0.7.3 any such paths will be automatically changed to salted versions on + access (e.g. login or read); however, if you created new app-IDs or user-IDs + in 0.7.1/0.7.2, you may want to consider whether any users with access to + Vault's underlying data store may have intercepted these values, and + revoke/roll them. + +DEPRECATIONS/CHANGES: + + * Step-Down is Forwarded: When a step-down is issued against a non-active node + in an HA cluster, it will now forward the request to the active node. + +FEATURES: + + * **ed25519 Signing/Verification in Transit with Key Derivation**: The + `transit` backend now supports generating + [ed25519](https://ed25519.cr.yp.to/) keys for signing and verification + functionality. These keys support derivation, allowing you to modify the + actual encryption key used by supplying a `context` value. 
+ * **Key Version Specification for Encryption in Transit**: You can now specify + the version of a key you use to wish to generate a signature, ciphertext, or + HMAC. This can be controlled by the `min_encryption_version` key + configuration property. + * **Replication Primary Discovery (Enterprise)**: Replication primaries will + now advertise the addresses of their local HA cluster members to replication + secondaries. This helps recovery if the primary active node goes down and + neither service discovery nor load balancers are in use to steer clients. + +IMPROVEMENTS: + + * api/health: Add Sys().Health() [GH-2805] + * audit: Add auth information to requests that error out [GH-2754] + * command/auth: Add `-no-store` option that prevents the auth command from + storing the returned token into the configured token helper [GH-2809] + * core/forwarding: Request forwarding now heartbeats to prevent unused + connections from being terminated by firewalls or proxies + * plugins/databases: Add MongoDB as an internal database plugin [GH-2698] + * storage/dynamodb: Add a method for checking the existence of children, + speeding up deletion operations in the DynamoDB storage backend [GH-2722] + * storage/mysql: Add max_parallel parameter to MySQL backend [GH-2760] + * secret/databases: Support listing connections [GH-2823] + * secret/databases: Support custom renewal statements in Postgres database + plugin [GH-2788] + * secret/databases: Use the role name as part of generated credentials + [GH-2812] + * ui (Enterprise): Transit key and secret browsing UI handle large lists better + * ui (Enterprise): root tokens are no longer persisted + * ui (Enterprise): support for mounting Database and TOTP secret backends + +BUG FIXES: + + * auth/app-id: Fix regression causing loading of salts to be skipped + * auth/aws: Improve EC2 describe instances performance [GH-2766] + * auth/aws: Fix lookup of some instance profile ARNs [GH-2802] + * auth/aws: Resolve ARNs to internal AWS IDs 
which makes lookup at various + points (e.g. renewal time) more robust [GH-2814] + * auth/aws: Properly honor configured period when using IAM authentication + [GH-2825] + * auth/aws: Check that a bound IAM principal is not empty (in the current + state of the role) before requiring it match the previously authenticated + client [GH-2781] + * auth/cert: Fix panic on renewal [GH-2749] + * auth/cert: Certificate verification for non-CA certs [GH-2761] + * core/acl: Prevent race condition when compiling ACLs in some scenarios + [GH-2826] + * secret/database: Increase wrapping token TTL; in a loaded scenario it could + be too short + * secret/generic: Allow integers to be set as the value of `ttl` field as the + documentation claims is supported [GH-2699] + * secret/ssh: Added host key callback to ssh client config [GH-2752] + * storage/s3: Avoid a panic when some bad data is returned [GH-2785] + * storage/dynamodb: Fix list functions working improperly on Windows [GH-2789] + * storage/file: Don't leak file descriptors in some error cases + * storage/swift: Fix pre-v3 project/tenant name reading [GH-2803] + +## 0.7.2 (May 8th, 2017) + +BUG FIXES: + + * audit: Fix auditing entries containing certain kinds of time values + [GH-2689] + +## 0.7.1 (May 5th, 2017) DEPRECATIONS/CHANGES: @@ -13,11 +420,26 @@ FEATURES: Lambda instances, and more. Signed client identity information retrieved using the AWS API `sts:GetCallerIdentity` is validated against the AWS STS service before issuing a Vault token. This backend is unified with the - `aws-ec2` authentication backend, and allows additional EC2-related - restrictions to be applied during the IAM authentication; the previous EC2 - behavior is also still available. [GH-2441] + `aws-ec2` authentication backend under the name `aws`, and allows additional + EC2-related restrictions to be applied during the IAM authentication; the + previous EC2 behavior is also still available. 
[GH-2441] * **MSSQL Physical Backend**: You can now use Microsoft SQL Server as your Vault physical data store [GH-2546] + * **Lease Listing and Lookup**: You can now introspect a lease to get its + creation and expiration properties via `sys/leases/lookup`; with `sudo` + capability you can also list leases for lookup, renewal, or revocation via + that endpoint. Various lease functions (renew, revoke, revoke-prefix, + revoke-force) have also been relocated to `sys/leases/`, but they also work + at the old paths for compatibility. Reading (but not listing) leases via + `sys/leases/lookup` is now a part of the current `default` policy. [GH-2650] + * **TOTP Secret Backend**: You can now store multi-factor authentication keys + in Vault and use the API to retrieve time-based one-time use passwords on + demand. The backend can also be used to generate a new key and validate + passwords generated by that key. [GH-2492] + * **Database Secret Backend & Secure Plugins (Beta)**: This new secret backend + combines the functionality of the MySQL, PostgreSQL, MSSQL, and Cassandra + backends. It also provides a plugin interface for extendability through + custom databases. [GH-2200] IMPROVEMENTS: @@ -27,7 +449,11 @@ IMPROVEMENTS: than the user credentials [GH-2534] * cli/revoke: Add `-self` option to allow revoking the currently active token [GH-2596] - * core: Randomizing x coordinate in Shamir shares [GH-2621] + * core: Randomize x coordinate in Shamir shares [GH-2621] + * replication: Fix a bug when enabling `approle` on a primary before + secondaries were connected + * replication: Add heartbeating to ensure firewalls don't kill connections to + primaries * secret/pki: Add `no_store` option that allows certificates to be issued without being stored. This removes the ability to look up and/or add to a CRL but helps with scaling to very large numbers of certificates. 
[GH-2565] @@ -44,12 +470,20 @@ IMPROVEMENTS: requests [GH-2466] * storage/s3: Use pooled transport for http client [GH-2481] * storage/swift: Allow domain values for V3 authentication [GH-2554] + * tidy: Improvements to `auth/token/tidy` and `sys/leases/tidy` to handle more + cleanup cases [GH-2452] BUG FIXES: * api: Respect a configured path in Vault's address [GH-2588] * auth/aws-ec2: New bounds added as criteria to allow role creation [GH-2600] * auth/ldap: Don't lowercase groups attached to users [GH-2613] + * cli: Don't panic if `vault write` is used with the `force` flag but no path + [GH-2674] + * core: Help operations should request forward since standbys may not have + appropriate info [GH-2677] + * replication: Fix enabling secondaries when certain mounts already existed on + the primary * secret/mssql: Update mssql driver to support queries with colons [GH-2610] * secret/pki: Don't lowercase O/OU values in certs [GH-2555] * secret/pki: Don't attempt to validate IP SANs if none are provided [GH-2574] @@ -192,11 +626,11 @@ FEATURES: * **Configurable Audited HTTP Headers**: You can now specify headers that you want to have included in each audit entry, along with whether each header should be HMAC'd or kept plaintext. This can be useful for adding additional - client or network metadata to the audit logs. + client or network metadata to the audit logs. * **Transit Backend UI (Enterprise)**: Vault Enterprise UI now supports the transit backend, allowing creation, viewing and editing of named keys as well as using those keys to perform supported transit operations directly in the UI. - * **Socket Audit Backend** A new socket audit backend allows audit logs to be sent + * **Socket Audit Backend** A new socket audit backend allows audit logs to be sent through TCP, UDP, or UNIX Sockets. IMPROVEMENTS: @@ -404,9 +838,9 @@ FEATURES: response wrapped token parameters; wrap arbitrary values; rotate wrapping tokens; and unwrap with enhanced validation. 
In addition, list operations can now be response-wrapped. [GH-1927] - * Transit features: The `transit` backend now supports generating random bytes - and SHA sums; HMACs; and signing and verification functionality using EC - keys (P-256 curve) + * **Transit Features**: The `transit` backend now supports generating random + bytes and SHA sums; HMACs; and signing and verification functionality using + EC keys (P-256 curve) IMPROVEMENTS: diff --git a/vendor/github.com/hashicorp/vault/Makefile b/vendor/github.com/hashicorp/vault/Makefile index 52ab43c..0bf1d14 100644 --- a/vendor/github.com/hashicorp/vault/Makefile +++ b/vendor/github.com/hashicorp/vault/Makefile @@ -1,36 +1,38 @@ TEST?=$$(go list ./... | grep -v /vendor/) VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr EXTERNAL_TOOLS=\ - github.com/mitchellh/gox + github.com/mitchellh/gox \ + github.com/kardianos/govendor BUILD_TAGS?=vault +GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor) default: dev # bin generates the releaseable binaries for Vault -bin: generate +bin: fmtcheck prep @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/build.sh'" # dev creates binaries for testing Vault locally. 
These are put # into ./bin/ as well as $GOPATH/bin, except for quickdev which # is only put into /bin/ -quickdev: generate +quickdev: prep @CGO_ENABLED=0 go build -i -tags='$(BUILD_TAGS)' -o bin/vault -dev: generate +dev: fmtcheck prep @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" -dev-dynamic: generate +dev-dynamic: prep @CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" # test runs the unit tests and vets the code -test: generate +test: fmtcheck prep CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=20m -parallel=4 -testcompile: generate +testcompile: fmtcheck prep @for pkg in $(TEST) ; do \ go test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \ done # testacc runs acceptance tests -testacc: generate +testacc: fmtcheck prep @if [ "$(TEST)" = "./..." ]; then \ echo "ERROR: Set TEST to a specific package"; \ exit 1; \ @@ -38,8 +40,8 @@ testacc: generate VAULT_ACC=1 go test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout 45m # testrace runs the race checker -testrace: generate - CGO_ENABLED=1 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=20m -parallel=4 +testrace: fmtcheck prep + CGO_ENABLED=1 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=45m -parallel=4 cover: ./scripts/coverage.sh --html @@ -55,15 +57,16 @@ vet: echo "and fix them if necessary before submitting the code for reviewal."; \ fi -# generate runs `go generate` to build the dynamically generated +# prep runs `go generate` to build the dynamically generated # source files. -generate: +prep: go generate $(go list ./... 
| grep -v /vendor/) + cp .hooks/* .git/hooks/ # bootstrap the build by downloading additional tools bootstrap: @for tool in $(EXTERNAL_TOOLS) ; do \ - echo "Installing $$tool" ; \ + echo "Installing/Updating $$tool" ; \ go get -u $$tool; \ done @@ -71,4 +74,31 @@ proto: protoc -I helper/forwarding -I vault -I ../../.. vault/*.proto --go_out=plugins=grpc:vault protoc -I helper/forwarding -I vault -I ../../.. helper/forwarding/types.proto --go_out=plugins=grpc:helper/forwarding -.PHONY: bin default generate test vet bootstrap +fmtcheck: + @sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" + +fmt: + gofmt -w $(GOFMT_FILES) + +mysql-database-plugin: + @CGO_ENABLED=0 go build -o bin/mysql-database-plugin ./plugins/database/mysql/mysql-database-plugin + +mysql-legacy-database-plugin: + @CGO_ENABLED=0 go build -o bin/mysql-legacy-database-plugin ./plugins/database/mysql/mysql-legacy-database-plugin + +cassandra-database-plugin: + @CGO_ENABLED=0 go build -o bin/cassandra-database-plugin ./plugins/database/cassandra/cassandra-database-plugin + +postgresql-database-plugin: + @CGO_ENABLED=0 go build -o bin/postgresql-database-plugin ./plugins/database/postgresql/postgresql-database-plugin + +mssql-database-plugin: + @CGO_ENABLED=0 go build -o bin/mssql-database-plugin ./plugins/database/mssql/mssql-database-plugin + +hana-database-plugin: + @CGO_ENABLED=0 go build -o bin/hana-database-plugin ./plugins/database/hana/hana-database-plugin + +mongodb-database-plugin: + @CGO_ENABLED=0 go build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin + +.PHONY: bin default prep test vet bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin diff --git a/vendor/github.com/hashicorp/vault/README.md b/vendor/github.com/hashicorp/vault/README.md index 61b2bb42..058c065 100644 --- a/vendor/github.com/hashicorp/vault/README.md +++ 
b/vendor/github.com/hashicorp/vault/README.md @@ -1,4 +1,4 @@ -Vault [![Build Status](https://travis-ci.org/hashicorp/vault.svg)](https://travis-ci.org/hashicorp/vault) [![Join the chat at https://gitter.im/hashicorp-vault/Lobby](https://badges.gitter.im/hashicorp-vault/Lobby.svg)](https://gitter.im/hashicorp-vault/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +Vault [![Build Status](https://travis-ci.org/hashicorp/vault.svg)](https://travis-ci.org/hashicorp/vault) [![Join the chat at https://gitter.im/hashicorp-vault/Lobby](https://badges.gitter.im/hashicorp-vault/Lobby.svg)](https://gitter.im/hashicorp-vault/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![vault enterprise](https://img.shields.io/badge/vault-enterprise-yellow.svg?colorB=7c8797&colorA=000000)](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=banner&utm_campaign=github-vault-enterprise) ========= **Please note**: We take Vault's security and our users' trust very seriously. If you believe you have found a security issue in Vault, _please responsibly disclose_ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com). @@ -57,7 +57,7 @@ Developing Vault -------------------- If you wish to work on Vault itself or any of its built-in systems, you'll -first need [Go](https://www.golang.org) installed on your machine (version 1.8+ +first need [Go](https://www.golang.org) installed on your machine (version 1.9+ is *required*). For local dev first make sure Go is properly installed, including setting up a @@ -128,3 +128,5 @@ long time. Acceptance tests typically require other environment variables to be set for things such as access keys. The test itself should error early and tell you what to set, so it is not documented here. 
+ +For more information on Vault Enterprise features, visit the [Vault Enterprise site](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=referral&utm_campaign=github-vault-enterprise). diff --git a/vendor/github.com/hashicorp/vault/api/api_integration_test.go b/vendor/github.com/hashicorp/vault/api/api_integration_test.go new file mode 100644 index 0000000..c4e1a1d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/api_integration_test.go @@ -0,0 +1,92 @@ +package api_test + +import ( + "database/sql" + "fmt" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki" + "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/vault" + + vaulthttp "github.com/hashicorp/vault/http" + logxi "github.com/mgutz/logxi/v1" + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +var testVaultServerDefaultBackends = map[string]logical.Factory{ + "transit": transit.Factory, + "pki": pki.Factory, +} + +func testVaultServer(t testing.TB) (*api.Client, func()) { + return testVaultServerBackends(t, testVaultServerDefaultBackends) +} + +func testVaultServerBackends(t testing.TB, backends map[string]logical.Factory) (*api.Client, func()) { + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: logxi.NullLog, + LogicalBackends: backends, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + + // make it easy to get access to the active + core := cluster.Cores[0].Core + vault.TestWaitActive(t, core) + + client := cluster.Cores[0].Client + client.SetToken(cluster.RootToken) + + // Sanity check + secret, err := client.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data["id"].(string) != cluster.RootToken { + t.Fatalf("token mismatch: %#v vs %q", secret, cluster.RootToken) + } + return client, 
func() { defer cluster.Cleanup() } +} + +// testPostgresDB creates a testing postgres database in a Docker container, +// returning the connection URL and the associated closer function. +func testPostgresDB(t testing.TB) (string, func()) { + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("postgresdb: failed to connect to docker: %s", err) + } + + resource, err := pool.Run("postgres", "latest", []string{ + "POSTGRES_PASSWORD=secret", + "POSTGRES_DB=database", + }) + if err != nil { + t.Fatalf("postgresdb: could not start container: %s", err) + } + + addr := fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp")) + + if err := pool.Retry(func() error { + db, err := sql.Open("postgres", addr) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + t.Fatalf("postgresdb: could not connect: %s", err) + } + + return addr, func() { + if err := pool.Purge(resource); err != nil { + t.Fatalf("postgresdb: failed to cleanup container: %s", err) + } + } +} diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go index aff10f4..4f74f61 100644 --- a/vendor/github.com/hashicorp/vault/api/auth_token.go +++ b/vendor/github.com/hashicorp/vault/api/auth_token.go @@ -135,6 +135,26 @@ func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) { return ParseSecret(resp.Body) } +// RenewTokenAsSelf behaves like renew-self, but authenticates using a provided +// token instead of the token attached to the client. 
+func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self") + r.ClientToken = token + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + // RevokeAccessor revokes a token associated with the given accessor // along with all the child tokens. func (c *TokenAuth) RevokeAccessor(accessor string) error { diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go index 85b8953..b19d5f0 100644 --- a/vendor/github.com/hashicorp/vault/api/client.go +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -3,19 +3,21 @@ package api import ( "crypto/tls" "fmt" + "net" "net/http" "net/url" "os" + "path" "strconv" "strings" "sync" "time" - "path" "golang.org/x/net/http2" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/vault/helper/parseutil" "github.com/sethgrid/pester" ) @@ -24,6 +26,7 @@ const EnvVaultCACert = "VAULT_CACERT" const EnvVaultCAPath = "VAULT_CAPATH" const EnvVaultClientCert = "VAULT_CLIENT_CERT" const EnvVaultClientKey = "VAULT_CLIENT_KEY" +const EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" const EnvVaultInsecure = "VAULT_SKIP_VERIFY" const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" const EnvVaultWrapTTL = "VAULT_WRAP_TTL" @@ -54,6 +57,9 @@ type Config struct { // MaxRetries controls the maximum number of times to retry when a 5xx error // occurs. Set to 0 or less to disable retrying. Defaults to 0. 
MaxRetries int + + // Timeout is for setting custom timeout parameter in the HttpClient + Timeout time.Duration } // TLSConfig contains the parameters needed to configure TLS on the HTTP client @@ -156,6 +162,7 @@ func (c *Config) ReadEnvironment() error { var envCAPath string var envClientCert string var envClientKey string + var envClientTimeout time.Duration var envInsecure bool var envTLSServerName string var envMaxRetries *uint64 @@ -183,6 +190,13 @@ func (c *Config) ReadEnvironment() error { if v := os.Getenv(EnvVaultClientKey); v != "" { envClientKey = v } + if t := os.Getenv(EnvVaultClientTimeout); t != "" { + clientTimeout, err := parseutil.ParseDurationSecond(t) + if err != nil { + return fmt.Errorf("Could not parse %s", EnvVaultClientTimeout) + } + envClientTimeout = clientTimeout + } if v := os.Getenv(EnvVaultInsecure); v != "" { var err error envInsecure, err = strconv.ParseBool(v) @@ -215,6 +229,10 @@ func (c *Config) ReadEnvironment() error { c.MaxRetries = int(*envMaxRetries) + 1 } + if envClientTimeout != 0 { + c.Timeout = envClientTimeout + } + return nil } @@ -304,6 +322,11 @@ func (c *Client) SetMaxRetries(retries int) { c.config.MaxRetries = retries } +// SetClientTimeout sets the client request timeout +func (c *Client) SetClientTimeout(timeout time.Duration) { + c.config.Timeout = timeout +} + // SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs // for a given operation and path func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) { @@ -327,16 +350,32 @@ func (c *Client) ClearToken() { c.token = "" } +// Clone creates a copy of this client. +func (c *Client) Clone() (*Client, error) { + return NewClient(c.config) +} + // NewRequest creates a new raw request object to query the Vault server // configured for this client. This is an advanced method and generally // doesn't need to be called externally. 
func (c *Client) NewRequest(method, requestPath string) *Request { + // if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV + // record and take the highest match; this is not designed for high-availability, just discovery + var host string = c.addr.Host + if c.addr.Port() == "" { + // Internet Draft specifies that the SRV record is ignored if a port is given + _, addrs, err := net.LookupSRV("http", "tcp", c.addr.Hostname()) + if err == nil && len(addrs) > 0 { + host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port) + } + } + req := &Request{ Method: method, URL: &url.URL{ User: c.addr.User, Scheme: c.addr.Scheme, - Host: c.addr.Host, + Host: host, Path: path.Join(c.addr.Path, requestPath), }, ClientToken: c.token, @@ -357,6 +396,9 @@ func (c *Client) NewRequest(method, requestPath string) *Request { } else { req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath) } + if c.config.Timeout != 0 { + c.config.HttpClient.Timeout = c.config.Timeout + } return req } diff --git a/vendor/github.com/hashicorp/vault/api/client_test.go b/vendor/github.com/hashicorp/vault/api/client_test.go index f95d795..84663ee 100644 --- a/vendor/github.com/hashicorp/vault/api/client_test.go +++ b/vendor/github.com/hashicorp/vault/api/client_test.go @@ -6,6 +6,7 @@ import ( "net/http" "os" "testing" + "time" ) func init() { @@ -160,3 +161,27 @@ func TestClientEnvSettings(t *testing.T) { t.Fatalf("bad: %v", tlsConfig.InsecureSkipVerify) } } + +func TestClientTimeoutSetting(t *testing.T) { + oldClientTimeout := os.Getenv(EnvVaultClientTimeout) + os.Setenv(EnvVaultClientTimeout, "10") + defer os.Setenv(EnvVaultClientTimeout, oldClientTimeout) + config := DefaultConfig() + config.ReadEnvironment() + client, err := NewClient(config) + if err != nil { + t.Fatal(err) + } + _ = client.NewRequest("PUT", "/") + if client.config.HttpClient.Timeout != time.Second*10 { + t.Fatalf("error setting client timeout using env variable") + } + + // Setting 
custom client timeout for a new request + client.SetClientTimeout(time.Second * 20) + _ = client.NewRequest("PUT", "/") + if client.config.HttpClient.Timeout != time.Second*20 { + t.Fatalf("error setting client timeout using SetClientTimeout") + } + +} diff --git a/vendor/github.com/hashicorp/vault/api/renewer.go b/vendor/github.com/hashicorp/vault/api/renewer.go new file mode 100644 index 0000000..a2a4b66 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/renewer.go @@ -0,0 +1,302 @@ +package api + +import ( + "errors" + "math/rand" + "sync" + "time" +) + +var ( + ErrRenewerMissingInput = errors.New("missing input to renewer") + ErrRenewerMissingSecret = errors.New("missing secret to renew") + ErrRenewerNotRenewable = errors.New("secret is not renewable") + ErrRenewerNoSecretData = errors.New("returned empty secret data") + + // DefaultRenewerGrace is the default grace period + DefaultRenewerGrace = 15 * time.Second + + // DefaultRenewerRenewBuffer is the default size of the buffer for renew + // messages on the channel. + DefaultRenewerRenewBuffer = 5 +) + +// Renewer is a process for renewing a secret. +// +// renewer, err := client.NewRenewer(&RenewerInput{ +// Secret: mySecret, +// }) +// go renewer.Renew() +// defer renewer.Stop() +// +// for { +// select { +// case err := <-renewer.DoneCh(): +// if err != nil { +// log.Fatal(err) +// } +// +// // Renewal is now over +// case renewal := <-renewer.RenewCh(): +// log.Printf("Successfully renewed: %#v", renewal) +// } +// } +// +// +// The `DoneCh` will return if renewal fails or if the remaining lease duration +// after a renewal is less than or equal to the grace (in number of seconds). In +// both cases, the caller should attempt a re-read of the secret. Clients should +// check the return value of the channel to see if renewal was successful. 
+type Renewer struct { + l sync.Mutex + + client *Client + secret *Secret + grace time.Duration + random *rand.Rand + doneCh chan error + renewCh chan *RenewOutput + + stopped bool + stopCh chan struct{} +} + +// RenewerInput is used as input to the renew function. +type RenewerInput struct { + // Secret is the secret to renew + Secret *Secret + + // Grace is a minimum renewal before returning so the upstream client + // can do a re-read. This can be used to prevent clients from waiting + // too long to read a new credential and incur downtime. + Grace time.Duration + + // Rand is the randomizer to use for underlying randomization. If not + // provided, one will be generated and seeded automatically. If provided, it + // is assumed to have already been seeded. + Rand *rand.Rand + + // RenewBuffer is the size of the buffered channel where renew messages are + // dispatched. + RenewBuffer int +} + +// RenewOutput is the metadata returned to the client (if it's listening) to +// renew messages. +type RenewOutput struct { + // RenewedAt is the timestamp when the renewal took place (UTC). + RenewedAt time.Time + + // Secret is the underlying renewal data. It's the same struct as all data + // that is returned from Vault, but since this is renewal data, it will not + // usually include the secret itself. + Secret *Secret +} + +// NewRenewer creates a new renewer from the given input. 
+func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) { + if i == nil { + return nil, ErrRenewerMissingInput + } + + secret := i.Secret + if secret == nil { + return nil, ErrRenewerMissingSecret + } + + grace := i.Grace + if grace == 0 { + grace = DefaultRenewerGrace + } + + random := i.Rand + if random == nil { + random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + } + + renewBuffer := i.RenewBuffer + if renewBuffer == 0 { + renewBuffer = DefaultRenewerRenewBuffer + } + + return &Renewer{ + client: c, + secret: secret, + grace: grace, + random: random, + doneCh: make(chan error, 1), + renewCh: make(chan *RenewOutput, renewBuffer), + + stopped: false, + stopCh: make(chan struct{}), + }, nil +} + +// DoneCh returns the channel where the renewer will publish when renewal stops. +// If there is an error, this will be an error. +func (r *Renewer) DoneCh() <-chan error { + return r.doneCh +} + +// RenewCh is a channel that receives a message when a successful renewal takes +// place and includes metadata about the renewal. +func (r *Renewer) RenewCh() <-chan *RenewOutput { + return r.renewCh +} + +// Stop stops the renewer. +func (r *Renewer) Stop() { + r.l.Lock() + if !r.stopped { + close(r.stopCh) + r.stopped = true + } + r.l.Unlock() +} + +// Renew starts a background process for renewing this secret. When the secret +// is has auth data, this attempts to renew the auth (token). When the secret +// has a lease, this attempts to renew the lease. +func (r *Renewer) Renew() { + var result error + if r.secret.Auth != nil { + result = r.renewAuth() + } else { + result = r.renewLease() + } + + select { + case r.doneCh <- result: + case <-r.stopCh: + } +} + +// renewAuth is a helper for renewing authentication. +func (r *Renewer) renewAuth() error { + if !r.secret.Auth.Renewable || r.secret.Auth.ClientToken == "" { + return ErrRenewerNotRenewable + } + + client, token := r.client, r.secret.Auth.ClientToken + + for { + // Check if we are stopped. 
+ select { + case <-r.stopCh: + return nil + default: + } + + // Renew the auth. + renewal, err := client.Auth().Token().RenewTokenAsSelf(token, 0) + if err != nil { + return err + } + + // Push a message that a renewal took place. + select { + case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}: + default: + } + + // Somehow, sometimes, this happens. + if renewal == nil || renewal.Auth == nil { + return ErrRenewerNoSecretData + } + + // Do nothing if we are not renewable + if !renewal.Auth.Renewable { + return ErrRenewerNotRenewable + } + + // Grab the lease duration and sleep duration - note that we grab the auth + // lease duration, not the secret lease duration. + leaseDuration := time.Duration(renewal.Auth.LeaseDuration) * time.Second + sleepDuration := r.sleepDuration(leaseDuration) + + // If we are within grace, return now. + if leaseDuration <= r.grace || sleepDuration <= r.grace { + return nil + } + + select { + case <-r.stopCh: + return nil + case <-time.After(sleepDuration): + continue + } + } +} + +// renewLease is a helper for renewing a lease. +func (r *Renewer) renewLease() error { + if !r.secret.Renewable || r.secret.LeaseID == "" { + return ErrRenewerNotRenewable + } + + client, leaseID := r.client, r.secret.LeaseID + + for { + // Check if we are stopped. + select { + case <-r.stopCh: + return nil + default: + } + + // Renew the lease. + renewal, err := client.Sys().Renew(leaseID, 0) + if err != nil { + return err + } + + // Push a message that a renewal took place. + select { + case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}: + default: + } + + // Somehow, sometimes, this happens. 
+ if renewal == nil { + return ErrRenewerNoSecretData + } + + // Do nothing if we are not renewable + if !renewal.Renewable { + return ErrRenewerNotRenewable + } + + // Grab the lease duration and sleep duration + leaseDuration := time.Duration(renewal.LeaseDuration) * time.Second + sleepDuration := r.sleepDuration(leaseDuration) + + // If we are within grace, return now. + if leaseDuration <= r.grace || sleepDuration <= r.grace { + return nil + } + + select { + case <-r.stopCh: + return nil + case <-time.After(sleepDuration): + continue + } + } +} + +// sleepDuration calculates the time to sleep given the base lease duration. The +// base is the resulting lease duration. It will be reduced to 1/3 and +// multiplied by a random float between 0.0 and 1.0. This extra randomness +// prevents multiple clients from all trying to renew simultaneously. +func (r *Renewer) sleepDuration(base time.Duration) time.Duration { + sleep := float64(base) + + // Renew at 1/3 the remaining lease. This will give us an opportunity to retry + // at least one more time should the first renewal fail. + sleep = sleep / 3.0 + + // Use a randomness so many clients do not hit Vault simultaneously. 
+ sleep = sleep * (r.random.Float64() + 1) / 2.0 + + return time.Duration(sleep) +} diff --git a/vendor/github.com/hashicorp/vault/api/renewer_integration_test.go b/vendor/github.com/hashicorp/vault/api/renewer_integration_test.go new file mode 100644 index 0000000..7011c7d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/renewer_integration_test.go @@ -0,0 +1,228 @@ +package api_test + +import ( + "testing" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/database" + "github.com/hashicorp/vault/builtin/logical/pki" + "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/logical" +) + +func TestRenewer_Renew(t *testing.T) { + t.Parallel() + + client, vaultDone := testVaultServerBackends(t, map[string]logical.Factory{ + "database": database.Factory, + "pki": pki.Factory, + "transit": transit.Factory, + }) + defer vaultDone() + + pgURL, pgDone := testPostgresDB(t) + defer pgDone() + + t.Run("group", func(t *testing.T) { + t.Run("kv", func(t *testing.T) { + t.Parallel() + + if _, err := client.Logical().Write("secret/value", map[string]interface{}{ + "foo": "bar", + }); err != nil { + t.Fatal(err) + } + + secret, err := client.Logical().Read("secret/value") + if err != nil { + t.Fatal(err) + } + + v, err := client.NewRenewer(&api.RenewerInput{ + Secret: secret, + }) + if err != nil { + t.Fatal(err) + } + go v.Renew() + defer v.Stop() + + select { + case err := <-v.DoneCh(): + if err != api.ErrRenewerNotRenewable { + t.Fatal(err) + } + case renew := <-v.RenewCh(): + t.Errorf("received renew, but should have been nil: %#v", renew) + case <-time.After(500 * time.Millisecond): + t.Error("should have been non-renewable") + } + }) + + t.Run("transit", func(t *testing.T) { + t.Parallel() + + if err := client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }); err != nil { + t.Fatal(err) + } + + secret, err := client.Logical().Write("transit/encrypt/my-app", map[string]interface{}{ + 
"plaintext": "Zm9vCg==", + }) + if err != nil { + t.Fatal(err) + } + + v, err := client.NewRenewer(&api.RenewerInput{ + Secret: secret, + }) + if err != nil { + t.Fatal(err) + } + go v.Renew() + defer v.Stop() + + select { + case err := <-v.DoneCh(): + if err != api.ErrRenewerNotRenewable { + t.Fatal(err) + } + case renew := <-v.RenewCh(): + t.Errorf("received renew, but should have been nil: %#v", renew) + case <-time.After(500 * time.Millisecond): + t.Error("should have been non-renewable") + } + }) + + t.Run("database", func(t *testing.T) { + t.Parallel() + + if err := client.Sys().Mount("database", &api.MountInput{ + Type: "database", + }); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("database/config/postgresql", map[string]interface{}{ + "plugin_name": "postgresql-database-plugin", + "connection_url": pgURL, + "allowed_roles": "readonly", + }); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("database/roles/readonly", map[string]interface{}{ + "db_name": "postgresql", + "creation_statements": `` + + `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';` + + `GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";`, + "default_ttl": "1s", + "max_ttl": "3s", + }); err != nil { + t.Fatal(err) + } + + secret, err := client.Logical().Read("database/creds/readonly") + if err != nil { + t.Fatal(err) + } + + v, err := client.NewRenewer(&api.RenewerInput{ + Secret: secret, + }) + if err != nil { + t.Fatal(err) + } + go v.Renew() + defer v.Stop() + + select { + case err := <-v.DoneCh(): + t.Errorf("should have renewed once before returning: %s", err) + case renew := <-v.RenewCh(): + if renew == nil { + t.Fatal("renew is nil") + } + if !renew.Secret.Renewable { + t.Errorf("expected lease to be renewable: %#v", renew) + } + if renew.Secret.LeaseDuration > 2 { + t.Errorf("expected lease to < 2s: %#v", renew) + } + case <-time.After(3 * time.Second): + t.Errorf("no renewal") + } + + select 
{ + case err := <-v.DoneCh(): + if err != nil { + t.Fatal(err) + } + case renew := <-v.RenewCh(): + t.Fatalf("should not have renewed (lease should be up): %#v", renew) + case <-time.After(3 * time.Second): + t.Errorf("no data") + } + }) + + t.Run("auth", func(t *testing.T) { + t.Parallel() + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "1s", + ExplicitMaxTTL: "3s", + }) + if err != nil { + t.Fatal(err) + } + + v, err := client.NewRenewer(&api.RenewerInput{ + Secret: secret, + }) + if err != nil { + t.Fatal(err) + } + go v.Renew() + defer v.Stop() + + select { + case err := <-v.DoneCh(): + t.Errorf("should have renewed once before returning: %s", err) + case renew := <-v.RenewCh(): + if renew == nil { + t.Fatal("renew is nil") + } + if renew.Secret.Auth == nil { + t.Fatal("renew auth is nil") + } + if !renew.Secret.Auth.Renewable { + t.Errorf("expected lease to be renewable: %#v", renew) + } + if renew.Secret.Auth.LeaseDuration > 2 { + t.Errorf("expected lease to < 2s: %#v", renew) + } + if renew.Secret.Auth.ClientToken == "" { + t.Error("expected a client token") + } + if renew.Secret.Auth.Accessor == "" { + t.Error("expected an accessor") + } + case <-time.After(3 * time.Second): + t.Errorf("no renewal") + } + + select { + case err := <-v.DoneCh(): + if err != nil { + t.Fatal(err) + } + case renew := <-v.RenewCh(): + t.Fatalf("should not have renewed (lease should be up): %#v", renew) + case <-time.After(3 * time.Second): + t.Errorf("no data") + } + }) + }) +} diff --git a/vendor/github.com/hashicorp/vault/api/renewer_test.go b/vendor/github.com/hashicorp/vault/api/renewer_test.go new file mode 100644 index 0000000..262484e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/renewer_test.go @@ -0,0 +1,85 @@ +package api + +import ( + "reflect" + "testing" + "time" +) + +func TestRenewer_NewRenewer(t *testing.T) { + t.Parallel() + + client, err := NewClient(DefaultConfig()) + if err != nil { + 
t.Fatal(err) + } + + cases := []struct { + name string + i *RenewerInput + e *Renewer + err bool + }{ + { + "nil", + nil, + nil, + true, + }, + { + "missing_secret", + &RenewerInput{ + Secret: nil, + }, + nil, + true, + }, + { + "default_grace", + &RenewerInput{ + Secret: &Secret{}, + }, + &Renewer{ + secret: &Secret{}, + grace: DefaultRenewerGrace, + }, + false, + }, + { + "custom_grace", + &RenewerInput{ + Secret: &Secret{}, + Grace: 30 * time.Second, + }, + &Renewer{ + secret: &Secret{}, + grace: 30 * time.Second, + }, + false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + v, err := client.NewRenewer(tc.i) + if (err != nil) != tc.err { + t.Fatal(err) + } + + if v == nil { + return + } + + // Zero-out channels because reflect + v.client = nil + v.random = nil + v.doneCh = nil + v.renewCh = nil + v.stopCh = nil + + if !reflect.DeepEqual(tc.e, v) { + t.Errorf("not equal\nexp: %#v\nact: %#v", tc.e, v) + } + }) + } +} diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go index 685e2d7..83a28bd 100644 --- a/vendor/github.com/hashicorp/vault/api/request.go +++ b/vendor/github.com/hashicorp/vault/api/request.go @@ -14,6 +14,7 @@ type Request struct { Method string URL *url.URL Params url.Values + Headers http.Header ClientToken string WrapTTL string Obj interface{} @@ -60,6 +61,14 @@ func (r *Request) ToHTTP() (*http.Request, error) { req.URL.Host = r.URL.Host req.Host = r.URL.Host + if r.Headers != nil { + for header, vals := range r.Headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + } + if len(r.ClientToken) != 0 { req.Header.Set("X-Vault-Token", r.ClientToken) } diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go index 7c8ac9f..05502e1 100644 --- a/vendor/github.com/hashicorp/vault/api/response.go +++ b/vendor/github.com/hashicorp/vault/api/response.go @@ -25,8 +25,9 @@ func (r 
*Response) DecodeJSON(out interface{}) error { // this will fully consume the response body, but will not close it. The // body must still be closed manually. func (r *Response) Error() error { - // 200 to 399 are okay status codes - if r.StatusCode >= 200 && r.StatusCode < 400 { + // 200 to 399 are okay status codes. 429 is the code for health status of + // standby nodes. + if (r.StatusCode >= 200 && r.StatusCode < 400) || r.StatusCode == 429 { return nil } diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go index 14924f9..7478a0c 100644 --- a/vendor/github.com/hashicorp/vault/api/secret.go +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -42,6 +42,7 @@ type SecretWrapInfo struct { Token string `json:"token"` TTL int `json:"ttl"` CreationTime time.Time `json:"creation_time"` + CreationPath string `json:"creation_path"` WrappedAccessor string `json:"wrapped_accessor"` } diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go index 7c3e56b..a17b0eb 100644 --- a/vendor/github.com/hashicorp/vault/api/ssh.go +++ b/vendor/github.com/hashicorp/vault/api/ssh.go @@ -36,3 +36,20 @@ func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, err return ParseSecret(resp.Body) } + +// SignKey signs the given public key and returns a signed public key to pass +// along with the SSH request. 
+func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/sign/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go index f9f3c8c..32f4bbd 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_auth.go +++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go @@ -82,19 +82,27 @@ func (c *Sys) DisableAuth(path string) error { // documentation. Please refer to that documentation for more details. type EnableAuthOptions struct { - Type string `json:"type" structs:"type"` - Description string `json:"description" structs:"description"` - Local bool `json:"local" structs:"local"` + Type string `json:"type" structs:"type"` + Description string `json:"description" structs:"description"` + Config AuthConfigInput `json:"config" structs:"config"` + Local bool `json:"local" structs:"local"` + PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty"` +} + +type AuthConfigInput struct { + PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` } type AuthMount struct { Type string `json:"type" structs:"type" mapstructure:"type"` Description string `json:"description" structs:"description" mapstructure:"description"` + Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"` Config AuthConfigOutput `json:"config" structs:"config" mapstructure:"config"` Local bool `json:"local" structs:"local" mapstructure:"local"` } type AuthConfigOutput struct { - DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" 
mapstructure:"max_lease_ttl"` + DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` + PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go new file mode 100644 index 0000000..e7f2a59 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go @@ -0,0 +1,56 @@ +package api + +func (c *Sys) CORSStatus() (*CORSResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/config/cors") + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result CORSResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) ConfigureCORS(req *CORSRequest) (*CORSResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/config/cors") + if err := r.SetJSONBody(req); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result CORSResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) DisableCORS() (*CORSResponse, error) { + r := c.c.NewRequest("DELETE", "/v1/sys/config/cors") + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result CORSResponse + err = resp.DecodeJSON(&result) + return &result, err + +} + +type CORSRequest struct { + AllowedOrigins string `json:"allowed_origins"` + Enabled bool `json:"enabled"` +} + +type CORSResponse struct { + AllowedOrigins string `json:"allowed_origins"` + Enabled bool `json:"enabled"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go new file mode 100644 index 0000000..822354c --- 
/dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_health.go @@ -0,0 +1,29 @@ +package api + +func (c *Sys) Health() (*HealthResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/health") + // If the code is 400 or above it will automatically turn into an error, + // but the sys/health API defaults to returning 5xx when not sealed or + // inited, so we force this code to be something else so we parse correctly + r.Params.Add("sealedcode", "299") + r.Params.Add("uninitcode", "299") + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result HealthResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type HealthResponse struct { + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + Standby bool `json:"standby"` + ServerTimeUTC int64 `json:"server_time_utc"` + Version string `json:"version"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go index 201ac73..4951c46 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_leader.go +++ b/vendor/github.com/hashicorp/vault/api/sys_leader.go @@ -14,7 +14,8 @@ func (c *Sys) Leader() (*LeaderResponse, error) { } type LeaderResponse struct { - HAEnabled bool `json:"ha_enabled"` - IsSelf bool `json:"is_self"` - LeaderAddress string `json:"leader_address"` + HAEnabled bool `json:"ha_enabled"` + IsSelf bool `json:"is_self"` + LeaderAddress string `json:"leader_address"` + LeaderClusterAddress string `json:"leader_cluster_address"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_lease.go b/vendor/github.com/hashicorp/vault/api/sys_leases.go similarity index 76% rename from vendor/github.com/hashicorp/vault/api/sys_lease.go rename to vendor/github.com/hashicorp/vault/api/sys_leases.go index e5c19c4..34bd99e 100644 --- 
a/vendor/github.com/hashicorp/vault/api/sys_lease.go +++ b/vendor/github.com/hashicorp/vault/api/sys_leases.go @@ -1,7 +1,7 @@ package api func (c *Sys) Renew(id string, increment int) (*Secret, error) { - r := c.c.NewRequest("PUT", "/v1/sys/renew") + r := c.c.NewRequest("PUT", "/v1/sys/leases/renew") body := map[string]interface{}{ "increment": increment, @@ -21,7 +21,7 @@ func (c *Sys) Renew(id string, increment int) (*Secret, error) { } func (c *Sys) Revoke(id string) error { - r := c.c.NewRequest("PUT", "/v1/sys/revoke/"+id) + r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke/"+id) resp, err := c.c.RawRequest(r) if err == nil { defer resp.Body.Close() @@ -30,7 +30,7 @@ func (c *Sys) Revoke(id string) error { } func (c *Sys) RevokePrefix(id string) error { - r := c.c.NewRequest("PUT", "/v1/sys/revoke-prefix/"+id) + r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-prefix/"+id) resp, err := c.c.RawRequest(r) if err == nil { defer resp.Body.Close() @@ -39,7 +39,7 @@ func (c *Sys) RevokePrefix(id string) error { } func (c *Sys) RevokeForce(id string) error { - r := c.c.NewRequest("PUT", "/v1/sys/revoke-force/"+id) + r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-force/"+id) resp, err := c.c.RawRequest(r) if err == nil { defer resp.Body.Close() diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go index 907fddb..091a8f6 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go +++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go @@ -124,23 +124,27 @@ type MountInput struct { Description string `json:"description" structs:"description"` Config MountConfigInput `json:"config" structs:"config"` Local bool `json:"local" structs:"local"` + PluginName string `json:"plugin_name,omitempty" structs:"plugin_name"` } type MountConfigInput struct { DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` MaxLeaseTTL string `json:"max_lease_ttl" 
structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` + PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` } type MountOutput struct { Type string `json:"type" structs:"type"` Description string `json:"description" structs:"description"` + Accessor string `json:"accessor" structs:"accessor"` Config MountConfigOutput `json:"config" structs:"config"` Local bool `json:"local" structs:"local"` } type MountConfigOutput struct { - DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` - ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` + DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` + PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` } diff --git a/vendor/github.com/hashicorp/vault/audit/audit.go b/vendor/github.com/hashicorp/vault/audit/audit.go index dffa8ee..b96391c 100644 --- a/vendor/github.com/hashicorp/vault/audit/audit.go +++ b/vendor/github.com/hashicorp/vault/audit/audit.go @@ -25,15 +25,21 @@ type Backend interface { // GetHash is used to return the given data with the backend's hash, // so that a caller can determine if a value in the audit log matches // an expected plaintext value - GetHash(string) string + GetHash(string) (string, error) // Reload is called on SIGHUP for supporting backends. 
Reload() error + + // Invalidate is called for path invalidation + Invalidate() } type BackendConfig struct { - // The salt that should be used for any secret obfuscation - Salt *salt.Salt + // The view to store the salt + SaltView logical.Storage + + // The salt config that should be used for any secret obfuscation + SaltConfig *salt.Config // Config is the opaque user configuration provided when mounting Config map[string]string diff --git a/vendor/github.com/hashicorp/vault/audit/format.go b/vendor/github.com/hashicorp/vault/audit/format.go index 919da12..18eb254 100644 --- a/vendor/github.com/hashicorp/vault/audit/format.go +++ b/vendor/github.com/hashicorp/vault/audit/format.go @@ -7,6 +7,8 @@ import ( "time" "github.com/SermoDigital/jose/jws" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" "github.com/mitchellh/copystructure" ) @@ -14,6 +16,7 @@ import ( type AuditFormatWriter interface { WriteRequest(io.Writer, *AuditRequestEntry) error WriteResponse(io.Writer, *AuditResponseEntry) error + Salt() (*salt.Salt, error) } // AuditFormatter implements the Formatter interface, and allows the underlying @@ -41,6 +44,11 @@ func (f *AuditFormatter) FormatRequest( return fmt.Errorf("no format writer specified") } + salt, err := f.Salt() + if err != nil { + return errwrap.Wrapf("error fetching salt: {{err}}", err) + } + if !config.Raw { // Before we copy the structure we must nil out some data // otherwise we will cause reflection to panic and die @@ -70,9 +78,17 @@ func (f *AuditFormatter) FormatRequest( // Hash any sensitive information if auth != nil { - if err := Hash(config.Salt, auth); err != nil { + // Cache and restore accessor in the auth + var authAccessor string + if !config.HMACAccessor && auth.Accessor != "" { + authAccessor = auth.Accessor + } + if err := Hash(salt, auth); err != nil { return err } + if authAccessor != "" { + auth.Accessor = authAccessor + } } // Cache and restore accessor 
in the request @@ -80,7 +96,7 @@ func (f *AuditFormatter) FormatRequest( if !config.HMACAccessor && req != nil && req.ClientTokenAccessor != "" { clientTokenAccessor = req.ClientTokenAccessor } - if err := Hash(config.Salt, req); err != nil { + if err := Hash(salt, req); err != nil { return err } if clientTokenAccessor != "" { @@ -102,6 +118,8 @@ func (f *AuditFormatter) FormatRequest( Error: errString, Auth: AuditAuth{ + ClientToken: auth.ClientToken, + Accessor: auth.Accessor, DisplayName: auth.DisplayName, Policies: auth.Policies, Metadata: auth.Metadata, @@ -152,6 +170,11 @@ func (f *AuditFormatter) FormatResponse( return fmt.Errorf("no format writer specified") } + salt, err := f.Salt() + if err != nil { + return errwrap.Wrapf("error fetching salt: {{err}}", err) + } + if !config.Raw { // Before we copy the structure we must nil out some data // otherwise we will cause reflection to panic and die @@ -195,7 +218,7 @@ func (f *AuditFormatter) FormatResponse( if !config.HMACAccessor && auth.Accessor != "" { accessor = auth.Accessor } - if err := Hash(config.Salt, auth); err != nil { + if err := Hash(salt, auth); err != nil { return err } if accessor != "" { @@ -208,7 +231,7 @@ func (f *AuditFormatter) FormatResponse( if !config.HMACAccessor && req != nil && req.ClientTokenAccessor != "" { clientTokenAccessor = req.ClientTokenAccessor } - if err := Hash(config.Salt, req); err != nil { + if err := Hash(salt, req); err != nil { return err } if clientTokenAccessor != "" { @@ -224,7 +247,7 @@ func (f *AuditFormatter) FormatResponse( if !config.HMACAccessor && resp != nil && resp.WrapInfo != nil && resp.WrapInfo.WrappedAccessor != "" { wrappedAccessor = resp.WrapInfo.WrappedAccessor } - if err := Hash(config.Salt, resp); err != nil { + if err := Hash(salt, resp); err != nil { return err } if accessor != "" { @@ -277,6 +300,7 @@ func (f *AuditFormatter) FormatResponse( TTL: int(resp.WrapInfo.TTL / time.Second), Token: token, CreationTime: 
resp.WrapInfo.CreationTime.Format(time.RFC3339Nano), + CreationPath: resp.WrapInfo.CreationPath, WrappedAccessor: resp.WrapInfo.WrappedAccessor, } } @@ -284,11 +308,13 @@ func (f *AuditFormatter) FormatResponse( respEntry := &AuditResponseEntry{ Type: "response", Error: errString, - Auth: AuditAuth{ - DisplayName: auth.DisplayName, - Policies: auth.Policies, - Metadata: auth.Metadata, + ClientToken: auth.ClientToken, + Accessor: auth.Accessor, + DisplayName: auth.DisplayName, + Policies: auth.Policies, + Metadata: auth.Metadata, + RemainingUses: req.ClientTokenRemainingUses, }, Request: AuditRequest{ @@ -381,6 +407,7 @@ type AuditResponseWrapInfo struct { TTL int `json:"ttl"` Token string `json:"token"` CreationTime string `json:"creation_time"` + CreationPath string `json:"creation_path"` WrappedAccessor string `json:"wrapped_accessor,omitempty"` } diff --git a/vendor/github.com/hashicorp/vault/audit/format_json.go b/vendor/github.com/hashicorp/vault/audit/format_json.go index 9e200f0..0a5c9d9 100644 --- a/vendor/github.com/hashicorp/vault/audit/format_json.go +++ b/vendor/github.com/hashicorp/vault/audit/format_json.go @@ -4,12 +4,15 @@ import ( "encoding/json" "fmt" "io" + + "github.com/hashicorp/vault/helper/salt" ) // JSONFormatWriter is an AuditFormatWriter implementation that structures data into // a JSON format. 
type JSONFormatWriter struct { - Prefix string + Prefix string + SaltFunc func() (*salt.Salt, error) } func (f *JSONFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error { @@ -43,3 +46,7 @@ func (f *JSONFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) enc := json.NewEncoder(w) return enc.Encode(resp) } + +func (f *JSONFormatWriter) Salt() (*salt.Salt, error) { + return f.SaltFunc() +} diff --git a/vendor/github.com/hashicorp/vault/audit/format_json_test.go b/vendor/github.com/hashicorp/vault/audit/format_json_test.go index 21bb647..688ae3d 100644 --- a/vendor/github.com/hashicorp/vault/audit/format_json_test.go +++ b/vendor/github.com/hashicorp/vault/audit/format_json_test.go @@ -9,21 +9,32 @@ import ( "errors" + "fmt" "github.com/hashicorp/vault/helper/jsonutil" "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" ) func TestFormatJSON_formatRequest(t *testing.T) { + salter, err := salt.NewSalt(nil, nil) + if err != nil { + t.Fatal(err) + } + saltFunc := func() (*salt.Salt, error) { + return salter, nil + } + + expectedResultStr := fmt.Sprintf(testFormatJSONReqBasicStrFmt, salter.GetIdentifiedHMAC("foo")) + cases := map[string]struct { - Auth *logical.Auth - Req *logical.Request - Err error - Prefix string - Result string + Auth *logical.Auth + Req *logical.Request + Err error + Prefix string + ExpectedStr string }{ "auth, request": { - &logical.Auth{ClientToken: "foo", Policies: []string{"root"}}, + &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", Policies: []string{"root"}}, &logical.Request{ Operation: logical.UpdateOperation, Path: "/foo", @@ -39,10 +50,10 @@ func TestFormatJSON_formatRequest(t *testing.T) { }, errors.New("this is an error"), "", - testFormatJSONReqBasicStr, + expectedResultStr, }, "auth, request with prefix": { - &logical.Auth{ClientToken: "foo", Policies: []string{"root"}}, + &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", 
Policies: []string{"root"}}, &logical.Request{ Operation: logical.UpdateOperation, Path: "/foo", @@ -58,7 +69,7 @@ func TestFormatJSON_formatRequest(t *testing.T) { }, errors.New("this is an error"), "@cee: ", - testFormatJSONReqBasicStr, + expectedResultStr, }, } @@ -66,23 +77,24 @@ func TestFormatJSON_formatRequest(t *testing.T) { var buf bytes.Buffer formatter := AuditFormatter{ AuditFormatWriter: &JSONFormatWriter{ - Prefix: tc.Prefix, + Prefix: tc.Prefix, + SaltFunc: saltFunc, }, } - salter, _ := salt.NewSalt(nil, nil) config := FormatterConfig{ - Salt: salter, + HMACAccessor: false, } if err := formatter.FormatRequest(&buf, config, tc.Auth, tc.Req, tc.Err); err != nil { t.Fatalf("bad: %s\nerr: %s", name, err) } if !strings.HasPrefix(buf.String(), tc.Prefix) { - t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix) + t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, expectedResultStr, tc.Prefix) } var expectedjson = new(AuditRequestEntry) - if err := jsonutil.DecodeJSON([]byte(tc.Result), &expectedjson); err != nil { + + if err := jsonutil.DecodeJSON([]byte(expectedResultStr), &expectedjson); err != nil { t.Fatalf("bad json: %s", err) } @@ -106,5 +118,5 @@ func TestFormatJSON_formatRequest(t *testing.T) { } } -const testFormatJSONReqBasicStr = `{"time":"2015-08-05T13:45:46Z","type":"request","auth":{"display_name":"","policies":["root"],"metadata":null},"request":{"operation":"update","path":"/foo","data":null,"wrap_ttl":60,"remote_address":"127.0.0.1","headers":{"foo":["bar"]}},"error":"this is an error"} +const testFormatJSONReqBasicStrFmt = `{"time":"2015-08-05T13:45:46Z","type":"request","auth":{"client_token":"%s","accessor":"bar","display_name":"testtoken","policies":["root"],"metadata":null},"request":{"operation":"update","path":"/foo","data":null,"wrap_ttl":60,"remote_address":"127.0.0.1","headers":{"foo":["bar"]}},"error":"this is an error"} ` diff --git a/vendor/github.com/hashicorp/vault/audit/format_jsonx.go 
b/vendor/github.com/hashicorp/vault/audit/format_jsonx.go index cc6cc95..792e552 100644 --- a/vendor/github.com/hashicorp/vault/audit/format_jsonx.go +++ b/vendor/github.com/hashicorp/vault/audit/format_jsonx.go @@ -5,13 +5,15 @@ import ( "fmt" "io" + "github.com/hashicorp/vault/helper/salt" "github.com/jefferai/jsonx" ) // JSONxFormatWriter is an AuditFormatWriter implementation that structures data into // a XML format. type JSONxFormatWriter struct { - Prefix string + Prefix string + SaltFunc func() (*salt.Salt, error) } func (f *JSONxFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error { @@ -65,3 +67,7 @@ func (f *JSONxFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) _, err = w.Write(xmlBytes) return err } + +func (f *JSONxFormatWriter) Salt() (*salt.Salt, error) { + return f.SaltFunc() +} diff --git a/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go b/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go index 8d4fe4b..b04ccd0 100644 --- a/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go +++ b/vendor/github.com/hashicorp/vault/audit/format_jsonx_test.go @@ -8,21 +8,32 @@ import ( "errors" + "fmt" "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" ) func TestFormatJSONx_formatRequest(t *testing.T) { + salter, err := salt.NewSalt(nil, nil) + if err != nil { + t.Fatal(err) + } + saltFunc := func() (*salt.Salt, error) { + return salter, nil + } + + fooSalted := salter.GetIdentifiedHMAC("foo") + cases := map[string]struct { - Auth *logical.Auth - Req *logical.Request - Err error - Prefix string - Result string - Expected string + Auth *logical.Auth + Req *logical.Request + Err error + Prefix string + Result string + ExpectedStr string }{ "auth, request": { - &logical.Auth{ClientToken: "foo", Policies: []string{"root"}}, + &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", Policies: []string{"root"}}, &logical.Request{ Operation: 
logical.UpdateOperation, Path: "/foo", @@ -39,10 +50,11 @@ func TestFormatJSONx_formatRequest(t *testing.T) { errors.New("this is an error"), "", "", - `rootthis is an errorbarupdate/foo127.0.0.160request`, + fmt.Sprintf(`bar%stesttokenrootthis is an errorbarupdate/foo127.0.0.160request`, + fooSalted), }, "auth, request with prefix": { - &logical.Auth{ClientToken: "foo", Policies: []string{"root"}}, + &logical.Auth{ClientToken: "foo", Accessor: "bar", DisplayName: "testtoken", Policies: []string{"root"}}, &logical.Request{ Operation: logical.UpdateOperation, Path: "/foo", @@ -59,7 +71,8 @@ func TestFormatJSONx_formatRequest(t *testing.T) { errors.New("this is an error"), "", "@cee: ", - `rootthis is an errorbarupdate/foo127.0.0.160request`, + fmt.Sprintf(`bar%stesttokenrootthis is an errorbarupdate/foo127.0.0.160request`, + fooSalted), }, } @@ -67,13 +80,13 @@ func TestFormatJSONx_formatRequest(t *testing.T) { var buf bytes.Buffer formatter := AuditFormatter{ AuditFormatWriter: &JSONxFormatWriter{ - Prefix: tc.Prefix, + Prefix: tc.Prefix, + SaltFunc: saltFunc, }, } - salter, _ := salt.NewSalt(nil, nil) config := FormatterConfig{ - Salt: salter, - OmitTime: true, + OmitTime: true, + HMACAccessor: false, } if err := formatter.FormatRequest(&buf, config, tc.Auth, tc.Req, tc.Err); err != nil { t.Fatalf("bad: %s\nerr: %s", name, err) @@ -83,10 +96,10 @@ func TestFormatJSONx_formatRequest(t *testing.T) { t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix) } - if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(tc.Expected)) { + if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) { t.Fatalf( "bad: %s\nResult:\n\n'%s'\n\nExpected:\n\n'%s'", - name, strings.TrimSpace(buf.String()), string(tc.Expected)) + name, strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) } } } diff --git a/vendor/github.com/hashicorp/vault/audit/format_test.go b/vendor/github.com/hashicorp/vault/audit/format_test.go index 
6a6425b..5390229 100644 --- a/vendor/github.com/hashicorp/vault/audit/format_test.go +++ b/vendor/github.com/hashicorp/vault/audit/format_test.go @@ -10,6 +10,8 @@ import ( ) type noopFormatWriter struct { + salt *salt.Salt + SaltFunc func() (*salt.Salt, error) } func (n *noopFormatWriter) WriteRequest(_ io.Writer, _ *AuditRequestEntry) error { @@ -20,11 +22,20 @@ func (n *noopFormatWriter) WriteResponse(_ io.Writer, _ *AuditResponseEntry) err return nil } -func TestFormatRequestErrors(t *testing.T) { - salter, _ := salt.NewSalt(nil, nil) - config := FormatterConfig{ - Salt: salter, +func (n *noopFormatWriter) Salt() (*salt.Salt, error) { + if n.salt != nil { + return n.salt, nil } + var err error + n.salt, err = salt.NewSalt(nil, nil) + if err != nil { + return nil, err + } + return n.salt, nil +} + +func TestFormatRequestErrors(t *testing.T) { + config := FormatterConfig{} formatter := AuditFormatter{ AuditFormatWriter: &noopFormatWriter{}, } @@ -38,10 +49,7 @@ func TestFormatRequestErrors(t *testing.T) { } func TestFormatResponseErrors(t *testing.T) { - salter, _ := salt.NewSalt(nil, nil) - config := FormatterConfig{ - Salt: salter, - } + config := FormatterConfig{} formatter := AuditFormatter{ AuditFormatWriter: &noopFormatWriter{}, } diff --git a/vendor/github.com/hashicorp/vault/audit/formatter.go b/vendor/github.com/hashicorp/vault/audit/formatter.go index 318bd1b..3c1748f 100644 --- a/vendor/github.com/hashicorp/vault/audit/formatter.go +++ b/vendor/github.com/hashicorp/vault/audit/formatter.go @@ -3,7 +3,6 @@ package audit import ( "io" - "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" ) @@ -19,7 +18,6 @@ type Formatter interface { type FormatterConfig struct { Raw bool - Salt *salt.Salt HMACAccessor bool // This should only ever be used in a testing context diff --git a/vendor/github.com/hashicorp/vault/audit/hashstructure.go b/vendor/github.com/hashicorp/vault/audit/hashstructure.go index 8d0fd7c..8caf3eb 100644 --- 
a/vendor/github.com/hashicorp/vault/audit/hashstructure.go +++ b/vendor/github.com/hashicorp/vault/audit/hashstructure.go @@ -1,10 +1,13 @@ package audit import ( + "errors" "reflect" "strings" + "time" "github.com/hashicorp/vault/helper/salt" + "github.com/hashicorp/vault/helper/wrapping" "github.com/hashicorp/vault/logical" "github.com/mitchellh/copystructure" "github.com/mitchellh/reflectwalk" @@ -84,7 +87,7 @@ func Hash(salter *salt.Salt, raw interface{}) error { s.Data = data.(map[string]interface{}) - case *logical.ResponseWrapInfo: + case *wrapping.ResponseWrapInfo: if s == nil { return nil } @@ -140,6 +143,12 @@ type hashWalker struct { unknownKeys []string } +// hashTimeType stores a pre-computed reflect.Type for a time.Time so +// we can quickly compare in hashWalker.Struct. We create an empty/invalid +// time.Time{} so we don't need to incur any additional startup cost vs. +// Now() or Unix(). +var hashTimeType = reflect.TypeOf(time.Time{}) + func (w *hashWalker) Enter(loc reflectwalk.Location) error { w.loc = loc return nil @@ -187,6 +196,35 @@ func (w *hashWalker) SliceElem(i int, elem reflect.Value) error { return nil } +func (w *hashWalker) Struct(v reflect.Value) error { + // We are looking for time values. If it isn't one, ignore it. + if v.Type() != hashTimeType { + return nil + } + + // If we aren't in a map value, return an error to prevent a panic + if v.Interface() != w.lastValue.Interface() { + return errors.New("time.Time value in a non map key cannot be hashed for audits") + } + + // Create a string value of the time. IMPORTANT: this must never change + // across Vault versions or the hash value of equivalent time.Time will + // change. + strVal := v.Interface().(time.Time).Format(time.RFC3339Nano) + + // Set the map value to the string instead of the time.Time object + m := w.cs[len(w.cs)-1] + mk := w.csData.(reflect.Value) + m.SetMapIndex(mk, reflect.ValueOf(strVal)) + + // Skip this entry so that we don't walk the struct. 
+ return reflectwalk.SkipEntry +} + +func (w *hashWalker) StructField(reflect.StructField, reflect.Value) error { + return nil +} + func (w *hashWalker) Primitive(v reflect.Value) error { if w.Callback == nil { return nil diff --git a/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go b/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go index 5fefa0f..49afa6e 100644 --- a/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go +++ b/vendor/github.com/hashicorp/vault/audit/hashstructure_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/vault/helper/certutil" "github.com/hashicorp/vault/helper/salt" + "github.com/hashicorp/vault/helper/wrapping" "github.com/hashicorp/vault/logical" "github.com/mitchellh/copystructure" ) @@ -69,7 +70,7 @@ func TestCopy_response(t *testing.T) { Data: map[string]interface{}{ "foo": "bar", }, - WrapInfo: &logical.ResponseWrapInfo{ + WrapInfo: &wrapping.ResponseWrapInfo{ TTL: 60, Token: "foo", CreationTime: time.Now(), @@ -139,8 +140,12 @@ func TestHash(t *testing.T) { &logical.Response{ Data: map[string]interface{}{ "foo": "bar", + + // Responses can contain time values, so test that with + // a known fixed value. 
+ "bar": now, }, - WrapInfo: &logical.ResponseWrapInfo{ + WrapInfo: &wrapping.ResponseWrapInfo{ TTL: 60, Token: "bar", CreationTime: now, @@ -150,8 +155,9 @@ func TestHash(t *testing.T) { &logical.Response{ Data: map[string]interface{}{ "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", + "bar": now.Format(time.RFC3339Nano), }, - WrapInfo: &logical.ResponseWrapInfo{ + WrapInfo: &wrapping.ResponseWrapInfo{ TTL: 60, Token: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", CreationTime: now, diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go index cc2cfe5..614e153 100644 --- a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend.go @@ -2,18 +2,24 @@ package file import ( "fmt" + "io/ioutil" "os" "path/filepath" "strconv" + "strings" "sync" "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" ) func Factory(conf *audit.BackendConfig) (audit.Backend, error) { - if conf.Salt == nil { - return nil, fmt.Errorf("nil salt") + if conf.SaltConfig == nil { + return nil, fmt.Errorf("nil salt config") + } + if conf.SaltView == nil { + return nil, fmt.Errorf("nil salt view") } path, ok := conf.Config["file_path"] @@ -24,6 +30,14 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { } } + // normalize path if configured for stdout + if strings.ToLower(path) == "stdout" { + path = "stdout" + } + if strings.ToLower(path) == "discard" { + path = "discard" + } + format, ok := conf.Config["format"] if !ok { format = "json" @@ -65,11 +79,12 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { } b := &Backend{ - path: path, - mode: mode, + path: path, + mode: mode, + saltConfig: conf.SaltConfig, + saltView: conf.SaltView, formatConfig: audit.FormatterConfig{ Raw: 
logRaw, - Salt: conf.Salt, HMACAccessor: hmacAccessor, }, } @@ -77,19 +92,26 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { switch format { case "json": b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ - Prefix: conf.Config["prefix"], + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, } case "jsonx": b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{ - Prefix: conf.Config["prefix"], + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, } } - // Ensure that the file can be successfully opened for writing; - // otherwise it will be too late to catch later without problems - // (ref: https://github.com/hashicorp/vault/issues/550) - if err := b.open(); err != nil { - return nil, fmt.Errorf("sanity check failed; unable to open %s for writing: %v", path, err) + switch path { + case "stdout", "discard": + // no need to test opening file if outputting to stdout or discarding + default: + // Ensure that the file can be successfully opened for writing; + // otherwise it will be too late to catch later without problems + // (ref: https://github.com/hashicorp/vault/issues/550) + if err := b.open(); err != nil { + return nil, fmt.Errorf("sanity check failed; unable to open %s for writing: %v", path, err) + } } return b, nil @@ -109,16 +131,64 @@ type Backend struct { fileLock sync.RWMutex f *os.File mode os.FileMode + + saltMutex sync.RWMutex + salt *salt.Salt + saltConfig *salt.Config + saltView logical.Storage } -func (b *Backend) GetHash(data string) string { - return audit.HashString(b.formatConfig.Salt, data) +func (b *Backend) Salt() (*salt.Salt, error) { + b.saltMutex.RLock() + if b.salt != nil { + defer b.saltMutex.RUnlock() + return b.salt, nil + } + b.saltMutex.RUnlock() + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + if b.salt != nil { + return b.salt, nil + } + salt, err := salt.NewSalt(b.saltView, b.saltConfig) + if err != nil { + return nil, err + } + b.salt = salt + return salt, nil +} + +func (b *Backend) GetHash(data 
string) (string, error) { + salt, err := b.Salt() + if err != nil { + return "", err + } + return audit.HashString(salt, data), nil } func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error { b.fileLock.Lock() defer b.fileLock.Unlock() + switch b.path { + case "stdout": + return b.formatter.FormatRequest(os.Stdout, b.formatConfig, auth, req, outerErr) + case "discard": + return b.formatter.FormatRequest(ioutil.Discard, b.formatConfig, auth, req, outerErr) + } + + if err := b.open(); err != nil { + return err + } + + if err := b.formatter.FormatRequest(b.f, b.formatConfig, auth, req, outerErr); err == nil { + return nil + } + + // Opportunistically try to re-open the FD, once per call + b.f.Close() + b.f = nil + if err := b.open(); err != nil { return err } @@ -135,6 +205,25 @@ func (b *Backend) LogResponse( b.fileLock.Lock() defer b.fileLock.Unlock() + switch b.path { + case "stdout": + return b.formatter.FormatResponse(os.Stdout, b.formatConfig, auth, req, resp, err) + case "discard": + return b.formatter.FormatResponse(ioutil.Discard, b.formatConfig, auth, req, resp, err) + } + + if err := b.open(); err != nil { + return err + } + + if err := b.formatter.FormatResponse(b.f, b.formatConfig, auth, req, resp, err); err == nil { + return nil + } + + // Opportunistically try to re-open the FD, once per call + b.f.Close() + b.f = nil + if err := b.open(); err != nil { return err } @@ -172,6 +261,11 @@ func (b *Backend) open() error { } func (b *Backend) Reload() error { + switch b.path { + case "stdout", "discard": + return nil + } + b.fileLock.Lock() defer b.fileLock.Unlock() @@ -189,3 +283,9 @@ func (b *Backend) Reload() error { return b.open() } + +func (b *Backend) Invalidate() { + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + b.salt = nil +} diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go index 0a1a8c7..3b4ec84 100644 
--- a/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/audit/file/backend_test.go @@ -9,15 +9,21 @@ import ( "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/helper/salt" + "github.com/hashicorp/vault/logical" ) func TestAuditFile_fileModeNew(t *testing.T) { - salter, _ := salt.NewSalt(nil, nil) - modeStr := "0777" mode, err := strconv.ParseUint(modeStr, 8, 32) + if err != nil { + t.Fatal(err) + } path, err := ioutil.TempDir("", "vault-test_audit_file-file_mode_new") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(path) file := filepath.Join(path, "auditTest.txt") @@ -28,8 +34,9 @@ func TestAuditFile_fileModeNew(t *testing.T) { } _, err = Factory(&audit.BackendConfig{ - Salt: salter, - Config: config, + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Config: config, }) if err != nil { t.Fatal(err) @@ -45,8 +52,6 @@ func TestAuditFile_fileModeNew(t *testing.T) { } func TestAuditFile_fileModeExisting(t *testing.T) { - salter, _ := salt.NewSalt(nil, nil) - f, err := ioutil.TempFile("", "test") if err != nil { t.Fatalf("Failure to create test file.") @@ -68,8 +73,9 @@ func TestAuditFile_fileModeExisting(t *testing.T) { } _, err = Factory(&audit.BackendConfig{ - Salt: salter, - Config: config, + Config: config, + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, }) if err != nil { t.Fatal(err) diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go index 91e701e..bf0ce7f 100644 --- a/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/audit/socket/backend.go @@ -11,12 +11,16 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/helper/parseutil" + "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" ) 
func Factory(conf *audit.BackendConfig) (audit.Backend, error) { - if conf.Salt == nil { - return nil, fmt.Errorf("nil salt passed in") + if conf.SaltConfig == nil { + return nil, fmt.Errorf("nil salt config") + } + if conf.SaltView == nil { + return nil, fmt.Errorf("nil salt view") } address, ok := conf.Config["address"] @@ -68,18 +72,14 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { logRaw = b } - conn, err := net.Dial(socketType, address) - if err != nil { - return nil, err - } - b := &Backend{ - connection: conn, + saltConfig: conf.SaltConfig, + saltView: conf.SaltView, formatConfig: audit.FormatterConfig{ Raw: logRaw, - Salt: conf.Salt, HMACAccessor: hmacAccessor, }, + writeDuration: writeDuration, address: address, socketType: socketType, @@ -88,11 +88,13 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { switch format { case "json": b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ - Prefix: conf.Config["prefix"], + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, } case "jsonx": b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{ - Prefix: conf.Config["prefix"], + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, } } @@ -111,10 +113,19 @@ type Backend struct { socketType string sync.Mutex + + saltMutex sync.RWMutex + salt *salt.Salt + saltConfig *salt.Config + saltView logical.Storage } -func (b *Backend) GetHash(data string) string { - return audit.HashString(b.formatConfig.Salt, data) +func (b *Backend) GetHash(data string) (string, error) { + salt, err := b.Salt() + if err != nil { + return "", err + } + return audit.HashString(salt, data), nil } func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error { @@ -165,6 +176,12 @@ func (b *Backend) LogResponse(auth *logical.Auth, req *logical.Request, } func (b *Backend) write(buf []byte) error { + if b.connection == nil { + if err := b.reconnect(); err != nil { + return err + } + } + err := 
b.connection.SetWriteDeadline(time.Now().Add(b.writeDuration)) if err != nil { return err @@ -179,12 +196,16 @@ func (b *Backend) write(buf []byte) error { } func (b *Backend) reconnect() error { + if b.connection != nil { + b.connection.Close() + b.connection = nil + } + conn, err := net.Dial(b.socketType, b.address) if err != nil { return err } - b.connection.Close() b.connection = conn return nil @@ -198,3 +219,29 @@ func (b *Backend) Reload() error { return err } + +func (b *Backend) Salt() (*salt.Salt, error) { + b.saltMutex.RLock() + if b.salt != nil { + defer b.saltMutex.RUnlock() + return b.salt, nil + } + b.saltMutex.RUnlock() + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + if b.salt != nil { + return b.salt, nil + } + salt, err := salt.NewSalt(b.saltView, b.saltConfig) + if err != nil { + return nil, err + } + b.salt = salt + return salt, nil +} + +func (b *Backend) Invalidate() { + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + b.salt = nil +} diff --git a/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go b/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go index 4b1912f..22c39d4 100644 --- a/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/audit/syslog/backend.go @@ -4,15 +4,20 @@ import ( "bytes" "fmt" "strconv" + "sync" "github.com/hashicorp/go-syslog" "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" ) func Factory(conf *audit.BackendConfig) (audit.Backend, error) { - if conf.Salt == nil { - return nil, fmt.Errorf("Nil salt passed in") + if conf.SaltConfig == nil { + return nil, fmt.Errorf("nil salt config") + } + if conf.SaltView == nil { + return nil, fmt.Errorf("nil salt view") } // Get facility or default to AUTH @@ -64,10 +69,11 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { } b := &Backend{ - logger: logger, + logger: logger, + saltConfig: conf.SaltConfig, + 
saltView: conf.SaltView, formatConfig: audit.FormatterConfig{ Raw: logRaw, - Salt: conf.Salt, HMACAccessor: hmacAccessor, }, } @@ -75,11 +81,13 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) { switch format { case "json": b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ - Prefix: conf.Config["prefix"], + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, } case "jsonx": b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{ - Prefix: conf.Config["prefix"], + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, } } @@ -92,10 +100,19 @@ type Backend struct { formatter audit.AuditFormatter formatConfig audit.FormatterConfig + + saltMutex sync.RWMutex + salt *salt.Salt + saltConfig *salt.Config + saltView logical.Storage } -func (b *Backend) GetHash(data string) string { - return audit.HashString(b.formatConfig.Salt, data) +func (b *Backend) GetHash(data string) (string, error) { + salt, err := b.Salt() + if err != nil { + return "", err + } + return audit.HashString(salt, data), nil } func (b *Backend) LogRequest(auth *logical.Auth, req *logical.Request, outerErr error) error { @@ -123,3 +140,29 @@ func (b *Backend) LogResponse(auth *logical.Auth, req *logical.Request, resp *lo func (b *Backend) Reload() error { return nil } + +func (b *Backend) Salt() (*salt.Salt, error) { + b.saltMutex.RLock() + if b.salt != nil { + defer b.saltMutex.RUnlock() + return b.salt, nil + } + b.saltMutex.RUnlock() + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + if b.salt != nil { + return b.salt, nil + } + salt, err := salt.NewSalt(b.saltView, b.saltConfig) + if err != nil { + return nil, err + } + b.salt = salt + return salt, nil +} + +func (b *Backend) Invalidate() { + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + b.salt = nil +} diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go index 76d9a6e..a25c9ee 100644 --- 
a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend.go @@ -1,7 +1,7 @@ package appId import ( - "fmt" + "sync" "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" @@ -13,10 +13,13 @@ func Factory(conf *logical.BackendConfig) (logical.Backend, error) { if err != nil { return nil, err } - return b.Setup(conf) + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } -func Backend(conf *logical.BackendConfig) (*framework.Backend, error) { +func Backend(conf *logical.BackendConfig) (*backend, error) { var b backend b.MapAppId = &framework.PolicyMap{ PathMap: framework.PathMap{ @@ -60,7 +63,6 @@ func Backend(conf *logical.BackendConfig) (*framework.Backend, error) { "login/*", }, }, - Paths: framework.PathAppend([]*framework.Path{ pathLogin(&b), pathLoginWithAppIDPath(&b), @@ -68,110 +70,58 @@ func Backend(conf *logical.BackendConfig) (*framework.Backend, error) { b.MapAppId.Paths(), b.MapUserId.Paths(), ), - - AuthRenew: b.pathLoginRenew, - - Init: b.initialize, + AuthRenew: b.pathLoginRenew, + Invalidate: b.invalidate, + BackendType: logical.TypeCredential, } b.view = conf.StorageView + b.MapAppId.SaltFunc = b.Salt + b.MapUserId.SaltFunc = b.Salt - return b.Backend, nil + return &b, nil } type backend struct { *framework.Backend - Salt *salt.Salt + salt *salt.Salt + SaltMutex sync.RWMutex view logical.Storage MapAppId *framework.PolicyMap MapUserId *framework.PathMap } -func (b *backend) initialize() error { +func (b *backend) Salt() (*salt.Salt, error) { + b.SaltMutex.RLock() + if b.salt != nil { + defer b.SaltMutex.RUnlock() + return b.salt, nil + } + b.SaltMutex.RUnlock() + b.SaltMutex.Lock() + defer b.SaltMutex.Unlock() + if b.salt != nil { + return b.salt, nil + } salt, err := salt.NewSalt(b.view, &salt.Config{ HashFunc: salt.SHA1Hash, + Location: salt.DefaultLocation, }) if err != nil { - return err + return nil, err } - 
b.Salt = salt - - b.MapAppId.Salt = salt - b.MapUserId.Salt = salt - - // Since the salt is new in 0.2, we need to handle this by migrating - // any existing keys to use the salt. We can deprecate this eventually, - // but for now we want a smooth upgrade experience by automatically - // upgrading to use salting. - if salt.DidGenerate() { - if err := b.upgradeToSalted(b.view); err != nil { - return err - } - } - - return nil + b.salt = salt + return salt, nil } -// upgradeToSalted is used to upgrade the non-salted keys prior to -// Vault 0.2 to be salted. This is done on mount time and is only -// done once. It can be deprecated eventually, but should be around -// long enough for all 0.1.x users to upgrade. -func (b *backend) upgradeToSalted(view logical.Storage) error { - // Create a copy of MapAppId that does not use a Salt - nonSaltedAppId := new(framework.PathMap) - *nonSaltedAppId = b.MapAppId.PathMap - nonSaltedAppId.Salt = nil - - // Get the list of app-ids - keys, err := b.MapAppId.List(view, "") - if err != nil { - return fmt.Errorf("failed to list app-ids: %v", err) +func (b *backend) invalidate(key string) { + switch key { + case salt.DefaultLocation: + b.SaltMutex.Lock() + defer b.SaltMutex.Unlock() + b.salt = nil } - - // Upgrade all the existing keys - for _, key := range keys { - val, err := nonSaltedAppId.Get(view, key) - if err != nil { - return fmt.Errorf("failed to read app-id: %v", err) - } - - if err := b.MapAppId.Put(view, key, val); err != nil { - return fmt.Errorf("failed to write app-id: %v", err) - } - - if err := nonSaltedAppId.Delete(view, key); err != nil { - return fmt.Errorf("failed to delete app-id: %v", err) - } - } - - // Create a copy of MapUserId that does not use a Salt - nonSaltedUserId := new(framework.PathMap) - *nonSaltedUserId = *b.MapUserId - nonSaltedUserId.Salt = nil - - // Get the list of user-ids - keys, err = b.MapUserId.List(view, "") - if err != nil { - return fmt.Errorf("failed to list user-ids: %v", err) - } - - 
// Upgrade all the existing keys - for _, key := range keys { - val, err := nonSaltedUserId.Get(view, key) - if err != nil { - return fmt.Errorf("failed to read user-id: %v", err) - } - - if err := b.MapUserId.Put(view, key, val); err != nil { - return fmt.Errorf("failed to write user-id: %v", err) - } - - if err := nonSaltedUserId.Delete(view, key); err != nil { - return fmt.Errorf("failed to delete user-id: %v", err) - } - } - return nil } const backendHelp = ` diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go index 2960e40..4ae5d3e 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/app-id/backend_test.go @@ -9,8 +9,22 @@ import ( ) func TestBackend_basic(t *testing.T) { + var b *backend + var err error + var storage logical.Storage + factory := func(conf *logical.BackendConfig) (logical.Backend, error) { + b, err = Backend(conf) + if err != nil { + t.Fatal(err) + } + storage = conf.StorageView + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil + } logicaltest.Test(t, logicaltest.TestCase{ - Factory: Factory, + Factory: factory, Steps: []logicaltest.TestStep{ testAccStepMapAppId(t), testAccStepMapUserId(t), @@ -21,6 +35,30 @@ func TestBackend_basic(t *testing.T) { testAccLoginDeleted(t), }, }) + + req := &logical.Request{ + Path: "map/app-id", + Operation: logical.ListOperation, + Storage: storage, + } + resp, err := b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("nil response") + } + keys := resp.Data["keys"].([]string) + if len(keys) != 1 { + t.Fatalf("expected 1 key, got %d", len(keys)) + } + salt, err := b.Salt() + if err != nil { + t.Fatal(err) + } + if keys[0] != salt.SaltID("foo") { + t.Fatal("value was improperly salted") + } } func TestBackend_cidr(t *testing.T) { @@ -51,70 
+89,6 @@ func TestBackend_displayName(t *testing.T) { }) } -// Verify that we are able to update from non-salted (<0.2) to -// using a Salt for the paths -func TestBackend_upgradeToSalted(t *testing.T) { - inm := new(logical.InmemStorage) - - // Create some fake keys - se, _ := logical.StorageEntryJSON("struct/map/app-id/foo", - map[string]string{"value": "test"}) - inm.Put(se) - se, _ = logical.StorageEntryJSON("struct/map/user-id/bar", - map[string]string{"value": "foo"}) - inm.Put(se) - - // Initialize the backend, this should do the automatic upgrade - conf := &logical.BackendConfig{ - StorageView: inm, - } - backend, err := Factory(conf) - if err != nil { - t.Fatalf("err: %v", err) - } - err = backend.Initialize() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Check the keys have been upgraded - out, err := inm.Get("struct/map/app-id/foo") - if err != nil { - t.Fatalf("err: %v", err) - } - if out != nil { - t.Fatalf("unexpected key") - } - out, err = inm.Get("struct/map/user-id/bar") - if err != nil { - t.Fatalf("err: %v", err) - } - if out != nil { - t.Fatalf("unexpected key") - } - - // Backend should still be able to resolve - req := logical.TestRequest(t, logical.ReadOperation, "map/app-id/foo") - req.Storage = inm - resp, err := backend.HandleRequest(req) - if err != nil { - t.Fatalf("err: %v", err) - } - if resp.Data["value"] != "test" { - t.Fatalf("bad: %#v", resp) - } - - req = logical.TestRequest(t, logical.ReadOperation, "map/user-id/bar") - req.Storage = inm - resp, err = backend.HandleRequest(req) - if err != nil { - t.Fatalf("err: %v", err) - } - if resp.Data["value"] != "foo" { - t.Fatalf("bad: %#v", resp) - } -} - func testAccStepMapAppId(t *testing.T) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go index cd5d97b..d086d3c 100644 --- 
a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend.go @@ -14,7 +14,8 @@ type backend struct { // The salt value to be used by the information to be accessed only // by this backend. - salt *salt.Salt + salt *salt.Salt + saltMutex sync.RWMutex // The view to use when creating the salt view logical.Storage @@ -53,7 +54,10 @@ func Factory(conf *logical.BackendConfig) (logical.Backend, error) { if err != nil { return nil, err } - return b.Setup(conf) + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend(conf *logical.BackendConfig) (*backend, error) { @@ -92,20 +96,42 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { pathTidySecretID(b), }, ), - Init: b.initialize, + Invalidate: b.invalidate, + BackendType: logical.TypeCredential, } return b, nil } -func (b *backend) initialize() error { +func (b *backend) Salt() (*salt.Salt, error) { + b.saltMutex.RLock() + if b.salt != nil { + defer b.saltMutex.RUnlock() + return b.salt, nil + } + b.saltMutex.RUnlock() + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + if b.salt != nil { + return b.salt, nil + } salt, err := salt.NewSalt(b.view, &salt.Config{ HashFunc: salt.SHA256Hash, + Location: salt.DefaultLocation, }) if err != nil { - return err + return nil, err } b.salt = salt - return nil + return salt, nil +} + +func (b *backend) invalidate(key string) { + switch key { + case salt.DefaultLocation: + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + b.salt = nil + } } // periodicFunc of the backend will be invoked once a minute by the RollbackManager. 
diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go index e49cf48..5f16e5f 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/backend_test.go @@ -17,11 +17,7 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { if b == nil { t.Fatalf("failed to create backend") } - _, err = b.Backend.Setup(config) - if err != nil { - t.Fatal(err) - } - err = b.Initialize() + err = b.Backend.Setup(config) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go index d40530e..9b902a4 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_login.go @@ -38,6 +38,9 @@ func (b *backend) pathLoginUpdate(req *logical.Request, data *framework.FieldDat return logical.ErrorResponse(fmt.Sprintf("failed to validate SecretID: %s", err)), nil } + // Always include the role name, for later filtering + metadata["role_name"] = roleName + auth := &logical.Auth{ NumUses: role.TokenNumUses, Period: role.Period, diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go index 2a1ff1a..b9f7e5b 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role.go @@ -113,7 +113,7 @@ func rolePaths(b *backend) []*framework.Path { addresses which can perform the login operation`, }, "policies": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Default: "default", Description: "Comma 
separated list of policies on the role.", }, @@ -172,7 +172,7 @@ TTL will be set to the value of this parameter.`, Description: "Name of the role.", }, "policies": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Default: "default", Description: "Comma separated list of policies on the role.", }, @@ -768,9 +768,9 @@ func (b *backend) pathRoleCreateUpdate(req *logical.Request, data *framework.Fie } if policiesRaw, ok := data.GetOk("policies"); ok { - role.Policies = policyutil.ParsePolicies(policiesRaw.(string)) + role.Policies = policyutil.ParsePolicies(policiesRaw) } else if req.Operation == logical.CreateOperation { - role.Policies = policyutil.ParsePolicies(data.Get("policies").(string)) + role.Policies = policyutil.ParsePolicies(data.Get("policies")) } periodRaw, ok := data.GetOk("period") @@ -1306,8 +1306,8 @@ func (b *backend) pathRolePoliciesUpdate(req *logical.Request, data *framework.F return nil, nil } - policies := strings.TrimSpace(data.Get("policies").(string)) - if policies == "" { + policiesRaw, ok := data.GetOk("policies") + if !ok { return logical.ErrorResponse("missing policies"), nil } @@ -1316,7 +1316,7 @@ func (b *backend) pathRolePoliciesUpdate(req *logical.Request, data *framework.F lock.Lock() defer lock.Unlock() - role.Policies = policyutil.ParsePolicies(policies) + role.Policies = policyutil.ParsePolicies(policiesRaw) return nil, b.setRoleEntry(req.Storage, roleName, role, "") } @@ -1359,7 +1359,7 @@ func (b *backend) pathRolePoliciesDelete(req *logical.Request, data *framework.F lock.Lock() defer lock.Unlock() - role.Policies = policyutil.ParsePolicies(data.GetDefaultOrZero("policies").(string)) + role.Policies = []string{} return nil, b.setRoleEntry(req.Storage, roleName, role, "") } @@ -1939,7 +1939,11 @@ func (b *backend) setRoleIDEntry(s logical.Storage, roleID string, roleIDEntry * lock.Lock() defer lock.Unlock() - entryIndex := "role_id/" + b.salt.SaltID(roleID) + salt, err := b.Salt() + 
if err != nil { + return err + } + entryIndex := "role_id/" + salt.SaltID(roleID) entry, err := logical.StorageEntryJSON(entryIndex, roleIDEntry) if err != nil { @@ -1963,7 +1967,11 @@ func (b *backend) roleIDEntry(s logical.Storage, roleID string) (*roleIDStorageE var result roleIDStorageEntry - entryIndex := "role_id/" + b.salt.SaltID(roleID) + salt, err := b.Salt() + if err != nil { + return nil, err + } + entryIndex := "role_id/" + salt.SaltID(roleID) if entry, err := s.Get(entryIndex); err != nil { return nil, err @@ -1987,7 +1995,11 @@ func (b *backend) roleIDEntryDelete(s logical.Storage, roleID string) error { lock.Lock() defer lock.Unlock() - entryIndex := "role_id/" + b.salt.SaltID(roleID) + salt, err := b.Salt() + if err != nil { + return err + } + entryIndex := "role_id/" + salt.SaltID(roleID) return s.Delete(entryIndex) } diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go index a40cbe1..fa3e681 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/path_role_test.go @@ -61,6 +61,9 @@ func TestAppRole_CIDRSubset(t *testing.T) { secretIDData["cidr_list"] = "192.168.27.29/20,172.245.30.40/25,10.20.30.40/32" resp, err = b.HandleRequest(secretIDReq) + if err != nil { + t.Fatal(err) + } if resp != nil && resp.IsError() { t.Fatalf("resp: %#v", resp) } @@ -605,7 +608,7 @@ func TestAppRole_RoleCRUD(t *testing.T) { expected := map[string]interface{}{ "bind_secret_id": true, - "policies": []string{"default", "p", "q", "r", "s"}, + "policies": []string{"p", "q", "r", "s"}, "secret_id_num_uses": 10, "secret_id_ttl": 300, "token_ttl": 400, @@ -653,7 +656,7 @@ func TestAppRole_RoleCRUD(t *testing.T) { } expected = map[string]interface{}{ - "policies": []string{"a", "b", "c", "d", "default"}, + "policies": []string{"a", "b", "c", "d"}, 
"secret_id_num_uses": 100, "secret_id_ttl": 3000, "token_ttl": 4000, @@ -761,7 +764,7 @@ func TestAppRole_RoleCRUD(t *testing.T) { t.Fatalf("err:%v resp:%#v", err, resp) } - if !reflect.DeepEqual(resp.Data["policies"].([]string), []string{"a1", "b1", "c1", "d1", "default"}) { + if !reflect.DeepEqual(resp.Data["policies"].([]string), []string{"a1", "b1", "c1", "d1"}) { t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string)) } roleReq.Operation = logical.DeleteOperation diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go b/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go index db668a8..c7e32e1 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/approle/validation.go @@ -91,7 +91,7 @@ func (b *backend) validateRoleID(s logical.Storage, roleID string) (*roleStorage // Validates the supplied RoleID and SecretID func (b *backend) validateCredentials(req *logical.Request, data *framework.FieldData) (*roleStorageEntry, string, map[string]string, error) { - var metadata map[string]string + metadata := make(map[string]string) // RoleID must be supplied during every login roleID := strings.TrimSpace(data.Get("role_id").(string)) if roleID == "" { @@ -469,7 +469,11 @@ func (b *backend) secretIDAccessorEntry(s logical.Storage, secretIDAccessor stri var result secretIDAccessorStorageEntry // Create index entry, mapping the accessor to the token ID - entryIndex := "accessor/" + b.salt.SaltID(secretIDAccessor) + salt, err := b.Salt() + if err != nil { + return nil, err + } + entryIndex := "accessor/" + salt.SaltID(secretIDAccessor) accessorLock := b.secretIDAccessorLock(secretIDAccessor) accessorLock.RLock() @@ -498,7 +502,11 @@ func (b *backend) createSecretIDAccessorEntry(s logical.Storage, entry *secretID entry.SecretIDAccessor = accessorUUID // Create index entry, mapping the accessor to the token ID - 
entryIndex := "accessor/" + b.salt.SaltID(entry.SecretIDAccessor) + salt, err := b.Salt() + if err != nil { + return err + } + entryIndex := "accessor/" + salt.SaltID(entry.SecretIDAccessor) accessorLock := b.secretIDAccessorLock(accessorUUID) accessorLock.Lock() @@ -517,7 +525,11 @@ func (b *backend) createSecretIDAccessorEntry(s logical.Storage, entry *secretID // deleteSecretIDAccessorEntry deletes the storage index mapping the accessor to a SecretID. func (b *backend) deleteSecretIDAccessorEntry(s logical.Storage, secretIDAccessor string) error { - accessorEntryIndex := "accessor/" + b.salt.SaltID(secretIDAccessor) + salt, err := b.Salt() + if err != nil { + return err + } + accessorEntryIndex := "accessor/" + salt.SaltID(secretIDAccessor) accessorLock := b.secretIDAccessorLock(secretIDAccessor) accessorLock.Lock() diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go index fbd62c7..30feba9 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend.go @@ -1,14 +1,16 @@ package awsauth import ( + "fmt" "sync" "time" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" + "github.com/patrickmn/go-cache" ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { @@ -16,15 +18,14 @@ func Factory(conf *logical.BackendConfig) (logical.Backend, error) { if err != nil { return nil, err } - return b.Setup(conf) + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } type backend struct { *framework.Backend - Salt *salt.Salt - - // Used during initialization to set the salt - view logical.Storage // Lock to make changes to any of the backend's 
configuration endpoints. configMutex sync.RWMutex @@ -59,18 +60,34 @@ type backend struct { // When the credentials are modified or deleted, all the cached client objects // will be flushed. The empty STS role signifies the master account IAMClientsMap map[string]map[string]*iam.IAM + + // Map of AWS unique IDs to the full ARN corresponding to that unique ID + // This avoids the overhead of an AWS API hit for every login request + // using the IAM auth method when bound_iam_principal_arn contains a wildcard + iamUserIdToArnCache *cache.Cache + + // AWS Account ID of the "default" AWS credentials + // This cache avoids the need to call GetCallerIdentity repeatedly to learn it + // We can't store this because, in certain pathological cases, it could change + // out from under us, such as a standby and active Vault server in different AWS + // accounts using their IAM instance profile to get their credentials. + defaultAWSAccountID string + + resolveArnToUniqueIDFunc func(logical.Storage, string) (string, error) } func Backend(conf *logical.BackendConfig) (*backend, error) { b := &backend{ // Setting the periodic func to be run once in an hour. // If there is a real need, this can be made configurable. 
- tidyCooldownPeriod: time.Hour, - view: conf.StorageView, - EC2ClientsMap: make(map[string]map[string]*ec2.EC2), - IAMClientsMap: make(map[string]map[string]*iam.IAM), + tidyCooldownPeriod: time.Hour, + EC2ClientsMap: make(map[string]map[string]*ec2.EC2), + IAMClientsMap: make(map[string]map[string]*iam.IAM), + iamUserIdToArnCache: cache.New(7*24*time.Hour, 24*time.Hour), } + b.resolveArnToUniqueIDFunc = b.resolveArnToRealUniqueId + b.Backend = &framework.Backend{ PeriodicFunc: b.periodicFunc, AuthRenew: b.pathLoginRenew, @@ -103,26 +120,13 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { pathIdentityWhitelist(b), pathTidyIdentityWhitelist(b), }, - - Invalidate: b.invalidate, - - Init: b.initialize, + Invalidate: b.invalidate, + BackendType: logical.TypeCredential, } return b, nil } -func (b *backend) initialize() error { - salt, err := salt.NewSalt(b.view, &salt.Config{ - HashFunc: salt.SHA256Hash, - }) - if err != nil { - return err - } - b.Salt = salt - return nil -} - // periodicFunc performs the tasks that the backend wishes to do periodically. // Currently this will be triggered once in a minute by the RollbackManager. // @@ -190,9 +194,86 @@ func (b *backend) invalidate(key string) { defer b.configMutex.Unlock() b.flushCachedEC2Clients() b.flushCachedIAMClients() + b.defaultAWSAccountID = "" } } +// Putting this here so we can inject a fake resolver into the backend for unit testing +// purposes +func (b *backend) resolveArnToRealUniqueId(s logical.Storage, arn string) (string, error) { + entity, err := parseIamArn(arn) + if err != nil { + return "", err + } + // This odd-looking code is here because IAM is an inherently global service. 
IAM and STS ARNs + don't have regions in them, and there is only a single global endpoint for IAM; see + http://docs.aws.amazon.com/general/latest/gr/rande.html#iam_region + However, the ARNs do have a partition in them, because the GovCloud and China partitions DO + have their own separate endpoints, and the partition is encoded in the ARN. If Amazon's Go SDK + would allow us to pass a partition back to the IAM client, it would be much simpler. But it + doesn't appear that's possible, so in order to properly support GovCloud and China, we do a + circular dance of extracting the partition from the ARN, finding any arbitrary region in the + partition, and passing that region back to the SDK, so that the SDK can figure out the + proper partition from the arbitrary region we passed in to look up the endpoint. + Sigh + region := getAnyRegionForAwsPartition(entity.Partition) + if region == nil { + return "", fmt.Errorf("Unable to resolve partition %q to a region", entity.Partition) + } + iamClient, err := b.clientIAM(s, region.ID(), entity.AccountNumber) + if err != nil { + return "", err + } + + switch entity.Type { + case "user": + userInfo, err := iamClient.GetUser(&iam.GetUserInput{UserName: &entity.FriendlyName}) + if err != nil { + return "", err + } + if userInfo == nil { + return "", fmt.Errorf("got nil result from GetUser") + } + return *userInfo.User.UserId, nil + case "role": + roleInfo, err := iamClient.GetRole(&iam.GetRoleInput{RoleName: &entity.FriendlyName}) + if err != nil { + return "", err + } + if roleInfo == nil { + return "", fmt.Errorf("got nil result from GetRole") + } + return *roleInfo.Role.RoleId, nil + case "instance-profile": + profileInfo, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName}) + if err != nil { + return "", err + } + if profileInfo == nil { + return "", fmt.Errorf("got nil result from GetInstanceProfile") + } + return
*profileInfo.InstanceProfile.InstanceProfileId, nil + default: + return "", fmt.Errorf("unrecognized error type %#v", entity.Type) + } +} + +// Adapted from https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/ +// the "Enumerating Regions and Endpoint Metadata" section +func getAnyRegionForAwsPartition(partitionId string) *endpoints.Region { + resolver := endpoints.DefaultResolver() + partitions := resolver.(endpoints.EnumPartitions).Partitions() + + for _, p := range partitions { + if p.ID() == partitionId { + for _, r := range p.Regions() { + return &r + } + } + } + return nil +} + const backendHelp = ` aws-ec2 auth backend takes in PKCS#7 signature of an AWS EC2 instance and a client created nonce to authenticates the EC2 instance with Vault. diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go index a539fba..881ca85 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/backend_test.go @@ -9,11 +9,13 @@ import ( "os" "strings" "testing" + "time" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/vault/helper/policyutil" "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" logicaltest "github.com/hashicorp/vault/logical/testing" ) @@ -27,7 +29,7 @@ func TestBackend_CreateParseVerifyRoleTag(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -251,7 +253,7 @@ func TestBackend_ConfigTidyIdentities(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -305,7 +307,7 @@ func TestBackend_ConfigTidyRoleTags(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { 
t.Fatal(err) } @@ -359,7 +361,7 @@ func TestBackend_TidyIdentities(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -384,7 +386,7 @@ func TestBackend_TidyRoleTags(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -409,7 +411,7 @@ func TestBackend_ConfigClient(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -546,7 +548,7 @@ func TestBackend_pathConfigCertificate(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -614,6 +616,9 @@ MlpCclZOR3JOOU4yZjZST2swazlLCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K certReq.Operation = logical.ReadOperation // test read operation resp, err = b.HandleRequest(certReq) + if err != nil { + t.Fatal(err) + } expectedCert := `-----BEGIN CERTIFICATE----- MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD @@ -698,7 +703,7 @@ func TestBackend_parseAndVerifyRoleTagValue(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -730,6 +735,9 @@ func TestBackend_parseAndVerifyRoleTagValue(t *testing.T) { Path: "role/abcd-123", Storage: storage, }) + if err != nil { + t.Fatal(err) + } if resp == nil { t.Fatalf("expected an role entry for abcd-123") } @@ -776,7 +784,7 @@ func TestBackend_PathRoleTag(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -841,7 +849,7 @@ func TestBackend_PathBlacklistRoleTag(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -989,7 +997,7 @@ func 
TestBackendAcc_LoginWithInstanceIdentityDocAndWhitelistIdentity(t *testing. if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -1169,7 +1177,7 @@ func TestBackend_pathStsConfig(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -1216,6 +1224,9 @@ func TestBackend_pathStsConfig(t *testing.T) { stsReq.Operation = logical.ReadOperation // test read operation resp, err = b.HandleRequest(stsReq) + if err != nil { + t.Fatal(err) + } expectedStsRole := "arn:aws:iam:account1:role/myRole" if resp.Data["sts_role"].(string) != expectedStsRole { t.Fatalf("bad: expected:%s\n got:%s\n", expectedStsRole, resp.Data["sts_role"].(string)) @@ -1314,7 +1325,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -1346,7 +1357,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { if err != nil { t.Fatalf("Received error retrieving identity: %s", err) } - testIdentityArn, _, _, err := parseIamArn(*testIdentity.Arn) + entity, err := parseIamArn(*testIdentity.Arn) if err != nil { t.Fatal(err) } @@ -1385,7 +1396,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { // configuring the valid role we'll be able to login to roleData := map[string]interface{}{ - "bound_iam_principal_arn": testIdentityArn, + "bound_iam_principal_arn": entity.canonicalArn(), "policies": "root", "auth_type": iamAuthType, } @@ -1417,8 +1428,17 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { t.Fatalf("bad: failed to create role; resp:%#v\nerr:%v", resp, err) } + fakeArn := "arn:aws:iam::123456789012:role/somePath/FakeRole" + fakeArnResolver := func(s logical.Storage, arn string) (string, error) { + if arn == fakeArn { + return fmt.Sprintf("FakeUniqueIdFor%s", fakeArn), nil + } + return 
b.resolveArnToRealUniqueId(s, arn) + } + b.resolveArnToUniqueIDFunc = fakeArnResolver + // now we're creating the invalid role we won't be able to login to - roleData["bound_iam_principal_arn"] = "arn:aws:iam::123456789012:role/FakeRole" + roleData["bound_iam_principal_arn"] = fakeArn roleRequest.Path = "role/" + testInvalidRoleName resp, err = b.HandleRequest(roleRequest) if err != nil || (resp != nil && resp.IsError()) { @@ -1491,7 +1511,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { t.Errorf("bad: expected failed login due to bad auth type: resp:%#v\nerr:%v", resp, err) } - // finally, the happy path tests :) + // finally, the happy path test :) loginData["role"] = testValidRoleName resp, err = b.HandleRequest(loginRequest) @@ -1499,6 +1519,101 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { t.Fatal(err) } if resp == nil || resp.Auth == nil || resp.IsError() { - t.Errorf("bad: expected valid login: resp:%#v", resp) + t.Fatalf("bad: expected valid login: resp:%#v", resp) + } + + renewReq := generateRenewRequest(storage, resp.Auth) + // dump a fake ARN into the metadata to ensure that we ONLY look + // at the unique ID that has been generated + renewReq.Auth.Metadata["canonical_arn"] = "fake_arn" + empty_login_fd := &framework.FieldData{ + Raw: map[string]interface{}{}, + Schema: pathLogin(b).Fields, + } + // ensure we can renew + resp, err = b.pathLoginRenew(renewReq, empty_login_fd) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + if resp.IsError() { + t.Fatalf("got error when renewing: %#v", *resp) + } + + // Now, fake out the unique ID resolver to ensure we fail login if the unique ID + // changes from under us + b.resolveArnToUniqueIDFunc = resolveArnToFakeUniqueId + // First, we need to update the role to force Vault to use our fake resolver to + // pick up the fake user ID + roleData["bound_iam_principal_arn"] = entity.canonicalArn() + roleRequest.Path = "role/" + 
testValidRoleName + resp, err = b.HandleRequest(roleRequest) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: failed to recreate role: resp:%#v\nerr:%v", resp, err) + } + resp, err = b.HandleRequest(loginRequest) + if err != nil || resp == nil || !resp.IsError() { + t.Errorf("bad: expected failed login due to changed AWS role ID: resp: %#v\nerr:%v", resp, err) + } + + // and ensure a renew no longer works + resp, err = b.pathLoginRenew(renewReq, empty_login_fd) + if err == nil || (resp != nil && !resp.IsError()) { + t.Errorf("bad: expected failed renew due to changed AWS role ID: resp: %#v", resp, err) + } + // Undo the fake resolver... + b.resolveArnToUniqueIDFunc = b.resolveArnToRealUniqueId + + // Now test that wildcard matching works + wildcardRoleName := "valid_wildcard" + wildcardEntity := *entity + wildcardEntity.FriendlyName = "*" + roleData["bound_iam_principal_arn"] = wildcardEntity.canonicalArn() + roleRequest.Path = "role/" + wildcardRoleName + resp, err = b.HandleRequest(roleRequest) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: failed to create wildcard role: resp:%#v\nerr:%v", resp, err) + } + + loginData["role"] = wildcardRoleName + resp, err = b.HandleRequest(loginRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Auth == nil || resp.IsError() { + t.Fatalf("bad: expected valid login: resp:%#v", resp) + } + // and ensure we can renew + renewReq = generateRenewRequest(storage, resp.Auth) + resp, err = b.pathLoginRenew(renewReq, empty_login_fd) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + if resp.IsError() { + t.Fatalf("got error when renewing: %#v", *resp) + } + // ensure the cache is populated + cachedArn := b.getCachedUserId(resp.Auth.Metadata["client_user_id"]) + if cachedArn == "" { + t.Errorf("got empty ARN back from user ID cache; expected full arn") } } + +func generateRenewRequest(s logical.Storage, auth *logical.Auth) 
*logical.Request { + renewReq := &logical.Request{ + Storage: s, + Auth: &logical.Auth{}, + } + renewReq.Auth.InternalData = auth.InternalData + renewReq.Auth.Metadata = auth.Metadata + renewReq.Auth.LeaseOptions = auth.LeaseOptions + renewReq.Auth.Policies = auth.Policies + renewReq.Auth.IssueTime = time.Now() + + return renewReq +} diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go index c69187f..2842c24 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/cli.go @@ -16,7 +16,60 @@ import ( type CLIHandler struct{} -func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { +// Generates the necessary data to send to the Vault server for generating a token +// This is useful for other API clients to use +func GenerateLoginData(accessKey, secretKey, sessionToken, headerValue string) (map[string]interface{}, error) { + loginData := make(map[string]interface{}) + + credConfig := &awsutil.CredentialsConfig{ + AccessKey: accessKey, + SecretKey: secretKey, + SessionToken: sessionToken, + } + creds, err := credConfig.GenerateCredentialChain() + if err != nil { + return nil, err + } + if creds == nil { + return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata") + } + + // Use the credentials we've found to construct an STS session + stsSession, err := session.NewSessionWithOptions(session.Options{ + Config: aws.Config{Credentials: creds}, + }) + if err != nil { + return nil, err + } + + var params *sts.GetCallerIdentityInput + svc := sts.New(stsSession) + stsRequest, _ := svc.GetCallerIdentityRequest(params) + + // Inject the required auth header value, if supplied, and then sign the request including that header + if headerValue != "" { + stsRequest.HTTPRequest.Header.Add(iamServerIdHeader, 
headerValue) + } + stsRequest.Sign() + + // Now extract out the relevant parts of the request + headersJson, err := json.Marshal(stsRequest.HTTPRequest.Header) + if err != nil { + return nil, err + } + requestBody, err := ioutil.ReadAll(stsRequest.HTTPRequest.Body) + if err != nil { + return nil, err + } + loginData["iam_http_request_method"] = stsRequest.HTTPRequest.Method + loginData["iam_request_url"] = base64.StdEncoding.EncodeToString([]byte(stsRequest.HTTPRequest.URL.String())) + loginData["iam_request_headers"] = base64.StdEncoding.EncodeToString(headersJson) + loginData["iam_request_body"] = base64.StdEncoding.EncodeToString(requestBody) + + return loginData, nil +} + +func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) { mount, ok := m["mount"] if !ok { mount = "aws" @@ -32,71 +85,25 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { headerValue = "" } - // Grab any supplied credentials off the command line - // Ensure we're able to fall back to the SDK default credential providers - credConfig := &awsutil.CredentialsConfig{ - AccessKey: m["aws_access_key_id"], - SecretKey: m["aws_secret_access_key"], - SessionToken: m["aws_security_token"], - } - creds, err := credConfig.GenerateCredentialChain() + loginData, err := GenerateLoginData(m["aws_access_key_id"], m["aws_secret_access_key"], m["aws_security_token"], headerValue) if err != nil { - return "", err + return nil, err } - if creds == nil { - return "", fmt.Errorf("could not compile valid credential providers from static config, environemnt, shared, or instance metadata") + if loginData == nil { + return nil, fmt.Errorf("got nil response from GenerateLoginData") } - - // Use the credentials we've found to construct an STS session - stsSession, err := session.NewSessionWithOptions(session.Options{ - Config: aws.Config{Credentials: creds}, - }) - if err != nil { - return "", err - } - - var params *sts.GetCallerIdentityInput - svc := 
sts.New(stsSession) - stsRequest, _ := svc.GetCallerIdentityRequest(params) - - // Inject the required auth header value, if suplied, and then sign the request including that header - if headerValue != "" { - stsRequest.HTTPRequest.Header.Add(iamServerIdHeader, headerValue) - } - stsRequest.Sign() - - // Now extract out the relevant parts of the request - headersJson, err := json.Marshal(stsRequest.HTTPRequest.Header) - if err != nil { - return "", err - } - requestBody, err := ioutil.ReadAll(stsRequest.HTTPRequest.Body) - if err != nil { - return "", err - } - method := stsRequest.HTTPRequest.Method - targetUrl := base64.StdEncoding.EncodeToString([]byte(stsRequest.HTTPRequest.URL.String())) - headers := base64.StdEncoding.EncodeToString(headersJson) - body := base64.StdEncoding.EncodeToString(requestBody) - - // And pass them on to the Vault server + loginData["role"] = role path := fmt.Sprintf("auth/%s/login", mount) - secret, err := c.Logical().Write(path, map[string]interface{}{ - "iam_http_request_method": method, - "iam_request_url": targetUrl, - "iam_request_headers": headers, - "iam_request_body": body, - "role": role, - }) + secret, err := c.Logical().Write(path, loginData) if err != nil { - return "", err + return nil, err } if secret == nil { - return "", fmt.Errorf("empty response from credential provider") + return nil, fmt.Errorf("empty response from credential provider") } - return secret.Auth.ClientToken, nil + return secret, nil } func (h *CLIHandler) Help() string { diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go index 1647f45..aa3da0d 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/client.go @@ -8,6 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/iam" + 
"github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/helper/awsutil" "github.com/hashicorp/vault/logical" @@ -70,7 +71,7 @@ func (b *backend) getRawClientConfig(s logical.Storage, region, clientType strin // It uses getRawClientConfig to obtain config for the runtime environemnt, and if // stsRole is a non-empty string, it will use AssumeRole to obtain a set of assumed // credentials. The credentials will expire after 15 minutes but will auto-refresh. -func (b *backend) getClientConfig(s logical.Storage, region, stsRole, clientType string) (*aws.Config, error) { +func (b *backend) getClientConfig(s logical.Storage, region, stsRole, accountID, clientType string) (*aws.Config, error) { config, err := b.getRawClientConfig(s, region, clientType) if err != nil { @@ -80,20 +81,39 @@ func (b *backend) getClientConfig(s logical.Storage, region, stsRole, clientType return nil, fmt.Errorf("could not compile valid credentials through the default provider chain") } + stsConfig, err := b.getRawClientConfig(s, region, "sts") + if stsConfig == nil { + return nil, fmt.Errorf("could not configure STS client") + } + if err != nil { + return nil, err + } if stsRole != "" { - assumeRoleConfig, err := b.getRawClientConfig(s, region, "sts") - if err != nil { - return nil, err - } - if assumeRoleConfig == nil { - return nil, fmt.Errorf("could not configure STS client") - } - assumedCredentials := stscreds.NewCredentials(session.New(assumeRoleConfig), stsRole) + assumedCredentials := stscreds.NewCredentials(session.New(stsConfig), stsRole) // Test that we actually have permissions to assume the role if _, err = assumedCredentials.Get(); err != nil { return nil, err } config.Credentials = assumedCredentials + } else { + if b.defaultAWSAccountID == "" { + client := sts.New(session.New(stsConfig)) + if client == nil { + return nil, fmt.Errorf("could not obtain sts client: %v", err) + } + inputParams := &sts.GetCallerIdentityInput{} + 
identity, err := client.GetCallerIdentity(inputParams) + if err != nil { + return nil, fmt.Errorf("unable to fetch current caller: %v", err) + } + if identity == nil { + return nil, fmt.Errorf("got nil result from GetCallerIdentity") + } + b.defaultAWSAccountID = *identity.Account + } + if b.defaultAWSAccountID != accountID { + return nil, fmt.Errorf("unable to fetch client for account ID %s -- default client is for account %s", accountID, b.defaultAWSAccountID) + } } return config, nil @@ -121,8 +141,44 @@ func (b *backend) flushCachedIAMClients() { } } +// Gets an entry out of the user ID cache +func (b *backend) getCachedUserId(userId string) string { + if userId == "" { + return "" + } + if entry, ok := b.iamUserIdToArnCache.Get(userId); ok { + b.iamUserIdToArnCache.SetDefault(userId, entry) + return entry.(string) + } + return "" +} + +// Sets an entry in the user ID cache +func (b *backend) setCachedUserId(userId, arn string) { + if userId != "" { + b.iamUserIdToArnCache.SetDefault(userId, arn) + } +} + +func (b *backend) stsRoleForAccount(s logical.Storage, accountID string) (string, error) { + // Check if an STS configuration exists for the AWS account + sts, err := b.lockedAwsStsEntry(s, accountID) + if err != nil { + return "", fmt.Errorf("error fetching STS config for account ID %q: %q\n", accountID, err) + } + // An empty STS role signifies the master account + if sts != nil { + return sts.StsRole, nil + } + return "", nil +} + // clientEC2 creates a client to interact with AWS EC2 API -func (b *backend) clientEC2(s logical.Storage, region string, stsRole string) (*ec2.EC2, error) { +func (b *backend) clientEC2(s logical.Storage, region, accountID string) (*ec2.EC2, error) { + stsRole, err := b.stsRoleForAccount(s, accountID) + if err != nil { + return nil, err + } b.configMutex.RLock() if b.EC2ClientsMap[region] != nil && b.EC2ClientsMap[region][stsRole] != nil { defer b.configMutex.RUnlock() @@ -142,8 +198,7 @@ func (b *backend) clientEC2(s 
logical.Storage, region string, stsRole string) (* // Create an AWS config object using a chain of providers var awsConfig *aws.Config - var err error - awsConfig, err = b.getClientConfig(s, region, stsRole, "ec2") + awsConfig, err = b.getClientConfig(s, region, stsRole, accountID, "ec2") if err != nil { return nil, err @@ -168,7 +223,11 @@ func (b *backend) clientEC2(s logical.Storage, region string, stsRole string) (* } // clientIAM creates a client to interact with AWS IAM API -func (b *backend) clientIAM(s logical.Storage, region string, stsRole string) (*iam.IAM, error) { +func (b *backend) clientIAM(s logical.Storage, region, accountID string) (*iam.IAM, error) { + stsRole, err := b.stsRoleForAccount(s, accountID) + if err != nil { + return nil, err + } b.configMutex.RLock() if b.IAMClientsMap[region] != nil && b.IAMClientsMap[region][stsRole] != nil { defer b.configMutex.RUnlock() @@ -188,8 +247,7 @@ func (b *backend) clientIAM(s logical.Storage, region string, stsRole string) (* // Create an AWS config object using a chain of providers var awsConfig *aws.Config - var err error - awsConfig, err = b.getClientConfig(s, region, stsRole, "iam") + awsConfig, err = b.getClientConfig(s, region, stsRole, accountID, "iam") if err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go index 3787aed..9242ebd 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client.go @@ -129,6 +129,9 @@ func (b *backend) pathConfigClientDelete( // Remove all the cached EC2 client objects in the backend. 
b.flushCachedIAMClients() + // unset the cached default AWS account ID + b.defaultAWSAccountID = "" + return nil, nil } @@ -147,7 +150,11 @@ func (b *backend) pathConfigClientCreateUpdate( configEntry = &clientConfig{} } + // changedCreds is whether we need to flush the cached AWS clients and store in the backend changedCreds := false + // changedOtherConfig is whether other config has changed that requires storing in the backend + // but does not require flushing the cached clients + changedOtherConfig := false accessKeyStr, ok := data.GetOk("access_key") if ok { @@ -210,6 +217,7 @@ func (b *backend) pathConfigClientCreateUpdate( if configEntry.IAMServerIdHeaderValue != headerValStr.(string) { // NOT setting changedCreds here, since this isn't really cached configEntry.IAMServerIdHeaderValue = headerValStr.(string) + changedOtherConfig = true } } else if req.Operation == logical.CreateOperation { configEntry.IAMServerIdHeaderValue = data.Get("iam_server_id_header_value").(string) @@ -225,7 +233,7 @@ func (b *backend) pathConfigClientCreateUpdate( return nil, err } - if changedCreds || req.Operation == logical.CreateOperation { + if changedCreds || changedOtherConfig || req.Operation == logical.CreateOperation { if err := req.Storage.Put(entry); err != nil { return nil, err } @@ -234,6 +242,7 @@ func (b *backend) pathConfigClientCreateUpdate( if changedCreds { b.flushCachedEC2Clients() b.flushCachedIAMClients() + b.defaultAWSAccountID = "" } return nil, nil diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go index 2685710..ff60ebf 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_config_client_test.go @@ -15,7 +15,7 @@ func TestBackend_pathConfigClient(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = 
b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -73,4 +73,37 @@ func TestBackend_pathConfigClient(t *testing.T) { t.Fatalf("expected iam_server_id_header_value: '%#v'; returned iam_server_id_header_value: '%#v'", data["iam_server_id_header_value"], resp.Data["iam_server_id_header_value"]) } + + data = map[string]interface{}{ + "iam_server_id_header_value": "vault_server_identification_2718281", + } + resp, err = b.HandleRequest(&logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/client", + Data: data, + Storage: storage, + }) + + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatal("failed to update the client config entry") + } + + resp, err = b.HandleRequest(&logical.Request{ + Operation: logical.ReadOperation, + Path: "config/client", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatal("failed to read the client config entry") + } + if resp.Data["iam_server_id_header_value"] != data["iam_server_id_header_value"] { + t.Fatalf("expected iam_server_id_header_value: '%#v'; returned iam_server_id_header_value: '%#v'", + data["iam_server_id_header_value"], resp.Data["iam_server_id_header_value"]) + } } diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go index bf50898..cca2d75 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login.go @@ -10,6 +10,7 @@ import ( "io/ioutil" "net/http" "net/url" + "reflect" "regexp" "strings" "time" @@ -151,32 +152,15 @@ func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName str // validateInstance queries the status of the EC2 instance using AWS EC2 API // and checks if the instance is running and is healthy func (b *backend) validateInstance(s logical.Storage, instanceID, region, 
accountID string) (*ec2.Instance, error) { - - // Check if an STS configuration exists for the AWS account - sts, err := b.lockedAwsStsEntry(s, accountID) - if err != nil { - return nil, fmt.Errorf("error fetching STS config for account ID %q: %q\n", accountID, err) - } - // An empty STS role signifies the master account - stsRole := "" - if sts != nil { - stsRole = sts.StsRole - } - // Create an EC2 client to pull the instance information - ec2Client, err := b.clientEC2(s, region, stsRole) + ec2Client, err := b.clientEC2(s, region, accountID) if err != nil { return nil, err } status, err := ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{ - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("instance-id"), - Values: []*string{ - aws.String(instanceID), - }, - }, + InstanceIds: []*string{ + aws.String(instanceID), }, }) if err != nil { @@ -477,32 +461,20 @@ func (b *backend) verifyInstanceMeetsRoleRequirements( // Extract out the instance profile name from the instance // profile ARN - iamInstanceProfileARNSlice := strings.SplitAfter(iamInstanceProfileARN, ":instance-profile/") - iamInstanceProfileName := iamInstanceProfileARNSlice[len(iamInstanceProfileARNSlice)-1] + iamInstanceProfileEntity, err := parseIamArn(iamInstanceProfileARN) - if iamInstanceProfileName == "" { - return nil, fmt.Errorf("failed to extract out IAM instance profile name from IAM instance profile ARN") - } - - // Check if an STS configuration exists for the AWS account - sts, err := b.lockedAwsStsEntry(s, identityDoc.AccountID) if err != nil { - return fmt.Errorf("error fetching STS config for account ID %q: %q\n", identityDoc.AccountID, err), nil - } - // An empty STS role signifies the master account - stsRole := "" - if sts != nil { - stsRole = sts.StsRole + return nil, fmt.Errorf("failed to parse IAM instance profile ARN %q; error: %v", iamInstanceProfileARN, err) } // Use instance profile ARN to fetch the associated role ARN - iamClient, err := b.clientIAM(s, 
identityDoc.Region, stsRole) + iamClient, err := b.clientIAM(s, identityDoc.Region, identityDoc.AccountID) if err != nil { return nil, fmt.Errorf("could not fetch IAM client: %v", err) } else if iamClient == nil { return nil, fmt.Errorf("received a nil iamClient") } - iamRoleARN, err := b.instanceIamRoleARN(iamClient, iamInstanceProfileName) + iamRoleARN, err := b.instanceIamRoleARN(iamClient, iamInstanceProfileEntity.FriendlyName) if err != nil { return nil, fmt.Errorf("IAM role ARN could not be fetched: %v", err) } @@ -927,8 +899,18 @@ func (b *backend) pathLoginRenewIam( return nil, fmt.Errorf("role entry not found") } - if entityType, ok := req.Auth.Metadata["inferred_entity_type"]; !ok { - if entityType == ec2EntityType { + // we don't really care what the inferred entity type was when the role was initially created. We + // care about what the role currently requires. However, the metadata's inferred_entity_id is only + // set when inferencing is turned on at initial login time. So, if inferencing is turned on, any + // existing roles will NOT be able to renew tokens. + // This might change later, but authenticating the actual inferred entity ID is NOT done if there + // is no inferencing requested in the role. The reason is that authenticating the inferred entity + // ID requires additional AWS IAM permissions that might not be present (e.g., + // ec2:DescribeInstances) as well as additional inferencing configuration (the inferred region). + // So, for now, if you want to turn on inferencing, all clients must re-authenticate and cannot + // renew existing tokens. 
+ if roleEntry.InferredEntityType != "" { + if roleEntry.InferredEntityType == ec2EntityType { instanceID, ok := req.Auth.Metadata["inferred_entity_id"] if !ok { return nil, fmt.Errorf("no inferred entity ID in auth metadata") @@ -937,21 +919,64 @@ func (b *backend) pathLoginRenewIam( if !ok { return nil, fmt.Errorf("no inferred AWS region in auth metadata") } - _, err := b.validateInstance(req.Storage, instanceID, instanceRegion, req.Auth.Metadata["accountID"]) + _, err := b.validateInstance(req.Storage, instanceID, instanceRegion, req.Auth.Metadata["account_id"]) if err != nil { return nil, fmt.Errorf("failed to verify instance ID %q: %v", instanceID, err) } } else { - return nil, fmt.Errorf("unrecognized entity_type in metadata: %q", entityType) + return nil, fmt.Errorf("unrecognized entity_type in metadata: %q", roleEntry.InferredEntityType) } } - if roleEntry.BoundIamPrincipalARN != canonicalArn { - return nil, fmt.Errorf("role no longer bound to arn %q", canonicalArn) + // Note that the error messages below can leak a little bit of information about the role information + // For example, if on renew, the client gets the "error parsing ARN..." error message, the client + // will know that it's a wildcard bind (but not the actual bind), even if the client can't actually + // read the role directly to know what the bind is. It's a relatively small amount of leakage, in + // some fairly corner cases, and in the most likely error case (role has been changed to a new ARN), + // the error message is identical. 
+ if roleEntry.BoundIamPrincipalARN != "" { + // We might not get here if all bindings were on the inferred entity, which we've already validated + // above + clientUserId, ok := req.Auth.Metadata["client_user_id"] + if ok && roleEntry.BoundIamPrincipalID != "" { + // Resolving unique IDs is enabled and the auth metadata contains the unique ID, so checking the + // unique ID is authoritative at this stage + if roleEntry.BoundIamPrincipalID != clientUserId { + return nil, fmt.Errorf("role no longer bound to ARN %q", canonicalArn) + } + } else if strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") { + fullArn := b.getCachedUserId(clientUserId) + if fullArn == "" { + entity, err := parseIamArn(canonicalArn) + if err != nil { + return nil, fmt.Errorf("error parsing ARN %q: %v", canonicalArn, err) + } + fullArn, err = b.fullArn(entity, req.Storage) + if err != nil { + return nil, fmt.Errorf("error looking up full ARN of entity %v: %v", entity, err) + } + if fullArn == "" { + return nil, fmt.Errorf("got empty string back when looking up full ARN of entity %v", entity) + } + if clientUserId != "" { + b.setCachedUserId(clientUserId, fullArn) + } + } + if !strutil.GlobbedStringsMatch(roleEntry.BoundIamPrincipalARN, fullArn) { + return nil, fmt.Errorf("role no longer bound to ARN %q", canonicalArn) + } + } else if roleEntry.BoundIamPrincipalARN != canonicalArn { + return nil, fmt.Errorf("role no longer bound to ARN %q", canonicalArn) + } } - return framework.LeaseExtend(roleEntry.TTL, roleEntry.MaxTTL, b.System())(req, data) - + // If 'Period' is set on the role, then the token should never expire. 
+ if roleEntry.Period > time.Duration(0) { + req.Auth.TTL = roleEntry.Period + return &logical.Response{Auth: req.Auth}, nil + } else { + return framework.LeaseExtend(roleEntry.TTL, roleEntry.MaxTTL, b.System())(req, data) + } } func (b *backend) pathLoginRenewEc2( @@ -1095,14 +1120,12 @@ func (b *backend) pathLoginUpdateIam( if headersB64 == "" { return logical.ErrorResponse("missing iam_request_headers"), nil } - headersJson, err := base64.StdEncoding.DecodeString(headersB64) + headers, err := parseIamRequestHeaders(headersB64) if err != nil { - return logical.ErrorResponse("failed to base64 decode iam_request_headers"), nil + return logical.ErrorResponse(fmt.Sprintf("Error parsing iam_request_headers: %v", err)), nil } - var headers http.Header - err = jsonutil.DecodeJSON(headersJson, &headers) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("failed to JSON decode iam_request_headers %q: %v", headersJson, err)), nil + if headers == nil { + return logical.ErrorResponse("nil response when parsing iam_request_headers"), nil } config, err := b.lockedClientConfigEntry(req.Storage) @@ -1124,18 +1147,21 @@ func (b *backend) pathLoginUpdateIam( } } - clientArn, accountID, err := submitCallerIdentityRequest(method, endpoint, parsedUrl, body, headers) + callerID, err := submitCallerIdentityRequest(method, endpoint, parsedUrl, body, headers) if err != nil { return logical.ErrorResponse(fmt.Sprintf("error making upstream request: %v", err)), nil } - canonicalArn, principalName, sessionName, err := parseIamArn(clientArn) + // This could either be a "userID:SessionID" (in the case of an assumed role) or just a "userID" + // (in the case of an IAM user). 
+ callerUniqueId := strings.Split(callerID.UserId, ":")[0] + entity, err := parseIamArn(callerID.Arn) if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Error parsing arn: %v", err)), nil + return logical.ErrorResponse(fmt.Sprintf("error parsing arn %q: %v", callerID.Arn, err)), nil } roleName := data.Get("role").(string) if roleName == "" { - roleName = principalName + roleName = entity.FriendlyName } roleEntry, err := b.lockedAWSRole(req.Storage, roleName) @@ -1152,8 +1178,34 @@ func (b *backend) pathLoginUpdateIam( // The role creation should ensure that either we're inferring this is an EC2 instance // or that we're binding an ARN - if roleEntry.BoundIamPrincipalARN != "" && roleEntry.BoundIamPrincipalARN != canonicalArn { - return logical.ErrorResponse(fmt.Sprintf("IAM Principal %q does not belong to the role %q", clientArn, roleName)), nil + // The only way BoundIamPrincipalID could get set is if BoundIamPrincipalARN was also set and + // resolving to internal IDs was turned on, which can't be turned off. 
So, there should be no + // way for this to be set and not match BoundIamPrincipalARN + if roleEntry.BoundIamPrincipalID != "" { + if callerUniqueId != roleEntry.BoundIamPrincipalID { + return logical.ErrorResponse(fmt.Sprintf("expected IAM %s %s to resolve to unique AWS ID %q but got %q instead", entity.Type, entity.FriendlyName, roleEntry.BoundIamPrincipalID, callerUniqueId)), nil + } + } else if roleEntry.BoundIamPrincipalARN != "" { + if strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") { + fullArn := b.getCachedUserId(callerUniqueId) + if fullArn == "" { + fullArn, err = b.fullArn(entity, req.Storage) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error looking up full ARN of entity %v: %v", entity, err)), nil + } + if fullArn == "" { + return logical.ErrorResponse(fmt.Sprintf("got empty string back when looking up full ARN of entity %v", entity)), nil + } + b.setCachedUserId(callerUniqueId, fullArn) + } + if !strutil.GlobbedStringsMatch(roleEntry.BoundIamPrincipalARN, fullArn) { + // Note: Intentionally giving the exact same error message as a few lines below. Otherwise, we might leak information + // about whether the bound IAM principal ARN is a wildcard or not, and what that wildcard is. 
+ return logical.ErrorResponse(fmt.Sprintf("IAM Principal %q does not belong to the role %q", callerID.Arn, roleName)), nil + } + } else if roleEntry.BoundIamPrincipalARN != entity.canonicalArn() { + return logical.ErrorResponse(fmt.Sprintf("IAM Principal %q does not belong to the role %q", callerID.Arn, roleName)), nil + } } policies := roleEntry.Policies @@ -1161,9 +1213,9 @@ func (b *backend) pathLoginUpdateIam( inferredEntityType := "" inferredEntityId := "" if roleEntry.InferredEntityType == ec2EntityType { - instance, err := b.validateInstance(req.Storage, sessionName, roleEntry.InferredAWSRegion, accountID) + instance, err := b.validateInstance(req.Storage, entity.SessionInfo, roleEntry.InferredAWSRegion, callerID.Account) if err != nil { - return logical.ErrorResponse(fmt.Sprintf("failed to verify %s as a valid EC2 instance in region %s", sessionName, roleEntry.InferredAWSRegion)), nil + return logical.ErrorResponse(fmt.Sprintf("failed to verify %s as a valid EC2 instance in region %s", entity.SessionInfo, roleEntry.InferredAWSRegion)), nil } // build a fake identity doc to pass on metadata about the instance to verifyInstanceMeetsRoleRequirements @@ -1171,7 +1223,7 @@ func (b *backend) pathLoginUpdateIam( Tags: nil, // Don't really need the tags, so not doing the work of converting them from Instance.Tags to identityDocument.Tags InstanceID: *instance.InstanceId, AmiID: *instance.ImageId, - AccountID: accountID, + AccountID: callerID.Account, Region: roleEntry.InferredAWSRegion, PendingTime: instance.LaunchTime.Format(time.RFC3339), } @@ -1181,29 +1233,31 @@ func (b *backend) pathLoginUpdateIam( return nil, err } if validationError != nil { - return logical.ErrorResponse(fmt.Sprintf("Error validating instance: %s", validationError)), nil + return logical.ErrorResponse(fmt.Sprintf("error validating instance: %s", validationError)), nil } inferredEntityType = ec2EntityType - inferredEntityId = sessionName + inferredEntityId = entity.SessionInfo } resp := 
&logical.Response{ Auth: &logical.Auth{ + Period: roleEntry.Period, Policies: policies, Metadata: map[string]string{ - "client_arn": clientArn, - "canonical_arn": canonicalArn, + "client_arn": callerID.Arn, + "canonical_arn": entity.canonicalArn(), + "client_user_id": callerUniqueId, "auth_type": iamAuthType, "inferred_entity_type": inferredEntityType, "inferred_entity_id": inferredEntityId, "inferred_aws_region": roleEntry.InferredAWSRegion, - "account_id": accountID, + "account_id": entity.AccountNumber, }, InternalData: map[string]interface{}{ "role_name": roleName, }, - DisplayName: principalName, + DisplayName: entity.FriendlyName, LeaseOptions: logical.LeaseOptions{ Renewable: true, TTL: roleEntry.TTL, @@ -1256,29 +1310,50 @@ func hasValuesForIamAuth(data *framework.FieldData) (bool, bool) { (hasRequestMethod || hasRequestUrl || hasRequestBody || hasRequestHeaders) } -func parseIamArn(iamArn string) (string, string, string, error) { +func parseIamArn(iamArn string) (*iamEntity, error) { // iamArn should look like one of the following: - // 1. arn:aws:iam:::user/ + // 1. arn:aws:iam:::/ // 2. 
arn:aws:sts:::assumed-role// // if we get something like 2, then we want to transform that back to what // most people would expect, which is arn:aws:iam:::role/ + var entity iamEntity fullParts := strings.Split(iamArn, ":") - principalFullName := fullParts[5] - // principalFullName would now be something like user/ or assumed-role// - parts := strings.Split(principalFullName, "/") - principalName := parts[1] - // now, principalName should either be or - transformedArn := iamArn - sessionName := "" - if parts[0] == "assumed-role" { - transformedArn = fmt.Sprintf("arn:aws:iam::%s:role/%s", fullParts[4], principalName) - // fullParts[4] is the - sessionName = parts[2] - // sessionName is - } else if parts[0] != "user" { - return "", "", "", fmt.Errorf("unrecognized principal type: %q", parts[0]) + if len(fullParts) != 6 { + return nil, fmt.Errorf("unrecognized arn: contains %d colon-separated parts, expected 6", len(fullParts)) } - return transformedArn, principalName, sessionName, nil + if fullParts[0] != "arn" { + return nil, fmt.Errorf("unrecognized arn: does not begin with arn:") + } + // normally aws, but could be aws-cn or aws-us-gov + entity.Partition = fullParts[1] + if fullParts[2] != "iam" && fullParts[2] != "sts" { + return nil, fmt.Errorf("unrecognized service: %v, not one of iam or sts", fullParts[2]) + } + // fullParts[3] is the region, which doesn't matter for AWS IAM entities + entity.AccountNumber = fullParts[4] + // fullParts[5] would now be something like user/ or assumed-role// + parts := strings.Split(fullParts[5], "/") + if len(parts) < 2 { + return nil, fmt.Errorf("unrecognized arn: %q contains fewer than 2 slash-separated parts", fullParts[5]) + } + entity.Type = parts[0] + entity.Path = strings.Join(parts[1:len(parts)-1], "/") + entity.FriendlyName = parts[len(parts)-1] + // now, entity.FriendlyName should either be or + switch entity.Type { + case "assumed-role": + // Assumed roles don't have paths and have a slightly different format + // 
parts[2] is + entity.Path = "" + entity.FriendlyName = parts[1] + entity.SessionInfo = parts[2] + case "user": + case "role": + case "instance-profile": + default: + return &iamEntity{}, fmt.Errorf("unrecognized principal type: %q", entity.Type) + } + return &entity, nil } func validateVaultHeaderValue(headers http.Header, requestUrl *url.URL, requiredHeaderValue string) error { @@ -1381,7 +1456,38 @@ func parseGetCallerIdentityResponse(response string) (GetCallerIdentityResponse, return result, err } -func submitCallerIdentityRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) (string, string, error) { +func parseIamRequestHeaders(headersB64 string) (http.Header, error) { + headersJson, err := base64.StdEncoding.DecodeString(headersB64) + if err != nil { + return nil, fmt.Errorf("failed to base64 decode iam_request_headers") + } + var headersDecoded map[string]interface{} + err = jsonutil.DecodeJSON(headersJson, &headersDecoded) + if err != nil { + return nil, fmt.Errorf("failed to JSON decode iam_request_headers %q: %v", headersJson, err) + } + headers := make(http.Header) + for k, v := range headersDecoded { + switch typedValue := v.(type) { + case string: + headers.Add(k, typedValue) + case []interface{}: + for _, individualVal := range typedValue { + switch possibleStrVal := individualVal.(type) { + case string: + headers.Add(k, possibleStrVal) + default: + return nil, fmt.Errorf("header %q contains value %q that has type %s, not string", k, individualVal, reflect.TypeOf(individualVal)) + } + } + default: + return nil, fmt.Errorf("header %q value %q has type %s, not string or []interface", k, typedValue, reflect.TypeOf(v)) + } + } + return headers, nil +} + +func submitCallerIdentityRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) (*GetCallerIdentityResult, error) { // NOTE: We need to ensure we're calling STS, instead of acting as an unintended network proxy // The protection against 
this is that this method will only call the endpoint specified in the // client config (defaulting to sts.amazonaws.com), so it would require a Vault admin to override @@ -1390,25 +1496,24 @@ func submitCallerIdentityRequest(method, endpoint string, parsedUrl *url.URL, bo client := cleanhttp.DefaultClient() response, err := client.Do(request) if err != nil { - return "", "", fmt.Errorf("error making request: %v", err) + return nil, fmt.Errorf("error making request: %v", err) } if response != nil { defer response.Body.Close() } // we check for status code afterwards to also print out response body responseBody, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, err + } if response.StatusCode != 200 { - return "", "", fmt.Errorf("received error code %s from STS: %s", response.StatusCode, string(responseBody)) + return nil, fmt.Errorf("received error code %s from STS: %s", response.StatusCode, string(responseBody)) } callerIdentityResponse, err := parseGetCallerIdentityResponse(string(responseBody)) if err != nil { - return "", "", fmt.Errorf("error parsing STS response") + return nil, fmt.Errorf("error parsing STS response") } - clientArn := callerIdentityResponse.GetCallerIdentityResult[0].Arn - if clientArn == "" { - return "", "", fmt.Errorf("no ARN validated") - } - return clientArn, callerIdentityResponse.GetCallerIdentityResult[0].Account, nil + return &callerIdentityResponse.GetCallerIdentityResult[0], nil } type GetCallerIdentityResponse struct { @@ -1446,6 +1551,70 @@ type roleTagLoginResponse struct { DisallowReauthentication bool `json:"disallow_reauthentication" structs:"disallow_reauthentication" mapstructure:"disallow_reauthentication"` } +type iamEntity struct { + Partition string + AccountNumber string + Type string + Path string + FriendlyName string + SessionInfo string +} + +// Returns a Vault-internal canonical ARN for referring to an IAM entity +func (e *iamEntity) canonicalArn() string { + entityType := e.Type + // canonicalize 
"assumed-role" into "role" + if entityType == "assumed-role" { + entityType = "role" + } + // Annoyingly, the assumed-role entity type doesn't have the Path of the role which was assumed + // So, we "canonicalize" it by just completely dropping the path. The other option would be to + // make an AWS API call to look up the role by FriendlyName, which introduces more complexity to + // code and test, and it also breaks backwards compatibility in an area where we would really want + // it + return fmt.Sprintf("arn:%s:iam::%s:%s/%s", e.Partition, e.AccountNumber, entityType, e.FriendlyName) +} + +// This returns the "full" ARN of an iamEntity, how it would be referred to in AWS proper +func (b *backend) fullArn(e *iamEntity, s logical.Storage) (string, error) { + // Not assuming path is reliable for any entity types + client, err := b.clientIAM(s, getAnyRegionForAwsPartition(e.Partition).ID(), e.AccountNumber) + if err != nil { + return "", fmt.Errorf("error creating IAM client: %v", err) + } + + switch e.Type { + case "user": + input := iam.GetUserInput{ + UserName: aws.String(e.FriendlyName), + } + resp, err := client.GetUser(&input) + if err != nil { + return "", fmt.Errorf("error fetching user %q: %v", e.FriendlyName, err) + } + if resp == nil { + return "", fmt.Errorf("nil response from GetUser") + } + return *(resp.User.Arn), nil + case "assumed-role": + fallthrough + case "role": + input := iam.GetRoleInput{ + RoleName: aws.String(e.FriendlyName), + } + resp, err := client.GetRole(&input) + if err != nil { + return "", fmt.Errorf("error fetching role %q: %v", e.FriendlyName, err) + } + if resp == nil { + return "", fmt.Errorf("nil response form GetRole") + } + return *(resp.Role.Arn), nil + default: + return "", fmt.Errorf("unrecognized entity type: %s", e.Type) + } +} + const iamServerIdHeader = "X-Vault-AWS-IAM-Server-ID" const pathLoginSyn = ` diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go 
b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go index e96bed8..f813a58 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_login_test.go @@ -1,8 +1,12 @@ package awsauth import ( + "encoding/base64" + "encoding/json" + "fmt" "net/http" "net/url" + "reflect" "testing" ) @@ -32,11 +36,17 @@ func TestBackend_pathLogin_getCallerIdentityResponse(t *testing.T) { expectedRoleArn := "arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName" parsedUserResponse, err := parseGetCallerIdentityResponse(responseFromUser) + if err != nil { + t.Fatal(err) + } if parsed_arn := parsedUserResponse.GetCallerIdentityResult[0].Arn; parsed_arn != expectedUserArn { t.Errorf("expected to parse arn %#v, got %#v", expectedUserArn, parsed_arn) } parsedRoleResponse, err := parseGetCallerIdentityResponse(responseFromAssumedRole) + if err != nil { + t.Fatal(err) + } if parsed_arn := parsedRoleResponse.GetCallerIdentityResult[0].Arn; parsed_arn != expectedRoleArn { t.Errorf("expected to parn arn %#v; got %#v", expectedRoleArn, parsed_arn) } @@ -48,36 +58,56 @@ func TestBackend_pathLogin_getCallerIdentityResponse(t *testing.T) { } func TestBackend_pathLogin_parseIamArn(t *testing.T) { - userArn := "arn:aws:iam::123456789012:user/MyUserName" - assumedRoleArn := "arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName" - baseRoleArn := "arn:aws:iam::123456789012:role/RoleName" - - xformedUser, principalFriendlyName, sessionName, err := parseIamArn(userArn) - if err != nil { - t.Fatal(err) - } - if xformedUser != userArn { - t.Fatalf("expected to transform ARN %#v into %#v but got %#v instead", userArn, userArn, xformedUser) - } - if principalFriendlyName != "MyUserName" { - t.Fatalf("expected to extract MyUserName from ARN %#v but got %#v instead", userArn, principalFriendlyName) - } - if sessionName != "" { - t.Fatalf("expected to extract no session 
name from ARN %#v but got %#v instead", userArn, sessionName) + testParser := func(inputArn, expectedCanonicalArn string, expectedEntity iamEntity) { + entity, err := parseIamArn(inputArn) + if err != nil { + t.Fatal(err) + } + if expectedCanonicalArn != "" && entity.canonicalArn() != expectedCanonicalArn { + t.Fatalf("expected to canonicalize ARN %q into %q but got %q instead", inputArn, expectedCanonicalArn, entity.canonicalArn()) + } + if *entity != expectedEntity { + t.Fatalf("expected to get iamEntity %#v from input ARN %q but instead got %#v", expectedEntity, inputArn, *entity) + } } - xformedRole, principalFriendlyName, sessionName, err := parseIamArn(assumedRoleArn) - if err != nil { - t.Fatal(err) + testParser("arn:aws:iam::123456789012:user/UserPath/MyUserName", + "arn:aws:iam::123456789012:user/MyUserName", + iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "user", Path: "UserPath", FriendlyName: "MyUserName"}, + ) + canonicalRoleArn := "arn:aws:iam::123456789012:role/RoleName" + testParser("arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName", + canonicalRoleArn, + iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "assumed-role", FriendlyName: "RoleName", SessionInfo: "RoleSessionName"}, + ) + testParser("arn:aws:iam::123456789012:role/RolePath/RoleName", + canonicalRoleArn, + iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "role", Path: "RolePath", FriendlyName: "RoleName"}, + ) + testParser("arn:aws:iam::123456789012:instance-profile/profilePath/InstanceProfileName", + "", + iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "instance-profile", Path: "profilePath", FriendlyName: "InstanceProfileName"}, + ) + + // Test that it properly handles pathological inputs... 
+ _, err := parseIamArn("") + if err == nil { + t.Error("expected error from empty input string") } - if xformedRole != baseRoleArn { - t.Fatalf("expected to transform ARN %#v into %#v but got %#v instead", assumedRoleArn, baseRoleArn, xformedRole) + + _, err = parseIamArn("arn:aws:iam::123456789012:role") + if err == nil { + t.Error("expected error from malformed ARN without a role name") } - if principalFriendlyName != "RoleName" { - t.Fatalf("expected to extract principal name of RoleName from ARN %#v but got %#v instead", assumedRoleArn, sessionName) + + _, err = parseIamArn("arn:aws:iam") + if err == nil { + t.Error("expected error from incomplete ARN (arn:aws:iam)") } - if sessionName != "RoleSessionName" { - t.Fatalf("expected to extract role session name of RoleSessionName from ARN %#v but got %#v instead", assumedRoleArn, sessionName) + + _, err = parseIamArn("arn:aws:iam::1234556789012:/") + if err == nil { + t.Error("expected error from empty principal type and no principal name (arn:aws:iam::1234556789012:/)") } } @@ -138,3 +168,43 @@ func TestBackend_validateVaultHeaderValue(t *testing.T) { t.Errorf("did NOT validate valid POST request with split Authorization header: %v", err) } } + +func TestBackend_pathLogin_parseIamRequestHeaders(t *testing.T) { + testIamParser := func(headers interface{}, expectedHeaders http.Header) error { + headersJson, err := json.Marshal(headers) + if err != nil { + return fmt.Errorf("unable to JSON encode headers: %v", err) + } + headersB64 := base64.StdEncoding.EncodeToString(headersJson) + + parsedHeaders, err := parseIamRequestHeaders(headersB64) + if err != nil { + return fmt.Errorf("error parsing encoded headers: %v", err) + } + if parsedHeaders == nil { + return fmt.Errorf("nil result from parsing headers") + } + if !reflect.DeepEqual(parsedHeaders, expectedHeaders) { + return fmt.Errorf("parsed headers not equal to input headers") + } + return nil + } + + headersGoStyle := http.Header{ + "Header1": []string{"Value1"}, 
+ "Header2": []string{"Value2"}, + } + headersMixedType := map[string]interface{}{ + "Header1": "Value1", + "Header2": []string{"Value2"}, + } + + err := testIamParser(headersGoStyle, headersGoStyle) + if err != nil { + t.Errorf("error parsing go-style headers: %v", err) + } + err = testIamParser(headersMixedType, headersGoStyle) + if err != nil { + t.Errorf("error parsing mixed-style headers: %v", err) + } +} diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go index f6c19f2..476beca 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role.go @@ -63,6 +63,14 @@ with an IAM instance profile ARN which has a prefix that matches the value specified by this parameter. The value is prefix-matched (as though it were a glob ending in '*'). This is only checked when auth_type is ec2.`, + }, + "resolve_aws_unique_ids": { + Type: framework.TypeBool, + Default: true, + Description: `If set, resolve all AWS IAM ARNs into AWS's internal unique IDs. +When an IAM entity (e.g., user, role, or instance profile) is deleted, then all references +to it within the role will be invalidated, which prevents a new IAM entity from being created +with the same name and matching the role's IAM binds. 
Once set, this cannot be unset.`, }, "inferred_entity_type": { Type: framework.TypeString, @@ -121,7 +129,7 @@ to 0, in which case the value will fallback to the system/mount defaults.`, Description: "The maximum allowed lifetime of tokens issued using this role.", }, "policies": { - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Default: "default", Description: "Policies to be set on tokens issued using this role.", }, @@ -210,7 +218,7 @@ func (b *backend) lockedAWSRole(s logical.Storage, roleName string) (*awsRoleEnt if roleEntry == nil { return nil, nil } - needUpgrade, err := upgradeRoleEntry(roleEntry) + needUpgrade, err := b.upgradeRoleEntry(s, roleEntry) if err != nil { return nil, fmt.Errorf("error upgrading roleEntry: %v", err) } @@ -228,7 +236,7 @@ func (b *backend) lockedAWSRole(s logical.Storage, roleName string) (*awsRoleEnt return nil, nil } // now re-check to see if we need to upgrade - if needUpgrade, err = upgradeRoleEntry(roleEntry); err != nil { + if needUpgrade, err = b.upgradeRoleEntry(s, roleEntry); err != nil { return nil, fmt.Errorf("error upgrading roleEntry: %v", err) } if needUpgrade { @@ -284,7 +292,7 @@ func (b *backend) nonLockedSetAWSRole(s logical.Storage, roleName string, // If needed, updates the role entry and returns a bool indicating if it was updated // (and thus needs to be persisted) -func upgradeRoleEntry(roleEntry *awsRoleEntry) (bool, error) { +func (b *backend) upgradeRoleEntry(s logical.Storage, roleEntry *awsRoleEntry) (bool, error) { if roleEntry == nil { return false, fmt.Errorf("received nil roleEntry") } @@ -307,6 +315,19 @@ func upgradeRoleEntry(roleEntry *awsRoleEntry) (bool, error) { upgraded = true } + if roleEntry.AuthType == iamAuthType && + roleEntry.ResolveAWSUniqueIDs && + roleEntry.BoundIamPrincipalARN != "" && + roleEntry.BoundIamPrincipalID == "" && + !strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") { + principalId, err := b.resolveArnToUniqueIDFunc(s, 
roleEntry.BoundIamPrincipalARN) + if err != nil { + return false, err + } + roleEntry.BoundIamPrincipalID = principalId + upgraded = true + } + return upgraded, nil } @@ -411,7 +432,7 @@ func (b *backend) pathRoleCreateUpdate( if roleEntry == nil { roleEntry = &awsRoleEntry{} } else { - needUpdate, err := upgradeRoleEntry(roleEntry) + needUpdate, err := b.upgradeRoleEntry(req.Storage, roleEntry) if err != nil { return logical.ErrorResponse(fmt.Sprintf("failed to update roleEntry: %v", err)), nil } @@ -445,6 +466,19 @@ func (b *backend) pathRoleCreateUpdate( roleEntry.BoundSubnetID = boundSubnetIDRaw.(string) } + if resolveAWSUniqueIDsRaw, ok := data.GetOk("resolve_aws_unique_ids"); ok { + switch { + case req.Operation == logical.CreateOperation: + roleEntry.ResolveAWSUniqueIDs = resolveAWSUniqueIDsRaw.(bool) + case roleEntry.ResolveAWSUniqueIDs && !resolveAWSUniqueIDsRaw.(bool): + return logical.ErrorResponse("changing resolve_aws_unique_ids from true to false is not allowed"), nil + default: + roleEntry.ResolveAWSUniqueIDs = resolveAWSUniqueIDsRaw.(bool) + } + } else if req.Operation == logical.CreateOperation { + roleEntry.ResolveAWSUniqueIDs = data.Get("resolve_aws_unique_ids").(bool) + } + if boundIamRoleARNRaw, ok := data.GetOk("bound_iam_role_arn"); ok { roleEntry.BoundIamRoleARN = boundIamRoleARNRaw.(string) } @@ -454,7 +488,29 @@ func (b *backend) pathRoleCreateUpdate( } if boundIamPrincipalARNRaw, ok := data.GetOk("bound_iam_principal_arn"); ok { - roleEntry.BoundIamPrincipalARN = boundIamPrincipalARNRaw.(string) + principalARN := boundIamPrincipalARNRaw.(string) + roleEntry.BoundIamPrincipalARN = principalARN + // Explicitly not checking to see if the user has changed the ARN under us + // This allows the user to sumbit an update with the same ARN to force Vault + // to re-resolve the ARN to the unique ID, in case an entity was deleted and + // recreated + if roleEntry.ResolveAWSUniqueIDs && !strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") { + 
principalID, err := b.resolveArnToUniqueIDFunc(req.Storage, principalARN) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("failed updating the unique ID of ARN %#v: %#v", principalARN, err)), nil + } + roleEntry.BoundIamPrincipalID = principalID + } else { + // Need to handle the case where we're switching from a non-wildcard principal to a wildcard principal + roleEntry.BoundIamPrincipalID = "" + } + } else if roleEntry.ResolveAWSUniqueIDs && roleEntry.BoundIamPrincipalARN != "" && !strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") { + // we're turning on resolution on this role, so ensure we update it + principalID, err := b.resolveArnToUniqueIDFunc(req.Storage, roleEntry.BoundIamPrincipalARN) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("unable to resolve ARN %#v to internal ID: %#v", roleEntry.BoundIamPrincipalARN, err)), nil + } + roleEntry.BoundIamPrincipalID = principalID } if inferRoleTypeRaw, ok := data.GetOk("inferred_entity_type"); ok { @@ -570,11 +626,11 @@ func (b *backend) pathRoleCreateUpdate( return logical.ErrorResponse("at least be one bound parameter should be specified on the role"), nil } - policiesStr, ok := data.GetOk("policies") + policiesRaw, ok := data.GetOk("policies") if ok { - roleEntry.Policies = policyutil.ParsePolicies(policiesStr.(string)) + roleEntry.Policies = policyutil.ParsePolicies(policiesRaw) } else if req.Operation == logical.CreateOperation { - roleEntry.Policies = []string{"default"} + roleEntry.Policies = []string{} } disallowReauthenticationBool, ok := data.GetOk("disallow_reauthentication") @@ -669,7 +725,7 @@ func (b *backend) pathRoleCreateUpdate( return nil, err } - if len(resp.Warnings()) == 0 { + if len(resp.Warnings) == 0 { return nil, nil } @@ -682,6 +738,7 @@ type awsRoleEntry struct { BoundAmiID string `json:"bound_ami_id" structs:"bound_ami_id" mapstructure:"bound_ami_id"` BoundAccountID string `json:"bound_account_id" structs:"bound_account_id" 
mapstructure:"bound_account_id"` BoundIamPrincipalARN string `json:"bound_iam_principal_arn" structs:"bound_iam_principal_arn" mapstructure:"bound_iam_principal_arn"` + BoundIamPrincipalID string `json:"bound_iam_principal_id" structs:"bound_iam_principal_id" mapstructure:"bound_iam_principal_id"` BoundIamRoleARN string `json:"bound_iam_role_arn" structs:"bound_iam_role_arn" mapstructure:"bound_iam_role_arn"` BoundIamInstanceProfileARN string `json:"bound_iam_instance_profile_arn" structs:"bound_iam_instance_profile_arn" mapstructure:"bound_iam_instance_profile_arn"` BoundRegion string `json:"bound_region" structs:"bound_region" mapstructure:"bound_region"` @@ -689,6 +746,7 @@ type awsRoleEntry struct { BoundVpcID string `json:"bound_vpc_id" structs:"bound_vpc_id" mapstructure:"bound_vpc_id"` InferredEntityType string `json:"inferred_entity_type" structs:"inferred_entity_type" mapstructure:"inferred_entity_type"` InferredAWSRegion string `json:"inferred_aws_region" structs:"inferred_aws_region" mapstructure:"inferred_aws_region"` + ResolveAWSUniqueIDs bool `json:"resolve_aws_unique_ids" structs:"resolve_aws_unique_ids" mapstructure:"resolve_aws_unique_ids"` RoleTag string `json:"role_tag" structs:"role_tag" mapstructure:"role_tag"` AllowInstanceMigration bool `json:"allow_instance_migration" structs:"allow_instance_migration" mapstructure:"allow_instance_migration"` TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"` diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go index 5c8a119..0f5dc5e 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_tag.go @@ -35,7 +35,7 @@ If set, the created tag can only be used by the instance with the given ID.`, }, "policies": &framework.FieldSchema{ - Type: framework.TypeString, + Type: 
framework.TypeCommaStringSlice, Description: "Policies to be associated with the tag. If set, must be a subset of the role's policies. If set, but set to an empty value, only the 'default' policy will be given to issued tokens.", }, @@ -107,9 +107,9 @@ func (b *backend) pathRoleTagUpdate( // should be inherited. So, by leaving the policies var unset to anything when it is not // supplied, we ensure that it inherits all the policies on the role. var policies []string - policiesStr, ok := data.GetOk("policies") + policiesRaw, ok := data.GetOk("policies") if ok { - policies = policyutil.ParsePolicies(policiesStr.(string)) + policies = policyutil.ParsePolicies(policiesRaw) } if !strutil.StrListSubset(roleEntry.Policies, policies) { resp.AddWarning("Policies on the tag are not a subset of the policies set on the role. Login will not be allowed with this tag unless the role policies are updated.") diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go index 52ff435..21c87ab 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/aws/path_role_test.go @@ -19,7 +19,7 @@ func TestBackend_pathRoleEc2(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -135,7 +135,81 @@ func TestBackend_pathRoleEc2(t *testing.T) { if resp != nil { t.Fatalf("bad: response: expected:nil actual:%#v\n", resp) } +} +func Test_enableIamIDResolution(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + err = b.Setup(config) + if err != nil { + t.Fatal(err) + } + roleName := "upgradable_role" + + b.resolveArnToUniqueIDFunc = resolveArnToFakeUniqueId + + data := map[string]interface{}{ + 
"auth_type": iamAuthType, + "policies": "p,q", + "bound_iam_principal_arn": "arn:aws:iam::123456789012:role/MyRole", + "resolve_aws_unique_ids": false, + } + + submitRequest := func(roleName string, op logical.Operation) (*logical.Response, error) { + return b.HandleRequest(&logical.Request{ + Operation: op, + Path: "role/" + roleName, + Data: data, + Storage: storage, + }) + } + + resp, err := submitRequest(roleName, logical.CreateOperation) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("failed to create role: %#v", resp) + } + + resp, err = submitRequest(roleName, logical.ReadOperation) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatalf("failed to read role: resp:%#v,\nerr:%#v", resp, err) + } + if resp.Data["bound_iam_principal_id"] != "" { + t.Fatalf("expected to get no unique ID in role, but got %q", resp.Data["bound_iam_principal_id"]) + } + + data = map[string]interface{}{ + "resolve_aws_unique_ids": true, + } + resp, err = submitRequest(roleName, logical.UpdateOperation) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("unable to upgrade role to resolve internal IDs: resp:%#v", resp) + } + + resp, err = submitRequest(roleName, logical.ReadOperation) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatalf("failed to read role: resp:%#v,\nerr:%#v", resp, err) + } + if resp.Data["bound_iam_principal_id"] != "FakeUniqueId1" { + t.Fatalf("bad: expected upgrade of role resolve principal ID to %q, but got %q instead", "FakeUniqueId1", resp.Data["bound_iam_principal_id"]) + } } func TestBackend_pathIam(t *testing.T) { @@ -147,7 +221,7 @@ func TestBackend_pathIam(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -174,6 +248,7 @@ func TestBackend_pathIam(t *testing.T) { "policies": "p,q,r,s", "max_ttl": "2h", "bound_iam_principal_arn": 
"n:aws:iam::123456789012:user/MyUserName", + "resolve_aws_unique_ids": false, } resp, err = b.HandleRequest(&logical.Request{ Operation: logical.CreateOperation, @@ -310,7 +385,7 @@ func TestBackend_pathRoleMixedTypes(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -369,6 +444,7 @@ func TestBackend_pathRoleMixedTypes(t *testing.T) { data["inferred_entity_type"] = ec2EntityType data["inferred_aws_region"] = "us-east-1" + data["resolve_aws_unique_ids"] = false resp, err = submitRequest("multipleTypesInferred", logical.CreateOperation) if err != nil { t.Fatal(err) @@ -376,6 +452,32 @@ func TestBackend_pathRoleMixedTypes(t *testing.T) { if resp.IsError() { t.Fatalf("didn't allow creation of roles with only inferred bindings") } + + b.resolveArnToUniqueIDFunc = resolveArnToFakeUniqueId + data["resolve_aws_unique_ids"] = true + resp, err = submitRequest("withInternalIdResolution", logical.CreateOperation) + if err != nil { + t.Fatal(err) + } + if resp.IsError() { + t.Fatalf("didn't allow creation of role resolving unique IDs") + } + resp, err = submitRequest("withInternalIdResolution", logical.ReadOperation) + if err != nil { + t.Fatal(err) + } + if resp.Data["bound_iam_principal_id"] != "FakeUniqueId1" { + t.Fatalf("expected fake unique ID of FakeUniqueId1, got %q", resp.Data["bound_iam_principal_id"]) + } + data["resolve_aws_unique_ids"] = false + resp, err = submitRequest("withInternalIdResolution", logical.UpdateOperation) + if err != nil { + t.Fatal(err) + } + if !resp.IsError() { + t.Fatalf("allowed changing resolve_aws_unique_ids from true to false") + } + } func TestAwsEc2_RoleCrud(t *testing.T) { @@ -389,7 +491,7 @@ func TestAwsEc2_RoleCrud(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -417,11 +519,12 @@ func TestAwsEc2_RoleCrud(t *testing.T) { "bound_ami_id": "testamiid", "bound_account_id": 
"testaccountid", "bound_region": "testregion", - "bound_iam_role_arn": "testiamrolearn", - "bound_iam_instance_profile_arn": "testiaminstanceprofilearn", + "bound_iam_role_arn": "arn:aws:iam::123456789012:role/MyRole", + "bound_iam_instance_profile_arn": "arn:aws:iam::123456789012:instance-profile/MyInstanceProfile", "bound_subnet_id": "testsubnetid", "bound_vpc_id": "testvpcid", "role_tag": "testtag", + "resolve_aws_unique_ids": false, "allow_instance_migration": true, "ttl": "10m", "max_ttl": "20m", @@ -451,17 +554,19 @@ func TestAwsEc2_RoleCrud(t *testing.T) { "bound_account_id": "testaccountid", "bound_region": "testregion", "bound_iam_principal_arn": "", - "bound_iam_role_arn": "testiamrolearn", - "bound_iam_instance_profile_arn": "testiaminstanceprofilearn", + "bound_iam_principal_id": "", + "bound_iam_role_arn": "arn:aws:iam::123456789012:role/MyRole", + "bound_iam_instance_profile_arn": "arn:aws:iam::123456789012:instance-profile/MyInstanceProfile", "bound_subnet_id": "testsubnetid", "bound_vpc_id": "testvpcid", "inferred_entity_type": "", "inferred_aws_region": "", + "resolve_aws_unique_ids": false, "role_tag": "testtag", "allow_instance_migration": true, "ttl": time.Duration(600), "max_ttl": time.Duration(1200), - "policies": []string{"default", "testpolicy1", "testpolicy2"}, + "policies": []string{"testpolicy1", "testpolicy2"}, "disallow_reauthentication": true, "period": time.Duration(60), } @@ -512,14 +617,15 @@ func TestAwsEc2_RoleDurationSeconds(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } roleData := map[string]interface{}{ "auth_type": "ec2", - "bound_iam_instance_profile_arn": "testarn", + "bound_iam_instance_profile_arn": "arn:aws:iam::123456789012:instance-profile/test-profile-name", + "resolve_aws_unique_ids": false, "ttl": "10s", "max_ttl": "20s", "period": "30s", @@ -554,3 +660,7 @@ func TestAwsEc2_RoleDurationSeconds(t *testing.T) { t.Fatalf("bad: period; 
expected: 30, actual: %d", resp.Data["period"]) } } + +func resolveArnToFakeUniqueId(s logical.Storage, arn string) (string, error) { + return "FakeUniqueId1", nil +} diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go index 088cc41..9420164 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend.go @@ -10,9 +10,8 @@ import ( func Factory(conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() - _, err := b.Setup(conf) - if err != nil { - return b, err + if err := b.Setup(conf); err != nil { + return nil, err } return b, nil } @@ -21,13 +20,11 @@ func Backend() *backend { var b backend b.Backend = &framework.Backend{ Help: backendHelp, - PathsSpecial: &logical.Paths{ Unauthenticated: []string{ "login", }, }, - Paths: append([]*framework.Path{ pathConfig(&b), pathLogin(&b), @@ -35,10 +32,9 @@ func Backend() *backend { pathCerts(&b), pathCRLs(&b), }), - - AuthRenew: b.pathLoginRenew, - - Invalidate: b.invalidate, + AuthRenew: b.pathLoginRenew, + Invalidate: b.invalidate, + BackendType: logical.TypeCredential, } b.crlUpdateMutex = &sync.RWMutex{} diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go index f96c9cb..4680d61 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/backend_test.go @@ -1,14 +1,23 @@ package cert import ( + "crypto/rand" + "crypto/rsa" "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" "fmt" + "io" "io/ioutil" + "math/big" + "net" + "os" "reflect" "testing" "time" "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/vault/helper/certutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" 
logicaltest "github.com/hashicorp/vault/logical/testing" @@ -36,10 +45,10 @@ const ( // But the client, presents the CA cert of the server to trust the server. // The client can present a cert and key which is completely independent of server's CA. // The connection state returned will contain the certificate presented by the client. -func connectionState(t *testing.T, serverCAPath, serverCertPath, serverKeyPath, clientCertPath, clientKeyPath string) tls.ConnectionState { +func connectionState(serverCAPath, serverCertPath, serverKeyPath, clientCertPath, clientKeyPath string) (tls.ConnectionState, error) { serverKeyPair, err := tls.LoadX509KeyPair(serverCertPath, serverKeyPath) if err != nil { - t.Fatal(err) + return tls.ConnectionState{}, err } // Prepare the listener configuration with server's key pair listenConf := &tls.Config{ @@ -49,7 +58,7 @@ func connectionState(t *testing.T, serverCAPath, serverCertPath, serverKeyPath, clientKeyPair, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath) if err != nil { - t.Fatal(err) + return tls.ConnectionState{}, err } // Load the CA cert required by the client to authenticate the server. rootConfig := &rootcerts.Config{ @@ -57,7 +66,7 @@ func connectionState(t *testing.T, serverCAPath, serverCertPath, serverKeyPath, } serverCAs, err := rootcerts.LoadCACerts(rootConfig) if err != nil { - t.Fatal(err) + return tls.ConnectionState{}, err } // Prepare the dial configuration that the client uses to establish the connection. dialConf := &tls.Config{ @@ -68,37 +77,287 @@ func connectionState(t *testing.T, serverCAPath, serverCertPath, serverKeyPath, // Start the server. list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf) if err != nil { - t.Fatal(err) + return tls.ConnectionState{}, err } defer list.Close() + // Accept connections. 
+ serverErrors := make(chan error, 1) + connState := make(chan tls.ConnectionState) + go func() { + defer close(connState) + serverConn, err := list.Accept() + if err != nil { + serverErrors <- err + close(serverErrors) + return + } + defer serverConn.Close() + + // Read the ping + buf := make([]byte, 4) + _, err = serverConn.Read(buf) + if (err != nil) && (err != io.EOF) { + serverErrors <- err + close(serverErrors) + return + } + close(serverErrors) + connState <- serverConn.(*tls.Conn).ConnectionState() + }() + // Establish a connection from the client side and write a few bytes. + clientErrors := make(chan error, 1) go func() { addr := list.Addr().String() conn, err := tls.Dial("tcp", addr, dialConf) if err != nil { - t.Fatalf("err: %v", err) + clientErrors <- err + close(clientErrors) + return } defer conn.Close() // Write ping - conn.Write([]byte("ping")) + _, err = conn.Write([]byte("ping")) + if err != nil { + clientErrors <- err + } + close(clientErrors) }() - // Accept the connection on the server side. 
- serverConn, err := list.Accept() + for err = range clientErrors { + if err != nil { + return tls.ConnectionState{}, fmt.Errorf("error in client goroutine:%v", err) + } + } + + for err = range serverErrors { + if err != nil { + return tls.ConnectionState{}, fmt.Errorf("error in server goroutine:%v", err) + } + } + // Grab the current state + return <-connState, nil +} + +func TestBackend_NonCAExpiry(t *testing.T) { + var resp *logical.Response + var err error + + // Create a self-signed certificate and issue a leaf certificate using the + // CA cert + template := &x509.Certificate{ + SerialNumber: big.NewInt(1234), + Subject: pkix.Name{ + CommonName: "localhost", + Organization: []string{"hashicorp"}, + OrganizationalUnit: []string{"vault"}, + }, + BasicConstraintsValid: true, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(50 * time.Second), + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign), + } + + // Set IP SAN + parsedIP := net.ParseIP("127.0.0.1") + if parsedIP == nil { + t.Fatalf("failed to create parsed IP") + } + template.IPAddresses = []net.IP{parsedIP} + + // Private key for CA cert + caPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { t.Fatal(err) } - defer serverConn.Close() - // Read the ping - buf := make([]byte, 4) - serverConn.Read(buf) + // Marshalling to be able to create PEM file + caPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(caPrivateKey) - // Grab the current state - connState := serverConn.(*tls.Conn).ConnectionState() - return connState + caPublicKey := &caPrivateKey.PublicKey + + template.IsCA = true + + caCertBytes, err := x509.CreateCertificate(rand.Reader, template, template, caPublicKey, caPrivateKey) + if err != nil { + t.Fatal(err) + } + + caCert, err := x509.ParseCertificate(caCertBytes) + if err != nil { + t.Fatal(err) + } + + parsedCaBundle := &certutil.ParsedCertBundle{ 
+ Certificate: caCert, + CertificateBytes: caCertBytes, + PrivateKeyBytes: caPrivateKeyBytes, + PrivateKeyType: certutil.RSAPrivateKey, + } + + caCertBundle, err := parsedCaBundle.ToCertBundle() + if err != nil { + t.Fatal(err) + } + + caCertFile, err := ioutil.TempFile("", "caCert") + if err != nil { + t.Fatal(err) + } + + defer os.Remove(caCertFile.Name()) + + if _, err := caCertFile.Write([]byte(caCertBundle.Certificate)); err != nil { + t.Fatal(err) + } + if err := caCertFile.Close(); err != nil { + t.Fatal(err) + } + + caKeyFile, err := ioutil.TempFile("", "caKey") + if err != nil { + t.Fatal(err) + } + + defer os.Remove(caKeyFile.Name()) + + if _, err := caKeyFile.Write([]byte(caCertBundle.PrivateKey)); err != nil { + t.Fatal(err) + } + if err := caKeyFile.Close(); err != nil { + t.Fatal(err) + } + + // Prepare template for non-CA cert + + template.IsCA = false + template.SerialNumber = big.NewInt(5678) + + template.KeyUsage = x509.KeyUsage(x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign) + issuedPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + issuedPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(issuedPrivateKey) + + issuedPublicKey := &issuedPrivateKey.PublicKey + + // Keep a short certificate lifetime so logins can be tested both when + // cert is valid and when it gets expired + template.NotBefore = time.Now().Add(-2 * time.Second) + template.NotAfter = time.Now().Add(3 * time.Second) + + issuedCertBytes, err := x509.CreateCertificate(rand.Reader, template, caCert, issuedPublicKey, caPrivateKey) + if err != nil { + t.Fatal(err) + } + + issuedCert, err := x509.ParseCertificate(issuedCertBytes) + if err != nil { + t.Fatal(err) + } + + parsedIssuedBundle := &certutil.ParsedCertBundle{ + Certificate: issuedCert, + CertificateBytes: issuedCertBytes, + PrivateKeyBytes: issuedPrivateKeyBytes, + PrivateKeyType: certutil.RSAPrivateKey, + } + + issuedCertBundle, err := parsedIssuedBundle.ToCertBundle() + if err != 
nil { + t.Fatal(err) + } + + issuedCertFile, err := ioutil.TempFile("", "issuedCert") + if err != nil { + t.Fatal(err) + } + + defer os.Remove(issuedCertFile.Name()) + + if _, err := issuedCertFile.Write([]byte(issuedCertBundle.Certificate)); err != nil { + t.Fatal(err) + } + if err := issuedCertFile.Close(); err != nil { + t.Fatal(err) + } + + issuedKeyFile, err := ioutil.TempFile("", "issuedKey") + if err != nil { + t.Fatal(err) + } + + defer os.Remove(issuedKeyFile.Name()) + + if _, err := issuedKeyFile.Write([]byte(issuedCertBundle.PrivateKey)); err != nil { + t.Fatal(err) + } + if err := issuedKeyFile.Close(); err != nil { + t.Fatal(err) + } + + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + // Register the Non-CA certificate of the client key pair + certData := map[string]interface{}{ + "certificate": issuedCertBundle.Certificate, + "policies": "abc", + "display_name": "cert1", + "ttl": 10000, + } + certReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "certs/cert1", + Storage: storage, + Data: certData, + } + + resp, err = b.HandleRequest(certReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Create connection state using the certificates generated + connState, err := connectionState(caCertFile.Name(), caCertFile.Name(), caKeyFile.Name(), issuedCertFile.Name(), issuedKeyFile.Name()) + if err != nil { + t.Fatalf("error testing connection state:%v", err) + } + + loginReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "login", + Connection: &logical.Connection{ + ConnState: &connState, + }, + } + + // Login when the certificate is still valid. Login should succeed. 
+ resp, err = b.HandleRequest(loginReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Wait until the certificate expires + time.Sleep(5 * time.Second) + + // Login attempt after certificate expiry should fail + resp, err = b.HandleRequest(loginReq) + if err == nil { + t.Fatalf("expected error due to expired certificate") + } } func TestBackend_RegisteredNonCA_CRL(t *testing.T) { @@ -137,7 +396,10 @@ func TestBackend_RegisteredNonCA_CRL(t *testing.T) { // Connection state is presenting the client Non-CA cert and its key. // This is exactly what is registered at the backend. - connState := connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1) + connState, err := connectionState(serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1) + if err != nil { + t.Fatalf("error testing connection state:%v", err) + } loginReq := &logical.Request{ Operation: logical.UpdateOperation, Storage: storage, @@ -217,7 +479,10 @@ func TestBackend_CRLs(t *testing.T) { // Connection state is presenting the client CA cert and its key. // This is exactly what is registered at the backend. - connState := connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath1, testRootCAKeyPath1) + connState, err := connectionState(serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath1, testRootCAKeyPath1) + if err != nil { + t.Fatalf("error testing connection state:%v", err) + } loginReq := &logical.Request{ Operation: logical.UpdateOperation, Storage: storage, @@ -233,7 +498,10 @@ func TestBackend_CRLs(t *testing.T) { // Now, without changing the registered client CA cert, present from // the client side, a cert issued using the registered CA. 
- connState = connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1) + connState, err = connectionState(serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1) + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } loginReq.Connection.ConnState = &connState // Attempt login with the updated connection @@ -283,7 +551,10 @@ func TestBackend_CRLs(t *testing.T) { } // Test login using a different client CA cert pair. - connState = connectionState(t, serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath2, testRootCAKeyPath2) + connState, err = connectionState(serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath2, testRootCAKeyPath2) + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } loginReq.Connection.ConnState = &connState // Attempt login with the updated connection @@ -359,8 +630,11 @@ func TestBackend_CertWrites(t *testing.T) { // Test a client trusted by a CA func TestBackend_basic_CA(t *testing.T) { - connState := testConnState(t, "test-fixtures/keys/cert.pem", + connState, err := testConnState("test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") if err != nil { t.Fatalf("err: %v", err) @@ -385,8 +659,11 @@ func TestBackend_basic_CA(t *testing.T) { // Test CRL behavior func TestBackend_Basic_CRLs(t *testing.T) { - connState := testConnState(t, "test-fixtures/keys/cert.pem", + connState, err := testConnState("test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") if err != nil { t.Fatalf("err: %v", err) @@ -411,8 +688,11 @@ func TestBackend_Basic_CRLs(t *testing.T) { // Test a 
self-signed client (root CA) that is trusted func TestBackend_basic_singleCert(t *testing.T) { - connState := testConnState(t, "test-fixtures/root/rootcacert.pem", + connState, err := testConnState("test-fixtures/root/rootcacert.pem", "test-fixtures/root/rootcakey.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") if err != nil { t.Fatalf("err: %v", err) @@ -432,8 +712,11 @@ func TestBackend_basic_singleCert(t *testing.T) { // Test against a collection of matching and non-matching rules func TestBackend_mixed_constraints(t *testing.T) { - connState := testConnState(t, "test-fixtures/keys/cert.pem", + connState, err := testConnState("test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") if err != nil { t.Fatalf("err: %v", err) @@ -454,8 +737,11 @@ func TestBackend_mixed_constraints(t *testing.T) { // Test an untrusted client func TestBackend_untrusted(t *testing.T) { - connState := testConnState(t, "test-fixtures/keys/cert.pem", + connState, err := testConnState("test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } logicaltest.Test(t, logicaltest.TestCase{ Backend: testFactory(t), Steps: []logicaltest.TestStep{ @@ -682,17 +968,17 @@ func testAccStepCertNoLease( } } -func testConnState(t *testing.T, certPath, keyPath, rootCertPath string) tls.ConnectionState { +func testConnState(certPath, keyPath, rootCertPath string) (tls.ConnectionState, error) { cert, err := tls.LoadX509KeyPair(certPath, keyPath) if err != nil { - t.Fatalf("err: %v", err) + return tls.ConnectionState{}, err } rootConfig := &rootcerts.Config{ CAFile: rootCertPath, } 
rootCAs, err := rootcerts.LoadCACerts(rootConfig) if err != nil { - t.Fatalf("err: %v", err) + return tls.ConnectionState{}, err } listenConf := &tls.Config{ Certificates: []tls.Certificate{cert}, @@ -702,37 +988,72 @@ func testConnState(t *testing.T, certPath, keyPath, rootCertPath string) tls.Con } dialConf := new(tls.Config) *dialConf = *listenConf + // start a server list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf) if err != nil { - t.Fatalf("err: %v", err) + return tls.ConnectionState{}, err } defer list.Close() + // Accept connections. + serverErrors := make(chan error, 1) + connState := make(chan tls.ConnectionState) + go func() { + defer close(connState) + serverConn, err := list.Accept() + serverErrors <- err + if err != nil { + close(serverErrors) + return + } + defer serverConn.Close() + + // Read the ping + buf := make([]byte, 4) + _, err = serverConn.Read(buf) + if (err != nil) && (err != io.EOF) { + serverErrors <- err + close(serverErrors) + return + } else { + // EOF is a reasonable error condition, so swallow it. + serverErrors <- nil + } + close(serverErrors) + connState <- serverConn.(*tls.Conn).ConnectionState() + }() + + // Establish a connection from the client side and write a few bytes. 
+ clientErrors := make(chan error, 1) go func() { addr := list.Addr().String() conn, err := tls.Dial("tcp", addr, dialConf) + clientErrors <- err if err != nil { - t.Fatalf("err: %v", err) + close(clientErrors) + return } defer conn.Close() // Write ping - conn.Write([]byte("ping")) + _, err = conn.Write([]byte("ping")) + clientErrors <- err + close(clientErrors) }() - serverConn, err := list.Accept() - if err != nil { - t.Fatalf("err: %v", err) + for err = range clientErrors { + if err != nil { + return tls.ConnectionState{}, fmt.Errorf("error in client goroutine:%v", err) + } } - defer serverConn.Close() - - // Read the pign - buf := make([]byte, 4) - serverConn.Read(buf) + for err = range serverErrors { + if err != nil { + return tls.ConnectionState{}, fmt.Errorf("error in server goroutine:%v", err) + } + } // Grab the current state - connState := serverConn.(*tls.Conn).ConnectionState() - return connState + return <-connState, nil } func Test_Renew(t *testing.T) { @@ -750,8 +1071,11 @@ func Test_Renew(t *testing.T) { } b := lb.(*backend) - connState := testConnState(t, "test-fixtures/keys/cert.pem", + connState, err := testConnState("test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") if err != nil { t.Fatal(err) diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go index 66809c2..a1071fc 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/cli.go @@ -10,13 +10,13 @@ import ( type CLIHandler struct{} -func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { +func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) { var data struct { Mount string 
`mapstructure:"mount"` Name string `mapstructure:"name"` } if err := mapstructure.WeakDecode(m, &data); err != nil { - return "", err + return nil, err } if data.Mount == "" { @@ -29,13 +29,13 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { path := fmt.Sprintf("auth/%s/login", data.Mount) secret, err := c.Logical().Write(path, options) if err != nil { - return "", err + return nil, err } if secret == nil { - return "", fmt.Errorf("empty response from credential provider") + return nil, fmt.Errorf("empty response from credential provider") } - return secret.Auth.ClientToken, nil + return secret, nil } func (h *CLIHandler) Help() string { diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go index 2c002f6..fc5254f 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_certs.go @@ -52,7 +52,7 @@ certificate.`, }, "policies": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Description: "Comma-seperated list of policies.", }, @@ -133,7 +133,7 @@ func (b *backend) pathCertRead( Data: map[string]interface{}{ "certificate": cert.Certificate, "display_name": cert.DisplayName, - "policies": strings.Join(cert.Policies, ","), + "policies": cert.Policies, "ttl": duration / time.Second, }, }, nil @@ -144,7 +144,7 @@ func (b *backend) pathCertWrite( name := strings.ToLower(d.Get("name").(string)) certificate := d.Get("certificate").(string) displayName := d.Get("display_name").(string) - policies := policyutil.ParsePolicies(d.Get("policies").(string)) + policies := policyutil.ParsePolicies(d.Get("policies")) allowedNames := d.Get("allowed_names").([]string) // Default the display name to the certificate name if not given diff --git 
a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go index 164bbe7..2faecd3 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/cert/path_login.go @@ -156,11 +156,22 @@ func (b *backend) verifyCredentials(req *logical.Request, d *framework.FieldData clientCert := connState.PeerCertificates[0] // Allow constraining the login request to a single CertEntry - certName := d.Get("name").(string) + var certName string + if req.Auth != nil { // It's a renewal, use the saved certName + certName = req.Auth.Metadata["cert_name"] + } else { + certName = d.Get("name").(string) + } // Load the trusted certificates roots, trusted, trustedNonCAs := b.loadTrustedCerts(req.Storage, certName) + // Get the list of full chains matching the connection + trustedChains, err := validateConnState(roots, connState) + if err != nil { + return nil, nil, err + } + // If trustedNonCAs is not empty it means that client had registered a non-CA cert // with the backend. 
if len(trustedNonCAs) != 0 { @@ -175,12 +186,6 @@ func (b *backend) verifyCredentials(req *logical.Request, d *framework.FieldData } } - // Get the list of full chains matching the connection - trustedChains, err := validateConnState(roots, connState) - if err != nil { - return nil, nil, err - } - // If no trusted chain was found, client is not authenticated if len(trustedChains) == 0 { return nil, logical.ErrorResponse("invalid certificate or no client certificate supplied"), nil diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go index 0dbe893..b53e95f 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend.go @@ -1,6 +1,8 @@ package github import ( + "context" + "github.com/google/go-github/github" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/logical" @@ -9,7 +11,11 @@ import ( ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend() *backend { @@ -44,7 +50,8 @@ func Backend() *backend { pathLogin(&b), }, allPaths...), - AuthRenew: b.pathLoginRenew, + AuthRenew: b.pathLoginRenew, + BackendType: logical.TypeCredential, } return &b @@ -63,7 +70,8 @@ type backend struct { func (b *backend) Client(token string) (*github.Client, error) { tc := cleanhttp.DefaultClient() if token != "" { - tc = oauth2.NewClient(oauth2.NoContext, &tokenSource{Value: token}) + ctx := context.WithValue(context.Background(), oauth2.HTTPClient, tc) + tc = oauth2.NewClient(ctx, &tokenSource{Value: token}) } return github.NewClient(tc), nil diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go index 
037c2ca..6dd7da8 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/backend_test.go @@ -48,9 +48,8 @@ func TestBackend_Config(t *testing.T) { } logicaltest.Test(t, logicaltest.TestCase{ - AcceptanceTest: true, - PreCheck: func() { testAccPreCheck(t) }, - Backend: b, + PreCheck: func() { testAccPreCheck(t) }, + Backend: b, Steps: []logicaltest.TestStep{ testConfigWrite(t, config_data1), testLoginWrite(t, login_data, expectedTTL1.Nanoseconds(), false), @@ -105,9 +104,8 @@ func TestBackend_basic(t *testing.T) { } logicaltest.Test(t, logicaltest.TestCase{ - AcceptanceTest: true, - PreCheck: func() { testAccPreCheck(t) }, - Backend: b, + PreCheck: func() { testAccPreCheck(t) }, + Backend: b, Steps: []logicaltest.TestStep{ testAccStepConfig(t, false), testAccMap(t, "default", "fakepol"), @@ -131,15 +129,15 @@ func TestBackend_basic(t *testing.T) { func testAccPreCheck(t *testing.T) { if v := os.Getenv("GITHUB_TOKEN"); v == "" { - t.Fatal("GITHUB_TOKEN must be set for acceptance tests") + t.Skip("GITHUB_TOKEN must be set for acceptance tests") } if v := os.Getenv("GITHUB_ORG"); v == "" { - t.Fatal("GITHUB_ORG must be set for acceptance tests") + t.Skip("GITHUB_ORG must be set for acceptance tests") } if v := os.Getenv("GITHUB_BASEURL"); v == "" { - t.Fatal("GITHUB_BASEURL must be set for acceptance tests (use 'https://api.github.com' if you don't know what you're doing)") + t.Skip("GITHUB_BASEURL must be set for acceptance tests (use 'https://api.github.com' if you don't know what you're doing)") } } diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go index dda1dac..557939b 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/cli.go @@ -10,7 +10,7 @@ import ( type CLIHandler struct{} 
-func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { +func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) { mount, ok := m["mount"] if !ok { mount = "github" @@ -19,7 +19,7 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { token, ok := m["token"] if !ok { if token = os.Getenv("VAULT_AUTH_GITHUB_TOKEN"); token == "" { - return "", fmt.Errorf("GitHub token should be provided either as 'value' for 'token' key,\nor via an env var VAULT_AUTH_GITHUB_TOKEN") + return nil, fmt.Errorf("GitHub token should be provided either as 'value' for 'token' key,\nor via an env var VAULT_AUTH_GITHUB_TOKEN") } } @@ -28,13 +28,13 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { "token": token, }) if err != nil { - return "", err + return nil, err } if secret == nil { - return "", fmt.Errorf("empty response from credential provider") + return nil, fmt.Errorf("empty response from credential provider") } - return secret.Auth.ClientToken, nil + return secret, nil } func (h *CLIHandler) Help() string { diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go index 9db2e64..c211450 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/github/path_config.go @@ -5,9 +5,9 @@ import ( "net/url" "time" + "github.com/fatih/structs" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" - "github.com/fatih/structs" ) func pathConfig(b *backend) *framework.Path { @@ -37,7 +37,7 @@ API-compatible authentication server.`, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathConfigWrite, - logical.ReadOperation: b.pathConfigRead, + logical.ReadOperation: b.pathConfigRead, }, } } @@ -77,10 +77,10 @@ func (b *backend) 
pathConfigWrite( } entry, err := logical.StorageEntryJSON("config", config{ - Organization: organization, - BaseURL: baseURL, - TTL: ttl, - MaxTTL: maxTTL, + Organization: organization, + BaseURL: baseURL, + TTL: ttl, + MaxTTL: maxTTL, }) if err != nil { diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go index d165626..835b4a6 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend.go @@ -13,7 +13,11 @@ import ( ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend() *backend { @@ -39,7 +43,8 @@ func Backend() *backend { mfa.MFAPaths(b.Backend, pathLogin(&b))..., ), - AuthRenew: b.pathLoginRenew, + AuthRenew: b.pathLoginRenew, + BackendType: logical.TypeCredential, } return &b @@ -118,7 +123,12 @@ func (b *backend) Login(req *logical.Request, username string, password string) } // Try to bind as the login user. This is where the actual authentication takes place. 
- if err = c.Bind(userBindDN, password); err != nil { + if len(password) > 0 { + err = c.Bind(userBindDN, password) + } else { + err = c.UnauthenticatedBind(userBindDN) + } + if err != nil { return nil, logical.ErrorResponse(fmt.Sprintf("LDAP bind failed: %v", err)), nil } @@ -184,8 +194,8 @@ func (b *backend) Login(req *logical.Request, username string, password string) if len(policies) == 0 { errStr := "user is not a member of any authorized group" - if len(ldapResponse.Warnings()) > 0 { - errStr = fmt.Sprintf("%s; additionally, %s", errStr, ldapResponse.Warnings()[0]) + if len(ldapResponse.Warnings) > 0 { + errStr = fmt.Sprintf("%s; additionally, %s", errStr, ldapResponse.Warnings[0]) } ldapResponse.Data["error"] = errStr @@ -232,7 +242,13 @@ func (b *backend) getCN(dn string) string { func (b *backend) getUserBindDN(cfg *ConfigEntry, c *ldap.Conn, username string) (string, error) { bindDN := "" if cfg.DiscoverDN || (cfg.BindDN != "" && cfg.BindPassword != "") { - if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil { + var err error + if cfg.BindPassword != "" { + err = c.Bind(cfg.BindDN, cfg.BindPassword) + } else { + err = c.UnauthenticatedBind(cfg.BindDN) + } + if err != nil { return bindDN, fmt.Errorf("LDAP bind (service) failed: %v", err) } diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go index 51b4df7..3b1d936 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/backend_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/vault/helper/policyutil" "github.com/hashicorp/vault/logical" logicaltest "github.com/hashicorp/vault/logical/testing" "github.com/mitchellh/mapstructure" @@ -21,7 +22,7 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { t.Fatalf("failed to create backend") } - _, err := 
b.Backend.Setup(config) + err := b.Backend.Setup(config) if err != nil { t.Fatal(err) } @@ -94,7 +95,7 @@ func TestLdapAuthBackend_UserPolicies(t *testing.T) { if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%v resp:%#v", err, resp) } - expected := []string{"default", "grouppolicy", "userpolicy"} + expected := []string{"grouppolicy", "userpolicy"} if !reflect.DeepEqual(expected, resp.Auth.Policies) { t.Fatalf("bad: policies: expected: %q, actual: %q", expected, resp.Auth.Policies) } @@ -211,7 +212,7 @@ func TestBackend_groupCrud(t *testing.T) { Backend: b, Steps: []logicaltest.TestStep{ testAccStepGroup(t, "g1", "foo"), - testAccStepReadGroup(t, "g1", "default,foo"), + testAccStepReadGroup(t, "g1", "foo"), testAccStepDeleteGroup(t, "g1"), testAccStepReadGroup(t, "g1", ""), }, @@ -357,13 +358,13 @@ func testAccStepReadGroup(t *testing.T, group string, policies string) logicalte } var d struct { - Policies string `mapstructure:"policies"` + Policies []string `mapstructure:"policies"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } - if d.Policies != policies { + if !reflect.DeepEqual(d.Policies, policyutil.ParsePolicies(policies)) { return fmt.Errorf("bad: %#v", resp) } @@ -463,8 +464,8 @@ func testAccStepLoginNoGroupDN(t *testing.T, user string, pass string) logicalte // Verifies a search without defined GroupDN returns a warnting rather than failing Check: func(resp *logical.Response) error { - if len(resp.Warnings()) != 1 { - return fmt.Errorf("expected a warning due to no group dn, got: %#v", resp.Warnings()) + if len(resp.Warnings) != 1 { + return fmt.Errorf("expected a warning due to no group dn, got: %#v", resp.Warnings) } return logicaltest.TestCheckAuth([]string{"bar", "default"})(resp) diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go index e4d151f..262bc99 100644 --- 
a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/cli.go @@ -11,7 +11,7 @@ import ( type CLIHandler struct{} -func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { +func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) { mount, ok := m["mount"] if !ok { mount = "ldap" @@ -21,7 +21,7 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { if !ok { username = usernameFromEnv() if username == "" { - return "", fmt.Errorf("'username' not supplied and neither 'LOGNAME' nor 'USER' env vars set") + return nil, fmt.Errorf("'username' not supplied and neither 'LOGNAME' nor 'USER' env vars set") } } password, ok := m["password"] @@ -31,7 +31,7 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { password, err = pwd.Read(os.Stdin) fmt.Println() if err != nil { - return "", err + return nil, err } } @@ -51,13 +51,13 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { path := fmt.Sprintf("auth/%s/login/%s", mount, username) secret, err := c.Logical().Write(path, data) if err != nil { - return "", err + return nil, err } if secret == nil { - return "", fmt.Errorf("empty response from credential provider") + return nil, fmt.Errorf("empty response from credential provider") } - return secret.Auth.ClientToken, nil + return secret, nil } func (h *CLIHandler) Help() string { diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go index 4fc772e..bf76715 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_config.go @@ -3,6 +3,7 @@ package ldap import ( "crypto/tls" "crypto/x509" + "encoding/pem" "fmt" "net" "net/url" @@ -225,6 +226,15 @@ func (b *backend) 
newConfigEntry(d *framework.FieldData) (*ConfigEntry, error) { } certificate := d.Get("certificate").(string) if certificate != "" { + block, _ := pem.Decode([]byte(certificate)) + + if block == nil || block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("failed to decode PEM block in the certificate") + } + _, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate %s", err.Error()) + } cfg.Certificate = certificate } insecureTLS := d.Get("insecure_tls").(bool) diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go index 998fdc4..48c0d25 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_groups.go @@ -1,8 +1,6 @@ package ldap import ( - "strings" - "github.com/hashicorp/vault/helper/policyutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" @@ -31,7 +29,7 @@ func pathGroups(b *backend) *framework.Path { }, "policies": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the group.", }, }, @@ -86,7 +84,7 @@ func (b *backend) pathGroupRead( return &logical.Response{ Data: map[string]interface{}{ - "policies": strings.Join(group.Policies, ","), + "policies": group.Policies, }, }, nil } @@ -95,7 +93,7 @@ func (b *backend) pathGroupWrite( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { // Store it entry, err := logical.StorageEntryJSON("group/"+d.Get("name").(string), &GroupEntry{ - Policies: policyutil.ParsePolicies(d.Get("policies").(string)), + Policies: policyutil.ParsePolicies(d.Get("policies")), }) if err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go 
b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go index e859adb..2266e8d 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_login.go @@ -3,7 +3,6 @@ package ldap import ( "fmt" "sort" - "strings" "github.com/hashicorp/vault/helper/policyutil" "github.com/hashicorp/vault/logical" @@ -59,7 +58,6 @@ func (b *backend) pathLogin( Policies: policies, Metadata: map[string]string{ "username": username, - "policies": strings.Join(policies, ","), }, InternalData: map[string]interface{}{ "password": password, diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go index 605f779..6845a41 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/ldap/path_users.go @@ -37,7 +37,7 @@ func pathUsers(b *backend) *framework.Path { }, "policies": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated with the user.", }, }, @@ -93,7 +93,7 @@ func (b *backend) pathUserRead( return &logical.Response{ Data: map[string]interface{}{ "groups": strings.Join(user.Groups, ","), - "policies": strings.Join(user.Policies, ","), + "policies": user.Policies, }, }, nil } @@ -102,7 +102,7 @@ func (b *backend) pathUserWrite( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) groups := strutil.RemoveDuplicates(strutil.ParseStringSlice(d.Get("groups").(string), ","), false) - policies := policyutil.ParsePolicies(d.Get("policies").(string)) + policies := policyutil.ParsePolicies(d.Get("policies")) for i, g := range groups { groups[i] = strings.TrimSpace(g) } diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go 
b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go index 43a1647..951d190 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend.go @@ -3,12 +3,17 @@ package okta import ( "fmt" + "github.com/chrismalek/oktasdk-go/okta" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend() *backend { @@ -31,7 +36,8 @@ func Backend() *backend { pathLogin(&b), }), - AuthRenew: b.pathLoginRenew, + AuthRenew: b.pathLoginRenew, + BackendType: logical.TypeCredential, } return &b @@ -51,49 +57,76 @@ func (b *backend) Login(req *logical.Request, username string, password string) } client := cfg.OktaClient() - auth, err := client.Authenticate(username, password) + + type embeddedResult struct { + User okta.User `json:"user"` + } + + type authResult struct { + Embedded embeddedResult `json:"_embedded"` + } + + authReq, err := client.NewRequest("POST", "authn", map[string]interface{}{ + "username": username, + "password": password, + }) + if err != nil { + return nil, nil, err + } + + var result authResult + rsp, err := client.Do(authReq, &result) if err != nil { return nil, logical.ErrorResponse(fmt.Sprintf("Okta auth failed: %v", err)), nil } - if auth == nil { + if rsp == nil { return nil, logical.ErrorResponse("okta auth backend unexpected failure"), nil } - oktaGroups, err := b.getOktaGroups(cfg, auth.Embedded.User.ID) - if err != nil { - return nil, logical.ErrorResponse(err.Error()), nil - } - if b.Logger().IsDebug() { - b.Logger().Debug("auth/okta: Groups fetched from Okta", "num_groups", len(oktaGroups), "groups", oktaGroups) - } - oktaResponse := &logical.Response{ Data: map[string]interface{}{}, } - if len(oktaGroups) == 0 { 
- errString := fmt.Sprintf( - "no Okta groups found; only policies from locally-defined groups available") - oktaResponse.AddWarning(errString) - } var allGroups []string + // Only query the Okta API for group membership if we have a token + if cfg.Token != "" { + oktaGroups, err := b.getOktaGroups(client, &result.Embedded.User) + if err != nil { + return nil, logical.ErrorResponse(fmt.Sprintf("okta failure retrieving groups: %v", err)), nil + } + if len(oktaGroups) == 0 { + errString := fmt.Sprintf( + "no Okta groups found; only policies from locally-defined groups available") + oktaResponse.AddWarning(errString) + } + allGroups = append(allGroups, oktaGroups...) + } + // Import the custom added groups from okta backend user, err := b.User(req.Storage, username) + if err != nil { + if b.Logger().IsDebug() { + b.Logger().Debug("auth/okta: error looking up user", "error", err) + } + } if err == nil && user != nil && user.Groups != nil { if b.Logger().IsDebug() { b.Logger().Debug("auth/okta: adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups) } allGroups = append(allGroups, user.Groups...) } - // Merge local and Okta groups - allGroups = append(allGroups, oktaGroups...) // Retrieve policies var policies []string for _, groupName := range allGroups { - group, err := b.Group(req.Storage, groupName) - if err == nil && group != nil && group.Policies != nil { - policies = append(policies, group.Policies...) + entry, _, err := b.Group(req.Storage, groupName) + if err != nil { + if b.Logger().IsDebug() { + b.Logger().Debug("auth/okta: error looking up group policies", "error", err) + } + } + if err == nil && entry != nil && entry.Policies != nil { + policies = append(policies, entry.Policies...) 
} } @@ -104,8 +137,8 @@ func (b *backend) Login(req *logical.Request, username string, password string) if len(policies) == 0 { errStr := "user is not a member of any authorized policy" - if len(oktaResponse.Warnings()) > 0 { - errStr = fmt.Sprintf("%s; additionally, %s", errStr, oktaResponse.Warnings()[0]) + if len(oktaResponse.Warnings) > 0 { + errStr = fmt.Sprintf("%s; additionally, %s", errStr, oktaResponse.Warnings[0]) } oktaResponse.Data["error"] = errStr @@ -115,21 +148,22 @@ func (b *backend) Login(req *logical.Request, username string, password string) return policies, oktaResponse, nil } -func (b *backend) getOktaGroups(cfg *ConfigEntry, userID string) ([]string, error) { - if cfg.Token != "" { - client := cfg.OktaClient() - groups, err := client.Groups(userID) - if err != nil { - return nil, err - } - - oktaGroups := make([]string, 0, len(*groups)) - for _, group := range *groups { - oktaGroups = append(oktaGroups, group.Profile.Name) - } - return oktaGroups, err +func (b *backend) getOktaGroups(client *okta.Client, user *okta.User) ([]string, error) { + rsp, err := client.Users.PopulateGroups(user) + if err != nil { + return nil, err } - return nil, nil + if rsp == nil { + return nil, fmt.Errorf("okta auth backend unexpected failure") + } + oktaGroups := make([]string, 0, len(user.Groups)) + for _, group := range user.Groups { + oktaGroups = append(oktaGroups, group.Profile.Name) + } + if b.Logger().IsDebug() { + b.Logger().Debug("auth/okta: Groups fetched from Okta", "num_groups", len(oktaGroups), "groups", oktaGroups) + } + return oktaGroups, nil } const backendHelp = ` diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go index 7672dc0..9c2503d 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/backend_test.go @@ -10,14 +10,21 @@ import ( 
"github.com/hashicorp/vault/helper/policyutil" log "github.com/mgutz/logxi/v1" + "time" + "github.com/hashicorp/vault/logical" logicaltest "github.com/hashicorp/vault/logical/testing" ) func TestBackend_Config(t *testing.T) { + defaultLeaseTTLVal := time.Hour * 12 + maxLeaseTTLVal := time.Hour * 24 b, err := Factory(&logical.BackendConfig{ Logger: logformat.NewVaultLogger(log.LevelTrace), - System: &logical.StaticSystemView{}, + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: defaultLeaseTTLVal, + MaxLeaseTTLVal: maxLeaseTTLVal, + }, }) if err != nil { t.Fatalf("Unable to create backend: %s", err) @@ -25,14 +32,17 @@ func TestBackend_Config(t *testing.T) { username := os.Getenv("OKTA_USERNAME") password := os.Getenv("OKTA_PASSWORD") + token := os.Getenv("OKTA_API_TOKEN") configData := map[string]interface{}{ "organization": os.Getenv("OKTA_ORG"), "base_url": "oktapreview.com", } + updatedDuration := time.Hour * 1 configDataToken := map[string]interface{}{ - "token": os.Getenv("OKTA_API_TOKEN"), + "token": token, + "ttl": "1h", } logicaltest.Test(t, logicaltest.TestCase{ @@ -41,23 +51,23 @@ func TestBackend_Config(t *testing.T) { Backend: b, Steps: []logicaltest.TestStep{ testConfigCreate(t, configData), - testLoginWrite(t, username, "wrong", "E0000004", nil), - testLoginWrite(t, username, password, "user is not a member of any authorized policy", nil), - testAccUserGroups(t, username, "local_group,local_group2"), - testAccGroups(t, "local_group", "local_group_policy"), - testLoginWrite(t, username, password, "", []string{"local_group_policy"}), - testAccGroups(t, "Everyone", "everyone_group_policy,every_group_policy2"), - testLoginWrite(t, username, password, "", []string{"local_group_policy"}), + testLoginWrite(t, username, "wrong", "E0000004", 0, nil), + testLoginWrite(t, username, password, "user is not a member of any authorized policy", 0, nil), + testAccUserGroups(t, username, "local_grouP,lOcal_group2"), + testAccGroups(t, "local_groUp", 
"loCal_group_policy"), + testLoginWrite(t, username, password, "", defaultLeaseTTLVal, []string{"local_group_policy"}), + testAccGroups(t, "everyoNe", "everyone_grouP_policy,eveRy_group_policy2"), + testLoginWrite(t, username, password, "", defaultLeaseTTLVal, []string{"local_group_policy"}), testConfigUpdate(t, configDataToken), - testConfigRead(t, configData), - testLoginWrite(t, username, password, "", []string{"everyone_group_policy", "every_group_policy2", "local_group_policy"}), - testAccGroups(t, "local_group2", "testgroup_group_policy"), - testLoginWrite(t, username, password, "", []string{"everyone_group_policy", "every_group_policy2", "local_group_policy", "testgroup_group_policy"}), + testConfigRead(t, token, configData), + testLoginWrite(t, username, password, "", updatedDuration, []string{"everyone_group_policy", "every_group_policy2", "local_group_policy"}), + testAccGroups(t, "locAl_group2", "testgroup_group_policy"), + testLoginWrite(t, username, password, "", updatedDuration, []string{"everyone_group_policy", "every_group_policy2", "local_group_policy", "testgroup_group_policy"}), }, }) } -func testLoginWrite(t *testing.T, username, password, reason string, policies []string) logicaltest.TestStep { +func testLoginWrite(t *testing.T, username, password, reason string, expectedTTL time.Duration, policies []string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "login/" + username, @@ -76,6 +86,11 @@ func testLoginWrite(t *testing.T, username, password, reason string, policies [] if !policyutil.EquivalentPolicies(resp.Auth.Policies, policies) { return fmt.Errorf("policy mismatch expected %v but got %v", policies, resp.Auth.Policies) } + + actualTTL := resp.Auth.LeaseOptions.TTL + if actualTTL != expectedTTL { + return fmt.Errorf("TTL mismatch expected %v but got %v", expectedTTL, actualTTL) + } } return nil @@ -99,7 +114,7 @@ func testConfigUpdate(t *testing.T, d map[string]interface{}) logicaltest.TestSt 
} } -func testConfigRead(t *testing.T, d map[string]interface{}) logicaltest.TestStep { +func testConfigRead(t *testing.T, token string, d map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ReadOperation, Path: "config", @@ -108,16 +123,18 @@ func testConfigRead(t *testing.T, d map[string]interface{}) logicaltest.TestStep return resp.Error() } - if resp.Data["Org"] != d["organization"] { + if resp.Data["organization"] != d["organization"] { return fmt.Errorf("Org mismatch expected %s but got %s", d["organization"], resp.Data["Org"]) } - if resp.Data["BaseURL"] != d["base_url"] { + if resp.Data["base_url"] != d["base_url"] { return fmt.Errorf("BaseURL mismatch expected %s but got %s", d["base_url"], resp.Data["BaseURL"]) } - if _, exists := resp.Data["Token"]; exists { - return fmt.Errorf("token should not be returned on a read request") + for _, value := range resp.Data { + if value == token { + return fmt.Errorf("token should not be returned on a read request") + } } return nil diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go index 355e8cb..f5f8502 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/cli.go @@ -13,7 +13,7 @@ import ( type CLIHandler struct{} // Auth cli method -func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { +func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) { mount, ok := m["mount"] if !ok { mount = "okta" @@ -21,7 +21,7 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { username, ok := m["username"] if !ok { - return "", fmt.Errorf("'username' var must be set") + return nil, fmt.Errorf("'username' var must be set") } password, ok := m["password"] if !ok { @@ -30,7 +30,7 @@ func (h *CLIHandler) Auth(c *api.Client, m 
map[string]string) (string, error) { password, err = pwd.Read(os.Stdin) fmt.Println() if err != nil { - return "", err + return nil, err } } @@ -41,13 +41,13 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { path := fmt.Sprintf("auth/%s/login/%s", mount, username) secret, err := c.Logical().Write(path, data) if err != nil { - return "", err + return nil, err } if secret == nil { - return "", fmt.Errorf("empty response from credential provider") + return nil, fmt.Errorf("empty response from credential provider") } - return secret.Auth.ClientToken, nil + return secret, nil } // Help method for okta cli diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go index b454f7e..e879302 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_config.go @@ -4,9 +4,17 @@ import ( "fmt" "net/url" + "time" + + "github.com/chrismalek/oktasdk-go/okta" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" - "github.com/sstarcher/go-okta" +) + +const ( + defaultBaseURL = "okta.com" + previewBaseURL = "oktapreview.com" ) func pathConfig(b *backend) *framework.Path { @@ -15,16 +23,35 @@ func pathConfig(b *backend) *framework.Path { Fields: map[string]*framework.FieldSchema{ "organization": &framework.FieldSchema{ Type: framework.TypeString, - Description: "Okta organization to authenticate against", + Description: "(DEPRECATED) Okta organization to authenticate against. Use org_name instead.", + }, + "org_name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the organization to be used in the Okta API.", }, "token": &framework.FieldSchema{ Type: framework.TypeString, - Description: "Okta admin API token", + Description: "(DEPRECATED) Okta admin API token. 
Use api_token instead.", + }, + "api_token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Okta API key.", }, "base_url": &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The API endpoint to use. Useful if you -are using Okta development accounts.`, + Type: framework.TypeString, + Description: `The base domain to use for the Okta API. When not specified in the configuraiton, "okta.com" is used.`, + }, + "production": &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `(DEPRECATED) Use base_url.`, + }, + "ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: `Duration after which authentication will be expired`, + }, + "max_ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: `Maximum duration after which authentication will be expired`, }, }, @@ -73,17 +100,24 @@ func (b *backend) pathConfigRead( resp := &logical.Response{ Data: map[string]interface{}{ - "Org": cfg.Org, - "BaseURL": cfg.BaseURL, + "organization": cfg.Org, + "org_name": cfg.Org, + "ttl": cfg.TTL, + "max_ttl": cfg.MaxTTL, }, } + if cfg.BaseURL != "" { + resp.Data["base_url"] = cfg.BaseURL + } + if cfg.Production != nil { + resp.Data["production"] = *cfg.Production + } return resp, nil } func (b *backend) pathConfigWrite( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - org := d.Get("organization").(string) cfg, err := b.Config(req.Storage) if err != nil { return nil, err @@ -92,30 +126,69 @@ func (b *backend) pathConfigWrite( // Due to the existence check, entry will only be nil if it's a create // operation, so just create a new one if cfg == nil { - cfg = &ConfigEntry{ - Org: org, - } + cfg = &ConfigEntry{} } - token, ok := d.GetOk("token") + org, ok := d.GetOk("org_name") + if ok { + cfg.Org = org.(string) + } + if cfg.Org == "" { + org, ok = d.GetOk("organization") + if ok { + cfg.Org = org.(string) + } + } + if cfg.Org == "" && req.Operation == 
logical.CreateOperation { + return logical.ErrorResponse("org_name is missing"), nil + } + + token, ok := d.GetOk("api_token") if ok { cfg.Token = token.(string) - } else if req.Operation == logical.CreateOperation { - cfg.Token = d.Get("token").(string) + } + if cfg.Token == "" { + token, ok = d.GetOk("token") + if ok { + cfg.Token = token.(string) + } } - baseURL, ok := d.GetOk("base_url") + baseURLRaw, ok := d.GetOk("base_url") if ok { - baseURLString := baseURL.(string) - if len(baseURLString) != 0 { - _, err = url.Parse(baseURLString) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Error parsing given base_url: %s", err)), nil - } - cfg.BaseURL = baseURLString + baseURL := baseURLRaw.(string) + _, err = url.Parse(fmt.Sprintf("https://%s,%s", cfg.Org, baseURL)) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Error parsing given base_url: %s", err)), nil } + cfg.BaseURL = baseURL + } + + // We only care about the production flag when base_url is not set. It is + // for compatibility reasons. 
+ if cfg.BaseURL == "" { + productionRaw, ok := d.GetOk("production") + if ok { + production := productionRaw.(bool) + cfg.Production = &production + } + } else { + // clear out old production flag if base_url is set + cfg.Production = nil + } + + ttl, ok := d.GetOk("ttl") + if ok { + cfg.TTL = time.Duration(ttl.(int)) * time.Second } else if req.Operation == logical.CreateOperation { - cfg.BaseURL = d.Get("base_url").(string) + cfg.TTL = time.Duration(d.Get("ttl").(int)) * time.Second + } + + maxTTL, ok := d.GetOk("max_ttl") + if ok { + cfg.MaxTTL = time.Duration(maxTTL.(int)) * time.Second + } else if req.Operation == logical.CreateOperation { + cfg.MaxTTL = time.Duration(d.Get("max_ttl").(int)) * time.Second } jsonCfg, err := logical.StorageEntryJSON("config", cfg) @@ -141,23 +214,29 @@ func (b *backend) pathConfigExistenceCheck( // OktaClient creates a basic okta client connection func (c *ConfigEntry) OktaClient() *okta.Client { - client := okta.NewClient(c.Org) + baseURL := defaultBaseURL + if c.Production != nil { + if !*c.Production { + baseURL = previewBaseURL + } + } if c.BaseURL != "" { - client.Url = c.BaseURL - } - - if c.Token != "" { - client.ApiToken = c.Token + baseURL = c.BaseURL } + // We validate config on input and errors are only returned when parsing URLs + client, _ := okta.NewClientWithDomain(cleanhttp.DefaultClient(), c.Org, baseURL, c.Token) return client } // ConfigEntry for Okta type ConfigEntry struct { - Org string `json:"organization"` - Token string `json:"token"` - BaseURL string `json:"base_url"` + Org string `json:"organization"` + Token string `json:"token"` + BaseURL string `json:"base_url"` + Production *bool `json:"is_production,omitempty"` + TTL time.Duration `json:"ttl"` + MaxTTL time.Duration `json:"max_ttl"` } const pathConfigHelp = ` diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go index d111775..9f879a1 100644 
--- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_groups.go @@ -1,6 +1,8 @@ package okta import ( + "strings" + "github.com/hashicorp/vault/helper/policyutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" @@ -29,7 +31,7 @@ func pathGroups(b *backend) *framework.Path { }, "policies": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the group.", }, }, @@ -45,34 +47,59 @@ func pathGroups(b *backend) *framework.Path { } } -func (b *backend) Group(s logical.Storage, n string) (*GroupEntry, error) { +// We look up groups in a case-insensitive manner since Okta is case-preserving +// but case-insensitive for comparisons +func (b *backend) Group(s logical.Storage, n string) (*GroupEntry, string, error) { + canonicalName := n entry, err := s.Get("group/" + n) if err != nil { - return nil, err + return nil, "", err } if entry == nil { - return nil, nil + entries, err := s.List("group/") + if err != nil { + return nil, "", err + } + for _, groupName := range entries { + if strings.ToLower(groupName) == strings.ToLower(n) { + entry, err = s.Get("group/" + groupName) + if err != nil { + return nil, "", err + } + canonicalName = groupName + break + } + } + } + if entry == nil { + return nil, "", nil } var result GroupEntry if err := entry.DecodeJSON(&result); err != nil { - return nil, err + return nil, "", err } - return &result, nil + return &result, canonicalName, nil } func (b *backend) pathGroupDelete( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) if len(name) == 0 { - return logical.ErrorResponse("Error empty name"), nil + return logical.ErrorResponse("'name' must be supplied"), nil } - err := req.Storage.Delete("group/" + name) + entry, canonicalName, err := 
b.Group(req.Storage, name) if err != nil { return nil, err } + if entry != nil { + err := req.Storage.Delete("group/" + canonicalName) + if err != nil { + return nil, err + } + } return nil, nil } @@ -81,10 +108,10 @@ func (b *backend) pathGroupRead( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) if len(name) == 0 { - return logical.ErrorResponse("Error empty name"), nil + return logical.ErrorResponse("'name' must be supplied"), nil } - group, err := b.Group(req.Storage, name) + group, _, err := b.Group(req.Storage, name) if err != nil { return nil, err } @@ -103,11 +130,23 @@ func (b *backend) pathGroupWrite( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) if len(name) == 0 { - return logical.ErrorResponse("Error empty name"), nil + return logical.ErrorResponse("'name' must be supplied"), nil + } + + // Check for an existing group, possibly lowercased so that we keep using + // existing user set values + _, canonicalName, err := b.Group(req.Storage, name) + if err != nil { + return nil, err + } + if canonicalName != "" { + name = canonicalName + } else { + name = strings.ToLower(name) } entry, err := logical.StorageEntryJSON("group/"+name, &GroupEntry{ - Policies: policyutil.ParsePolicies(d.Get("policies").(string)), + Policies: policyutil.ParsePolicies(d.Get("policies")), }) if err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go index accc867..e439771 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/okta/path_login.go @@ -5,6 +5,7 @@ import ( "sort" "strings" + "github.com/go-errors/errors" "github.com/hashicorp/vault/helper/policyutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" @@ 
-55,6 +56,11 @@ func (b *backend) pathLogin( sort.Strings(policies) + cfg, err := b.getConfig(req) + if err != nil { + return nil, err + } + resp.Auth = &logical.Auth{ Policies: policies, Metadata: map[string]string{ @@ -66,6 +72,7 @@ func (b *backend) pathLogin( }, DisplayName: username, LeaseOptions: logical.LeaseOptions{ + TTL: cfg.TTL, Renewable: true, }, } @@ -87,7 +94,25 @@ func (b *backend) pathLoginRenew( return nil, fmt.Errorf("policies have changed, not renewing") } - return framework.LeaseExtend(0, 0, b.System())(req, d) + cfg, err := b.getConfig(req) + if err != nil { + return nil, err + } + + return framework.LeaseExtend(cfg.TTL, cfg.MaxTTL, b.System())(req, d) +} + +func (b *backend) getConfig(req *logical.Request) (*ConfigEntry, error) { + + cfg, err := b.Config(req.Storage) + if err != nil { + return nil, err + } + if cfg == nil { + return nil, errors.New("Okta backend not configured") + } + + return cfg, nil } const pathLoginSyn = ` diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go index 4bd3306..49dcb7f 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/backend.go @@ -7,7 +7,11 @@ import ( ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend() *backend { @@ -32,7 +36,8 @@ func Backend() *backend { mfa.MFAPaths(b.Backend, pathLogin(&b))..., ), - AuthRenew: b.pathLoginRenew, + AuthRenew: b.pathLoginRenew, + BackendType: logical.TypeCredential, } return &b diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go index 2eaac22..7d4bc8b 100644 --- 
a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_config.go @@ -39,7 +39,7 @@ func pathConfig(b *backend) *framework.Path { "read_timeout": &framework.FieldSchema{ Type: framework.TypeDurationSecond, Default: 10, - Description: "Number of seconds before response times out (default: 10)", + Description: "Number of seconds before response times out (default: 10). Note: kept for backwards compatibility, currently unused.", }, "nas_port": &framework.FieldSchema{ Type: framework.TypeInt, diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go index 6f2c16d..f3f8c9d 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_login.go @@ -1,6 +1,7 @@ package radius import ( + "context" "fmt" "net" "strconv" @@ -51,12 +52,12 @@ func (b *backend) pathLogin( if username == "" { username = d.Get("urlusername").(string) if username == "" { - return logical.ErrorResponse("username cannot be emtpy"), nil + return logical.ErrorResponse("username cannot be empty"), nil } } if password == "" { - return logical.ErrorResponse("password cannot be emtpy"), nil + return logical.ErrorResponse("password cannot be empty"), nil } policies, resp, err := b.RadiusLogin(req, username, password) @@ -123,15 +124,24 @@ func (b *backend) RadiusLogin(req *logical.Request, username string, password st hostport := net.JoinHostPort(cfg.Host, strconv.Itoa(cfg.Port)) packet := radius.New(radius.CodeAccessRequest, []byte(cfg.Secret)) - packet.Add("User-Name", username) - packet.Add("User-Password", password) - packet.Add("NAS-Port", uint32(cfg.NasPort)) + usernameAttr, err := radius.NewString(username) + if err != nil { + return nil, nil, err + } + passwordAttr, err := radius.NewString(password) + if err != 
nil { + return nil, nil, err + } + packet.Add(1, usernameAttr) + packet.Add(2, passwordAttr) + packet.Add(5, radius.NewInteger(uint32(cfg.NasPort))) client := radius.Client{ - DialTimeout: time.Duration(cfg.DialTimeout) * time.Second, - ReadTimeout: time.Duration(cfg.ReadTimeout) * time.Second, + Dialer: net.Dialer{ + Timeout: time.Duration(cfg.DialTimeout) * time.Second, + }, } - received, err := client.Exchange(packet, hostport) + received, err := client.Exchange(context.Background(), packet, hostport) if err != nil { return nil, logical.ErrorResponse(err.Error()), nil } @@ -142,6 +152,9 @@ func (b *backend) RadiusLogin(req *logical.Request, username string, password st var policies []string // Retrieve user entry from storage user, err := b.user(req.Storage, username) + if err != nil { + return policies, logical.ErrorResponse("could not retrieve user entry from storage"), err + } if user == nil { // No user found, check if unregistered users are allowed (unregistered_user_policies not empty) if len(policyutil.SanitizePolicies(cfg.UnregisteredUserPolicies, false)) == 0 { diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go index ac9a971..1e0fc61 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/radius/path_users.go @@ -32,7 +32,7 @@ func pathUsers(b *backend) *framework.Path { }, "policies": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the user.", }, }, @@ -111,7 +111,7 @@ func (b *backend) pathUserRead( func (b *backend) pathUserWrite( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - var policies = policyutil.ParsePolicies(d.Get("policies").(string)) + var policies = policyutil.ParsePolicies(d.Get("policies")) for _, policy 
:= range policies { if policy == "root" { return logical.ErrorResponse("root policy cannot be granted by an authentication backend"), nil diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go index d219895..65f67e1 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend.go @@ -7,7 +7,11 @@ import ( ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend() *backend { @@ -32,7 +36,8 @@ func Backend() *backend { mfa.MFAPaths(b.Backend, pathLogin(&b))..., ), - AuthRenew: b.pathLoginRenew, + AuthRenew: b.pathLoginRenew, + BackendType: logical.TypeCredential, } return &b diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go index f04dc6a..4f077ee 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/backend_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/hashicorp/vault/helper/policyutil" "github.com/hashicorp/vault/logical" logicaltest "github.com/hashicorp/vault/logical/testing" "github.com/mitchellh/mapstructure" @@ -106,7 +107,7 @@ func TestBackend_userCrud(t *testing.T) { Backend: b, Steps: []logicaltest.TestStep{ testAccStepUser(t, "web", "password", "foo"), - testAccStepReadUser(t, "web", "default,foo"), + testAccStepReadUser(t, "web", "foo"), testAccStepDeleteUser(t, "web"), testAccStepReadUser(t, "web", ""), }, @@ -150,7 +151,7 @@ func TestBackend_passwordUpdate(t *testing.T) { Backend: b, Steps: []logicaltest.TestStep{ testAccStepUser(t, "web", "password", "foo"), - 
testAccStepReadUser(t, "web", "default,foo"), + testAccStepReadUser(t, "web", "foo"), testAccStepLogin(t, "web", "password", []string{"default", "foo"}), testUpdatePassword(t, "web", "newpassword"), testAccStepLogin(t, "web", "newpassword", []string{"default", "foo"}), @@ -175,10 +176,10 @@ func TestBackend_policiesUpdate(t *testing.T) { Backend: b, Steps: []logicaltest.TestStep{ testAccStepUser(t, "web", "password", "foo"), - testAccStepReadUser(t, "web", "default,foo"), + testAccStepReadUser(t, "web", "foo"), testAccStepLogin(t, "web", "password", []string{"default", "foo"}), testUpdatePolicies(t, "web", "foo,bar"), - testAccStepReadUser(t, "web", "bar,default,foo"), + testAccStepReadUser(t, "web", "bar,foo"), testAccStepLogin(t, "web", "password", []string{"bar", "default", "foo"}), }, }) @@ -311,13 +312,13 @@ func testAccStepReadUser(t *testing.T, name string, policies string) logicaltest } var d struct { - Policies string `mapstructure:"policies"` + Policies []string `mapstructure:"policies"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err } - if d.Policies != policies { + if !reflect.DeepEqual(d.Policies, policyutil.ParsePolicies(policies)) { return fmt.Errorf("bad: %#v", resp) } diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go index 80b52e3..4433c0e 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/cli.go @@ -14,7 +14,7 @@ type CLIHandler struct { DefaultMount string } -func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { +func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) { var data struct { Username string `mapstructure:"username"` Password string `mapstructure:"password"` @@ -23,18 +23,18 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { 
Passcode string `mapstructure:"passcode"` } if err := mapstructure.WeakDecode(m, &data); err != nil { - return "", err + return nil, err } if data.Username == "" { - return "", fmt.Errorf("'username' must be specified") + return nil, fmt.Errorf("'username' must be specified") } if data.Password == "" { fmt.Printf("Password (will be hidden): ") password, err := pwd.Read(os.Stdin) fmt.Println() if err != nil { - return "", err + return nil, err } data.Password = password } @@ -55,13 +55,13 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (string, error) { path := fmt.Sprintf("auth/%s/login/%s", data.Mount, data.Username) secret, err := c.Logical().Write(path, options) if err != nil { - return "", err + return nil, err } if secret == nil { - return "", fmt.Errorf("empty response from credential provider") + return nil, fmt.Errorf("empty response from credential provider") } - return secret.Auth.ClientToken, nil + return secret, nil } func (h *CLIHandler) Help() string { diff --git a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go index 6165c18..d03a6c2 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_user_policies.go @@ -17,7 +17,7 @@ func pathUserPolicies(b *backend) *framework.Path { Description: "Username for this user.", }, "policies": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", }, }, @@ -44,7 +44,7 @@ func (b *backend) pathUserPoliciesUpdate( return nil, fmt.Errorf("username does not exist") } - userEntry.Policies = policyutil.ParsePolicies(d.Get("policies").(string)) + userEntry.Policies = policyutil.ParsePolicies(d.Get("policies")) return nil, b.setUser(req.Storage, username, userEntry) } diff --git 
a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go index f8d4eb0..b207598 100644 --- a/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go +++ b/vendor/github.com/hashicorp/vault/builtin/credential/userpass/path_users.go @@ -38,7 +38,7 @@ func pathUsers(b *backend) *framework.Path { }, "policies": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", }, "ttl": &framework.FieldSchema{ @@ -137,7 +137,7 @@ func (b *backend) pathUserRead( return &logical.Response{ Data: map[string]interface{}{ - "policies": strings.Join(user.Policies, ","), + "policies": user.Policies, "ttl": user.TTL.Seconds(), "max_ttl": user.MaxTTL.Seconds(), }, @@ -166,7 +166,7 @@ func (b *backend) userCreateUpdate(req *logical.Request, d *framework.FieldData) } if policiesRaw, ok := d.GetOk("policies"); ok { - userEntry.Policies = policyutil.ParsePolicies(policiesRaw.(string)) + userEntry.Policies = policyutil.ParsePolicies(policiesRaw) } ttlStr := userEntry.TTL.String() diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go index 246e25c..b6341e0 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend.go @@ -9,7 +9,11 @@ import ( ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend() *backend { @@ -38,6 +42,7 @@ func Backend() *backend { WALRollback: walRollback, WALRollbackMinAge: 5 * time.Minute, + BackendType: logical.TypeLogical, } return &b diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go 
b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go index 3f04c68..5fab073 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/backend_test.go @@ -196,6 +196,10 @@ func teardown() error { RoleName: aws.String(testRoleName), // Required } _, err := svc.DetachRolePolicy(attachment) + if err != nil { + log.Printf("[WARN] AWS DetachRolePolicy failed: %v", err) + return err + } params := &iam.DeleteRoleInput{ RoleName: aws.String(testRoleName), @@ -206,9 +210,10 @@ func teardown() error { if err != nil { log.Printf("[WARN] AWS DeleteRole failed: %v", err) + return err } - return err + return nil } func testAccStepConfig(t *testing.T) logicaltest.TestStep { diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go index 545c685..f6bbbe2 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/client.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "os" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" @@ -31,7 +32,13 @@ func getRootConfig(s logical.Storage) (*aws.Config, error) { } if credsConfig.Region == "" { - credsConfig.Region = "us-east-1" + credsConfig.Region = os.Getenv("AWS_REGION") + if credsConfig.Region == "" { + credsConfig.Region = os.Getenv("AWS_DEFAULT_REGION") + if credsConfig.Region == "" { + credsConfig.Region = "us-east-1" + } + } } credsConfig.HTTPClient = cleanhttp.DefaultClient() @@ -49,11 +56,25 @@ func getRootConfig(s logical.Storage) (*aws.Config, error) { } func clientIAM(s logical.Storage) (*iam.IAM, error) { - awsConfig, _ := getRootConfig(s) - return iam.New(session.New(awsConfig)), nil + awsConfig, err := getRootConfig(s) + if err != nil { + return nil, err + } + client := iam.New(session.New(awsConfig)) + if client == nil { + return nil, 
fmt.Errorf("could not obtain iam client") + } + return client, nil } func clientSTS(s logical.Storage) (*sts.STS, error) { - awsConfig, _ := getRootConfig(s) - return sts.New(session.New(awsConfig)), nil + awsConfig, err := getRootConfig(s) + if err != nil { + return nil, err + } + client := sts.New(session.New(awsConfig)) + if client == nil { + return nil, fmt.Errorf("could not obtain sts client") + } + return client, nil } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go index 0d1d1d5..754e5b2 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_config_root.go @@ -37,9 +37,6 @@ func pathConfigRoot() *framework.Path { func pathConfigRootWrite( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { region := data.Get("region").(string) - if region == "" { - region = "us-east-1" - } entry, err := logical.StorageEntryJSON("config/root", rootConfig{ AccessKey: data.Get("access_key").(string), diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go index 08bbca9..3314c7a 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/aws/path_roles_test.go @@ -14,7 +14,7 @@ func TestBackend_PathListRoles(t *testing.T) { config.StorageView = &logical.InmemStorage{} b := Backend() - if _, err := b.Setup(config); err != nil { + if err := b.Setup(config); err != nil { t.Fatal(err) } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go index 637bf9d..18dbb5d 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go +++ 
b/vendor/github.com/hashicorp/vault/builtin/logical/aws/secret_access_keys.go @@ -53,7 +53,7 @@ func genUsername(displayName, policyName, userType string) (ret string, warning normalizeDisplayName(policyName)) if len(midString) > 42 { midString = midString[0:42] - warning = "the calling token display name/IAM policy name were truncated to find into IAM username length limits" + warning = "the calling token display name/IAM policy name were truncated to fit into IAM username length limits" } case "sts": // Capped at 32 chars, which leaves only a couple of characters to play diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go index c2e769c..dd54ba5 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend.go @@ -12,7 +12,11 @@ import ( // Factory creates a new backend func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } // Backend contains the base information for the backend's functionality @@ -36,6 +40,7 @@ func Backend() *backend { Clean: func() { b.ResetDB(nil) }, + BackendType: logical.TypeLogical, } return &b diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go index b84ce0d..cfeb329 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/backend_test.go @@ -74,6 +74,9 @@ func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) { } func TestBackend_basic(t *testing.T) { + if os.Getenv("TRAVIS") != "true" { + t.SkipNow() + } config := logical.TestBackendConfig() config.StorageView = 
&logical.InmemStorage{} b, err := Factory(config) @@ -97,6 +100,9 @@ func TestBackend_basic(t *testing.T) { } func TestBackend_roleCrud(t *testing.T) { + if os.Getenv("TRAVIS") != "true" { + t.SkipNow() + } config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} b, err := Factory(config) diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go index 4b025ba..98981ce 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/cassandra/path_creds_create.go @@ -61,14 +61,14 @@ func (b *backend) pathCredsCreateRead( if err != nil { return nil, err } - + // Set consistency if role.Consistency != "" { consistencyValue, err := gocql.ParseConsistencyWrapper(role.Consistency) if err != nil { return nil, err } - + session.SetConsistency(consistencyValue) } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go index 0b4351f..9fd09ac 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/consul/backend.go @@ -6,7 +6,11 @@ import ( ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend() *backend { @@ -22,6 +26,7 @@ func Backend() *backend { Secrets: []*framework.Secret{ secretToken(&b), }, + BackendType: logical.TypeLogical, } return &b diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/backend.go new file mode 100644 index 0000000..ffc1a40 --- /dev/null +++ 
b/vendor/github.com/hashicorp/vault/builtin/logical/database/backend.go @@ -0,0 +1,181 @@ +package database + +import ( + "fmt" + "net/rpc" + "strings" + "sync" + + log "github.com/mgutz/logxi/v1" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +const databaseConfigPath = "database/config/" + +func Factory(conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend(conf) + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend(conf *logical.BackendConfig) *databaseBackend { + var b databaseBackend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + Paths: []*framework.Path{ + pathListPluginConnection(&b), + pathConfigurePluginConnection(&b), + pathListRoles(&b), + pathRoles(&b), + pathCredsCreate(&b), + pathResetConnection(&b), + }, + + Secrets: []*framework.Secret{ + secretCreds(&b), + }, + Clean: b.closeAllDBs, + Invalidate: b.invalidate, + BackendType: logical.TypeLogical, + } + + b.logger = conf.Logger + b.connections = make(map[string]dbplugin.Database) + return &b +} + +type databaseBackend struct { + connections map[string]dbplugin.Database + logger log.Logger + + *framework.Backend + sync.RWMutex +} + +// closeAllDBs closes all connections from all database types +func (b *databaseBackend) closeAllDBs() { + b.Lock() + defer b.Unlock() + + for _, db := range b.connections { + db.Close() + } + + b.connections = make(map[string]dbplugin.Database) +} + +// This function is used to retrieve a database object either from the cached +// connection map. The caller of this function needs to hold the backend's read +// lock. +func (b *databaseBackend) getDBObj(name string) (dbplugin.Database, bool) { + db, ok := b.connections[name] + return db, ok +} + +// This function creates a new db object from the stored configuration and +// caches it in the connections map. 
The caller of this function needs to hold +// the backend's write lock +func (b *databaseBackend) createDBObj(s logical.Storage, name string) (dbplugin.Database, error) { + db, ok := b.connections[name] + if ok { + return db, nil + } + + config, err := b.DatabaseConfig(s, name) + if err != nil { + return nil, err + } + + db, err = dbplugin.PluginFactory(config.PluginName, b.System(), b.logger) + if err != nil { + return nil, err + } + + err = db.Initialize(config.ConnectionDetails, true) + if err != nil { + return nil, err + } + + b.connections[name] = db + + return db, nil +} + +func (b *databaseBackend) DatabaseConfig(s logical.Storage, name string) (*DatabaseConfig, error) { + entry, err := s.Get(fmt.Sprintf("config/%s", name)) + if err != nil { + return nil, fmt.Errorf("failed to read connection configuration: %s", err) + } + if entry == nil { + return nil, fmt.Errorf("failed to find entry for connection with name: %s", name) + } + + var config DatabaseConfig + if err := entry.DecodeJSON(&config); err != nil { + return nil, err + } + + return &config, nil +} + +func (b *databaseBackend) Role(s logical.Storage, roleName string) (*roleEntry, error) { + entry, err := s.Get("role/" + roleName) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result roleEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *databaseBackend) invalidate(key string) { + b.Lock() + defer b.Unlock() + + switch { + case strings.HasPrefix(key, databaseConfigPath): + name := strings.TrimPrefix(key, databaseConfigPath) + b.clearConnection(name) + } +} + +// clearConnection closes the database connection and +// removes it from the b.connections map. 
+func (b *databaseBackend) clearConnection(name string) { + db, ok := b.connections[name] + if ok { + db.Close() + delete(b.connections, name) + } +} + +func (b *databaseBackend) closeIfShutdown(name string, err error) { + // Plugin has shutdown, close it so next call can reconnect. + if err == rpc.ErrShutdown { + b.Lock() + b.clearConnection(name) + b.Unlock() + } +} + +const backendHelp = ` +The database backend supports using many different databases +as secret backends, including but not limited to: +cassandra, mssql, mysql, postgres + +After mounting this backend, configure it using the endpoints within +the "database/config/" path. +` diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/backend_test.go new file mode 100644 index 0000000..d5461e2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/backend_test.go @@ -0,0 +1,753 @@ +package database + +import ( + "database/sql" + "fmt" + "log" + "os" + "reflect" + "sync" + "testing" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/pluginutil" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/plugins/database/postgresql" + "github.com/hashicorp/vault/vault" + "github.com/lib/pq" + "github.com/mitchellh/mapstructure" + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +var ( + testImagePull sync.Once +) + +func preparePostgresTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cleanup func(), retURL string) { + if os.Getenv("PG_URL") != "" { + return func() {}, os.Getenv("PG_URL") + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + resource, err := pool.Run("postgres", "latest", []string{"POSTGRES_PASSWORD=secret", "POSTGRES_DB=database"}) + if err != nil { + t.Fatalf("Could not start local 
PostgreSQL docker container: %s", err) + } + + cleanup = func() { + err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local container: %s", err) + } + } + + retURL = fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp")) + + // exponential backoff-retry + if err = pool.Retry(func() error { + // This will cause a validation to run + resp, err := b.HandleRequest(&logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: "config/postgresql", + Data: map[string]interface{}{ + "plugin_name": "postgresql-database-plugin", + "connection_url": retURL, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + // It's likely not up and running yet, so return error and try again + return fmt.Errorf("err:%s resp:%#v\n", err, resp) + } + if resp == nil { + t.Fatal("expected warning") + } + + return nil + }); err != nil { + t.Fatalf("Could not connect to PostgreSQL docker container: %s", err) + } + + return +} + +func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "database": Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + cores := cluster.Cores + + os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) + + sys := vault.TestDynamicSystemView(cores[0].Core) + vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", "TestBackend_PluginMain") + + return cluster, sys +} + +func TestBackend_PluginMain(t *testing.T) { + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { + return + } + + caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv) + if caPEM == "" { + t.Fatal("CA cert not passed in") + } + + args := []string{"--ca-cert=" + caPEM} + + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(args) + + 
postgresql.Run(apiClientMeta.GetTLSConfig()) +} + +func TestBackend_config_connection(t *testing.T) { + var resp *logical.Response + var err error + + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup() + + configData := map[string]interface{}{ + "connection_url": "sample_connection_url", + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + } + + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: configData, + } + resp, err = b.HandleRequest(configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + expected := map[string]interface{}{ + "plugin_name": "postgresql-database-plugin", + "connection_details": map[string]interface{}{ + "connection_url": "sample_connection_url", + }, + "allowed_roles": []string{"*"}, + } + configReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + delete(resp.Data["connection_details"].(map[string]interface{}), "name") + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } + + configReq.Operation = logical.ListOperation + configReq.Data = nil + configReq.Path = "config/" + resp, err = b.HandleRequest(configReq) + if err != nil { + t.Fatal(err) + } + keys := resp.Data["keys"].([]string) + key := keys[0] + if key != "plugin-test" { + t.Fatalf("bad key: %q", key) + } +} + +func TestBackend_basic(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + 
config.System = sys + + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup() + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + credsResp, err := b.HandleRequest(req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } + + // Revoke creds + resp, err = b.HandleRequest(&logical.Request{ + Operation: logical.RevokeOperation, + Storage: config.StorageView, + Secret: &logical.Secret{ + InternalData: map[string]interface{}{ + "secret_type": "creds", + "username": credsResp.Data["username"], + "role": "plugin-role-test", + }, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if 
testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should not exist") + } + +} + +func TestBackend_connectionCrud(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup() + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // Configure a connection + data := map[string]interface{}{ + "connection_url": "test", + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "revocation_statements": defaultRevocationSQL, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Update the connection + data = map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read connection + expected := map[string]interface{}{ + "plugin_name": 
"postgresql-database-plugin", + "connection_details": map[string]interface{}{ + "connection_url": connURL, + }, + "allowed_roles": []string{"plugin-role-test"}, + } + req.Operation = logical.ReadOperation + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + delete(resp.Data["connection_details"].(map[string]interface{}), "name") + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } + + // Reset Connection + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "reset/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + credsResp, err := b.HandleRequest(req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } + + // Delete Connection + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.DeleteOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read connection + req.Operation = logical.ReadOperation + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Should be empty + if resp != nil { + t.Fatal("Expected response to be nil") + } +} + +func TestBackend_roleCrud(t *testing.T) { + cluster, sys := 
getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup() + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "revocation_statements": defaultRevocationSQL, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + expected := dbplugin.Statements{ + CreationStatements: testRole, + RevocationStatements: defaultRevocationSQL, + } + + var actual dbplugin.Statements + if err := mapstructure.Decode(resp.Data, &actual); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("Statements did not match, exepected %#v, got %#v", expected, actual) + } + + // Delete the role + data = 
map[string]interface{}{} + req = &logical.Request{ + Operation: logical.DeleteOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Should be empty + if resp != nil { + t.Fatal("Expected response to be nil") + } +} +func TestBackend_allowedRoles(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup() + + cleanup, connURL := preparePostgresTestContainer(t, config.StorageView, b) + defer cleanup() + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a denied and an allowed role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/denied", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", 
err, resp) + } + + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/allowed", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds from denied role, should fail + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/denied", + Storage: config.StorageView, + Data: data, + } + credsResp, err := b.HandleRequest(req) + if err != logical.ErrPermissionDenied { + t.Fatalf("expected error to be:%s got:%#v\n", logical.ErrPermissionDenied, err) + } + + // update connection with * allowed roles connection + data = map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": "*", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds, should work. 
+ data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/allowed", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } + + // update connection with allowed roles + data = map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": "allow, allowed", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds from denied role, should fail + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/denied", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(req) + if err != logical.ErrPermissionDenied { + t.Fatalf("expected error to be:%s got:%#v\n", logical.ErrPermissionDenied, err) + } + + // Get creds from allowed role, should work. 
+ data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/allowed", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } +} + +func testCredsExist(t *testing.T, resp *logical.Response, connURL string) bool { + var d struct { + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + log.Printf("[TRACE] Generated credentials: %v", d) + conn, err := pq.ParseURL(connURL) + + if err != nil { + t.Fatal(err) + } + + conn += " timezone=utc" + + db, err := sql.Open("postgres", conn) + if err != nil { + t.Fatal(err) + } + + returnedRows := func() int { + stmt, err := db.Prepare("SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege($1, 'information_schema.role_column_grants', 'select');") + if err != nil { + return -1 + } + defer stmt.Close() + + rows, err := stmt.Query(d.Username) + if err != nil { + return -1 + } + defer rows.Close() + + i := 0 + for rows.Next() { + i++ + } + return i + } + + return returnedRows() == 2 +} + +const testRole = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` + +const defaultRevocationSQL = ` +REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}}; +REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}}; +REVOKE USAGE ON SCHEMA public FROM {{name}}; + +DROP ROLE IF EXISTS {{name}}; +` diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/client.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/client.go new file mode 100644 index 
0000000..6df3948 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/client.go @@ -0,0 +1,133 @@ +package dbplugin + +import ( + "fmt" + "net/rpc" + "sync" + "time" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/helper/pluginutil" + log "github.com/mgutz/logxi/v1" +) + +// DatabasePluginClient embeds a databasePluginRPCClient and wraps it's Close +// method to also call Kill() on the plugin.Client. +type DatabasePluginClient struct { + client *plugin.Client + sync.Mutex + + *databasePluginRPCClient +} + +func (dc *DatabasePluginClient) Close() error { + err := dc.databasePluginRPCClient.Close() + dc.client.Kill() + + return err +} + +// newPluginClient returns a databaseRPCClient with a connection to a running +// plugin. The client is wrapped in a DatabasePluginClient object to ensure the +// plugin is killed on call of Close(). +func newPluginClient(sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger) (Database, error) { + // pluginMap is the map of plugins we can dispense. + var pluginMap = map[string]plugin.Plugin{ + "database": new(DatabasePlugin), + } + + client, err := pluginRunner.Run(sys, pluginMap, handshakeConfig, []string{}, logger) + if err != nil { + return nil, err + } + + // Connect via RPC + rpcClient, err := client.Client() + if err != nil { + return nil, err + } + + // Request the plugin + raw, err := rpcClient.Dispense("database") + if err != nil { + return nil, err + } + + // We should have a database type now. This feels like a normal interface + // implementation but is in fact over an RPC connection. + databaseRPC := raw.(*databasePluginRPCClient) + + // Wrap RPC implimentation in DatabasePluginClient + return &DatabasePluginClient{ + client: client, + databasePluginRPCClient: databaseRPC, + }, nil +} + +// ---- RPC client domain ---- + +// databasePluginRPCClient implements Database and is used on the client to +// make RPC calls to a plugin. 
+type databasePluginRPCClient struct { + client *rpc.Client +} + +func (dr *databasePluginRPCClient) Type() (string, error) { + var dbType string + err := dr.client.Call("Plugin.Type", struct{}{}, &dbType) + + return fmt.Sprintf("plugin-%s", dbType), err +} + +func (dr *databasePluginRPCClient) CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) { + req := CreateUserRequest{ + Statements: statements, + UsernameConfig: usernameConfig, + Expiration: expiration, + } + + var resp CreateUserResponse + err = dr.client.Call("Plugin.CreateUser", req, &resp) + + return resp.Username, resp.Password, err +} + +func (dr *databasePluginRPCClient) RenewUser(statements Statements, username string, expiration time.Time) error { + req := RenewUserRequest{ + Statements: statements, + Username: username, + Expiration: expiration, + } + + err := dr.client.Call("Plugin.RenewUser", req, &struct{}{}) + + return err +} + +func (dr *databasePluginRPCClient) RevokeUser(statements Statements, username string) error { + req := RevokeUserRequest{ + Statements: statements, + Username: username, + } + + err := dr.client.Call("Plugin.RevokeUser", req, &struct{}{}) + + return err +} + +func (dr *databasePluginRPCClient) Initialize(conf map[string]interface{}, verifyConnection bool) error { + req := InitializeRequest{ + Config: conf, + VerifyConnection: verifyConnection, + } + + err := dr.client.Call("Plugin.Initialize", req, &struct{}{}) + + return err +} + +func (dr *databasePluginRPCClient) Close() error { + err := dr.client.Call("Plugin.Close", struct{}{}, &struct{}{}) + + return err +} diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/databasemiddleware.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/databasemiddleware.go new file mode 100644 index 0000000..87dfa6c --- /dev/null +++ 
b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/databasemiddleware.go @@ -0,0 +1,162 @@ +package dbplugin + +import ( + "time" + + metrics "github.com/armon/go-metrics" + log "github.com/mgutz/logxi/v1" +) + +// ---- Tracing Middleware Domain ---- + +// databaseTracingMiddleware wraps a implementation of Database and executes +// trace logging on function call. +type databaseTracingMiddleware struct { + next Database + logger log.Logger + + typeStr string +} + +func (mw *databaseTracingMiddleware) Type() (string, error) { + return mw.next.Type() +} + +func (mw *databaseTracingMiddleware) CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) { + defer func(then time.Time) { + mw.logger.Trace("database", "operation", "CreateUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("database", "operation", "CreateUser", "status", "started", "type", mw.typeStr) + return mw.next.CreateUser(statements, usernameConfig, expiration) +} + +func (mw *databaseTracingMiddleware) RenewUser(statements Statements, username string, expiration time.Time) (err error) { + defer func(then time.Time) { + mw.logger.Trace("database", "operation", "RenewUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("database", "operation", "RenewUser", "status", "started", mw.typeStr) + return mw.next.RenewUser(statements, username, expiration) +} + +func (mw *databaseTracingMiddleware) RevokeUser(statements Statements, username string) (err error) { + defer func(then time.Time) { + mw.logger.Trace("database", "operation", "RevokeUser", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("database", "operation", "RevokeUser", "status", "started", "type", mw.typeStr) + return mw.next.RevokeUser(statements, 
username) +} + +func (mw *databaseTracingMiddleware) Initialize(conf map[string]interface{}, verifyConnection bool) (err error) { + defer func(then time.Time) { + mw.logger.Trace("database", "operation", "Initialize", "status", "finished", "type", mw.typeStr, "verify", verifyConnection, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("database", "operation", "Initialize", "status", "started", "type", mw.typeStr) + return mw.next.Initialize(conf, verifyConnection) +} + +func (mw *databaseTracingMiddleware) Close() (err error) { + defer func(then time.Time) { + mw.logger.Trace("database", "operation", "Close", "status", "finished", "type", mw.typeStr, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("database", "operation", "Close", "status", "started", "type", mw.typeStr) + return mw.next.Close() +} + +// ---- Metrics Middleware Domain ---- + +// databaseMetricsMiddleware wraps an implementation of Databases and on +// function call logs metrics about this instance. 
+type databaseMetricsMiddleware struct { + next Database + + typeStr string +} + +func (mw *databaseMetricsMiddleware) Type() (string, error) { + return mw.next.Type() +} + +func (mw *databaseMetricsMiddleware) CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "CreateUser"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "CreateUser"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "CreateUser", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "CreateUser"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser"}, 1) + return mw.next.CreateUser(statements, usernameConfig, expiration) +} + +func (mw *databaseMetricsMiddleware) RenewUser(statements Statements, username string, expiration time.Time) (err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "RenewUser"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "RenewUser"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "RenewUser", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "RenewUser"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser"}, 1) + return mw.next.RenewUser(statements, username, expiration) +} + +func (mw *databaseMetricsMiddleware) RevokeUser(statements Statements, username string) (err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "RevokeUser"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "RevokeUser"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "RevokeUser", "error"}, 1) + 
metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "RevokeUser"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser"}, 1) + return mw.next.RevokeUser(statements, username) +} + +func (mw *databaseMetricsMiddleware) Initialize(conf map[string]interface{}, verifyConnection bool) (err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "Initialize"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "Initialize"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "Initialize", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "Initialize"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize"}, 1) + return mw.next.Initialize(conf, verifyConnection) +} + +func (mw *databaseMetricsMiddleware) Close() (err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "Close"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "Close"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "Close", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "Close", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "Close"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "Close"}, 1) + return mw.next.Close() +} diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin.go new file mode 100644 index 0000000..0becc9f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin.go @@ -0,0 +1,147 @@ +package dbplugin + +import ( + "fmt" + "net/rpc" + "time" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/helper/pluginutil" + log 
"github.com/mgutz/logxi/v1" +) + +// Database is the interface that all database objects must implement. +type Database interface { + Type() (string, error) + CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) + RenewUser(statements Statements, username string, expiration time.Time) error + RevokeUser(statements Statements, username string) error + + Initialize(config map[string]interface{}, verifyConnection bool) error + Close() error +} + +// Statements set in role creation and passed into the database type's functions. +type Statements struct { + CreationStatements string `json:"creation_statments" mapstructure:"creation_statements" structs:"creation_statments"` + RevocationStatements string `json:"revocation_statements" mapstructure:"revocation_statements" structs:"revocation_statements"` + RollbackStatements string `json:"rollback_statements" mapstructure:"rollback_statements" structs:"rollback_statements"` + RenewStatements string `json:"renew_statements" mapstructure:"renew_statements" structs:"renew_statements"` +} + +// UsernameConfig is used to configure prefixes for the username to be +// generated. +type UsernameConfig struct { + DisplayName string + RoleName string +} + +// PluginFactory is used to build plugin database types. It wraps the database +// object in a logging and metrics middleware. +func PluginFactory(pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) { + // Look for plugin in the plugin catalog + pluginRunner, err := sys.LookupPlugin(pluginName) + if err != nil { + return nil, err + } + + var db Database + if pluginRunner.Builtin { + // Plugin is builtin so we can retrieve an instance of the interface + // from the pluginRunner. Then cast it to a Database. 
+ dbRaw, err := pluginRunner.BuiltinFactory() + if err != nil { + return nil, fmt.Errorf("error getting plugin type: %s", err) + } + + var ok bool + db, ok = dbRaw.(Database) + if !ok { + return nil, fmt.Errorf("unsuported database type: %s", pluginName) + } + + } else { + // create a DatabasePluginClient instance + db, err = newPluginClient(sys, pluginRunner, logger) + if err != nil { + return nil, err + } + } + + typeStr, err := db.Type() + if err != nil { + return nil, fmt.Errorf("error getting plugin type: %s", err) + } + + // Wrap with metrics middleware + db = &databaseMetricsMiddleware{ + next: db, + typeStr: typeStr, + } + + // Wrap with tracing middleware + if logger.IsTrace() { + db = &databaseTracingMiddleware{ + next: db, + typeStr: typeStr, + logger: logger, + } + } + + return db, nil +} + +// handshakeConfigs are used to just do a basic handshake between +// a plugin and host. If the handshake fails, a user friendly error is shown. +// This prevents users from executing bad plugins or executing a plugin +// directory. It is a UX feature, not a security feature. +var handshakeConfig = plugin.HandshakeConfig{ + ProtocolVersion: 3, + MagicCookieKey: "VAULT_DATABASE_PLUGIN", + MagicCookieValue: "926a0820-aea2-be28-51d6-83cdf00e8edb", +} + +// DatabasePlugin implements go-plugin's Plugin interface. It has methods for +// retrieving a server and a client instance of the plugin. 
+type DatabasePlugin struct { + impl Database +} + +func (d DatabasePlugin) Server(*plugin.MuxBroker) (interface{}, error) { + return &databasePluginRPCServer{impl: d.impl}, nil +} + +func (DatabasePlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &databasePluginRPCClient{client: c}, nil +} + +// ---- RPC Request Args Domain ---- + +type InitializeRequest struct { + Config map[string]interface{} + VerifyConnection bool +} + +type CreateUserRequest struct { + Statements Statements + UsernameConfig UsernameConfig + Expiration time.Time +} + +type RenewUserRequest struct { + Statements Statements + Username string + Expiration time.Time +} + +type RevokeUserRequest struct { + Statements Statements + Username string +} + +// ---- RPC Response Args Domain ---- + +type CreateUserResponse struct { + Username string + Password string +} diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin_test.go new file mode 100644 index 0000000..3a78595 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/plugin_test.go @@ -0,0 +1,245 @@ +package dbplugin_test + +import ( + "errors" + "os" + "testing" + "time" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/pluginutil" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/plugins" + "github.com/hashicorp/vault/vault" + log "github.com/mgutz/logxi/v1" +) + +type mockPlugin struct { + users map[string][]string +} + +func (m *mockPlugin) Type() (string, error) { return "mock", nil } +func (m *mockPlugin) CreateUser(statements dbplugin.Statements, usernameConf dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { + err = errors.New("err") + if usernameConf.DisplayName == "" || expiration.IsZero() { + 
return "", "", err + } + + if _, ok := m.users[usernameConf.DisplayName]; ok { + return "", "", err + } + + m.users[usernameConf.DisplayName] = []string{password} + + return usernameConf.DisplayName, "test", nil +} +func (m *mockPlugin) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error { + err := errors.New("err") + if username == "" || expiration.IsZero() { + return err + } + + if _, ok := m.users[username]; !ok { + return err + } + + return nil +} +func (m *mockPlugin) RevokeUser(statements dbplugin.Statements, username string) error { + err := errors.New("err") + if username == "" { + return err + } + + if _, ok := m.users[username]; !ok { + return err + } + + delete(m.users, username) + return nil +} +func (m *mockPlugin) Initialize(conf map[string]interface{}, _ bool) error { + err := errors.New("err") + if len(conf) != 1 { + return err + } + + return nil +} +func (m *mockPlugin) Close() error { + m.users = nil + return nil +} + +func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + cores := cluster.Cores + + sys := vault.TestDynamicSystemView(cores[0].Core) + vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", "TestPlugin_Main") + + return cluster, sys +} + +// This is not an actual test case, it's a helper function that will be executed +// by the go-plugin client via an exec call. 
+func TestPlugin_Main(t *testing.T) { + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { + return + } + + plugin := &mockPlugin{ + users: make(map[string][]string), + } + + args := []string{"--tls-skip-verify=true"} + + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(args) + + plugins.Serve(plugin, apiClientMeta.GetTLSConfig()) +} + +func TestPlugin_Initialize(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + dbRaw, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{}) + if err != nil { + t.Fatalf("err: %s", err) + } + + connectionDetails := map[string]interface{}{ + "test": 1, + } + + err = dbRaw.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = dbRaw.Close() + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestPlugin_CreateUser(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{}) + if err != nil { + t.Fatalf("err: %s", err) + } + defer db.Close() + + connectionDetails := map[string]interface{}{ + "test": 1, + } + + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + usernameConf := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + us, pw, err := db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + if us != "test" || pw != "test" { + t.Fatal("expected username and password to be 'test'") + } + + // try and save the same user again to verify it saved the first time, this + // should return an error + _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute)) + if err == nil { + t.Fatal("expected an error, user wasn't created correctly") + } +} + +func TestPlugin_RenewUser(t *testing.T) { + cluster, sys := getCluster(t) + defer 
cluster.Cleanup() + + db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{}) + if err != nil { + t.Fatalf("err: %s", err) + } + defer db.Close() + + connectionDetails := map[string]interface{}{ + "test": 1, + } + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + usernameConf := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + us, _, err := db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = db.RenewUser(dbplugin.Statements{}, us, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestPlugin_RevokeUser(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{}) + if err != nil { + t.Fatalf("err: %s", err) + } + defer db.Close() + + connectionDetails := map[string]interface{}{ + "test": 1, + } + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + usernameConf := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + us, _, err := db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test default revoke statememts + err = db.RevokeUser(dbplugin.Statements{}, us) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Try adding the same username back so we can verify it was removed + _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/server.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/server.go new file mode 100644 index 0000000..381f0ae --- /dev/null +++ 
b/vendor/github.com/hashicorp/vault/builtin/logical/database/dbplugin/server.go @@ -0,0 +1,71 @@ +package dbplugin + +import ( + "crypto/tls" + + "github.com/hashicorp/go-plugin" +) + +// Serve is called from within a plugin and wraps the provided +// Database implementation in a databasePluginRPCServer object and starts a +// RPC server. +func Serve(db Database, tlsProvider func() (*tls.Config, error)) { + dbPlugin := &DatabasePlugin{ + impl: db, + } + + // pluginMap is the map of plugins we can dispense. + var pluginMap = map[string]plugin.Plugin{ + "database": dbPlugin, + } + + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: handshakeConfig, + Plugins: pluginMap, + TLSProvider: tlsProvider, + }) +} + +// ---- RPC server domain ---- + +// databasePluginRPCServer implements an RPC version of Database and is run +// inside a plugin. It wraps an underlying implementation of Database. +type databasePluginRPCServer struct { + impl Database +} + +func (ds *databasePluginRPCServer) Type(_ struct{}, resp *string) error { + var err error + *resp, err = ds.impl.Type() + return err +} + +func (ds *databasePluginRPCServer) CreateUser(args *CreateUserRequest, resp *CreateUserResponse) error { + var err error + resp.Username, resp.Password, err = ds.impl.CreateUser(args.Statements, args.UsernameConfig, args.Expiration) + + return err +} + +func (ds *databasePluginRPCServer) RenewUser(args *RenewUserRequest, _ *struct{}) error { + err := ds.impl.RenewUser(args.Statements, args.Username, args.Expiration) + + return err +} + +func (ds *databasePluginRPCServer) RevokeUser(args *RevokeUserRequest, _ *struct{}) error { + err := ds.impl.RevokeUser(args.Statements, args.Username) + + return err +} + +func (ds *databasePluginRPCServer) Initialize(args *InitializeRequest, _ *struct{}) error { + err := ds.impl.Initialize(args.Config, args.VerifyConnection) + + return err +} + +func (ds *databasePluginRPCServer) Close(_ struct{}, _ *struct{}) error { + ds.impl.Close() + return nil +} 
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_config_connection.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_config_connection.go new file mode 100644 index 0000000..d1e6cb2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_config_connection.go @@ -0,0 +1,294 @@ +package database + +import ( + "errors" + "fmt" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +var ( + respErrEmptyPluginName = "empty plugin name" + respErrEmptyName = "empty name attribute given" +) + +// DatabaseConfig is used by the Factory function to configure a Database +// object. +type DatabaseConfig struct { + PluginName string `json:"plugin_name" structs:"plugin_name" mapstructure:"plugin_name"` + // ConnectionDetails stores the database specific connection settings needed + // by each database type. + ConnectionDetails map[string]interface{} `json:"connection_details" structs:"connection_details" mapstructure:"connection_details"` + AllowedRoles []string `json:"allowed_roles" structs:"allowed_roles" mapstructure:"allowed_roles"` +} + +// pathResetConnection configures a path to reset a plugin. +func pathResetConnection(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("reset/%s", framework.GenericNameRegex("name")), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of this database connection", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConnectionReset(), + }, + + HelpSynopsis: pathResetConnectionHelpSyn, + HelpDescription: pathResetConnectionHelpDesc, + } +} + +// pathConnectionReset resets a plugin by closing the existing instance and +// creating a new one. 
+func (b *databaseBackend) pathConnectionReset() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + // Grab the mutex lock + b.Lock() + defer b.Unlock() + + // Close plugin and delete the entry in the connections cache. + b.clearConnection(name) + + // Execute plugin again, we don't need the object so throw away. + _, err := b.createDBObj(req.Storage, name) + if err != nil { + return nil, err + } + + return nil, nil + } +} + +// pathConfigurePluginConnection returns a configured framework.Path setup to +// operate on plugins. +func pathConfigurePluginConnection(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("config/%s", framework.GenericNameRegex("name")), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of this database connection", + }, + + "plugin_name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The name of a builtin or previously registered + plugin known to vault. This endpoint will create an instance of + that plugin type.`, + }, + + "verify_connection": &framework.FieldSchema{ + Type: framework.TypeBool, + Default: true, + Description: `If true, the connection details are verified by + actually connecting to the database. Defaults to true.`, + }, + + "allowed_roles": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `Comma separated string or array of the role names + allowed to get creds from this database connection. If empty no + roles are allowed. 
If "*" all roles are allowed.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.connectionWriteHandler(), + logical.ReadOperation: b.connectionReadHandler(), + logical.DeleteOperation: b.connectionDeleteHandler(), + }, + + HelpSynopsis: pathConfigConnectionHelpSyn, + HelpDescription: pathConfigConnectionHelpDesc, + } +} + +func pathListPluginConnection(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("config/?$"), + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.connectionListHandler(), + }, + + HelpSynopsis: pathConfigConnectionHelpSyn, + HelpDescription: pathConfigConnectionHelpDesc, + } +} + +func (b *databaseBackend) connectionListHandler() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List("config/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil + } +} + +// connectionReadHandler reads out the connection configuration +func (b *databaseBackend) connectionReadHandler() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + entry, err := req.Storage.Get(fmt.Sprintf("config/%s", name)) + if err != nil { + return nil, errors.New("failed to read connection configuration") + } + if entry == nil { + return nil, nil + } + + var config DatabaseConfig + if err := entry.DecodeJSON(&config); err != nil { + return nil, err + } + return &logical.Response{ + Data: structs.New(config).Map(), + }, nil + } +} + +// connectionDeleteHandler deletes the connection configuration +func (b *databaseBackend) connectionDeleteHandler() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, 
error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + err := req.Storage.Delete(fmt.Sprintf("config/%s", name)) + if err != nil { + return nil, errors.New("failed to delete connection configuration") + } + + b.Lock() + defer b.Unlock() + + if _, ok := b.connections[name]; ok { + err = b.connections[name].Close() + if err != nil { + return nil, err + } + + delete(b.connections, name) + } + + return nil, nil + } +} + +// connectionWriteHandler returns a handler function for creating and updating +// both builtin and plugin database types. +func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + pluginName := data.Get("plugin_name").(string) + if pluginName == "" { + return logical.ErrorResponse(respErrEmptyPluginName), nil + } + + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + verifyConnection := data.Get("verify_connection").(bool) + + allowedRoles := data.Get("allowed_roles").([]string) + + // Remove these entries from the data before we store it keyed under + // ConnectionDetails. 
+ delete(data.Raw, "name") + delete(data.Raw, "plugin_name") + delete(data.Raw, "allowed_roles") + delete(data.Raw, "verify_connection") + + config := &DatabaseConfig{ + ConnectionDetails: data.Raw, + PluginName: pluginName, + AllowedRoles: allowedRoles, + } + + db, err := dbplugin.PluginFactory(config.PluginName, b.System(), b.logger) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error creating database object: %s", err)), nil + } + + err = db.Initialize(config.ConnectionDetails, verifyConnection) + if err != nil { + db.Close() + return logical.ErrorResponse(fmt.Sprintf("error creating database object: %s", err)), nil + } + + // Grab the mutex lock + b.Lock() + defer b.Unlock() + + // Close and remove the old connection + b.clearConnection(name) + + // Save the new connection + b.connections[name] = db + + // Store it + entry, err := logical.StorageEntryJSON(fmt.Sprintf("config/%s", name), config) + if err != nil { + return nil, err + } + if err := req.Storage.Put(entry); err != nil { + return nil, err + } + + resp := &logical.Response{} + resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any.") + + return resp, nil + } +} + +const pathConfigConnectionHelpSyn = ` +Configure connection details to a database plugin. +` + +const pathConfigConnectionHelpDesc = ` +This path configures the connection details used to connect to a particular +database. This path runs the provided plugin name and passes the configured +connection details to the plugin. See the documentation for the plugin specified +for a full list of accepted connection details. + +In addition to the database specific connection details, this endpoint also +accepts: + + * "plugin_name" (required) - The name of a builtin or previously registered + plugin known to vault. This endpoint will create an instance of that + plugin type. 
+ + * "verify_connection" (default: true) - A boolean value denoting if the plugin should verify + it is able to connect to the database using the provided connection + details. +` + +const pathResetConnectionHelpSyn = ` +Resets a database plugin. +` + +const pathResetConnectionHelpDesc = ` +This path resets the database connection by closing the existing database plugin +instance and running a new one. +` diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_creds_create.go new file mode 100644 index 0000000..6fb61a3 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_creds_create.go @@ -0,0 +1,112 @@ +package database + +import ( + "fmt" + "time" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func pathCredsCreate(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: "creds/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathCredsCreateRead(), + }, + + HelpSynopsis: pathCredsCreateReadHelpSyn, + HelpDescription: pathCredsCreateReadHelpDesc, + } +} + +func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + // Get the role + role, err := b.Role(req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil + } + + dbConfig, err := b.DatabaseConfig(req.Storage, role.DBName) + if err != 
nil { + return nil, err + } + + // If role name isn't in the database's allowed roles, send back a + // permission denied. + if !strutil.StrListContains(dbConfig.AllowedRoles, "*") && !strutil.StrListContains(dbConfig.AllowedRoles, name) { + return nil, logical.ErrPermissionDenied + } + + // Grab the read lock + b.RLock() + var unlockFunc func() = b.RUnlock + + // Get the Database object + db, ok := b.getDBObj(role.DBName) + if !ok { + // Upgrade lock + b.RUnlock() + b.Lock() + unlockFunc = b.Unlock + + // Create a new DB object + db, err = b.createDBObj(req.Storage, role.DBName) + if err != nil { + unlockFunc() + return nil, fmt.Errorf("cound not retrieve db with name: %s, got error: %s", role.DBName, err) + } + } + + expiration := time.Now().Add(role.DefaultTTL) + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: req.DisplayName, + RoleName: name, + } + + // Create the user + username, password, err := db.CreateUser(role.Statements, usernameConfig, expiration) + // Unlock + unlockFunc() + if err != nil { + b.closeIfShutdown(role.DBName, err) + return nil, err + } + + resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ + "username": username, + "password": password, + }, map[string]interface{}{ + "username": username, + "role": name, + }) + resp.Secret.TTL = role.DefaultTTL + return resp, nil + } +} + +const pathCredsCreateReadHelpSyn = ` +Request database credentials for a certain role. +` + +const pathCredsCreateReadHelpDesc = ` +This path reads database credentials for a certain role. The +database credentials will be generated on demand and will be automatically +revoked when the lease is up. 
+` diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_roles.go new file mode 100644 index 0000000..69884cb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/path_roles.go @@ -0,0 +1,232 @@ +package database + +import ( + "time" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func pathListRoles(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList(), + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func pathRoles(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + + "db_name": { + Type: framework.TypeString, + Description: "Name of the database this role acts on.", + }, + "creation_statements": { + Type: framework.TypeString, + Description: `Specifies the database statements executed to + create and configure a user. See the plugin's API page for more + information on support and formatting for this parameter.`, + }, + "revocation_statements": { + Type: framework.TypeString, + Description: `Specifies the database statements to be executed + to revoke a user. See the plugin's API page for more information + on support and formatting for this parameter.`, + }, + "renew_statements": { + Type: framework.TypeString, + Description: `Specifies the database statements to be executed + to renew a user. Not every plugin type will support this + functionality. See the plugin's API page for more information on + support and formatting for this parameter. 
`, + }, + "rollback_statements": { + Type: framework.TypeString, + Description: `Specifies the database statements to be executed + rollback a create operation in the event of an error. Not every + plugin type will support this functionality. See the plugin's + API page for more information on support and formatting for this + parameter.`, + }, + + "default_ttl": { + Type: framework.TypeDurationSecond, + Description: "Default ttl for role.", + }, + + "max_ttl": { + Type: framework.TypeDurationSecond, + Description: "Maximum time a credential is valid for", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleRead(), + logical.UpdateOperation: b.pathRoleCreate(), + logical.DeleteOperation: b.pathRoleDelete(), + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func (b *databaseBackend) pathRoleDelete() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete("role/" + data.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil + } +} + +func (b *databaseBackend) pathRoleRead() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + role, err := b.Role(req.Storage, data.Get("name").(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "db_name": role.DBName, + "creation_statements": role.Statements.CreationStatements, + "revocation_statements": role.Statements.RevocationStatements, + "rollback_statements": role.Statements.RollbackStatements, + "renew_statements": role.Statements.RenewStatements, + "default_ttl": role.DefaultTTL.Seconds(), + "max_ttl": role.MaxTTL.Seconds(), + }, + }, nil + } +} + +func (b *databaseBackend) pathRoleList() framework.OperationFunc { + return func(req *logical.Request, data 
*framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List("role/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil + } +} + +func (b *databaseBackend) pathRoleCreate() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse("empty role name attribute given"), nil + } + + dbName := data.Get("db_name").(string) + if dbName == "" { + return logical.ErrorResponse("empty database name attribute given"), nil + } + + // Get statements + creationStmts := data.Get("creation_statements").(string) + revocationStmts := data.Get("revocation_statements").(string) + rollbackStmts := data.Get("rollback_statements").(string) + renewStmts := data.Get("renew_statements").(string) + + // Get TTLs + defaultTTLRaw := data.Get("default_ttl").(int) + maxTTLRaw := data.Get("max_ttl").(int) + defaultTTL := time.Duration(defaultTTLRaw) * time.Second + maxTTL := time.Duration(maxTTLRaw) * time.Second + + statements := dbplugin.Statements{ + CreationStatements: creationStmts, + RevocationStatements: revocationStmts, + RollbackStatements: rollbackStmts, + RenewStatements: renewStmts, + } + + // Store it + entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{ + DBName: dbName, + Statements: statements, + DefaultTTL: defaultTTL, + MaxTTL: maxTTL, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(entry); err != nil { + return nil, err + } + + return nil, nil + } +} + +type roleEntry struct { + DBName string `json:"db_name" mapstructure:"db_name" structs:"db_name"` + Statements dbplugin.Statements `json:"statments" mapstructure:"statements" structs:"statments"` + DefaultTTL time.Duration `json:"default_ttl" mapstructure:"default_ttl" structs:"default_ttl"` + MaxTTL time.Duration `json:"max_ttl" mapstructure:"max_ttl" structs:"max_ttl"` +} + +const 
pathRoleHelpSyn = ` +Manage the roles that can be created with this backend. +` + +const pathRoleHelpDesc = ` +This path lets you manage the roles that can be created with this backend. + +The "db_name" parameter is required and configures the name of the database +connection to use. + +The "creation_statements" parameter customizes the string used to create the +credentials. This can be a sequence of SQL queries, or other statement formats +for a particular database type. Some substitution will be done to the statement +strings for certain keys. The names of the variables must be surrounded by "{{" +and "}}" to be replaced. + + * "name" - The random username generated for the DB user. + + * "password" - The random password generated for the DB user. + + * "expiration" - The timestamp when this user will expire. + +Example of a decent creation_statements for a postgresql database plugin: + + CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; + +The "revocation_statements" parameter customizes the statement string used to +revoke a user. Example of a decent revocation_statements for a postgresql +database plugin: + + REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}}; + REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}}; + REVOKE USAGE ON SCHEMA public FROM {{name}}; + DROP ROLE IF EXISTS {{name}}; + +The "renew_statements" parameter customizes the statement string used to renew a +user. +The "rollback_statements' parameter customizes the statement string used to +rollback a change if needed. 
+` diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/database/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/database/secret_creds.go new file mode 100644 index 0000000..c3dfcb9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/database/secret_creds.go @@ -0,0 +1,139 @@ +package database + +import ( + "fmt" + + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +const SecretCredsType = "creds" + +func secretCreds(b *databaseBackend) *framework.Secret { + return &framework.Secret{ + Type: SecretCredsType, + Fields: map[string]*framework.FieldSchema{}, + + Renew: b.secretCredsRenew(), + Revoke: b.secretCredsRevoke(), + } +} + +func (b *databaseBackend) secretCredsRenew() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Get the username from the internal data + usernameRaw, ok := req.Secret.InternalData["username"] + if !ok { + return nil, fmt.Errorf("secret is missing username internal data") + } + username, ok := usernameRaw.(string) + + roleNameRaw, ok := req.Secret.InternalData["role"] + if !ok { + return nil, fmt.Errorf("could not find role with name: %s", req.Secret.InternalData["role"]) + } + + role, err := b.Role(req.Storage, roleNameRaw.(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, fmt.Errorf("error during renew: could not find role with name %s", req.Secret.InternalData["role"]) + } + + f := framework.LeaseExtend(role.DefaultTTL, role.MaxTTL, b.System()) + resp, err := f(req, data) + if err != nil { + return nil, err + } + + // Grab the read lock + b.RLock() + var unlockFunc func() = b.RUnlock + + // Get the Database object + db, ok := b.getDBObj(role.DBName) + if !ok { + // Upgrade lock + b.RUnlock() + b.Lock() + unlockFunc = b.Unlock + + // Create a new DB object + db, err = b.createDBObj(req.Storage, role.DBName) + if err != nil { + unlockFunc() + return 
nil, fmt.Errorf("cound not retrieve db with name: %s, got error: %s", role.DBName, err) + } + } + + // Make sure we increase the VALID UNTIL endpoint for this user. + if expireTime := resp.Secret.ExpirationTime(); !expireTime.IsZero() { + err := db.RenewUser(role.Statements, username, expireTime) + // Unlock + unlockFunc() + if err != nil { + b.closeIfShutdown(role.DBName, err) + return nil, err + } + } + + return resp, nil + } +} + +func (b *databaseBackend) secretCredsRevoke() framework.OperationFunc { + return func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Get the username from the internal data + usernameRaw, ok := req.Secret.InternalData["username"] + if !ok { + return nil, fmt.Errorf("secret is missing username internal data") + } + username, ok := usernameRaw.(string) + + var resp *logical.Response + + roleNameRaw, ok := req.Secret.InternalData["role"] + if !ok { + return nil, fmt.Errorf("no role name was provided") + } + + role, err := b.Role(req.Storage, roleNameRaw.(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, fmt.Errorf("error during revoke: could not find role with name %s", req.Secret.InternalData["role"]) + } + + // Grab the read lock + b.RLock() + var unlockFunc func() = b.RUnlock + + // Get our connection + db, ok := b.getDBObj(role.DBName) + if !ok { + // Upgrade lock + b.RUnlock() + b.Lock() + unlockFunc = b.Unlock + + // Create a new DB object + db, err = b.createDBObj(req.Storage, role.DBName) + if err != nil { + unlockFunc() + return nil, fmt.Errorf("cound not retrieve db with name: %s, got error: %s", role.DBName, err) + } + } + + err = db.RevokeUser(role.Statements, username) + // Unlock + unlockFunc() + if err != nil { + b.closeIfShutdown(role.DBName, err) + return nil, err + } + + return resp, nil + } +} diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go index 
e9f3b29..d850e8a 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/mongodb/backend.go @@ -12,7 +12,11 @@ import ( ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend() *framework.Backend { @@ -34,7 +38,8 @@ func Backend() *framework.Backend { Clean: b.ResetSession, - Invalidate: b.invalidate, + Invalidate: b.invalidate, + BackendType: logical.TypeLogical, } return b.Backend diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go index 61afe75..ccd981b 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/mssql/backend.go @@ -12,7 +12,11 @@ import ( ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend() *backend { @@ -32,9 +36,9 @@ func Backend() *backend { secretCreds(&b), }, - Invalidate: b.invalidate, - - Clean: b.ResetDB, + Invalidate: b.invalidate, + Clean: b.ResetDB, + BackendType: logical.TypeLogical, } return &b diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go index 7ae0335..a89cc49 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/backend.go @@ -12,7 +12,11 @@ import ( ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend() *backend { @@ -32,9 
+36,9 @@ func Backend() *backend { secretCreds(&b), }, - Invalidate: b.invalidate, - - Clean: b.ResetDB, + Invalidate: b.invalidate, + Clean: b.ResetDB, + BackendType: logical.TypeLogical, } return &b diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go index b8d6513..27c3bf8 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/mysql/secret_creds.go @@ -66,6 +66,9 @@ func (b *backend) secretCredsRevoke( return nil, fmt.Errorf("secret is missing username internal data") } username, ok := usernameRaw.(string) + if !ok { + return nil, fmt.Errorf("usernameRaw is not a string") + } // Get our connection db, err := b.DB(req.Storage) diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go index 6128028..bf5168d 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend.go @@ -11,7 +11,11 @@ import ( // Factory creates a new backend implementing the logical.Backend interface func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } // Backend returns a new Backend framework struct @@ -35,15 +39,22 @@ func Backend() *backend { "crl", "certs/", }, + + Root: []string{ + "root", + "root/sign-self-issued", + }, }, Paths: []*framework.Path{ pathListRoles(&b), pathRoles(&b), pathGenerateRoot(&b), + pathSignIntermediate(&b), + pathSignSelfIssued(&b), + pathDeleteRoot(&b), pathGenerateIntermediate(&b), pathSetSignedIntermediate(&b), - pathSignIntermediate(&b), pathConfigCA(&b), pathConfigCRL(&b), pathConfigURLs(&b), @@ -64,6 +75,8 @@ func Backend() *backend { Secrets: 
[]*framework.Secret{ secretCerts(&b), }, + + BackendType: logical.TypeLogical, } b.crlLifetime = time.Hour * 72 diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go index 4fefc95..7a32ec2 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/backend_test.go @@ -1,6 +1,7 @@ package pki import ( + "bytes" "crypto" "crypto/ecdsa" "crypto/elliptic" @@ -12,6 +13,7 @@ import ( "encoding/pem" "fmt" "math" + "math/big" mathrand "math/rand" "net" "os" @@ -22,10 +24,13 @@ import ( "time" "github.com/fatih/structs" + "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/certutil" "github.com/hashicorp/vault/helper/strutil" + vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/logical" logicaltest "github.com/hashicorp/vault/logical/testing" + "github.com/hashicorp/vault/vault" "github.com/mitchellh/mapstructure" ) @@ -398,8 +403,8 @@ func checkCertsAndPrivateKey(keyType string, key crypto.Signer, usage x509.KeyUs return nil, fmt.Errorf("Validity period not far enough in the past") } - if math.Abs(float64(time.Now().Add(validity).Unix()-cert.NotAfter.Unix())) > 10 { - return nil, fmt.Errorf("Validity period of %d too large vs max of 10", cert.NotAfter.Unix()) + if math.Abs(float64(time.Now().Add(validity).Unix()-cert.NotAfter.Unix())) > 20 { + return nil, fmt.Errorf("Certificate validity end: %s; expected within 20 seconds of %s", cert.NotAfter.Format(time.RFC3339), time.Now().Add(validity).Format(time.RFC3339)) } return parsedCertBundle, nil @@ -648,6 +653,11 @@ func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s ErrorOk: true, }, + logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "root", + }, + logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "root/generate/exported", @@ -865,6 +875,11 @@ 
func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int }, // Test a bunch of generation stuff + logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "root", + }, + logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "root/generate/exported", @@ -997,6 +1012,11 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int }, // Do it all again, with EC keys and DER format + logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "root", + }, + logicaltest.TestStep{ Operation: logical.UpdateOperation, Path: "root/generate/exported", @@ -1218,7 +1238,7 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int Operation: logical.ReadOperation, PreFlight: setSerialUnderTest, Check: func(resp *logical.Response) error { - if resp.Data["error"] != nil && resp.Data["error"].(string) != "" { + if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" { return fmt.Errorf("got an error: %s", resp.Data["error"].(string)) } @@ -1232,7 +1252,7 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int Operation: logical.ReadOperation, PreFlight: setSerialUnderTest, Check: func(resp *logical.Response) error { - if resp.Data["error"] != nil && resp.Data["error"].(string) != "" { + if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" { return fmt.Errorf("got an error: %s", resp.Data["error"].(string)) } @@ -1290,7 +1310,7 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int Operation: logical.ReadOperation, PreFlight: setSerialUnderTest, Check: func(resp *logical.Response) error { - if resp.Data["error"] != nil && resp.Data["error"].(string) != "" { + if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" { return fmt.Errorf("got an error: %s", resp.Data["error"].(string)) } @@ -1304,7 +1324,7 @@ func generateCATestingSteps(t *testing.T, 
caCert, caKey, otherCaCert string, int Operation: logical.ReadOperation, PreFlight: setSerialUnderTest, Check: func(resp *logical.Response) error { - if resp.Data["error"] != nil && resp.Data["error"].(string) != "" { + if resp != nil && resp.Data["error"] != nil && resp.Data["error"].(string) != "" { return fmt.Errorf("got an error: %s", resp.Data["error"].(string)) } @@ -1330,8 +1350,8 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int Operation: logical.ReadOperation, PreFlight: setSerialUnderTest, Check: func(resp *logical.Response) error { - if resp.Data["error"] == nil || resp.Data["error"].(string) == "" { - return fmt.Errorf("didn't get an expected error") + if resp != nil { + return fmt.Errorf("expected no response") } serialUnderTest = "cert/" + reqdata["ec_int_serial_number"].(string) @@ -1344,8 +1364,8 @@ func generateCATestingSteps(t *testing.T, caCert, caKey, otherCaCert string, int Operation: logical.ReadOperation, PreFlight: setSerialUnderTest, Check: func(resp *logical.Response) error { - if resp.Data["error"] == nil || resp.Data["error"].(string) == "" { - return fmt.Errorf("didn't get an expected error") + if resp != nil { + return fmt.Errorf("expected no response") } serialUnderTest = "cert/" + reqdata["rsa_int_serial_number"].(string) @@ -1827,6 +1847,8 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { addTests(nil) roleTestStep.ErrorOk = false + roleVals.TTL = "" + roleVals.MaxTTL = "12h" } // Listing test @@ -1870,7 +1892,7 @@ func TestBackend_PathFetchCertList(t *testing.T) { config.StorageView = storage b := Backend() - _, err := b.Setup(config) + err := b.Setup(config) if err != nil { t.Fatal(err) } @@ -1997,7 +2019,7 @@ func TestBackend_SignVerbatim(t *testing.T) { config.StorageView = storage b := Backend() - _, err := b.Setup(config) + err := b.Setup(config) if err != nil { t.Fatal(err) } @@ -2108,12 +2130,31 @@ func TestBackend_SignVerbatim(t *testing.T) { "ttl": "12h", }, 
}) - if resp != nil && !resp.IsError() { - t.Fatalf("sign-verbatim signed too-large-ttl'd CSR: %#v", *resp) - } if err != nil { t.Fatal(err) } + if resp != nil && resp.IsError() { + t.Fatalf(resp.Error().Error()) + } + if resp.Data == nil || resp.Data["certificate"] == nil { + t.Fatal("did not get expected data") + } + certString := resp.Data["certificate"].(string) + block, _ := pem.Decode([]byte(certString)) + if block == nil { + t.Fatal("nil pem block") + } + certs, err := x509.ParseCertificates(block.Bytes) + if err != nil { + t.Fatal(err) + } + if len(certs) != 1 { + t.Fatalf("expected a single cert, got %d", len(certs)) + } + cert := certs[0] + if math.Abs(float64(time.Now().Add(12*time.Hour).Unix()-cert.NotAfter.Unix())) < 10 { + t.Fatalf("sign-verbatim did not properly cap validiaty period on signed CSR") + } // now check that if we set generate-lease it takes it from the role and the TTLs match roleData = map[string]interface{}{ @@ -2156,6 +2197,522 @@ func TestBackend_SignVerbatim(t *testing.T) { } } +func TestBackend_Root_Idempotentcy(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + var err error + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + resp, err = client.Logical().Read("pki/cert/ca_chain") + if err != nil { + t.Fatalf("error reading ca_chain: %v", err) + } + + r1Data := resp.Data + + // Try again, make sure it's a 
204 and same CA + resp, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + if resp != nil { + t.Fatal("expected no ca info") + } + resp, err = client.Logical().Read("pki/cert/ca_chain") + if err != nil { + t.Fatalf("error reading ca_chain: %v", err) + } + r2Data := resp.Data + if !reflect.DeepEqual(r1Data, r2Data) { + t.Fatal("got different ca certs") + } + + resp, err = client.Logical().Delete("pki/root") + if err != nil { + t.Fatal(err) + } + if resp != nil { + t.Fatal("expected nil response") + } + // Make sure it behaves the same + resp, err = client.Logical().Delete("pki/root") + if err != nil { + t.Fatal(err) + } + if resp != nil { + t.Fatal("expected nil response") + } + + _, err = client.Logical().Read("pki/cert/ca_chain") + if err == nil { + t.Fatal("expected error") + } + + resp, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + + _, err = client.Logical().Read("pki/cert/ca_chain") + if err != nil { + t.Fatal(err) + } +} + +func TestBackend_Permitted_DNS_Domains(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + var err error + err = client.Sys().Mount("root", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + if err != nil { + t.Fatal(err) + } + err = client.Sys().Mount("int", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "4h", + MaxLeaseTTL: "20h", + }, + }) + if err != nil { + t.Fatal(err) + } + _, err = 
client.Logical().Write("root/roles/example", map[string]interface{}{ + "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com", + "allow_bare_domains": true, + "allow_subdomains": true, + "max_ttl": "2h", + }) + if err != nil { + t.Fatal(err) + } + _, err = client.Logical().Write("int/roles/example", map[string]interface{}{ + "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com", + "allow_subdomains": true, + "allow_bare_domains": true, + "max_ttl": "2h", + }) + if err != nil { + t.Fatal(err) + } + + // Direct issuing from root + _, err = client.Logical().Write("root/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + "permitted_dns_domains": []string{"foobar.com", ".zipzap.com"}, + }) + if err != nil { + t.Fatal(err) + } + + clientKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + path := "root/" + checkIssue := func(valid bool, args ...interface{}) { + argMap := map[string]interface{}{} + var currString string + for i, arg := range args { + if i%2 == 0 { + currString = arg.(string) + } else { + argMap[currString] = arg + } + } + _, err = client.Logical().Write(path+"issue/example", argMap) + switch { + case valid && err != nil: + t.Fatal(err) + case !valid && err == nil: + t.Fatal("expected error") + } + + csr, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: argMap["common_name"].(string), + }, + }, clientKey) + if err != nil { + t.Fatal(err) + } + delete(argMap, "common_name") + argMap["csr"] = string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csr, + })) + + _, err = client.Logical().Write(path+"sign/example", argMap) + switch { + case valid && err != nil: + t.Fatal(err) + case !valid && err == nil: + t.Fatal("expected error") + } + } + + // Check issuing and signing against root's permitted domains + checkIssue(false, "common_name", "zipzap.com") + checkIssue(false, "common_name", 
"host.foobar.com") + checkIssue(true, "common_name", "host.zipzap.com") + checkIssue(true, "common_name", "foobar.com") + + // Verify that root also won't issue an intermediate outside of its permitted domains + resp, err := client.Logical().Write("int/intermediate/generate/internal", map[string]interface{}{ + "common_name": "issuer.abc.com", + }) + if err != nil { + t.Fatal(err) + } + _, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{ + "common_name": "issuer.abc.com", + "csr": resp.Data["csr"], + "permitted_dns_domains": []string{"abc.com", ".xyz.com"}, + "ttl": "5h", + }) + if err == nil { + t.Fatal("expected error") + } + _, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{ + "use_csr_values": true, + "csr": resp.Data["csr"], + "permitted_dns_domains": []string{"abc.com", ".xyz.com"}, + "ttl": "5h", + }) + if err == nil { + t.Fatal("expected error") + } + + // Sign a valid intermediate + resp, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{ + "common_name": "issuer.zipzap.com", + "csr": resp.Data["csr"], + "permitted_dns_domains": []string{"abc.com", ".xyz.com"}, + "ttl": "5h", + }) + if err != nil { + t.Fatal(err) + } + resp, err = client.Logical().Write("int/intermediate/set-signed", map[string]interface{}{ + "certificate": resp.Data["certificate"], + }) + if err != nil { + t.Fatal(err) + } + + // Check enforcement with the intermediate's set values + path = "int/" + checkIssue(false, "common_name", "host.abc.com") + checkIssue(false, "common_name", "xyz.com") + checkIssue(true, "common_name", "abc.com") + checkIssue(true, "common_name", "host.xyz.com") +} + +func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + 
cluster.Start() + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + var err error + err = client.Sys().Mount("root", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + err = client.Sys().Mount("int", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "4h", + MaxLeaseTTL: "20h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Direct issuing from root + _, err = client.Logical().Write("root/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("root/roles/test", map[string]interface{}{ + "allow_bare_domains": true, + "allow_subdomains": true, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().Write("int/intermediate/generate/internal", map[string]interface{}{ + "common_name": "myint.com", + }) + if err != nil { + t.Fatal(err) + } + + csr := resp.Data["csr"] + + _, err = client.Logical().Write("root/sign/test", map[string]interface{}{ + "common_name": "myint.com", + "csr": csr, + "ttl": "60h", + }) + if err == nil { + t.Fatal("expected error") + } + + _, err = client.Logical().Write("root/sign-verbatim/test", map[string]interface{}{ + "common_name": "myint.com", + "csr": csr, + "ttl": "60h", + }) + if err == nil { + t.Fatal("expected error") + } + + resp, err = client.Logical().Write("root/root/sign-intermediate", map[string]interface{}{ + "common_name": "myint.com", + "csr": csr, + "ttl": "60h", + }) + if err != nil { + t.Fatalf("got error: %v", err) + } + if resp == nil { + t.Fatal("got nil response") + } + if len(resp.Warnings) == 0 { + t.Fatalf("expected warnings, got %#v", *resp) + } +} + +func TestBackend_SignSelfIssued(t *testing.T) { + // create the backend + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = 
storage + + b := Backend() + err := b.Setup(config) + if err != nil { + t.Fatal(err) + } + + // generate root + rootData := map[string]interface{}{ + "common_name": "test.com", + "ttl": "172800", + } + + resp, err := b.HandleRequest(&logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/generate/internal", + Storage: storage, + Data: rootData, + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to generate root, %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + getSelfSigned := func(subject, issuer *x509.Certificate) (string, *x509.Certificate) { + selfSigned, err := x509.CreateCertificate(rand.Reader, subject, issuer, key.Public(), key) + if err != nil { + t.Fatal(err) + } + cert, err := x509.ParseCertificate(selfSigned) + if err != nil { + t.Fatal(err) + } + pemSS := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: selfSigned, + }) + return string(pemSS), cert + } + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "foo.bar.com", + }, + SerialNumber: big.NewInt(1234), + IsCA: false, + BasicConstraintsValid: true, + } + + ss, _ := getSelfSigned(template, template) + resp, err = b.HandleRequest(&logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + }, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + if !resp.IsError() { + t.Fatalf("expected error due to non-CA; got: %#v", *resp) + } + + // Set CA to true, but leave issuer alone + template.IsCA = true + + issuer := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "bar.foo.com", + }, + SerialNumber: big.NewInt(2345), + IsCA: true, + BasicConstraintsValid: true, + } + ss, ssCert := getSelfSigned(template, issuer) + resp, err = b.HandleRequest(&logical.Request{ + Operation: logical.UpdateOperation, + Path: 
"root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + }, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + if !resp.IsError() { + t.Fatalf("expected error due to different issuer; cert info is\nIssuer\n%#v\nSubject\n%#v\n", ssCert.Issuer, ssCert.Subject) + } + + ss, ssCert = getSelfSigned(template, template) + resp, err = b.HandleRequest(&logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + }, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + if resp.IsError() { + t.Fatalf("error in response: %s", resp.Error().Error()) + } + + newCertString := resp.Data["certificate"].(string) + block, _ := pem.Decode([]byte(newCertString)) + newCert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + + signingBundle, err := fetchCAInfo(&logical.Request{Storage: storage}) + if err != nil { + t.Fatal(err) + } + if reflect.DeepEqual(newCert.Subject, newCert.Issuer) { + t.Fatal("expected different subject/issuer") + } + if !reflect.DeepEqual(newCert.Issuer, signingBundle.Certificate.Subject) { + t.Fatalf("expected matching issuer/CA subject\n\nIssuer:\n%#v\nSubject:\n%#v\n", newCert.Issuer, signingBundle.Certificate.Subject) + } + if bytes.Equal(newCert.AuthorityKeyId, newCert.SubjectKeyId) { + t.Fatal("expected different authority/subject") + } + if !bytes.Equal(newCert.AuthorityKeyId, signingBundle.Certificate.SubjectKeyId) { + t.Fatal("expected authority on new cert to be same as signing subject") + } + if newCert.Subject.CommonName != "foo.bar.com" { + t.Fatalf("unexpected common name on new cert: %s", newCert.Subject.CommonName) + } +} + const ( rsaCAKey string = `-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAmPQlK7xD5p+E8iLQ8XlVmll5uU2NKMxKY3UF5tbh+0vkc+Fy diff --git 
a/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go index cab5797..7a6deda 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/ca_util.go @@ -1,6 +1,8 @@ package pki import ( + "time" + "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) @@ -27,7 +29,7 @@ func (b *backend) getGenerationParams( } role = &roleEntry{ - TTL: data.Get("ttl").(string), + TTL: (time.Duration(data.Get("ttl").(int)) * time.Second).String(), KeyType: data.Get("key_type").(string), KeyBits: data.Get("key_bits").(int), AllowLocalhost: true, diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go index 1796d98..b4bb381 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/cert_util.go @@ -45,12 +45,13 @@ type creationBundle struct { KeyType string KeyBits int SigningBundle *caInfoBundle - TTL time.Duration + NotAfter time.Time KeyUsage x509.KeyUsage ExtKeyUsage certExtKeyUsage // Only used when signing a CA cert - UseCSRValues bool + UseCSRValues bool + PermittedDNSDomains []string // URLs to encode into the certificate URLs *urlEntries @@ -434,6 +435,8 @@ func generateCert(b *backend, if isCA { creationBundle.IsCA = isCA + creationBundle.PermittedDNSDomains = data.Get("permitted_dns_domains").([]string) + if signingBundle == nil { // Generating a self-signed root certificate entries, err := getURLs(req) @@ -581,6 +584,10 @@ func signCert(b *backend, creationBundle.IsCA = isCA creationBundle.UseCSRValues = useCSRValues + if isCA { + creationBundle.PermittedDNSDomains = data.Get("permitted_dns_domains").([]string) + } + parsedBundle, err := signCertificate(creationBundle, csr) if err != nil { return nil, err @@ -720,54 
+727,48 @@ func generateCreationBundle(b *backend, } } - // Get the TTL and very it against the max allowed - var ttlField string + // Get the TTL and verify it against the max allowed var ttl time.Duration var maxTTL time.Duration - var ttlFieldInt interface{} + var notAfter time.Time { - ttlFieldInt, ok = data.GetOk("ttl") - if !ok { - ttlField = role.TTL - } else { - ttlField = ttlFieldInt.(string) - } + ttl = time.Duration(data.Get("ttl").(int)) * time.Second - if len(ttlField) == 0 { - ttl = b.System().DefaultLeaseTTL() - } else { - ttl, err = parseutil.ParseDurationSecond(ttlField) - if err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf( - "invalid requested ttl: %s", err)} + if ttl == 0 { + if role.TTL != "" { + ttl, err = parseutil.ParseDurationSecond(role.TTL) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf( + "invalid role ttl: %s", err)} + } } } - if len(role.MaxTTL) == 0 { - maxTTL = b.System().MaxLeaseTTL() - } else { + if role.MaxTTL != "" { maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL) if err != nil { return nil, errutil.UserError{Err: fmt.Sprintf( - "invalid ttl: %s", err)} + "invalid role max_ttl: %s", err)} } } - if ttl > maxTTL { - // Don't error if they were using system defaults, only error if - // they specifically chose a bad TTL - if len(ttlField) == 0 { - ttl = maxTTL - } else { - return nil, errutil.UserError{Err: fmt.Sprintf( - "ttl is larger than maximum allowed (%d)", maxTTL/time.Second)} - } + if ttl == 0 { + ttl = b.System().DefaultLeaseTTL() } + if maxTTL == 0 { + maxTTL = b.System().MaxLeaseTTL() + } + if ttl > maxTTL { + ttl = maxTTL + } + + notAfter = time.Now().Add(ttl) // If it's not self-signed, verify that the issued certificate won't be // valid past the lifetime of the CA certificate if signingBundle != nil && - time.Now().Add(ttl).After(signingBundle.Certificate.NotAfter) { + notAfter.After(signingBundle.Certificate.NotAfter) && !role.AllowExpirationPastCA { + return nil, 
errutil.UserError{Err: fmt.Sprintf( "cannot satisfy request, as TTL is beyond the expiration of the CA certificate")} } @@ -800,7 +801,7 @@ func generateCreationBundle(b *backend, KeyType: role.KeyType, KeyBits: role.KeyBits, SigningBundle: signingBundle, - TTL: ttl, + NotAfter: notAfter, KeyUsage: x509.KeyUsage(parseKeyUsages(role.KeyUsage)), ExtKeyUsage: extUsage, } @@ -893,7 +894,7 @@ func createCertificate(creationInfo *creationBundle) (*certutil.ParsedCertBundle SerialNumber: serialNumber, Subject: subject, NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: time.Now().Add(creationInfo.TTL), + NotAfter: creationInfo.NotAfter, IsCA: false, SubjectKeyId: subjKeyID, DNSNames: creationInfo.DNSNames, @@ -906,6 +907,12 @@ func createCertificate(creationInfo *creationBundle) (*certutil.ParsedCertBundle certTemplate.IsCA = true } + // This will only be filled in from the generation paths + if len(creationInfo.PermittedDNSDomains) > 0 { + certTemplate.PermittedDNSDomains = creationInfo.PermittedDNSDomains + certTemplate.PermittedDNSDomainsCritical = true + } + addKeyUsages(creationInfo, certTemplate) certTemplate.IssuingCertificateURL = creationInfo.URLs.IssuingCertificates @@ -922,6 +929,12 @@ func createCertificate(creationInfo *creationBundle) (*certutil.ParsedCertBundle } caCert := creationInfo.SigningBundle.Certificate + certTemplate.AuthorityKeyId = caCert.SubjectKeyId + + err = checkPermittedDNSDomains(certTemplate, caCert) + if err != nil { + return nil, errutil.UserError{Err: err.Error()} + } certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, result.PrivateKey.Public(), creationInfo.SigningBundle.PrivateKey) } else { @@ -940,6 +953,7 @@ func createCertificate(creationInfo *creationBundle) (*certutil.ParsedCertBundle certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 } + certTemplate.AuthorityKeyId = subjKeyID certTemplate.BasicConstraintsValid = true certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, 
certTemplate, result.PrivateKey.Public(), result.PrivateKey) } @@ -1047,6 +1061,8 @@ func signCertificate(creationInfo *creationBundle, } subjKeyID := sha1.Sum(marshaledKey) + caCert := creationInfo.SigningBundle.Certificate + subject := pkix.Name{ CommonName: creationInfo.CommonName, OrganizationalUnit: creationInfo.OU, @@ -1054,11 +1070,12 @@ func signCertificate(creationInfo *creationBundle, } certTemplate := &x509.Certificate{ - SerialNumber: serialNumber, - Subject: subject, - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: time.Now().Add(creationInfo.TTL), - SubjectKeyId: subjKeyID[:], + SerialNumber: serialNumber, + Subject: subject, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: creationInfo.NotAfter, + SubjectKeyId: subjKeyID[:], + AuthorityKeyId: caCert.SubjectKeyId, } switch creationInfo.SigningBundle.PrivateKeyType { @@ -1085,7 +1102,6 @@ func signCertificate(creationInfo *creationBundle, addKeyUsages(creationInfo, certTemplate) var certBytes []byte - caCert := creationInfo.SigningBundle.Certificate certTemplate.IssuingCertificateURL = creationInfo.URLs.IssuingCertificates certTemplate.CRLDistributionPoints = creationInfo.URLs.CRLDistributionPoints @@ -1106,6 +1122,15 @@ func signCertificate(creationInfo *creationBundle, } } + if len(creationInfo.PermittedDNSDomains) > 0 { + certTemplate.PermittedDNSDomains = creationInfo.PermittedDNSDomains + certTemplate.PermittedDNSDomainsCritical = true + } + err = checkPermittedDNSDomains(certTemplate, caCert) + if err != nil { + return nil, errutil.UserError{Err: err.Error()} + } + certBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, caCert, csr.PublicKey, creationInfo.SigningBundle.PrivateKey) if err != nil { @@ -1122,3 +1147,39 @@ func signCertificate(creationInfo *creationBundle, return result, nil } + +func checkPermittedDNSDomains(template, ca *x509.Certificate) error { + if len(ca.PermittedDNSDomains) == 0 { + return nil + } + + namesToCheck := map[string]struct{}{ + 
template.Subject.CommonName: struct{}{}, + } + for _, name := range template.DNSNames { + namesToCheck[name] = struct{}{} + } + + var badName string +NameCheck: + for name := range namesToCheck { + for _, perm := range ca.PermittedDNSDomains { + switch { + case strings.HasPrefix(perm, ".") && strings.HasSuffix(name, perm): + // .example.com matches my.host.example.com and + // host.example.com but does not match example.com + break NameCheck + case perm == name: + break NameCheck + } + } + badName = name + break + } + + if badName == "" { + return nil + } + + return fmt.Errorf("name %q disallowed by CA's permitted DNS domains", badName) +} diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go index e97a970..52adf10 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/fields.go @@ -59,7 +59,7 @@ email addresses.`, } fields["ttl"] = &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeDurationSecond, Description: `The requested Time To Live for the certificate; sets the expiration date. If not specified the role default, backend default, or system @@ -92,7 +92,7 @@ must still be specified in alt_names or ip_sans.`, } fields["ttl"] = &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeDurationSecond, Description: `The requested Time To Live for the certificate; sets the expiration date. If not specified the role default, backend default, or system @@ -144,5 +144,10 @@ func addCAIssueFields(fields map[string]*framework.FieldSchema) map[string]*fram Description: "The maximum allowable path length", } + fields["permitted_dns_domains"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `Domains for which this certificate is allowed to sign or issue child certificates. 
If set, all DNS names (subject and alt) on child certs must be exact matches or subsets of the given domains (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`, + } + return fields } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go index c182553..347ac01 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_config_ca.go @@ -16,9 +16,7 @@ func pathConfigCA(b *backend) *framework.Path { "pem_bundle": &framework.FieldSchema{ Type: framework.TypeString, Description: `PEM-format, concatenated unencrypted -secret key and certificate, or, if a -CSR was generated with the "generate" -endpoint, just the signed certificate.`, +secret key and certificate.`, }, }, diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go index ed60e75..cf71b4c 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_fetch.go @@ -159,7 +159,7 @@ func (b *backend) pathFetchRead(req *logical.Request, data *framework.FieldData) caInfo, err := fetchCAInfo(req) switch err.(type) { case errutil.UserError: - response = logical.ErrorResponse(funcErr.Error()) + response = logical.ErrorResponse(err.Error()) goto reply case errutil.InternalError: retErr = err @@ -189,7 +189,7 @@ func (b *backend) pathFetchRead(req *logical.Request, data *framework.FieldData) } } if certEntry == nil { - response = logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found", serial)) + response = nil goto reply } @@ -244,6 +244,11 @@ reply: } case retErr != nil: response = nil + return + case response == nil: + return + case response.IsError(): + return response, nil default: response.Data["certificate"] = 
string(certificate) response.Data["revocation_time"] = revocationTime diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go index 71a0455..2073621 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_intermediate.go @@ -127,7 +127,7 @@ func (b *backend) pathSetSignedIntermediate( cert := data.Get("certificate").(string) if cert == "" { - return logical.ErrorResponse("no certificate provided in the \"certficate\" parameter"), nil + return logical.ErrorResponse("no certificate provided in the \"certificate\" parameter"), nil } inputBundle, err := certutil.ParsePEMBundle(cert) diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go index 26f7421..d7b0c36 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_issue_sign.go @@ -277,6 +277,15 @@ func (b *backend) pathIssueSignCert( } } + if useCSR { + if role.UseCSRCommonName && data.Get("common_name").(string) != "" { + resp.AddWarning("the common_name field was provided but the role is set with \"use_csr_common_name\" set to true") + } + if role.UseCSRSANs && data.Get("alt_names").(string) != "" { + resp.AddWarning("the alt_names field was provided but the role is set with \"use_csr_sans\" set to true") + } + } + return resp, nil } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go index 4d9e115..96d0197 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles.go @@ -35,7 +35,7 @@ func pathRoles(b *backend) *framework.Path 
{ }, "ttl": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeDurationSecond, Default: "", Description: `The lease duration if no specific lease duration is requested. The lease duration controls the expiration @@ -383,7 +383,7 @@ func (b *backend) pathRoleCreate( entry := &roleEntry{ MaxTTL: data.Get("max_ttl").(string), - TTL: data.Get("ttl").(string), + TTL: (time.Duration(data.Get("ttl").(int)) * time.Second).String(), AllowLocalhost: data.Get("allow_localhost").(bool), AllowedDomains: data.Get("allowed_domains").(string), AllowBareDomains: data.Get("allow_bare_domains").(bool), @@ -532,6 +532,9 @@ type roleEntry struct { Organization string `json:"organization" structs:"organization" mapstructure:"organization"` GenerateLease *bool `json:"generate_lease,omitempty" structs:"generate_lease,omitempty"` NoStore bool `json:"no_store" structs:"no_store" mapstructure:"no_store"` + + // Used internally for signing intermediates + AllowExpirationPastCA bool } const pathListRolesHelpSyn = `List the existing roles in this backend` diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go index 82772b0..bd0aa90 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_roles_test.go @@ -13,11 +13,7 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { var err error b := Backend() - _, err = b.Setup(config) - if err != nil { - t.Fatal(err) - } - err = b.Initialize() + err = b.Setup(config) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go index d029531..438c92e 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go +++ 
b/vendor/github.com/hashicorp/vault/builtin/logical/pki/path_root.go @@ -1,9 +1,15 @@ package pki import ( + "crypto/rand" + "crypto/x509" "encoding/base64" + "encoding/pem" "fmt" + "reflect" + "time" + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/errutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" @@ -28,6 +34,21 @@ func pathGenerateRoot(b *backend) *framework.Path { return ret } +func pathDeleteRoot(b *backend) *framework.Path { + ret := &framework.Path{ + Pattern: "root", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.DeleteOperation: b.pathCADeleteRoot, + }, + + HelpSynopsis: pathDeleteRootHelpSyn, + HelpDescription: pathDeleteRootHelpDesc, + } + + return ret +} + func pathSignIntermediate(b *backend) *framework.Path { ret := &framework.Path{ Pattern: "root/sign-intermediate", @@ -66,10 +87,45 @@ the non-repudiation flag.`, return ret } +func pathSignSelfIssued(b *backend) *framework.Path { + ret := &framework.Path{ + Pattern: "root/sign-self-issued", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathCASignSelfIssued, + }, + + Fields: map[string]*framework.FieldSchema{ + "certificate": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `PEM-format self-issued certificate to be signed.`, + }, + }, + + HelpSynopsis: pathSignSelfIssuedHelpSyn, + HelpDescription: pathSignSelfIssuedHelpDesc, + } + + return ret +} + +func (b *backend) pathCADeleteRoot( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return nil, req.Storage.Delete("config/ca_bundle") +} + func (b *backend) pathCAGenerateRoot( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { var err error + entry, err := req.Storage.Get("config/ca_bundle") + if err != nil { + return nil, err + } + if entry != nil { + return nil, nil + } + exported, format, role, errorResp := b.getGenerationParams(data) if 
errorResp != nil { return errorResp, nil @@ -133,7 +189,7 @@ func (b *backend) pathCAGenerateRoot( } // Store it as the CA bundle - entry, err := logical.StorageEntryJSON("config/ca_bundle", cb) + entry, err = logical.StorageEntryJSON("config/ca_bundle", cb) if err != nil { return nil, err } @@ -186,12 +242,13 @@ func (b *backend) pathCASignIntermediate( } role := &roleEntry{ - TTL: data.Get("ttl").(string), - AllowLocalhost: true, - AllowAnyName: true, - AllowIPSANs: true, - EnforceHostnames: false, - KeyType: "any", + TTL: (time.Duration(data.Get("ttl").(int)) * time.Second).String(), + AllowLocalhost: true, + AllowAnyName: true, + AllowIPSANs: true, + EnforceHostnames: false, + KeyType: "any", + AllowExpirationPastCA: true, } if cn := data.Get("common_name").(string); len(cn) == 0 { @@ -248,6 +305,10 @@ func (b *backend) pathCASignIntermediate( }, } + if signingBundle.Certificate.NotAfter.Before(parsedBundle.Certificate.NotAfter) { + resp.AddWarning("The expiration time for the signed certificate is after the CA's expiration time. 
If the new certificate is not treated as a root, validation paths with the certificate past the issuing CA's expiration time will fail.") + } + switch format { case "pem": resp.Data["certificate"] = cb.Certificate @@ -291,6 +352,75 @@ func (b *backend) pathCASignIntermediate( return resp, nil } +func (b *backend) pathCASignSelfIssued( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + var err error + + certPem := data.Get("certificate").(string) + block, _ := pem.Decode([]byte(certPem)) + if block == nil || len(block.Bytes) == 0 { + return logical.ErrorResponse("certificate could not be PEM-decoded"), nil + } + certs, err := x509.ParseCertificates(block.Bytes) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error parsing certificate: %s", err)), nil + } + if len(certs) != 1 { + return logical.ErrorResponse(fmt.Sprintf("%d certificates found in PEM file, expected 1", len(certs))), nil + } + + cert := certs[0] + if !cert.IsCA { + return logical.ErrorResponse("given certificate is not a CA certificate"), nil + } + if !reflect.DeepEqual(cert.Issuer, cert.Subject) { + return logical.ErrorResponse("given certificate is not self-issued"), nil + } + + var caErr error + signingBundle, caErr := fetchCAInfo(req) + switch caErr.(type) { + case errutil.UserError: + return nil, errutil.UserError{Err: fmt.Sprintf( + "could not fetch the CA certificate (was one set?): %s", caErr)} + case errutil.InternalError: + return nil, errutil.InternalError{Err: fmt.Sprintf( + "error fetching CA certificate: %s", caErr)} + } + + signingCB, err := signingBundle.ToCertBundle() + if err != nil { + return nil, fmt.Errorf("Error converting raw signing bundle to cert bundle: %s", err) + } + + urls := &urlEntries{} + if signingBundle.URLs != nil { + urls = signingBundle.URLs + } + cert.IssuingCertificateURL = urls.IssuingCertificates + cert.CRLDistributionPoints = urls.CRLDistributionPoints + cert.OCSPServer = urls.OCSPServers + + newCert, err := 
x509.CreateCertificate(rand.Reader, cert, signingBundle.Certificate, cert.PublicKey, signingBundle.PrivateKey) + if err != nil { + return nil, errwrap.Wrapf("error signing self-issued certificate: {{err}}", err) + } + if len(newCert) == 0 { + return nil, fmt.Errorf("nil cert was created when signing self-issued certificate") + } + pemCert := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: newCert, + }) + + return &logical.Response{ + Data: map[string]interface{}{ + "certificate": string(pemCert), + "issuing_ca": signingCB.Certificate, + }, + }, nil +} + const pathGenerateRootHelpSyn = ` Generate a new CA certificate and private key used for signing. ` @@ -299,10 +429,30 @@ const pathGenerateRootHelpDesc = ` See the API documentation for more information. ` +const pathDeleteRootHelpSyn = ` +Deletes the root CA key to allow a new one to be generated. +` + +const pathDeleteRootHelpDesc = ` +See the API documentation for more information. +` + const pathSignIntermediateHelpSyn = ` Issue an intermediate CA certificate based on the provided CSR. ` const pathSignIntermediateHelpDesc = ` -See the API documentation for more information. +see the API documentation for more information. +` + +const pathSignSelfIssuedHelpSyn = ` +Signs another CA's self-issued certificate. +` + +const pathSignSelfIssuedHelpDesc = ` +Signs another CA's self-issued certificate. This is most often used for rolling roots; unless you know you need this you probably want to use sign-intermediate instead. + +Note that this is a very privileged operation and should be extremely restricted in terms of who is allowed to use it. All values will be taken directly from the incoming certificate and only verification that it is self-issued will be performed. + +Configured URLs for CRLs/OCSP/etc. will be copied over and the issuer will be this mount's CA cert. Other than that, all other values will be used verbatim. 
` diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go index 6f4befd..4a689f8 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/backend.go @@ -13,7 +13,11 @@ import ( ) func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend(conf).Setup(conf) + b := Backend(conf) + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend(conf *logical.BackendConfig) *backend { @@ -33,9 +37,9 @@ func Backend(conf *logical.BackendConfig) *backend { secretCreds(&b), }, - Clean: b.ResetDB, - - Invalidate: b.invalidate, + Clean: b.ResetDB, + Invalidate: b.invalidate, + BackendType: logical.TypeLogical, } b.logger = conf.Logger diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go index 535d1c1..9c5010a 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/postgresql/secret_creds.go @@ -41,7 +41,9 @@ func (b *backend) secretCredsRenew( return nil, fmt.Errorf("secret is missing username internal data") } username, ok := usernameRaw.(string) - + if !ok { + return nil, fmt.Errorf("usernameRaw is not a string") + } // Get our connection db, err := b.DB(req.Storage) if err != nil { @@ -92,7 +94,9 @@ func (b *backend) secretCredsRevoke( return nil, fmt.Errorf("secret is missing username internal data") } username, ok := usernameRaw.(string) - + if !ok { + return nil, fmt.Errorf("usernameRaw is not a string") + } var revocationSQL string var resp *logical.Response diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go index 
4f9cde0..1e3f1ec 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/backend.go @@ -13,7 +13,11 @@ import ( // Factory creates and configures the backend func Factory(conf *logical.BackendConfig) (logical.Backend, error) { - return Backend().Setup(conf) + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } // Creates a new backend with all the paths and secrets belonging to it @@ -34,9 +38,9 @@ func Backend() *backend { secretCreds(&b), }, - Clean: b.resetClient, - - Invalidate: b.invalidate, + Clean: b.resetClient, + Invalidate: b.invalidate, + BackendType: logical.TypeLogical, } return &b diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go index a5c9983..4182fd4 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/rabbitmq/path_config_lease_test.go @@ -13,7 +13,7 @@ func TestBackend_config_lease_RU(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} b := Backend() - if _, err = b.Setup(config); err != nil { + if err = b.Setup(config); err != nil { t.Fatal(err) } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go index dcfb00d..c14685d 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend.go @@ -2,6 +2,7 @@ package ssh import ( "strings" + "sync" "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" @@ -10,8 +11,9 @@ import ( type backend struct { *framework.Backend - view logical.Storage - salt *salt.Salt + view logical.Storage + salt *salt.Salt 
+ saltMutex sync.RWMutex } func Factory(conf *logical.BackendConfig) (logical.Backend, error) { @@ -19,7 +21,10 @@ func Factory(conf *logical.BackendConfig) (logical.Backend, error) { if err != nil { return nil, err } - return b.Setup(conf) + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil } func Backend(conf *logical.BackendConfig) (*backend, error) { @@ -57,20 +62,42 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { secretOTP(&b), }, - Init: b.Initialize, + Invalidate: b.invalidate, + BackendType: logical.TypeLogical, } return &b, nil } -func (b *backend) Initialize() error { +func (b *backend) Salt() (*salt.Salt, error) { + b.saltMutex.RLock() + if b.salt != nil { + defer b.saltMutex.RUnlock() + return b.salt, nil + } + b.saltMutex.RUnlock() + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + if b.salt != nil { + return b.salt, nil + } salt, err := salt.NewSalt(b.view, &salt.Config{ HashFunc: salt.SHA256Hash, + Location: salt.DefaultLocation, }) if err != nil { - return err + return nil, err } b.salt = salt - return nil + return salt, nil +} + +func (b *backend) invalidate(key string) { + switch key { + case salt.DefaultLocation: + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + b.salt = nil + } } const backendHelp = ` diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go index 538455c..139d24a 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/backend_test.go @@ -106,7 +106,7 @@ func TestBackend_allowed_users(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } @@ -652,6 +652,94 @@ func TestBackend_OptionsOverrideDefaults(t *testing.T) { logicaltest.Test(t, testCase) } +func TestBackend_CustomKeyIDFormat(t *testing.T) { + config := 
logical.TestBackendConfig() + + b, err := Factory(config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + testCase := logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + configCaStep(), + + createRoleStep("customrole", map[string]interface{}{ + "key_type": "ca", + "key_id_format": "{{role_name}}-{{token_display_name}}-{{public_key_hash}}", + "allowed_users": "tuber", + "default_user": "tuber", + "allow_user_certificates": true, + "allowed_critical_options": "option,secondary", + "allowed_extensions": "extension,additional", + "default_critical_options": map[string]interface{}{ + "option": "value", + }, + "default_extensions": map[string]interface{}{ + "extension": "extended", + }, + }), + + signCertificateStep("customrole", "customrole-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.UserCert, []string{"tuber"}, map[string]string{ + "secondary": "value", + }, map[string]string{ + "additional": "value", + }, 2*time.Hour, map[string]interface{}{ + "public_key": publicKey2, + "ttl": "2h", + "critical_options": map[string]interface{}{ + "secondary": "value", + }, + "extensions": map[string]interface{}{ + "additional": "value", + }, + }), + }, + } + + logicaltest.Test(t, testCase) +} + +func TestBackend_DisallowUserProvidedKeyIDs(t *testing.T) { + config := logical.TestBackendConfig() + + b, err := Factory(config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + testCase := logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + configCaStep(), + + createRoleStep("testing", map[string]interface{}{ + "key_type": "ca", + "allow_user_key_ids": false, + "allow_user_certificates": true, + }), + logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "sign/testing", + Data: map[string]interface{}{ + "public_key": publicKey2, + "key_id": "override", + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp.Data["error"] != "setting key_id is 
not allowed by role" { + return errors.New("Custom user key id was allowed even when 'allow_user_key_ids' is false.") + } + return nil + }, + }, + }, + } + + logicaltest.Test(t, testCase) +} + func configCaStep() logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go index cc0b17b..250ab4f 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_config_ca_test.go @@ -17,7 +17,7 @@ func TestSSH_ConfigCAStorageUpgrade(t *testing.T) { t.Fatal(err) } - _, err = b.Setup(config) + err = b.Setup(config) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go index e2b1e0c..53d55ed 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_creds_create.go @@ -207,7 +207,12 @@ func (b *backend) GenerateSaltedOTP() (string, string, error) { if err != nil { return "", "", err } - return str, b.salt.SaltID(str), nil + salt, err := b.Salt() + if err != nil { + return "", "", err + } + + return str, salt.SaltID(str), nil } // Generates an UUID OTP and creates an entry for the same in storage backend with its salted string. 
diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go index b905115..6be96b6 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_roles.go @@ -45,6 +45,7 @@ type sshRole struct { AllowBareDomains bool `mapstructure:"allow_bare_domains" json:"allow_bare_domains"` AllowSubdomains bool `mapstructure:"allow_subdomains" json:"allow_subdomains"` AllowUserKeyIDs bool `mapstructure:"allow_user_key_ids" json:"allow_user_key_ids"` + KeyIDFormat string `mapstructure:"key_id_format" json:"key_id_format"` } func pathListRoles(b *backend) *framework.Path { @@ -150,7 +151,7 @@ func pathRoles(b *backend) *framework.Path { this list enforces it. If this field is set, then credentials can only be created for default_user and usernames present in this list. Setting this option will enable all the users with - access this role to fetch credentials for all other usernames + access to this role to fetch credentials for all other usernames in this list. Use with caution. N.B.: with the CA type, an empty list means that no users are allowed; explicitly specify '*' to allow any user. @@ -213,7 +214,7 @@ func pathRoles(b *backend) *framework.Path { have if none are provided when signing. This field takes in key value pairs in JSON format. Note that these are not restricted by "allowed_critical_options". Defaults to none. -`, + `, }, "default_extensions": &framework.FieldSchema{ Type: framework.TypeMap, @@ -266,6 +267,16 @@ func pathRoles(b *backend) *framework.Path { The key ID is logged by the SSH server and can be useful for auditing. 
`, }, + "key_id_format": &framework.FieldSchema{ + Type: framework.TypeString, + Description: ` + [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + When supplied, this value specifies a custom format for the key id of a signed certificate. + The following variables are availble for use: '{{token_display_name}}' - The display name of + the token used to make the request. '{{role_name}}' - The name of the role signing the request. + '{{public_key_hash}}' - A SHA256 checksum of the public key that is being signed. + `, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -435,6 +446,7 @@ func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework AllowBareDomains: data.Get("allow_bare_domains").(bool), AllowSubdomains: data.Get("allow_subdomains").(bool), AllowUserKeyIDs: data.Get("allow_user_key_ids").(bool), + KeyIDFormat: data.Get("key_id_format").(string), KeyType: KeyTypeCA, } @@ -553,6 +565,7 @@ func (b *backend) pathRoleRead(req *logical.Request, d *framework.FieldData) (*l "allow_bare_domains": role.AllowBareDomains, "allow_subdomains": role.AllowSubdomains, "allow_user_key_ids": role.AllowUserKeyIDs, + "key_id_format": role.KeyIDFormat, "key_type": role.KeyType, "default_critical_options": role.DefaultCriticalOptions, "default_extensions": role.DefaultExtensions, diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go index b5c2e0d..4d62f4a 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_sign.go @@ -3,7 +3,6 @@ package ssh import ( "crypto/rand" "crypto/sha256" - "encoding/hex" "errors" "fmt" "strconv" @@ -275,16 +274,22 @@ func (b *backend) calculateKeyId(data *framework.FieldData, req *logical.Request return reqId, nil } - keyHash := sha256.Sum256(pubKey.Marshal()) - keyId := 
hex.EncodeToString(keyHash[:]) - - if req.DisplayName != "" { - keyId = fmt.Sprintf("%s-%s", req.DisplayName, keyId) + keyIDFormat := "vault-{{token_display_name}}-{{public_key_hash}}" + if req.DisplayName == "" { + keyIDFormat = "vault-{{public_key_hash}}" } - keyId = fmt.Sprintf("vault-%s", keyId) + if role.KeyIDFormat != "" { + keyIDFormat = role.KeyIDFormat + } - return keyId, nil + keyID := substQuery(keyIDFormat, map[string]string{ + "token_display_name": req.DisplayName, + "role_name": data.Get("role").(string), + "public_key_hash": fmt.Sprintf("%x", sha256.Sum256(pubKey.Marshal())), + }) + + return keyID, nil } func (b *backend) calculateCriticalOptions(data *framework.FieldData, role *sshRole) (map[string]string, error) { @@ -383,7 +388,17 @@ func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.D return ttl, nil } -func (b *creationBundle) sign() (*ssh.Certificate, error) { +func (b *creationBundle) sign() (retCert *ssh.Certificate, retErr error) { + defer func() { + if r := recover(); r != nil { + errMsg, ok := r.(string) + if ok { + retCert = nil + retErr = errors.New(errMsg) + } + } + }() + serialNumber, err := certutil.GenerateSerialNumber() if err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go index 9cb98ad..1c5e453 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/path_verify.go @@ -57,7 +57,11 @@ func (b *backend) pathVerifyWrite(req *logical.Request, d *framework.FieldData) // Create the salt of OTP because entry would have been create with the // salt and not directly of the OTP. Salt will yield the same value which // because the seed is the same, the backend salt. 
- otpSalted := b.salt.SaltID(otp) + salt, err := b.Salt() + if err != nil { + return nil, err + } + otpSalted := salt.SaltID(otp) // Return nil if there is no entry found for the OTP otpEntry, err := b.getOTP(req.Storage, otpSalted) diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go index d0e4dd5..cc8872b 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/secret_otp.go @@ -33,7 +33,11 @@ func (b *backend) secretOTPRevoke(req *logical.Request, d *framework.FieldData) return nil, fmt.Errorf("secret is missing internal data") } - err := req.Storage.Delete("otp/" + b.salt.SaltID(otp)) + salt, err := b.Salt() + if err != nil { + return nil, err + } + err = req.Storage.Delete("otp/" + salt.SaltID(otp)) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go index c18ccaf..106c740 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/ssh/util.go @@ -163,6 +163,7 @@ func createSSHComm(logger log.Logger, username, ip string, port int, hostkey str Auth: []ssh.AuthMethod{ ssh.PublicKeys(signer), }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), } connfunc := func() (net.Conn, error) { @@ -211,3 +212,12 @@ func convertMapToStringValue(initial map[string]interface{}) map[string]string { } return result } + +// Serve a template processor for custom format inputs +func substQuery(tpl string, data map[string]string) string { + for k, v := range data { + tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1) + } + + return tpl +} diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend.go new file mode 100644 
index 0000000..936b46b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend.go @@ -0,0 +1,48 @@ +package totp + +import ( + "strings" + "time" + + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" + cache "github.com/patrickmn/go-cache" +) + +func Factory(conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + Paths: []*framework.Path{ + pathListKeys(&b), + pathKeys(&b), + pathCode(&b), + }, + + Secrets: []*framework.Secret{}, + BackendType: logical.TypeLogical, + } + + b.usedCodes = cache.New(0, 30*time.Second) + + return &b +} + +type backend struct { + *framework.Backend + + usedCodes *cache.Cache +} + +const backendHelp = ` +The TOTP backend dynamically generates time-based one-time use passwords. +` diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend_test.go new file mode 100644 index 0000000..a3304c2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/totp/backend_test.go @@ -0,0 +1,1131 @@ +package totp + +import ( + "fmt" + "log" + "net/url" + "path" + "testing" + "time" + + "github.com/hashicorp/vault/logical" + logicaltest "github.com/hashicorp/vault/logical/testing" + "github.com/mitchellh/mapstructure" + otplib "github.com/pquerna/otp" + totplib "github.com/pquerna/otp/totp" +) + +func createKey() (string, error) { + keyUrl, err := totplib.Generate(totplib.GenerateOpts{ + Issuer: "Vault", + AccountName: "Test", + }) + + key := keyUrl.Secret() + + return key, err +} + +func generateCode(key string, period uint, digits otplib.Digits, algorithm otplib.Algorithm) (string, error) { + // Generate password using totp library + totpToken, err := 
totplib.GenerateCodeCustom(key, time.Now(), totplib.ValidateOpts{ + Period: period, + Digits: digits, + Algorithm: algorithm, + }) + + return totpToken, err +} + +func TestBackend_readCredentialsDefaultValues(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "key": key, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "", + "account_name": "", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + "key": key, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_readCredentialsEightDigitsThirtySecondPeriod(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "digits": 8, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsEight, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + "key": key, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_readCredentialsSixDigitsNinetySecondPeriod(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = 
&logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "period": 90, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 90, + "algorithm": otplib.AlgorithmSHA1, + "key": key, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_readCredentialsSHA256(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "algorithm": "SHA256", + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA256, + "key": key, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_readCredentialsSHA512(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "algorithm": 
"SHA512", + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA512, + "key": key, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_keyCrudDefaultValues(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + "key": key, + } + + code, _ := generateCode(key, 30, otplib.DigitsSix, otplib.AlgorithmSHA1) + invalidCode := "12345678" + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepValidateCode(t, "test", code, true, false), + // Next step should fail because it should be in the used cache + testAccStepValidateCode(t, "test", code, false, true), + testAccStepValidateCode(t, "test", invalidCode, false, false), + testAccStepDeleteKey(t, "test"), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_createKeyMissingKeyValue(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "generate": false, + } + + 
logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_createKeyInvalidKeyValue(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": "1", + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_createKeyInvalidAlgorithm(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "algorithm": "BADALGORITHM", + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_createKeyInvalidPeriod(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "period": -1, + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_createKeyInvalidDigits(t 
*testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "digits": 20, + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyDefaultValues(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "generate": true, + "key_size": 20, + "exported": true, + "qr_size": 200, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + }, + }) +} + +func TestBackend_generatedKeyDefaultValuesNoQR(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "generate": true, + "key_size": 20, + "exported": true, + "qr_size": 0, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + }, + }) +} + +func TestBackend_generatedKeyNonDefaultKeySize(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = 
&logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "generate": true, + "key_size": 10, + "exported": true, + "qr_size": 200, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyInvalidPeriod(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=AZ" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyInvalidDigits(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=Q&period=60" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyIssuerInFirstPosition(t *testing.T) { + config := 
logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "test@email.com", + "digits": otplib.DigitsSix, + "period": 60, + "algorithm": otplib.AlgorithmSHA512, + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyIssuerInQueryString(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60&issuer=Vault" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "test@email.com", + "digits": otplib.DigitsSix, + "period": 60, + "algorithm": otplib.AlgorithmSHA512, + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyMissingIssuer(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + 
if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "", + "account_name": "test@email.com", + "digits": otplib.DigitsSix, + "period": 60, + "algorithm": otplib.AlgorithmSHA512, + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyMissingAccountName(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/Vault:?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "", + "digits": otplib.DigitsSix, + "period": 60, + "algorithm": otplib.AlgorithmSHA512, + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyMissingAccountNameandIssuer(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + urlString := 
"otpauth://totp/?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "", + "account_name": "", + "digits": otplib.DigitsSix, + "period": 60, + "algorithm": otplib.AlgorithmSHA512, + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_generatedKeyInvalidSkew(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "skew": "2", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyInvalidQRSize(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "qr_size": "-100", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyInvalidKeySize(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": 
"Vault", + "account_name": "Test", + "key_size": "-100", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyMissingAccountName(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyMissingIssuer(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "account_name": "test@email.com", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_invalidURLValue(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "url": "notaurl", + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_urlAndGenerateTrue(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := 
map[string]interface{}{ + "url": "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_keyAndGenerateTrue(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyExportedFalse(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "test@email.com", + "generate": true, + "exported": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "test@email.com", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + } + + logicaltest.Test(t, logicaltest.TestCase{ + Backend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + }, + }) +} + +func testAccStepCreateKey(t *testing.T, name string, keyData map[string]interface{}, expectFail bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: path.Join("keys", name), + Data: keyData, + ErrorOk: expectFail, + Check: func(resp *logical.Response) error { + //Skip this if the key is not generated by vault or if the test is 
expected to fail + if !keyData["generate"].(bool) || expectFail { + return nil + } + + // Check to see if barcode and url were returned if exported is false + if !keyData["exported"].(bool) { + if resp != nil { + t.Fatalf("data was returned when exported was set to false") + } + return nil + } + + // Check to see if a barcode was returned when qr_size is zero + if keyData["qr_size"].(int) == 0 { + if _, exists := resp.Data["barcode"]; exists { + t.Fatalf("a barcode was returned when qr_size was set to zero") + } + return nil + } + + var d struct { + Url string `mapstructure:"url"` + Barcode string `mapstructure:"barcode"` + } + + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + //Check to see if barcode and url are returned + if d.Barcode == "" { + t.Fatalf("a barcode was not returned for a generated key") + } + + if d.Url == "" { + t.Fatalf("a url was not returned for a generated key") + } + + //Parse url + urlObject, err := url.Parse(d.Url) + + if err != nil { + t.Fatal("an error occured while parsing url string") + } + + //Set up query object + urlQuery := urlObject.Query() + + //Read secret + urlSecret := urlQuery.Get("secret") + + //Check key length + keySize := keyData["key_size"].(int) + correctSecretStringSize := (keySize / 5) * 8 + actualSecretStringSize := len(urlSecret) + + if actualSecretStringSize != correctSecretStringSize { + t.Fatal("incorrect key string length") + } + + return nil + }, + } +} + +func testAccStepDeleteKey(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: path.Join("keys", name), + } +} + +func testAccStepReadCreds(t *testing.T, b logical.Backend, s logical.Storage, name string, validation map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: path.Join("code", name), + Check: func(resp *logical.Response) error { + var d struct { + Code string 
`mapstructure:"code"` + } + + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + log.Printf("[TRACE] Generated credentials: %v", d) + + period := validation["period"].(int) + key := validation["key"].(string) + algorithm := validation["algorithm"].(otplib.Algorithm) + digits := validation["digits"].(otplib.Digits) + + valid, _ := totplib.ValidateCustom(d.Code, key, time.Now(), totplib.ValidateOpts{ + Period: uint(period), + Skew: 1, + Digits: digits, + Algorithm: algorithm, + }) + + if !valid { + t.Fatalf("generated code isn't valid") + } + + return nil + }, + } +} + +func testAccStepReadKey(t *testing.T, name string, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "keys/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if expected == nil { + return nil + } + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + Issuer string `mapstructure:"issuer"` + AccountName string `mapstructure:"account_name"` + Period uint `mapstructure:"period"` + Algorithm string `mapstructure:"algorithm"` + Digits otplib.Digits `mapstructure:"digits"` + } + + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + var keyAlgorithm otplib.Algorithm + switch d.Algorithm { + case "SHA1": + keyAlgorithm = otplib.AlgorithmSHA1 + case "SHA256": + keyAlgorithm = otplib.AlgorithmSHA256 + case "SHA512": + keyAlgorithm = otplib.AlgorithmSHA512 + } + + period := expected["period"].(int) + + switch { + case d.Issuer != expected["issuer"]: + return fmt.Errorf("issuer should equal: %s", expected["issuer"]) + case d.AccountName != expected["account_name"]: + return fmt.Errorf("account_name should equal: %s", expected["account_name"]) + case d.Period != uint(period): + return fmt.Errorf("period should equal: %d", expected["period"]) + case keyAlgorithm != expected["algorithm"]: + return fmt.Errorf("algorithm should equal: %s", 
expected["algorithm"]) + case d.Digits != expected["digits"]: + return fmt.Errorf("digits should equal: %d", expected["digits"]) + } + return nil + }, + } +} + +func testAccStepValidateCode(t *testing.T, name string, code string, valid, expectError bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "code/" + name, + Data: map[string]interface{}{ + "code": code, + }, + ErrorOk: expectError, + Check: func(resp *logical.Response) error { + if resp == nil { + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + Valid bool `mapstructure:"valid"` + } + + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + switch valid { + case true: + if d.Valid != true { + return fmt.Errorf("code was not valid: %s", code) + } + + default: + if d.Valid != false { + return fmt.Errorf("code was incorrectly validated: %s", code) + } + } + return nil + }, + } +} diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_code.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_code.go new file mode 100644 index 0000000..ebc3d47 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_code.go @@ -0,0 +1,128 @@ +package totp + +import ( + "fmt" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" + otplib "github.com/pquerna/otp" + totplib "github.com/pquerna/otp/totp" +) + +func pathCode(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "code/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the key.", + }, + "code": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "TOTP code to be validated.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathReadCode, + 
logical.UpdateOperation: b.pathValidateCode, + }, + + HelpSynopsis: pathCodeHelpSyn, + HelpDescription: pathCodeHelpDesc, + } +} + +func (b *backend) pathReadCode( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + // Get the key + key, err := b.Key(req.Storage, name) + if err != nil { + return nil, err + } + if key == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown key: %s", name)), nil + } + + // Generate password using totp library + totpToken, err := totplib.GenerateCodeCustom(key.Key, time.Now(), totplib.ValidateOpts{ + Period: key.Period, + Digits: key.Digits, + Algorithm: key.Algorithm, + }) + if err != nil { + return nil, err + } + + // Return the secret + return &logical.Response{ + Data: map[string]interface{}{ + "code": totpToken, + }, + }, nil +} + +func (b *backend) pathValidateCode( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + code := data.Get("code").(string) + + // Enforce input value requirements + if code == "" { + return logical.ErrorResponse("the code value is required"), nil + } + + // Get the key's stored values + key, err := b.Key(req.Storage, name) + if err != nil { + return nil, err + } + if key == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown key: %s", name)), nil + } + + usedName := fmt.Sprintf("%s_%s", name, code) + + _, ok := b.usedCodes.Get(usedName) + if ok { + return logical.ErrorResponse("code already used; wait until the next time period"), nil + } + + valid, err := totplib.ValidateCustom(code, key.Key, time.Now(), totplib.ValidateOpts{ + Period: key.Period, + Skew: key.Skew, + Digits: key.Digits, + Algorithm: key.Algorithm, + }) + if err != nil && err != otplib.ErrValidateInputInvalidLength { + return logical.ErrorResponse("an error occured while validating the code"), err + } + + // Take the key skew, add two for behind and in front, and multiple that by + // the 
period to cover the full possibility of the validity of the key + err = b.usedCodes.Add(usedName, nil, time.Duration( + int64(time.Second)* + int64(key.Period)* + int64((2+key.Skew)))) + if err != nil { + return nil, errwrap.Wrapf("error adding code to used cache: {{err}}", err) + } + + return &logical.Response{ + Data: map[string]interface{}{ + "valid": valid, + }, + }, nil +} + +const pathCodeHelpSyn = ` +Request time-based one-time use password or validate a password for a certain key . +` +const pathCodeHelpDesc = ` +This path generates and validates time-based one-time use passwords for a certain key. + +` diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_keys.go new file mode 100644 index 0000000..3f36aef --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/totp/path_keys.go @@ -0,0 +1,424 @@ +package totp + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "fmt" + "image/png" + "net/url" + "strconv" + "strings" + + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" + otplib "github.com/pquerna/otp" + totplib "github.com/pquerna/otp/totp" +) + +func pathListKeys(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "keys/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathKeyList, + }, + + HelpSynopsis: pathKeyHelpSyn, + HelpDescription: pathKeyHelpDesc, + } +} + +func pathKeys(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "keys/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the key.", + }, + + "generate": { + Type: framework.TypeBool, + Default: false, + Description: "Determines if a key should be generated by Vault or if a key is being passed from another service.", + }, + + "exported": { + Type: framework.TypeBool, + 
Default: true, + Description: "Determines if a QR code and url are returned upon generating a key. Only used if generate is true.", + }, + + "key_size": { + Type: framework.TypeInt, + Default: 20, + Description: "Determines the size in bytes of the generated key. Only used if generate is true.", + }, + + "key": { + Type: framework.TypeString, + Description: "The shared master key used to generate a TOTP token. Only used if generate is false.", + }, + + "issuer": { + Type: framework.TypeString, + Description: `The name of the key's issuing organization. Required if generate is true.`, + }, + + "account_name": { + Type: framework.TypeString, + Description: `The name of the account associated with the key. Required if generate is true.`, + }, + + "period": { + Type: framework.TypeDurationSecond, + Default: 30, + Description: `The length of time used to generate a counter for the TOTP token calculation.`, + }, + + "algorithm": { + Type: framework.TypeString, + Default: "SHA1", + Description: `The hashing algorithm used to generate the TOTP token. Options include SHA1, SHA256 and SHA512.`, + }, + + "digits": { + Type: framework.TypeInt, + Default: 6, + Description: `The number of digits in the generated TOTP token. This value can either be 6 or 8.`, + }, + + "skew": { + Type: framework.TypeInt, + Default: 1, + Description: `The number of delay periods that are allowed when validating a TOTP token. This value can either be 0 or 1. Only used if generate is true.`, + }, + + "qr_size": { + Type: framework.TypeInt, + Default: 200, + Description: `The pixel size of the generated square QR code. Only used if generate is true and exported is true. If this value is 0, a QR code will not be returned.`, + }, + + "url": { + Type: framework.TypeString, + Description: `A TOTP url string containing all of the parameters for key setup. 
Only used if generate is false.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathKeyRead, + logical.UpdateOperation: b.pathKeyCreate, + logical.DeleteOperation: b.pathKeyDelete, + }, + + HelpSynopsis: pathKeyHelpSyn, + HelpDescription: pathKeyHelpDesc, + } +} + +func (b *backend) Key(s logical.Storage, n string) (*keyEntry, error) { + entry, err := s.Get("key/" + n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result keyEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) pathKeyDelete( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete("key/" + data.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathKeyRead( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key, err := b.Key(req.Storage, data.Get("name").(string)) + if err != nil { + return nil, err + } + if key == nil { + return nil, nil + } + + // Translate algorithm back to string + algorithm := key.Algorithm.String() + + // Return values of key + return &logical.Response{ + Data: map[string]interface{}{ + "issuer": key.Issuer, + "account_name": key.AccountName, + "period": key.Period, + "algorithm": algorithm, + "digits": key.Digits, + }, + }, nil +} + +func (b *backend) pathKeyList( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List("key/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *backend) pathKeyCreate( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + generate := data.Get("generate").(bool) + exported := data.Get("exported").(bool) + keyString := data.Get("key").(string) + issuer := 
data.Get("issuer").(string) + accountName := data.Get("account_name").(string) + period := data.Get("period").(int) + algorithm := data.Get("algorithm").(string) + digits := data.Get("digits").(int) + skew := data.Get("skew").(int) + qrSize := data.Get("qr_size").(int) + keySize := data.Get("key_size").(int) + inputURL := data.Get("url").(string) + + if generate { + if keyString != "" { + return logical.ErrorResponse("a key should not be passed if generate is true"), nil + } + if inputURL != "" { + return logical.ErrorResponse("a url should not be passed if generate is true"), nil + } + } + + // Read parameters from url if given + if inputURL != "" { + //Parse url + urlObject, err := url.Parse(inputURL) + if err != nil { + return logical.ErrorResponse("an error occured while parsing url string"), err + } + + //Set up query object + urlQuery := urlObject.Query() + path := strings.TrimPrefix(urlObject.Path, "/") + index := strings.Index(path, ":") + + //Read issuer + urlIssuer := urlQuery.Get("issuer") + if urlIssuer != "" { + issuer = urlIssuer + } else { + if index != -1 { + issuer = path[:index] + } + } + + //Read account name + if index == -1 { + accountName = path + } else { + accountName = path[index+1:] + } + + //Read key string + keyString = urlQuery.Get("secret") + + //Read period + periodQuery := urlQuery.Get("period") + if periodQuery != "" { + periodInt, err := strconv.Atoi(periodQuery) + if err != nil { + return logical.ErrorResponse("an error occured while parsing period value in url"), err + } + period = periodInt + } + + //Read digits + digitsQuery := urlQuery.Get("digits") + if digitsQuery != "" { + digitsInt, err := strconv.Atoi(digitsQuery) + if err != nil { + return logical.ErrorResponse("an error occured while parsing digits value in url"), err + } + digits = digitsInt + } + + //Read algorithm + algorithmQuery := urlQuery.Get("algorithm") + if algorithmQuery != "" { + algorithm = algorithmQuery + } + } + + // Translate digits and algorithm to a 
format the totp library understands + var keyDigits otplib.Digits + switch digits { + case 6: + keyDigits = otplib.DigitsSix + case 8: + keyDigits = otplib.DigitsEight + default: + return logical.ErrorResponse("the digits value can only be 6 or 8"), nil + } + + var keyAlgorithm otplib.Algorithm + switch algorithm { + case "SHA1": + keyAlgorithm = otplib.AlgorithmSHA1 + case "SHA256": + keyAlgorithm = otplib.AlgorithmSHA256 + case "SHA512": + keyAlgorithm = otplib.AlgorithmSHA512 + default: + return logical.ErrorResponse("the algorithm value is not valid"), nil + } + + // Enforce input value requirements + if period <= 0 { + return logical.ErrorResponse("the period value must be greater than zero"), nil + } + + switch skew { + case 0: + case 1: + default: + return logical.ErrorResponse("the skew value must be 0 or 1"), nil + } + + // QR size can be zero but it shouldn't be negative + if qrSize < 0 { + return logical.ErrorResponse("the qr_size value must be greater than or equal to zero"), nil + } + + if keySize <= 0 { + return logical.ErrorResponse("the key_size value must be greater than zero"), nil + } + + // Period, Skew and Key Size need to be unsigned ints + uintPeriod := uint(period) + uintSkew := uint(skew) + uintKeySize := uint(keySize) + + var response *logical.Response + + switch generate { + case true: + // If the key is generated, Account Name and Issuer are required. 
+ if accountName == "" { + return logical.ErrorResponse("the account_name value is required for generated keys"), nil + } + + if issuer == "" { + return logical.ErrorResponse("the issuer value is required for generated keys"), nil + } + + // Generate a new key + keyObject, err := totplib.Generate(totplib.GenerateOpts{ + Issuer: issuer, + AccountName: accountName, + Period: uintPeriod, + Digits: keyDigits, + Algorithm: keyAlgorithm, + SecretSize: uintKeySize, + }) + if err != nil { + return logical.ErrorResponse("an error occured while generating a key"), err + } + + // Get key string value + keyString = keyObject.Secret() + + // Skip returning the QR code and url if exported is set to false + if exported { + // Prepare the url and barcode + urlString := keyObject.String() + + // Don't include QR code is size is set to zero + if qrSize == 0 { + response = &logical.Response{ + Data: map[string]interface{}{ + "url": urlString, + }, + } + } else { + barcode, err := keyObject.Image(qrSize, qrSize) + if err != nil { + return logical.ErrorResponse("an error occured while generating a QR code image"), err + } + + var buff bytes.Buffer + png.Encode(&buff, barcode) + b64Barcode := base64.StdEncoding.EncodeToString(buff.Bytes()) + response = &logical.Response{ + Data: map[string]interface{}{ + "url": urlString, + "barcode": b64Barcode, + }, + } + } + } + default: + if keyString == "" { + return logical.ErrorResponse("the key value is required"), nil + } + + _, err := base32.StdEncoding.DecodeString(keyString) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "invalid key value: %s", err)), nil + } + } + + // Store it + entry, err := logical.StorageEntryJSON("key/"+name, &keyEntry{ + Key: keyString, + Issuer: issuer, + AccountName: accountName, + Period: uintPeriod, + Algorithm: keyAlgorithm, + Digits: keyDigits, + Skew: uintSkew, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(entry); err != nil { + return nil, err + } + + return 
response, nil +} + +type keyEntry struct { + Key string `json:"key" mapstructure:"key" structs:"key"` + Issuer string `json:"issuer" mapstructure:"issuer" structs:"issuer"` + AccountName string `json:"account_name" mapstructure:"account_name" structs:"account_name"` + Period uint `json:"period" mapstructure:"period" structs:"period"` + Algorithm otplib.Algorithm `json:"algorithm" mapstructure:"algorithm" structs:"algorithm"` + Digits otplib.Digits `json:"digits" mapstructure:"digits" structs:"digits"` + Skew uint `json:"skew" mapstructure:"skew" structs:"skew"` +} + +const pathKeyHelpSyn = ` +Manage the keys that can be created with this backend. +` + +const pathKeyHelpDesc = ` +This path lets you manage the keys that can be created with this backend. + +` diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go index 37ebca4..db85ba1 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend.go @@ -10,12 +10,10 @@ import ( func Factory(conf *logical.BackendConfig) (logical.Backend, error) { b := Backend(conf) - be, err := b.Backend.Setup(conf) - if err != nil { + if err := b.Setup(conf); err != nil { return nil, err } - - return be, nil + return b, nil } func Backend(conf *logical.BackendConfig) *backend { @@ -40,9 +38,9 @@ func Backend(conf *logical.BackendConfig) *backend { b.pathVerify(), }, - Secrets: []*framework.Secret{}, - - Invalidate: b.invalidate, + Secrets: []*framework.Secret{}, + Invalidate: b.invalidate, + BackendType: logical.TypeLogical, } b.lm = keysutil.NewLockManager(conf.System.CachingDisabled()) diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go index 0f9d06f..a9c27bc 100644 --- 
a/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/backend_test.go @@ -31,7 +31,7 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { if b == nil { t.Fatalf("failed to create backend") } - _, err := b.Backend.Setup(config) + err := b.Backend.Setup(config) if err != nil { t.Fatal(err) } @@ -129,7 +129,9 @@ func TestBackend_rotation(t *testing.T) { testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepDeleteNotDisabledPolicy(t, "test"), - testAccStepAdjustPolicy(t, "test", 3), + testAccStepAdjustPolicyMinDecryption(t, "test", 3), + testAccStepAdjustPolicyMinEncryption(t, "test", 4), + testAccStepReadPolicyWithVersions(t, "test", false, false, 3, 4), testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory), testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory), @@ -140,7 +142,8 @@ func TestBackend_rotation(t *testing.T) { testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), - testAccStepAdjustPolicy(t, "test", 1), + testAccStepAdjustPolicyMinDecryption(t, "test", 1), + testAccStepReadPolicyWithVersions(t, "test", false, false, 1, 4), testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory), testAccStepDecrypt(t, "test", testPlaintext, decryptData), testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory), @@ -221,7 +224,7 @@ func testAccStepListPolicy(t *testing.T, name string, expectNone bool) logicalte } } -func testAccStepAdjustPolicy(t *testing.T, name string, minVer int) logicaltest.TestStep { +func testAccStepAdjustPolicyMinDecryption(t *testing.T, name string, minVer int) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, 
Path: "keys/" + name + "/config", @@ -230,6 +233,15 @@ func testAccStepAdjustPolicy(t *testing.T, name string, minVer int) logicaltest. }, } } +func testAccStepAdjustPolicyMinEncryption(t *testing.T, name string, minVer int) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "keys/" + name + "/config", + Data: map[string]interface{}{ + "min_encryption_version": minVer, + }, + } +} func testAccStepDisableDeletion(t *testing.T, name string) logicaltest.TestStep { return logicaltest.TestStep{ @@ -276,6 +288,10 @@ func testAccStepDeleteNotDisabledPolicy(t *testing.T, name string) logicaltest.T } func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool) logicaltest.TestStep { + return testAccStepReadPolicyWithVersions(t, name, expectNone, derived, 1, 0) +} + +func testAccStepReadPolicyWithVersions(t *testing.T, name string, expectNone, derived bool, minDecryptionVersion int, minEncryptionVersion int) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ReadOperation, Path: "keys/" + name, @@ -297,6 +313,8 @@ func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool) KDF string `mapstructure:"kdf"` DeletionAllowed bool `mapstructure:"deletion_allowed"` ConvergentEncryption bool `mapstructure:"convergent_encryption"` + MinDecryptionVersion int `mapstructure:"min_decryption_version"` + MinEncryptionVersion int `mapstructure:"min_encryption_version"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err @@ -315,6 +333,12 @@ func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool) if d.Keys == nil { return fmt.Errorf("bad: %#v", d) } + if d.MinDecryptionVersion != minDecryptionVersion { + return fmt.Errorf("bad: %#v", d) + } + if d.MinEncryptionVersion != minEncryptionVersion { + return fmt.Errorf("bad: %#v", d) + } if d.DeletionAllowed == true { return fmt.Errorf("bad: %#v", d) } @@ -610,7 +634,7 @@ func TestKeyUpgrade(t 
*testing.T) { if p.Key != nil || p.Keys == nil || len(p.Keys) != 1 || - !reflect.DeepEqual(p.Keys[1].AESKey, key) { + !reflect.DeepEqual(p.Keys[1].Key, key) { t.Errorf("bad key migration, result is %#v", p.Keys) } } @@ -730,6 +754,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) { "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } resp, err = b.HandleRequest(req) + if err == nil { + t.Fatal("expected error, got nil") + } if resp == nil { t.Fatal("expected non-nil response") } @@ -755,6 +782,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) { "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } if resp == nil { t.Fatal("expected non-nil response") } @@ -764,6 +794,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) { ciphertext1 := resp.Data["ciphertext"].(string) resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } if resp == nil { t.Fatal("expected non-nil response") } @@ -789,6 +822,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) { } resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } if resp == nil { t.Fatal("expected non-nil response") } @@ -798,6 +834,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) { ciphertext3 := resp.Data["ciphertext"].(string) resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } if resp == nil { t.Fatal("expected non-nil response") } @@ -820,6 +859,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) { "context": "qV4h9iQyvn+raODOer4JNAsOhkXBwdT4HZ677Ql4KLqXSU+Jk4C/fXBWbv6xkSYT", } resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } if resp == nil { t.Fatal("expected non-nil response") } @@ -829,6 +871,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) { ciphertext5 := resp.Data["ciphertext"].(string) resp, err = b.HandleRequest(req) + if err != nil 
{ + t.Fatal(err) + } if resp == nil { t.Fatal("expected non-nil response") } @@ -854,6 +899,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) { "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } resp, err = b.HandleRequest(req) + if err == nil { + t.Fatal("expected error, got nil") + } if resp == nil { t.Fatal("expected non-nil response") } @@ -868,6 +916,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) { "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } if resp == nil { t.Fatal("expected non-nil response") } @@ -877,6 +928,9 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) { ciphertext7 := resp.Data["ciphertext"].(string) resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } if resp == nil { t.Fatal("expected non-nil response") } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go index d2b3e5f..7cbd513 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config.go @@ -19,7 +19,16 @@ func (b *backend) pathConfig() *framework.Path { "min_decryption_version": &framework.FieldSchema{ Type: framework.TypeInt, Description: `If set, the minimum version of the key allowed -to be decrypted.`, +to be decrypted. For signing keys, the minimum +version allowed to be used for verification.`, + }, + + "min_encryption_version": &framework.FieldSchema{ + Type: framework.TypeInt, + Description: `If set, the minimum version of the key allowed +to be used for encryption; or for signing keys, +to be used for signing. 
If set to zero, only +the latest version of the key is allowed.`, }, "deletion_allowed": &framework.FieldSchema{ @@ -72,8 +81,7 @@ func (b *backend) pathConfigWrite( resp.AddWarning("since Vault 0.3, transit key numbering starts at 1; forcing minimum to 1") } - if minDecryptionVersion > 0 && - minDecryptionVersion != p.MinDecryptionVersion { + if minDecryptionVersion != p.MinDecryptionVersion { if minDecryptionVersion > p.LatestVersion { return logical.ErrorResponse( fmt.Sprintf("cannot set min decryption version of %d, latest key version is %d", minDecryptionVersion, p.LatestVersion)), nil @@ -83,6 +91,32 @@ func (b *backend) pathConfigWrite( } } + minEncryptionVersionRaw, ok := d.GetOk("min_encryption_version") + if ok { + minEncryptionVersion := minEncryptionVersionRaw.(int) + + if minEncryptionVersion < 0 { + return logical.ErrorResponse("min encryption version cannot be negative"), nil + } + + if minEncryptionVersion != p.MinEncryptionVersion { + if minEncryptionVersion > p.LatestVersion { + return logical.ErrorResponse( + fmt.Sprintf("cannot set min encryption version of %d, latest key version is %d", minEncryptionVersion, p.LatestVersion)), nil + } + p.MinEncryptionVersion = minEncryptionVersion + persistNeeded = true + } + } + + // Check here to get the final picture after the logic on each + // individually. MinDecryptionVersion will always be 1 or above. 
+ if p.MinEncryptionVersion > 0 && + p.MinEncryptionVersion < p.MinDecryptionVersion { + return logical.ErrorResponse( + fmt.Sprintf("cannot set min encryption/decryption values; min encryption version of %d must be greater than or equal to min decryption version of %d", p.MinEncryptionVersion, p.MinDecryptionVersion)), nil + } + allowDeletionInt, ok := d.GetOk("deletion_allowed") if ok { allowDeletion := allowDeletionInt.(bool) @@ -104,7 +138,7 @@ func (b *backend) pathConfigWrite( return nil, nil } - if len(resp.Warnings()) == 0 { + if len(resp.Warnings) == 0 { return nil, p.Persist(req.Storage) } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config_test.go new file mode 100644 index 0000000..6819710 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_config_test.go @@ -0,0 +1,223 @@ +package transit + +import ( + "strconv" + "strings" + "testing" + + "github.com/hashicorp/vault/logical" +) + +func TestTransit_ConfigSettings(t *testing.T) { + var b *backend + sysView := logical.TestSystemView() + storage := &logical.InmemStorage{} + + b = Backend(&logical.BackendConfig{ + StorageView: storage, + System: sysView, + }) + + doReq := func(req *logical.Request) *logical.Response { + resp, err := b.HandleRequest(req) + if err != nil { + t.Fatalf("got err:\n%#v\nreq:\n%#v\n", err, *req) + } + return resp + } + doErrReq := func(req *logical.Request) { + resp, err := b.HandleRequest(req) + if err == nil { + if resp == nil || !resp.IsError() { + t.Fatalf("expected error; req:\n%#v\n", *req) + } + } + } + + // First create a key + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/aes", + Data: map[string]interface{}{ + "derived": true, + }, + } + doReq(req) + + req.Path = "keys/ed" + req.Data["type"] = "ed25519" + doReq(req) + + delete(req.Data, "derived") + + req.Path = "keys/p256" + 
req.Data["type"] = "ecdsa-p256" + doReq(req) + + delete(req.Data, "type") + + req.Path = "keys/aes/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/ed/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/p256/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/aes/config" + // Too high + req.Data["min_decryption_version"] = 7 + doErrReq(req) + // Too low + req.Data["min_decryption_version"] = -1 + doErrReq(req) + + delete(req.Data, "min_decryption_version") + // Too high + req.Data["min_encryption_version"] = 7 + doErrReq(req) + // Too low + req.Data["min_encryption_version"] = 7 + doErrReq(req) + + // Not allowed, cannot decrypt + req.Data["min_decryption_version"] = 3 + req.Data["min_encryption_version"] = 2 + doErrReq(req) + + // Allowed + req.Data["min_decryption_version"] = 2 + req.Data["min_encryption_version"] = 3 + doReq(req) + req.Path = "keys/ed/config" + doReq(req) + req.Path = "keys/p256/config" + doReq(req) + + req.Data = map[string]interface{}{ + "plaintext": "abcd", + "context": "abcd", + } + + maxKeyVersion := 5 + key := "aes" + + testHMAC := func(ver int, valid bool) { + req.Path = "hmac/" + key + delete(req.Data, "hmac") + if ver == maxKeyVersion { + delete(req.Data, "key_version") + } else { + req.Data["key_version"] = ver + } + + if !valid { + doErrReq(req) + return + } + + resp := doReq(req) + ct := resp.Data["hmac"].(string) + if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { + t.Fatal("wrong hmac version") + } + + req.Path = "verify/" + key + delete(req.Data, "key_version") + req.Data["hmac"] = resp.Data["hmac"] + doReq(req) + } + + testEncryptDecrypt := func(ver int, valid bool) { + req.Path = "encrypt/" + key + delete(req.Data, "ciphertext") + if ver == maxKeyVersion { + delete(req.Data, "key_version") + } else { + req.Data["key_version"] = ver + } + + if !valid { + doErrReq(req) + return + } + + resp := doReq(req) + ct := 
resp.Data["ciphertext"].(string) + if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { + t.Fatal("wrong encryption version") + } + + req.Path = "decrypt/" + key + delete(req.Data, "key_version") + req.Data["ciphertext"] = resp.Data["ciphertext"] + doReq(req) + } + testEncryptDecrypt(5, true) + testEncryptDecrypt(4, true) + testEncryptDecrypt(3, true) + testEncryptDecrypt(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + delete(req.Data, "plaintext") + req.Data["input"] = "abcd" + key = "ed" + testSignVerify := func(ver int, valid bool) { + req.Path = "sign/" + key + delete(req.Data, "signature") + if ver == maxKeyVersion { + delete(req.Data, "key_version") + } else { + req.Data["key_version"] = ver + } + + if !valid { + doErrReq(req) + return + } + + resp := doReq(req) + ct := resp.Data["signature"].(string) + if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { + t.Fatal("wrong signature version") + } + + req.Path = "verify/" + key + delete(req.Data, "key_version") + req.Data["signature"] = resp.Data["signature"] + doReq(req) + } + testSignVerify(5, true) + testSignVerify(4, true) + testSignVerify(3, true) + testSignVerify(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + delete(req.Data, "context") + key = "p256" + testSignVerify(5, true) + testSignVerify(4, true) + testSignVerify(3, true) + testSignVerify(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) +} diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go index 36c6aea..7af1a03 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_datakey.go @@ -32,7 +32,7 @@ ciphertext; "wrapped" will return the ciphertext only.`, "nonce": &framework.FieldSchema{ Type: 
framework.TypeString, - Description: "Nonce for when convergent encryption is used", + Description: "Nonce for when convergent encryption v1 is used (only in Vault 0.6.1)", }, "bits": &framework.FieldSchema{ @@ -41,6 +41,14 @@ ciphertext; "wrapped" will return the ciphertext only.`, and 512 bits are supported. Defaults to 256.`, Default: 256, }, + + "key_version": &framework.FieldSchema{ + Type: framework.TypeInt, + Description: `The version of the Vault key to use for +encryption of the data key. Must be 0 (for latest) +or a value greater than or equal to the +min_encryption_version configured on the key.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -55,6 +63,7 @@ and 512 bits are supported. Defaults to 256.`, func (b *backend) pathDatakeyWrite( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) + ver := d.Get("key_version").(int) plaintext := d.Get("plaintext").(string) plaintextAllowed := false @@ -97,7 +106,7 @@ func (b *backend) pathDatakeyWrite( return nil, err } if p == nil { - return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest } newKey := make([]byte, 32) @@ -116,7 +125,7 @@ func (b *backend) pathDatakeyWrite( return nil, err } - ciphertext, err := p.Encrypt(context, nonce, base64.StdEncoding.EncodeToString(newKey)) + ciphertext, err := p.Encrypt(ver, context, nonce, base64.StdEncoding.EncodeToString(newKey)) if err != nil { switch err.(type) { case errutil.UserError: diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go index c66931d..9750beb 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_decrypt.go @@ -119,7 +119,7 @@ func (b *backend) pathDecryptWrite( 
return nil, err } if p == nil { - return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest } for i, item := range batchInputItems { diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go index b4281d6..3b60198 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_encrypt.go @@ -29,6 +29,9 @@ type BatchRequestItem struct { // Nonce to be used when v1 convergent encryption is used Nonce string `json:"nonce" structs:"nonce" mapstructure:"nonce"` + // The key version to be used for encryption + KeyVersion int `json:"key_version" structs:"key_version" mapstructure:"key_version"` + // DecodedNonce is the base64 decoded version of Nonce DecodedNonce []byte } @@ -100,6 +103,13 @@ same ciphertext is generated. It is *very important* when using this mode that you ensure that all nonces are unique for a given context. Failing to do so will severely impact the ciphertext's security.`, }, + + "key_version": &framework.FieldSchema{ + Type: framework.TypeInt, + Description: `The version of the key to use for encryption. 
+Must be 0 (for latest) or a value greater than or equal +to the min_encryption_version configured on the key.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -151,9 +161,10 @@ func (b *backend) pathEncryptWrite( batchInputItems = make([]BatchRequestItem, 1) batchInputItems[0] = BatchRequestItem{ - Plaintext: valueRaw.(string), - Context: d.Get("context").(string), - Nonce: d.Get("nonce").(string), + Plaintext: valueRaw.(string), + Context: d.Get("context").(string), + Nonce: d.Get("nonce").(string), + KeyVersion: d.Get("key_version").(int), } } @@ -233,7 +244,7 @@ func (b *backend) pathEncryptWrite( return nil, err } if p == nil { - return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest } // Process batch request items. If encryption of any request @@ -244,7 +255,7 @@ func (b *backend) pathEncryptWrite( continue } - ciphertext, err := p.Encrypt(item.DecodedContext, item.DecodedNonce, item.Plaintext) + ciphertext, err := p.Encrypt(item.KeyVersion, item.DecodedContext, item.DecodedNonce, item.Plaintext) if err != nil { switch err.(type) { case errutil.UserError: diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go index 1f7350e..a18db91 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export.go @@ -151,7 +151,7 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st case exportTypeEncryptionKey: switch policy.Type { case keysutil.KeyType_AES256_GCM96: - return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.AESKey)), nil + return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil } case exportTypeSigningKey: @@ -162,6 +162,9 @@ func getExportKey(policy *keysutil.Policy, key 
*keysutil.KeyEntry, exportType st return "", err } return ecKey, nil + + case keysutil.KeyType_ED25519: + return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil } } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go index e021ac6..314653c 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_export_test.go @@ -12,8 +12,10 @@ import ( func TestTransit_Export_KeyVersion_ExportsCorrectVersion(t *testing.T) { verifyExportsCorrectVersion(t, "encryption-key", "aes256-gcm96") verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p256") + verifyExportsCorrectVersion(t, "signing-key", "ed25519") verifyExportsCorrectVersion(t, "hmac-key", "aes256-gcm96") verifyExportsCorrectVersion(t, "hmac-key", "ecdsa-p256") + verifyExportsCorrectVersion(t, "hmac-key", "ed25519") } func verifyExportsCorrectVersion(t *testing.T, exportType, keyType string) { @@ -293,6 +295,11 @@ func TestTransit_Export_SigningDoesNotSupportSigning_ReturnsError(t *testing.T) } func TestTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testing.T) { + testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p256") + testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ed25519") +} + +func testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testing.T, keyType string) { var b *backend sysView := logical.TestSystemView() storage := &logical.InmemStorage{} @@ -309,7 +316,7 @@ func TestTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testi } req.Data = map[string]interface{}{ "exportable": true, - "type": "ecdsa-p256", + "type": keyType, } _, err := b.HandleRequest(req) if err != nil { diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go 
b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go index 31c156f..0a4ba19 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_hmac.go @@ -45,6 +45,13 @@ Defaults to "sha2-256".`, Type: framework.TypeString, Description: `Algorithm to use (POST URL parameter)`, }, + + "key_version": &framework.FieldSchema{ + Type: framework.TypeInt, + Description: `The version of the key to use for generating the HMAC. +Must be 0 (for latest) or a value greater than or equal +to the min_encryption_version configured on the key.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -59,6 +66,7 @@ Defaults to "sha2-256".`, func (b *backend) pathHMACWrite( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) + ver := d.Get("key_version").(int) inputB64 := d.Get("input").(string) algorithm := d.Get("urlalgorithm").(string) if algorithm == "" { @@ -79,10 +87,21 @@ func (b *backend) pathHMACWrite( return nil, err } if p == nil { - return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest } - key, err := p.HMACKey(p.LatestVersion) + switch { + case ver == 0: + // Allowed, will use latest; set explicitly here to ensure the string + // is generated properly + ver = p.LatestVersion + case ver == p.LatestVersion: + // Allowed + case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: + return logical.ErrorResponse("cannot generate HMAC: version is too old (disallowed by policy)"), logical.ErrInvalidRequest + } + + key, err := p.HMACKey(ver) if err != nil { return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } @@ -107,7 +126,7 @@ func (b *backend) pathHMACWrite( retBytes := hf.Sum(nil) retStr := base64.StdEncoding.EncodeToString(retBytes) - retStr = fmt.Sprintf("vault:v%s:%s", 
strconv.Itoa(p.LatestVersion), retStr) + retStr = fmt.Sprintf("vault:v%s:%s", strconv.Itoa(ver), retStr) // Generate the response resp := &logical.Response{ @@ -162,7 +181,7 @@ func (b *backend) pathHMACVerify( return nil, err } if p == nil { - return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest } if ver > p.LatestVersion { diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go index a69c555..ad9a918 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys.go @@ -2,9 +2,14 @@ package transit import ( "crypto/elliptic" + "encoding/base64" "fmt" "strconv" + "time" + "golang.org/x/crypto/ed25519" + + "github.com/fatih/structs" "github.com/hashicorp/vault/helper/keysutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" @@ -36,8 +41,8 @@ func (b *backend) pathKeys() *framework.Path { Type: framework.TypeString, Default: "aes256-gcm96", Description: `The type of key to create. Currently, -"aes256-gcm96" (symmetric) and "ecdsa-p256" (asymmetric) are -supported. Defaults to "aes256-gcm96".`, +"aes256-gcm96" (symmetric) and "ecdsa-p256" (asymmetric), and +'ed25519' (asymmetric) are supported. Defaults to "aes256-gcm96".`, }, "derived": &framework.FieldSchema{ @@ -69,6 +74,14 @@ impact the ciphertext's security.`, This allows for all the valid keys in the key ring to be exported.`, }, + + "context": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Base64 encoded context for key derivation. 
+When reading a key with key derivation enabled, +if the key type supports public keys, this will +return the public key for the given context.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -116,6 +129,8 @@ func (b *backend) pathPolicyWrite( polReq.KeyType = keysutil.KeyType_AES256_GCM96 case "ecdsa-p256": polReq.KeyType = keysutil.KeyType_ECDSA_P256 + case "ed25519": + polReq.KeyType = keysutil.KeyType_ED25519 default: return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest } @@ -139,6 +154,13 @@ func (b *backend) pathPolicyWrite( return nil, nil } +// Built-in helper type for returning asymmetric keys +type asymKey struct { + Name string `json:"name" structs:"name" mapstructure:"name"` + PublicKey string `json:"public_key" structs:"public_key" mapstructure:"public_key"` + CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"` +} + func (b *backend) pathPolicyRead( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) @@ -162,6 +184,7 @@ func (b *backend) pathPolicyRead( "derived": p.Derived, "deletion_allowed": p.DeletionAllowed, "min_decryption_version": p.MinDecryptionVersion, + "min_encryption_version": p.MinEncryptionVersion, "latest_version": p.LatestVersion, "exportable": p.Exportable, "supports_encryption": p.Type.EncryptionSupported(), @@ -185,25 +208,54 @@ func (b *backend) pathPolicyRead( } } + contextRaw := d.Get("context").(string) + var context []byte + if len(contextRaw) != 0 { + context, err = base64.StdEncoding.DecodeString(contextRaw) + if err != nil { + return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest + } + } + switch p.Type { case keysutil.KeyType_AES256_GCM96: retKeys := map[string]int64{} for k, v := range p.Keys { - retKeys[strconv.Itoa(k)] = v.CreationTime + retKeys[strconv.Itoa(k)] = v.DeprecatedCreationTime } resp.Data["keys"] = 
retKeys - case keysutil.KeyType_ECDSA_P256: - type ecdsaKey struct { - Name string `json:"name"` - PublicKey string `json:"public_key"` - } - retKeys := map[string]ecdsaKey{} + case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ED25519: + retKeys := map[string]map[string]interface{}{} for k, v := range p.Keys { - retKeys[strconv.Itoa(k)] = ecdsaKey{ - Name: elliptic.P256().Params().Name, - PublicKey: v.FormattedPublicKey, + key := asymKey{ + PublicKey: v.FormattedPublicKey, + CreationTime: v.CreationTime, } + if key.CreationTime.IsZero() { + key.CreationTime = time.Unix(v.DeprecatedCreationTime, 0) + } + + switch p.Type { + case keysutil.KeyType_ECDSA_P256: + key.Name = elliptic.P256().Params().Name + case keysutil.KeyType_ED25519: + if p.Derived { + if len(context) == 0 { + key.PublicKey = "" + } else { + derived, err := p.DeriveKey(context, k) + if err != nil { + return nil, fmt.Errorf("failed to derive key to return public component") + } + pubKey := ed25519.PrivateKey(derived).Public().(ed25519.PublicKey) + key.PublicKey = base64.StdEncoding.EncodeToString(pubKey) + } + } + key.Name = "ed25519" + } + + retKeys[strconv.Itoa(k)] = structs.New(key).Map() } resp.Data["keys"] = retKeys } diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys_test.go new file mode 100644 index 0000000..7a87fdd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_keys_test.go @@ -0,0 +1,77 @@ +package transit_test + +import ( + "testing" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/builtin/audit/file" + "github.com/hashicorp/vault/builtin/logical/transit" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/vault" +) + +func TestTransit_Issue_2958(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: 
map[string]logical.Factory{ + "transit": transit.Factory, + }, + AuditBackends: map[string]audit.Factory{ + "file": file.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + client := cores[0].Client + + err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": "/dev/null", + }, + }) + if err != nil { + t.Fatal(err) + } + + err = client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("transit/keys/foo", map[string]interface{}{ + "type": "ecdsa-p256", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("transit/keys/bar", map[string]interface{}{ + "type": "ed25519", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Read("transit/keys/foo") + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Read("transit/keys/bar") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go index 167656a..81e811a 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_rewrap.go @@ -33,6 +33,13 @@ func (b *backend) pathRewrap() *framework.Path { Type: framework.TypeString, Description: "Nonce for when convergent encryption is used", }, + + "key_version": &framework.FieldSchema{ + Type: framework.TypeInt, + Description: `The version of the key to use for encryption. 
+Must be 0 (for latest) or a value greater than or equal +to the min_encryption_version configured on the key.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -69,6 +76,7 @@ func (b *backend) pathRewrapWrite( Ciphertext: ciphertext, Context: d.Get("context").(string), Nonce: d.Get("nonce").(string), + KeyVersion: d.Get("key_version").(int), } } @@ -113,7 +121,7 @@ func (b *backend) pathRewrapWrite( return nil, err } if p == nil { - return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest } for i, item := range batchInputItems { @@ -132,7 +140,7 @@ func (b *backend) pathRewrapWrite( } } - ciphertext, err := p.Encrypt(item.DecodedContext, item.DecodedNonce, plaintext) + ciphertext, err := p.Encrypt(item.KeyVersion, item.DecodedContext, item.DecodedNonce, plaintext) if err != nil { switch err.(type) { case errutil.UserError: diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go index ff01880..549ae05 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify.go @@ -26,6 +26,12 @@ func (b *backend) pathSign() *framework.Path { Description: "The base64-encoded input data", }, + "context": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Base64 encoded context for key derivation. Required if key +derivation is enabled; currently only available with ed25519 keys.`, + }, + "algorithm": &framework.FieldSchema{ Type: framework.TypeString, Default: "sha2-256", @@ -36,13 +42,21 @@ func (b *backend) pathSign() *framework.Path { * sha2-384 * sha2-512 -Defaults to "sha2-256".`, +Defaults to "sha2-256". 
Not valid for all key types, +including ed25519.`, }, "urlalgorithm": &framework.FieldSchema{ Type: framework.TypeString, Description: `Hash algorithm to use (POST URL parameter)`, }, + + "key_version": &framework.FieldSchema{ + Type: framework.TypeInt, + Description: `The version of the key to use for signing. +Must be 0 (for latest) or a value greater than or equal +to the min_encryption_version configured on the key.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -63,6 +77,12 @@ func (b *backend) pathVerify() *framework.Path { Description: "The key to use", }, + "context": &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Base64 encoded context for key derivation. Required if key +derivation is enabled; currently only available with ed25519 keys.`, + }, + "signature": &framework.FieldSchema{ Type: framework.TypeString, Description: "The signature, including vault header/key version", @@ -93,7 +113,7 @@ func (b *backend) pathVerify() *framework.Path { * sha2-384 * sha2-512 -Defaults to "sha2-256".`, +Defaults to "sha2-256". 
Not valid for all key types.`, }, }, @@ -109,6 +129,7 @@ Defaults to "sha2-256".`, func (b *backend) pathSignWrite( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) + ver := d.Get("key_version").(int) inputB64 := d.Get("input").(string) algorithm := d.Get("urlalgorithm").(string) if algorithm == "" { @@ -120,22 +141,6 @@ func (b *backend) pathSignWrite( return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest } - var hf hash.Hash - switch algorithm { - case "sha2-224": - hf = sha256.New224() - case "sha2-256": - hf = sha256.New() - case "sha2-384": - hf = sha512.New384() - case "sha2-512": - hf = sha512.New() - default: - return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil - } - hf.Write(input) - hashedInput := hf.Sum(nil) - // Get the policy p, lock, err := b.lm.GetPolicyShared(req.Storage, name) if lock != nil { @@ -145,27 +150,59 @@ func (b *backend) pathSignWrite( return nil, err } if p == nil { - return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest } if !p.Type.SigningSupported() { return logical.ErrorResponse(fmt.Sprintf("key type %v does not support signing", p.Type)), logical.ErrInvalidRequest } - sig, err := p.Sign(hashedInput) + contextRaw := d.Get("context").(string) + var context []byte + if len(contextRaw) != 0 { + context, err = base64.StdEncoding.DecodeString(contextRaw) + if err != nil { + return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest + } + } + + if p.Type.HashSignatureInput() { + var hf hash.Hash + switch algorithm { + case "sha2-224": + hf = sha256.New224() + case "sha2-256": + hf = sha256.New() + case "sha2-384": + hf = sha512.New384() + case "sha2-512": + hf = sha512.New() + default: + return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm 
%s", algorithm)), nil + } + hf.Write(input) + input = hf.Sum(nil) + } + + sig, err := p.Sign(ver, context, input) if err != nil { return nil, err } - if sig == "" { + if sig == nil { return nil, fmt.Errorf("signature could not be computed") } // Generate the response resp := &logical.Response{ Data: map[string]interface{}{ - "signature": sig, + "signature": sig.Signature, }, } + + if len(sig.PublicKey) > 0 { + resp.Data["public_key"] = sig.PublicKey + } + return resp, nil } @@ -197,22 +234,6 @@ func (b *backend) pathVerifyWrite( return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest } - var hf hash.Hash - switch algorithm { - case "sha2-224": - hf = sha256.New224() - case "sha2-256": - hf = sha256.New() - case "sha2-384": - hf = sha512.New384() - case "sha2-512": - hf = sha512.New() - default: - return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil - } - hf.Write(input) - hashedInput := hf.Sum(nil) - // Get the policy p, lock, err := b.lm.GetPolicyShared(req.Storage, name) if lock != nil { @@ -222,10 +243,41 @@ func (b *backend) pathVerifyWrite( return nil, err } if p == nil { - return logical.ErrorResponse("policy not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest } - valid, err := p.VerifySignature(hashedInput, sig) + if !p.Type.SigningSupported() { + return logical.ErrorResponse(fmt.Sprintf("key type %v does not support verification", p.Type)), logical.ErrInvalidRequest + } + + contextRaw := d.Get("context").(string) + var context []byte + if len(contextRaw) != 0 { + context, err = base64.StdEncoding.DecodeString(contextRaw) + if err != nil { + return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest + } + } + + if p.Type.HashSignatureInput() { + var hf hash.Hash + switch algorithm { + case "sha2-224": + hf = sha256.New224() + case "sha2-256": + hf = sha256.New() + 
case "sha2-384": + hf = sha512.New384() + case "sha2-512": + hf = sha512.New() + default: + return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil + } + hf.Write(input) + input = hf.Sum(nil) + } + + valid, err := p.VerifySignature(context, input, sig) if err != nil { switch err.(type) { case errutil.UserError: diff --git a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go index 3a41c28..4abdad6 100644 --- a/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go +++ b/vendor/github.com/hashicorp/vault/builtin/logical/transit/path_sign_verify_test.go @@ -1,12 +1,17 @@ package transit import ( + "encoding/base64" + "strings" "testing" + "golang.org/x/crypto/ed25519" + "github.com/hashicorp/vault/logical" + "github.com/mitchellh/mapstructure" ) -func TestTransit_SignVerify(t *testing.T) { +func TestTransit_SignVerify_P256(t *testing.T) { var b *backend sysView := logical.TestSystemView() storage := &logical.InmemStorage{} @@ -91,7 +96,7 @@ func TestTransit_SignVerify(t *testing.T) { } if errExpected { if !resp.IsError() { - t.Fatalf("bad: got error response: %#v", *resp) + t.Fatalf("bad: should have gotten error response: %#v", *resp) } return "" } @@ -114,7 +119,7 @@ func TestTransit_SignVerify(t *testing.T) { } if errExpected { if resp != nil && !resp.IsError() { - t.Fatalf("bad: got error response: %#v", *resp) + t.Fatalf("bad: should have gotten error response: %#v", *resp) } return } @@ -199,3 +204,210 @@ func TestTransit_SignVerify(t *testing.T) { // Now try the v1 verifyRequest(req, true, "", v1sig) } + +func TestTransit_SignVerify_ED25519(t *testing.T) { + var b *backend + sysView := logical.TestSystemView() + storage := &logical.InmemStorage{} + + b = Backend(&logical.BackendConfig{ + StorageView: storage, + System: sysView, + }) + + // First create a key + req := &logical.Request{ + 
Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + Data: map[string]interface{}{ + "type": "ed25519", + }, + } + _, err := b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + // Now create a derived key" + req = &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/bar", + Data: map[string]interface{}{ + "type": "ed25519", + "derived": true, + }, + } + _, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + // Get the keys for later + fooP, lock, err := b.lm.GetPolicyShared(storage, "foo") + if err != nil { + t.Fatal(err) + } + // We don't care as we're the only one using this + lock.RUnlock() + + barP, lock, err := b.lm.GetPolicyShared(storage, "bar") + if err != nil { + t.Fatal(err) + } + lock.RUnlock() + + signRequest := func(req *logical.Request, errExpected bool, postpath string) string { + // Delete any key that exists in the request + delete(req.Data, "public_key") + req.Path = "sign/" + postpath + resp, err := b.HandleRequest(req) + if err != nil && !errExpected { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errExpected { + if !resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + return "" + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + value, ok := resp.Data["signature"] + if !ok { + t.Fatalf("no signature key found in returned data, got resp data %#v", resp.Data) + } + // memoize any pubic key + if key, ok := resp.Data["public_key"]; ok { + req.Data["public_key"] = key + } + return value.(string) + } + + verifyRequest := func(req *logical.Request, errExpected bool, postpath, sig string) { + req.Path = "verify/" + postpath + req.Data["signature"] = sig + resp, err := b.HandleRequest(req) + if err != nil && !errExpected { + t.Fatalf("got error: %v, sig was %v", err, sig) + } + if errExpected { + if resp != nil && !resp.IsError() { + t.Fatalf("bad: got error response: %#v", 
*resp) + } + return + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + value, ok := resp.Data["valid"] + if !ok { + t.Fatalf("no valid key found in returned data, got resp data %#v", resp.Data) + } + if !value.(bool) && !errExpected { + t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp) + } + + if pubKeyRaw, ok := req.Data["public_key"]; ok { + input, _ := base64.StdEncoding.DecodeString(req.Data["input"].(string)) + splitSig := strings.Split(sig, ":") + signature, _ := base64.StdEncoding.DecodeString(splitSig[2]) + if !ed25519.Verify(ed25519.PublicKey(pubKeyRaw.([]byte)), input, signature) && !errExpected { + t.Fatal("invalid signature") + } + + keyReadReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "keys/" + postpath, + } + keyReadResp, err := b.HandleRequest(keyReadReq) + if err != nil { + t.Fatal(err) + } + val := keyReadResp.Data["keys"].(map[string]map[string]interface{})[strings.TrimPrefix(splitSig[1], "v")] + var ak asymKey + if err := mapstructure.Decode(val, &ak); err != nil { + t.Fatal(err) + } + if ak.PublicKey != "" { + t.Fatal("got non-empty public key") + } + keyReadReq.Data = map[string]interface{}{ + "context": "abcd", + } + keyReadResp, err = b.HandleRequest(keyReadReq) + if err != nil { + t.Fatal(err) + } + val = keyReadResp.Data["keys"].(map[string]map[string]interface{})[strings.TrimPrefix(splitSig[1], "v")] + if err := mapstructure.Decode(val, &ak); err != nil { + t.Fatal(err) + } + if ak.PublicKey != base64.StdEncoding.EncodeToString(pubKeyRaw.([]byte)) { + t.Fatalf("got incorrect public key; got %q, expected %q\nasymKey struct is\n%#v", ak.PublicKey, pubKeyRaw, ak) + } + } + } + + req.Data = map[string]interface{}{ + "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", + "context": "abcd", + } + + // Test defaults + sig := signRequest(req, false, "foo") + verifyRequest(req, false, "foo", sig) + + sig = signRequest(req, 
false, "bar") + verifyRequest(req, false, "bar", sig) + + // Test a bad signature + verifyRequest(req, true, "foo", sig[0:len(sig)-2]) + verifyRequest(req, true, "bar", sig[0:len(sig)-2]) + + v1sig := sig + + // Rotate and set min decryption version + err = fooP.Rotate(storage) + if err != nil { + t.Fatal(err) + } + err = fooP.Rotate(storage) + if err != nil { + t.Fatal(err) + } + fooP.MinDecryptionVersion = 2 + if err = fooP.Persist(storage); err != nil { + t.Fatal(err) + } + err = barP.Rotate(storage) + if err != nil { + t.Fatal(err) + } + err = barP.Rotate(storage) + if err != nil { + t.Fatal(err) + } + barP.MinDecryptionVersion = 2 + if err = barP.Persist(storage); err != nil { + t.Fatal(err) + } + + // Make sure signing still works fine + sig = signRequest(req, false, "foo") + verifyRequest(req, false, "foo", sig) + // Now try the v1 + verifyRequest(req, true, "foo", v1sig) + // Repeat with the other key + sig = signRequest(req, false, "bar") + verifyRequest(req, false, "bar", sig) + verifyRequest(req, true, "bar", v1sig) +} diff --git a/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go b/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go new file mode 100644 index 0000000..a1c781f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/plugin/backend.go @@ -0,0 +1,231 @@ +package plugin + +import ( + "fmt" + "net/rpc" + "reflect" + "sync" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" + bplugin "github.com/hashicorp/vault/logical/plugin" +) + +var ( + ErrMismatchType = fmt.Errorf("mismatch on mounted backend and plugin backend type") + ErrMismatchPaths = fmt.Errorf("mismatch on mounted backend and plugin backend special paths") +) + +// Factory returns a configured plugin logical.Backend. 
+func Factory(conf *logical.BackendConfig) (logical.Backend, error) { + _, ok := conf.Config["plugin_name"] + if !ok { + return nil, fmt.Errorf("plugin_name not provided") + } + b, err := Backend(conf) + if err != nil { + return nil, err + } + + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil +} + +// Backend returns an instance of the backend, either as a plugin if external +// or as a concrete implementation if builtin, casted as logical.Backend. +func Backend(conf *logical.BackendConfig) (logical.Backend, error) { + var b backend + + name := conf.Config["plugin_name"] + sys := conf.System + + // NewBackend with isMetadataMode set to true + raw, err := bplugin.NewBackend(name, sys, conf.Logger, true) + if err != nil { + return nil, err + } + err = raw.Setup(conf) + if err != nil { + return nil, err + } + // Get SpecialPaths and BackendType + paths := raw.SpecialPaths() + btype := raw.Type() + + // Cleanup meta plugin backend + raw.Cleanup() + + // Initialize b.Backend with dummy backend since plugin + // backends will need to be lazy loaded. + b.Backend = &framework.Backend{ + PathsSpecial: paths, + BackendType: btype, + } + + b.config = conf + + return &b, nil +} + +// backend is a thin wrapper around plugin.BackendPluginClient +type backend struct { + logical.Backend + sync.RWMutex + + config *logical.BackendConfig + + // Used to detect if we already reloaded + canary string + + // Used to detect if plugin is set + loaded bool +} + +func (b *backend) reloadBackend() error { + b.Logger().Trace("plugin: reloading plugin backend", "plugin", b.config.Config["plugin_name"]) + return b.startBackend() +} + +// startBackend starts a plugin backend +func (b *backend) startBackend() error { + pluginName := b.config.Config["plugin_name"] + + // Ensure proper cleanup of the backend (i.e. 
call client.Kill()) + b.Backend.Cleanup() + + nb, err := bplugin.NewBackend(pluginName, b.config.System, b.config.Logger, false) + if err != nil { + return err + } + err = nb.Setup(b.config) + if err != nil { + return err + } + + // If the backend has not been loaded (i.e. still in metadata mode), + // check if type and special paths still matches + if !b.loaded { + if b.Backend.Type() != nb.Type() { + nb.Cleanup() + b.Logger().Warn("plugin: failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchType) + return ErrMismatchType + } + if !reflect.DeepEqual(b.Backend.SpecialPaths(), nb.SpecialPaths()) { + nb.Cleanup() + b.Logger().Warn("plugin: failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchPaths) + return ErrMismatchPaths + } + } + + b.Backend = nb + b.loaded = true + + // Call initialize + if err := b.Backend.Initialize(); err != nil { + return err + } + + return nil +} + +// HandleRequest is a thin wrapper implementation of HandleRequest that includes automatic plugin reload. +func (b *backend) HandleRequest(req *logical.Request) (*logical.Response, error) { + b.RLock() + canary := b.canary + + // Lazy-load backend + if !b.loaded { + // Upgrade lock + b.RUnlock() + b.Lock() + // Check once more after lock swap + if !b.loaded { + err := b.startBackend() + if err != nil { + b.Unlock() + return nil, err + } + } + b.Unlock() + b.RLock() + } + resp, err := b.Backend.HandleRequest(req) + b.RUnlock() + // Need to compare string value for case were err comes from plugin RPC + // and is returned as plugin.BasicError type. 
+ if err != nil && err.Error() == rpc.ErrShutdown.Error() { + // Reload plugin if it's an rpc.ErrShutdown + b.Lock() + if b.canary == canary { + err := b.reloadBackend() + if err != nil { + b.Unlock() + return nil, err + } + b.canary, err = uuid.GenerateUUID() + if err != nil { + b.Unlock() + return nil, err + } + } + b.Unlock() + + // Try request once more + b.RLock() + defer b.RUnlock() + return b.Backend.HandleRequest(req) + } + return resp, err +} + +// HandleExistenceCheck is a thin wrapper implementation of HandleRequest that includes automatic plugin reload. +func (b *backend) HandleExistenceCheck(req *logical.Request) (bool, bool, error) { + b.RLock() + canary := b.canary + + // Lazy-load backend + if !b.loaded { + // Upgrade lock + b.RUnlock() + b.Lock() + // Check once more after lock swap + if !b.loaded { + err := b.startBackend() + if err != nil { + b.Unlock() + return false, false, err + } + } + b.Unlock() + b.RLock() + } + + checkFound, exists, err := b.Backend.HandleExistenceCheck(req) + b.RUnlock() + if err != nil && err.Error() == rpc.ErrShutdown.Error() { + // Reload plugin if it's an rpc.ErrShutdown + b.Lock() + if b.canary == canary { + err := b.reloadBackend() + if err != nil { + b.Unlock() + return false, false, err + } + b.canary, err = uuid.GenerateUUID() + if err != nil { + b.Unlock() + return false, false, err + } + } + b.Unlock() + + // Try request once more + b.RLock() + defer b.RUnlock() + return b.Backend.HandleExistenceCheck(req) + } + return checkFound, exists, err +} diff --git a/vendor/github.com/hashicorp/vault/builtin/plugin/backend_test.go b/vendor/github.com/hashicorp/vault/builtin/plugin/backend_test.go new file mode 100644 index 0000000..5b07197 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/builtin/plugin/backend_test.go @@ -0,0 +1,96 @@ +package plugin + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/pluginutil" + vaulthttp 
"github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/plugin" + "github.com/hashicorp/vault/logical/plugin/mock" + "github.com/hashicorp/vault/vault" + log "github.com/mgutz/logxi/v1" +) + +func TestBackend_impl(t *testing.T) { + var _ logical.Backend = &backend{} +} + +func TestBackend(t *testing.T) { + config, cleanup := testConfig(t) + defer cleanup() + + _, err := Backend(config) + if err != nil { + t.Fatal(err) + } +} + +func TestBackend_Factory(t *testing.T) { + config, cleanup := testConfig(t) + defer cleanup() + + _, err := Factory(config) + if err != nil { + t.Fatal(err) + } +} + +func TestBackend_PluginMain(t *testing.T) { + args := []string{} + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadaModeEnv) != "true" { + return + } + + caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv) + if caPEM == "" { + t.Fatal("CA cert not passed in") + } + + args = append(args, fmt.Sprintf("--ca-cert=%s", caPEM)) + + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(args) + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig) + + err := plugin.Serve(&plugin.ServeOpts{ + BackendFactoryFunc: mock.Factory, + TLSProviderFunc: tlsProviderFunc, + }) + if err != nil { + t.Fatal(err) + } +} + +func testConfig(t *testing.T) (*logical.BackendConfig, func()) { + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + cores := cluster.Cores + + core := cores[0] + + sys := vault.TestDynamicSystemView(core.Core) + + config := &logical.BackendConfig{ + Logger: logformat.NewVaultLogger(log.LevelTrace), + System: sys, + Config: map[string]string{ + "plugin_name": "mock-plugin", + }, + } + + os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) + + vault.TestAddTestPlugin(t, core.Core, "mock-plugin", 
"TestBackend_PluginMain") + + return config, func() { + cluster.Cleanup() + } +} diff --git a/vendor/github.com/hashicorp/vault/cli/commands.go b/vendor/github.com/hashicorp/vault/cli/commands.go index 7494c06..22c8640 100644 --- a/vendor/github.com/hashicorp/vault/cli/commands.go +++ b/vendor/github.com/hashicorp/vault/cli/commands.go @@ -6,8 +6,11 @@ import ( auditFile "github.com/hashicorp/vault/builtin/audit/file" auditSocket "github.com/hashicorp/vault/builtin/audit/socket" auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog" + "github.com/hashicorp/vault/physical" "github.com/hashicorp/vault/version" + credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin" + credKube "github.com/hashicorp/vault-plugin-auth-kubernetes" credAppId "github.com/hashicorp/vault/builtin/credential/app-id" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" credAws "github.com/hashicorp/vault/builtin/credential/aws" @@ -18,9 +21,27 @@ import ( credRadius "github.com/hashicorp/vault/builtin/credential/radius" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + physAzure "github.com/hashicorp/vault/physical/azure" + physCassandra "github.com/hashicorp/vault/physical/cassandra" + physCockroachDB "github.com/hashicorp/vault/physical/cockroachdb" + physConsul "github.com/hashicorp/vault/physical/consul" + physCouchDB "github.com/hashicorp/vault/physical/couchdb" + physDynamoDB "github.com/hashicorp/vault/physical/dynamodb" + physEtcd "github.com/hashicorp/vault/physical/etcd" + physFile "github.com/hashicorp/vault/physical/file" + physGCS "github.com/hashicorp/vault/physical/gcs" + physInmem "github.com/hashicorp/vault/physical/inmem" + physMSSQL "github.com/hashicorp/vault/physical/mssql" + physMySQL "github.com/hashicorp/vault/physical/mysql" + physPostgreSQL "github.com/hashicorp/vault/physical/postgresql" + physS3 "github.com/hashicorp/vault/physical/s3" + physSwift "github.com/hashicorp/vault/physical/swift" + physZooKeeper 
"github.com/hashicorp/vault/physical/zookeeper" + "github.com/hashicorp/vault/builtin/logical/aws" "github.com/hashicorp/vault/builtin/logical/cassandra" "github.com/hashicorp/vault/builtin/logical/consul" + "github.com/hashicorp/vault/builtin/logical/database" "github.com/hashicorp/vault/builtin/logical/mongodb" "github.com/hashicorp/vault/builtin/logical/mssql" "github.com/hashicorp/vault/builtin/logical/mysql" @@ -28,7 +49,9 @@ import ( "github.com/hashicorp/vault/builtin/logical/postgresql" "github.com/hashicorp/vault/builtin/logical/rabbitmq" "github.com/hashicorp/vault/builtin/logical/ssh" + "github.com/hashicorp/vault/builtin/logical/totp" "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/builtin/plugin" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/command" @@ -59,9 +82,8 @@ func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory { Meta: *metaPtr, }, nil }, - "server": func() (cli.Command, error) { - return &command.ServerCommand{ + c := &command.ServerCommand{ Meta: *metaPtr, AuditBackends: map[string]audit.Factory{ "file": auditFile.Factory, @@ -69,15 +91,18 @@ func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory { "socket": auditSocket.Factory, }, CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - "cert": credCert.Factory, - "aws": credAws.Factory, - "app-id": credAppId.Factory, - "github": credGitHub.Factory, - "userpass": credUserpass.Factory, - "ldap": credLdap.Factory, - "okta": credOkta.Factory, - "radius": credRadius.Factory, + "approle": credAppRole.Factory, + "cert": credCert.Factory, + "aws": credAws.Factory, + "app-id": credAppId.Factory, + "gcp": credGcp.Factory, + "github": credGitHub.Factory, + "userpass": credUserpass.Factory, + "ldap": credLdap.Factory, + "okta": credOkta.Factory, + "radius": credRadius.Factory, + "kubernetes": credKube.Factory, + "plugin": plugin.Factory, }, LogicalBackends: map[string]logical.Factory{ "aws": aws.Factory, 
@@ -91,10 +116,40 @@ func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory { "mysql": mysql.Factory, "ssh": ssh.Factory, "rabbitmq": rabbitmq.Factory, + "database": database.Factory, + "totp": totp.Factory, + "plugin": plugin.Factory, }, + ShutdownCh: command.MakeShutdownCh(), SighupCh: command.MakeSighupCh(), - }, nil + } + + c.PhysicalBackends = map[string]physical.Factory{ + "azure": physAzure.NewAzureBackend, + "cassandra": physCassandra.NewCassandraBackend, + "cockroachdb": physCockroachDB.NewCockroachDBBackend, + "consul": physConsul.NewConsulBackend, + "couchdb": physCouchDB.NewCouchDBBackend, + "couchdb_transactional": physCouchDB.NewTransactionalCouchDBBackend, + "dynamodb": physDynamoDB.NewDynamoDBBackend, + "etcd": physEtcd.NewEtcdBackend, + "file": physFile.NewFileBackend, + "file_transactional": physFile.NewTransactionalFileBackend, + "gcs": physGCS.NewGCSBackend, + "inmem": physInmem.NewInmem, + "inmem_ha": physInmem.NewInmemHA, + "inmem_transactional": physInmem.NewTransactionalInmem, + "inmem_transactional_ha": physInmem.NewTransactionalInmemHA, + "mssql": physMSSQL.NewMSSQLBackend, + "mysql": physMySQL.NewMySQLBackend, + "postgresql": physPostgreSQL.NewPostgreSQLBackend, + "s3": physS3.NewS3Backend, + "swift": physSwift.NewSwiftBackend, + "zookeeper": physZooKeeper.NewZooKeeperBackend, + } + + return c, nil }, "ssh": func() (cli.Command, error) { diff --git a/vendor/github.com/hashicorp/vault/cli/main.go b/vendor/github.com/hashicorp/vault/cli/main.go index 3d0ced3..000e1e9 100644 --- a/vendor/github.com/hashicorp/vault/cli/main.go +++ b/vendor/github.com/hashicorp/vault/cli/main.go @@ -36,9 +36,11 @@ func RunCustom(args []string, commands map[string]cli.CommandFactory) int { } cli := &cli.CLI{ - Args: args, - Commands: commands, - HelpFunc: cli.FilteredHelpFunc(commandsInclude, HelpFunc), + Args: args, + Commands: commands, + Name: "vault", + Autocomplete: true, + HelpFunc: cli.FilteredHelpFunc(commandsInclude, HelpFunc), } exitCode, err 
:= cli.Run() diff --git a/vendor/github.com/hashicorp/vault/command/audit_enable.go b/vendor/github.com/hashicorp/vault/command/audit_enable.go index 3293c79..680a94e 100644 --- a/vendor/github.com/hashicorp/vault/command/audit_enable.go +++ b/vendor/github.com/hashicorp/vault/command/audit_enable.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/vault/helper/kv-builder" "github.com/hashicorp/vault/meta" "github.com/mitchellh/mapstructure" + "github.com/posener/complete" ) // AuditEnableCommand is a Command that mounts a new mount. @@ -72,7 +73,7 @@ func (c *AuditEnableCommand) Run(args []string) int { } err = client.Sys().EnableAuditWithOptions(path, &api.EnableAuditOptions{ - Type: auditType, + Type: auditType, Description: desc, Options: opts, Local: local, @@ -127,3 +128,19 @@ Audit Enable Options: ` return strings.TrimSpace(helpText) } + +func (c *AuditEnableCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictSet( + "file", + "syslog", + "socket", + ) +} + +func (c *AuditEnableCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-description": complete.PredictNothing, + "-path": complete.PredictNothing, + "-local": complete.PredictNothing, + } +} diff --git a/vendor/github.com/hashicorp/vault/command/auth.go b/vendor/github.com/hashicorp/vault/command/auth.go index 2af8780..00b21ce 100644 --- a/vendor/github.com/hashicorp/vault/command/auth.go +++ b/vendor/github.com/hashicorp/vault/command/auth.go @@ -15,13 +15,14 @@ import ( "github.com/hashicorp/vault/helper/password" "github.com/hashicorp/vault/meta" "github.com/mitchellh/mapstructure" + "github.com/posener/complete" "github.com/ryanuber/columnize" ) // AuthHandler is the interface that any auth handlers must implement // to enable auth via the CLI. 
type AuthHandler interface { - Auth(*api.Client, map[string]string) (string, error) + Auth(*api.Client, map[string]string) (*api.Secret, error) Help() string } @@ -37,11 +38,13 @@ type AuthCommand struct { func (c *AuthCommand) Run(args []string) int { var method, authPath string - var methods, methodHelp, noVerify bool + var methods, methodHelp, noVerify, noStore, tokenOnly bool flags := c.Meta.FlagSet("auth", meta.FlagSetDefault) flags.BoolVar(&methods, "methods", false, "") flags.BoolVar(&methodHelp, "method-help", false, "") flags.BoolVar(&noVerify, "no-verify", false, "") + flags.BoolVar(&noStore, "no-store", false, "") + flags.BoolVar(&tokenOnly, "token-only", false, "") flags.StringVar(&method, "method", "", "method") flags.StringVar(&authPath, "path", "", "") flags.Usage = func() { c.Ui.Error(c.Help()) } @@ -127,8 +130,8 @@ func (c *AuthCommand) Run(args []string) int { } // Warn if the VAULT_TOKEN environment variable is set, as that will take - // precedence - if os.Getenv("VAULT_TOKEN") != "" { + // precedence. Don't output on token-only since we're likely piping output. + if os.Getenv("VAULT_TOKEN") != "" && !tokenOnly { c.Ui.Output("==> WARNING: VAULT_TOKEN environment variable set!\n") c.Ui.Output(" The environment variable takes precedence over the value") c.Ui.Output(" set by the auth command. 
Either update the value of the") @@ -164,11 +167,52 @@ func (c *AuthCommand) Run(args []string) int { } // Authenticate - token, err := handler.Auth(client, vars) + secret, err := handler.Auth(client, vars) if err != nil { c.Ui.Error(err.Error()) return 1 } + if secret == nil { + c.Ui.Error("Empty response from auth helper") + return 1 + } + + // If we had requested a wrapped token, we want to unset that request + // before performing further functions + client.SetWrappingLookupFunc(func(string, string) string { + return "" + }) + +CHECK_TOKEN: + var token string + switch { + case secret == nil: + c.Ui.Error("Empty response from auth helper") + return 1 + + case secret.Auth != nil: + token = secret.Auth.ClientToken + + case secret.WrapInfo != nil: + if secret.WrapInfo.WrappedAccessor == "" { + c.Ui.Error("Got a wrapped response from Vault but wrapped reply does not seem to contain a token") + return 1 + } + if tokenOnly { + c.Ui.Output(secret.WrapInfo.Token) + return 0 + } + if noStore { + return OutputSecret(c.Ui, "table", secret) + } + client.SetToken(secret.WrapInfo.Token) + secret, err = client.Logical().Unwrap("") + goto CHECK_TOKEN + + default: + c.Ui.Error("No auth or wrapping info in auth helper response") + return 1 + } // Cache the previous token so that it can be restored if authentication fails var previousToken string @@ -177,14 +221,21 @@ func (c *AuthCommand) Run(args []string) int { return 1 } + if tokenOnly { + c.Ui.Output(token) + return 0 + } + // Store the token! 
- if err := tokenHelper.Store(token); err != nil { - c.Ui.Error(fmt.Sprintf( - "Error storing token: %s\n\n"+ - "Authentication was not successful and did not persist.\n"+ - "Please reauthenticate, or fix the issue above if possible.", - err)) - return 1 + if !noStore { + if err := tokenHelper.Store(token); err != nil { + c.Ui.Error(fmt.Sprintf( + "Error storing token: %s\n\n"+ + "Authentication was not successful and did not persist.\n"+ + "Please reauthenticate, or fix the issue above if possible.", + err)) + return 1 + } } if noVerify { @@ -192,6 +243,16 @@ func (c *AuthCommand) Run(args []string) int { "Authenticated - no token verification has been performed.", )) + if noStore { + if err := tokenHelper.Erase(); err != nil { + c.Ui.Error(fmt.Sprintf( + "Error removing prior token: %s\n\n"+ + "Authentication was successful, but unable to remove the\n"+ + "previous token.", + err)) + return 1 + } + } return 0 } @@ -200,17 +261,28 @@ func (c *AuthCommand) Run(args []string) int { if err != nil { c.Ui.Error(fmt.Sprintf( "Error initializing client to verify the token: %s", err)) - if err := tokenHelper.Store(previousToken); err != nil { - c.Ui.Error(fmt.Sprintf( - "Error restoring the previous token: %s\n\n"+ - "Please reauthenticate with a valid token.", - err)) + if !noStore { + if err := tokenHelper.Store(previousToken); err != nil { + c.Ui.Error(fmt.Sprintf( + "Error restoring the previous token: %s\n\n"+ + "Please reauthenticate with a valid token.", + err)) + } } return 1 } + client.SetWrappingLookupFunc(func(string, string) string { + return "" + }) + + // If in no-store mode it won't have read the token from a token-helper (or + // will read an old one) so set it explicitly + if noStore { + client.SetToken(token) + } // Verify the token - secret, err := client.Auth().Token().LookupSelf() + secret, err = client.Auth().Token().LookupSelf() if err != nil { c.Ui.Error(fmt.Sprintf( "Error validating token: %s", err)) @@ -222,7 +294,7 @@ func (c *AuthCommand) 
Run(args []string) int { } return 1 } - if secret == nil { + if secret == nil && !noStore { c.Ui.Error(fmt.Sprintf("Error: Invalid token")) if err := tokenHelper.Store(previousToken); err != nil { c.Ui.Error(fmt.Sprintf( @@ -233,10 +305,21 @@ func (c *AuthCommand) Run(args []string) int { return 1 } + if noStore { + if err := tokenHelper.Erase(); err != nil { + c.Ui.Error(fmt.Sprintf( + "Error removing prior token: %s\n\n"+ + "Authentication was successful, but unable to remove the\n"+ + "previous token.", + err)) + return 1 + } + } + // Get the policies we have policiesRaw, ok := secret.Data["policies"] - if !ok { - policiesRaw = []string{"unknown"} + if !ok || policiesRaw == nil { + policiesRaw = []interface{}{"unknown"} } var policies []string for _, v := range policiesRaw.([]interface{}) { @@ -244,6 +327,9 @@ func (c *AuthCommand) Run(args []string) int { } output := "Successfully authenticated! You are now logged in." + if noStore { + output += "\nThe token has not been stored to the configured token helper." + } if method != "" { output += "\nThe token below is already saved in the session. You do not" output += "\nneed to \"vault auth\" again with the token." 
@@ -260,15 +346,25 @@ func (c *AuthCommand) Run(args []string) int { } -func (c *AuthCommand) listMethods() int { +func (c *AuthCommand) getMethods() (map[string]*api.AuthMount, error) { client, err := c.Client() if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error initializing client: %s", err)) - return 1 + return nil, err } + client.SetWrappingLookupFunc(func(string, string) string { + return "" + }) auth, err := client.Sys().ListAuth() + if err != nil { + return nil, err + } + + return auth, nil +} + +func (c *AuthCommand) listMethods() int { + auth, err := c.getMethods() if err != nil { c.Ui.Error(fmt.Sprintf( "Error reading auth table: %s", err)) @@ -281,7 +377,7 @@ func (c *AuthCommand) listMethods() int { } sort.Strings(paths) - columns := []string{"Path | Type | Default TTL | Max TTL | Replication Behavior | Description"} + columns := []string{"Path | Type | Accessor | Default TTL | Max TTL | Replication Behavior | Description"} for _, path := range paths { auth := auth[path] defTTL := "system" @@ -297,7 +393,7 @@ func (c *AuthCommand) listMethods() int { replicatedBehavior = "local" } columns = append(columns, fmt.Sprintf( - "%s | %s | %s | %s | %s | %s", path, auth.Type, defTTL, maxTTL, replicatedBehavior, auth.Description)) + "%s | %s | %s | %s | %s | %s | %s", path, auth.Type, auth.Accessor, defTTL, maxTTL, replicatedBehavior, auth.Description)) } c.Ui.Output(columnize.SimpleFormat(columns)) @@ -338,15 +434,21 @@ Usage: vault auth [options] [auth-information] The value of the "-path" flag is supplied to auth providers as the "mount" option in the payload to specify the mount point. 
+ If response wrapping is used (via -wrap-ttl), the returned token will be + automatically unwrapped unless: + * -token-only is used, in which case the wrapping token will be output + * -no-store is used, in which case the details of the wrapping token + will be printed + General Options: ` + meta.GeneralOptionsUsage() + ` Auth Options: - -method=name Outputs help for the authentication method with the given - name for the remote server. If this authentication method - is not available, exit with code 1. + -method=name Use the method given here, which is a type of backend, not + the path. If this authentication method is not available, + exit with code 1. -method-help If set, the help for the selected method will be shown. @@ -355,6 +457,12 @@ Auth Options: -no-verify Do not verify the token after creation; avoids a use count decrement. + -no-store Do not store the token after creation; it will only be + displayed in the command output. + + -token-only Output only the token to stdout. This implies -no-verify + and -no-store. + -path The path at which the auth backend is enabled. If an auth backend is mounted at multiple paths, this option can be used to authenticate against specific paths. @@ -367,7 +475,7 @@ type tokenAuthHandler struct { Token string } -func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (string, error) { +func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (*api.Secret, error) { token := h.Token if token == "" { var err error @@ -377,7 +485,7 @@ func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (string, error) token, err = password.Read(os.Stdin) fmt.Printf("\n") if err != nil { - return "", fmt.Errorf( + return nil, fmt.Errorf( "Error attempting to ask for token. The raw error message\n"+ "is shown below, but the most common reason for this error is\n"+ "that you attempted to pipe a value into auth. 
If you want to\n"+ @@ -387,12 +495,16 @@ func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (string, error) } if token == "" { - return "", fmt.Errorf( + return nil, fmt.Errorf( "A token must be passed to auth. Please view the help\n" + "for more information.") } - return token, nil + return &api.Secret{ + Auth: &api.SecretAuth{ + ClientToken: token, + }, + }, nil } func (h *tokenAuthHandler) Help() string { @@ -411,3 +523,35 @@ tokens are created via the API or command line interface (with the return strings.TrimSpace(help) } + +func (c *AuthCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *AuthCommand) AutocompleteFlags() complete.Flags { + var predictFunc complete.PredictFunc = func(a complete.Args) []string { + auths, err := c.getMethods() + if err != nil { + return []string{} + } + + methods := make([]string, 0, len(auths)) + for _, auth := range auths { + if strings.HasPrefix(auth.Type, a.Last) { + methods = append(methods, auth.Type) + } + } + + return methods + } + + return complete.Flags{ + "-method": predictFunc, + "-methods": complete.PredictNothing, + "-method-help": complete.PredictNothing, + "-no-verify": complete.PredictNothing, + "-no-store": complete.PredictNothing, + "-token-only": complete.PredictNothing, + "-path": complete.PredictNothing, + } +} diff --git a/vendor/github.com/hashicorp/vault/command/auth_enable.go b/vendor/github.com/hashicorp/vault/command/auth_enable.go index 81c7cce..e6b7f20 100644 --- a/vendor/github.com/hashicorp/vault/command/auth_enable.go +++ b/vendor/github.com/hashicorp/vault/command/auth_enable.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/meta" + "github.com/posener/complete" ) // AuthEnableCommand is a Command that enables a new endpoint. 
@@ -14,11 +15,12 @@ type AuthEnableCommand struct { } func (c *AuthEnableCommand) Run(args []string) int { - var description, path string + var description, path, pluginName string var local bool flags := c.Meta.FlagSet("auth-enable", meta.FlagSetDefault) flags.StringVar(&description, "description", "", "") flags.StringVar(&path, "path", "", "") + flags.StringVar(&pluginName, "plugin-name", "", "") flags.BoolVar(&local, "local", false, "") flags.Usage = func() { c.Ui.Error(c.Help()) } if err := flags.Parse(args); err != nil { @@ -36,8 +38,13 @@ func (c *AuthEnableCommand) Run(args []string) int { authType := args[0] // If no path is specified, we default the path to the backend type + // or use the plugin name if it's a plugin backend if path == "" { - path = authType + if authType == "plugin" { + path = pluginName + } else { + path = authType + } } client, err := c.Client() @@ -50,16 +57,24 @@ func (c *AuthEnableCommand) Run(args []string) int { if err := client.Sys().EnableAuthWithOptions(path, &api.EnableAuthOptions{ Type: authType, Description: description, - Local: local, + Config: api.AuthConfigInput{ + PluginName: pluginName, + }, + Local: local, }); err != nil { c.Ui.Error(fmt.Sprintf( "Error: %s", err)) return 2 } + authTypeOutput := fmt.Sprintf("'%s'", authType) + if authType == "plugin" { + authTypeOutput = fmt.Sprintf("plugin '%s'", pluginName) + } + c.Ui.Output(fmt.Sprintf( - "Successfully enabled '%s' at '%s'!", - authType, path)) + "Successfully enabled %s at '%s'!", + authTypeOutput, path)) return 0 } @@ -89,9 +104,38 @@ Auth Enable Options: to the type of the mount. This will make the auth provider available at "/auth/" + -plugin-name Name of the auth plugin to use based from the name + in the plugin catalog. + -local Mark the mount as a local mount. Local mounts are not replicated nor (if a secondary) removed by replication. 
` return strings.TrimSpace(helpText) } + +func (c *AuthEnableCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictSet( + "approle", + "cert", + "aws", + "app-id", + "gcp", + "github", + "userpass", + "ldap", + "okta", + "radius", + "plugin", + ) + +} + +func (c *AuthEnableCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-description": complete.PredictNothing, + "-path": complete.PredictNothing, + "-plugin-name": complete.PredictNothing, + "-local": complete.PredictNothing, + } +} diff --git a/vendor/github.com/hashicorp/vault/command/auth_test.go b/vendor/github.com/hashicorp/vault/command/auth_test.go index 9ffd0ac..8243129 100644 --- a/vendor/github.com/hashicorp/vault/command/auth_test.go +++ b/vendor/github.com/hashicorp/vault/command/auth_test.go @@ -9,6 +9,9 @@ import ( "strings" "testing" + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/meta" @@ -84,6 +87,194 @@ func TestAuth_token(t *testing.T) { } } +func TestAuth_wrapping(t *testing.T) { + baseConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": credUserpass.Factory, + }, + } + cluster := vault.NewTestCluster(t, baseConfig, &vault.TestClusterOptions{ + HandlerFunc: http.Handler, + BaseListenAddress: "127.0.0.1:8200", + }) + cluster.Start() + defer cluster.Cleanup() + + testAuthInit(t) + + client := cluster.Cores[0].Client + err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ + Type: "userpass", + }) + if err != nil { + t.Fatal(err) + } + _, err = client.Logical().Write("auth/userpass/users/foo", map[string]interface{}{ + "password": "bar", + "policies": "zip,zap", + }) + if err != nil { + t.Fatal(err) + } + + ui := new(cli.MockUi) + c := &AuthCommand{ + Meta: meta.Meta{ + Ui: ui, + TokenHelper: DefaultTokenHelper, + }, + Handlers: 
map[string]AuthHandler{ + "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"}, + }, + } + + args := []string{ + "-address", + "https://127.0.0.1:8200", + "-tls-skip-verify", + "-method", + "userpass", + "username=foo", + "password=bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test again with wrapping + ui = new(cli.MockUi) + c = &AuthCommand{ + Meta: meta.Meta{ + Ui: ui, + TokenHelper: DefaultTokenHelper, + }, + Handlers: map[string]AuthHandler{ + "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"}, + }, + } + + args = []string{ + "-address", + "https://127.0.0.1:8200", + "-tls-skip-verify", + "-wrap-ttl", + "5m", + "-method", + "userpass", + "username=foo", + "password=bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test again with no-store + ui = new(cli.MockUi) + c = &AuthCommand{ + Meta: meta.Meta{ + Ui: ui, + TokenHelper: DefaultTokenHelper, + }, + Handlers: map[string]AuthHandler{ + "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"}, + }, + } + + args = []string{ + "-address", + "https://127.0.0.1:8200", + "-tls-skip-verify", + "-wrap-ttl", + "5m", + "-no-store", + "-method", + "userpass", + "username=foo", + "password=bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test again with wrapping and token-only + ui = new(cli.MockUi) + c = &AuthCommand{ + Meta: meta.Meta{ + Ui: ui, + TokenHelper: DefaultTokenHelper, + }, + Handlers: map[string]AuthHandler{ + "userpass": &credUserpass.CLIHandler{DefaultMount: "userpass"}, + }, + } + + args = []string{ + "-address", + "https://127.0.0.1:8200", + "-tls-skip-verify", + "-wrap-ttl", + "5m", + "-token-only", + "-method", + "userpass", + "username=foo", + "password=bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + 
token := strings.TrimSpace(ui.OutputWriter.String()) + if token == "" { + t.Fatal("expected to find token in output") + } + secret, err := client.Logical().Unwrap(token) + if err != nil { + t.Fatal(err) + } + if secret.Auth.ClientToken == "" { + t.Fatal("no client token found") + } +} + +func TestAuth_token_nostore(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := http.TestServer(t, core) + defer ln.Close() + + testAuthInit(t) + + ui := new(cli.MockUi) + c := &AuthCommand{ + Meta: meta.Meta{ + Ui: ui, + TokenHelper: DefaultTokenHelper, + }, + } + + args := []string{ + "-address", addr, + "-no-store", + token, + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + helper, err := c.TokenHelper() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual, err := helper.Get() + if err != nil { + t.Fatalf("err: %s", err) + } + + if actual != "" { + t.Fatalf("bad: %s", actual) + } +} + func TestAuth_stdin(t *testing.T) { core, _, token := vault.TestCoreUnsealed(t) ln, addr := http.TestServer(t, core) @@ -198,8 +389,12 @@ func testAuthInit(t *testing.T) { type testAuthHandler struct{} -func (h *testAuthHandler) Auth(c *api.Client, m map[string]string) (string, error) { - return m["foo"], nil +func (h *testAuthHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) { + return &api.Secret{ + Auth: &api.SecretAuth{ + ClientToken: m["foo"], + }, + }, nil } func (h *testAuthHandler) Help() string { return "" } diff --git a/vendor/github.com/hashicorp/vault/command/format.go b/vendor/github.com/hashicorp/vault/command/format.go index 4520b20..38f24d4 100644 --- a/vendor/github.com/hashicorp/vault/command/format.go +++ b/vendor/github.com/hashicorp/vault/command/format.go @@ -14,9 +14,12 @@ import ( "github.com/ghodss/yaml" "github.com/hashicorp/vault/api" "github.com/mitchellh/cli" + "github.com/posener/complete" "github.com/ryanuber/columnize" ) +var predictFormat complete.Predictor 
= complete.PredictSet("json", "yaml") + func OutputSecret(ui cli.Ui, format string, secret *api.Secret) int { return outputWithFormat(ui, format, secret, secret) } @@ -181,6 +184,7 @@ func (t TableFormatter) OutputSecret(ui cli.Ui, secret, s *api.Secret) error { input = append(input, fmt.Sprintf("wrapping_token: %s %s", config.Delim, s.WrapInfo.Token)) input = append(input, fmt.Sprintf("wrapping_token_ttl: %s %s", config.Delim, (time.Second*time.Duration(s.WrapInfo.TTL)).String())) input = append(input, fmt.Sprintf("wrapping_token_creation_time: %s %s", config.Delim, s.WrapInfo.CreationTime.String())) + input = append(input, fmt.Sprintf("wrapping_token_creation_path: %s %s", config.Delim, s.WrapInfo.CreationPath)) if s.WrapInfo.WrappedAccessor != "" { input = append(input, fmt.Sprintf("wrapped_accessor: %s %s", config.Delim, s.WrapInfo.WrappedAccessor)) } diff --git a/vendor/github.com/hashicorp/vault/command/generate-root.go b/vendor/github.com/hashicorp/vault/command/generate-root.go index f013294..2d9521b 100644 --- a/vendor/github.com/hashicorp/vault/command/generate-root.go +++ b/vendor/github.com/hashicorp/vault/command/generate-root.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/helper/xor" "github.com/hashicorp/vault/meta" + "github.com/posener/complete" ) // GenerateRootCommand is a Command that generates a new root token. 
@@ -352,3 +353,20 @@ Generate Root Options: ` return strings.TrimSpace(helpText) } + +func (c *GenerateRootCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *GenerateRootCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-init": complete.PredictNothing, + "-cancel": complete.PredictNothing, + "-status": complete.PredictNothing, + "-decode": complete.PredictNothing, + "-genotp": complete.PredictNothing, + "-otp": complete.PredictNothing, + "-pgp-key": complete.PredictNothing, + "-nonce": complete.PredictNothing, + } +} diff --git a/vendor/github.com/hashicorp/vault/command/generate-root_test.go b/vendor/github.com/hashicorp/vault/command/generate-root_test.go index 847400d..31d956d 100644 --- a/vendor/github.com/hashicorp/vault/command/generate-root_test.go +++ b/vendor/github.com/hashicorp/vault/command/generate-root_test.go @@ -82,7 +82,7 @@ func TestGenerateRoot_status(t *testing.T) { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } - if !strings.Contains(string(ui.OutputWriter.Bytes()), "Started: true") { + if !strings.Contains(ui.OutputWriter.String(), "Started: true") { t.Fatalf("bad: %s", ui.OutputWriter.String()) } } diff --git a/vendor/github.com/hashicorp/vault/command/init.go b/vendor/github.com/hashicorp/vault/command/init.go index 4c638dc..470c325 100644 --- a/vendor/github.com/hashicorp/vault/command/init.go +++ b/vendor/github.com/hashicorp/vault/command/init.go @@ -11,7 +11,8 @@ import ( "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/meta" - "github.com/hashicorp/vault/physical" + "github.com/hashicorp/vault/physical/consul" + "github.com/posener/complete" ) // InitCommand is a Command that initializes a new Vault server. 
@@ -36,7 +37,7 @@ func (c *InitCommand) Run(args []string) int { flags.Var(&recoveryPgpKeys, "recovery-pgp-keys", "") flags.BoolVar(&check, "check", false, "") flags.BoolVar(&auto, "auto", false, "") - flags.StringVar(&consulServiceName, "consul-service", physical.DefaultServiceName, "") + flags.StringVar(&consulServiceName, "consul-service", consul.DefaultServiceName, "") if err := flags.Parse(args); err != nil { return 1 } @@ -384,3 +385,22 @@ Init Options: ` return strings.TrimSpace(helpText) } + +func (c *InitCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *InitCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-check": complete.PredictNothing, + "-key-shares": complete.PredictNothing, + "-key-threshold": complete.PredictNothing, + "-pgp-keys": complete.PredictNothing, + "-root-token-pgp-key": complete.PredictNothing, + "-recovery-shares": complete.PredictNothing, + "-recovery-threshold": complete.PredictNothing, + "-recovery-pgp-keys": complete.PredictNothing, + "-auto": complete.PredictNothing, + "-consul-service": complete.PredictNothing, + } +} diff --git a/vendor/github.com/hashicorp/vault/command/mount.go b/vendor/github.com/hashicorp/vault/command/mount.go index eb2b53a..895e7b8 100644 --- a/vendor/github.com/hashicorp/vault/command/mount.go +++ b/vendor/github.com/hashicorp/vault/command/mount.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/meta" + "github.com/posener/complete" ) // MountCommand is a Command that mounts a new mount. 
@@ -14,13 +15,14 @@ type MountCommand struct { } func (c *MountCommand) Run(args []string) int { - var description, path, defaultLeaseTTL, maxLeaseTTL string + var description, path, defaultLeaseTTL, maxLeaseTTL, pluginName string var local, forceNoCache bool flags := c.Meta.FlagSet("mount", meta.FlagSetDefault) flags.StringVar(&description, "description", "", "") flags.StringVar(&path, "path", "", "") flags.StringVar(&defaultLeaseTTL, "default-lease-ttl", "", "") flags.StringVar(&maxLeaseTTL, "max-lease-ttl", "", "") + flags.StringVar(&pluginName, "plugin-name", "", "") flags.BoolVar(&forceNoCache, "force-no-cache", false, "") flags.BoolVar(&local, "local", false, "") flags.Usage = func() { c.Ui.Error(c.Help()) } @@ -39,8 +41,13 @@ func (c *MountCommand) Run(args []string) int { mountType := args[0] // If no path is specified, we default the path to the backend type + // or use the plugin name if it's a plugin backend if path == "" { - path = mountType + if mountType == "plugin" { + path = pluginName + } else { + path = mountType + } } client, err := c.Client() @@ -57,6 +64,7 @@ func (c *MountCommand) Run(args []string) int { DefaultLeaseTTL: defaultLeaseTTL, MaxLeaseTTL: maxLeaseTTL, ForceNoCache: forceNoCache, + PluginName: pluginName, }, Local: local, } @@ -67,9 +75,14 @@ func (c *MountCommand) Run(args []string) int { return 2 } + mountTypeOutput := fmt.Sprintf("'%s'", mountType) + if mountType == "plugin" { + mountTypeOutput = fmt.Sprintf("plugin '%s'", pluginName) + } + c.Ui.Output(fmt.Sprintf( - "Successfully mounted '%s' at '%s'!", - mountType, path)) + "Successfully mounted %s at '%s'!", + mountTypeOutput, path)) return 0 } @@ -112,10 +125,40 @@ Mount Options: not affect caching of the underlying encrypted data storage. + -plugin-name Name of the plugin to mount based from the name + in the plugin catalog. + -local Mark the mount as a local mount. Local mounts are not replicated nor (if a secondary) removed by replication. 
- ` return strings.TrimSpace(helpText) } + +func (c *MountCommand) AutocompleteArgs() complete.Predictor { + // This list does not contain deprecated backends + return complete.PredictSet( + "aws", + "consul", + "pki", + "transit", + "ssh", + "rabbitmq", + "database", + "totp", + "plugin", + ) + +} + +func (c *MountCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-description": complete.PredictNothing, + "-path": complete.PredictNothing, + "-default-lease-ttl": complete.PredictNothing, + "-max-lease-ttl": complete.PredictNothing, + "-force-no-cache": complete.PredictNothing, + "-plugin-name": complete.PredictNothing, + "-local": complete.PredictNothing, + } +} diff --git a/vendor/github.com/hashicorp/vault/command/mount_test.go b/vendor/github.com/hashicorp/vault/command/mount_test.go index 314ac13..ea9108c 100644 --- a/vendor/github.com/hashicorp/vault/command/mount_test.go +++ b/vendor/github.com/hashicorp/vault/command/mount_test.go @@ -22,6 +22,46 @@ func TestMount(t *testing.T) { }, } + args := []string{ + "-address", addr, + "kv", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + client, err := c.Client() + if err != nil { + t.Fatalf("err: %s", err) + } + + mounts, err := client.Sys().ListMounts() + if err != nil { + t.Fatalf("err: %s", err) + } + + mount, ok := mounts["kv/"] + if !ok { + t.Fatal("should have kv mount") + } + if mount.Type != "kv" { + t.Fatal("should be kv type") + } +} + +func TestMount_Generic(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := http.TestServer(t, core) + defer ln.Close() + + ui := new(cli.MockUi) + c := &MountCommand{ + Meta: meta.Meta{ + ClientToken: token, + Ui: ui, + }, + } + args := []string{ "-address", addr, "generic", @@ -42,7 +82,7 @@ func TestMount(t *testing.T) { mount, ok := mounts["generic/"] if !ok { - t.Fatal("should have generic mount") + t.Fatal("should have generic mount path") } if mount.Type != 
"generic" { t.Fatal("should be generic type") diff --git a/vendor/github.com/hashicorp/vault/command/mounts.go b/vendor/github.com/hashicorp/vault/command/mounts.go index d918d67..2615776 100644 --- a/vendor/github.com/hashicorp/vault/command/mounts.go +++ b/vendor/github.com/hashicorp/vault/command/mounts.go @@ -42,9 +42,13 @@ func (c *MountsCommand) Run(args []string) int { } sort.Strings(paths) - columns := []string{"Path | Type | Default TTL | Max TTL | Force No Cache | Replication Behavior | Description"} + columns := []string{"Path | Type | Accessor | Plugin | Default TTL | Max TTL | Force No Cache | Replication Behavior | Description"} for _, path := range paths { mount := mounts[path] + pluginName := "n/a" + if mount.Config.PluginName != "" { + pluginName = mount.Config.PluginName + } defTTL := "system" switch { case mount.Type == "system": @@ -68,7 +72,7 @@ func (c *MountsCommand) Run(args []string) int { replicatedBehavior = "local" } columns = append(columns, fmt.Sprintf( - "%s | %s | %s | %s | %v | %s | %s", path, mount.Type, defTTL, maxTTL, + "%s | %s | %s | %s | %s | %s | %v | %s | %s", path, mount.Type, mount.Accessor, pluginName, defTTL, maxTTL, mount.Config.ForceNoCache, replicatedBehavior, mount.Description)) } diff --git a/vendor/github.com/hashicorp/vault/command/policy_write.go b/vendor/github.com/hashicorp/vault/command/policy_write.go index 4f73ffe..59b26fb 100644 --- a/vendor/github.com/hashicorp/vault/command/policy_write.go +++ b/vendor/github.com/hashicorp/vault/command/policy_write.go @@ -37,7 +37,8 @@ func (c *PolicyWriteCommand) Run(args []string) int { return 2 } - name := args[0] + // Policies are normalized to lowercase + name := strings.ToLower(args[0]) path := args[1] // Read the policy diff --git a/vendor/github.com/hashicorp/vault/command/read.go b/vendor/github.com/hashicorp/vault/command/read.go index 6e9c4d7..d989178 100644 --- a/vendor/github.com/hashicorp/vault/command/read.go +++ 
b/vendor/github.com/hashicorp/vault/command/read.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/meta" + "github.com/posener/complete" ) // ReadCommand is a Command that reads data from the Vault. @@ -95,3 +96,14 @@ Read Options: ` return strings.TrimSpace(helpText) } + +func (c *ReadCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *ReadCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-format": predictFormat, + "-field": complete.PredictNothing, + } +} diff --git a/vendor/github.com/hashicorp/vault/command/rekey.go b/vendor/github.com/hashicorp/vault/command/rekey.go index 16022be..bf47c2c 100644 --- a/vendor/github.com/hashicorp/vault/command/rekey.go +++ b/vendor/github.com/hashicorp/vault/command/rekey.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/vault/helper/password" "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/meta" + "github.com/posener/complete" ) // RekeyCommand is a Command that rekeys the vault. 
@@ -418,3 +419,23 @@ Rekey Options: ` return strings.TrimSpace(helpText) } + +func (c *RekeyCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *RekeyCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-init": complete.PredictNothing, + "-cancel": complete.PredictNothing, + "-status": complete.PredictNothing, + "-retrieve": complete.PredictNothing, + "-delete": complete.PredictNothing, + "-key-shares": complete.PredictNothing, + "-key-threshold": complete.PredictNothing, + "-nonce": complete.PredictNothing, + "-pgp-keys": complete.PredictNothing, + "-backup": complete.PredictNothing, + "-recovery-key": complete.PredictNothing, + } +} diff --git a/vendor/github.com/hashicorp/vault/command/rekey_test.go b/vendor/github.com/hashicorp/vault/command/rekey_test.go index 21e4e24..6f12d78 100644 --- a/vendor/github.com/hashicorp/vault/command/rekey_test.go +++ b/vendor/github.com/hashicorp/vault/command/rekey_test.go @@ -182,7 +182,7 @@ func TestRekey_status(t *testing.T) { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } - if !strings.Contains(string(ui.OutputWriter.Bytes()), "Started: true") { + if !strings.Contains(ui.OutputWriter.String(), "Started: true") { t.Fatalf("bad: %s", ui.OutputWriter.String()) } } @@ -199,7 +199,8 @@ func TestRekey_init_pgp(t *testing.T) { MaxLeaseTTLVal: time.Hour * 24 * 32, }, } - sysBackend, err := vault.NewSystemBackend(core, bc) + sysBackend := vault.NewSystemBackend(core) + err := sysBackend.Backend.Setup(bc) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/hashicorp/vault/command/remount.go b/vendor/github.com/hashicorp/vault/command/remount.go index a6defa7..a36f141 100644 --- a/vendor/github.com/hashicorp/vault/command/remount.go +++ b/vendor/github.com/hashicorp/vault/command/remount.go @@ -65,7 +65,7 @@ Usage: vault remount [options] from to the data associated with the backend (such as configuration), will be preserved. 
- Example: vault remount secret/ generic/ + Example: vault remount secret/ kv/ General Options: ` + meta.GeneralOptionsUsage() diff --git a/vendor/github.com/hashicorp/vault/command/remount_test.go b/vendor/github.com/hashicorp/vault/command/remount_test.go index 0d6f191..7ec1321 100644 --- a/vendor/github.com/hashicorp/vault/command/remount_test.go +++ b/vendor/github.com/hashicorp/vault/command/remount_test.go @@ -24,7 +24,7 @@ func TestRemount(t *testing.T) { args := []string{ "-address", addr, - "secret/", "generic", + "secret/", "kv", } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -45,8 +45,8 @@ func TestRemount(t *testing.T) { t.Fatal("should not have mount") } - _, ok = mounts["generic/"] + _, ok = mounts["kv/"] if !ok { - t.Fatal("should have generic") + t.Fatal("should have kv") } } diff --git a/vendor/github.com/hashicorp/vault/command/renew_test.go b/vendor/github.com/hashicorp/vault/command/renew_test.go index d43e516..2191662 100644 --- a/vendor/github.com/hashicorp/vault/command/renew_test.go +++ b/vendor/github.com/hashicorp/vault/command/renew_test.go @@ -90,6 +90,27 @@ func TestRenewBothWays(t *testing.T) { t.Fatal("bad lease duration") } + // Test another + r = client.NewRequest("PUT", "/v1/sys/leases/renew") + body = map[string]interface{}{ + "lease_id": secret.LeaseID, + } + if err := r.SetJSONBody(body); err != nil { + t.Fatal(err) + } + resp, err = client.RawRequest(r) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + secret, err = api.ParseSecret(resp.Body) + if err != nil { + t.Fatal(err) + } + if secret.LeaseDuration != 60 { + t.Fatal("bad lease duration") + } + // Test the other r = client.NewRequest("PUT", "/v1/sys/renew/"+secret.LeaseID) resp, err = client.RawRequest(r) @@ -104,4 +125,19 @@ func TestRenewBothWays(t *testing.T) { if secret.LeaseDuration != 60 { t.Fatalf("bad lease duration; secret is %#v\n", *secret) } + + // Test another + r = client.NewRequest("PUT", 
"/v1/sys/leases/renew/"+secret.LeaseID) + resp, err = client.RawRequest(r) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + secret, err = api.ParseSecret(resp.Body) + if err != nil { + t.Fatal(err) + } + if secret.LeaseDuration != 60 { + t.Fatalf("bad lease duration; secret is %#v\n", *secret) + } } diff --git a/vendor/github.com/hashicorp/vault/command/server.go b/vendor/github.com/hashicorp/vault/command/server.go index c09db3d..e089ef2 100644 --- a/vendor/github.com/hashicorp/vault/command/server.go +++ b/vendor/github.com/hashicorp/vault/command/server.go @@ -3,11 +3,13 @@ package command import ( "encoding/base64" "fmt" + "io/ioutil" "net" "net/http" "net/url" "os" "os/signal" + "path/filepath" "runtime" "sort" "strconv" @@ -20,11 +22,14 @@ import ( colorable "github.com/mattn/go-colorable" log "github.com/mgutz/logxi/v1" + testing "github.com/mitchellh/go-testing-interface" + "github.com/posener/complete" "google.golang.org/grpc/grpclog" "github.com/armon/go-metrics" "github.com/armon/go-metrics/circonus" + "github.com/armon/go-metrics/datadog" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/audit" @@ -33,6 +38,8 @@ import ( "github.com/hashicorp/vault/helper/gated-writer" "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/helper/mlock" + "github.com/hashicorp/vault/helper/parseutil" + "github.com/hashicorp/vault/helper/reload" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/meta" @@ -46,6 +53,7 @@ type ServerCommand struct { AuditBackends map[string]audit.Factory CredentialBackends map[string]logical.Factory LogicalBackends map[string]logical.Factory + PhysicalBackends map[string]physical.Factory ShutdownCh chan struct{} SighupCh chan struct{} @@ -54,26 +62,33 @@ type ServerCommand struct { meta.Meta - logger log.Logger + logGate *gatedwriter.Writer + logger log.Logger cleanupGuard sync.Once reloadFuncsLock 
*sync.RWMutex - reloadFuncs *map[string][]vault.ReloadFunc + reloadFuncs *map[string][]reload.ReloadFunc } func (c *ServerCommand) Run(args []string) int { - var dev, verifyOnly, devHA, devTransactional bool + var dev, verifyOnly, devHA, devTransactional, devLeasedKV, devThreeNode bool var configPath []string - var logLevel, devRootTokenID, devListenAddress string + var logLevel, devRootTokenID, devListenAddress, devPluginDir string + var devLatency, devLatencyJitter int flags := c.Meta.FlagSet("server", meta.FlagSetDefault) flags.BoolVar(&dev, "dev", false, "") flags.StringVar(&devRootTokenID, "dev-root-token-id", "", "") flags.StringVar(&devListenAddress, "dev-listen-address", "", "") + flags.StringVar(&devPluginDir, "dev-plugin-dir", "", "") flags.StringVar(&logLevel, "log-level", "info", "") + flags.IntVar(&devLatency, "dev-latency", 0, "") + flags.IntVar(&devLatencyJitter, "dev-latency-jitter", 20, "") flags.BoolVar(&verifyOnly, "verify-only", false, "") - flags.BoolVar(&devHA, "ha", false, "") - flags.BoolVar(&devTransactional, "transactional", false, "") + flags.BoolVar(&devHA, "dev-ha", false, "") + flags.BoolVar(&devTransactional, "dev-transactional", false, "") + flags.BoolVar(&devLeasedKV, "dev-leased-kv", false, "") + flags.BoolVar(&devThreeNode, "dev-three-node", false, "") flags.Usage = func() { c.Ui.Output(c.Help()) } flags.Var((*sliceflag.StringFlag)(&configPath), "config", "config") if err := flags.Parse(args); err != nil { @@ -82,7 +97,7 @@ func (c *ServerCommand) Run(args []string) int { // Create a logger. We wrap it in a gated writer so that it doesn't // start logging too early. 
- logGate := &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)} + c.logGate = &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)} var level int logLevel = strings.ToLower(strings.TrimSpace(logLevel)) switch logLevel { @@ -109,9 +124,9 @@ func (c *ServerCommand) Run(args []string) int { } switch strings.ToLower(logFormat) { case "vault", "vault_json", "vault-json", "vaultjson", "json", "": - c.logger = logformat.NewVaultLoggerWithWriter(logGate, level) + c.logger = logformat.NewVaultLoggerWithWriter(c.logGate, level) default: - c.logger = log.NewLogger(logGate, "vault") + c.logger = log.NewLogger(c.logGate, "vault") c.logger.SetLevel(level) } grpclog.SetLogger(&grpclogFaker{ @@ -126,7 +141,7 @@ func (c *ServerCommand) Run(args []string) int { devListenAddress = os.Getenv("VAULT_DEV_LISTEN_ADDRESS") } - if devHA || devTransactional { + if devHA || devTransactional || devLeasedKV || devThreeNode { dev = true } @@ -194,8 +209,14 @@ func (c *ServerCommand) Run(args []string) int { } // Initialize the backend - backend, err := physical.NewBackend( - config.Storage.Type, c.logger, config.Storage.Config) + factory, exists := c.PhysicalBackends[config.Storage.Type] + if !exists { + c.Ui.Output(fmt.Sprintf( + "Unknown storage type %s", + config.Storage.Type)) + return 1 + } + backend, err := factory(config.Storage.Config, c.logger) if err != nil { c.Ui.Output(fmt.Sprintf( "Error initializing storage of type %s: %s", @@ -238,9 +259,29 @@ func (c *ServerCommand) Run(args []string) int { DefaultLeaseTTL: config.DefaultLeaseTTL, ClusterName: config.ClusterName, CacheSize: config.CacheSize, + PluginDirectory: config.PluginDirectory, + EnableRaw: config.EnableRawEndpoint, } if dev { coreConfig.DevToken = devRootTokenID + if devLeasedKV { + coreConfig.LogicalBackends["kv"] = vault.LeasedPassthroughBackendFactory + } + if devPluginDir != "" { + coreConfig.PluginDirectory = devPluginDir + } + if devLatency > 0 { + injectLatency := time.Duration(devLatency) * 
time.Millisecond + if _, txnOK := backend.(physical.Transactional); txnOK { + coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, devLatencyJitter, c.logger) + } else { + coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, devLatencyJitter, c.logger) + } + } + } + + if devThreeNode { + return c.enableThreeNodeDevCluster(coreConfig, info, infoKeys, devListenAddress) } var disableClustering bool @@ -248,8 +289,14 @@ func (c *ServerCommand) Run(args []string) int { // Initialize the separate HA storage backend, if it exists var ok bool if config.HAStorage != nil { - habackend, err := physical.NewBackend( - config.HAStorage.Type, c.logger, config.HAStorage.Config) + factory, exists := c.PhysicalBackends[config.HAStorage.Type] + if !exists { + c.Ui.Output(fmt.Sprintf( + "Unknown HA storage type %s", + config.HAStorage.Type)) + return 1 + } + habackend, err := factory(config.HAStorage.Config, c.logger) if err != nil { c.Ui.Output(fmt.Sprintf( "Error initializing HA storage of type %s: %s", @@ -415,16 +462,7 @@ CLUSTER_SYNTHESIS_COMPLETE: c.reloadFuncsLock.Lock() lns := make([]net.Listener, 0, len(config.Listeners)) for i, lnConfig := range config.Listeners { - if lnConfig.Type == "atlas" { - if config.ClusterName == "" { - c.Ui.Output("cluster_name is not set in the config and is a required value") - return 1 - } - - lnConfig.Config["cluster_name"] = config.ClusterName - } - - ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, logGate) + ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, c.logGate) if err != nil { c.Ui.Output(fmt.Sprintf( "Error initializing listener of type %s: %s", @@ -441,9 +479,11 @@ CLUSTER_SYNTHESIS_COMPLETE: } if !disableClustering && lnConfig.Type == "tcp" { + var addrRaw interface{} var addr string var ok bool - if addr, ok = lnConfig.Config["cluster_address"]; ok { + if addrRaw, ok = lnConfig.Config["cluster_address"]; ok { + 
addr = addrRaw.(string) tcpAddr, err := net.ResolveTCPAddr("tcp", addr) if err != nil { c.Ui.Output(fmt.Sprintf( @@ -538,7 +578,7 @@ CLUSTER_SYNTHESIS_COMPLETE: sd, ok := coreConfig.HAPhysical.(physical.ServiceDiscovery) if ok { activeFunc := func() bool { - if isLeader, _, err := core.Leader(); err == nil { + if isLeader, _, _, err := core.Leader(); err == nil { return isLeader } return false @@ -563,11 +603,11 @@ CLUSTER_SYNTHESIS_COMPLETE: // This needs to happen before we first unseal, so before we trigger dev // mode if it's set core.SetClusterListenerAddrs(clusterAddrs) - core.SetClusterSetupFuncs(vault.WrapHandlerForClustering(handler, c.logger)) + core.SetClusterHandler(handler) // If we're in Dev mode, then initialize the core if dev { - init, err := c.enableDev(core, devRootTokenID) + init, err := c.enableDev(core, coreConfig) if err != nil { c.Ui.Output(fmt.Sprintf( "Error initializing Dev mode: %s", err)) @@ -589,7 +629,7 @@ CLUSTER_SYNTHESIS_COMPLETE: "immediately begin using the Vault CLI.\n\n"+ "The only step you need to take is to set the following\n"+ "environment variables:\n\n"+ - " "+export+" VAULT_ADDR="+quote+"http://"+config.Listeners[0].Config["address"]+quote+"\n\n"+ + " "+export+" VAULT_ADDR="+quote+"http://"+config.Listeners[0].Config["address"].(string)+quote+"\n\n"+ "The unseal key and root token are reproduced below in case you\n"+ "want to seal/unseal the Vault or play with authentication.\n\n"+ "Unseal Key: %s\nRoot Token: %s\n", @@ -618,7 +658,19 @@ CLUSTER_SYNTHESIS_COMPLETE: c.Ui.Output("==> Vault server started! Log data will stream in below:\n") // Release the log gate. 
- logGate.Flush() + c.logGate.Flush() + + // Write out the PID to the file now that server has successfully started + if err := c.storePidFile(config.PidFile); err != nil { + c.Ui.Output(fmt.Sprintf("Error storing PID: %v", err)) + return 1 + } + + defer func() { + if err := c.removePidFile(config.PidFile); err != nil { + c.Ui.Output(fmt.Sprintf("Error deleting the PID file: %v", err)) + } + }() // Wait for shutdown shutdownTriggered := false @@ -642,7 +694,7 @@ CLUSTER_SYNTHESIS_COMPLETE: case <-c.SighupCh: c.Ui.Output("==> Vault reload triggered") - if err := c.Reload(configPath); err != nil { + if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, configPath); err != nil { c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err)) } } @@ -653,7 +705,7 @@ CLUSTER_SYNTHESIS_COMPLETE: return 0 } -func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.InitResult, error) { +func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig) (*vault.InitResult, error) { // Initialize it with a basic single key init, err := core.Initialize(&vault.InitParams{ BarrierConfig: &vault.SealConfig{ @@ -679,7 +731,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault. return nil, fmt.Errorf("failed to unseal Vault for dev mode") } - isLeader, _, err := core.Leader() + isLeader, _, _, err := core.Leader() if err != nil && err != vault.ErrHANotEnabled { return nil, fmt.Errorf("failed to check active status: %v", err) } @@ -692,7 +744,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault. 
return nil, fmt.Errorf("failed to get active status after five seconds; call stack is\n%s\n", buf) } time.Sleep(1 * time.Second) - isLeader, _, err = core.Leader() + isLeader, _, _, err = core.Leader() if err != nil { return nil, fmt.Errorf("failed to check active status: %v", err) } @@ -700,14 +752,14 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault. } } - if rootTokenID != "" { + if coreConfig.DevToken != "" { req := &logical.Request{ ID: "dev-gen-root", Operation: logical.UpdateOperation, ClientToken: init.RootToken, Path: "auth/token/create", Data: map[string]interface{}{ - "id": rootTokenID, + "id": coreConfig.DevToken, "policies": []string{"root"}, "no_parent": true, "no_default_policy": true, @@ -715,13 +767,13 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault. } resp, err := core.HandleRequest(req) if err != nil { - return nil, fmt.Errorf("failed to create root token with ID %s: %s", rootTokenID, err) + return nil, fmt.Errorf("failed to create root token with ID %s: %s", coreConfig.DevToken, err) } if resp == nil { - return nil, fmt.Errorf("nil response when creating root token with ID %s", rootTokenID) + return nil, fmt.Errorf("nil response when creating root token with ID %s", coreConfig.DevToken) } if resp.Auth == nil { - return nil, fmt.Errorf("nil auth when creating root token with ID %s", rootTokenID) + return nil, fmt.Errorf("nil auth when creating root token with ID %s", coreConfig.DevToken) } init.RootToken = resp.Auth.ClientToken @@ -747,6 +799,178 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault. 
return init, nil } +func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress string) int { + testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + BaseListenAddress: devListenAddress, + }) + defer c.cleanupGuard.Do(testCluster.Cleanup) + + info["cluster parameters path"] = testCluster.TempDir + info["log level"] = "trace" + infoKeys = append(infoKeys, "cluster parameters path", "log level") + + for i, core := range testCluster.Cores { + info[fmt.Sprintf("node %d redirect address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String()) + infoKeys = append(infoKeys, fmt.Sprintf("node %d redirect address", i)) + } + + infoKeys = append(infoKeys, "version") + verInfo := version.GetVersion() + info["version"] = verInfo.FullVersionNumber(false) + if verInfo.Revision != "" { + info["version sha"] = strings.Trim(verInfo.Revision, "'") + infoKeys = append(infoKeys, "version sha") + } + infoKeys = append(infoKeys, "cgo") + info["cgo"] = "disabled" + if version.CgoEnabled { + info["cgo"] = "enabled" + } + + // Server configuration output + padding := 24 + sort.Strings(infoKeys) + c.Ui.Output("==> Vault server configuration:\n") + for _, k := range infoKeys { + c.Ui.Output(fmt.Sprintf( + "%s%s: %s", + strings.Repeat(" ", padding-len(k)), + strings.Title(k), + info[k])) + } + c.Ui.Output("") + + for _, core := range testCluster.Cores { + core.Server.Handler = vaulthttp.Handler(core.Core) + core.SetClusterHandler(core.Server.Handler) + } + + testCluster.Start() + + if base.DevToken != "" { + req := &logical.Request{ + ID: "dev-gen-root", + Operation: logical.UpdateOperation, + ClientToken: testCluster.RootToken, + Path: "auth/token/create", + Data: map[string]interface{}{ + "id": base.DevToken, + "policies": []string{"root"}, + "no_parent": true, + "no_default_policy": true, + }, + } + resp, err := 
testCluster.Cores[0].HandleRequest(req) + if err != nil { + c.Ui.Output(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err)) + return 1 + } + if resp == nil { + c.Ui.Output(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken)) + return 1 + } + if resp.Auth == nil { + c.Ui.Output(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken)) + return 1 + } + + testCluster.RootToken = resp.Auth.ClientToken + + req.ID = "dev-revoke-init-root" + req.Path = "auth/token/revoke-self" + req.Data = nil + resp, err = testCluster.Cores[0].HandleRequest(req) + if err != nil { + c.Ui.Output(fmt.Sprintf("failed to revoke initial root token: %s", err)) + return 1 + } + } + + // Set the token + tokenHelper, err := c.TokenHelper() + if err != nil { + c.Ui.Output(fmt.Sprintf("%v", err)) + return 1 + } + if err := tokenHelper.Store(testCluster.RootToken); err != nil { + c.Ui.Output(fmt.Sprintf("%v", err)) + return 1 + } + + if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0755); err != nil { + c.Ui.Output(fmt.Sprintf("%v", err)) + return 1 + } + + c.Ui.Output(fmt.Sprintf( + "==> Three node dev mode is enabled\n\n" + + "The unseal key and root token are reproduced below in case you\n" + + "want to seal/unseal the Vault or play with authentication.\n", + )) + + for i, key := range testCluster.BarrierKeys { + c.Ui.Output(fmt.Sprintf( + "Unseal Key %d: %s", + i+1, base64.StdEncoding.EncodeToString(key), + )) + } + + c.Ui.Output(fmt.Sprintf( + "\nRoot Token: %s\n", testCluster.RootToken, + )) + + c.Ui.Output(fmt.Sprintf( + "\nUseful env vars:\n"+ + "VAULT_TOKEN=%s\n"+ + "VAULT_ADDR=%s\n"+ + "VAULT_CACERT=%s/ca_cert.pem\n", + testCluster.RootToken, + testCluster.Cores[0].Client.Address(), + testCluster.TempDir, + )) + + // Output the header that the server has started + c.Ui.Output("==> Vault server started! 
Log data will stream in below:\n") + + // Release the log gate. + c.logGate.Flush() + + // Wait for shutdown + shutdownTriggered := false + + for !shutdownTriggered { + select { + case <-c.ShutdownCh: + c.Ui.Output("==> Vault shutdown triggered") + + // Stop the listners so that we don't process further client requests. + c.cleanupGuard.Do(testCluster.Cleanup) + + // Shutdown will wait until after Vault is sealed, which means the + // request forwarding listeners will also be closed (and also + // waited for). + for _, core := range testCluster.Cores { + if err := core.Shutdown(); err != nil { + c.Ui.Output(fmt.Sprintf("Error with core shutdown: %s", err)) + } + } + + shutdownTriggered = true + + case <-c.SighupCh: + c.Ui.Output("==> Vault reload triggered") + for _, core := range testCluster.Cores { + if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil); err != nil { + c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err)) + } + } + } + } + + return 0 +} + // detectRedirect is used to attempt redirect address detection func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect, config *server.Config) (string, error) { @@ -774,7 +998,7 @@ func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect, // Check if TLS is disabled if val, ok := list.Config["tls_disable"]; ok { - disable, err := strconv.ParseBool(val) + disable, err := parseutil.ParseBool(val) if err != nil { return "", fmt.Errorf("tls_disable: %s", err) } @@ -785,9 +1009,12 @@ func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect, } // Check for address override - addr, ok := list.Config["address"] + var addr string + addrRaw, ok := list.Config["address"] if !ok { addr = "127.0.0.1:8200" + } else { + addr = addrRaw.(string) } // Check for localhost @@ -892,6 +1119,21 @@ func (c *ServerCommand) setupTelemetry(config *server.Config) error { fanout = append(fanout, sink) } + if telConfig.DogStatsDAddr != "" { + var tags []string + + if 
telConfig.DogStatsDTags != nil { + tags = telConfig.DogStatsDTags + } + + sink, err := datadog.NewDogStatsdSink(telConfig.DogStatsDAddr, metricsConf.HostName) + if err != nil { + return fmt.Errorf("failed to start DogStatsD sink. Got: %s", err) + } + sink.SetTags(tags) + fanout = append(fanout, sink) + } + // Initialize the global sink if len(fanout) > 0 { fanout = append(fanout, inm) @@ -903,55 +1145,29 @@ func (c *ServerCommand) setupTelemetry(config *server.Config) error { return nil } -func (c *ServerCommand) Reload(configPath []string) error { - c.reloadFuncsLock.RLock() - defer c.reloadFuncsLock.RUnlock() +func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reload.ReloadFunc, configPath []string) error { + lock.RLock() + defer lock.RUnlock() var reloadErrors *multierror.Error - // Read the new config - var config *server.Config - for _, path := range configPath { - current, err := server.LoadConfig(path, c.logger) - if err != nil { - reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error loading configuration from %s: %s", path, err)) - goto audit - } - - if config == nil { - config = current - } else { - config = config.Merge(current) - } - } - - // Ensure at least one config was found. - if config == nil { - reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("No configuration files found")) - goto audit - } - - // Call reload on the listeners. This will call each listener with each - // config block, but they verify the address. 
- for _, lnConfig := range config.Listeners { - for _, relFunc := range (*c.reloadFuncs)["listener|"+lnConfig.Type] { - if err := relFunc(lnConfig.Config); err != nil { - reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading configuration: %s", err)) - goto audit + for k, relFuncs := range *reloadFuncs { + switch { + case strings.HasPrefix(k, "listener|"): + for _, relFunc := range relFuncs { + if relFunc != nil { + if err := relFunc(nil); err != nil { + reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading listener: %v", err)) + } + } } - } - } -audit: - // file audit reload funcs - for k, relFuncs := range *c.reloadFuncs { - if !strings.HasPrefix(k, "audit_file|") { - continue - } - for _, relFunc := range relFuncs { - if relFunc != nil { - if err := relFunc(nil); err != nil { - reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading file audit backend at path %s: %v", strings.TrimPrefix(k, "audit_file|"), err)) + case strings.HasPrefix(k, "audit_file|"): + for _, relFunc := range relFuncs { + if relFunc != nil { + if err := relFunc(nil); err != nil { + reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading file audit backend at path %s: %v", strings.TrimPrefix(k, "audit_file|"), err)) + } } } } @@ -1008,6 +1224,51 @@ General Options: return strings.TrimSpace(helpText) } +func (c *ServerCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *ServerCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-config": complete.PredictOr(complete.PredictFiles("*.hcl"), complete.PredictFiles("*.json")), + "-dev": complete.PredictNothing, + "-dev-root-token-id": complete.PredictNothing, + "-dev-listen-address": complete.PredictNothing, + "-log-level": complete.PredictSet("trace", "debug", "info", "warn", "err"), + } +} + +// storePidFile is used to write out our PID to a file if 
necessary +func (c *ServerCommand) storePidFile(pidPath string) error { + // Quit fast if no pidfile + if pidPath == "" { + return nil + } + + // Open the PID file + pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return fmt.Errorf("could not open pid file: %v", err) + } + defer pidFile.Close() + + // Write out the PID + pid := os.Getpid() + _, err = pidFile.WriteString(fmt.Sprintf("%d", pid)) + if err != nil { + return fmt.Errorf("could not write to pid file: %v", err) + } + return nil +} + +// removePidFile is used to cleanup the PID file if necessary +func (c *ServerCommand) removePidFile(pidPath string) error { + if pidPath == "" { + return nil + } + return os.Remove(pidPath) +} + // MakeShutdownCh returns a channel that can be used for shutdown // notifications for commands. This channel will send a message for every // SIGINT or SIGTERM received. diff --git a/vendor/github.com/hashicorp/vault/command/server/config.go b/vendor/github.com/hashicorp/vault/command/server/config.go index e6ea123..8f78ac0 100644 --- a/vendor/github.com/hashicorp/vault/command/server/config.go +++ b/vendor/github.com/hashicorp/vault/command/server/config.go @@ -42,14 +42,22 @@ type Config struct { DefaultLeaseTTL time.Duration `hcl:"-"` DefaultLeaseTTLRaw interface{} `hcl:"default_lease_ttl"` - ClusterName string `hcl:"cluster_name"` + ClusterName string `hcl:"cluster_name"` + ClusterCipherSuites string `hcl:"cluster_cipher_suites"` + + PluginDirectory string `hcl:"plugin_directory"` + + PidFile string `hcl:"pid_file"` + EnableRawEndpoint bool `hcl:"-"` + EnableRawEndpointRaw interface{} `hcl:"raw_storage_endpoint"` } // DevConfig is a Config that is used for dev mode of Vault. 
func DevConfig(ha, transactional bool) *Config { ret := &Config{ - DisableCache: false, - DisableMlock: true, + DisableCache: false, + DisableMlock: true, + EnableRawEndpoint: true, Storage: &Storage{ Type: "inmem", @@ -58,9 +66,11 @@ func DevConfig(ha, transactional bool) *Config { Listeners: []*Listener{ &Listener{ Type: "tcp", - Config: map[string]string{ - "address": "127.0.0.1:8200", - "tls_disable": "1", + Config: map[string]interface{}{ + "address": "127.0.0.1:8200", + "tls_disable": true, + "proxy_protocol_behavior": "allow_authorized", + "proxy_protocol_authorized_addrs": "127.0.0.1:8200", }, }, }, @@ -68,9 +78,6 @@ func DevConfig(ha, transactional bool) *Config { EnableUI: true, Telemetry: &Telemetry{}, - - MaxLeaseTTL: 32 * 24 * time.Hour, - DefaultLeaseTTL: 32 * 24 * time.Hour, } switch { @@ -88,7 +95,7 @@ func DevConfig(ha, transactional bool) *Config { // Listener is the listener configuration for the server. type Listener struct { Type string - Config map[string]string + Config map[string]interface{} } func (l *Listener) GoString() string { @@ -195,6 +202,15 @@ type Telemetry struct { // (e.g. a specific geo location or datacenter, dc:sfo) // Default: none CirconusBrokerSelectTag string `hcl:"circonus_broker_select_tag"` + + // Dogstats: + // DogStatsdAddr is the address of a dogstatsd instance. 
If provided, + // metrics will be sent to that instance + DogStatsDAddr string `hcl:"dogstatsd_addr"` + + // DogStatsdTags are the global tags that should be sent with each packet to dogstatsd + // It is a list of strings, where each string looks like "my_tag_name:my_tag_value" + DogStatsDTags []string `hcl:"dogstatsd_tags"` } func (s *Telemetry) GoString() string { @@ -267,11 +283,31 @@ func (c *Config) Merge(c2 *Config) *Config { result.ClusterName = c2.ClusterName } + result.ClusterCipherSuites = c.ClusterCipherSuites + if c2.ClusterCipherSuites != "" { + result.ClusterCipherSuites = c2.ClusterCipherSuites + } + result.EnableUI = c.EnableUI if c2.EnableUI { result.EnableUI = c2.EnableUI } + result.EnableRawEndpoint = c.EnableRawEndpoint + if c2.EnableRawEndpoint { + result.EnableRawEndpoint = c2.EnableRawEndpoint + } + + result.PluginDirectory = c.PluginDirectory + if c2.PluginDirectory != "" { + result.PluginDirectory = c2.PluginDirectory + } + + result.PidFile = c.PidFile + if c2.PidFile != "" { + result.PidFile = c2.PidFile + } + return result } @@ -285,9 +321,8 @@ func LoadConfig(path string, logger log.Logger) (*Config, error) { if fi.IsDir() { return LoadConfigDir(path, logger) - } else { - return LoadConfigFile(path, logger) } + return LoadConfigFile(path, logger) } // LoadConfigFile loads the configuration from the given file. 
@@ -342,13 +377,18 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) { } } + if result.EnableRawEndpointRaw != nil { + if result.EnableRawEndpoint, err = parseutil.ParseBool(result.EnableRawEndpointRaw); err != nil { + return nil, err + } + } + list, ok := obj.Node.(*ast.ObjectList) if !ok { return nil, fmt.Errorf("error parsing: file doesn't contain a root object") } valid := []string{ - "atlas", "storage", "ha_storage", "backend", @@ -363,6 +403,10 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) { "default_lease_ttl", "max_lease_ttl", "cluster_name", + "cluster_cipher_suites", + "plugin_directory", + "pid_file", + "raw_storage_endpoint", } if err := checkHCLKeys(list, valid); err != nil { return nil, err @@ -641,8 +685,6 @@ func parseHSMs(result *Config, list *ast.ObjectList) error { } func parseListeners(result *Config, list *ast.ObjectList) error { - var foundAtlas bool - listeners := make([]*Listener, 0, len(list.Items)) for _, item := range list.Items { key := "listener" @@ -656,6 +698,8 @@ func parseListeners(result *Config, list *ast.ObjectList) error { "endpoint", "infrastructure", "node_id", + "proxy_protocol_behavior", + "proxy_protocol_authorized_addrs", "tls_disable", "tls_cert_file", "tls_key_file", @@ -663,36 +707,20 @@ func parseListeners(result *Config, list *ast.ObjectList) error { "tls_cipher_suites", "tls_prefer_server_cipher_suites", "tls_require_and_verify_client_cert", + "tls_client_ca_file", "token", } if err := checkHCLKeys(item.Val, valid); err != nil { return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key)) } - var m map[string]string + var m map[string]interface{} if err := hcl.DecodeObject(&m, item.Val); err != nil { return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key)) } lnType := strings.ToLower(key) - if lnType == "atlas" { - if foundAtlas { - return multierror.Prefix(fmt.Errorf("only one listener of type 'atlas' is permitted"), fmt.Sprintf("listeners.%s", key)) - } - - foundAtlas 
= true - if m["token"] == "" { - return multierror.Prefix(fmt.Errorf("'token' must be specified for an Atlas listener"), fmt.Sprintf("listeners.%s", key)) - } - if m["infrastructure"] == "" { - return multierror.Prefix(fmt.Errorf("'infrastructure' must be specified for an Atlas listener"), fmt.Sprintf("listeners.%s", key)) - } - if m["node_id"] == "" { - return multierror.Prefix(fmt.Errorf("'node_id' must be specified for an Atlas listener"), fmt.Sprintf("listeners.%s", key)) - } - } - listeners = append(listeners, &Listener{ Type: lnType, Config: m, @@ -727,6 +755,8 @@ func parseTelemetry(result *Config, list *ast.ObjectList) error { "circonus_broker_id", "circonus_broker_select_tag", "disable_hostname", + "dogstatsd_addr", + "dogstatsd_tags", "statsd_address", "statsite_address", } diff --git a/vendor/github.com/hashicorp/vault/command/server/config_test.go b/vendor/github.com/hashicorp/vault/command/server/config_test.go index 789be40..bdc9128 100644 --- a/vendor/github.com/hashicorp/vault/command/server/config_test.go +++ b/vendor/github.com/hashicorp/vault/command/server/config_test.go @@ -6,6 +6,8 @@ import ( "testing" "time" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/vault/helper/logformat" log "github.com/mgutz/logxi/v1" ) @@ -20,18 +22,9 @@ func TestLoadConfigFile(t *testing.T) { expected := &Config{ Listeners: []*Listener{ - &Listener{ - Type: "atlas", - Config: map[string]string{ - "token": "foobar", - "infrastructure": "foo/bar", - "endpoint": "https://foo.bar:1111", - "node_id": "foo_node", - }, - }, &Listener{ Type: "tcp", - Config: map[string]string{ + Config: map[string]interface{}{ "address": "127.0.0.1:443", }, }, @@ -58,6 +51,8 @@ func TestLoadConfigFile(t *testing.T) { StatsdAddr: "bar", StatsiteAddr: "foo", DisableHostname: false, + DogStatsDAddr: "127.0.0.1:7254", + DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"}, }, DisableCache: true, @@ -67,11 +62,16 @@ func TestLoadConfigFile(t *testing.T) 
{ EnableUI: true, EnableUIRaw: true, + EnableRawEndpoint: true, + EnableRawEndpointRaw: true, + MaxLeaseTTL: 10 * time.Hour, MaxLeaseTTLRaw: "10h", DefaultLeaseTTL: 10 * time.Hour, DefaultLeaseTTLRaw: "10h", ClusterName: "testcluster", + + PidFile: "./pidfile", } if !reflect.DeepEqual(config, expected) { t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected) @@ -90,19 +90,10 @@ func TestLoadConfigFile_json(t *testing.T) { Listeners: []*Listener{ &Listener{ Type: "tcp", - Config: map[string]string{ + Config: map[string]interface{}{ "address": "127.0.0.1:443", }, }, - &Listener{ - Type: "atlas", - Config: map[string]string{ - "token": "foobar", - "infrastructure": "foo/bar", - "endpoint": "https://foo.bar:1111", - "node_id": "foo_node", - }, - }, }, Storage: &Storage{ @@ -113,6 +104,8 @@ func TestLoadConfigFile_json(t *testing.T) { DisableClustering: true, }, + ClusterCipherSuites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + Telemetry: &Telemetry{ StatsiteAddr: "baz", StatsdAddr: "", @@ -132,15 +125,18 @@ func TestLoadConfigFile_json(t *testing.T) { CirconusBrokerSelectTag: "", }, - MaxLeaseTTL: 10 * time.Hour, - MaxLeaseTTLRaw: "10h", - DefaultLeaseTTL: 10 * time.Hour, - DefaultLeaseTTLRaw: "10h", - ClusterName: "testcluster", - DisableCacheRaw: interface{}(nil), - DisableMlockRaw: interface{}(nil), - EnableUI: true, - EnableUIRaw: true, + MaxLeaseTTL: 10 * time.Hour, + MaxLeaseTTLRaw: "10h", + DefaultLeaseTTL: 10 * time.Hour, + DefaultLeaseTTLRaw: "10h", + ClusterName: "testcluster", + DisableCacheRaw: interface{}(nil), + DisableMlockRaw: interface{}(nil), + EnableUI: true, + EnableUIRaw: true, + PidFile: "./pidfile", + EnableRawEndpoint: true, + EnableRawEndpointRaw: true, } if !reflect.DeepEqual(config, expected) { t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected) @@ -159,13 +155,13 @@ func TestLoadConfigFile_json2(t *testing.T) { Listeners: []*Listener{ &Listener{ Type: "tcp", - Config: 
map[string]string{ + Config: map[string]interface{}{ "address": "127.0.0.1:443", }, }, &Listener{ Type: "tcp", - Config: map[string]string{ + Config: map[string]interface{}{ "address": "127.0.0.1:444", }, }, @@ -190,6 +186,8 @@ func TestLoadConfigFile_json2(t *testing.T) { EnableUI: true, + EnableRawEndpoint: true, + Telemetry: &Telemetry{ StatsiteAddr: "foo", StatsdAddr: "bar", @@ -228,7 +226,7 @@ func TestLoadConfigDir(t *testing.T) { Listeners: []*Listener{ &Listener{ Type: "tcp", - Config: map[string]string{ + Config: map[string]interface{}{ "address": "127.0.0.1:443", }, }, @@ -244,6 +242,8 @@ func TestLoadConfigDir(t *testing.T) { EnableUI: true, + EnableRawEndpoint: true, + Telemetry: &Telemetry{ StatsiteAddr: "qux", StatsdAddr: "baz", @@ -259,6 +259,56 @@ func TestLoadConfigDir(t *testing.T) { } } +func TestParseListeners(t *testing.T) { + obj, _ := hcl.Parse(strings.TrimSpace(` +listener "tcp" { + address = "127.0.0.1:443" + cluster_address = "127.0.0.1:8201" + tls_disable = false + tls_cert_file = "./certs/server.crt" + tls_key_file = "./certs/server.key" + tls_client_ca_file = "./certs/rootca.crt" + tls_min_version = "tls12" + tls_require_and_verify_client_cert = true +}`)) + + var config Config + list, _ := obj.Node.(*ast.ObjectList) + objList := list.Filter("listener") + parseListeners(&config, objList) + listeners := config.Listeners + if len(listeners) == 0 { + t.Fatalf("expected at least one listener in the config") + } + listener := listeners[0] + if listener.Type != "tcp" { + t.Fatalf("expected tcp listener in the config") + } + + expected := &Config{ + Listeners: []*Listener{ + &Listener{ + Type: "tcp", + Config: map[string]interface{}{ + "address": "127.0.0.1:443", + "cluster_address": "127.0.0.1:8201", + "tls_disable": false, + "tls_cert_file": "./certs/server.crt", + "tls_key_file": "./certs/server.key", + "tls_client_ca_file": "./certs/rootca.crt", + "tls_min_version": "tls12", + "tls_require_and_verify_client_cert": true, + }, + }, + }, + } 
+ + if !reflect.DeepEqual(config, *expected) { + t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, *expected) + } + +} + func TestParseConfig_badTopLevel(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) diff --git a/vendor/github.com/hashicorp/vault/command/server/listener.go b/vendor/github.com/hashicorp/vault/command/server/listener.go index 999966e..4f9aedf 100644 --- a/vendor/github.com/hashicorp/vault/command/server/listener.go +++ b/vendor/github.com/hashicorp/vault/command/server/listener.go @@ -5,28 +5,29 @@ import ( // certificates that use it can be parsed. _ "crypto/sha512" "crypto/tls" + "crypto/x509" "fmt" "io" + "io/ioutil" "net" - "strconv" - "sync" + "github.com/hashicorp/vault/helper/parseutil" + "github.com/hashicorp/vault/helper/proxyutil" + "github.com/hashicorp/vault/helper/reload" "github.com/hashicorp/vault/helper/tlsutil" - "github.com/hashicorp/vault/vault" ) // ListenerFactory is the factory function to create a listener. -type ListenerFactory func(map[string]string, io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) +type ListenerFactory func(map[string]interface{}, io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error) // BuiltinListeners is the list of built-in listener types. var BuiltinListeners = map[string]ListenerFactory{ - "tcp": tcpListenerFactory, - "atlas": atlasListenerFactory, + "tcp": tcpListenerFactory, } // NewListener creates a new listener of the given type with the given // configuration. The type is looked up in the BuiltinListeners map. 
-func NewListener(t string, config map[string]string, logger io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) { +func NewListener(t string, config map[string]interface{}, logger io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error) { f, ok := BuiltinListeners[t] if !ok { return nil, nil, nil, fmt.Errorf("unknown listener type: %s", t) @@ -35,14 +36,45 @@ func NewListener(t string, config map[string]string, logger io.Writer) (net.List return f(config, logger) } +func listenerWrapProxy(ln net.Listener, config map[string]interface{}) (net.Listener, error) { + behaviorRaw, ok := config["proxy_protocol_behavior"] + if !ok { + return ln, nil + } + + behavior, ok := behaviorRaw.(string) + if !ok { + return nil, fmt.Errorf("failed parsing proxy_protocol_behavior value: not a string") + } + + authorizedAddrsRaw, ok := config["proxy_protocol_authorized_addrs"] + if !ok { + return nil, fmt.Errorf("proxy_protocol_behavior set but no proxy_protocol_authorized_addrs value") + } + + proxyProtoConfig := &proxyutil.ProxyProtoConfig{ + Behavior: behavior, + } + if err := proxyProtoConfig.SetAuthorizedAddrs(authorizedAddrsRaw); err != nil { + return nil, fmt.Errorf("failed parsing proxy_protocol_authorized_addrs: %v", err) + } + + newLn, err := proxyutil.WrapInProxyProto(ln, proxyProtoConfig) + if err != nil { + return nil, fmt.Errorf("failed configuring PROXY protocol wrapper: %s", err) + } + + return newLn, nil +} + func listenerWrapTLS( ln net.Listener, props map[string]string, - config map[string]string) (net.Listener, map[string]string, vault.ReloadFunc, error) { + config map[string]interface{}) (net.Listener, map[string]string, reload.ReloadFunc, error) { props["tls"] = "disabled" if v, ok := config["tls_disable"]; ok { - disabled, err := strconv.ParseBool(v) + disabled, err := parseutil.ParseBool(v) if err != nil { return nil, nil, nil, fmt.Errorf("invalid value for 'tls_disable': %v", err) } @@ -61,21 +93,22 @@ func listenerWrapTLS( 
return nil, nil, nil, fmt.Errorf("'tls_key_file' must be set") } - cg := &certificateGetter{ - id: config["address"], - } + cg := reload.NewCertificateGetter(config["tls_cert_file"].(string), config["tls_key_file"].(string)) - if err := cg.reload(config); err != nil { + if err := cg.Reload(config); err != nil { return nil, nil, nil, fmt.Errorf("error loading TLS cert: %s", err) } - tlsvers, ok := config["tls_min_version"] + var tlsvers string + tlsversRaw, ok := config["tls_min_version"] if !ok { tlsvers = "tls12" + } else { + tlsvers = tlsversRaw.(string) } tlsConf := &tls.Config{} - tlsConf.GetCertificate = cg.getCertificate + tlsConf.GetCertificate = cg.GetCertificate tlsConf.NextProtos = []string{"h2", "http/1.1"} tlsConf.MinVersion, ok = tlsutil.TLSLookup[tlsvers] if !ok { @@ -84,67 +117,42 @@ func listenerWrapTLS( tlsConf.ClientAuth = tls.RequestClientCert if v, ok := config["tls_cipher_suites"]; ok { - ciphers, err := tlsutil.ParseCiphers(v) + ciphers, err := tlsutil.ParseCiphers(v.(string)) if err != nil { return nil, nil, nil, fmt.Errorf("invalid value for 'tls_cipher_suites': %v", err) } tlsConf.CipherSuites = ciphers } if v, ok := config["tls_prefer_server_cipher_suites"]; ok { - preferServer, err := strconv.ParseBool(v) + preferServer, err := parseutil.ParseBool(v) if err != nil { return nil, nil, nil, fmt.Errorf("invalid value for 'tls_prefer_server_cipher_suites': %v", err) } tlsConf.PreferServerCipherSuites = preferServer } if v, ok := config["tls_require_and_verify_client_cert"]; ok { - requireClient, err := strconv.ParseBool(v) + requireClient, err := parseutil.ParseBool(v) if err != nil { return nil, nil, nil, fmt.Errorf("invalid value for 'tls_require_and_verify_client_cert': %v", err) } if requireClient { tlsConf.ClientAuth = tls.RequireAndVerifyClientCert } + if tlsClientCaFile, ok := config["tls_client_ca_file"]; ok { + caPool := x509.NewCertPool() + data, err := ioutil.ReadFile(tlsClientCaFile.(string)) + if err != nil { + return nil, nil, 
nil, fmt.Errorf("failed to read tls_client_ca_file: %v", err) + } + + if !caPool.AppendCertsFromPEM(data) { + return nil, nil, nil, fmt.Errorf("failed to parse CA certificate in tls_client_ca_file") + } + tlsConf.ClientCAs = caPool + } } ln = tls.NewListener(ln, tlsConf) props["tls"] = "enabled" - return ln, props, cg.reload, nil -} - -type certificateGetter struct { - sync.RWMutex - - cert *tls.Certificate - - id string -} - -func (cg *certificateGetter) reload(config map[string]string) error { - if config["address"] != cg.id { - return nil - } - - cert, err := tls.LoadX509KeyPair(config["tls_cert_file"], config["tls_key_file"]) - if err != nil { - return err - } - - cg.Lock() - defer cg.Unlock() - - cg.cert = &cert - - return nil -} - -func (cg *certificateGetter) getCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - cg.RLock() - defer cg.RUnlock() - - if cg.cert == nil { - return nil, fmt.Errorf("nil certificate") - } - - return cg.cert, nil + return ln, props, cg.Reload, nil } diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_atlas.go b/vendor/github.com/hashicorp/vault/command/server/listener_atlas.go deleted file mode 100644 index c000474..0000000 --- a/vendor/github.com/hashicorp/vault/command/server/listener_atlas.go +++ /dev/null @@ -1,66 +0,0 @@ -package server - -import ( - "io" - "net" - - "github.com/hashicorp/scada-client/scada" - "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/version" -) - -type SCADAListener struct { - ln net.Listener - scadaProvider *scada.Provider -} - -func (s *SCADAListener) Accept() (net.Conn, error) { - return s.ln.Accept() -} - -func (s *SCADAListener) Close() error { - s.scadaProvider.Shutdown() - return s.ln.Close() -} - -func (s *SCADAListener) Addr() net.Addr { - return s.ln.Addr() -} - -func atlasListenerFactory(config map[string]string, logger io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) { - scadaConfig := &scada.Config{ - Service: 
"vault", - Version: version.GetVersion().VersionNumber(), - ResourceType: "vault-cluster", - Meta: map[string]string{ - "node_id": config["node_id"], - "cluster_name": config["cluster_name"], - }, - Atlas: scada.AtlasConfig{ - Endpoint: config["endpoint"], - Infrastructure: config["infrastructure"], - Token: config["token"], - }, - } - - provider, list, err := scada.NewHTTPProvider(scadaConfig, logger) - if err != nil { - return nil, nil, nil, err - } - - ln := &SCADAListener{ - ln: list, - scadaProvider: provider, - } - - props := map[string]string{ - "addr": "Atlas/SCADA", - "infrastructure": scadaConfig.Atlas.Infrastructure, - } - - // The outer connection is already TLS-enabled; this is just the listener - // that reaches back inside that connection - config["tls_disable"] = "1" - - return listenerWrapTLS(ln, props, config) -} diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go b/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go index 4e5e9b4..b0ab687 100644 --- a/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go +++ b/vendor/github.com/hashicorp/vault/command/server/listener_tcp.go @@ -6,14 +6,17 @@ import ( "strings" "time" - "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/helper/reload" ) -func tcpListenerFactory(config map[string]string, _ io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) { +func tcpListenerFactory(config map[string]interface{}, _ io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error) { bind_proto := "tcp" - addr, ok := config["address"] + var addr string + addrRaw, ok := config["address"] if !ok { addr = "127.0.0.1:8200" + } else { + addr = addrRaw.(string) } // If they've passed 0.0.0.0, we only want to bind on IPv4 @@ -28,6 +31,12 @@ func tcpListenerFactory(config map[string]string, _ io.Writer) (net.Listener, ma } ln = tcpKeepAliveListener{ln.(*net.TCPListener)} + + ln, err = listenerWrapProxy(ln, config) + if err != nil { + 
return nil, nil, nil, err + } + props := map[string]string{"addr": addr} return listenerWrapTLS(ln, props, config) } diff --git a/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go b/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go index 7da2033..4da12b3 100644 --- a/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go +++ b/vendor/github.com/hashicorp/vault/command/server/listener_tcp_test.go @@ -13,7 +13,7 @@ import ( ) func TestTCPListener(t *testing.T) { - ln, _, _, err := tcpListenerFactory(map[string]string{ + ln, _, _, err := tcpListenerFactory(map[string]interface{}{ "address": "127.0.0.1:0", "tls_disable": "1", }, nil) @@ -48,19 +48,28 @@ func TestTCPListener_tls(t *testing.T) { t.Fatal("not ok when appending CA cert") } - ln, _, _, err := tcpListenerFactory(map[string]string{ - "address": "127.0.0.1:0", - "tls_cert_file": wd + "reload_foo.pem", - "tls_key_file": wd + "reload_foo.key", + ln, _, _, err := tcpListenerFactory(map[string]interface{}{ + "address": "127.0.0.1:0", + "tls_cert_file": wd + "reload_foo.pem", + "tls_key_file": wd + "reload_foo.key", + "tls_require_and_verify_client_cert": "true", + "tls_client_ca_file": wd + "reload_ca.pem", }, nil) if err != nil { t.Fatalf("err: %s", err) } + cwd, _ := os.Getwd() + + clientCert, _ := tls.LoadX509KeyPair( + cwd+"/test-fixtures/reload/reload_foo.pem", + cwd+"/test-fixtures/reload/reload_foo.key") connFn := func(lnReal net.Listener) (net.Conn, error) { conn, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{ - RootCAs: certPool, + RootCAs: certPool, + Certificates: []tls.Certificate{clientCert}, }) + if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json index 70e7e14..918af56 100644 --- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json +++ 
b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config.hcl.json @@ -3,14 +3,8 @@ "tcp": { "address": "127.0.0.1:443" } - }, { - "atlas": { - "token": "foobar", - "infrastructure": "foo/bar", - "endpoint": "https://foo.bar:1111", - "node_id": "foo_node" - } }], + "cluster_cipher_suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "storage": { "consul": { "foo": "bar", @@ -23,5 +17,7 @@ "max_lease_ttl": "10h", "default_lease_ttl": "10h", "cluster_name":"testcluster", - "ui":true + "ui":true, + "pid_file":"./pidfile", + "raw_storage_endpoint":true } diff --git a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json index 5279d63..e1eb73e 100644 --- a/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json +++ b/vendor/github.com/hashicorp/vault/command/server/test-fixtures/config2.hcl.json @@ -1,5 +1,6 @@ { "ui":true, + "raw_storage_endpoint":true, "listener":[ { "tcp":{ diff --git a/vendor/github.com/hashicorp/vault/command/server_ha_test.go b/vendor/github.com/hashicorp/vault/command/server_ha_test.go index 5562191..a9b1188 100644 --- a/vendor/github.com/hashicorp/vault/command/server_ha_test.go +++ b/vendor/github.com/hashicorp/vault/command/server_ha_test.go @@ -9,7 +9,10 @@ import ( "testing" "github.com/hashicorp/vault/meta" + "github.com/hashicorp/vault/physical" "github.com/mitchellh/cli" + + physConsul "github.com/hashicorp/vault/physical/consul" ) // The following tests have a go-metrics/exp manager race condition @@ -19,6 +22,9 @@ func TestServer_CommonHA(t *testing.T) { Meta: meta.Meta{ Ui: ui, }, + PhysicalBackends: map[string]physical.Factory{ + "consul": physConsul.NewConsulBackend, + }, } tmpfile, err := ioutil.TempFile("", "") @@ -47,6 +53,9 @@ func TestServer_GoodSeparateHA(t *testing.T) { Meta: meta.Meta{ Ui: ui, }, + PhysicalBackends: map[string]physical.Factory{ + 
"consul": physConsul.NewConsulBackend, + }, } tmpfile, err := ioutil.TempFile("", "") @@ -75,6 +84,9 @@ func TestServer_BadSeparateHA(t *testing.T) { Meta: meta.Meta{ Ui: ui, }, + PhysicalBackends: map[string]physical.Factory{ + "consul": physConsul.NewConsulBackend, + }, } tmpfile, err := ioutil.TempFile("", "") diff --git a/vendor/github.com/hashicorp/vault/command/server_test.go b/vendor/github.com/hashicorp/vault/command/server_test.go index f95016f..9a90239 100644 --- a/vendor/github.com/hashicorp/vault/command/server_test.go +++ b/vendor/github.com/hashicorp/vault/command/server_test.go @@ -15,7 +15,10 @@ import ( "time" "github.com/hashicorp/vault/meta" + "github.com/hashicorp/vault/physical" "github.com/mitchellh/cli" + + physFile "github.com/hashicorp/vault/physical/file" ) var ( @@ -58,8 +61,8 @@ disable_mlock = true listener "tcp" { address = "127.0.0.1:8203" - tls_cert_file = "TMPDIR/reload_FILE.pem" - tls_key_file = "TMPDIR/reload_FILE.key" + tls_cert_file = "TMPDIR/reload_cert.pem" + tls_key_file = "TMPDIR/reload_key.pem" } ` ) @@ -79,15 +82,11 @@ func TestServer_ReloadListener(t *testing.T) { // Setup initial certs inBytes, _ := ioutil.ReadFile(wd + "reload_foo.pem") - ioutil.WriteFile(td+"/reload_foo.pem", inBytes, 0777) + ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0777) inBytes, _ = ioutil.ReadFile(wd + "reload_foo.key") - ioutil.WriteFile(td+"/reload_foo.key", inBytes, 0777) - inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem") - ioutil.WriteFile(td+"/reload_bar.pem", inBytes, 0777) - inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key") - ioutil.WriteFile(td+"/reload_bar.key", inBytes, 0777) + ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0777) - relhcl := strings.Replace(strings.Replace(reloadhcl, "TMPDIR", td, -1), "FILE", "foo", -1) + relhcl := strings.Replace(reloadhcl, "TMPDIR", td, -1) ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777) inBytes, _ = ioutil.ReadFile(wd + "reload_ca.pem") @@ -104,6 +103,9 @@ func 
TestServer_ReloadListener(t *testing.T) { }, ShutdownCh: MakeShutdownCh(), SighupCh: MakeSighupCh(), + PhysicalBackends: map[string]physical.Factory{ + "file": physFile.NewFileBackend, + }, } finished := false @@ -155,7 +157,11 @@ func TestServer_ReloadListener(t *testing.T) { t.Fatalf("certificate name didn't check out: %s", err) } - relhcl = strings.Replace(strings.Replace(reloadhcl, "TMPDIR", td, -1), "FILE", "bar", -1) + relhcl = strings.Replace(reloadhcl, "TMPDIR", td, -1) + inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem") + ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0777) + inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key") + ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0777) ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777) c.SighupCh <- struct{}{} diff --git a/vendor/github.com/hashicorp/vault/command/ssh.go b/vendor/github.com/hashicorp/vault/command/ssh.go index a9aebbe..03e1933 100644 --- a/vendor/github.com/hashicorp/vault/command/ssh.go +++ b/vendor/github.com/hashicorp/vault/command/ssh.go @@ -10,15 +10,42 @@ import ( "os/user" "strings" + "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/builtin/logical/ssh" "github.com/hashicorp/vault/meta" + homedir "github.com/mitchellh/go-homedir" "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" ) -// SSHCommand is a Command that establishes a SSH connection -// with target by generating a dynamic key +// SSHCommand is a Command that establishes a SSH connection with target by +// generating a dynamic key type SSHCommand struct { meta.Meta + + // API + client *api.Client + sshClient *api.SSH + + // Common options + mode string + noExec bool + format string + mountPoint string + role string + username string + ip string + sshArgs []string + + // Key options + strictHostKeyChecking string + userKnownHostsFile string + + // SSH CA backend specific options + publicKeyPath string + privateKeyPath string + hostKeyMountPoint string + hostKeyHostnames string } // Structure to 
hold the fields returned when asked for a credential from SSHh backend. @@ -31,42 +58,50 @@ type SSHCredentialResp struct { } func (c *SSHCommand) Run(args []string) int { - var role, mountPoint, format, userKnownHostsFile, strictHostKeyChecking string - var noExec bool - var sshCmdArgs []string + flags := c.Meta.FlagSet("ssh", meta.FlagSetDefault) - flags.StringVar(&strictHostKeyChecking, "strict-host-key-checking", "", "") - flags.StringVar(&userKnownHostsFile, "user-known-hosts-file", "", "") - flags.StringVar(&format, "format", "table", "") - flags.StringVar(&role, "role", "", "") - flags.StringVar(&mountPoint, "mount-point", "ssh", "") - flags.BoolVar(&noExec, "no-exec", false, "") + + envOrDefault := func(key string, def string) string { + if k := os.Getenv(key); k != "" { + return k + } + return def + } + + expandPath := func(p string) string { + e, err := homedir.Expand(p) + if err != nil { + return p + } + return e + } + + // Common options + flags.StringVar(&c.mode, "mode", "", "") + flags.BoolVar(&c.noExec, "no-exec", false, "") + flags.StringVar(&c.format, "format", "table", "") + flags.StringVar(&c.mountPoint, "mount-point", "ssh", "") + flags.StringVar(&c.role, "role", "", "") + + // Key options + flags.StringVar(&c.strictHostKeyChecking, "strict-host-key-checking", + envOrDefault("VAULT_SSH_STRICT_HOST_KEY_CHECKING", "ask"), "") + flags.StringVar(&c.userKnownHostsFile, "user-known-hosts-file", + envOrDefault("VAULT_SSH_USER_KNOWN_HOSTS_FILE", expandPath("~/.ssh/known_hosts")), "") + + // CA-specific options + flags.StringVar(&c.publicKeyPath, "public-key-path", + expandPath("~/.ssh/id_rsa.pub"), "") + flags.StringVar(&c.privateKeyPath, "private-key-path", + expandPath("~/.ssh/id_rsa"), "") + flags.StringVar(&c.hostKeyMountPoint, "host-key-mount-point", "", "") + flags.StringVar(&c.hostKeyHostnames, "host-key-hostnames", "*", "") flags.Usage = func() { c.Ui.Error(c.Help()) } if err := flags.Parse(args); err != nil { return 1 } - // If the flag is 
already set then it takes the precedence. If the flag is not - // set, try setting it from env var. - if os.Getenv("VAULT_SSH_STRICT_HOST_KEY_CHECKING") != "" && strictHostKeyChecking == "" { - strictHostKeyChecking = os.Getenv("VAULT_SSH_STRICT_HOST_KEY_CHECKING") - } - // Assign default value if both flag and env var are not set - if strictHostKeyChecking == "" { - strictHostKeyChecking = "ask" - } - - // If the flag is already set then it takes the precedence. If the flag is not - // set, try setting it from env var. - if os.Getenv("VAULT_SSH_USER_KNOWN_HOSTS_FILE") != "" && userKnownHostsFile == "" { - userKnownHostsFile = os.Getenv("VAULT_SSH_USER_KNOWN_HOSTS_FILE") - } - // Assign default value if both flag and env var are not set - if userKnownHostsFile == "" { - userKnownHostsFile = "~/.ssh/known_hosts" - } - args = flags.Args() if len(args) < 1 { c.Ui.Error("ssh expects at least one argument") @@ -78,46 +113,35 @@ func (c *SSHCommand) Run(args []string) int { c.Ui.Error(fmt.Sprintf("Error initializing client: %v", err)) return 1 } + c.client = client + c.sshClient = client.SSHWithMountPoint(c.mountPoint) - // split the parameter username@ip - input := strings.Split(args[0], "@") - var username string - var ipAddr string - - // If only IP is mentioned and username is skipped, assume username to - // be the current username. Vault SSH role's default username could have - // been used, but in order to retain the consistency with SSH command, - // current username is employed. - if len(input) == 1 { - u, err := user.Current() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error fetching username: %v", err)) - return 1 - } - username = u.Username - ipAddr = input[0] - } else if len(input) == 2 { - username = input[0] - ipAddr = input[1] - } else { - c.Ui.Error(fmt.Sprintf("Invalid parameter: %q", args[0])) + // Extract the username and IP. 
+ c.username, c.ip, err = c.userAndIP(args[0]) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing user and IP: %s", err)) return 1 } - // Resolving domain names to IP address on the client side. - // Vault only deals with IP addresses. - ip, err := net.ResolveIPAddr("ip", ipAddr) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error resolving IP Address: %v", err)) - return 1 + // The rest of the args are ssh args + if len(args) > 1 { + c.sshArgs = args[1:] } // Credentials are generated only against a registered role. If user // does not specify a role with the SSH command, then lookup API is used // to fetch all the roles with which this IP is associated. If there is // only one role associated with it, use it to establish the connection. - if role == "" { - role, err = c.defaultRole(mountPoint, ip.String()) + // + // TODO: remove in 0.9.0, convert to validation error + if c.role == "" { + c.Ui.Warn("" + + "WARNING: No -role specified. Use -role to tell Vault which ssh role\n" + + "to use for authentication. In the future, you will need to tell Vault\n" + + "which role to use. For now, Vault will attempt to guess based on a\n" + + "the API response.") + + role, err := c.defaultRole(c.mountPoint, c.ip) if err != nil { c.Ui.Error(fmt.Sprintf("Error choosing role: %v", err)) return 1 @@ -127,110 +151,362 @@ func (c *SSHCommand) Run(args []string) int { // be used by the user (ACL enforcement), then user should see an // error message accordingly. c.Ui.Output(fmt.Sprintf("Vault SSH: Role: %q", role)) + c.role = role } - data := map[string]interface{}{ - "username": username, - "ip": ip.String(), - } - - keySecret, err := client.SSHWithMountPoint(mountPoint).Credential(role, data) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error getting key for SSH session: %v", err)) - return 1 - } - - // if no-exec was chosen, just print out the secret and return. 
- if noExec { - return OutputSecret(c.Ui, format, keySecret) - } - - // Port comes back as a json.Number which mapstructure doesn't like, so convert it - if keySecret.Data["port"] != nil { - keySecret.Data["port"] = keySecret.Data["port"].(json.Number).String() - } - var resp SSHCredentialResp - if err := mapstructure.Decode(keySecret.Data, &resp); err != nil { - c.Ui.Error(fmt.Sprintf("Error parsing the credential response: %v", err)) - return 1 - } - - if resp.KeyType == ssh.KeyTypeDynamic { - if len(resp.Key) == 0 { - c.Ui.Error(fmt.Sprintf("Invalid key")) - return 1 - } - sshDynamicKeyFile, err := ioutil.TempFile("", fmt.Sprintf("vault_ssh_%s_%s_", username, ip.String())) + // If no mode was given, perform the old-school lookup. Keep this now for + // backwards-compatability, but print a warning. + // + // TODO: remove in 0.9.0, convert to validation error + if c.mode == "" { + c.Ui.Warn("" + + "WARNING: No -mode specified. Use -mode to tell Vault which ssh\n" + + "authentication mode to use. In the future, you will need to tell\n" + + "Vault which mode to use. For now, Vault will attempt to guess based\n" + + "on the API response. This guess involves creating a temporary\n" + + "credential, reading its type, and then revoking it. To reduce the\n" + + "number of API calls and surface area, specify -mode directly.") + secret, cred, err := c.generateCredential() if err != nil { - c.Ui.Error(fmt.Sprintf("Error creating temporary file: %v", err)) + // This is _very_ hacky, but is the only sane backwards-compatible way + // to do this. If the error is "key type unknown", we just assume the + // type is "ca". In the future, mode will be required as an option. + if strings.Contains(err.Error(), "key type unknown") { + c.mode = ssh.KeyTypeCA + } else { + c.Ui.Error(fmt.Sprintf("Error getting credential: %s", err)) + return 1 + } + } else { + c.mode = cred.KeyType + } + + // Revoke the secret, since the child functions will generate their own + // credential. 
Users wishing to avoid this should specify -mode. + if secret != nil { + if err := c.client.Sys().Revoke(secret.LeaseID); err != nil { + c.Ui.Warn(fmt.Sprintf("Failed to revoke temporary key: %s", err)) + } + } + } + + switch strings.ToLower(c.mode) { + case ssh.KeyTypeCA: + if err := c.handleTypeCA(); err != nil { + c.Ui.Error(err.Error()) return 1 } - - // Ensure that we delete the temporary file - defer os.Remove(sshDynamicKeyFile.Name()) - - if err = ioutil.WriteFile(sshDynamicKeyFile.Name(), - []byte(resp.Key), 0600); err != nil { - c.Ui.Error(fmt.Sprintf("Error storing the dynamic key into the temporary file: %v", err)) + case ssh.KeyTypeOTP: + if err := c.handleTypeOTP(); err != nil { + c.Ui.Error(err.Error()) return 1 } - sshCmdArgs = append(sshCmdArgs, []string{"-i", sshDynamicKeyFile.Name()}...) - - } else if resp.KeyType == ssh.KeyTypeOTP { - // Check if the application 'sshpass' is installed in the client machine. - // If it is then, use it to automate typing in OTP to the prompt. Unfortunately, - // it was not possible to automate it without a third-party application, with - // only the Go libraries. - // Feel free to try and remove this dependency. - sshpassPath, err := exec.LookPath("sshpass") - if err == nil { - sshCmdArgs = append(sshCmdArgs, []string{"-p", string(resp.Key), "ssh", "-o UserKnownHostsFile=" + userKnownHostsFile, "-o StrictHostKeyChecking=" + strictHostKeyChecking, "-p", resp.Port, username + "@" + ip.String()}...) - if len(args) > 1 { - sshCmdArgs = append(sshCmdArgs, args[1:]...) - } - sshCmd := exec.Command(sshpassPath, sshCmdArgs...) 
- sshCmd.Stdin = os.Stdin - sshCmd.Stdout = os.Stdout - err = sshCmd.Run() - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to establish SSH connection: %q", err)) - } - return 0 + case ssh.KeyTypeDynamic: + if err := c.handleTypeDynamic(); err != nil { + c.Ui.Error(err.Error()) + return 1 } - c.Ui.Output("OTP for the session is " + resp.Key) - c.Ui.Output("[Note: Install 'sshpass' to automate typing in OTP]") - } - sshCmdArgs = append(sshCmdArgs, []string{"-o UserKnownHostsFile=" + userKnownHostsFile, "-o StrictHostKeyChecking=" + strictHostKeyChecking, "-p", resp.Port, username + "@" + ip.String()}...) - if len(args) > 1 { - sshCmdArgs = append(sshCmdArgs, args[1:]...) - } - - sshCmd := exec.Command("ssh", sshCmdArgs...) - sshCmd.Stdin = os.Stdin - sshCmd.Stdout = os.Stdout - - // Running the command as a separate command. The reason for using exec.Command instead - // of using crypto/ssh package is that, this way, user can have the same feeling of - // connecting to remote hosts with the ssh command. Package crypto/ssh did not have a way - // to establish an independent session like this. - err = sshCmd.Run() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error while running ssh command: %q", err)) - } - - // If the session established was longer than the lease expiry, the secret - // might have been revoked already. If not, then revoke it. Since the key - // file is deleted and since user doesn't know the credential anymore, there - // is not point in Vault maintaining this secret anymore. Every time the command - // is run, a fresh credential is generated anyways. - err = client.Sys().Revoke(keySecret.LeaseID) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error revoking the key: %q", err)) + default: + c.Ui.Error(fmt.Sprintf("Unknown SSH mode: %s", c.mode)) + return 1 } return 0 } +// handleTypeCA is used to handle SSH logins using the "CA" key type. 
+func (c *SSHCommand) handleTypeCA() error { + // Read the key from disk + publicKey, err := ioutil.ReadFile(c.publicKeyPath) + if err != nil { + return errors.Wrap(err, "failed to read public key") + } + + // Attempt to sign the public key + secret, err := c.sshClient.SignKey(c.role, map[string]interface{}{ + // WARNING: publicKey is []byte, which is b64 encoded on JSON upload. We + // have to convert it to a string. SV lost many hours to this... + "public_key": string(publicKey), + "valid_principals": c.username, + "cert_type": "user", + + // TODO: let the user configure these. In the interim, if users want to + // customize these values, they can produce the key themselves. + "extensions": map[string]string{ + "permit-X11-forwarding": "", + "permit-agent-forwarding": "", + "permit-port-forwarding": "", + "permit-pty": "", + "permit-user-rc": "", + }, + }) + if err != nil { + return errors.Wrap(err, "failed to sign public key") + } + if secret == nil || secret.Data == nil { + return fmt.Errorf("client signing returned empty credentials") + } + + // Handle no-exec + if c.noExec { + // This is hacky, but OutputSecret returns an int, not an error :( + if i := OutputSecret(c.Ui, c.format, secret); i != 0 { + return fmt.Errorf("an error occurred outputting the secret") + } + return nil + } + + // Extract public key + key, ok := secret.Data["signed_key"].(string) + if !ok { + return fmt.Errorf("missing signed key") + } + + // Capture the current value - this could be overwritten later if the user + // enabled host key signing verification. + userKnownHostsFile := c.userKnownHostsFile + strictHostKeyChecking := c.strictHostKeyChecking + + // Handle host key signing verification. If the user specified a mount point, + // download the public key, trust it with the given domains, and use that + // instead of the user's regular known_hosts file. 
+ if c.hostKeyMountPoint != "" { + secret, err := c.client.Logical().Read(c.hostKeyMountPoint + "/config/ca") + if err != nil { + return errors.Wrap(err, "failed to get host signing key") + } + if secret == nil || secret.Data == nil { + return fmt.Errorf("missing host signing key") + } + publicKey, ok := secret.Data["public_key"].(string) + if !ok { + return fmt.Errorf("host signing key is empty") + } + + // Write the known_hosts file + name := fmt.Sprintf("vault_ssh_ca_known_hosts_%s_%s", c.username, c.ip) + data := fmt.Sprintf("@cert-authority %s %s", c.hostKeyHostnames, publicKey) + knownHosts, err, closer := c.writeTemporaryFile(name, []byte(data), 0644) + defer closer() + if err != nil { + return errors.Wrap(err, "failed to write host public key") + } + + // Update the variables + userKnownHostsFile = knownHosts + strictHostKeyChecking = "yes" + } + + // Write the signed public key to disk + name := fmt.Sprintf("vault_ssh_ca_%s_%s", c.username, c.ip) + signedPublicKeyPath, err, closer := c.writeTemporaryKey(name, []byte(key)) + defer closer() + if err != nil { + return errors.Wrap(err, "failed to write signed public key") + } + + args := append([]string{ + "-i", c.privateKeyPath, + "-i", signedPublicKeyPath, + "-o UserKnownHostsFile=" + userKnownHostsFile, + "-o StrictHostKeyChecking=" + strictHostKeyChecking, + c.username + "@" + c.ip, + }, c.sshArgs...) + + cmd := exec.Command("ssh", args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + return errors.Wrap(err, "failed to run ssh command") + } + + // There is no secret to revoke, since it's a certificate signing + + return nil +} + +// handleTypeOTP is used to handle SSH logins using the "otp" key type. 
+func (c *SSHCommand) handleTypeOTP() error { + secret, cred, err := c.generateCredential() + if err != nil { + return errors.Wrap(err, "failed to generate credential") + } + + // Handle no-exec + if c.noExec { + // This is hacky, but OutputSecret returns an int, not an error :( + if i := OutputSecret(c.Ui, c.format, secret); i != 0 { + return fmt.Errorf("an error occurred outputting the secret") + } + return nil + } + + var cmd *exec.Cmd + + // Check if the application 'sshpass' is installed in the client machine. + // If it is then, use it to automate typing in OTP to the prompt. Unfortunately, + // it was not possible to automate it without a third-party application, with + // only the Go libraries. + // Feel free to try and remove this dependency. + sshpassPath, err := exec.LookPath("sshpass") + if err != nil { + c.Ui.Warn("" + + "Vault could not locate sshpass. The OTP code for the session will be\n" + + "displayed below. Enter this code in the SSH password prompt. If you\n" + + "install sshpass, Vault can automatically perform this step for you.") + c.Ui.Output("OTP for the session is " + cred.Key) + + args := append([]string{ + "-o UserKnownHostsFile=" + c.userKnownHostsFile, + "-o StrictHostKeyChecking=" + c.strictHostKeyChecking, + "-p", cred.Port, + c.username + "@" + c.ip, + }, c.sshArgs...) + cmd = exec.Command("ssh", args...) + } else { + args := append([]string{ + "-e", // Read password for SSHPASS environment variable + "ssh", + "-o UserKnownHostsFile=" + c.userKnownHostsFile, + "-o StrictHostKeyChecking=" + c.strictHostKeyChecking, + "-p", cred.Port, + c.username + "@" + c.ip, + }, c.sshArgs...) + cmd = exec.Command(sshpassPath, args...) 
+ env := os.Environ() + env = append(env, fmt.Sprintf("SSHPASS=%s", string(cred.Key))) + cmd.Env = env + } + + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + return errors.Wrap(err, "failed to run ssh command") + } + + // Revoke the key if it's longer than expected + if err := c.client.Sys().Revoke(secret.LeaseID); err != nil { + return errors.Wrap(err, "failed to revoke key") + } + + return nil +} + +// handleTypeDynamic is used to handle SSH logins using the "dyanmic" key type. +func (c *SSHCommand) handleTypeDynamic() error { + // Generate the credential + secret, cred, err := c.generateCredential() + if err != nil { + return errors.Wrap(err, "failed to generate credential") + } + + // Handle no-exec + if c.noExec { + // This is hacky, but OutputSecret returns an int, not an error :( + if i := OutputSecret(c.Ui, c.format, secret); i != 0 { + return fmt.Errorf("an error occurred outputting the secret") + } + return nil + } + + // Write the dynamic key to disk + name := fmt.Sprintf("vault_ssh_dynamic_%s_%s", c.username, c.ip) + keyPath, err, closer := c.writeTemporaryKey(name, []byte(cred.Key)) + defer closer() + if err != nil { + return errors.Wrap(err, "failed to save dyanmic key") + } + + args := append([]string{ + "-i", keyPath, + "-o UserKnownHostsFile=" + c.userKnownHostsFile, + "-o StrictHostKeyChecking=" + c.strictHostKeyChecking, + "-p", cred.Port, + c.username + "@" + c.ip, + }, c.sshArgs...) + + cmd := exec.Command("ssh", args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + return errors.Wrap(err, "failed to run ssh command") + } + + // Revoke the key if it's longer than expected + if err := c.client.Sys().Revoke(secret.LeaseID); err != nil { + return errors.Wrap(err, "failed to revoke key") + } + + return nil +} + +// generateCredential generates a credential for the given role and returns the +// decoded secret data. 
+func (c *SSHCommand) generateCredential() (*api.Secret, *SSHCredentialResp, error) { + // Attempt to generate the credential. + secret, err := c.sshClient.Credential(c.role, map[string]interface{}{ + "username": c.username, + "ip": c.ip, + }) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to get credentials") + } + if secret == nil || secret.Data == nil { + return nil, nil, fmt.Errorf("vault returned empty credentials") + } + + // Port comes back as a json.Number which mapstructure doesn't like, so + // convert it + if d, ok := secret.Data["port"].(json.Number); ok { + secret.Data["port"] = d.String() + } + + // Use mapstructure to decode the response + var resp SSHCredentialResp + if err := mapstructure.Decode(secret.Data, &resp); err != nil { + return nil, nil, errors.Wrap(err, "failed to decode credential") + } + + // Check for an empty key response + if len(resp.Key) == 0 { + return nil, nil, fmt.Errorf("vault returned an invalid key") + } + + return secret, &resp, nil +} + +// writeTemporaryFile writes a file to a temp location with the given data and +// file permissions. +func (c *SSHCommand) writeTemporaryFile(name string, data []byte, perms os.FileMode) (string, error, func() error) { + // default closer to prevent panic + closer := func() error { return nil } + + f, err := ioutil.TempFile("", name) + if err != nil { + return "", errors.Wrap(err, "creating temporary file"), closer + } + + closer = func() error { return os.Remove(f.Name()) } + + if err := ioutil.WriteFile(f.Name(), data, perms); err != nil { + return "", errors.Wrap(err, "writing temporary key"), closer + } + + return f.Name(), nil, closer +} + +// writeTemporaryKey writes the key to a temporary file and returns the path. +// The caller should defer the closer to cleanup the key. 
+func (c *SSHCommand) writeTemporaryKey(name string, data []byte) (string, error, func() error) { + return c.writeTemporaryFile(name, data, 0600) +} + // If user did not provide the role with which SSH connection has // to be established and if there is only one role associated with // the IP, it is used by default. @@ -247,7 +523,7 @@ func (c *SSHCommand) defaultRole(mountPoint, ip string) (string, error) { return "", fmt.Errorf("Error finding roles for IP %q: %q", ip, err) } - if secret == nil { + if secret == nil || secret.Data == nil { return "", fmt.Errorf("Error finding roles for IP %q: %q", ip, err) } @@ -270,61 +546,136 @@ func (c *SSHCommand) defaultRole(mountPoint, ip string) (string, error) { } } +// userAndIP takes an argument in the format foo@1.2.3.4 and separates the IP +// and user parts, returning any errors. +func (c *SSHCommand) userAndIP(s string) (string, string, error) { + // split the parameter username@ip + input := strings.Split(s, "@") + var username, address string + + // If only IP is mentioned and username is skipped, assume username to + // be the current username. Vault SSH role's default username could have + // been used, but in order to retain the consistency with SSH command, + // current username is employed. + switch len(input) { + case 1: + u, err := user.Current() + if err != nil { + return "", "", errors.Wrap(err, "failed to fetch current user") + } + username, address = u.Username, input[0] + case 2: + username, address = input[0], input[1] + default: + return "", "", fmt.Errorf("invalid arguments: %q", s) + } + + // Resolving domain names to IP address on the client side. + // Vault only deals with IP addresses. 
+ ipAddr, err := net.ResolveIPAddr("ip", address) + if err != nil { + return "", "", errors.Wrap(err, "failed to resolve IP address") + } + ip := ipAddr.String() + + return username, ip, nil +} + func (c *SSHCommand) Synopsis() string { return "Initiate an SSH session" } func (c *SSHCommand) Help() string { helpText := ` -Usage: vault ssh [options] username@ip +Usage: vault ssh [options] username@ip [ssh options] Establishes an SSH connection with the target machine. - This command generates a key and uses it to establish an SSH - connection with the target machine. This operation requires - that the SSH backend is mounted and at least one 'role' is - registered with Vault beforehand. + This command uses one of the SSH authentication backends to authenticate and + automatically establish an SSH connection to a host. This operation requires + that the SSH backend is mounted and configured. - For setting up SSH backends with one-time-passwords, installation - of vault-ssh-helper or a compatible agent on target machines - is required. See [https://github.com/hashicorp/vault-ssh-agent]. + SSH using the OTP mode (requires sshpass for full automation): + + $ vault ssh -mode=otp -role=my-role user@1.2.3.4 + + SSH using the CA mode: + + $ vault ssh -mode=ca -role=my-role user@1.2.3.4 + + SSH using CA mode with host key verification: + + $ vault ssh \ + -mode=ca \ + -role=my-role \ + -host-key-mount-point=host-signer \ + -host-key-hostnames=example.com \ + user@example.com + + For the full list of options and arguments, please see the documentation. General Options: ` + meta.GeneralOptionsUsage() + ` SSH Options: - -role Role to be used to create the key. - Each IP is associated with a role. To see the associated - roles with IP, use "lookup" endpoint. If you are certain - that there is only one role associated with the IP, you can - skip mentioning the role. It will be chosen by default. 
If - there are no roles associated with the IP, register the - CIDR block of that IP using the "roles/" endpoint. + -role Role to be used to create the key. Each IP is associated with + a role. To see the associated roles with IP, use "lookup" + endpoint. If you are certain that there is only one role + associated with the IP, you can skip mentioning the role. It + will be chosen by default. If there are no roles associated + with the IP, register the CIDR block of that IP using the + "roles/" endpoint. - -no-exec Shows the credentials but does not establish connection. + -no-exec Shows the credentials but does not establish connection. - -mount-point Mount point of SSH backend. If the backend is mounted at - 'ssh', which is the default as well, this parameter can be - skipped. + -mount-point Mount point of SSH backend. If the backend is mounted at + "ssh" (default), this parameter can be skipped. - -format If no-exec option is enabled, then the credentials will be - printed out and SSH connection will not be established. The - format of the output can be 'json' or 'table'. JSON output - is useful when writing scripts. Default is 'table'. + -format If the "no-exec" option is enabled, the credentials will be + printed out and SSH connection will not be established. The + format of the output can be "json" or "table" (default). - -strict-host-key-checking This option corresponds to StrictHostKeyChecking of SSH configuration. - If 'sshpass' is employed to enable automated login, then if host key - is not "known" to the client, 'vault ssh' command will fail. Set this - option to "no" to bypass the host key checking. Defaults to "ask". - Can also be specified with VAULT_SSH_STRICT_HOST_KEY_CHECKING environment - variable. + -strict-host-key-checking This option corresponds to "StrictHostKeyChecking" + of SSH configuration. If "sshpass" is employed to enable + automated login, then if host key is not "known" to the + client, "vault ssh" command will fail. 
Set this option to + "no" to bypass the host key checking. Defaults to "ask". + Can also be specified with the + "VAULT_SSH_STRICT_HOST_KEY_CHECKING" environment variable. - -user-known-hosts-file This option corresponds to UserKnownHostsFile of SSH configuration. - Assigns the file to use for storing the host keys. If this option is - set to "/dev/null" along with "-strict-host-key-checking=no", both - warnings and host key checking can be avoided while establishing the - connection. Defaults to "~/.ssh/known_hosts". Can also be specified - with VAULT_SSH_USER_KNOWN_HOSTS_FILE environment variable. + -user-known-hosts-file This option corresponds to "UserKnownHostsFile" of + SSH configuration. Assigns the file to use for storing the + host keys. If this option is set to "/dev/null" along with + "-strict-host-key-checking=no", both warnings and host key + checking can be avoided while establishing the connection. + Defaults to "~/.ssh/known_hosts". Can also be specified with + "VAULT_SSH_USER_KNOWN_HOSTS_FILE" environment variable. + +CA Mode Options: + + - public-key-path= + The path to the public key to send to Vault for signing. The default value + is ~/.ssh/id_rsa.pub. + + - private-key-path= + The path to the private key to use for authentication. This must be the + corresponding private key to -public-key-path. The default value is + ~/.ssh/id_rsa. + + - host-key-mount-point= + The mount point to the SSH backend where host keys are signed. When given + a value, Vault will generate a custom known_hosts file with delegation to + the CA at the provided mount point and verify the SSH connection's host + keys against the provided CA. By default, this command uses the users's + existing known_hosts file. When this flag is set, this command will force + strict host key checking and will override any values provided for a + custom -user-known-hosts-file. + + - host-key-hostnames= + The list of hostnames to delegate for this certificate authority. 
By + default, this is "*", which allows all domains and IPs. To restrict + validation to a series of hostnames, specify them as comma-separated + values here. ` return strings.TrimSpace(helpText) } diff --git a/vendor/github.com/hashicorp/vault/command/status.go b/vendor/github.com/hashicorp/vault/command/status.go index 3d584c7..7b6cce3 100644 --- a/vendor/github.com/hashicorp/vault/command/status.go +++ b/vendor/github.com/hashicorp/vault/command/status.go @@ -84,7 +84,10 @@ func (c *StatusCommand) Run(args []string) int { if leaderStatus.LeaderAddress == "" { leaderStatus.LeaderAddress = "" } - c.Ui.Output(fmt.Sprintf("\tLeader: %s", leaderStatus.LeaderAddress)) + if leaderStatus.LeaderClusterAddress == "" { + leaderStatus.LeaderClusterAddress = "" + } + c.Ui.Output(fmt.Sprintf("\tLeader Cluster Address: %s", leaderStatus.LeaderClusterAddress)) } } diff --git a/vendor/github.com/hashicorp/vault/command/util.go b/vendor/github.com/hashicorp/vault/command/util.go index 0ec3916..1eefc92 100644 --- a/vendor/github.com/hashicorp/vault/command/util.go +++ b/vendor/github.com/hashicorp/vault/command/util.go @@ -57,6 +57,8 @@ func PrintRawField(ui cli.Ui, secret *api.Secret, field string) int { val = secret.WrapInfo.TTL case "wrapping_token_creation_time": val = secret.WrapInfo.CreationTime.Format(time.RFC3339Nano) + case "wrapping_token_creation_path": + val = secret.WrapInfo.CreationPath case "wrapped_accessor": val = secret.WrapInfo.WrappedAccessor default: diff --git a/vendor/github.com/hashicorp/vault/command/write.go b/vendor/github.com/hashicorp/vault/command/write.go index 0614f9b..6f7b495 100644 --- a/vendor/github.com/hashicorp/vault/command/write.go +++ b/vendor/github.com/hashicorp/vault/command/write.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/vault/helper/kv-builder" "github.com/hashicorp/vault/meta" + "github.com/posener/complete" ) // WriteCommand is a Command that puts data into the Vault. 
@@ -32,6 +33,12 @@ func (c *WriteCommand) Run(args []string) int { } args = flags.Args() + if len(args) < 1 { + c.Ui.Error("write requires a path") + flags.Usage() + return 1 + } + if len(args) < 2 && !force { c.Ui.Error("write expects at least two arguments; use -f to perform the write anyways") flags.Usage() @@ -133,3 +140,15 @@ Write Options: ` return strings.TrimSpace(helpText) } + +func (c *WriteCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *WriteCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-force": complete.PredictNothing, + "-format": predictFormat, + "-field": complete.PredictNothing, + } +} diff --git a/vendor/github.com/hashicorp/vault/command/write_test.go b/vendor/github.com/hashicorp/vault/command/write_test.go index 786bbc3..5aa3c1e 100644 --- a/vendor/github.com/hashicorp/vault/command/write_test.go +++ b/vendor/github.com/hashicorp/vault/command/write_test.go @@ -243,8 +243,8 @@ func TestWrite_Output(t *testing.T) { if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } - if !strings.Contains(string(ui.OutputWriter.Bytes()), "Key") { - t.Fatalf("bad: %s", string(ui.OutputWriter.Bytes())) + if !strings.Contains(ui.OutputWriter.String(), "Key") { + t.Fatalf("bad: %s", ui.OutputWriter.String()) } } diff --git a/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go b/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go index 7399a5c..6b18968 100644 --- a/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go +++ b/vendor/github.com/hashicorp/vault/helper/awsutil/generate_credentials.go @@ -6,9 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/aws/defaults" ) type 
CredentialsConfig struct { @@ -65,14 +63,16 @@ func (c *CredentialsConfig) GenerateCredentialChain() (*credentials.Credentials, Profile: c.Profile, }) - // Add the instance metadata role provider - providers = append(providers, &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.New(session.New(&aws.Config{ - Region: aws.String(c.Region), - HTTPClient: c.HTTPClient, - })), - ExpiryWindow: 15, - }) + // Add the remote provider + def := defaults.Get() + if c.Region != "" { + def.Config.Region = aws.String(c.Region) + } + if c.HTTPClient != nil { + def.Config.HTTPClient = c.HTTPClient + } + + providers = append(providers, defaults.RemoteCredProvider(*def.Config, def.Handlers)) // Create the credentials required to access the API. creds := credentials.NewChainCredentials(providers) diff --git a/vendor/github.com/hashicorp/vault/helper/builtinplugins/builtin.go b/vendor/github.com/hashicorp/vault/helper/builtinplugins/builtin.go new file mode 100644 index 0000000..df424ce --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/builtinplugins/builtin.go @@ -0,0 +1,50 @@ +package builtinplugins + +import ( + "github.com/hashicorp/vault/plugins/database/cassandra" + "github.com/hashicorp/vault/plugins/database/hana" + "github.com/hashicorp/vault/plugins/database/mongodb" + "github.com/hashicorp/vault/plugins/database/mssql" + "github.com/hashicorp/vault/plugins/database/mysql" + "github.com/hashicorp/vault/plugins/database/postgresql" + "github.com/hashicorp/vault/plugins/helper/database/credsutil" +) + +// BuiltinFactory is the func signature that should be returned by +// the plugin's New() func. +type BuiltinFactory func() (interface{}, error) + +var plugins = map[string]BuiltinFactory{ + // These four plugins all use the same mysql implementation but with + // different username settings passed by the constructor. 
+ "mysql-database-plugin": mysql.New(mysql.MetadataLen, mysql.MetadataLen, mysql.UsernameLen), + "mysql-aurora-database-plugin": mysql.New(credsutil.NoneLength, mysql.LegacyMetadataLen, mysql.LegacyUsernameLen), + "mysql-rds-database-plugin": mysql.New(credsutil.NoneLength, mysql.LegacyMetadataLen, mysql.LegacyUsernameLen), + "mysql-legacy-database-plugin": mysql.New(credsutil.NoneLength, mysql.LegacyMetadataLen, mysql.LegacyUsernameLen), + + "postgresql-database-plugin": postgresql.New, + "mssql-database-plugin": mssql.New, + "cassandra-database-plugin": cassandra.New, + "mongodb-database-plugin": mongodb.New, + "hana-database-plugin": hana.New, +} + +// Get returns the BuiltinFactory func for a particular backend plugin +// from the plugins map. +func Get(name string) (BuiltinFactory, bool) { + f, ok := plugins[name] + return f, ok +} + +// Keys returns the list of plugin names that are considered builtin plugins. +func Keys() []string { + keys := make([]string, len(plugins)) + + i := 0 + for k := range plugins { + keys[i] = k + i++ + } + + return keys +} diff --git a/vendor/github.com/hashicorp/vault/helper/certutil/types.go b/vendor/github.com/hashicorp/vault/helper/certutil/types.go index 35b7317..c955222 100644 --- a/vendor/github.com/hashicorp/vault/helper/certutil/types.go +++ b/vendor/github.com/hashicorp/vault/helper/certutil/types.go @@ -286,7 +286,7 @@ func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) { } // Verify checks if the parsed bundle is valid. It validates the public -// key of the certificate to the private key and checks the certficate trust +// key of the certificate to the private key and checks the certificate trust // chain for path issues. 
func (p *ParsedCertBundle) Verify() error { // If private key exists, check if it matches the public key of cert diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go index e485f2f..31a2dcd 100644 --- a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go +++ b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go @@ -6,6 +6,8 @@ import ( "compress/lzw" "fmt" "io" + + "github.com/golang/snappy" ) const ( @@ -20,16 +22,35 @@ const ( // Byte value used as canary when using Lzw format CompressionCanaryLzw byte = 'L' + // Byte value used as canary when using Snappy format + CompressionCanarySnappy byte = 'S' + CompressionTypeLzw = "lzw" CompressionTypeGzip = "gzip" + + CompressionTypeSnappy = "snappy" ) +// SnappyReadCloser embeds the snappy reader which implements the io.Reader +// interface. The decompress procedure in this utility expectes an +// io.ReadCloser. This type implements the io.Closer interface to retain the +// generic way of decompression. +type SnappyReadCloser struct { + *snappy.Reader +} + +// Close is a noop method implemented only to satisfy the io.Closer interface +func (s *SnappyReadCloser) Close() error { + return nil +} + // CompressionConfig is used to select a compression type to be performed by // Compress and Decompress utilities. 
// Supported types are: // * CompressionTypeLzw // * CompressionTypeGzip +// * CompressionTypeSnappy // // When using CompressionTypeGzip, the compression levels can also be chosen: // * gzip.DefaultCompression @@ -78,9 +99,13 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) { config.GzipCompressionLevel = gzip.DefaultCompression } writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel) + case CompressionTypeSnappy: + buf.Write([]byte{CompressionCanarySnappy}) + writer = snappy.NewBufferedWriter(&buf) default: return nil, fmt.Errorf("unsupported compression type") } + if err != nil { return nil, fmt.Errorf("failed to create a compression writer; err: %v", err) } @@ -117,22 +142,29 @@ func Decompress(data []byte) ([]byte, bool, error) { } switch { + // If the first byte matches the canary byte, remove the canary + // byte and try to decompress the data that is after the canary. case data[0] == CompressionCanaryGzip: - // If the first byte matches the canary byte, remove the canary - // byte and try to decompress the data that is after the canary. if len(data) < 2 { return nil, false, fmt.Errorf("invalid 'data' after the canary") } data = data[1:] reader, err = gzip.NewReader(bytes.NewReader(data)) case data[0] == CompressionCanaryLzw: - // If the first byte matches the canary byte, remove the canary - // byte and try to decompress the data that is after the canary. if len(data) < 2 { return nil, false, fmt.Errorf("invalid 'data' after the canary") } data = data[1:] reader = lzw.NewReader(bytes.NewReader(data), lzw.LSB, 8) + + case data[0] == CompressionCanarySnappy: + if len(data) < 2 { + return nil, false, fmt.Errorf("invalid 'data' after the canary") + } + data = data[1:] + reader = &SnappyReadCloser{ + Reader: snappy.NewReader(bytes.NewReader(data)), + } default: // If the first byte doesn't match the canary byte, it means // that the content was not compressed at all. 
Indicate the diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go index 52b03d5..5eeeea8 100644 --- a/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go +++ b/vendor/github.com/hashicorp/vault/helper/compressutil/compress_test.go @@ -7,6 +7,47 @@ import ( "testing" ) +func TestCompressUtil_CompressSnappy(t *testing.T) { + input := map[string]interface{}{ + "sample": "data", + "verification": "process", + } + + // Encode input into JSON + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + if err := enc.Encode(input); err != nil { + t.Fatal(err) + } + inputJSONBytes := buf.Bytes() + + // Set Snappy compression in the configuration + compressionConfig := &CompressionConfig{ + Type: CompressionTypeSnappy, + } + + // Compress the input + compressedJSONBytes, err := Compress(inputJSONBytes, compressionConfig) + if err != nil { + t.Fatal(err) + } + + decompressedJSONBytes, wasNotCompressed, err := Decompress(compressedJSONBytes) + if err != nil { + t.Fatal(err) + } + + // Check if the input for decompress was not compressed in the first place + if wasNotCompressed { + t.Fatalf("bad: expected compressed bytes") + } + + // Compare the value after decompression + if string(inputJSONBytes) != string(decompressedJSONBytes) { + t.Fatalf("bad: decompressed value;\nexpected: %q\nactual: %q", string(inputJSONBytes), string(decompressedJSONBytes)) + } +} + func TestCompressUtil_CompressDecompress(t *testing.T) { input := map[string]interface{}{ "sample": "data", diff --git a/vendor/github.com/hashicorp/vault/helper/consts/error.go b/vendor/github.com/hashicorp/vault/helper/consts/error.go index d96ba4f..06977d5 100644 --- a/vendor/github.com/hashicorp/vault/helper/consts/error.go +++ b/vendor/github.com/hashicorp/vault/helper/consts/error.go @@ -10,4 +10,7 @@ var ( // ErrStandby is returned if an operation is performed on a standby Vault. 
// No operation is expected to succeed until active. ErrStandby = errors.New("Vault is in standby mode") + + // Used when .. is used in a path + ErrPathContainsParentReferences = errors.New("path cannot contain parent references") ) diff --git a/vendor/github.com/hashicorp/vault/helper/consts/replication.go b/vendor/github.com/hashicorp/vault/helper/consts/replication.go index 62bbcb3..7fbeb88 100644 --- a/vendor/github.com/hashicorp/vault/helper/consts/replication.go +++ b/vendor/github.com/hashicorp/vault/helper/consts/replication.go @@ -3,18 +3,37 @@ package consts type ReplicationState uint32 const ( - ReplicationDisabled ReplicationState = iota - ReplicationPrimary - ReplicationSecondary + _ ReplicationState = iota + OldReplicationPrimary + OldReplicationSecondary + OldReplicationBootstrapping + + ReplicationDisabled ReplicationState = 0 + ReplicationPerformancePrimary ReplicationState = 1 << iota + ReplicationPerformanceSecondary + ReplicationBootstrapping + ReplicationDRPrimary + ReplicationDRSecondary ) func (r ReplicationState) String() string { switch r { - case ReplicationSecondary: - return "secondary" - case ReplicationPrimary: - return "primary" + case ReplicationPerformanceSecondary: + return "perf-secondary" + case ReplicationPerformancePrimary: + return "perf-primary" + case ReplicationBootstrapping: + return "bootstrapping" + case ReplicationDRPrimary: + return "dr-primary" + case ReplicationDRSecondary: + return "dr-secondary" } return "disabled" } + +func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 } +func (r *ReplicationState) AddState(flag ReplicationState) { *r |= flag } +func (r *ReplicationState) ClearState(flag ReplicationState) { *r &= ^flag } +func (r *ReplicationState) ToggleState(flag ReplicationState) { *r ^= flag } diff --git a/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go b/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go index 27ca3fc..d146a37 100644 --- 
a/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go +++ b/vendor/github.com/hashicorp/vault/helper/forwarding/types.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: types.proto -// DO NOT EDIT! /* Package forwarding is a generated protocol buffer package. diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go b/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go index e0bdd64..7588199 100644 --- a/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go +++ b/vendor/github.com/hashicorp/vault/helper/keysutil/lock_manager.go @@ -243,15 +243,24 @@ func (lm *LockManager) getPolicyCommon(req PolicyRequest, lockType bool) (*Polic switch req.KeyType { case KeyType_AES256_GCM96: if req.Convergent && !req.Derived { + lm.UnlockPolicy(lock, lockType) return nil, nil, false, fmt.Errorf("convergent encryption requires derivation to be enabled") } case KeyType_ECDSA_P256: if req.Derived || req.Convergent { - return nil, nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", KeyType_ECDSA_P256) + lm.UnlockPolicy(lock, lockType) + return nil, nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) + } + + case KeyType_ED25519: + if req.Convergent { + lm.UnlockPolicy(lock, lockType) + return nil, nil, false, fmt.Errorf("convergent encryption not not supported for keys of type %v", req.KeyType) } default: + lm.UnlockPolicy(lock, lockType) return nil, nil, false, fmt.Errorf("unsupported key type %v", req.KeyType) } diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go b/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go index 1faaca4..7c4a691 100644 --- a/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go +++ b/vendor/github.com/hashicorp/vault/helper/keysutil/policy.go @@ -2,6 +2,7 @@ package keysutil import ( 
"bytes" + "crypto" "crypto/aes" "crypto/cipher" "crypto/ecdsa" @@ -21,6 +22,7 @@ import ( "strings" "time" + "golang.org/x/crypto/ed25519" "golang.org/x/crypto/hkdf" uuid "github.com/hashicorp/go-uuid" @@ -41,10 +43,16 @@ const ( const ( KeyType_AES256_GCM96 = iota KeyType_ECDSA_P256 + KeyType_ED25519 ) const ErrTooOld = "ciphertext or signature version is disallowed by policy (too old)" +type SigningResult struct { + Signature string + PublicKey []byte +} + type ecdsaSignature struct { R, S *big.Int } @@ -68,6 +76,14 @@ func (kt KeyType) DecryptionSupported() bool { } func (kt KeyType) SigningSupported() bool { + switch kt { + case KeyType_ECDSA_P256, KeyType_ED25519: + return true + } + return false +} + +func (kt KeyType) HashSignatureInput() bool { switch kt { case KeyType_ECDSA_P256: return true @@ -77,7 +93,7 @@ func (kt KeyType) SigningSupported() bool { func (kt KeyType) DerivationSupported() bool { switch kt { - case KeyType_AES256_GCM96: + case KeyType_AES256_GCM96, KeyType_ED25519: return true } return false @@ -89,6 +105,8 @@ func (kt KeyType) String() string { return "aes256-gcm96" case KeyType_ECDSA_P256: return "ecdsa-p256" + case KeyType_ED25519: + return "ed25519" } return "[unknown]" @@ -96,13 +114,25 @@ func (kt KeyType) String() string { // KeyEntry stores the key and metadata type KeyEntry struct { - AESKey []byte `json:"key"` - HMACKey []byte `json:"hmac_key"` - CreationTime int64 `json:"creation_time"` - EC_X *big.Int `json:"ec_x"` - EC_Y *big.Int `json:"ec_y"` - EC_D *big.Int `json:"ec_d"` - FormattedPublicKey string `json:"public_key"` + // AES or some other kind that is a pure byte slice like ED25519 + Key []byte `json:"key"` + + // Key used for HMAC functions + HMACKey []byte `json:"hmac_key"` + + // Time of creation + CreationTime time.Time `json:"time"` + + EC_X *big.Int `json:"ec_x"` + EC_Y *big.Int `json:"ec_y"` + EC_D *big.Int `json:"ec_d"` + + // The public key in an appropriate format for the type of key + FormattedPublicKey string 
`json:"public_key"` + + // This is deprecated (but still filled) in favor of the value above which + // is more precise + DeprecatedCreationTime int64 `json:"creation_time"` } // keyEntryMap is used to allow JSON marshal/unmarshal @@ -150,10 +180,12 @@ type Policy struct { // Whether the key is exportable Exportable bool `json:"exportable"` - // The minimum version of the key allowed to be used - // for decryption + // The minimum version of the key allowed to be used for decryption MinDecryptionVersion int `json:"min_decryption_version"` + // The minimum version of the key allowed to be used for encryption + MinEncryptionVersion int `json:"min_encryption_version"` + // The latest key version in this policy LatestVersion int `json:"latest_version"` @@ -239,6 +271,9 @@ func (p *Policy) handleArchiving(storage logical.Storage) error { case p.ArchiveVersion > p.LatestVersion: return fmt.Errorf("archive version of %d is greater than the latest version %d", p.ArchiveVersion, p.LatestVersion) + case p.MinEncryptionVersion > 0 && p.MinEncryptionVersion < p.MinDecryptionVersion: + return fmt.Errorf("minimum decryption version of %d is greater than minimum encryption version %d", + p.MinDecryptionVersion, p.MinEncryptionVersion) case p.MinDecryptionVersion > p.LatestVersion: return fmt.Errorf("minimum decryption version of %d is greater than the latest version %d", p.MinDecryptionVersion, p.LatestVersion) @@ -427,41 +462,59 @@ func (p *Policy) DeriveKey(context []byte, ver int) ([]byte, error) { // Fast-path non-derived keys if !p.Derived { - return p.Keys[ver].AESKey, nil + return p.Keys[ver].Key, nil } // Ensure a context is provided if len(context) == 0 { - return nil, errutil.UserError{Err: "missing 'context' for key deriviation. 
The key was created using a derived key, which means additional, per-request information must be included in order to encrypt or decrypt information"} + return nil, errutil.UserError{Err: "missing 'context' for key derivation; the key was created using a derived key, which means additional, per-request information must be included in order to perform operations with the key"} } switch p.KDF { case Kdf_hmac_sha256_counter: prf := kdf.HMACSHA256PRF prfLen := kdf.HMACSHA256PRFLen - return kdf.CounterMode(prf, prfLen, p.Keys[ver].AESKey, context, 256) + return kdf.CounterMode(prf, prfLen, p.Keys[ver].Key, context, 256) + case Kdf_hkdf_sha256: - reader := hkdf.New(sha256.New, p.Keys[ver].AESKey, nil, context) + reader := hkdf.New(sha256.New, p.Keys[ver].Key, nil, context) derBytes := bytes.NewBuffer(nil) derBytes.Grow(32) limReader := &io.LimitedReader{ R: reader, N: 32, } - n, err := derBytes.ReadFrom(limReader) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("error reading returned derived bytes: %v", err)} + + switch p.Type { + case KeyType_AES256_GCM96: + n, err := derBytes.ReadFrom(limReader) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error reading returned derived bytes: %v", err)} + } + if n != 32 { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to read enough derived bytes, needed 32, got %d", n)} + } + return derBytes.Bytes(), nil + + case KeyType_ED25519: + // We use the limited reader containing the derived bytes as the + // "random" input to the generation function + _, pri, err := ed25519.GenerateKey(limReader) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error generating derived key: %v", err)} + } + return pri, nil + + default: + return nil, errutil.InternalError{Err: "unsupported key type for derivation"} } - if n != 32 { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to read enough derived bytes, needed 32, got %d", n)} - } - return 
derBytes.Bytes(), nil + default: return nil, errutil.InternalError{Err: "unsupported key derivation mode"} } } -func (p *Policy) Encrypt(context, nonce []byte, value string) (string, error) { +func (p *Policy) Encrypt(ver int, context, nonce []byte, value string) (string, error) { if !p.Type.EncryptionSupported() { return "", errutil.UserError{Err: fmt.Sprintf("message encryption not supported for key type %v", p.Type)} } @@ -479,8 +532,19 @@ func (p *Policy) Encrypt(context, nonce []byte, value string) (string, error) { return "", errutil.UserError{Err: "failed to base64-decode plaintext"} } + switch { + case ver == 0: + ver = p.LatestVersion + case ver < 0: + return "", errutil.UserError{Err: "requested version for encryption is negative"} + case ver > p.LatestVersion: + return "", errutil.UserError{Err: "requested version for encryption is higher than the latest key version"} + case ver < p.MinEncryptionVersion: + return "", errutil.UserError{Err: "requested version for encryption is less than the minimum encryption key version"} + } + // Derive the key that should be used - key, err := p.DeriveKey(context, p.LatestVersion) + key, err := p.DeriveKey(context, ver) if err != nil { return "", err } @@ -537,7 +601,7 @@ func (p *Policy) Encrypt(context, nonce []byte, value string) (string, error) { encoded := base64.StdEncoding.EncodeToString(full) // Prepend some information - encoded = "vault:v" + strconv.Itoa(p.LatestVersion) + ":" + encoded + encoded = "vault:v" + strconv.Itoa(ver) + ":" + encoded return encoded, nil } @@ -630,11 +694,10 @@ func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) { } func (p *Policy) HMACKey(version int) ([]byte, error) { - if version < p.MinDecryptionVersion { - return nil, fmt.Errorf("key version disallowed by policy (minimum is %d)", p.MinDecryptionVersion) - } - - if version > p.LatestVersion { + switch { + case version < 0: + return nil, fmt.Errorf("key version does not exist (cannot be negative)") + 
case version > p.LatestVersion: return nil, fmt.Errorf("key version does not exist; latest key version is %d", p.LatestVersion) } @@ -645,15 +708,28 @@ func (p *Policy) HMACKey(version int) ([]byte, error) { return p.Keys[version].HMACKey, nil } -func (p *Policy) Sign(hashedInput []byte) (string, error) { +func (p *Policy) Sign(ver int, context, input []byte) (*SigningResult, error) { if !p.Type.SigningSupported() { - return "", fmt.Errorf("message signing not supported for key type %v", p.Type) + return nil, fmt.Errorf("message signing not supported for key type %v", p.Type) + } + + switch { + case ver == 0: + ver = p.LatestVersion + case ver < 0: + return nil, errutil.UserError{Err: "requested version for signing is negative"} + case ver > p.LatestVersion: + return nil, errutil.UserError{Err: "requested version for signing is higher than the latest key version"} + case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: + return nil, errutil.UserError{Err: "requested version for signing is less than the minimum encryption key version"} } var sig []byte + var pubKey []byte + var err error switch p.Type { case KeyType_ECDSA_P256: - keyParams := p.Keys[p.LatestVersion] + keyParams := p.Keys[ver] key := &ecdsa.PrivateKey{ PublicKey: ecdsa.PublicKey{ Curve: elliptic.P256(), @@ -662,33 +738,57 @@ func (p *Policy) Sign(hashedInput []byte) (string, error) { }, D: keyParams.EC_D, } - r, s, err := ecdsa.Sign(rand.Reader, key, hashedInput) + r, s, err := ecdsa.Sign(rand.Reader, key, input) if err != nil { - return "", err + return nil, err } marshaledSig, err := asn1.Marshal(ecdsaSignature{ R: r, S: s, }) if err != nil { - return "", err + return nil, err } sig = marshaledSig + case KeyType_ED25519: + var key ed25519.PrivateKey + + if p.Derived { + // Derive the key that should be used + var err error + key, err = p.DeriveKey(context, ver) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)} + } + pubKey = 
key.Public().(ed25519.PublicKey) + } else { + key = ed25519.PrivateKey(p.Keys[ver].Key) + } + + // Per docs, do not pre-hash ed25519; it does two passes and performs + // its own hashing + sig, err = key.Sign(rand.Reader, input, crypto.Hash(0)) + if err != nil { + return nil, err + } + default: - return "", fmt.Errorf("unsupported key type %v", p.Type) + return nil, fmt.Errorf("unsupported key type %v", p.Type) } // Convert to base64 encoded := base64.StdEncoding.EncodeToString(sig) - // Prepend some information - encoded = "vault:v" + strconv.Itoa(p.LatestVersion) + ":" + encoded + res := &SigningResult{ + Signature: "vault:v" + strconv.Itoa(ver) + ":" + encoded, + PublicKey: pubKey, + } - return encoded, nil + return res, nil } -func (p *Policy) VerifySignature(hashedInput []byte, sig string) (bool, error) { +func (p *Policy) VerifySignature(context, input []byte, sig string) (bool, error) { if !p.Type.SigningSupported() { return false, errutil.UserError{Err: fmt.Sprintf("message verification not supported for key type %v", p.Type)} } @@ -716,15 +816,15 @@ func (p *Policy) VerifySignature(hashedInput []byte, sig string) (bool, error) { return false, errutil.UserError{Err: ErrTooOld} } + sigBytes, err := base64.StdEncoding.DecodeString(splitVerSig[1]) + if err != nil { + return false, errutil.UserError{Err: "invalid base64 signature value"} + } + switch p.Type { case KeyType_ECDSA_P256: - asn1Sig, err := base64.StdEncoding.DecodeString(splitVerSig[1]) - if err != nil { - return false, errutil.UserError{Err: "invalid base64 signature value"} - } - var ecdsaSig ecdsaSignature - rest, err := asn1.Unmarshal(asn1Sig, &ecdsaSig) + rest, err := asn1.Unmarshal(sigBytes, &ecdsaSig) if err != nil { return false, errutil.UserError{Err: "supplied signature is invalid"} } @@ -739,7 +839,24 @@ func (p *Policy) VerifySignature(hashedInput []byte, sig string) (bool, error) { Y: keyParams.EC_Y, } - return ecdsa.Verify(key, hashedInput, ecdsaSig.R, ecdsaSig.S), nil + return 
ecdsa.Verify(key, input, ecdsaSig.R, ecdsaSig.S), nil + + case KeyType_ED25519: + var key ed25519.PrivateKey + + if p.Derived { + // Derive the key that should be used + var err error + key, err = p.DeriveKey(context, ver) + if err != nil { + return false, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)} + } + } else { + key = ed25519.PrivateKey(p.Keys[ver].Key) + } + + return ed25519.Verify(key.Public().(ed25519.PublicKey), input, sigBytes), nil + default: return false, errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} } @@ -756,8 +873,10 @@ func (p *Policy) Rotate(storage logical.Storage) error { } p.LatestVersion += 1 + now := time.Now() entry := KeyEntry{ - CreationTime: time.Now().Unix(), + CreationTime: now, + DeprecatedCreationTime: now.Unix(), } hmacKey, err := uuid.GenerateRandomBytes(32) @@ -773,7 +892,7 @@ func (p *Policy) Rotate(storage logical.Storage) error { if err != nil { return err } - entry.AESKey = newKey + entry.Key = newKey case KeyType_ECDSA_P256: privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) @@ -796,6 +915,14 @@ func (p *Policy) Rotate(storage logical.Storage) error { return fmt.Errorf("error PEM-encoding public key") } entry.FormattedPublicKey = string(pemBytes) + + case KeyType_ED25519: + pub, pri, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + return err + } + entry.Key = pri + entry.FormattedPublicKey = base64.StdEncoding.EncodeToString(pub) } p.Keys[p.LatestVersion] = entry @@ -811,10 +938,12 @@ func (p *Policy) Rotate(storage logical.Storage) error { } func (p *Policy) MigrateKeyToKeysMap() { + now := time.Now() p.Keys = keyEntryMap{ 1: KeyEntry{ - AESKey: p.Key, - CreationTime: time.Now().Unix(), + Key: p.Key, + CreationTime: now, + DeprecatedCreationTime: now.Unix(), }, } p.Key = nil diff --git a/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go b/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go index 600238b..7969cf9 100644 
--- a/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go +++ b/vendor/github.com/hashicorp/vault/helper/keysutil/policy_test.go @@ -40,10 +40,10 @@ func testKeyUpgradeCommon(t *testing.T, lm *LockManager) { t.Fatal("expected an upsert") } - testBytes := make([]byte, len(p.Keys[1].AESKey)) - copy(testBytes, p.Keys[1].AESKey) + testBytes := make([]byte, len(p.Keys[1].Key)) + copy(testBytes, p.Keys[1].Key) - p.Key = p.Keys[1].AESKey + p.Key = p.Keys[1].Key p.Keys = nil p.MigrateKeyToKeysMap() if p.Key != nil { @@ -52,7 +52,7 @@ func testKeyUpgradeCommon(t *testing.T, lm *LockManager) { if len(p.Keys) != 1 { t.Fatal("policy.Keys is the wrong size") } - if !reflect.DeepEqual(testBytes, p.Keys[1].AESKey) { + if !reflect.DeepEqual(testBytes, p.Keys[1].Key) { t.Fatal("key mismatch") } } @@ -198,7 +198,8 @@ func Test_Archiving(t *testing.T) { func testArchivingCommon(t *testing.T, lm *LockManager) { resetKeysArchive() - // First, we generate a policy and rotate it a number of times. Each time // we'll ensure that we have the expected number of keys in the archive and + // First, we generate a policy and rotate it a number of times. 
Each time + // we'll ensure that we have the expected number of keys in the archive and // the main keys object, which without changing the min version should be // zero and latest, respectively @@ -330,14 +331,21 @@ func checkKeys(t *testing.T, } for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ { + // Travis has weird time zone issues and gets super unhappy + if !p.Keys[i].CreationTime.Equal(keysArchive[i].CreationTime) { + t.Fatalf("key %d not equivalent between policy keys and test keys archive; policy keys:\n%#v\ntest keys archive:\n%#v\n", i, p.Keys[i], keysArchive[i]) + } + polKey := p.Keys[i] + polKey.CreationTime = keysArchive[i].CreationTime + p.Keys[i] = polKey if !reflect.DeepEqual(p.Keys[i], keysArchive[i]) { - t.Fatalf("key %d not equivalent between policy keys and test keys archive", i) + t.Fatalf("key %d not equivalent between policy keys and test keys archive; policy keys:\n%#v\ntest keys archive:\n%#v\n", i, p.Keys[i], keysArchive[i]) } } for i := 1; i < len(archive.Keys); i++ { - if !reflect.DeepEqual(archive.Keys[i].AESKey, keysArchive[i].AESKey) { - t.Fatalf("key %d not equivalent between policy archive and test keys archive", i) + if !reflect.DeepEqual(archive.Keys[i].Key, keysArchive[i].Key) { + t.Fatalf("key %d not equivalent between policy archive and test keys archive; policy archive:\n%#v\ntest keys archive:\n%#v\n", i, archive.Keys[i].Key, keysArchive[i].Key) } } } diff --git a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go index 7ecf754..685624a 100644 --- a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go +++ b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder.go @@ -48,33 +48,36 @@ func (b *Builder) add(raw string) error { return nil } - // If the arg is exactly "-", then we need to read from stdin - // and merge the results into the resulting structure. 
- if raw == "-" { - if b.Stdin == nil { - return fmt.Errorf("stdin is not supported") - } - if b.stdin { - return fmt.Errorf("stdin already consumed") - } - - b.stdin = true - return b.addReader(b.Stdin) - } - - // If the arg begins with "@" then we need to read a file directly - if raw[0] == '@' { - f, err := os.Open(raw[1:]) - if err != nil { - return err - } - defer f.Close() - - return b.addReader(f) - } - // Split into key/value parts := strings.SplitN(raw, "=", 2) + + // If the arg is exactly "-", then we need to read from stdin + // and merge the results into the resulting structure. + if len(parts) == 1 { + if raw == "-" { + if b.Stdin == nil { + return fmt.Errorf("stdin is not supported") + } + if b.stdin { + return fmt.Errorf("stdin already consumed") + } + + b.stdin = true + return b.addReader(b.Stdin) + } + + // If the arg begins with "@" then we need to read a file directly + if raw[0] == '@' { + f, err := os.Open(raw[1:]) + if err != nil { + return err + } + defer f.Close() + + return b.addReader(f) + } + } + if len(parts) != 2 { return fmt.Errorf("format must be key=value") } diff --git a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go index 9b0cffb..aa31784 100644 --- a/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go +++ b/vendor/github.com/hashicorp/vault/helper/kv-builder/builder_test.go @@ -118,3 +118,22 @@ func TestBuilder_sameKeyMultipleTimes(t *testing.T) { t.Fatalf("bad: %#v", actual) } } + +func TestBuilder_specialCharacteresInKey(t *testing.T) { + var b Builder + b.Stdin = bytes.NewBufferString("{\"foo\": \"bay\"}") + err := b.Add("@foo=bar", "-foo=baz", "-") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{ + "@foo": "bar", + "-foo": "baz", + "foo": "bay", + } + actual := b.Map() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} diff --git 
a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go index 9ba2bf7..957d533 100644 --- a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go +++ b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go @@ -19,6 +19,9 @@ func ParseDurationSecond(in interface{}) (time.Duration, error) { switch in.(type) { case string: inp := in.(string) + if inp == "" { + return time.Duration(0), nil + } var err error // Look for a suffix otherwise its a plain second value if strings.HasSuffix(inp, "s") || strings.HasSuffix(inp, "m") || strings.HasSuffix(inp, "h") { diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/logger.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/logger.go new file mode 100644 index 0000000..fff8ff1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/pluginutil/logger.go @@ -0,0 +1,158 @@ +package pluginutil + +import ( + "bytes" + "fmt" + stdlog "log" + "strings" + + hclog "github.com/hashicorp/go-hclog" + log "github.com/mgutz/logxi/v1" +) + +// pluginLogFaker is a wrapper on logxi.Logger that +// implements hclog.Logger +type hclogFaker struct { + logger log.Logger + + name string + implied []interface{} +} + +func (f *hclogFaker) buildLog(msg string, args ...interface{}) (string, []interface{}) { + if f.name != "" { + msg = fmt.Sprintf("%s: %s", f.name, msg) + } + args = append(f.implied, args...) + + return msg, args +} + +func (f *hclogFaker) Trace(msg string, args ...interface{}) { + msg, args = f.buildLog(msg, args...) + f.logger.Trace(msg, args...) +} + +func (f *hclogFaker) Debug(msg string, args ...interface{}) { + msg, args = f.buildLog(msg, args...) + f.logger.Debug(msg, args...) +} + +func (f *hclogFaker) Info(msg string, args ...interface{}) { + msg, args = f.buildLog(msg, args...) + f.logger.Info(msg, args...) +} + +func (f *hclogFaker) Warn(msg string, args ...interface{}) { + msg, args = f.buildLog(msg, args...) 
+ f.logger.Warn(msg, args...) +} + +func (f *hclogFaker) Error(msg string, args ...interface{}) { + msg, args = f.buildLog(msg, args...) + f.logger.Error(msg, args...) +} + +func (f *hclogFaker) IsTrace() bool { + return f.logger.IsTrace() +} + +func (f *hclogFaker) IsDebug() bool { + return f.logger.IsDebug() +} + +func (f *hclogFaker) IsInfo() bool { + return f.logger.IsInfo() +} + +func (f *hclogFaker) IsWarn() bool { + return f.logger.IsWarn() +} + +func (f *hclogFaker) IsError() bool { + return !f.logger.IsTrace() && !f.logger.IsDebug() && !f.logger.IsInfo() && !f.IsWarn() +} + +func (f *hclogFaker) With(args ...interface{}) hclog.Logger { + var nf = *f + nf.implied = append(nf.implied, args...) + return f +} + +func (f *hclogFaker) Named(name string) hclog.Logger { + var nf = *f + if nf.name != "" { + nf.name = nf.name + "." + name + } + return &nf +} + +func (f *hclogFaker) ResetNamed(name string) hclog.Logger { + var nf = *f + nf.name = name + return &nf +} + +func (f *hclogFaker) StandardLogger(opts *hclog.StandardLoggerOptions) *stdlog.Logger { + if opts == nil { + opts = &hclog.StandardLoggerOptions{} + } + + return stdlog.New(&stdlogAdapter{f, opts.InferLevels}, "", 0) +} + +// Provides a io.Writer to shim the data out of *log.Logger +// and back into our Logger. This is basically the only way to +// build upon *log.Logger. 
+type stdlogAdapter struct { + hl hclog.Logger + inferLevels bool +} + +// Take the data, infer the levels if configured, and send it through +// a regular Logger +func (s *stdlogAdapter) Write(data []byte) (int, error) { + str := string(bytes.TrimRight(data, " \t\n")) + + if s.inferLevels { + level, str := s.pickLevel(str) + switch level { + case hclog.Trace: + s.hl.Trace(str) + case hclog.Debug: + s.hl.Debug(str) + case hclog.Info: + s.hl.Info(str) + case hclog.Warn: + s.hl.Warn(str) + case hclog.Error: + s.hl.Error(str) + default: + s.hl.Info(str) + } + } else { + s.hl.Info(str) + } + + return len(data), nil +} + +// Detect, based on conventions, what log level this is +func (s *stdlogAdapter) pickLevel(str string) (hclog.Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return hclog.Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return hclog.Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return hclog.Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return hclog.Warn, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERROR]"): + return hclog.Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return hclog.Error, strings.TrimSpace(str[5:]) + default: + return hclog.Info, str + } +} diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/mlock.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/mlock.go new file mode 100644 index 0000000..1660ca8 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/pluginutil/mlock.go @@ -0,0 +1,23 @@ +package pluginutil + +import ( + "os" + + "github.com/hashicorp/vault/helper/mlock" +) + +var ( + // PluginMlockEnabled is the ENV name used to pass the configuration for + // enabling mlock + PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED" +) + +// OptionallyEnableMlock determines if mlock should be called, and if so enables +// mlock. 
+func OptionallyEnableMlock() error { + if os.Getenv(PluginMlockEnabled) == "true" { + return mlock.LockMemory() + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/runner.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/runner.go new file mode 100644 index 0000000..2047651 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/pluginutil/runner.go @@ -0,0 +1,166 @@ +package pluginutil + +import ( + "crypto/sha256" + "crypto/tls" + "flag" + "fmt" + "os/exec" + "time" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/wrapping" + log "github.com/mgutz/logxi/v1" +) + +// Looker defines the plugin Lookup function that looks into the plugin catalog +// for availible plugins and returns a PluginRunner +type Looker interface { + LookupPlugin(string) (*PluginRunner, error) +} + +// Wrapper interface defines the functions needed by the runner to wrap the +// metadata needed to run a plugin process. This includes looking up Mlock +// configuration and wrapping data in a respose wrapped token. +// logical.SystemView implementataions satisfy this interface. +type RunnerUtil interface { + ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) + MlockEnabled() bool +} + +// LookWrapper defines the functions for both Looker and Wrapper +type LookRunnerUtil interface { + Looker + RunnerUtil +} + +// PluginRunner defines the metadata needed to run a plugin securely with +// go-plugin. 
+type PluginRunner struct { + Name string `json:"name" structs:"name"` + Command string `json:"command" structs:"command"` + Args []string `json:"args" structs:"args"` + Sha256 []byte `json:"sha256" structs:"sha256"` + Builtin bool `json:"builtin" structs:"builtin"` + BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"` +} + +// Run takes a wrapper RunnerUtil instance along with the go-plugin paramaters and +// returns a configured plugin.Client with TLS Configured and a wrapping token set +// on PluginUnwrapTokenEnv for plugin process consumption. +func (r *PluginRunner) Run(wrapper RunnerUtil, pluginMap map[string]plugin.Plugin, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) { + return r.runCommon(wrapper, pluginMap, hs, env, logger, false) +} + +// RunMetadataMode returns a configured plugin.Client that will dispense a plugin +// in metadata mode. The PluginMetadaModeEnv is passed in as part of the Cmd to +// plugin.Client, and consumed by the plugin process on pluginutil.VaultPluginTLSProvider. +func (r *PluginRunner) RunMetadataMode(wrapper RunnerUtil, pluginMap map[string]plugin.Plugin, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) { + return r.runCommon(wrapper, pluginMap, hs, env, logger, true) + +} + +func (r *PluginRunner) runCommon(wrapper RunnerUtil, pluginMap map[string]plugin.Plugin, hs plugin.HandshakeConfig, env []string, logger log.Logger, isMetadataMode bool) (*plugin.Client, error) { + cmd := exec.Command(r.Command, r.Args...) + cmd.Env = append(cmd.Env, env...) 
+ + // Add the mlock setting to the ENV of the plugin + if wrapper.MlockEnabled() { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) + } + + // Create logger for the plugin client + clogger := &hclogFaker{ + logger: logger, + } + namedLogger := clogger.ResetNamed("plugin") + + var clientTLSConfig *tls.Config + if !isMetadataMode { + // Add the metadata mode ENV and set it to false + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMetadaModeEnv, "false")) + + // Get a CA TLS Certificate + certBytes, key, err := generateCert() + if err != nil { + return nil, err + } + + // Use CA to sign a client cert and return a configured TLS config + clientTLSConfig, err = createClientTLSConfig(certBytes, key) + if err != nil { + return nil, err + } + + // Use CA to sign a server cert and wrap the values in a response wrapped + // token. + wrapToken, err := wrapServerConfig(wrapper, certBytes, key) + if err != nil { + return nil, err + } + + // Add the response wrap token to the ENV of the plugin + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken)) + } else { + namedLogger = clogger.ResetNamed("plugin.metadata") + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMetadaModeEnv, "true")) + } + + secureConfig := &plugin.SecureConfig{ + Checksum: r.Sha256, + Hash: sha256.New(), + } + + clientConfig := &plugin.ClientConfig{ + HandshakeConfig: hs, + Plugins: pluginMap, + Cmd: cmd, + SecureConfig: secureConfig, + TLSConfig: clientTLSConfig, + Logger: namedLogger, + } + + client := plugin.NewClient(clientConfig) + + return client, nil +} + +type APIClientMeta struct { + // These are set by the command line flags. 
+ flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagInsecure bool +} + +func (f *APIClientMeta) FlagSet() *flag.FlagSet { + fs := flag.NewFlagSet("vault plugin settings", flag.ContinueOnError) + + fs.StringVar(&f.flagCACert, "ca-cert", "", "") + fs.StringVar(&f.flagCAPath, "ca-path", "", "") + fs.StringVar(&f.flagClientCert, "client-cert", "", "") + fs.StringVar(&f.flagClientKey, "client-key", "", "") + fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "") + + return fs +} + +func (f *APIClientMeta) GetTLSConfig() *api.TLSConfig { + // If we need custom TLS configuration, then set it + if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure { + t := &api.TLSConfig{ + CACert: f.flagCACert, + CAPath: f.flagCAPath, + ClientCert: f.flagClientCert, + ClientKey: f.flagClientKey, + TLSServerName: "", + Insecure: f.flagInsecure, + } + + return t + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/helper/pluginutil/tls.go b/vendor/github.com/hashicorp/vault/helper/pluginutil/tls.go new file mode 100644 index 0000000..112d33c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/pluginutil/tls.go @@ -0,0 +1,242 @@ +package pluginutil + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "errors" + "fmt" + "net/url" + "os" + "time" + + "github.com/SermoDigital/jose/jws" + "github.com/hashicorp/errwrap" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/certutil" +) + +var ( + // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the + // plugin. + PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" + + // PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded + // string. Used for testing. 
+ PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM" + + // PluginMetadaModeEnv is an ENV name used to disable TLS communication + // to bootstrap mounting plugins. + PluginMetadaModeEnv = "VAULT_PLUGIN_METADATA_MODE" +) + +// generateCert is used internally to create certificates for the plugin +// client and server. +func generateCert() ([]byte, *ecdsa.PrivateKey, error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + host, err := uuid.GenerateUUID() + if err != nil { + return nil, nil, err + } + + sn, err := certutil.GenerateSerialNumber() + if err != nil { + return nil, nil, err + } + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, errwrap.Wrapf("unable to generate client certificate: {{err}}", err) + } + + return certBytes, key, nil +} + +// createClientTLSConfig creates a signed certificate and returns a configured +// TLS config. 
+func createClientTLSConfig(certBytes []byte, key *ecdsa.PrivateKey) (*tls.Config, error) { + clientCert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, fmt.Errorf("error parsing generated plugin certificate: %v", err) + } + + cert := tls.Certificate{ + Certificate: [][]byte{certBytes}, + PrivateKey: key, + Leaf: clientCert, + } + + clientCertPool := x509.NewCertPool() + clientCertPool.AddCert(clientCert) + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: clientCertPool, + ServerName: clientCert.Subject.CommonName, + MinVersion: tls.VersionTLS12, + } + + tlsConfig.BuildNameToCertificate() + + return tlsConfig, nil +} + +// wrapServerConfig is used to create a server certificate and private key, then +// wrap them in an unwrap token for later retrieval by the plugin. +func wrapServerConfig(sys RunnerUtil, certBytes []byte, key *ecdsa.PrivateKey) (string, error) { + rawKey, err := x509.MarshalECPrivateKey(key) + if err != nil { + return "", err + } + + wrapInfo, err := sys.ResponseWrapData(map[string]interface{}{ + "ServerCert": certBytes, + "ServerKey": rawKey, + }, time.Second*60, true) + if err != nil { + return "", err + } + + return wrapInfo.Token, nil +} + +// VaultPluginTLSProvider is run inside a plugin and retrieves the response +// wrapped TLS certificate from vault. It returns a configured TLS Config. 
+func VaultPluginTLSProvider(apiTLSConfig *api.TLSConfig) func() (*tls.Config, error) { + if os.Getenv(PluginMetadaModeEnv) == "true" { + return nil + } + + return func() (*tls.Config, error) { + unwrapToken := os.Getenv(PluginUnwrapTokenEnv) + + // Parse the JWT and retrieve the vault address + wt, err := jws.ParseJWT([]byte(unwrapToken)) + if err != nil { + return nil, fmt.Errorf("error decoding token: %s", err) + } + if wt == nil { + return nil, errors.New("nil decoded token") + } + + addrRaw := wt.Claims().Get("addr") + if addrRaw == nil { + return nil, errors.New("decoded token does not contain primary cluster address") + } + vaultAddr, ok := addrRaw.(string) + if !ok { + return nil, errors.New("decoded token's address not valid") + } + if vaultAddr == "" { + return nil, errors.New(`no address for the vault found`) + } + + // Sanity check the value + if _, err := url.Parse(vaultAddr); err != nil { + return nil, fmt.Errorf("error parsing the vault address: %s", err) + } + + // Unwrap the token + clientConf := api.DefaultConfig() + clientConf.Address = vaultAddr + if apiTLSConfig != nil { + err := clientConf.ConfigureTLS(apiTLSConfig) + if err != nil { + return nil, errwrap.Wrapf("error configuring api client {{err}}", err) + } + } + client, err := api.NewClient(clientConf) + if err != nil { + return nil, errwrap.Wrapf("error during api client creation: {{err}}", err) + } + + secret, err := client.Logical().Unwrap(unwrapToken) + if err != nil { + return nil, errwrap.Wrapf("error during token unwrap request: {{err}}", err) + } + if secret == nil { + return nil, errors.New("error during token unwrap request: secret is nil") + } + + // Retrieve and parse the server's certificate + serverCertBytesRaw, ok := secret.Data["ServerCert"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverCertBytes, err := base64.StdEncoding.DecodeString(serverCertBytesRaw) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: 
%v", err) + } + + serverCert, err := x509.ParseCertificate(serverCertBytes) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: %v", err) + } + + // Retrieve and parse the server's private key + serverKeyB64, ok := secret.Data["ServerKey"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverKeyRaw, err := base64.StdEncoding.DecodeString(serverKeyB64) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: %v", err) + } + + serverKey, err := x509.ParseECPrivateKey(serverKeyRaw) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: %v", err) + } + + // Add CA cert to the cert pool + caCertPool := x509.NewCertPool() + caCertPool.AddCert(serverCert) + + // Build a certificate object out of the server's cert and private key. + cert := tls.Certificate{ + Certificate: [][]byte{serverCertBytes}, + PrivateKey: serverKey, + Leaf: serverCert, + } + + // Setup TLS config + tlsConfig := &tls.Config{ + ClientCAs: caCertPool, + RootCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + // TLS 1.2 minimum + MinVersion: tls.VersionTLS12, + Certificates: []tls.Certificate{cert}, + } + tlsConfig.BuildNameToCertificate() + + return tlsConfig, nil + } +} diff --git a/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go b/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go index 9ac9b93..f6d9f66 100644 --- a/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go +++ b/vendor/github.com/hashicorp/vault/helper/policyutil/policyutil.go @@ -18,14 +18,23 @@ const ( // all other policies will be ignored, the result will contain // just the 'root'. In cases where 'root' is not present, if // 'default' policy is not already present, it will be added. 
-func ParsePolicies(policiesRaw string) []string { - if policiesRaw == "" { +func ParsePolicies(policiesRaw interface{}) []string { + if policiesRaw == nil { return []string{"default"} } - policies := strings.Split(policiesRaw, ",") + var policies []string + switch policiesRaw.(type) { + case string: + if policiesRaw.(string) == "" { + return []string{} + } + policies = strings.Split(policiesRaw.(string), ",") + case []string: + policies = policiesRaw.([]string) + } - return SanitizePolicies(policies, true) + return SanitizePolicies(policies, false) } // SanitizePolicies performs the common input validation tasks diff --git a/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go b/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go new file mode 100644 index 0000000..5ff59b1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/proxyutil/proxyutil.go @@ -0,0 +1,107 @@ +package proxyutil + +import ( + "fmt" + "net" + "sync" + + proxyproto "github.com/armon/go-proxyproto" + "github.com/hashicorp/errwrap" + sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/vault/helper/strutil" +) + +// ProxyProtoConfig contains configuration for the PROXY protocol +type ProxyProtoConfig struct { + sync.RWMutex + Behavior string + AuthorizedAddrs []*sockaddr.SockAddrMarshaler `json:"authorized_addrs"` +} + +func (p *ProxyProtoConfig) SetAuthorizedAddrs(addrs interface{}) error { + p.AuthorizedAddrs = make([]*sockaddr.SockAddrMarshaler, 0) + stringAddrs := make([]string, 0) + + switch addrs.(type) { + case string: + stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",") + if len(stringAddrs) == 0 { + return fmt.Errorf("unable to parse addresses from %v", addrs) + } + + case []string: + stringAddrs = addrs.([]string) + + case []interface{}: + for _, v := range addrs.([]interface{}) { + stringAddr, ok := v.(string) + if !ok { + return fmt.Errorf("error parsing %q as string", v) + } + stringAddrs = append(stringAddrs, stringAddr) + 
} + + default: + return fmt.Errorf("unknown address input type %T", addrs) + } + + for _, addr := range stringAddrs { + sa, err := sockaddr.NewSockAddr(addr) + if err != nil { + return errwrap.Wrapf("error parsing authorized address: {{err}}", err) + } + p.AuthorizedAddrs = append(p.AuthorizedAddrs, &sockaddr.SockAddrMarshaler{ + SockAddr: sa, + }) + } + + return nil +} + +// WrapInProxyProto wraps the given listener in the PROXY protocol. If behavior +// is "use_if_authorized" or "deny_if_unauthorized" it also configures a +// SourceCheck based on the given ProxyProtoConfig. In an error case it returns +// the original listener and the error. +func WrapInProxyProto(listener net.Listener, config *ProxyProtoConfig) (net.Listener, error) { + config.Lock() + defer config.Unlock() + + var newLn *proxyproto.Listener + + switch config.Behavior { + case "use_always": + newLn = &proxyproto.Listener{ + Listener: listener, + } + + case "allow_authorized", "deny_unauthorized": + newLn = &proxyproto.Listener{ + Listener: listener, + SourceCheck: func(addr net.Addr) (bool, error) { + config.RLock() + defer config.RUnlock() + + sa, err := sockaddr.NewSockAddr(addr.String()) + if err != nil { + return false, errwrap.Wrapf("error parsing remote address: {{err}}", err) + } + + for _, authorizedAddr := range config.AuthorizedAddrs { + if authorizedAddr.Contains(sa) { + return true, nil + } + } + + if config.Behavior == "allow_authorized" { + return false, nil + } + + return false, proxyproto.ErrInvalidUpstream + }, + } + default: + return listener, fmt.Errorf("unknown behavior type for proxy proto config") + } + + return newLn, nil +} diff --git a/vendor/github.com/hashicorp/vault/helper/reload/reload.go b/vendor/github.com/hashicorp/vault/helper/reload/reload.go new file mode 100644 index 0000000..cc450b9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/reload/reload.go @@ -0,0 +1,54 @@ +package reload + +import ( + "crypto/tls" + "fmt" + "sync" +) + +// ReloadFunc are 
functions that are called when a reload is requested +type ReloadFunc func(map[string]interface{}) error + +// CertificateGetter satisfies ReloadFunc and its GetCertificate method +// satisfies the tls.GetCertificate function signature. Currently it does not +// allow changing paths after the fact. +type CertificateGetter struct { + sync.RWMutex + + cert *tls.Certificate + + certFile string + keyFile string +} + +func NewCertificateGetter(certFile, keyFile string) *CertificateGetter { + return &CertificateGetter{ + certFile: certFile, + keyFile: keyFile, + } +} + +func (cg *CertificateGetter) Reload(_ map[string]interface{}) error { + cert, err := tls.LoadX509KeyPair(cg.certFile, cg.keyFile) + if err != nil { + return err + } + + cg.Lock() + defer cg.Unlock() + + cg.cert = &cert + + return nil +} + +func (cg *CertificateGetter) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + cg.RLock() + defer cg.RUnlock() + + if cg.cert == nil { + return nil, fmt.Errorf("nil certificate") + } + + return cg.cert, nil +} diff --git a/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go index 7c7f64d..b5e69c4 100644 --- a/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go +++ b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go @@ -29,6 +29,19 @@ func StrListSubset(super, sub []string) bool { return true } +// Parses a comma separated list of strings into a slice of strings. +// The return slice will be sorted and will not contain duplicate or +// empty items. +func ParseDedupAndSortStrings(input string, sep string) []string { + input = strings.TrimSpace(input) + parsed := []string{} + if input == "" { + // Don't return nil + return parsed + } + return RemoveDuplicates(strings.Split(input, sep), false) +} + // Parses a comma separated list of strings into a slice of strings. // The return slice will be sorted and will not contain duplicate or // empty items. 
The values will be converted to lower case. @@ -56,6 +69,10 @@ func ParseKeyValues(input string, out map[string]string, sep string) error { for _, keyValue := range keyValues { shards := strings.Split(keyValue, "=") + if len(shards) != 2 { + return fmt.Errorf("invalid format") + } + key := strings.TrimSpace(shards[0]) value := strings.TrimSpace(shards[1]) if key == "" || value == "" { @@ -286,3 +303,11 @@ func GlobbedStringsMatch(item, val string) bool { return val == item } + +// AppendIfMissing adds a string to a slice if the given string is not present +func AppendIfMissing(slice []string, i string) []string { + if StrListContains(slice, i) { + return slice + } + return append(slice, i) +} diff --git a/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go b/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go index 9fd3bef..ce02719 100644 --- a/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go +++ b/vendor/github.com/hashicorp/vault/helper/strutil/strutil_test.go @@ -139,7 +139,7 @@ func TestStrutil_ParseKeyValues(t *testing.T) { input = "key1 = value1, key2 = " err = ParseKeyValues(input, actual, ",") if err == nil { - t.Fatal("expected an error") + t.Fatalf("expected an error") } for k, _ := range actual { delete(actual, k) @@ -148,11 +148,17 @@ func TestStrutil_ParseKeyValues(t *testing.T) { input = "key1 = value1, = value2 " err = ParseKeyValues(input, actual, ",") if err == nil { - t.Fatal("expected an error") + t.Fatalf("expected an error") } for k, _ := range actual { delete(actual, k) } + + input = "key1" + err = ParseKeyValues(input, actual, ",") + if err == nil { + t.Fatalf("expected an error") + } } func TestStrutil_ParseArbitraryKeyValues(t *testing.T) { @@ -324,3 +330,40 @@ func TestTrimStrings(t *testing.T) { t.Fatalf("Bad TrimStrings: expected:%#v, got:%#v", expected, actual) } } + +func TestStrutil_AppendIfMissing(t *testing.T) { + keys := []string{} + + keys = AppendIfMissing(keys, "foo") + + if len(keys) != 
1 { + t.Fatalf("expected slice to be length of 1: %v", keys) + } + if keys[0] != "foo" { + t.Fatalf("expected slice to contain key 'foo': %v", keys) + } + + keys = AppendIfMissing(keys, "bar") + + if len(keys) != 2 { + t.Fatalf("expected slice to be length of 2: %v", keys) + } + if keys[0] != "foo" { + t.Fatalf("expected slice to contain key 'foo': %v", keys) + } + if keys[1] != "bar" { + t.Fatalf("expected slice to contain key 'bar': %v", keys) + } + + keys = AppendIfMissing(keys, "foo") + + if len(keys) != 2 { + t.Fatalf("expected slice to still be length of 2: %v", keys) + } + if keys[0] != "foo" { + t.Fatalf("expected slice to still contain key 'foo': %v", keys) + } + if keys[1] != "bar" { + t.Fatalf("expected slice to still contain key 'bar': %v", keys) + } +} diff --git a/vendor/github.com/hashicorp/vault/helper/tlsutil/tls.go b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil.go similarity index 83% rename from vendor/github.com/hashicorp/vault/helper/tlsutil/tls.go rename to vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil.go index 5cbd060..08b3ebd 100644 --- a/vendor/github.com/hashicorp/vault/helper/tlsutil/tls.go +++ b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil.go @@ -23,6 +23,7 @@ func ParseCiphers(cipherStr string) ([]uint16, error) { "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, @@ -32,10 +33,14 @@ func ParseCiphers(cipherStr string) ([]uint16, error) { "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": 
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, } for _, cipher := range ciphers { if v, ok := cipherMap[cipher]; ok { diff --git a/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go index a8e9e77..79aac9b 100644 --- a/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go +++ b/vendor/github.com/hashicorp/vault/helper/tlsutil/tlsutil_test.go @@ -7,12 +7,12 @@ import ( ) func TestParseCiphers(t *testing.T) { - testOk := "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + testOk := 
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305" v, err := ParseCiphers(testOk) if err != nil { t.Fatal(err) } - if len(v) != 12 { + if len(v) != 17 { t.Fatal("missed ciphers after parse") } diff --git a/vendor/github.com/hashicorp/vault/helper/wrapping/wrapinfo.go b/vendor/github.com/hashicorp/vault/helper/wrapping/wrapinfo.go new file mode 100644 index 0000000..2242c7b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/wrapping/wrapinfo.go @@ -0,0 +1,27 @@ +package wrapping + +import "time" + +type ResponseWrapInfo struct { + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"` + + // The token containing the wrapped response + Token string `json:"token" structs:"token" mapstructure:"token"` + + // The creation time. This can be used with the TTL to figure out an + // expected expiration. + CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"` + + // If the contained response is the output of a token creation call, the + // created token's accessor will be accessible here + WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor"` + + // The format to use. This doesn't get returned, it's only internal. 
+ Format string `json:"format" structs:"format" mapstructure:"format"` + + // CreationPath is the original request path that was used to create + // the wrapped response. + CreationPath string `json:"creation_path" structs:"creation_path" mapstructure:"creation_path"` +} diff --git a/vendor/github.com/hashicorp/vault/http/cors.go b/vendor/github.com/hashicorp/vault/http/cors.go new file mode 100644 index 0000000..a01228b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/cors.go @@ -0,0 +1,62 @@ +package http + +import ( + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/vault" +) + +var allowedMethods = []string{ + http.MethodDelete, + http.MethodGet, + http.MethodOptions, + http.MethodPost, + http.MethodPut, + "LIST", // LIST is not an official HTTP method, but Vault supports it. +} + +func wrapCORSHandler(h http.Handler, core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + corsConf := core.CORSConfig() + + origin := req.Header.Get("Origin") + requestMethod := req.Header.Get("Access-Control-Request-Method") + + // If CORS is not enabled or if no Origin header is present (i.e. the request + // is from the Vault CLI. A browser will always send an Origin header), then + // just return a 204. + if !corsConf.IsEnabled() || origin == "" { + h.ServeHTTP(w, req) + return + } + + // Return a 403 if the origin is not allowed to make cross-origin requests. 
+ if !corsConf.IsValidOrigin(origin) { + respondError(w, http.StatusForbidden, fmt.Errorf("origin not allowed")) + return + } + + if req.Method == http.MethodOptions && !strutil.StrListContains(allowedMethods, requestMethod) { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + w.Header().Set("Access-Control-Allow-Origin", origin) + w.Header().Set("Vary", "Origin") + + // apply headers for preflight requests + if req.Method == http.MethodOptions { + w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ",")) + w.Header().Set("Access-Control-Allow-Headers", strings.Join(corsConf.AllowedHeaders, ",")) + w.Header().Set("Access-Control-Max-Age", "300") + + return + } + + h.ServeHTTP(w, req) + return + }) +} diff --git a/vendor/github.com/hashicorp/vault/http/forwarding_test.go b/vendor/github.com/hashicorp/vault/http/forwarding_test.go index fdc3b76..4f1aefe 100644 --- a/vendor/github.com/hashicorp/vault/http/forwarding_test.go +++ b/vendor/github.com/hashicorp/vault/http/forwarding_test.go @@ -8,7 +8,6 @@ import ( "io" "math/rand" "net/http" - "os" "strings" "sync" "sync/atomic" @@ -27,10 +26,6 @@ import ( ) func TestHTTP_Fallback_Bad_Address(t *testing.T) { - handler1 := http.NewServeMux() - handler2 := http.NewServeMux() - handler3 := http.NewServeMux() - coreConfig := &vault.CoreConfig{ LogicalBackends: map[string]logical.Factory{ "transit": transit.Factory, @@ -38,22 +33,17 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) { ClusterAddr: "https://127.3.4.1:8382", } - // Chicken-and-egg: Handler needs a core. So we create handlers first, then - // add routes chained to a Handler-created handler. 
- cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true) - for _, core := range cores { - defer core.CloseListeners() - } - handler1.Handle("/", Handler(cores[0].Core)) - handler2.Handle("/", Handler(cores[1].Core)) - handler3.Handle("/", Handler(cores[2].Core)) + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores // make it easy to get access to the active core := cores[0].Core vault.TestWaitActive(t, core) - root := cores[0].Root - addrs := []string{ fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port), fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port), @@ -68,7 +58,7 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) { if err != nil { t.Fatal(err) } - client.SetToken(root) + client.SetToken(cluster.RootToken) secret, err := client.Auth().Token().LookupSelf() if err != nil { @@ -77,17 +67,13 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) { if secret == nil { t.Fatal("secret is nil") } - if secret.Data["id"].(string) != root { + if secret.Data["id"].(string) != cluster.RootToken { t.Fatal("token mismatch") } } } func TestHTTP_Fallback_Disabled(t *testing.T) { - handler1 := http.NewServeMux() - handler2 := http.NewServeMux() - handler3 := http.NewServeMux() - coreConfig := &vault.CoreConfig{ LogicalBackends: map[string]logical.Factory{ "transit": transit.Factory, @@ -95,22 +81,17 @@ func TestHTTP_Fallback_Disabled(t *testing.T) { ClusterAddr: "empty", } - // Chicken-and-egg: Handler needs a core. So we create handlers first, then - // add routes chained to a Handler-created handler. 
- cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true) - for _, core := range cores { - defer core.CloseListeners() - } - handler1.Handle("/", Handler(cores[0].Core)) - handler2.Handle("/", Handler(cores[1].Core)) - handler3.Handle("/", Handler(cores[2].Core)) + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores // make it easy to get access to the active core := cores[0].Core vault.TestWaitActive(t, core) - root := cores[0].Root - addrs := []string{ fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port), fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port), @@ -125,7 +106,7 @@ func TestHTTP_Fallback_Disabled(t *testing.T) { if err != nil { t.Fatal(err) } - client.SetToken(root) + client.SetToken(cluster.RootToken) secret, err := client.Auth().Token().LookupSelf() if err != nil { @@ -134,7 +115,7 @@ func TestHTTP_Fallback_Disabled(t *testing.T) { if secret == nil { t.Fatal("secret is nil") } - if secret.Data["id"].(string) != root { + if secret.Data["id"].(string) != cluster.RootToken { t.Fatal("token mismatch") } } @@ -143,49 +124,31 @@ func TestHTTP_Fallback_Disabled(t *testing.T) { // This function recreates the fuzzy testing from transit to pipe a large // number of requests from the standbys to the active node. 
func TestHTTP_Forwarding_Stress(t *testing.T) { - testHTTP_Forwarding_Stress_Common(t, false, false, 50) - testHTTP_Forwarding_Stress_Common(t, false, true, 50) - testHTTP_Forwarding_Stress_Common(t, true, false, 50) - testHTTP_Forwarding_Stress_Common(t, true, true, 50) - os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "") + testHTTP_Forwarding_Stress_Common(t, false, 50) + testHTTP_Forwarding_Stress_Common(t, true, 50) } -func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uint64) { +func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64) { testPlaintext := "the quick brown fox" testPlaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA==" - if rpc { - os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "1") - } else { - os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "") - } - - handler1 := http.NewServeMux() - handler2 := http.NewServeMux() - handler3 := http.NewServeMux() - coreConfig := &vault.CoreConfig{ LogicalBackends: map[string]logical.Factory{ "transit": transit.Factory, }, } - // Chicken-and-egg: Handler needs a core. So we create handlers first, then - // add routes chained to a Handler-created handler. 
- cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true) - for _, core := range cores { - defer core.CloseListeners() - } - handler1.Handle("/", Handler(cores[0].Core)) - handler2.Handle("/", Handler(cores[1].Core)) - handler3.Handle("/", Handler(cores[2].Core)) + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores // make it easy to get access to the active core := cores[0].Core vault.TestWaitActive(t, core) - root := cores[0].Root - wg := sync.WaitGroup{} funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"} @@ -216,7 +179,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uin if err != nil { t.Fatal(err) } - req.Header.Set(AuthHeaderName, root) + req.Header.Set(AuthHeaderName, cluster.RootToken) _, err = client.Do(req) if err != nil { t.Fatal(err) @@ -265,7 +228,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uin if err != nil { return nil, err } - req.Header.Set(AuthHeaderName, root) + req.Header.Set(AuthHeaderName, cluster.RootToken) resp, err := client.Do(req) if err != nil { return nil, err @@ -465,40 +428,31 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uin wg.Wait() if totalOps == 0 || totalOps != successfulOps { - t.Fatalf("total/successful ops zero or mismatch: %d/%d; rpc: %t, parallel: %t, num %d", totalOps, successfulOps, rpc, parallel, num) + t.Fatalf("total/successful ops zero or mismatch: %d/%d; parallel: %t, num %d", totalOps, successfulOps, parallel, num) } - t.Logf("total operations tried: %d, total successful: %d; rpc: %t, parallel: %t, num %d", totalOps, successfulOps, rpc, parallel, num) + t.Logf("total operations tried: %d, total successful: %d; parallel: %t, num %d", totalOps, successfulOps, parallel, num) } // This tests TLS connection state forwarding by ensuring that 
we can use a // client TLS to authenticate against the cert backend func TestHTTP_Forwarding_ClientTLS(t *testing.T) { - handler1 := http.NewServeMux() - handler2 := http.NewServeMux() - handler3 := http.NewServeMux() - coreConfig := &vault.CoreConfig{ CredentialBackends: map[string]logical.Factory{ "cert": credCert.Factory, }, } - // Chicken-and-egg: Handler needs a core. So we create handlers first, then - // add routes chained to a Handler-created handler. - cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true) - for _, core := range cores { - defer core.CloseListeners() - } - handler1.Handle("/", Handler(cores[0].Core)) - handler2.Handle("/", Handler(cores[1].Core)) - handler3.Handle("/", Handler(cores[2].Core)) + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores // make it easy to get access to the active core := cores[0].Core vault.TestWaitActive(t, core) - root := cores[0].Root - transport := cleanhttp.DefaultTransport() transport.TLSClientConfig = cores[0].TLSConfig if err := http2.ConfigureTransport(transport); err != nil { @@ -514,7 +468,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { if err != nil { t.Fatal(err) } - req.Header.Set(AuthHeaderName, root) + req.Header.Set(AuthHeaderName, cluster.RootToken) _, err = client.Do(req) if err != nil { t.Fatal(err) @@ -525,7 +479,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { Policies string `json:"policies"` } encodedCertConfig, err := json.Marshal(&certConfig{ - Certificate: vault.TestClusterCACert, + Certificate: string(cluster.CACertPEM), Policies: "default", }) if err != nil { @@ -536,7 +490,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { if err != nil { t.Fatal(err) } - req.Header.Set(AuthHeaderName, root) + req.Header.Set(AuthHeaderName, cluster.RootToken) _, err = client.Do(req) if err != nil { t.Fatal(err) @@ -559,7 
+513,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { }, } - //cores[0].Logger().Printf("root token is %s", root) + //cores[0].Logger().Printf("cluster.RootToken token is %s", cluster.RootToken) //time.Sleep(4 * time.Hour) for _, addr := range addrs { @@ -595,3 +549,27 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { } } } + +func TestHTTP_Forwarding_HelpOperation(t *testing.T) { + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + testHelp := func(client *api.Client) { + help, err := client.Help("auth/token") + if err != nil { + t.Fatal(err) + } + if help == nil { + t.Fatal("help was nil") + } + } + + testHelp(cores[0].Client) + testHelp(cores[1].Client) +} diff --git a/vendor/github.com/hashicorp/vault/http/handler.go b/vendor/github.com/hashicorp/vault/http/handler.go index fb9b7a8..6290768 100644 --- a/vendor/github.com/hashicorp/vault/http/handler.go +++ b/vendor/github.com/hashicorp/vault/http/handler.go @@ -10,8 +10,8 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/consts" - "github.com/hashicorp/vault/helper/parseutil" "github.com/hashicorp/vault/helper/jsonutil" + "github.com/hashicorp/vault/helper/parseutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/vault" ) @@ -46,10 +46,11 @@ func Handler(core *vault.Core) http.Handler { mux.Handle("/v1/sys/init", handleSysInit(core)) mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core)) mux.Handle("/v1/sys/seal", handleSysSeal(core)) - mux.Handle("/v1/sys/step-down", handleSysStepDown(core)) + mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core))) mux.Handle("/v1/sys/unseal", handleSysUnseal(core)) mux.Handle("/v1/sys/renew", handleRequestForwarding(core, handleLogical(core, false, nil))) mux.Handle("/v1/sys/renew/", 
handleRequestForwarding(core, handleLogical(core, false, nil))) + mux.Handle("/v1/sys/leases/", handleRequestForwarding(core, handleLogical(core, false, nil))) mux.Handle("/v1/sys/leader", handleSysLeader(core)) mux.Handle("/v1/sys/health", handleSysHealth(core)) mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core, handleSysGenerateRootAttempt(core))) @@ -67,10 +68,11 @@ func Handler(core *vault.Core) http.Handler { // Wrap the handler in another handler to trigger all help paths. helpWrappedHandler := wrapHelpHandler(mux, core) + corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core) // Wrap the help wrapped handler with another layer with a generic // handler - genericWrappedHandler := wrapGenericHandler(helpWrappedHandler) + genericWrappedHandler := wrapGenericHandler(corsWrappedHandler) return genericWrappedHandler } @@ -152,7 +154,7 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle // Note: in an HA setup, this call will also ensure that connections to // the leader are set up, as that happens once the advertised cluster // values are read during this function - isLeader, leaderAddr, err := core.Leader() + isLeader, leaderAddr, _, err := core.Leader() if err != nil { if err == vault.ErrHANotEnabled { // Standalone node, serve request normally @@ -169,7 +171,7 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle return } if leaderAddr == "" { - respondError(w, http.StatusInternalServerError, fmt.Errorf("node not active but active node not found")) + respondError(w, http.StatusInternalServerError, fmt.Errorf("local node not active but active cluster node not found")) return } @@ -221,7 +223,7 @@ func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *l // respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) { // Request the 
leader address - _, redirectAddr, err := core.Leader() + _, redirectAddr, _, err := core.Leader() if err != nil { respondError(w, http.StatusInternalServerError, err) return diff --git a/vendor/github.com/hashicorp/vault/http/handler_test.go b/vendor/github.com/hashicorp/vault/http/handler_test.go index 149e603..8eae984 100644 --- a/vendor/github.com/hashicorp/vault/http/handler_test.go +++ b/vendor/github.com/hashicorp/vault/http/handler_test.go @@ -6,6 +6,7 @@ import ( "net/http" "net/http/httptest" "reflect" + "strings" "testing" "github.com/hashicorp/go-cleanhttp" @@ -14,6 +15,87 @@ import ( "github.com/hashicorp/vault/vault" ) +func TestHandler_cors(t *testing.T) { + core, _, _ := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + // Enable CORS and allow from any origin for testing. + corsConfig := core.CORSConfig() + err := corsConfig.Enable([]string{addr}, nil) + if err != nil { + t.Fatalf("Error enabling CORS: %s", err) + } + + req, err := http.NewRequest(http.MethodOptions, addr+"/v1/sys/seal-status", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + req.Header.Set("Origin", "BAD ORIGIN") + + // Requests from unacceptable origins will be rejected with a 403. + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + if resp.StatusCode != http.StatusForbidden { + t.Fatalf("Bad status:\nexpected: 403 Forbidden\nactual: %s", resp.Status) + } + + // + // Test preflight requests + // + + // Set a valid origin + req.Header.Set("Origin", addr) + + // Server should NOT accept arbitrary methods. + req.Header.Set("Access-Control-Request-Method", "FOO") + + client = cleanhttp.DefaultClient() + resp, err = client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Fail if an arbitrary method is accepted. 
+ if resp.StatusCode != http.StatusMethodNotAllowed { + t.Fatalf("Bad status:\nexpected: 405 Method Not Allowed\nactual: %s", resp.Status) + } + + // Server SHOULD accept acceptable methods. + req.Header.Set("Access-Control-Request-Method", http.MethodPost) + + client = cleanhttp.DefaultClient() + resp, err = client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + // + // Test that the CORS headers are applied correctly. + // + expHeaders := map[string]string{ + "Access-Control-Allow-Origin": addr, + "Access-Control-Allow-Headers": strings.Join(vault.StdAllowedHeaders, ","), + "Access-Control-Max-Age": "300", + "Vary": "Origin", + } + + for expHeader, expected := range expHeaders { + actual := resp.Header.Get(expHeader) + if actual == "" { + t.Fatalf("bad:\nHeader: %#v was not on response.", expHeader) + } + + if actual != expected { + t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n", expected, actual) + } + } +} + func TestHandler_CacheControlNoStore(t *testing.T) { core, _, token := vault.TestCoreUnsealed(t) ln, addr := TestServer(t, core) @@ -75,8 +157,8 @@ func TestSysMounts_headerAuth(t *testing.T) { "auth": nil, "data": map[string]interface{}{ "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -106,8 +188,8 @@ func TestSysMounts_headerAuth(t *testing.T) { }, }, "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -140,6 +222,13 @@ func TestSysMounts_headerAuth(t *testing.T) { testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if 
v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + } if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n", expected, actual) @@ -194,6 +283,12 @@ func TestSysMounts_headerAuth_Wrapped(t *testing.T) { } expected["wrap_info"].(map[string]interface{})["creation_time"] = actualCreationTime + actualCreationPath, ok := actual["wrap_info"].(map[string]interface{})["creation_path"] + if !ok || actualCreationPath == "" { + t.Fatal("creation_path missing in wrap info") + } + expected["wrap_info"].(map[string]interface{})["creation_path"] = actualCreationPath + if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n%T %T", expected, actual, actual["warnings"], actual["data"]) } diff --git a/vendor/github.com/hashicorp/vault/http/help.go b/vendor/github.com/hashicorp/vault/http/help.go index f0ca8b1..1c3a956 100644 --- a/vendor/github.com/hashicorp/vault/http/help.go +++ b/vendor/github.com/hashicorp/vault/http/help.go @@ -8,14 +8,18 @@ import ( ) func wrapHelpHandler(h http.Handler, core *vault.Core) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - // If the help parameter is not blank, then show the help + return http.HandlerFunc(func(writer http.ResponseWriter, req *http.Request) { + // If the help parameter is not blank, then show the help. We request + // forward because standby nodes do not have mounts and other state. 
if v := req.URL.Query().Get("help"); v != "" || req.Method == "HELP" { - handleHelp(core, w, req) + handleRequestForwarding(core, + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handleHelp(core, w, r) + })).ServeHTTP(writer, req) return } - h.ServeHTTP(w, req) + h.ServeHTTP(writer, req) return }) } diff --git a/vendor/github.com/hashicorp/vault/http/http_test.go b/vendor/github.com/hashicorp/vault/http/http_test.go index 16e0521..eb43817 100644 --- a/vendor/github.com/hashicorp/vault/http/http_test.go +++ b/vendor/github.com/hashicorp/vault/http/http_test.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "regexp" "strings" "testing" "time" @@ -55,6 +56,11 @@ func testHttpData(t *testing.T, method string, token string, addr string, body i t.Fatalf("err: %s", err) } + // Get the address of the local listener in order to attach it to an Origin header. + // This will allow for the testing of requests that require CORS, without using a browser. + hostURLRegexp, _ := regexp.Compile("http[s]?://.+:[0-9]+") + req.Header.Set("Origin", hostURLRegexp.FindString(addr)) + req.Header.Set("Content-Type", "application/json") if len(token) != 0 { diff --git a/vendor/github.com/hashicorp/vault/http/logical.go b/vendor/github.com/hashicorp/vault/http/logical.go index f73e532..642314e 100644 --- a/vendor/github.com/hashicorp/vault/http/logical.go +++ b/vendor/github.com/hashicorp/vault/http/logical.go @@ -49,6 +49,7 @@ func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Reques op = logical.UpdateOperation case "LIST": op = logical.ListOperation + case "OPTIONS": default: return nil, http.StatusMethodNotAllowed, nil } @@ -95,7 +96,7 @@ func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Reques return req, 0, nil } -func handleLogical(core *vault.Core, dataOnly bool, prepareRequestCallback PrepareRequestFunc) http.Handler { +func handleLogical(core *vault.Core, injectDataIntoTopLevel bool, prepareRequestCallback 
PrepareRequestFunc) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { req, statusCode, err := buildLogicalRequest(core, w, r) if err != nil || statusCode != 0 { @@ -124,11 +125,11 @@ func handleLogical(core *vault.Core, dataOnly bool, prepareRequestCallback Prepa } // Build the proper response - respondLogical(w, r, req, dataOnly, resp) + respondLogical(w, r, req, injectDataIntoTopLevel, resp) }) } -func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request, dataOnly bool, resp *logical.Response) { +func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request, injectDataIntoTopLevel bool, resp *logical.Response) { var httpResp *logical.HTTPResponse var ret interface{} @@ -152,6 +153,7 @@ func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request Token: resp.WrapInfo.Token, TTL: int(resp.WrapInfo.TTL.Seconds()), CreationTime: resp.WrapInfo.CreationTime.Format(time.RFC3339Nano), + CreationPath: resp.WrapInfo.CreationPath, WrappedAccessor: resp.WrapInfo.WrappedAccessor, }, } @@ -162,7 +164,7 @@ func respondLogical(w http.ResponseWriter, r *http.Request, req *logical.Request ret = httpResp - if dataOnly { + if injectDataIntoTopLevel { injector := logical.HTTPSysInjector{ Response: httpResp, } diff --git a/vendor/github.com/hashicorp/vault/http/logical_test.go b/vendor/github.com/hashicorp/vault/http/logical_test.go index bbbd892..e4101a5 100644 --- a/vendor/github.com/hashicorp/vault/http/logical_test.go +++ b/vendor/github.com/hashicorp/vault/http/logical_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/physical" + "github.com/hashicorp/vault/physical/inmem" "github.com/hashicorp/vault/vault" ) @@ -83,10 +84,13 @@ func TestLogical_StandbyRedirect(t *testing.T) { // Create an HA Vault logger := logformat.NewVaultLogger(log.LevelTrace) - inmha := physical.NewInmemHA(logger) + inmha, err := inmem.NewInmemHA(nil, 
logger) + if err != nil { + t.Fatal(err) + } conf := &vault.CoreConfig{ Physical: inmha, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: addr1, DisableMlock: true, } @@ -108,7 +112,7 @@ func TestLogical_StandbyRedirect(t *testing.T) { // Create a second HA Vault conf2 := &vault.CoreConfig{ Physical: inmha, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: addr2, DisableMlock: true, } diff --git a/vendor/github.com/hashicorp/vault/http/sys_auth_test.go b/vendor/github.com/hashicorp/vault/http/sys_auth_test.go index 9e19391..fa3c692 100644 --- a/vendor/github.com/hashicorp/vault/http/sys_auth_test.go +++ b/vendor/github.com/hashicorp/vault/http/sys_auth_test.go @@ -49,6 +49,13 @@ func TestSysAuth(t *testing.T) { testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + } if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) @@ -120,6 +127,13 @@ func TestSysEnableAuth(t *testing.T) { testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + } if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) @@ -176,6 +190,13 @@ func 
TestSysDisableAuth(t *testing.T) { testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + } if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) diff --git a/vendor/github.com/hashicorp/vault/http/sys_config_cors_test.go b/vendor/github.com/hashicorp/vault/http/sys_config_cors_test.go new file mode 100644 index 0000000..bd6c7ae --- /dev/null +++ b/vendor/github.com/hashicorp/vault/http/sys_config_cors_test.go @@ -0,0 +1,78 @@ +package http + +import ( + "encoding/json" + "net/http" + "reflect" + "testing" + + "github.com/hashicorp/vault/vault" +) + +func TestSysConfigCors(t *testing.T) { + var resp *http.Response + + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + corsConf := core.CORSConfig() + + // Try to enable CORS without providing a value for allowed_origins + resp = testHttpPut(t, token, addr+"/v1/sys/config/cors", map[string]interface{}{ + "allowed_headers": "X-Custom-Header", + }) + + testResponseStatus(t, resp, 500) + + // Enable CORS, but provide an origin this time. 
+ resp = testHttpPut(t, token, addr+"/v1/sys/config/cors", map[string]interface{}{ + "allowed_origins": addr, + "allowed_headers": "X-Custom-Header", + }) + + testResponseStatus(t, resp, 204) + + // Read the CORS configuration + resp = testHttpGet(t, token, addr+"/v1/sys/config/cors") + testResponseStatus(t, resp, 200) + + var actual map[string]interface{} + var expected map[string]interface{} + + lenStdHeaders := len(corsConf.AllowedHeaders) + + expectedHeaders := make([]interface{}, lenStdHeaders) + + for i := range corsConf.AllowedHeaders { + expectedHeaders[i] = corsConf.AllowedHeaders[i] + } + + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "enabled": true, + "allowed_origins": []interface{}{addr}, + "allowed_headers": expectedHeaders, + }, + "enabled": true, + "allowed_origins": []interface{}{addr}, + "allowed_headers": expectedHeaders, + } + + testResponseStatus(t, resp, 200) + + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual) + } + +} diff --git a/vendor/github.com/hashicorp/vault/http/sys_leader.go b/vendor/github.com/hashicorp/vault/http/sys_leader.go index ad5f281..98eb04a 100644 --- a/vendor/github.com/hashicorp/vault/http/sys_leader.go +++ b/vendor/github.com/hashicorp/vault/http/sys_leader.go @@ -20,7 +20,7 @@ func handleSysLeader(core *vault.Core) http.Handler { func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { haEnabled := true - isLeader, address, err := core.Leader() + isLeader, address, clusterAddr, err := core.Leader() if errwrap.Contains(err, vault.ErrHANotEnabled.Error()) { haEnabled = false err = nil @@ -31,14 +31,16 @@ func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request } 
respondOk(w, &LeaderResponse{ - HAEnabled: haEnabled, - IsSelf: isLeader, - LeaderAddress: address, + HAEnabled: haEnabled, + IsSelf: isLeader, + LeaderAddress: address, + LeaderClusterAddress: clusterAddr, }) } type LeaderResponse struct { - HAEnabled bool `json:"ha_enabled"` - IsSelf bool `json:"is_self"` - LeaderAddress string `json:"leader_address"` + HAEnabled bool `json:"ha_enabled"` + IsSelf bool `json:"is_self"` + LeaderAddress string `json:"leader_address"` + LeaderClusterAddress string `json:"leader_cluster_address"` } diff --git a/vendor/github.com/hashicorp/vault/http/sys_leader_test.go b/vendor/github.com/hashicorp/vault/http/sys_leader_test.go index 9c0c7d2..afe0dbd 100644 --- a/vendor/github.com/hashicorp/vault/http/sys_leader_test.go +++ b/vendor/github.com/hashicorp/vault/http/sys_leader_test.go @@ -20,9 +20,10 @@ func TestSysLeader_get(t *testing.T) { var actual map[string]interface{} expected := map[string]interface{}{ - "ha_enabled": false, - "is_self": false, - "leader_address": "", + "ha_enabled": false, + "is_self": false, + "leader_address": "", + "leader_cluster_address": "", } testResponseStatus(t, resp, 200) testResponseBody(t, resp, &actual) diff --git a/vendor/github.com/hashicorp/vault/http/sys_lease_test.go b/vendor/github.com/hashicorp/vault/http/sys_lease_test.go index 6b7bc34..de1dc6c 100644 --- a/vendor/github.com/hashicorp/vault/http/sys_lease_test.go +++ b/vendor/github.com/hashicorp/vault/http/sys_lease_test.go @@ -23,14 +23,33 @@ func TestSysRenew(t *testing.T) { // read secret resp = testHttpGet(t, token, addr+"/v1/secret/foo") var result struct { - LeaseId string `json:"lease_id"` + LeaseID string `json:"lease_id"` } if err := jsonutil.DecodeJSONFromReader(resp.Body, &result); err != nil { t.Fatalf("bad: %s", err) } - resp = testHttpPut(t, token, addr+"/v1/sys/renew/"+result.LeaseId, nil) + var renewResult struct { + LeaseID string `json:"lease_id"` + Data map[string]interface{} `json:"data"` + } + resp = testHttpPut(t, 
token, addr+"/v1/sys/renew/"+result.LeaseID, nil) testResponseStatus(t, resp, 200) + if err := jsonutil.DecodeJSONFromReader(resp.Body, &renewResult); err != nil { + t.Fatal(err) + } + if result.LeaseID != renewResult.LeaseID { + t.Fatal("lease id changed in renew request") + } + + resp = testHttpPut(t, token, addr+"/v1/sys/leases/renew/"+result.LeaseID, nil) + testResponseStatus(t, resp, 200) + if err := jsonutil.DecodeJSONFromReader(resp.Body, &renewResult); err != nil { + t.Fatal(err) + } + if result.LeaseID != renewResult.LeaseID { + t.Fatal("lease id changed in renew request") + } } func TestSysRevoke(t *testing.T) { diff --git a/vendor/github.com/hashicorp/vault/http/sys_mount_test.go b/vendor/github.com/hashicorp/vault/http/sys_mount_test.go index 2e12f0f..57f6dd7 100644 --- a/vendor/github.com/hashicorp/vault/http/sys_mount_test.go +++ b/vendor/github.com/hashicorp/vault/http/sys_mount_test.go @@ -27,8 +27,8 @@ func TestSysMounts(t *testing.T) { "auth": nil, "data": map[string]interface{}{ "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -58,8 +58,8 @@ func TestSysMounts(t *testing.T) { }, }, "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -91,6 +91,14 @@ func TestSysMounts(t *testing.T) { testResponseStatus(t, resp, 200) testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = 
v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + } + if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: %#v", actual) } @@ -103,7 +111,7 @@ func TestSysMount(t *testing.T) { TestServerAuth(t, addr, token) resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{ - "type": "generic", + "type": "kv", "description": "foo", }) testResponseStatus(t, resp, 204) @@ -121,7 +129,7 @@ func TestSysMount(t *testing.T) { "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", - "type": "generic", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -130,8 +138,8 @@ func TestSysMount(t *testing.T) { "local": false, }, "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -162,7 +170,7 @@ func TestSysMount(t *testing.T) { }, "foo/": map[string]interface{}{ "description": "foo", - "type": "generic", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -171,8 +179,8 @@ func TestSysMount(t *testing.T) { "local": false, }, "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -204,6 +212,14 @@ func TestSysMount(t *testing.T) { testResponseStatus(t, resp, 200) testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if 
v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + } + if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: %#v", actual) } @@ -216,7 +232,7 @@ func TestSysMount_put(t *testing.T) { TestServerAuth(t, addr, token) resp := testHttpPut(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{ - "type": "generic", + "type": "kv", "description": "foo", }) testResponseStatus(t, resp, 204) @@ -232,7 +248,7 @@ func TestSysRemount(t *testing.T) { TestServerAuth(t, addr, token) resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{ - "type": "generic", + "type": "kv", "description": "foo", }) testResponseStatus(t, resp, 204) @@ -256,7 +272,7 @@ func TestSysRemount(t *testing.T) { "data": map[string]interface{}{ "bar/": map[string]interface{}{ "description": "foo", - "type": "generic", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -265,8 +281,8 @@ func TestSysRemount(t *testing.T) { "local": false, }, "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -297,7 +313,7 @@ func TestSysRemount(t *testing.T) { }, "bar/": map[string]interface{}{ "description": "foo", - "type": "generic", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -306,8 +322,8 @@ func TestSysRemount(t *testing.T) { "local": false, }, "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": 
"key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -339,6 +355,14 @@ func TestSysRemount(t *testing.T) { testResponseStatus(t, resp, 200) testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + } + if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: %#v", actual) } @@ -351,7 +375,7 @@ func TestSysUnmount(t *testing.T) { TestServerAuth(t, addr, token) resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{ - "type": "generic", + "type": "kv", "description": "foo", }) testResponseStatus(t, resp, 204) @@ -371,8 +395,8 @@ func TestSysUnmount(t *testing.T) { "auth": nil, "data": map[string]interface{}{ "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -402,8 +426,8 @@ func TestSysUnmount(t *testing.T) { }, }, "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -435,6 +459,14 @@ func TestSysUnmount(t *testing.T) { testResponseStatus(t, resp, 200) testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if 
v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + } + if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: %#v", actual) } @@ -447,7 +479,7 @@ func TestSysTuneMount(t *testing.T) { TestServerAuth(t, addr, token) resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{ - "type": "generic", + "type": "kv", "description": "foo", }) testResponseStatus(t, resp, 204) @@ -465,7 +497,7 @@ func TestSysTuneMount(t *testing.T) { "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", - "type": "generic", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -474,8 +506,8 @@ func TestSysTuneMount(t *testing.T) { "local": false, }, "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -506,7 +538,7 @@ func TestSysTuneMount(t *testing.T) { }, "foo/": map[string]interface{}{ "description": "foo", - "type": "generic", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -515,8 +547,8 @@ func TestSysTuneMount(t *testing.T) { "local": false, }, "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -548,6 +580,14 @@ func TestSysTuneMount(t *testing.T) { testResponseStatus(t, resp, 200) 
testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + } + if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: %#v", actual) } @@ -562,7 +602,7 @@ func TestSysTuneMount(t *testing.T) { resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{ "default_lease_ttl": "72000h", }) - testResponseStatus(t, resp, 400) + testResponseStatus(t, resp, 204) // Longer than system default resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{ @@ -599,7 +639,7 @@ func TestSysTuneMount(t *testing.T) { "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", - "type": "generic", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("259196400"), "max_lease_ttl": json.Number("259200000"), @@ -608,8 +648,8 @@ func TestSysTuneMount(t *testing.T) { "local": false, }, "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + "description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -640,7 +680,7 @@ func TestSysTuneMount(t *testing.T) { }, "foo/": map[string]interface{}{ "description": "foo", - "type": "generic", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("259196400"), "max_lease_ttl": json.Number("259200000"), @@ -649,8 +689,8 @@ func TestSysTuneMount(t *testing.T) { "local": false, }, "secret/": map[string]interface{}{ - "description": "generic secret storage", - "type": "generic", + 
"description": "key/value secret storage", + "type": "kv", "config": map[string]interface{}{ "default_lease_ttl": json.Number("0"), "max_lease_ttl": json.Number("0"), @@ -683,6 +723,14 @@ func TestSysTuneMount(t *testing.T) { testResponseStatus(t, resp, 200) testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + } + if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) } diff --git a/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go b/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go index 5dc0bf9..53e4996 100644 --- a/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go +++ b/vendor/github.com/hashicorp/vault/http/sys_mounts_test.go @@ -54,12 +54,12 @@ func TestSysMountConfig(t *testing.T) { } } -// testMount sets up a test mount of a generic backend w/ a random path; caller +// testMount sets up a test mount of a kv backend w/ a random path; caller // is responsible for unmounting func testMount(client *api.Client) (string, error) { rand.Seed(time.Now().UTC().UnixNano()) randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int() path := fmt.Sprintf("testmount-%d", randInt) - err := client.Sys().Mount(path, &api.MountInput{Type: "generic"}) + err := client.Sys().Mount(path, &api.MountInput{Type: "kv"}) return path, err } diff --git a/vendor/github.com/hashicorp/vault/http/sys_policy_test.go b/vendor/github.com/hashicorp/vault/http/sys_policy_test.go index 6a8a33b..42c1e4b 100644 --- a/vendor/github.com/hashicorp/vault/http/sys_policy_test.go +++ 
b/vendor/github.com/hashicorp/vault/http/sys_policy_test.go @@ -77,7 +77,7 @@ func TestSysWritePolicy(t *testing.T) { TestServerAuth(t, addr, token) resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{ - "rules": ``, + "rules": `path "*" { capabilities = ["read"] }`, }) testResponseStatus(t, resp, 204) @@ -118,7 +118,7 @@ func TestSysDeletePolicy(t *testing.T) { TestServerAuth(t, addr, token) resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{ - "rules": ``, + "rules": `path "*" { capabilities = ["read"] }`, }) testResponseStatus(t, resp, 204) diff --git a/vendor/github.com/hashicorp/vault/http/sys_rekey.go b/vendor/github.com/hashicorp/vault/http/sys_rekey.go index bd597b6..9f26f3b 100644 --- a/vendor/github.com/hashicorp/vault/http/sys_rekey.go +++ b/vendor/github.com/hashicorp/vault/http/sys_rekey.go @@ -21,7 +21,7 @@ func handleSysRekeyInit(core *vault.Core, recovery bool) http.Handler { } repState := core.ReplicationState() - if repState == consts.ReplicationSecondary { + if repState.HasState(consts.ReplicationPerformanceSecondary) { respondError(w, http.StatusBadRequest, fmt.Errorf("rekeying can only be performed on the primary cluster when replication is activated")) return diff --git a/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go b/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go index 9c27ebb..7ab2143 100644 --- a/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go +++ b/vendor/github.com/hashicorp/vault/http/sys_wrapping_test.go @@ -2,7 +2,6 @@ package http import ( "encoding/json" - "net/http" "reflect" "testing" "time" @@ -14,29 +13,20 @@ import ( // Test wrapping functionality func TestHTTP_Wrapping(t *testing.T) { - handler1 := http.NewServeMux() - handler2 := http.NewServeMux() - handler3 := http.NewServeMux() + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer 
cluster.Cleanup() - coreConfig := &vault.CoreConfig{} - - // Chicken-and-egg: Handler needs a core. So we create handlers first, then - // add routes chained to a Handler-created handler. - cores := vault.TestCluster(t, []http.Handler{handler1, handler2, handler3}, coreConfig, true) - for _, core := range cores { - defer core.CloseListeners() - } - handler1.Handle("/", Handler(cores[0].Core)) - handler2.Handle("/", Handler(cores[1].Core)) - handler3.Handle("/", Handler(cores[2].Core)) + cores := cluster.Cores // make it easy to get access to the active core := cores[0].Core vault.TestWaitActive(t, core) - root := cores[0].Root client := cores[0].Client - client.SetToken(root) + client.SetToken(cluster.RootToken) // Write a value that we will use with wrapping for lookup _, err := client.Logical().Write("secret/foo", map[string]interface{}{ @@ -78,7 +68,7 @@ func TestHTTP_Wrapping(t *testing.T) { // Second: basic things that should fail, unwrap edition // Root token isn't a wrapping token - _, err = client.Logical().Unwrap(root) + _, err = client.Logical().Unwrap(cluster.RootToken) if err == nil { t.Fatal("expected error") } @@ -121,6 +111,9 @@ func TestHTTP_Wrapping(t *testing.T) { secret, err = client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{ "token": wrapInfo.Token, }) + if err != nil { + t.Fatal(err) + } if secret == nil || secret.Data == nil { t.Fatal("secret or secret data is nil") } @@ -150,6 +143,9 @@ func TestHTTP_Wrapping(t *testing.T) { // Test unwrap via the client token client.SetToken(wrapInfo.Token) secret, err = client.Logical().Write("sys/wrapping/unwrap", nil) + if err != nil { + t.Fatal(err) + } if secret == nil || secret.Data == nil { t.Fatal("secret or secret data is nil") } @@ -161,7 +157,7 @@ func TestHTTP_Wrapping(t *testing.T) { } // Create a wrapping token - client.SetToken(root) + client.SetToken(cluster.RootToken) secret, err = client.Logical().Read("secret/foo") if err != nil { t.Fatal(err) @@ -175,6 +171,9 @@ func 
TestHTTP_Wrapping(t *testing.T) { secret, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{ "token": wrapInfo.Token, }) + if err != nil { + t.Fatal(err) + } ret2 := secret // Should be expired and fail _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{ @@ -197,6 +196,9 @@ func TestHTTP_Wrapping(t *testing.T) { // Read response directly client.SetToken(wrapInfo.Token) secret, err = client.Logical().Read("cubbyhole/response") + if err != nil { + t.Fatal(err) + } ret3 := secret // Should be expired and fail _, err = client.Logical().Write("cubbyhole/response", nil) @@ -205,7 +207,7 @@ func TestHTTP_Wrapping(t *testing.T) { } // Create a wrapping token - client.SetToken(root) + client.SetToken(cluster.RootToken) secret, err = client.Logical().Read("secret/foo") if err != nil { t.Fatal(err) @@ -217,6 +219,9 @@ func TestHTTP_Wrapping(t *testing.T) { // Read via Unwrap method secret, err = client.Logical().Unwrap(wrapInfo.Token) + if err != nil { + t.Fatal(err) + } ret4 := secret // Should be expired and fail _, err = client.Logical().Unwrap(wrapInfo.Token) @@ -254,7 +259,7 @@ func TestHTTP_Wrapping(t *testing.T) { // Custom wrapping // - client.SetToken(root) + client.SetToken(cluster.RootToken) data := map[string]interface{}{ "zip": "zap", "three": json.Number("2"), @@ -303,10 +308,24 @@ func TestHTTP_Wrapping(t *testing.T) { } wrapInfo = secret.WrapInfo + // Check for correct CreationPath before rewrap + if wrapInfo.CreationPath != "secret/foo" { + t.Fatal("error on wrapInfo.CreationPath: expected: secret/foo, got: %s", wrapInfo.CreationPath) + } + // Test rewrapping secret, err = client.Logical().Write("sys/wrapping/rewrap", map[string]interface{}{ "token": wrapInfo.Token, }) + if err != nil { + t.Fatal(err) + } + + // Check for correct Creation path after rewrap + if wrapInfo.CreationPath != "secret/foo" { + t.Fatal("error on wrapInfo.CreationPath: expected: secret/foo, got: %s", wrapInfo.CreationPath) + } + // 
Should be expired and fail _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{ "token": wrapInfo.Token, diff --git a/vendor/github.com/hashicorp/vault/logical/auth.go b/vendor/github.com/hashicorp/vault/logical/auth.go index b454790..09694c4 100644 --- a/vendor/github.com/hashicorp/vault/logical/auth.go +++ b/vendor/github.com/hashicorp/vault/logical/auth.go @@ -51,6 +51,10 @@ type Auth struct { // Number of allowed uses of the issued token NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"` + + // Persona is the information about the authenticated client returned by + // the auth backend + Persona *Persona `json:"persona" structs:"persona" mapstructure:"persona"` } func (a *Auth) GoString() string { diff --git a/vendor/github.com/hashicorp/vault/logical/framework/backend.go b/vendor/github.com/hashicorp/vault/logical/framework/backend.go index e94ea04..477a926 100644 --- a/vendor/github.com/hashicorp/vault/logical/framework/backend.go +++ b/vendor/github.com/hashicorp/vault/logical/framework/backend.go @@ -82,6 +82,12 @@ type Backend struct { // See the built-in AuthRenew helpers in lease.go for common callbacks. AuthRenew OperationFunc + // LicenseRegistration is called to register the license for a backend. + LicenseRegistration LicenseRegistrationFunc + + // Type is the logical.BackendType for the backend implementation + BackendType logical.BackendType + logger log.Logger system logical.SystemView once sync.Once @@ -107,6 +113,10 @@ type InitializeFunc func() error // InvalidateFunc is the callback for backend key invalidation. type InvalidateFunc func(string) +// LicenseRegistrationFunc is the callback for backend license registration. +type LicenseRegistrationFunc func(interface{}) error + +// HandleExistenceCheck is the logical.Backend implementation. 
func (b *Backend) HandleExistenceCheck(req *logical.Request) (checkFound bool, exists bool, err error) { b.once.Do(b.init) @@ -154,7 +164,7 @@ func (b *Backend) HandleExistenceCheck(req *logical.Request) (checkFound bool, e return } -// logical.Backend impl. +// HandleRequest is the logical.Backend implementation. func (b *Backend) HandleRequest(req *logical.Request) (*logical.Response, error) { b.once.Do(b.init) @@ -221,18 +231,11 @@ func (b *Backend) HandleRequest(req *logical.Request) (*logical.Response, error) return callback(req, &fd) } -// logical.Backend impl. +// SpecialPaths is the logical.Backend implementation. func (b *Backend) SpecialPaths() *logical.Paths { return b.PathsSpecial } -// Setup is used to initialize the backend with the initial backend configuration -func (b *Backend) Setup(config *logical.BackendConfig) (logical.Backend, error) { - b.logger = config.Logger - b.system = config.System - return b, nil -} - // Cleanup is used to release resources and prepare to stop the backend func (b *Backend) Cleanup() { if b.Clean != nil { @@ -240,6 +243,7 @@ func (b *Backend) Cleanup() { } } +// Initialize calls the backend's Init func if set. func (b *Backend) Initialize() error { if b.Init != nil { return b.Init() @@ -255,6 +259,13 @@ func (b *Backend) InvalidateKey(key string) { } } +// Setup is used to initialize the backend with the initial backend configuration +func (b *Backend) Setup(config *logical.BackendConfig) error { + b.logger = config.Logger + b.system = config.System + return nil +} + // Logger can be used to get the logger. If no logger has been set, // the logs will be discarded. func (b *Backend) Logger() log.Logger { @@ -265,11 +276,25 @@ func (b *Backend) Logger() log.Logger { return logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelOff) } +// System returns the backend's system view. 
func (b *Backend) System() logical.SystemView { return b.system } -// This method takes in the TTL and MaxTTL values provided by the user, +// Type returns the backend type +func (b *Backend) Type() logical.BackendType { + return b.BackendType +} + +// RegisterLicense performs backend license registration. +func (b *Backend) RegisterLicense(license interface{}) error { + if b.LicenseRegistration == nil { + return nil + } + return b.LicenseRegistration(license) +} + +// SanitizeTTLStr takes in the TTL and MaxTTL values provided by the user, // compares those with the SystemView values. If they are empty a value of 0 is // set, which will cause initial secret or LeaseExtend operations to use the // mount/system defaults. If they are set, their boundaries are validated. @@ -297,7 +322,8 @@ func (b *Backend) SanitizeTTLStr(ttlStr, maxTTLStr string) (ttl, maxTTL time.Dur return } -// Caps the boundaries of ttl and max_ttl values to the backend mount's max_ttl value. +// SanitizeTTL caps the boundaries of ttl and max_ttl values to the +// backend mount's max_ttl value. 
func (b *Backend) SanitizeTTL(ttl, maxTTL time.Duration) (time.Duration, time.Duration, error) { sysMaxTTL := b.System().MaxLeaseTTL() if ttl > sysMaxTTL { @@ -575,6 +601,7 @@ func (s *FieldSchema) DefaultOrZero() interface{} { return s.Type.Zero() } +// Zero returns the correct zero-value for a specific FieldType func (t FieldType) Zero() interface{} { switch t { case TypeString: diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_data.go b/vendor/github.com/hashicorp/vault/logical/framework/field_data.go index 9783802..7fac976 100644 --- a/vendor/github.com/hashicorp/vault/logical/framework/field_data.go +++ b/vendor/github.com/hashicorp/vault/logical/framework/field_data.go @@ -2,7 +2,9 @@ package framework import ( "encoding/json" + "errors" "fmt" + "regexp" "github.com/hashicorp/vault/helper/parseutil" "github.com/hashicorp/vault/helper/strutil" @@ -18,7 +20,7 @@ type FieldData struct { Schema map[string]*FieldSchema } -// Cycle through raw data and validate conversions in +// Validate cycles through raw data and validate conversions in // the schema, so we don't get an error/panic later when // trying to get data out. Data not in the schema is not // an error at this point, so we don't worry about it. 
@@ -31,8 +33,8 @@ func (d *FieldData) Validate() error { } switch schema.Type { - case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString, TypeSlice, - TypeStringSlice, TypeCommaStringSlice: + case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString, + TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice: _, _, err := d.getPrimitive(field, schema) if err != nil { return fmt.Errorf("Error converting input %v for field %s: %s", value, field, err) @@ -108,7 +110,7 @@ func (d *FieldData) GetOkErr(k string) (interface{}, bool, error) { switch schema.Type { case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString, - TypeSlice, TypeStringSlice, TypeCommaStringSlice: + TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice: return d.getPrimitive(k, schema) default: return nil, false, @@ -145,6 +147,20 @@ func (d *FieldData) getPrimitive( } return result, true, nil + case TypeNameString: + var result string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, true, err + } + matched, err := regexp.MatchString("^\\w(([\\w-.]+)?\\w)?$", result) + if err != nil { + return nil, true, err + } + if !matched { + return nil, true, errors.New("field does not match the formatting rules") + } + return result, true, nil + case TypeMap: var result map[string]interface{} if err := mapstructure.WeakDecode(raw, &result); err != nil { @@ -159,6 +175,16 @@ func (d *FieldData) getPrimitive( return nil, false, nil case int: result = inp + case int32: + result = int(inp) + case int64: + result = int(inp) + case uint: + result = int(inp) + case uint32: + result = int(inp) + case uint64: + result = int(inp) case float32: result = int(inp) case float64: diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go b/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go index a801f9c..a9bc474 100644 --- a/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go +++ 
b/vendor/github.com/hashicorp/vault/logical/framework/field_data_test.go @@ -180,6 +180,17 @@ func TestFieldDataGet(t *testing.T) { []string{"123", "abc"}, }, + "string slice type, single value": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeStringSlice}, + }, + map[string]interface{}{ + "foo": "abc", + }, + "foo", + []string{"abc"}, + }, + "comma string slice type, comma string with one value": { map[string]*FieldSchema{ "foo": &FieldSchema{Type: TypeCommaStringSlice}, @@ -245,6 +256,28 @@ func TestFieldDataGet(t *testing.T) { "foo", []string{}, }, + + "name string type, valid string": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": "bar", + }, + "foo", + "bar", + }, + + "name string type, valid value with special characters": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": "bar.baz-bay123", + }, + "foo", + "bar.baz-bay123", + }, } for name, tc := range cases { @@ -253,6 +286,10 @@ func TestFieldDataGet(t *testing.T) { Schema: tc.Schema, } + if err := data.Validate(); err != nil { + t.Fatalf("bad: %#v", err) + } + actual := data.Get(tc.Key) if !reflect.DeepEqual(actual, tc.Value) { t.Fatalf( @@ -261,3 +298,60 @@ func TestFieldDataGet(t *testing.T) { } } } + +func TestFieldDataGet_Error(t *testing.T) { + cases := map[string]struct { + Schema map[string]*FieldSchema + Raw map[string]interface{} + Key string + }{ + "name string type, invalid value with invalid characters": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": "bar baz", + }, + "foo", + }, + "name string type, invalid value with special characters at beginning": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": ".barbaz", + }, + "foo", + }, + "name string type, invalid value with special characters at end": { + map[string]*FieldSchema{ 
+ "foo": &FieldSchema{Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": "barbaz-", + }, + "foo", + }, + "name string type, empty string": { + map[string]*FieldSchema{ + "foo": &FieldSchema{Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": "", + }, + "foo", + }, + } + + for _, tc := range cases { + data := &FieldData{ + Raw: tc.Raw, + Schema: tc.Schema, + } + + _, _, err := data.GetOkErr(tc.Key) + if err == nil { + t.Fatalf("error expected, none received") + } + } +} diff --git a/vendor/github.com/hashicorp/vault/logical/framework/field_type.go b/vendor/github.com/hashicorp/vault/logical/framework/field_type.go index 034d0fe..304d45f 100644 --- a/vendor/github.com/hashicorp/vault/logical/framework/field_type.go +++ b/vendor/github.com/hashicorp/vault/logical/framework/field_type.go @@ -23,12 +23,19 @@ const ( // slice of strings and also supports parsing a comma-separated list in // a string field TypeCommaStringSlice + + // TypeNameString represents a name that is URI safe and follows specific + // rules. These rules include start and end with an alphanumeric + // character and characters in the middle can be alphanumeric or . or -. 
+ TypeNameString ) func (t FieldType) String() string { switch t { case TypeString: return "string" + case TypeNameString: + return "name string" case TypeInt: return "int" case TypeBool: diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_map.go b/vendor/github.com/hashicorp/vault/logical/framework/path_map.go index ff1d277..f9fa3a6 100644 --- a/vendor/github.com/hashicorp/vault/logical/framework/path_map.go +++ b/vendor/github.com/hashicorp/vault/logical/framework/path_map.go @@ -21,6 +21,7 @@ type PathMap struct { Schema map[string]*FieldSchema CaseSensitive bool Salt *salt.Salt + SaltFunc func() (*salt.Salt, error) once sync.Once } @@ -41,7 +42,7 @@ func (p *PathMap) init() { } // pathStruct returns the pathStruct for this mapping -func (p *PathMap) pathStruct(k string) *PathStruct { +func (p *PathMap) pathStruct(s logical.Storage, k string) (*PathStruct, error) { p.once.Do(p.init) // If we don't care about casing, store everything lowercase @@ -49,30 +50,90 @@ func (p *PathMap) pathStruct(k string) *PathStruct { k = strings.ToLower(k) } + // The original key before any salting + origKey := k + // If we have a salt, apply it before lookup - if p.Salt != nil { - k = p.Salt.SaltID(k) + salt := p.Salt + var err error + if p.SaltFunc != nil { + salt, err = p.SaltFunc() + if err != nil { + return nil, err + } + } + if salt != nil { + k = salt.SaltID(k) } - return &PathStruct{ - Name: fmt.Sprintf("map/%s/%s", p.Name, k), + finalName := fmt.Sprintf("map/%s/%s", p.Name, k) + ps := &PathStruct{ + Name: finalName, Schema: p.Schema, } + + // Check for unsalted version and upgrade if so + if k != origKey { + // Generate the unsalted name + unsaltedName := fmt.Sprintf("map/%s/%s", p.Name, origKey) + // Set the path struct to use the unsalted name + ps.Name = unsaltedName + // Ensure that no matter what happens what is returned is the final + // path + defer func() { + ps.Name = finalName + }() + val, err := ps.Get(s) + if err != nil { + return nil, err + } 
+ // If not nil, we have an unsalted entry -- upgrade it + if val != nil { + // Set the path struct to use the desired final name + ps.Name = finalName + err = ps.Put(s, val) + if err != nil { + return nil, err + } + // Set it back to the old path and delete + ps.Name = unsaltedName + err = ps.Delete(s) + if err != nil { + return nil, err + } + // We'll set this in the deferred function but doesn't hurt here + ps.Name = finalName + } + } + + return ps, nil } // Get reads a value out of the mapping func (p *PathMap) Get(s logical.Storage, k string) (map[string]interface{}, error) { - return p.pathStruct(k).Get(s) + ps, err := p.pathStruct(s, k) + if err != nil { + return nil, err + } + return ps.Get(s) } // Put writes a value into the mapping func (p *PathMap) Put(s logical.Storage, k string, v map[string]interface{}) error { - return p.pathStruct(k).Put(s, v) + ps, err := p.pathStruct(s, k) + if err != nil { + return err + } + return ps.Put(s, v) } // Delete removes a value from the mapping func (p *PathMap) Delete(s logical.Storage, k string) error { - return p.pathStruct(k).Delete(s) + ps, err := p.pathStruct(s, k) + if err != nil { + return err + } + return ps.Delete(s) } // List reads the keys under a given path diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go b/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go index 7d30d7d..ce9215b 100644 --- a/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go +++ b/vendor/github.com/hashicorp/vault/logical/framework/path_map_test.go @@ -254,4 +254,192 @@ func TestPathMap_Salted(t *testing.T) { if v != nil { t.Fatalf("bad: %#v", v) } + + // Put in a non-salted version and make sure that after reading it's been + // upgraded + err = storage.Put(&logical.StorageEntry{ + Key: "struct/map/foo/b", + Value: []byte(`{"foo": "bar"}`), + }) + if err != nil { + t.Fatal("err: %v", err) + } + // A read should transparently upgrade + resp, err = 
b.HandleRequest(&logical.Request{ + Operation: logical.ReadOperation, + Path: "map/foo/b", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + list, _ := storage.List("struct/map/foo/") + if len(list) != 1 { + t.Fatalf("unexpected number of entries left after upgrade; expected 1, got %d", len(list)) + } + found := false + for _, v := range list { + if v == salt.SaltID("b") { + found = true + break + } + } + if !found { + t.Fatal("did not find upgraded value") + } +} + +func TestPathMap_SaltFunc(t *testing.T) { + storage := new(logical.InmemStorage) + locSalt, err := salt.NewSalt(storage, &salt.Config{ + HashFunc: salt.SHA1Hash, + }) + if err != nil { + t.Fatalf("err: %v", err) + } + saltFunc := func() (*salt.Salt, error) { + return locSalt, nil + } + p := &PathMap{Name: "foo", SaltFunc: saltFunc} + var b logical.Backend = &Backend{Paths: p.Paths()} + + // Write via HTTP + _, err = b.HandleRequest(&logical.Request{ + Operation: logical.UpdateOperation, + Path: "map/foo/a", + Data: map[string]interface{}{ + "value": "bar", + }, + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + + // Non-salted version should not be there + out, err := storage.Get("struct/map/foo/a") + if err != nil { + t.Fatalf("err: %v", err) + } + if out != nil { + t.Fatalf("non-salted key found") + } + + // Ensure the path is salted + expect := locSalt.SaltID("a") + out, err = storage.Get("struct/map/foo/" + expect) + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("missing salted key") + } + + // Read via HTTP + resp, err := b.HandleRequest(&logical.Request{ + Operation: logical.ReadOperation, + Path: "map/foo/a", + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if resp.Data["value"] != "bar" { + t.Fatalf("bad: %#v", resp) + } + + // Read via API + v, err := p.Get(storage, "a") + if err != nil { + t.Fatalf("bad: %#v", err) + } + if v["value"] != "bar" { + t.Fatalf("bad: %#v", v) + } + + // Read via API 
with other casing + v, err = p.Get(storage, "A") + if err != nil { + t.Fatalf("bad: %#v", err) + } + if v["value"] != "bar" { + t.Fatalf("bad: %#v", v) + } + + // Verify List + keys, err := p.List(storage, "") + if err != nil { + t.Fatalf("bad: %#v", err) + } + if len(keys) != 1 || keys[0] != expect { + t.Fatalf("bad: %#v", keys) + } + + // Delete via HTTP + resp, err = b.HandleRequest(&logical.Request{ + Operation: logical.DeleteOperation, + Path: "map/foo/a", + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if resp != nil { + t.Fatalf("bad: %#v", resp) + } + + // Re-read via HTTP + resp, err = b.HandleRequest(&logical.Request{ + Operation: logical.ReadOperation, + Path: "map/foo/a", + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if _, ok := resp.Data["value"]; ok { + t.Fatalf("bad: %#v", resp) + } + + // Re-read via API + v, err = p.Get(storage, "a") + if err != nil { + t.Fatalf("bad: %#v", err) + } + if v != nil { + t.Fatalf("bad: %#v", v) + } + + // Put in a non-salted version and make sure that after reading it's been + // upgraded + err = storage.Put(&logical.StorageEntry{ + Key: "struct/map/foo/b", + Value: []byte(`{"foo": "bar"}`), + }) + if err != nil { + t.Fatal("err: %v", err) + } + // A read should transparently upgrade + resp, err = b.HandleRequest(&logical.Request{ + Operation: logical.ReadOperation, + Path: "map/foo/b", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + list, _ := storage.List("struct/map/foo/") + if len(list) != 1 { + t.Fatalf("unexpected number of entries left after upgrade; expected 1, got %d", len(list)) + } + found := false + for _, v := range list { + if v == locSalt.SaltID("b") { + found = true + break + } + } + if !found { + t.Fatal("did not find upgraded value") + } } diff --git a/vendor/github.com/hashicorp/vault/logical/identity.go b/vendor/github.com/hashicorp/vault/logical/identity.go new file mode 100644 index 0000000..fbc4fbb --- /dev/null +++ 
// Persona represents the information used by core to create implicit entity.
// Implicit entities get created when a client authenticates successfully from
// any of the authentication backends (except token backend).
//
// This is applicable to enterprise binaries only. Persona should be set in
// the Auth response returned by the credential backends. This structure is
// placed in the open source repository only to enable custom authentication
// plugins to be used along with the enterprise binary. The custom auth
// plugins should make use of this and fill out the Persona information in the
// authentication response.
type Persona struct {
	// MountType is the backend mount's type to which this identity belongs.
	MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type"`

	// MountAccessor is the identifier of the mount entry to which this
	// identity belongs.
	MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor"`

	// Name is the identifier of this identity in its authentication source.
	Name string `json:"name" structs:"name" mapstructure:"name"`
}

// BackendType is the type of backend that is being implemented
type BackendType uint32

// These are the types of backends that can be derived from logical.Backend.
const (
	TypeUnknown    BackendType = 0 // This is also the zero-value for BackendType
	TypeLogical    BackendType = 1
	TypeCredential BackendType = 2
)

// String implements fmt.Stringer for BackendType.
func (b BackendType) String() string {
	switch b {
	case TypeLogical:
		return "secret"
	case TypeCredential:
		return "auth"
	}

	return "unknown"
}
+ Setup(*BackendConfig) error + + // Type returns the BackendType for the particular backend + Type() BackendType + + // RegisterLicense performs backend license registration + RegisterLicense(interface{}) error } // BackendConfig is provided to the factory to initialize the backend diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend.go new file mode 100644 index 0000000..081922c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/backend.go @@ -0,0 +1,24 @@ +package plugin + +import ( + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/logical" +) + +// BackendPlugin is the plugin.Plugin implementation +type BackendPlugin struct { + Factory func(*logical.BackendConfig) (logical.Backend, error) + metadataMode bool +} + +// Server gets called when on plugin.Serve() +func (b *BackendPlugin) Server(broker *plugin.MuxBroker) (interface{}, error) { + return &backendPluginServer{factory: b.Factory, broker: broker}, nil +} + +// Client gets called on plugin.NewClient() +func (b BackendPlugin) Client(broker *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &backendPluginClient{client: c, broker: broker, metadataMode: b.metadataMode}, nil +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go new file mode 100644 index 0000000..cc2d83b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go @@ -0,0 +1,285 @@ +package plugin + +import ( + "errors" + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/logical" + log "github.com/mgutz/logxi/v1" +) + +var ( + ErrClientInMetadataMode = errors.New("plugin client can not perform action while in metadata mode") +) + +// backendPluginClient implements logical.Backend and is the +// go-plugin client. 
+type backendPluginClient struct { + broker *plugin.MuxBroker + client *rpc.Client + metadataMode bool + + system logical.SystemView + logger log.Logger +} + +// HandleRequestArgs is the args for HandleRequest method. +type HandleRequestArgs struct { + StorageID uint32 + Request *logical.Request +} + +// HandleRequestReply is the reply for HandleRequest method. +type HandleRequestReply struct { + Response *logical.Response + Error *plugin.BasicError +} + +// SpecialPathsReply is the reply for SpecialPaths method. +type SpecialPathsReply struct { + Paths *logical.Paths +} + +// SystemReply is the reply for System method. +type SystemReply struct { + SystemView logical.SystemView + Error *plugin.BasicError +} + +// HandleExistenceCheckArgs is the args for HandleExistenceCheck method. +type HandleExistenceCheckArgs struct { + StorageID uint32 + Request *logical.Request +} + +// HandleExistenceCheckReply is the reply for HandleExistenceCheck method. +type HandleExistenceCheckReply struct { + CheckFound bool + Exists bool + Error *plugin.BasicError +} + +// SetupArgs is the args for Setup method. +type SetupArgs struct { + StorageID uint32 + LoggerID uint32 + SysViewID uint32 + Config map[string]string +} + +// SetupReply is the reply for Setup method. +type SetupReply struct { + Error *plugin.BasicError +} + +// TypeReply is the reply for the Type method. +type TypeReply struct { + Type logical.BackendType +} + +// RegisterLicenseArgs is the args for the RegisterLicense method. +type RegisterLicenseArgs struct { + License interface{} +} + +// RegisterLicenseReply is the reply for the RegisterLicense method. +type RegisterLicenseReply struct { + Error *plugin.BasicError +} + +func (b *backendPluginClient) HandleRequest(req *logical.Request) (*logical.Response, error) { + if b.metadataMode { + return nil, ErrClientInMetadataMode + } + + // Do not send the storage, since go-plugin cannot serialize + // interfaces. The server will pick up the storage from the shim. 
+ req.Storage = nil + args := &HandleRequestArgs{ + Request: req, + } + var reply HandleRequestReply + + if req.Connection != nil { + oldConnState := req.Connection.ConnState + req.Connection.ConnState = nil + defer func() { + req.Connection.ConnState = oldConnState + }() + } + + err := b.client.Call("Plugin.HandleRequest", args, &reply) + if err != nil { + return nil, err + } + if reply.Error != nil { + if reply.Error.Error() == logical.ErrUnsupportedOperation.Error() { + return nil, logical.ErrUnsupportedOperation + } + return nil, reply.Error + } + + return reply.Response, nil +} + +func (b *backendPluginClient) SpecialPaths() *logical.Paths { + var reply SpecialPathsReply + err := b.client.Call("Plugin.SpecialPaths", new(interface{}), &reply) + if err != nil { + return nil + } + + return reply.Paths +} + +// System returns vault's system view. The backend client stores the view during +// Setup, so there is no need to shim the system just to get it back. +func (b *backendPluginClient) System() logical.SystemView { + return b.system +} + +// Logger returns vault's logger. The backend client stores the logger during +// Setup, so there is no need to shim the logger just to get it back. +func (b *backendPluginClient) Logger() log.Logger { + return b.logger +} + +func (b *backendPluginClient) HandleExistenceCheck(req *logical.Request) (bool, bool, error) { + if b.metadataMode { + return false, false, ErrClientInMetadataMode + } + + // Do not send the storage, since go-plugin cannot serialize + // interfaces. The server will pick up the storage from the shim. 
+ req.Storage = nil + args := &HandleExistenceCheckArgs{ + Request: req, + } + var reply HandleExistenceCheckReply + + if req.Connection != nil { + oldConnState := req.Connection.ConnState + req.Connection.ConnState = nil + defer func() { + req.Connection.ConnState = oldConnState + }() + } + + err := b.client.Call("Plugin.HandleExistenceCheck", args, &reply) + if err != nil { + return false, false, err + } + if reply.Error != nil { + // THINKING: Should be be a switch on all error types? + if reply.Error.Error() == logical.ErrUnsupportedPath.Error() { + return false, false, logical.ErrUnsupportedPath + } + return false, false, reply.Error + } + + return reply.CheckFound, reply.Exists, nil +} + +func (b *backendPluginClient) Cleanup() { + b.client.Call("Plugin.Cleanup", new(interface{}), &struct{}{}) +} + +func (b *backendPluginClient) Initialize() error { + if b.metadataMode { + return ErrClientInMetadataMode + } + err := b.client.Call("Plugin.Initialize", new(interface{}), &struct{}{}) + return err +} + +func (b *backendPluginClient) InvalidateKey(key string) { + if b.metadataMode { + return + } + b.client.Call("Plugin.InvalidateKey", key, &struct{}{}) +} + +func (b *backendPluginClient) Setup(config *logical.BackendConfig) error { + // Shim logical.Storage + storageImpl := config.StorageView + if b.metadataMode { + storageImpl = &NOOPStorage{} + } + storageID := b.broker.NextId() + go b.broker.AcceptAndServe(storageID, &StorageServer{ + impl: storageImpl, + }) + + // Shim log.Logger + loggerImpl := config.Logger + if b.metadataMode { + loggerImpl = log.NullLog + } + loggerID := b.broker.NextId() + go b.broker.AcceptAndServe(loggerID, &LoggerServer{ + logger: loggerImpl, + }) + + // Shim logical.SystemView + sysViewImpl := config.System + if b.metadataMode { + sysViewImpl = &logical.StaticSystemView{} + } + sysViewID := b.broker.NextId() + go b.broker.AcceptAndServe(sysViewID, &SystemViewServer{ + impl: sysViewImpl, + }) + + args := &SetupArgs{ + StorageID: 
storageID, + LoggerID: loggerID, + SysViewID: sysViewID, + Config: config.Config, + } + var reply SetupReply + + err := b.client.Call("Plugin.Setup", args, &reply) + if err != nil { + return err + } + if reply.Error != nil { + return reply.Error + } + + // Set system and logger for getter methods + b.system = config.System + b.logger = config.Logger + + return nil +} + +func (b *backendPluginClient) Type() logical.BackendType { + var reply TypeReply + err := b.client.Call("Plugin.Type", new(interface{}), &reply) + if err != nil { + return logical.TypeUnknown + } + + return logical.BackendType(reply.Type) +} + +func (b *backendPluginClient) RegisterLicense(license interface{}) error { + if b.metadataMode { + return ErrClientInMetadataMode + } + + var reply RegisterLicenseReply + args := RegisterLicenseArgs{ + License: license, + } + err := b.client.Call("Plugin.RegisterLicense", args, &reply) + if err != nil { + return err + } + if reply.Error != nil { + return reply.Error + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go new file mode 100644 index 0000000..47045b1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go @@ -0,0 +1,187 @@ +package plugin + +import ( + "errors" + "net/rpc" + "os" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/logical" +) + +var ( + ErrServerInMetadataMode = errors.New("plugin server can not perform action while in metadata mode") +) + +// backendPluginServer is the RPC server that backendPluginClient talks to, +// it methods conforming to requirements by net/rpc +type backendPluginServer struct { + broker *plugin.MuxBroker + backend logical.Backend + factory func(*logical.BackendConfig) (logical.Backend, error) + + loggerClient *rpc.Client + sysViewClient *rpc.Client + storageClient *rpc.Client +} + +func 
inMetadataMode() bool { + return os.Getenv(pluginutil.PluginMetadaModeEnv) == "true" +} + +func (b *backendPluginServer) HandleRequest(args *HandleRequestArgs, reply *HandleRequestReply) error { + if inMetadataMode() { + return ErrServerInMetadataMode + } + + storage := &StorageClient{client: b.storageClient} + args.Request.Storage = storage + + resp, err := b.backend.HandleRequest(args.Request) + *reply = HandleRequestReply{ + Response: resp, + Error: plugin.NewBasicError(err), + } + + return nil +} + +func (b *backendPluginServer) SpecialPaths(_ interface{}, reply *SpecialPathsReply) error { + *reply = SpecialPathsReply{ + Paths: b.backend.SpecialPaths(), + } + return nil +} + +func (b *backendPluginServer) HandleExistenceCheck(args *HandleExistenceCheckArgs, reply *HandleExistenceCheckReply) error { + if inMetadataMode() { + return ErrServerInMetadataMode + } + + storage := &StorageClient{client: b.storageClient} + args.Request.Storage = storage + + checkFound, exists, err := b.backend.HandleExistenceCheck(args.Request) + *reply = HandleExistenceCheckReply{ + CheckFound: checkFound, + Exists: exists, + Error: plugin.NewBasicError(err), + } + + return nil +} + +func (b *backendPluginServer) Cleanup(_ interface{}, _ *struct{}) error { + b.backend.Cleanup() + + // Close rpc clients + b.loggerClient.Close() + b.sysViewClient.Close() + b.storageClient.Close() + return nil +} + +func (b *backendPluginServer) Initialize(_ interface{}, _ *struct{}) error { + if inMetadataMode() { + return ErrServerInMetadataMode + } + + err := b.backend.Initialize() + return err +} + +func (b *backendPluginServer) InvalidateKey(args string, _ *struct{}) error { + if inMetadataMode() { + return ErrServerInMetadataMode + } + + b.backend.InvalidateKey(args) + return nil +} + +// Setup dials into the plugin's broker to get a shimmed storage, logger, and +// system view of the backend. 
This method also instantiates the underlying +// backend through its factory func for the server side of the plugin. +func (b *backendPluginServer) Setup(args *SetupArgs, reply *SetupReply) error { + // Dial for storage + storageConn, err := b.broker.Dial(args.StorageID) + if err != nil { + *reply = SetupReply{ + Error: plugin.NewBasicError(err), + } + return nil + } + rawStorageClient := rpc.NewClient(storageConn) + b.storageClient = rawStorageClient + + storage := &StorageClient{client: rawStorageClient} + + // Dial for logger + loggerConn, err := b.broker.Dial(args.LoggerID) + if err != nil { + *reply = SetupReply{ + Error: plugin.NewBasicError(err), + } + return nil + } + rawLoggerClient := rpc.NewClient(loggerConn) + b.loggerClient = rawLoggerClient + + logger := &LoggerClient{client: rawLoggerClient} + + // Dial for sys view + sysViewConn, err := b.broker.Dial(args.SysViewID) + if err != nil { + *reply = SetupReply{ + Error: plugin.NewBasicError(err), + } + return nil + } + rawSysViewClient := rpc.NewClient(sysViewConn) + b.sysViewClient = rawSysViewClient + + sysView := &SystemViewClient{client: rawSysViewClient} + + config := &logical.BackendConfig{ + StorageView: storage, + Logger: logger, + System: sysView, + Config: args.Config, + } + + // Call the underlying backend factory after shims have been created + // to set b.backend + backend, err := b.factory(config) + if err != nil { + *reply = SetupReply{ + Error: plugin.NewBasicError(err), + } + } + b.backend = backend + + return nil +} + +func (b *backendPluginServer) Type(_ interface{}, reply *TypeReply) error { + *reply = TypeReply{ + Type: b.backend.Type(), + } + + return nil +} + +func (b *backendPluginServer) RegisterLicense(args *RegisterLicenseArgs, reply *RegisterLicenseReply) error { + if inMetadataMode() { + return ErrServerInMetadataMode + } + + err := b.backend.RegisterLicense(args.License) + if err != nil { + *reply = RegisterLicenseReply{ + Error: plugin.NewBasicError(err), + } + } + + return 
nil +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend_test.go new file mode 100644 index 0000000..deb5b63 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/backend_test.go @@ -0,0 +1,178 @@ +package plugin + +import ( + "testing" + "time" + + gplugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/plugin/mock" + log "github.com/mgutz/logxi/v1" +) + +func TestBackendPlugin_impl(t *testing.T) { + var _ gplugin.Plugin = new(BackendPlugin) + var _ logical.Backend = new(backendPluginClient) +} + +func TestBackendPlugin_HandleRequest(t *testing.T) { + b, cleanup := testBackend(t) + defer cleanup() + + resp, err := b.HandleRequest(&logical.Request{ + Operation: logical.CreateOperation, + Path: "kv/foo", + Data: map[string]interface{}{ + "value": "bar", + }, + }) + if err != nil { + t.Fatal(err) + } + if resp.Data["value"] != "bar" { + t.Fatalf("bad: %#v", resp) + } +} + +func TestBackendPlugin_SpecialPaths(t *testing.T) { + b, cleanup := testBackend(t) + defer cleanup() + + paths := b.SpecialPaths() + if paths == nil { + t.Fatal("SpecialPaths() returned nil") + } +} + +func TestBackendPlugin_System(t *testing.T) { + b, cleanup := testBackend(t) + defer cleanup() + + sys := b.System() + if sys == nil { + t.Fatal("System() returned nil") + } + + actual := sys.DefaultLeaseTTL() + expected := 300 * time.Second + + if actual != expected { + t.Fatalf("bad: %v, expected %v", actual, expected) + } +} + +func TestBackendPlugin_Logger(t *testing.T) { + b, cleanup := testBackend(t) + defer cleanup() + + logger := b.Logger() + if logger == nil { + t.Fatal("Logger() returned nil") + } +} + +func TestBackendPlugin_HandleExistenceCheck(t *testing.T) { + b, cleanup := testBackend(t) + defer cleanup() + + checkFound, exists, err := b.HandleExistenceCheck(&logical.Request{ + 
Operation: logical.CreateOperation, + Path: "kv/foo", + Data: map[string]interface{}{"value": "bar"}, + }) + if err != nil { + t.Fatal(err) + } + if !checkFound { + t.Fatal("existence check not found for path 'kv/foo") + } + if exists { + t.Fatal("existence check should have returned 'false' for 'kv/foo'") + } +} + +func TestBackendPlugin_Cleanup(t *testing.T) { + b, cleanup := testBackend(t) + defer cleanup() + + b.Cleanup() +} + +func TestBackendPlugin_Initialize(t *testing.T) { + b, cleanup := testBackend(t) + defer cleanup() + + err := b.Initialize() + if err != nil { + t.Fatal(err) + } +} + +func TestBackendPlugin_InvalidateKey(t *testing.T) { + b, cleanup := testBackend(t) + defer cleanup() + + resp, err := b.HandleRequest(&logical.Request{ + Operation: logical.ReadOperation, + Path: "internal", + }) + if err != nil { + t.Fatal(err) + } + if resp.Data["value"] == "" { + t.Fatalf("bad: %#v, expected non-empty value", resp) + } + + b.InvalidateKey("internal") + + resp, err = b.HandleRequest(&logical.Request{ + Operation: logical.ReadOperation, + Path: "internal", + }) + if err != nil { + t.Fatal(err) + } + if resp.Data["value"] != "" { + t.Fatalf("bad: expected empty response data, got %#v", resp) + } +} + +func TestBackendPlugin_Setup(t *testing.T) { + _, cleanup := testBackend(t) + defer cleanup() +} + +func testBackend(t *testing.T) (logical.Backend, func()) { + // Create a mock provider + pluginMap := map[string]gplugin.Plugin{ + "backend": &BackendPlugin{ + Factory: mock.Factory, + }, + } + client, _ := gplugin.TestPluginRPCConn(t, pluginMap) + cleanup := func() { + client.Close() + } + + // Request the backend + raw, err := client.Dispense(BackendPluginName) + if err != nil { + t.Fatal(err) + } + b := raw.(logical.Backend) + + err = b.Setup(&logical.BackendConfig{ + Logger: logformat.NewVaultLogger(log.LevelTrace), + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: 300 * time.Second, + MaxLeaseTTLVal: 1800 * time.Second, + }, + StorageView: 
&logical.InmemStorage{}, + }) + if err != nil { + t.Fatal(err) + } + + return b, cleanup +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/logger.go b/vendor/github.com/hashicorp/vault/logical/plugin/logger.go new file mode 100644 index 0000000..ceb8947 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/logger.go @@ -0,0 +1,205 @@ +package plugin + +import ( + "net/rpc" + + plugin "github.com/hashicorp/go-plugin" + log "github.com/mgutz/logxi/v1" +) + +type LoggerClient struct { + client *rpc.Client +} + +func (l *LoggerClient) Trace(msg string, args ...interface{}) { + cArgs := &LoggerArgs{ + Msg: msg, + Args: args, + } + l.client.Call("Plugin.Trace", cArgs, &struct{}{}) +} + +func (l *LoggerClient) Debug(msg string, args ...interface{}) { + cArgs := &LoggerArgs{ + Msg: msg, + Args: args, + } + l.client.Call("Plugin.Debug", cArgs, &struct{}{}) +} + +func (l *LoggerClient) Info(msg string, args ...interface{}) { + cArgs := &LoggerArgs{ + Msg: msg, + Args: args, + } + l.client.Call("Plugin.Info", cArgs, &struct{}{}) +} +func (l *LoggerClient) Warn(msg string, args ...interface{}) error { + var reply LoggerReply + cArgs := &LoggerArgs{ + Msg: msg, + Args: args, + } + err := l.client.Call("Plugin.Warn", cArgs, &reply) + if err != nil { + return err + } + if reply.Error != nil { + return reply.Error + } + + return nil +} +func (l *LoggerClient) Error(msg string, args ...interface{}) error { + var reply LoggerReply + cArgs := &LoggerArgs{ + Msg: msg, + Args: args, + } + err := l.client.Call("Plugin.Error", cArgs, &reply) + if err != nil { + return err + } + if reply.Error != nil { + return reply.Error + } + + return nil +} + +func (l *LoggerClient) Fatal(msg string, args ...interface{}) { + // NOOP since it's not actually used within vault + return +} + +func (l *LoggerClient) Log(level int, msg string, args []interface{}) { + cArgs := &LoggerArgs{ + Level: level, + Msg: msg, + Args: args, + } + l.client.Call("Plugin.Log", cArgs, 
&struct{}{}) +} + +func (l *LoggerClient) SetLevel(level int) { + l.client.Call("Plugin.SetLevel", level, &struct{}{}) +} + +func (l *LoggerClient) IsTrace() bool { + var reply LoggerReply + l.client.Call("Plugin.IsTrace", new(interface{}), &reply) + return reply.IsTrue +} +func (l *LoggerClient) IsDebug() bool { + var reply LoggerReply + l.client.Call("Plugin.IsDebug", new(interface{}), &reply) + return reply.IsTrue +} + +func (l *LoggerClient) IsInfo() bool { + var reply LoggerReply + l.client.Call("Plugin.IsInfo", new(interface{}), &reply) + return reply.IsTrue +} + +func (l *LoggerClient) IsWarn() bool { + var reply LoggerReply + l.client.Call("Plugin.IsWarn", new(interface{}), &reply) + return reply.IsTrue +} + +type LoggerServer struct { + logger log.Logger +} + +func (l *LoggerServer) Trace(args *LoggerArgs, _ *struct{}) error { + l.logger.Trace(args.Msg, args.Args) + return nil +} + +func (l *LoggerServer) Debug(args *LoggerArgs, _ *struct{}) error { + l.logger.Debug(args.Msg, args.Args) + return nil +} + +func (l *LoggerServer) Info(args *LoggerArgs, _ *struct{}) error { + l.logger.Info(args.Msg, args.Args) + return nil +} + +func (l *LoggerServer) Warn(args *LoggerArgs, reply *LoggerReply) error { + err := l.logger.Warn(args.Msg, args.Args) + if err != nil { + *reply = LoggerReply{ + Error: plugin.NewBasicError(err), + } + return nil + } + return nil +} + +func (l *LoggerServer) Error(args *LoggerArgs, reply *LoggerReply) error { + err := l.logger.Error(args.Msg, args.Args) + if err != nil { + *reply = LoggerReply{ + Error: plugin.NewBasicError(err), + } + return nil + } + return nil +} + +func (l *LoggerServer) Log(args *LoggerArgs, _ *struct{}) error { + l.logger.Log(args.Level, args.Msg, args.Args) + return nil +} + +func (l *LoggerServer) SetLevel(args int, _ *struct{}) error { + l.logger.SetLevel(args) + return nil +} + +func (l *LoggerServer) IsTrace(args interface{}, reply *LoggerReply) error { + result := l.logger.IsTrace() + *reply = LoggerReply{ 
+ IsTrue: result, + } + return nil +} + +func (l *LoggerServer) IsDebug(args interface{}, reply *LoggerReply) error { + result := l.logger.IsDebug() + *reply = LoggerReply{ + IsTrue: result, + } + return nil +} + +func (l *LoggerServer) IsInfo(args interface{}, reply *LoggerReply) error { + result := l.logger.IsInfo() + *reply = LoggerReply{ + IsTrue: result, + } + return nil +} + +func (l *LoggerServer) IsWarn(args interface{}, reply *LoggerReply) error { + result := l.logger.IsWarn() + *reply = LoggerReply{ + IsTrue: result, + } + return nil +} + +type LoggerArgs struct { + Level int + Msg string + Args []interface{} +} + +// LoggerReply contains the RPC reply. Not all fields may be used +// for a particular RPC call. +type LoggerReply struct { + IsTrue bool + Error *plugin.BasicError +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/logger_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/logger_test.go new file mode 100644 index 0000000..10b389c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/logger_test.go @@ -0,0 +1,163 @@ +package plugin + +import ( + "bufio" + "bytes" + "io/ioutil" + "strings" + "testing" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/helper/logformat" + log "github.com/mgutz/logxi/v1" +) + +func TestLogger_impl(t *testing.T) { + var _ log.Logger = new(LoggerClient) +} + +func TestLogger_levels(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + var buf bytes.Buffer + writer := bufio.NewWriter(&buf) + + l := logformat.NewVaultLoggerWithWriter(writer, log.LevelTrace) + + server.RegisterName("Plugin", &LoggerServer{ + logger: l, + }) + + expected := "foobar" + testLogger := &LoggerClient{client: client} + + // Test trace + testLogger.Trace(expected) + if err := writer.Flush(); err != nil { + t.Fatal(err) + } + result := buf.String() + buf.Reset() + if !strings.Contains(result, expected) { + t.Fatalf("expected log to contain %s, got %s", 
expected, result) + } + + // Test debug + testLogger.Debug(expected) + if err := writer.Flush(); err != nil { + t.Fatal(err) + } + result = buf.String() + buf.Reset() + if !strings.Contains(result, expected) { + t.Fatalf("expected log to contain %s, got %s", expected, result) + } + + // Test debug + testLogger.Info(expected) + if err := writer.Flush(); err != nil { + t.Fatal(err) + } + result = buf.String() + buf.Reset() + if !strings.Contains(result, expected) { + t.Fatalf("expected log to contain %s, got %s", expected, result) + } + + // Test warn + testLogger.Warn(expected) + if err := writer.Flush(); err != nil { + t.Fatal(err) + } + result = buf.String() + buf.Reset() + if !strings.Contains(result, expected) { + t.Fatalf("expected log to contain %s, got %s", expected, result) + } + + // Test error + testLogger.Error(expected) + if err := writer.Flush(); err != nil { + t.Fatal(err) + } + result = buf.String() + buf.Reset() + if !strings.Contains(result, expected) { + t.Fatalf("expected log to contain %s, got %s", expected, result) + } + + // Test fatal + testLogger.Fatal(expected) + if err := writer.Flush(); err != nil { + t.Fatal(err) + } + result = buf.String() + buf.Reset() + if result != "" { + t.Fatalf("expected log Fatal() to be no-op, got %s", result) + } +} + +func TestLogger_isLevels(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + l := logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelAll) + + server.RegisterName("Plugin", &LoggerServer{ + logger: l, + }) + + testLogger := &LoggerClient{client: client} + + if !testLogger.IsDebug() || !testLogger.IsInfo() || !testLogger.IsTrace() || !testLogger.IsWarn() { + t.Fatal("expected logger to return true for all logger level checks") + } +} + +func TestLogger_log(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + var buf bytes.Buffer + writer := bufio.NewWriter(&buf) + + l := logformat.NewVaultLoggerWithWriter(writer, 
log.LevelTrace) + + server.RegisterName("Plugin", &LoggerServer{ + logger: l, + }) + + expected := "foobar" + testLogger := &LoggerClient{client: client} + + // Test trace + testLogger.Log(log.LevelInfo, expected, nil) + if err := writer.Flush(); err != nil { + t.Fatal(err) + } + result := buf.String() + if !strings.Contains(result, expected) { + t.Fatalf("expected log to contain %s, got %s", expected, result) + } + +} + +func TestLogger_setLevel(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + l := log.NewLogger(ioutil.Discard, "test-logger") + + server.RegisterName("Plugin", &LoggerServer{ + logger: l, + }) + + testLogger := &LoggerClient{client: client} + testLogger.SetLevel(log.LevelWarn) + + if !testLogger.IsWarn() { + t.Fatal("expected logger to support warn level") + } +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend.go new file mode 100644 index 0000000..ac8c0ba --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend.go @@ -0,0 +1,74 @@ +package mock + +import ( + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +// New returns a new backend as an interface. This func +// is only necessary for builtin backend plugins. +func New() (interface{}, error) { + return Backend(), nil +} + +// Factory returns a new backend as logical.Backend. +func Factory(conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil +} + +// FactoryType is a wrapper func that allows the Factory func to specify +// the backend type for the mock backend plugin instance. 
+func FactoryType(backendType logical.BackendType) func(*logical.BackendConfig) (logical.Backend, error) { + return func(conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + b.BackendType = backendType + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil + } +} + +// Backend returns a private embedded struct of framework.Backend. +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Help: "", + Paths: framework.PathAppend( + errorPaths(&b), + kvPaths(&b), + []*framework.Path{ + pathInternal(&b), + pathSpecial(&b), + }, + ), + PathsSpecial: &logical.Paths{ + Unauthenticated: []string{ + "special", + }, + }, + Secrets: []*framework.Secret{}, + Invalidate: b.invalidate, + BackendType: logical.TypeLogical, + } + b.internal = "bar" + return &b +} + +type backend struct { + *framework.Backend + + // internal is used to test invalidate + internal string +} + +func (b *backend) invalidate(key string) { + switch key { + case "internal": + b.internal = "" + } +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend_test.go new file mode 100644 index 0000000..075911c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/backend_test.go @@ -0,0 +1,11 @@ +package mock + +import ( + "testing" + + "github.com/hashicorp/vault/logical" +) + +func TestBackend_impl(t *testing.T) { + var _ logical.Backend = new(backend) +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/mock-plugin/main.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/mock-plugin/main.go new file mode 100644 index 0000000..b1b7fbd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/mock-plugin/main.go @@ -0,0 +1,31 @@ +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/logical" + 
"github.com/hashicorp/vault/logical/plugin" + "github.com/hashicorp/vault/logical/plugin/mock" +) + +func main() { + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) // Ignore command, strictly parse flags + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig) + + factoryFunc := mock.FactoryType(logical.TypeLogical) + + err := plugin.Serve(&plugin.ServeOpts{ + BackendFactoryFunc: factoryFunc, + TLSProviderFunc: tlsProviderFunc, + }) + if err != nil { + log.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_errors.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_errors.go new file mode 100644 index 0000000..00c4e3d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_errors.go @@ -0,0 +1,32 @@ +package mock + +import ( + "net/rpc" + + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +// pathInternal is used to test viewing internal backend values. In this case, +// it is used to test the invalidate func. 
+func errorPaths(b *backend) []*framework.Path { + return []*framework.Path{ + &framework.Path{ + Pattern: "errors/rpc", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathErrorRPCRead, + }, + }, + &framework.Path{ + Pattern: "errors/kill", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathErrorRPCRead, + }, + }, + } +} + +func (b *backend) pathErrorRPCRead( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return nil, rpc.ErrShutdown +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_internal.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_internal.go new file mode 100644 index 0000000..92c4f8b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_internal.go @@ -0,0 +1,41 @@ +package mock + +import ( + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +// pathInternal is used to test viewing internal backend values. In this case, +// it is used to test the invalidate func. 
+func pathInternal(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "internal", + Fields: map[string]*framework.FieldSchema{ + "value": &framework.FieldSchema{Type: framework.TypeString}, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathInternalUpdate, + logical.ReadOperation: b.pathInternalRead, + }, + } +} + +func (b *backend) pathInternalUpdate( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + value := data.Get("value").(string) + b.internal = value + // Return the secret + return nil, nil + +} + +func (b *backend) pathInternalRead( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Return the secret + return &logical.Response{ + Data: map[string]interface{}{ + "value": b.internal, + }, + }, nil + +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_kv.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_kv.go new file mode 100644 index 0000000..badede2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_kv.go @@ -0,0 +1,103 @@ +package mock + +import ( + "fmt" + + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +// kvPaths is used to test CRUD and List operations. It is a simplified +// version of the passthrough backend that only accepts string values. 
+func kvPaths(b *backend) []*framework.Path { + return []*framework.Path{ + &framework.Path{ + Pattern: "kv/?", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathKVList, + }, + }, + &framework.Path{ + Pattern: "kv/" + framework.GenericNameRegex("key"), + Fields: map[string]*framework.FieldSchema{ + "key": &framework.FieldSchema{Type: framework.TypeString}, + "value": &framework.FieldSchema{Type: framework.TypeString}, + }, + ExistenceCheck: b.pathExistenceCheck, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathKVRead, + logical.CreateOperation: b.pathKVCreateUpdate, + logical.UpdateOperation: b.pathKVCreateUpdate, + logical.DeleteOperation: b.pathKVDelete, + }, + }, + } +} + +func (b *backend) pathExistenceCheck(req *logical.Request, data *framework.FieldData) (bool, error) { + out, err := req.Storage.Get(req.Path) + if err != nil { + return false, fmt.Errorf("existence check failed: %v", err) + } + + return out != nil, nil +} + +func (b *backend) pathKVRead( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + entry, err := req.Storage.Get(req.Path) + if err != nil { + return nil, err + } + + if entry == nil { + return nil, nil + } + + value := string(entry.Value) + + // Return the secret + return &logical.Response{ + Data: map[string]interface{}{ + "value": value, + }, + }, nil +} + +func (b *backend) pathKVCreateUpdate( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + value := data.Get("value").(string) + + entry := &logical.StorageEntry{ + Key: req.Path, + Value: []byte(value), + } + + s := req.Storage + err := s.Put(entry) + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: map[string]interface{}{ + "value": value, + }, + }, nil +} + +func (b *backend) pathKVDelete(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if err := req.Storage.Delete(req.Path); err != 
nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathKVList(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + vals, err := req.Storage.List("kv/") + if err != nil { + return nil, err + } + return logical.ListResponse(vals), nil +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_special.go b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_special.go new file mode 100644 index 0000000..f695e20 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/mock/path_special.go @@ -0,0 +1,27 @@ +package mock + +import ( + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +// pathSpecial is used to test special paths. +func pathSpecial(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "special", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathSpecialRead, + }, + } +} + +func (b *backend) pathSpecialRead( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Return the secret + return &logical.Response{ + Data: map[string]interface{}{ + "data": "foo", + }, + }, nil + +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go b/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go new file mode 100644 index 0000000..ede0622 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go @@ -0,0 +1,119 @@ +package plugin + +import ( + "crypto/ecdsa" + "crypto/rsa" + "encoding/gob" + "fmt" + "time" + + "sync" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/logical" + log "github.com/mgutz/logxi/v1" +) + +// Register these types since we have to serialize and de-serialize tls.ConnectionState +// over the wire as part of logical.Request.Connection. 
+func init() { + gob.Register(rsa.PublicKey{}) + gob.Register(ecdsa.PublicKey{}) + gob.Register(time.Duration(0)) +} + +// BackendPluginClient is a wrapper around backendPluginClient +// that also contains its plugin.Client instance. It's primarily +// used to cleanly kill the client on Cleanup() +type BackendPluginClient struct { + client *plugin.Client + sync.Mutex + + *backendPluginClient +} + +// Cleanup calls the RPC client's Cleanup() func and also calls +// the go-plugin's client Kill() func +func (b *BackendPluginClient) Cleanup() { + b.backendPluginClient.Cleanup() + b.client.Kill() +} + +// NewBackend will return an instance of an RPC-based client implementation of the backend for +// external plugins, or a concrete implementation of the backend if it is a builtin backend. +// The backend is returned as a logical.Backend interface. The isMetadataMode param determines whether +// the plugin should run in metadata mode. +func NewBackend(pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger, isMetadataMode bool) (logical.Backend, error) { + // Look for plugin in the plugin catalog + pluginRunner, err := sys.LookupPlugin(pluginName) + if err != nil { + return nil, err + } + + var backend logical.Backend + if pluginRunner.Builtin { + // Plugin is builtin so we can retrieve an instance of the interface + // from the pluginRunner. Then cast it to logical.Backend. 
+ backendRaw, err := pluginRunner.BuiltinFactory() + if err != nil { + return nil, fmt.Errorf("error getting plugin type: %s", err) + } + + var ok bool + backend, ok = backendRaw.(logical.Backend) + if !ok { + return nil, fmt.Errorf("unsuported backend type: %s", pluginName) + } + + } else { + // create a backendPluginClient instance + backend, err = newPluginClient(sys, pluginRunner, logger, isMetadataMode) + if err != nil { + return nil, err + } + } + + return backend, nil +} + +func newPluginClient(sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (logical.Backend, error) { + // pluginMap is the map of plugins we can dispense. + pluginMap := map[string]plugin.Plugin{ + "backend": &BackendPlugin{ + metadataMode: isMetadataMode, + }, + } + + var client *plugin.Client + var err error + if isMetadataMode { + client, err = pluginRunner.RunMetadataMode(sys, pluginMap, handshakeConfig, []string{}, logger) + } else { + client, err = pluginRunner.Run(sys, pluginMap, handshakeConfig, []string{}, logger) + } + if err != nil { + return nil, err + } + + // Connect via RPC + rpcClient, err := client.Client() + if err != nil { + return nil, err + } + + // Request the plugin + raw, err := rpcClient.Dispense("backend") + if err != nil { + return nil, err + } + + // We should have a logical backend type now. This feels like a normal interface + // implementation but is in fact over an RPC connection. 
+ backendRPC := raw.(*backendPluginClient) + + return &BackendPluginClient{ + client: client, + backendPluginClient: backendRPC, + }, nil +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/serve.go b/vendor/github.com/hashicorp/vault/logical/plugin/serve.go new file mode 100644 index 0000000..1d70b3a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/serve.go @@ -0,0 +1,56 @@ +package plugin + +import ( + "crypto/tls" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/logical" +) + +// BackendPluginName is the name of the plugin that can be +// dispensed rom the plugin server. +const BackendPluginName = "backend" + +type BackendFactoryFunc func(*logical.BackendConfig) (logical.Backend, error) +type TLSProdiverFunc func() (*tls.Config, error) + +type ServeOpts struct { + BackendFactoryFunc BackendFactoryFunc + TLSProviderFunc TLSProdiverFunc +} + +// Serve is a helper function used to serve a backend plugin. This +// should be ran on the plugin's main process. +func Serve(opts *ServeOpts) error { + // pluginMap is the map of plugins we can dispense. + var pluginMap = map[string]plugin.Plugin{ + "backend": &BackendPlugin{ + Factory: opts.BackendFactoryFunc, + }, + } + + err := pluginutil.OptionallyEnableMlock() + if err != nil { + return err + } + + // If FetchMetadata is true, run without TLSProvider + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: handshakeConfig, + Plugins: pluginMap, + TLSProvider: opts.TLSProviderFunc, + }) + + return nil +} + +// handshakeConfigs are used to just do a basic handshake between +// a plugin and host. If the handshake fails, a user friendly error is shown. +// This prevents users from executing bad plugins or executing a plugin +// directory. It is a UX feature, not a security feature. 
+var handshakeConfig = plugin.HandshakeConfig{ + ProtocolVersion: 2, + MagicCookieKey: "VAULT_BACKEND_PLUGIN", + MagicCookieValue: "6669da05-b1c8-4f49-97d9-c8e5bed98e20", +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/storage.go b/vendor/github.com/hashicorp/vault/logical/plugin/storage.go new file mode 100644 index 0000000..99c21f6 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/storage.go @@ -0,0 +1,139 @@ +package plugin + +import ( + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/logical" +) + +// StorageClient is an implementation of logical.Storage that communicates +// over RPC. +type StorageClient struct { + client *rpc.Client +} + +func (s *StorageClient) List(prefix string) ([]string, error) { + var reply StorageListReply + err := s.client.Call("Plugin.List", prefix, &reply) + if err != nil { + return reply.Keys, err + } + if reply.Error != nil { + return reply.Keys, reply.Error + } + return reply.Keys, nil +} + +func (s *StorageClient) Get(key string) (*logical.StorageEntry, error) { + var reply StorageGetReply + err := s.client.Call("Plugin.Get", key, &reply) + if err != nil { + return nil, err + } + if reply.Error != nil { + return nil, reply.Error + } + return reply.StorageEntry, nil +} + +func (s *StorageClient) Put(entry *logical.StorageEntry) error { + var reply StoragePutReply + err := s.client.Call("Plugin.Put", entry, &reply) + if err != nil { + return err + } + if reply.Error != nil { + return reply.Error + } + return nil +} + +func (s *StorageClient) Delete(key string) error { + var reply StorageDeleteReply + err := s.client.Call("Plugin.Delete", key, &reply) + if err != nil { + return err + } + if reply.Error != nil { + return reply.Error + } + return nil +} + +// StorageServer is a net/rpc compatible structure for serving +type StorageServer struct { + impl logical.Storage +} + +func (s *StorageServer) List(prefix string, reply *StorageListReply) error { + keys, err := 
s.impl.List(prefix) + *reply = StorageListReply{ + Keys: keys, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *StorageServer) Get(key string, reply *StorageGetReply) error { + storageEntry, err := s.impl.Get(key) + *reply = StorageGetReply{ + StorageEntry: storageEntry, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *StorageServer) Put(entry *logical.StorageEntry, reply *StoragePutReply) error { + err := s.impl.Put(entry) + *reply = StoragePutReply{ + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *StorageServer) Delete(key string, reply *StorageDeleteReply) error { + err := s.impl.Delete(key) + *reply = StorageDeleteReply{ + Error: plugin.NewBasicError(err), + } + return nil +} + +type StorageListReply struct { + Keys []string + Error *plugin.BasicError +} + +type StorageGetReply struct { + StorageEntry *logical.StorageEntry + Error *plugin.BasicError +} + +type StoragePutReply struct { + Error *plugin.BasicError +} + +type StorageDeleteReply struct { + Error *plugin.BasicError +} + +// NOOPStorage is used to deny access to the storage interface while running a +// backend plugin in metadata mode. 
+type NOOPStorage struct{} + +func (s *NOOPStorage) List(prefix string) ([]string, error) { + return []string{}, nil +} + +func (s *NOOPStorage) Get(key string) (*logical.StorageEntry, error) { + return nil, nil +} + +func (s *NOOPStorage) Put(entry *logical.StorageEntry) error { + return nil +} + +func (s *NOOPStorage) Delete(key string) error { + return nil +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/storage_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/storage_test.go new file mode 100644 index 0000000..9899a82 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/storage_test.go @@ -0,0 +1,27 @@ +package plugin + +import ( + "testing" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/logical" +) + +func TestStorage_impl(t *testing.T) { + var _ logical.Storage = new(StorageClient) +} + +func TestStorage_operations(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + storage := &logical.InmemStorage{} + + server.RegisterName("Plugin", &StorageServer{ + impl: storage, + }) + + testStorage := &StorageClient{client: client} + + logical.TestStorage(t, testStorage) +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/system.go b/vendor/github.com/hashicorp/vault/logical/plugin/system.go new file mode 100644 index 0000000..16f67df --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/system.go @@ -0,0 +1,247 @@ +package plugin + +import ( + "net/rpc" + "time" + + "fmt" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/helper/consts" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/helper/wrapping" + "github.com/hashicorp/vault/logical" +) + +type SystemViewClient struct { + client *rpc.Client +} + +func (s *SystemViewClient) DefaultLeaseTTL() time.Duration { + var reply DefaultLeaseTTLReply + err := s.client.Call("Plugin.DefaultLeaseTTL", new(interface{}), &reply) + if err != nil { + 
return 0 + } + + return reply.DefaultLeaseTTL +} + +func (s *SystemViewClient) MaxLeaseTTL() time.Duration { + var reply MaxLeaseTTLReply + err := s.client.Call("Plugin.MaxLeaseTTL", new(interface{}), &reply) + if err != nil { + return 0 + } + + return reply.MaxLeaseTTL +} + +func (s *SystemViewClient) SudoPrivilege(path string, token string) bool { + var reply SudoPrivilegeReply + args := &SudoPrivilegeArgs{ + Path: path, + Token: token, + } + + err := s.client.Call("Plugin.SudoPrivilege", args, &reply) + if err != nil { + return false + } + + return reply.Sudo +} + +func (s *SystemViewClient) Tainted() bool { + var reply TaintedReply + + err := s.client.Call("Plugin.Tainted", new(interface{}), &reply) + if err != nil { + return false + } + + return reply.Tainted +} + +func (s *SystemViewClient) CachingDisabled() bool { + var reply CachingDisabledReply + + err := s.client.Call("Plugin.CachingDisabled", new(interface{}), &reply) + if err != nil { + return false + } + + return reply.CachingDisabled +} + +func (s *SystemViewClient) ReplicationState() consts.ReplicationState { + var reply ReplicationStateReply + + err := s.client.Call("Plugin.ReplicationState", new(interface{}), &reply) + if err != nil { + return consts.ReplicationDisabled + } + + return reply.ReplicationState +} + +func (s *SystemViewClient) ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { + var reply ResponseWrapDataReply + // Do not allow JWTs to be returned + args := &ResponseWrapDataArgs{ + Data: data, + TTL: ttl, + JWT: false, + } + + err := s.client.Call("Plugin.ResponseWrapData", args, &reply) + if err != nil { + return nil, err + } + if reply.Error != nil { + return nil, reply.Error + } + + return reply.ResponseWrapInfo, nil +} + +func (s *SystemViewClient) LookupPlugin(name string) (*pluginutil.PluginRunner, error) { + return nil, fmt.Errorf("cannot call LookupPlugin from a plugin backend") +} + +func (s *SystemViewClient) 
MlockEnabled() bool { + var reply MlockEnabledReply + err := s.client.Call("Plugin.MlockEnabled", new(interface{}), &reply) + if err != nil { + return false + } + + return reply.MlockEnabled +} + +type SystemViewServer struct { + impl logical.SystemView +} + +func (s *SystemViewServer) DefaultLeaseTTL(_ interface{}, reply *DefaultLeaseTTLReply) error { + ttl := s.impl.DefaultLeaseTTL() + *reply = DefaultLeaseTTLReply{ + DefaultLeaseTTL: ttl, + } + + return nil +} + +func (s *SystemViewServer) MaxLeaseTTL(_ interface{}, reply *MaxLeaseTTLReply) error { + ttl := s.impl.MaxLeaseTTL() + *reply = MaxLeaseTTLReply{ + MaxLeaseTTL: ttl, + } + + return nil +} + +func (s *SystemViewServer) SudoPrivilege(args *SudoPrivilegeArgs, reply *SudoPrivilegeReply) error { + sudo := s.impl.SudoPrivilege(args.Path, args.Token) + *reply = SudoPrivilegeReply{ + Sudo: sudo, + } + + return nil +} + +func (s *SystemViewServer) Tainted(_ interface{}, reply *TaintedReply) error { + tainted := s.impl.Tainted() + *reply = TaintedReply{ + Tainted: tainted, + } + + return nil +} + +func (s *SystemViewServer) CachingDisabled(_ interface{}, reply *CachingDisabledReply) error { + cachingDisabled := s.impl.CachingDisabled() + *reply = CachingDisabledReply{ + CachingDisabled: cachingDisabled, + } + + return nil +} + +func (s *SystemViewServer) ReplicationState(_ interface{}, reply *ReplicationStateReply) error { + replicationState := s.impl.ReplicationState() + *reply = ReplicationStateReply{ + ReplicationState: replicationState, + } + + return nil +} + +func (s *SystemViewServer) ResponseWrapData(args *ResponseWrapDataArgs, reply *ResponseWrapDataReply) error { + // Do not allow JWTs to be returned + info, err := s.impl.ResponseWrapData(args.Data, args.TTL, false) + if err != nil { + *reply = ResponseWrapDataReply{ + Error: plugin.NewBasicError(err), + } + return nil + } + *reply = ResponseWrapDataReply{ + ResponseWrapInfo: info, + } + + return nil +} + +func (s *SystemViewServer) MlockEnabled(_ 
interface{}, reply *MlockEnabledReply) error { + enabled := s.impl.MlockEnabled() + *reply = MlockEnabledReply{ + MlockEnabled: enabled, + } + + return nil +} + +type DefaultLeaseTTLReply struct { + DefaultLeaseTTL time.Duration +} + +type MaxLeaseTTLReply struct { + MaxLeaseTTL time.Duration +} + +type SudoPrivilegeArgs struct { + Path string + Token string +} + +type SudoPrivilegeReply struct { + Sudo bool +} + +type TaintedReply struct { + Tainted bool +} + +type CachingDisabledReply struct { + CachingDisabled bool +} + +type ReplicationStateReply struct { + ReplicationState consts.ReplicationState +} + +type ResponseWrapDataArgs struct { + Data map[string]interface{} + TTL time.Duration + JWT bool +} + +type ResponseWrapDataReply struct { + ResponseWrapInfo *wrapping.ResponseWrapInfo + Error *plugin.BasicError +} + +type MlockEnabledReply struct { + MlockEnabled bool +} diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/system_test.go b/vendor/github.com/hashicorp/vault/logical/plugin/system_test.go new file mode 100644 index 0000000..57e386b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/logical/plugin/system_test.go @@ -0,0 +1,174 @@ +package plugin + +import ( + "testing" + + "reflect" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/helper/consts" + "github.com/hashicorp/vault/logical" +) + +func Test_impl(t *testing.T) { + var _ logical.SystemView = new(SystemViewClient) +} + +func TestSystem_defaultLeaseTTL(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + sys := logical.TestSystemView() + + server.RegisterName("Plugin", &SystemViewServer{ + impl: sys, + }) + + testSystemView := &SystemViewClient{client: client} + + expected := sys.DefaultLeaseTTL() + actual := testSystemView.DefaultLeaseTTL() + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_maxLeaseTTL(t *testing.T) { + client, server := 
plugin.TestRPCConn(t) + defer client.Close() + + sys := logical.TestSystemView() + + server.RegisterName("Plugin", &SystemViewServer{ + impl: sys, + }) + + testSystemView := &SystemViewClient{client: client} + + expected := sys.MaxLeaseTTL() + actual := testSystemView.MaxLeaseTTL() + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_sudoPrivilege(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + sys := logical.TestSystemView() + sys.SudoPrivilegeVal = true + + server.RegisterName("Plugin", &SystemViewServer{ + impl: sys, + }) + + testSystemView := &SystemViewClient{client: client} + + expected := sys.SudoPrivilege("foo", "bar") + actual := testSystemView.SudoPrivilege("foo", "bar") + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_tainted(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + sys := logical.TestSystemView() + sys.TaintedVal = true + + server.RegisterName("Plugin", &SystemViewServer{ + impl: sys, + }) + + testSystemView := &SystemViewClient{client: client} + + expected := sys.Tainted() + actual := testSystemView.Tainted() + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_cachingDisabled(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + sys := logical.TestSystemView() + sys.CachingDisabledVal = true + + server.RegisterName("Plugin", &SystemViewServer{ + impl: sys, + }) + + testSystemView := &SystemViewClient{client: client} + + expected := sys.CachingDisabled() + actual := testSystemView.CachingDisabled() + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_replicationState(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + sys := 
logical.TestSystemView() + sys.ReplicationStateVal = consts.ReplicationPerformancePrimary + + server.RegisterName("Plugin", &SystemViewServer{ + impl: sys, + }) + + testSystemView := &SystemViewClient{client: client} + + expected := sys.ReplicationState() + actual := testSystemView.ReplicationState() + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_responseWrapData(t *testing.T) { + t.SkipNow() +} + +func TestSystem_lookupPlugin(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + sys := logical.TestSystemView() + + server.RegisterName("Plugin", &SystemViewServer{ + impl: sys, + }) + + testSystemView := &SystemViewClient{client: client} + + if _, err := testSystemView.LookupPlugin("foo"); err == nil { + t.Fatal("LookPlugin(): expected error on due to unsupported call from plugin") + } +} + +func TestSystem_mlockEnabled(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + sys := logical.TestSystemView() + sys.EnableMlock = true + + server.RegisterName("Plugin", &SystemViewServer{ + impl: sys, + }) + + testSystemView := &SystemViewClient{client: client} + + expected := sys.MlockEnabled() + actual := testSystemView.MlockEnabled() + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} diff --git a/vendor/github.com/hashicorp/vault/logical/request.go b/vendor/github.com/hashicorp/vault/logical/request.go index c41b1dc..cee0f0c 100644 --- a/vendor/github.com/hashicorp/vault/logical/request.go +++ b/vendor/github.com/hashicorp/vault/logical/request.go @@ -174,12 +174,13 @@ type Operation string const ( // The operations below are called per path - CreateOperation Operation = "create" - ReadOperation = "read" - UpdateOperation = "update" - DeleteOperation = "delete" - ListOperation = "list" - HelpOperation = "help" + CreateOperation Operation = "create" + ReadOperation = "read" + 
UpdateOperation = "update" + DeleteOperation = "delete" + ListOperation = "list" + HelpOperation = "help" + PersonaLookaheadOperation = "persona-lookahead" // The operations below are called globally, the path is less relevant. RevokeOperation Operation = "revoke" diff --git a/vendor/github.com/hashicorp/vault/logical/response.go b/vendor/github.com/hashicorp/vault/logical/response.go index ee6bfe1..6ee452b 100644 --- a/vendor/github.com/hashicorp/vault/logical/response.go +++ b/vendor/github.com/hashicorp/vault/logical/response.go @@ -2,11 +2,8 @@ package logical import ( "errors" - "fmt" - "reflect" - "time" - "github.com/mitchellh/copystructure" + "github.com/hashicorp/vault/helper/wrapping" ) const ( @@ -28,26 +25,6 @@ const ( HTTPStatusCode = "http_status_code" ) -type ResponseWrapInfo struct { - // Setting to non-zero specifies that the response should be wrapped. - // Specifies the desired TTL of the wrapping token. - TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"` - - // The token containing the wrapped response - Token string `json:"token" structs:"token" mapstructure:"token"` - - // The creation time. This can be used with the TTL to figure out an - // expected expiration. - CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"cration_time"` - - // If the contained response is the output of a token creation call, the - // created token's accessor will be accessible here - WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor"` - - // The format to use. This doesn't get returned, it's only internal. - Format string `json:"format" structs:"format" mapstructure:"format"` -} - // Response is a struct that stores the response of a request. // It is used to abstract the details of the higher level request protocol. 
type Response struct { @@ -72,85 +49,18 @@ type Response struct { // Warnings allow operations or backends to return warnings in response // to user actions without failing the action outright. - // Making it private helps ensure that it is easy for various parts of - // Vault (backend, core, etc.) to add warnings without accidentally - // replacing what exists. - warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"` + Warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"` // Information for wrapping the response in a cubbyhole - WrapInfo *ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"` -} - -func init() { - copystructure.Copiers[reflect.TypeOf(Response{})] = func(v interface{}) (interface{}, error) { - input := v.(Response) - ret := Response{ - Redirect: input.Redirect, - } - - if input.Secret != nil { - retSec, err := copystructure.Copy(input.Secret) - if err != nil { - return nil, fmt.Errorf("error copying Secret: %v", err) - } - ret.Secret = retSec.(*Secret) - } - - if input.Auth != nil { - retAuth, err := copystructure.Copy(input.Auth) - if err != nil { - return nil, fmt.Errorf("error copying Auth: %v", err) - } - ret.Auth = retAuth.(*Auth) - } - - if input.Data != nil { - retData, err := copystructure.Copy(&input.Data) - if err != nil { - return nil, fmt.Errorf("error copying Data: %v", err) - } - ret.Data = *(retData.(*map[string]interface{})) - } - - if input.Warnings() != nil { - for _, warning := range input.Warnings() { - ret.AddWarning(warning) - } - } - - if input.WrapInfo != nil { - retWrapInfo, err := copystructure.Copy(input.WrapInfo) - if err != nil { - return nil, fmt.Errorf("error copying WrapInfo: %v", err) - } - ret.WrapInfo = retWrapInfo.(*ResponseWrapInfo) - } - - return &ret, nil - } + WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"` } // AddWarning adds a warning into the response's warning list func (r 
*Response) AddWarning(warning string) { - if r.warnings == nil { - r.warnings = make([]string, 0, 1) + if r.Warnings == nil { + r.Warnings = make([]string, 0, 1) } - r.warnings = append(r.warnings, warning) -} - -// Warnings returns the list of warnings set on the response -func (r *Response) Warnings() []string { - return r.warnings -} - -// ClearWarnings clears the response's warning list -func (r *Response) ClearWarnings() { - r.warnings = make([]string, 0, 1) -} - -// Copies the warnings from the other response to this one -func (r *Response) CloneWarnings(other *Response) { - r.warnings = other.warnings + r.Warnings = append(r.Warnings, warning) } // IsError returns true if this response seems to indicate an error. diff --git a/vendor/github.com/hashicorp/vault/logical/storage_inmem.go b/vendor/github.com/hashicorp/vault/logical/storage_inmem.go index 64c6e2b..0112ae2 100644 --- a/vendor/github.com/hashicorp/vault/logical/storage_inmem.go +++ b/vendor/github.com/hashicorp/vault/logical/storage_inmem.go @@ -1,53 +1,90 @@ package logical import ( + "strings" "sync" - "github.com/hashicorp/vault/physical" + radix "github.com/armon/go-radix" ) -// InmemStorage implements Storage and stores all data in memory. +// InmemStorage implements Storage and stores all data in memory. It is +// basically a straight copy of physical.Inmem, but it prevents backends from +// having to load all of physical's dependencies (which are legion) just to +// have some testing storage. 
type InmemStorage struct { - phys *physical.InmemBackend - + sync.RWMutex + root *radix.Tree once sync.Once } +func (s *InmemStorage) Get(key string) (*StorageEntry, error) { + s.once.Do(s.init) + + s.RLock() + defer s.RUnlock() + + if raw, ok := s.root.Get(key); ok { + se := raw.(*StorageEntry) + return &StorageEntry{ + Key: se.Key, + Value: se.Value, + }, nil + } + + return nil, nil +} + +func (s *InmemStorage) Put(entry *StorageEntry) error { + s.once.Do(s.init) + + s.Lock() + defer s.Unlock() + + s.root.Insert(entry.Key, &StorageEntry{ + Key: entry.Key, + Value: entry.Value, + }) + return nil +} + +func (s *InmemStorage) Delete(key string) error { + s.once.Do(s.init) + + s.Lock() + defer s.Unlock() + + s.root.Delete(key) + return nil +} + func (s *InmemStorage) List(prefix string) ([]string, error) { s.once.Do(s.init) - return s.phys.List(prefix) -} + s.RLock() + defer s.RUnlock() -func (s *InmemStorage) Get(key string) (*StorageEntry, error) { - s.once.Do(s.init) - entry, err := s.phys.Get(key) - if err != nil { - return nil, err + var out []string + seen := make(map[string]interface{}) + walkFn := func(s string, v interface{}) bool { + trimmed := strings.TrimPrefix(s, prefix) + sep := strings.Index(trimmed, "/") + if sep == -1 { + out = append(out, trimmed) + } else { + trimmed = trimmed[:sep+1] + if _, ok := seen[trimmed]; !ok { + out = append(out, trimmed) + seen[trimmed] = struct{}{} + } + } + return false } - if entry == nil { - return nil, nil - } - return &StorageEntry{ - Key: entry.Key, - Value: entry.Value, - }, nil -} + s.root.WalkPrefix(prefix, walkFn) -func (s *InmemStorage) Put(entry *StorageEntry) error { - s.once.Do(s.init) - physEntry := &physical.Entry{ - Key: entry.Key, - Value: entry.Value, - } - return s.phys.Put(physEntry) -} + return out, nil -func (s *InmemStorage) Delete(k string) error { - s.once.Do(s.init) - return s.phys.Delete(k) } func (s *InmemStorage) init() { - s.phys = physical.NewInmem(nil) + s.root = radix.New() } diff --git 
a/vendor/github.com/hashicorp/vault/logical/system_view.go b/vendor/github.com/hashicorp/vault/logical/system_view.go index d769397..64fc51c 100644 --- a/vendor/github.com/hashicorp/vault/logical/system_view.go +++ b/vendor/github.com/hashicorp/vault/logical/system_view.go @@ -1,9 +1,12 @@ package logical import ( + "errors" "time" "github.com/hashicorp/vault/helper/consts" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/helper/wrapping" ) // SystemView exposes system configuration information in a safe way @@ -37,6 +40,18 @@ type SystemView interface { // ReplicationState indicates the state of cluster replication ReplicationState() consts.ReplicationState + + // ResponseWrapData wraps the given data in a cubbyhole and returns the + // token used to unwrap. + ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) + + // LookupPlugin looks into the plugin catalog for a plugin with the given + // name. Returns a PluginRunner or an error if a plugin can not be found. + LookupPlugin(string) (*pluginutil.PluginRunner, error) + + // MlockEnabled returns the configuration setting for enabling mlock on + // plugins. 
+ MlockEnabled() bool } type StaticSystemView struct { @@ -46,6 +61,7 @@ type StaticSystemView struct { TaintedVal bool CachingDisabledVal bool Primary bool + EnableMlock bool ReplicationStateVal consts.ReplicationState } @@ -72,3 +88,15 @@ func (d StaticSystemView) CachingDisabled() bool { func (d StaticSystemView) ReplicationState() consts.ReplicationState { return d.ReplicationStateVal } + +func (d StaticSystemView) ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { + return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView") +} + +func (d StaticSystemView) LookupPlugin(name string) (*pluginutil.PluginRunner, error) { + return nil, errors.New("LookupPlugin is not implemented in StaticSystemView") +} + +func (d StaticSystemView) MlockEnabled() bool { + return d.EnableMlock +} diff --git a/vendor/github.com/hashicorp/vault/logical/testing/testing.go b/vendor/github.com/hashicorp/vault/logical/testing/testing.go index b2072ea..ca52cdd 100644 --- a/vendor/github.com/hashicorp/vault/logical/testing/testing.go +++ b/vendor/github.com/hashicorp/vault/logical/testing/testing.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/logical" - "github.com/hashicorp/vault/physical" + "github.com/hashicorp/vault/physical/inmem" "github.com/hashicorp/vault/vault" ) @@ -136,8 +136,14 @@ func Test(tt TestT, c TestCase) { // Create an in-memory Vault core logger := logformat.NewVaultLogger(log.LevelTrace) + phys, err := inmem.NewInmem(nil, logger) + if err != nil { + tt.Fatal(err) + return + } + core, err := vault.NewCore(&vault.CoreConfig{ - Physical: physical.NewInmem(logger), + Physical: phys, LogicalBackends: map[string]logical.Factory{ "test": func(conf *logical.BackendConfig) (logical.Backend, error) { if c.Backend != nil { diff --git a/vendor/github.com/hashicorp/vault/logical/translate_response.go 
b/vendor/github.com/hashicorp/vault/logical/translate_response.go index 048adaf..d3d7271 100644 --- a/vendor/github.com/hashicorp/vault/logical/translate_response.go +++ b/vendor/github.com/hashicorp/vault/logical/translate_response.go @@ -14,7 +14,7 @@ import ( func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse { httpResp := &HTTPResponse{ Data: input.Data, - Warnings: input.Warnings(), + Warnings: input.Warnings, } if input.Secret != nil { @@ -42,7 +42,7 @@ func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse { func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response { logicalResp := &Response{ Data: input.Data, - warnings: input.Warnings, + Warnings: input.Warnings, } if input.LeaseID != "" { @@ -91,6 +91,7 @@ type HTTPWrapInfo struct { Token string `json:"token"` TTL int `json:"ttl"` CreationTime string `json:"creation_time"` + CreationPath string `json:"creation_path"` WrappedAccessor string `json:"wrapped_accessor,omitempty"` } diff --git a/vendor/github.com/hashicorp/vault/meta/meta.go b/vendor/github.com/hashicorp/vault/meta/meta.go index 0f5fef9..a81cbde 100644 --- a/vendor/github.com/hashicorp/vault/meta/meta.go +++ b/vendor/github.com/hashicorp/vault/meta/meta.go @@ -29,7 +29,7 @@ var ( -wrap-ttl="" Indicates that the response should be wrapped in a cubbyhole token with the requested TTL. The response can be fetched by calling the "sys/wrapping/unwrap" - endpoint, passing in the wrappping token's ID. This + endpoint, passing in the wrapping token's ID. This is a numeric string with an optional suffix "s", "m", or "h"; if no suffix is specified it will be parsed as seconds. 
May also be specified via diff --git a/vendor/github.com/hashicorp/vault/physical/azure.go b/vendor/github.com/hashicorp/vault/physical/azure/azure.go similarity index 62% rename from vendor/github.com/hashicorp/vault/physical/azure.go rename to vendor/github.com/hashicorp/vault/physical/azure/azure.go index 4d5083e..f938ae4 100644 --- a/vendor/github.com/hashicorp/vault/physical/azure.go +++ b/vendor/github.com/hashicorp/vault/physical/azure/azure.go @@ -1,4 +1,4 @@ -package physical +package azure import ( "encoding/base64" @@ -10,11 +10,14 @@ import ( "strings" "time" + storage "github.com/Azure/azure-sdk-for-go/storage" log "github.com/mgutz/logxi/v1" - "github.com/Azure/azure-storage-go" "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/physical" ) // MaxBlobSize at this time @@ -23,21 +26,19 @@ var MaxBlobSize = 1024 * 1024 * 4 // AzureBackend is a physical backend that stores data // within an Azure blob container. type AzureBackend struct { - container string - client storage.BlobStorageClient + container *storage.Container logger log.Logger - permitPool *PermitPool + permitPool *physical.PermitPool } -// newAzureBackend constructs an Azure backend using a pre-existing +// NewAzureBackend constructs an Azure backend using a pre-existing // bucket. Credentials can be provided to the backend, sourced // from the environment, AWS credential files or by IAM role. 
-func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error) { - - container := os.Getenv("AZURE_BLOB_CONTAINER") - if container == "" { - container = conf["container"] - if container == "" { +func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + name := os.Getenv("AZURE_BLOB_CONTAINER") + if name == "" { + name = conf["container"] + if name == "" { return nil, fmt.Errorf("'container' must be set") } } @@ -62,19 +63,15 @@ func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error) if err != nil { return nil, fmt.Errorf("failed to create Azure client: %v", err) } + client.HTTPClient = cleanhttp.DefaultPooledClient() - contObj := client.GetBlobService().GetContainerReference(container) - created, err := contObj.CreateIfNotExists() + blobClient := client.GetBlobService() + container := blobClient.GetContainerReference(name) + _, err = container.CreateIfNotExists(&storage.CreateContainerOptions{ + Access: storage.ContainerAccessTypePrivate, + }) if err != nil { - return nil, fmt.Errorf("failed to upsert container: %v", err) - } - if created { - err = contObj.SetPermissions(storage.ContainerPermissions{ - AccessType: storage.ContainerAccessTypePrivate, - }, 0, "") - if err != nil { - return nil, fmt.Errorf("failed to set permissions on newly-created container: %v", err) - } + return nil, fmt.Errorf("failed to create %q container: %v", name, err) } maxParStr, ok := conf["max_parallel"] @@ -91,19 +88,18 @@ func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error) a := &AzureBackend{ container: container, - client: client.GetBlobService(), logger: logger, - permitPool: NewPermitPool(maxParInt), + permitPool: physical.NewPermitPool(maxParInt), } return a, nil } // Put is used to insert or update an entry -func (a *AzureBackend) Put(entry *Entry) error { +func (a *AzureBackend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"azure", "put"}, 
time.Now()) if len(entry.Value) >= MaxBlobSize { - return fmt.Errorf("Value is bigger than the current supported limit of 4MBytes") + return fmt.Errorf("value is bigger than the current supported limit of 4MBytes") } blockID := base64.StdEncoding.EncodeToString([]byte("AAAA")) @@ -113,34 +109,44 @@ func (a *AzureBackend) Put(entry *Entry) error { a.permitPool.Acquire() defer a.permitPool.Release() - err := a.client.PutBlock(a.container, entry.Key, blockID, entry.Value) + blob := &storage.Blob{ + Container: a.container, + Name: entry.Key, + } + if err := blob.PutBlock(blockID, entry.Value, nil); err != nil { + return err + } - err = a.client.PutBlockList(a.container, entry.Key, blocks) - return err + return blob.PutBlockList(blocks, nil) } // Get is used to fetch an entry -func (a *AzureBackend) Get(key string) (*Entry, error) { +func (a *AzureBackend) Get(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"azure", "get"}, time.Now()) a.permitPool.Acquire() defer a.permitPool.Release() - exists, _ := a.client.BlobExists(a.container, key) - + blob := &storage.Blob{ + Container: a.container, + Name: key, + } + exists, err := blob.Exists() + if err != nil { + return nil, err + } if !exists { return nil, nil } - reader, err := a.client.GetBlob(a.container, key) - + reader, err := blob.Get(nil) if err != nil { return nil, err } - + defer reader.Close() data, err := ioutil.ReadAll(reader) - ent := &Entry{ + ent := &physical.Entry{ Key: key, Value: data, } @@ -152,10 +158,15 @@ func (a *AzureBackend) Get(key string) (*Entry, error) { func (a *AzureBackend) Delete(key string) error { defer metrics.MeasureSince([]string{"azure", "delete"}, time.Now()) + blob := &storage.Blob{ + Container: a.container, + Name: key, + } + a.permitPool.Acquire() defer a.permitPool.Release() - _, err := a.client.DeleteBlobIfExists(a.container, key, nil) + _, err := blob.DeleteIfExists(nil) return err } @@ -165,15 +176,13 @@ func (a *AzureBackend) List(prefix string) 
([]string, error) { defer metrics.MeasureSince([]string{"azure", "list"}, time.Now()) a.permitPool.Acquire() - defer a.permitPool.Release() - - contObj := a.client.GetContainerReference(a.container) - list, err := contObj.ListBlobs(storage.ListBlobsParameters{Prefix: prefix}) - + list, err := a.container.ListBlobs(storage.ListBlobsParameters{Prefix: prefix}) if err != nil { // Break early. + a.permitPool.Release() return nil, err } + a.permitPool.Release() keys := []string{} for _, blob := range list.Blobs { @@ -181,7 +190,7 @@ func (a *AzureBackend) List(prefix string) ([]string, error) { if i := strings.Index(key, "/"); i == -1 { keys = append(keys, key) } else { - keys = appendIfMissing(keys, key[:i+1]) + keys = strutil.AppendIfMissing(keys, key[:i+1]) } } diff --git a/vendor/github.com/hashicorp/vault/physical/azure_test.go b/vendor/github.com/hashicorp/vault/physical/azure/azure_test.go similarity index 53% rename from vendor/github.com/hashicorp/vault/physical/azure_test.go rename to vendor/github.com/hashicorp/vault/physical/azure/azure_test.go index 135e658..eb0c510 100644 --- a/vendor/github.com/hashicorp/vault/physical/azure_test.go +++ b/vendor/github.com/hashicorp/vault/physical/azure/azure_test.go @@ -1,4 +1,4 @@ -package physical +package azure import ( "fmt" @@ -6,10 +6,12 @@ import ( "testing" "time" + cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" - "github.com/Azure/azure-storage-go" + storage "github.com/Azure/azure-sdk-for-go/storage" ) func TestAzureBackend(t *testing.T) { @@ -22,27 +24,29 @@ func TestAzureBackend(t *testing.T) { accountKey := os.Getenv("AZURE_ACCOUNT_KEY") ts := time.Now().UnixNano() - container := fmt.Sprintf("vault-test-%d", ts) + name := fmt.Sprintf("vault-test-%d", ts) cleanupClient, _ := storage.NewBasicClient(accountName, accountKey) + cleanupClient.HTTPClient = cleanhttp.DefaultPooledClient() logger := 
logformat.NewVaultLogger(log.LevelTrace) - backend, err := NewBackend("azure", logger, map[string]string{ - "container": container, + backend, err := NewAzureBackend(map[string]string{ + "container": name, "accountName": accountName, "accountKey": accountKey, - }) + }, logger) defer func() { - contObj := cleanupClient.GetBlobService().GetContainerReference(container) - contObj.DeleteIfExists() + blobService := cleanupClient.GetBlobService() + container := blobService.GetContainerReference(name) + container.DeleteIfExists(nil) }() if err != nil { t.Fatalf("err: %s", err) } - testBackend(t, backend) - testBackend_ListPrefix(t, backend) + physical.ExerciseBackend(t, backend) + physical.ExerciseBackend_ListPrefix(t, backend) } diff --git a/vendor/github.com/hashicorp/vault/physical/cache.go b/vendor/github.com/hashicorp/vault/physical/cache.go index f1b1365..fc44d09 100644 --- a/vendor/github.com/hashicorp/vault/physical/cache.go +++ b/vendor/github.com/hashicorp/vault/physical/cache.go @@ -1,7 +1,6 @@ package physical import ( - "fmt" "strings" "github.com/hashicorp/golang-lru" @@ -19,11 +18,16 @@ const ( // Vault are for policy objects so there is a large read reduction // by using a simple write-through cache. type Cache struct { - backend Backend - transactional Transactional - lru *lru.TwoQueueCache - locks []*locksutil.LockEntry - logger log.Logger + backend Backend + lru *lru.TwoQueueCache + locks []*locksutil.LockEntry + logger log.Logger +} + +// TransactionalCache is a Cache that wraps the physical that is transactional +type TransactionalCache struct { + *Cache + Transactional } // NewCache returns a physical cache of the given size. 
@@ -43,10 +47,14 @@ func NewCache(b Backend, size int, logger log.Logger) *Cache { logger: logger, } - if txnl, ok := c.backend.(Transactional); ok { - c.transactional = txnl - } + return c +} +func NewTransactionalCache(b Backend, size int, logger log.Logger) *TransactionalCache { + c := &TransactionalCache{ + Cache: NewCache(b, size, logger), + Transactional: b.(Transactional), + } return c } @@ -128,18 +136,14 @@ func (c *Cache) List(prefix string) ([]string, error) { return c.backend.List(prefix) } -func (c *Cache) Transaction(txns []TxnEntry) error { - if c.transactional == nil { - return fmt.Errorf("physical/cache: underlying backend does not support transactions") - } - +func (c *TransactionalCache) Transaction(txns []TxnEntry) error { // Lock the world for _, lock := range c.locks { lock.Lock() defer lock.Unlock() } - if err := c.transactional.Transaction(txns); err != nil { + if err := c.Transactional.Transaction(txns); err != nil { return err } diff --git a/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra.go b/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra.go new file mode 100644 index 0000000..493e156 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra.go @@ -0,0 +1,327 @@ +package cassandra + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "net" + "strconv" + "strings" + "time" + + log "github.com/mgutz/logxi/v1" + + "github.com/armon/go-metrics" + "github.com/gocql/gocql" + "github.com/hashicorp/vault/helper/certutil" + "github.com/hashicorp/vault/physical" +) + +// CassandraBackend is a physical backend that stores data in Cassandra. +type CassandraBackend struct { + sess *gocql.Session + table string + + logger log.Logger +} + +// NewCassandraBackend constructs a Cassandra backend using a pre-existing +// keyspace and table. 
+func NewCassandraBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + splitArray := func(v string) []string { + return strings.FieldsFunc(v, func(r rune) bool { + return r == ',' + }) + } + + var ( + hosts = splitArray(conf["hosts"]) + port = 9042 + explicitPort = false + keyspace = conf["keyspace"] + table = conf["table"] + consistency = gocql.LocalQuorum + ) + + if len(hosts) == 0 { + hosts = []string{"localhost"} + } + for i, hp := range hosts { + h, ps, err := net.SplitHostPort(hp) + if err != nil { + continue + } + p, err := strconv.Atoi(ps) + if err != nil { + return nil, err + } + + if explicitPort && p != port { + return nil, fmt.Errorf("all hosts must have the same port") + } + hosts[i], port = h, p + explicitPort = true + } + + if keyspace == "" { + keyspace = "vault" + } + if table == "" { + table = "entries" + } + if cs, ok := conf["consistency"]; ok { + switch cs { + case "ANY": + consistency = gocql.Any + case "ONE": + consistency = gocql.One + case "TWO": + consistency = gocql.Two + case "THREE": + consistency = gocql.Three + case "QUORUM": + consistency = gocql.Quorum + case "ALL": + consistency = gocql.All + case "LOCAL_QUORUM": + consistency = gocql.LocalQuorum + case "EACH_QUORUM": + consistency = gocql.EachQuorum + case "LOCAL_ONE": + consistency = gocql.LocalOne + default: + return nil, fmt.Errorf("'consistency' must be one of {ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_QUORUM, EACH_QUORUM, LOCAL_ONE}") + } + } + + connectStart := time.Now() + cluster := gocql.NewCluster(hosts...) 
+ cluster.Port = port + cluster.Keyspace = keyspace + + cluster.ProtoVersion = 2 + if protoVersionStr, ok := conf["protocol_version"]; ok { + protoVersion, err := strconv.Atoi(protoVersionStr) + if err != nil { + return nil, fmt.Errorf("'protocol_version' must be an integer") + } + cluster.ProtoVersion = protoVersion + } + + if username, ok := conf["username"]; ok { + if cluster.ProtoVersion < 2 { + return nil, fmt.Errorf("Authentication is not supported with protocol version < 2") + } + authenticator := gocql.PasswordAuthenticator{Username: username} + if password, ok := conf["password"]; ok { + authenticator.Password = password + } + cluster.Authenticator = authenticator + } + + if connTimeoutStr, ok := conf["connection_timeout"]; ok { + connectionTimeout, err := strconv.Atoi(connTimeoutStr) + if err != nil { + return nil, fmt.Errorf("'connection_timeout' must be an integer") + } + cluster.Timeout = time.Duration(connectionTimeout) * time.Second + } + + if err := setupCassandraTLS(conf, cluster); err != nil { + return nil, err + } + + sess, err := cluster.CreateSession() + if err != nil { + return nil, err + } + metrics.MeasureSince([]string{"cassandra", "connect"}, connectStart) + sess.SetConsistency(consistency) + + impl := &CassandraBackend{ + sess: sess, + table: table, + logger: logger} + return impl, nil +} + +func setupCassandraTLS(conf map[string]string, cluster *gocql.ClusterConfig) error { + tlsOnStr, ok := conf["tls"] + if !ok { + return nil + } + + tlsOn, err := strconv.Atoi(tlsOnStr) + if err != nil { + return fmt.Errorf("'tls' must be an integer (0 or 1)") + } + + if tlsOn == 0 { + return nil + } + + var tlsConfig = &tls.Config{} + if pemBundlePath, ok := conf["pem_bundle_file"]; ok { + pemBundleData, err := ioutil.ReadFile(pemBundlePath) + if err != nil { + return fmt.Errorf("Error reading pem bundle from %s: %v", pemBundlePath, err) + } + pemBundle, err := certutil.ParsePEMBundle(string(pemBundleData)) + if err != nil { + return fmt.Errorf("Error 
parsing 'pem_bundle': %v", err) + } + tlsConfig, err = pemBundle.GetTLSConfig(certutil.TLSClient) + if err != nil { + return err + } + } else { + if pemJSONPath, ok := conf["pem_json_file"]; ok { + pemJSONData, err := ioutil.ReadFile(pemJSONPath) + if err != nil { + return fmt.Errorf("Error reading json bundle from %s: %v", pemJSONPath, err) + } + pemJSON, err := certutil.ParsePKIJSON([]byte(pemJSONData)) + if err != nil { + return err + } + tlsConfig, err = pemJSON.GetTLSConfig(certutil.TLSClient) + if err != nil { + return err + } + } + } + + if tlsSkipVerifyStr, ok := conf["tls_skip_verify"]; ok { + tlsSkipVerify, err := strconv.Atoi(tlsSkipVerifyStr) + if err != nil { + return fmt.Errorf("'tls_skip_verify' must be an integer (0 or 1)") + } + if tlsSkipVerify == 0 { + tlsConfig.InsecureSkipVerify = false + } else { + tlsConfig.InsecureSkipVerify = true + } + } + + if tlsMinVersion, ok := conf["tls_min_version"]; ok { + switch tlsMinVersion { + case "tls10": + tlsConfig.MinVersion = tls.VersionTLS10 + case "tls11": + tlsConfig.MinVersion = tls.VersionTLS11 + case "tls12": + tlsConfig.MinVersion = tls.VersionTLS12 + default: + return fmt.Errorf("'tls_min_version' must be one of `tls10`, `tls11` or `tls12`") + } + } + + cluster.SslOpts = &gocql.SslOptions{ + Config: tlsConfig.Clone()} + return nil +} + +// bucketName sanitises a bucket name for Cassandra +func (c *CassandraBackend) bucketName(name string) string { + if name == "" { + name = "." + } + return strings.TrimRight(name, "/") +} + +// bucket returns all the prefix buckets the key should be stored at +func (c *CassandraBackend) buckets(key string) []string { + vals := append([]string{""}, physical.Prefixes(key)...) 
+ for i, v := range vals { + vals[i] = c.bucketName(v) + } + return vals +} + +// bucket returns the most specific bucket for the key +func (c *CassandraBackend) bucket(key string) string { + bs := c.buckets(key) + return bs[len(bs)-1] +} + +// Put is used to insert or update an entry +func (c *CassandraBackend) Put(entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"cassandra", "put"}, time.Now()) + + // Execute inserts to each key prefix simultaneously + stmt := fmt.Sprintf(`INSERT INTO "%s" (bucket, key, value) VALUES (?, ?, ?)`, c.table) + results := make(chan error) + buckets := c.buckets(entry.Key) + for _, _bucket := range buckets { + go func(bucket string) { + results <- c.sess.Query(stmt, bucket, entry.Key, entry.Value).Exec() + }(_bucket) + } + for i := 0; i < len(buckets); i++ { + if err := <-results; err != nil { + return err + } + } + return nil +} + +// Get is used to fetch an entry +func (c *CassandraBackend) Get(key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"cassandra", "get"}, time.Now()) + + v := []byte(nil) + stmt := fmt.Sprintf(`SELECT value FROM "%s" WHERE bucket = ? AND key = ? LIMIT 1`, c.table) + q := c.sess.Query(stmt, c.bucket(key), key) + if err := q.Scan(&v); err != nil { + if err == gocql.ErrNotFound { + return nil, nil + } + return nil, err + } + + return &physical.Entry{ + Key: key, + Value: v, + }, nil +} + +// Delete is used to permanently delete an entry +func (c *CassandraBackend) Delete(key string) error { + defer metrics.MeasureSince([]string{"cassandra", "delete"}, time.Now()) + + stmt := fmt.Sprintf(`DELETE FROM "%s" WHERE bucket = ? AND key = ?`, c.table) + batch := gocql.NewBatch(gocql.LoggedBatch) + for _, bucket := range c.buckets(key) { + batch.Entries = append(batch.Entries, gocql.BatchEntry{ + Stmt: stmt, + Args: []interface{}{bucket, key}}) + } + return c.sess.ExecuteBatch(batch) +} + +// List is used ot list all the keys under a given +// prefix, up to the next prefix. 
+func (c *CassandraBackend) List(prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"cassandra", "list"}, time.Now()) + + stmt := fmt.Sprintf(`SELECT key FROM "%s" WHERE bucket = ?`, c.table) + q := c.sess.Query(stmt, c.bucketName(prefix)) + iter := q.Iter() + k, keys := "", []string{} + for iter.Scan(&k) { + // Only return the next "component" (with a trailing slash if it has children) + k = strings.TrimPrefix(k, prefix) + if parts := strings.SplitN(k, "/", 2); len(parts) > 1 { + k = parts[0] + "/" + } else { + k = parts[0] + } + + // Deduplicate; this works because the keys are sorted + if len(keys) > 0 && keys[len(keys)-1] == k { + continue + } + keys = append(keys, k) + } + return keys, iter.Close() +} diff --git a/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra_test.go b/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra_test.go new file mode 100644 index 0000000..1c9b1f1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/cassandra/cassandra_test.go @@ -0,0 +1,112 @@ +package cassandra + +import ( + "fmt" + "os" + "reflect" + "strconv" + "testing" + "time" + + "github.com/gocql/gocql" + "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" + log "github.com/mgutz/logxi/v1" + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +func TestCassandraBackend(t *testing.T) { + if testing.Short() { + t.Skipf("skipping in short mode") + } + + cleanup, hosts := prepareCassandraTestContainer(t) + defer cleanup() + + // Run vault tests + logger := logformat.NewVaultLogger(log.LevelTrace) + b, err := NewCassandraBackend(map[string]string{ + "hosts": hosts, + "protocol_version": "3", + }, logger) + + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) +} + +func TestCassandraBackendBuckets(t *testing.T) { + expectations := map[string][]string{ + "": {"."}, + "a": {"."}, + "a/b": {".", 
"a"}, + "a/b/c/d/e": {".", "a", "a/b", "a/b/c", "a/b/c/d"}} + + b := &CassandraBackend{} + for input, expected := range expectations { + actual := b.buckets(input) + if !reflect.DeepEqual(actual, expected) { + t.Errorf("bad: %v expected: %v", actual, expected) + } + } +} + +func prepareCassandraTestContainer(t *testing.T) (func(), string) { + if os.Getenv("CASSANDRA_HOSTS") != "" { + return func() {}, os.Getenv("CASSANDRA_HOSTS") + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("cassandra: failed to connect to docker: %s", err) + } + + resource, err := pool.Run("cassandra", "3.11", []string{"CASSANDRA_BROADCAST_ADDRESS=127.0.0.1"}) + if err != nil { + t.Fatalf("cassandra: could not start container: %s", err) + } + + cleanup := func() { + pool.Purge(resource) + } + + setup := func() error { + cluster := gocql.NewCluster("127.0.0.1") + p, _ := strconv.Atoi(resource.GetPort("9042/tcp")) + cluster.Port = p + cluster.Timeout = 5 * time.Second + sess, err := cluster.CreateSession() + if err != nil { + return err + } + defer sess.Close() + + // Create keyspace + q := sess.Query(`CREATE KEYSPACE "vault" WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };`) + if err := q.Exec(); err != nil { + t.Fatalf("could not create cassandra keyspace: %v", err) + } + + // Create table + q = sess.Query(`CREATE TABLE "vault"."entries" ( + bucket text, + key text, + value blob, + PRIMARY KEY (bucket, key) + ) WITH CLUSTERING ORDER BY (key ASC);`) + if err := q.Exec(); err != nil { + t.Fatalf("could not create cassandra table: %v", err) + } + + return nil + } + if pool.Retry(setup); err != nil { + cleanup() + t.Fatalf("cassandra: could not setup container: %s", err) + } + + return cleanup, fmt.Sprintf("127.0.0.1:%s", resource.GetPort("9042/tcp")) +} diff --git a/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb.go b/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb.go new file mode 100644 index 
0000000..395c2da --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb.go @@ -0,0 +1,237 @@ +package cockroachdb + +import ( + "context" + "database/sql" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/armon/go-metrics" + "github.com/cockroachdb/cockroach-go/crdb" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/physical" + log "github.com/mgutz/logxi/v1" + + // CockroachDB uses the Postgres SQL driver + _ "github.com/lib/pq" +) + +// CockroachDBBackend Backend is a physical backend that stores data +// within a CockroachDB database. +type CockroachDBBackend struct { + table string + client *sql.DB + rawStatements map[string]string + statements map[string]*sql.Stmt + logger log.Logger + permitPool *physical.PermitPool +} + +// NewCockroachDBBackend constructs a CockroachDB backend using the given +// API client, server address, credentials, and database. +func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + // Get the CockroachDB credentials to perform read/write operations. + connURL, ok := conf["connection_url"] + if !ok || connURL == "" { + return nil, fmt.Errorf("missing connection_url") + } + + dbTable, ok := conf["table"] + if !ok { + dbTable = "vault_kv_store" + } + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + var err error + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + } + if logger.IsDebug() { + logger.Debug("cockroachdb: max_parallel set", "max_parallel", maxParInt) + } + } + + // Create CockroachDB handle for the database. + db, err := sql.Open("postgres", connURL) + if err != nil { + return nil, fmt.Errorf("failed to connect to cockroachdb: %v", err) + } + + // Create the required table if it doesn't exists. 
+ createQuery := "CREATE TABLE IF NOT EXISTS " + dbTable + + " (path STRING, value BYTES, PRIMARY KEY (path))" + if _, err := db.Exec(createQuery); err != nil { + return nil, fmt.Errorf("failed to create mysql table: %v", err) + } + + // Setup the backend + c := &CockroachDBBackend{ + table: dbTable, + client: db, + rawStatements: map[string]string{ + "put": "INSERT INTO " + dbTable + " VALUES($1, $2)" + + " ON CONFLICT (path) DO " + + " UPDATE SET (path, value) = ($1, $2)", + "get": "SELECT value FROM " + dbTable + " WHERE path = $1", + "delete": "DELETE FROM " + dbTable + " WHERE path = $1", + "list": "SELECT path FROM " + dbTable + " WHERE path LIKE $1", + }, + statements: make(map[string]*sql.Stmt), + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), + } + + // Prepare all the statements required + for name, query := range c.rawStatements { + if err := c.prepare(name, query); err != nil { + return nil, err + } + } + return c, nil +} + +// prepare is a helper to prepare a query for future execution +func (c *CockroachDBBackend) prepare(name, query string) error { + stmt, err := c.client.Prepare(query) + if err != nil { + return fmt.Errorf("failed to prepare '%s': %v", name, err) + } + c.statements[name] = stmt + return nil +} + +// Put is used to insert or update an entry. +func (c *CockroachDBBackend) Put(entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"cockroachdb", "put"}, time.Now()) + + c.permitPool.Acquire() + defer c.permitPool.Release() + + _, err := c.statements["put"].Exec(entry.Key, entry.Value) + if err != nil { + return err + } + return nil +} + +// Get is used to fetch and entry. 
+func (c *CockroachDBBackend) Get(key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"cockroachdb", "get"}, time.Now()) + + c.permitPool.Acquire() + defer c.permitPool.Release() + + var result []byte + err := c.statements["get"].QueryRow(key).Scan(&result) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + + ent := &physical.Entry{ + Key: key, + Value: result, + } + return ent, nil +} + +// Delete is used to permanently delete an entry +func (c *CockroachDBBackend) Delete(key string) error { + defer metrics.MeasureSince([]string{"cockroachdb", "delete"}, time.Now()) + + c.permitPool.Acquire() + defer c.permitPool.Release() + + _, err := c.statements["delete"].Exec(key) + if err != nil { + return err + } + return nil +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +func (c *CockroachDBBackend) List(prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"cockroachdb", "list"}, time.Now()) + + c.permitPool.Acquire() + defer c.permitPool.Release() + + likePrefix := prefix + "%" + rows, err := c.statements["list"].Query(likePrefix) + if err != nil { + return nil, err + } + defer rows.Close() + + var keys []string + for rows.Next() { + var key string + err = rows.Scan(&key) + if err != nil { + return nil, fmt.Errorf("failed to scan rows: %v", err) + } + + key = strings.TrimPrefix(key, prefix) + if i := strings.Index(key, "/"); i == -1 { + // Add objects only from the current 'folder' + keys = append(keys, key) + } else if i != -1 { + // Add truncated 'folder' paths + keys = strutil.AppendIfMissing(keys, string(key[:i+1])) + } + } + + sort.Strings(keys) + return keys, nil +} + +// Transaction is used to run multiple entries via a transaction +func (c *CockroachDBBackend) Transaction(txns []physical.TxnEntry) error { + defer metrics.MeasureSince([]string{"cockroachdb", "transaction"}, time.Now()) + if len(txns) == 0 { + return nil + } + + 
c.permitPool.Acquire() + defer c.permitPool.Release() + + return crdb.ExecuteTx(context.Background(), c.client, nil, func(tx *sql.Tx) error { + return c.transaction(tx, txns) + }) +} + +func (c *CockroachDBBackend) transaction(tx *sql.Tx, txns []physical.TxnEntry) error { + deleteStmt, err := tx.Prepare(c.rawStatements["delete"]) + if err != nil { + return err + } + putStmt, err := tx.Prepare(c.rawStatements["put"]) + if err != nil { + return err + } + + for _, op := range txns { + switch op.Operation { + case physical.DeleteOperation: + _, err = deleteStmt.Exec(op.Entry.Key) + case physical.PutOperation: + _, err = putStmt.Exec(op.Entry.Key, op.Entry.Value) + default: + return fmt.Errorf("%q is not a supported transaction operation", op.Operation) + } + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb_test.go b/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb_test.go new file mode 100644 index 0000000..35bcecf --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/cockroachdb/cockroachdb_test.go @@ -0,0 +1,103 @@ +package cockroachdb + +import ( + "database/sql" + "fmt" + "os" + "testing" + + dockertest "gopkg.in/ory-am/dockertest.v3" + + "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" + log "github.com/mgutz/logxi/v1" + + _ "github.com/lib/pq" +) + +func prepareCockroachDBTestContainer(t *testing.T) (cleanup func(), retURL, tableName string) { + tableName = os.Getenv("CR_TABLE") + if tableName == "" { + tableName = "vault_kv_store" + } + retURL = os.Getenv("CR_URL") + if retURL != "" { + return func() {}, retURL, tableName + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + dockerOptions := &dockertest.RunOptions{ + Repository: "cockroachdb/cockroach", + Tag: "release-1.0", + Cmd: []string{"start", "--insecure"}, + } + resource, err := 
pool.RunWithOptions(dockerOptions) + if err != nil { + t.Fatalf("Could not start local CockroachDB docker container: %s", err) + } + + cleanup = func() { + err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local container: %s", err) + } + } + + retURL = fmt.Sprintf("postgresql://root@localhost:%s/?sslmode=disable", resource.GetPort("26257/tcp")) + database := "database" + tableName = database + ".vault_kv" + + // exponential backoff-retry + if err = pool.Retry(func() error { + var err error + db, err := sql.Open("postgres", retURL) + if err != nil { + return err + } + _, err = db.Exec("CREATE DATABASE database") + return err + }); err != nil { + cleanup() + t.Fatalf("Could not connect to docker: %s", err) + } + return cleanup, retURL, tableName +} + +func TestCockroachDBBackend(t *testing.T) { + cleanup, connURL, table := prepareCockroachDBTestContainer(t) + defer cleanup() + + // Run vault tests + logger := logformat.NewVaultLogger(log.LevelTrace) + + b, err := NewCockroachDBBackend(map[string]string{ + "connection_url": connURL, + "table": table, + }, logger) + + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + defer func() { + truncate(t, b) + }() + + physical.ExerciseBackend(t, b) + truncate(t, b) + physical.ExerciseBackend_ListPrefix(t, b) + truncate(t, b) + physical.ExerciseTransactionalBackend(t, b) +} + +func truncate(t *testing.T, b physical.Backend) { + crdb := b.(*CockroachDBBackend) + _, err := crdb.client.Exec("TRUNCATE TABLE " + crdb.table) + if err != nil { + t.Fatalf("Failed to drop table: %v", err) + } +} diff --git a/vendor/github.com/hashicorp/vault/physical/consul.go b/vendor/github.com/hashicorp/vault/physical/consul/consul.go similarity index 92% rename from vendor/github.com/hashicorp/vault/physical/consul.go rename to vendor/github.com/hashicorp/vault/physical/consul/consul.go index 93aabf0..6c31466 100644 --- a/vendor/github.com/hashicorp/vault/physical/consul.go +++ 
b/vendor/github.com/hashicorp/vault/physical/consul/consul.go @@ -1,10 +1,11 @@ -package physical +package consul import ( "errors" "fmt" "io/ioutil" "net" + "net/http" "net/url" "strconv" "strings" @@ -23,11 +24,11 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-cleanhttp" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/helper/consts" "github.com/hashicorp/vault/helper/strutil" "github.com/hashicorp/vault/helper/tlsutil" + "github.com/hashicorp/vault/physical" ) const ( @@ -72,7 +73,7 @@ type ConsulBackend struct { logger log.Logger client *api.Client kv *api.KV - permitPool *PermitPool + permitPool *physical.PermitPool serviceLock sync.RWMutex redirectHost string redirectPort int64 @@ -86,9 +87,9 @@ type ConsulBackend struct { notifySealedCh chan notifyEvent } -// newConsulBackend constructs a Consul backend using the given API client +// NewConsulBackend constructs a Consul backend using the given API client // and the prefix in the KV store. 
-func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error) { +func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { // Get the path in Consul path, ok := conf["path"] if !ok { @@ -160,9 +161,7 @@ func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error // Configure the client consulConf := api.DefaultConfig() // Set MaxIdleConnsPerHost to the number of processes used in expiration.Restore - tr := cleanhttp.DefaultPooledTransport() - tr.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount - consulConf.HttpClient.Transport = tr + consulConf.Transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount if addr, ok := conf["address"]; ok { consulConf.Address = addr @@ -187,16 +186,14 @@ func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error return nil, err } - transport := cleanhttp.DefaultPooledTransport() - transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount - transport.TLSClientConfig = tlsClientConfig - if err := http2.ConfigureTransport(transport); err != nil { + consulConf.Transport.TLSClientConfig = tlsClientConfig + if err := http2.ConfigureTransport(consulConf.Transport); err != nil { return nil, err } - consulConf.HttpClient.Transport = transport logger.Debug("physical/consul: configured TLS") } + consulConf.HttpClient = &http.Client{Transport: consulConf.Transport} client, err := api.NewClient(consulConf) if err != nil { return nil, errwrap.Wrapf("client setup failed: {{err}}", err) @@ -231,7 +228,7 @@ func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error logger: logger, client: client, kv: client.KV(), - permitPool: NewPermitPool(maxParInt), + permitPool: physical.NewPermitPool(maxParInt), serviceName: service, serviceTags: strutil.ParseDedupLowercaseAndSortStrings(tags, ","), checkTimeout: checkTimeout, @@ -244,7 +241,14 @@ func newConsulBackend(conf map[string]string, logger log.Logger) 
(Backend, error } func setupTLSConfig(conf map[string]string) (*tls.Config, error) { - serverName := strings.Split(conf["address"], ":") + serverName, _, err := net.SplitHostPort(conf["address"]) + switch { + case err == nil: + case strings.Contains(err.Error(), "missing port"): + serverName = conf["address"] + default: + return nil, err + } insecureSkipVerify := false if _, ok := conf["tls_skip_verify"]; ok { @@ -265,7 +269,7 @@ func setupTLSConfig(conf map[string]string) (*tls.Config, error) { tlsClientConfig := &tls.Config{ MinVersion: tlsMinVersion, InsecureSkipVerify: insecureSkipVerify, - ServerName: serverName[0], + ServerName: serverName, } _, okCert := conf["tls_cert_file"] @@ -299,7 +303,7 @@ func setupTLSConfig(conf map[string]string) (*tls.Config, error) { } // Used to run multiple entries via a transaction -func (c *ConsulBackend) Transaction(txns []TxnEntry) error { +func (c *ConsulBackend) Transaction(txns []physical.TxnEntry) error { if len(txns) == 0 { return nil } @@ -311,9 +315,9 @@ func (c *ConsulBackend) Transaction(txns []TxnEntry) error { Key: c.path + op.Entry.Key, } switch op.Operation { - case DeleteOperation: + case physical.DeleteOperation: cop.Verb = api.KVDelete - case PutOperation: + case physical.PutOperation: cop.Verb = api.KVSet cop.Value = op.Entry.Value default: @@ -343,7 +347,7 @@ func (c *ConsulBackend) Transaction(txns []TxnEntry) error { } // Put is used to insert or update an entry -func (c *ConsulBackend) Put(entry *Entry) error { +func (c *ConsulBackend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"consul", "put"}, time.Now()) c.permitPool.Acquire() @@ -359,7 +363,7 @@ func (c *ConsulBackend) Put(entry *Entry) error { } // Get is used to fetch an entry -func (c *ConsulBackend) Get(key string) (*Entry, error) { +func (c *ConsulBackend) Get(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"consul", "get"}, time.Now()) c.permitPool.Acquire() @@ -379,7 +383,7 @@ func (c 
*ConsulBackend) Get(key string) (*Entry, error) { if pair == nil { return nil, nil } - ent := &Entry{ + ent := &physical.Entry{ Key: key, Value: pair.Value, } @@ -422,7 +426,7 @@ func (c *ConsulBackend) List(prefix string) ([]string, error) { } // Lock is used for mutual exclusion based on the given key. -func (c *ConsulBackend) LockWith(key, value string) (Lock, error) { +func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) { // Create the lock opts := &api.LockOptions{ Key: c.path + key, @@ -529,7 +533,7 @@ func (c *ConsulBackend) checkDuration() time.Duration { return lib.DurationMinusBuffer(c.checkTimeout, checkMinBuffer, checkJitterFactor) } -func (c *ConsulBackend) RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc activeFunction, sealedFunc sealedFunction) (err error) { +func (c *ConsulBackend) RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh physical.ShutdownChannel, redirectAddr string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) (err error) { if err := c.setRedirectAddr(redirectAddr); err != nil { return err } @@ -542,7 +546,7 @@ func (c *ConsulBackend) RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownC return nil } -func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc activeFunction, sealedFunc sealedFunction) { +func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh physical.ShutdownChannel, redirectAddr string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) { // This defer statement should be executed last. So push it first. defer waitGroup.Done() @@ -659,7 +663,7 @@ func (c *ConsulBackend) serviceID() string { // without any locks held and can be run concurrently, therefore no changes // to ConsulBackend can be made in this method (i.e. wtb const receiver for // compiler enforced safety). 
-func (c *ConsulBackend) reconcileConsul(registeredServiceID string, activeFunc activeFunction, sealedFunc sealedFunction) (serviceID string, err error) { +func (c *ConsulBackend) reconcileConsul(registeredServiceID string, activeFunc physical.ActiveFunction, sealedFunc physical.SealedFunction) (serviceID string, err error) { // Query vault Core for its current state active := activeFunc() sealed := sealedFunc() diff --git a/vendor/github.com/hashicorp/vault/physical/consul_test.go b/vendor/github.com/hashicorp/vault/physical/consul/consul_test.go similarity index 94% rename from vendor/github.com/hashicorp/vault/physical/consul_test.go rename to vendor/github.com/hashicorp/vault/physical/consul/consul_test.go index 59b1294..4d3230c 100644 --- a/vendor/github.com/hashicorp/vault/physical/consul_test.go +++ b/vendor/github.com/hashicorp/vault/physical/consul/consul_test.go @@ -1,4 +1,4 @@ -package physical +package consul import ( "fmt" @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/physical" dockertest "gopkg.in/ory-am/dockertest.v2" ) @@ -37,7 +38,7 @@ func testConsulBackend(t *testing.T) *ConsulBackend { func testConsulBackendConfig(t *testing.T, conf *consulConf) *ConsulBackend { logger := logformat.NewVaultLogger(log.LevelTrace) - be, err := newConsulBackend(*conf, logger) + be, err := NewConsulBackend(*conf, logger) if err != nil { t.Fatalf("Expected Consul to initialize: %v", err) } @@ -57,7 +58,7 @@ func testConsul_testConsulBackend(t *testing.T) { } } -func testActiveFunc(activePct float64) activeFunction { +func testActiveFunc(activePct float64) physical.ActiveFunction { return func() bool { var active bool standbyProb := rand.Float64() @@ -68,7 +69,7 @@ func testActiveFunc(activePct float64) activeFunction { } } -func testSealedFunc(sealedPct float64) sealedFunction { +func testSealedFunc(sealedPct float64) 
physical.SealedFunction { return func() bool { var sealed bool unsealedProb := rand.Float64() @@ -94,7 +95,7 @@ func TestConsul_ServiceTags(t *testing.T) { } logger := logformat.NewVaultLogger(log.LevelTrace) - be, err := newConsulBackend(consulConfig, logger) + be, err := NewConsulBackend(consulConfig, logger) if err != nil { t.Fatal(err) } @@ -182,7 +183,7 @@ func TestConsul_newConsulBackend(t *testing.T) { for _, test := range tests { logger := logformat.NewVaultLogger(log.LevelTrace) - be, err := newConsulBackend(test.consulConfig, logger) + be, err := NewConsulBackend(test.consulConfig, logger) if test.fail { if err == nil { t.Fatalf(`Expected config "%s" to fail`, test.name) @@ -206,7 +207,7 @@ func TestConsul_newConsulBackend(t *testing.T) { } } - var shutdownCh ShutdownChannel + var shutdownCh physical.ShutdownChannel waitGroup := &sync.WaitGroup{} if err := c.RunServiceDiscovery(waitGroup, shutdownCh, test.redirectAddr, testActiveFunc(0.5), testSealedFunc(0.5)); err != nil { t.Fatalf("bad: %v", err) @@ -411,18 +412,18 @@ func TestConsulBackend(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("consul", logger, map[string]string{ + b, err := NewConsulBackend(map[string]string{ "address": conf.Address, "path": randPath, "max_parallel": "256", "token": conf.Token, - }) + }, logger) if err != nil { t.Fatalf("err: %s", err) } - testBackend(t, b) - testBackend_ListPrefix(t, b) + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) } func TestConsulHABackend(t *testing.T) { @@ -452,23 +453,23 @@ func TestConsulHABackend(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("consul", logger, map[string]string{ + b, err := NewConsulBackend(map[string]string{ "address": conf.Address, "path": randPath, "max_parallel": "-1", "token": conf.Token, - }) + }, logger) if err != nil { t.Fatalf("err: %s", err) } - ha, ok := b.(HABackend) + ha, ok := b.(physical.HABackend) if !ok 
{ t.Fatalf("consul does not implement HABackend") } - testHABackend(t, ha, ha) + physical.ExerciseHABackend(t, ha, ha) - detect, ok := b.(RedirectDetect) + detect, ok := b.(physical.RedirectDetect) if !ok { t.Fatalf("consul does not implement RedirectDetect") } diff --git a/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb.go b/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb.go new file mode 100644 index 0000000..e7f945f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb.go @@ -0,0 +1,305 @@ +package couchdb + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/physical" + log "github.com/mgutz/logxi/v1" +) + +// CouchDBBackend allows the management of couchdb users +type CouchDBBackend struct { + logger log.Logger + client *couchDBClient + permitPool *physical.PermitPool +} + +type couchDBClient struct { + endpoint string + username string + password string + *http.Client +} + +type couchDBListItem struct { + ID string `json:"id"` + Key string `json:"key"` + Value struct { + Revision string + } `json:"value"` +} + +type couchDBList struct { + TotalRows int `json:"total_rows"` + Offset int `json:"offset"` + Rows []couchDBListItem `json:"rows"` +} + +func (m *couchDBClient) rev(key string) (string, error) { + req, err := http.NewRequest("HEAD", fmt.Sprintf("%s/%s", m.endpoint, key), nil) + if err != nil { + return "", err + } + req.SetBasicAuth(m.username, m.password) + + resp, err := m.Client.Do(req) + if err != nil { + return "", err + } + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return "", nil + } + etag := resp.Header.Get("Etag") + if len(etag) < 2 { + return "", nil + } + return etag[1 : len(etag)-1], nil +} + +func (m *couchDBClient) put(e couchDBEntry) error { + bs, err := 
json.Marshal(e) + if err != nil { + return err + } + + req, err := http.NewRequest("PUT", fmt.Sprintf("%s/%s", m.endpoint, e.ID), bytes.NewReader(bs)) + if err != nil { + return err + } + req.SetBasicAuth(m.username, m.password) + _, err = m.Client.Do(req) + + return err +} + +func (m *couchDBClient) get(key string) (*physical.Entry, error) { + req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", m.endpoint, url.PathEscape(key)), nil) + if err != nil { + return nil, err + } + req.SetBasicAuth(m.username, m.password) + resp, err := m.Client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotFound { + return nil, nil + } else if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("GET returned %s", resp.Status) + } + bs, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + entry := couchDBEntry{} + if err := json.Unmarshal(bs, &entry); err != nil { + return nil, err + } + return entry.Entry, nil +} + +func (m *couchDBClient) list(prefix string) ([]couchDBListItem, error) { + req, _ := http.NewRequest("GET", fmt.Sprintf("%s/_all_docs", m.endpoint), nil) + req.SetBasicAuth(m.username, m.password) + values := req.URL.Query() + values.Set("skip", "0") + values.Set("limit", "100") + values.Set("include_docs", "false") + if prefix != "" { + values.Set("startkey", fmt.Sprintf("%q", prefix)) + values.Set("endkey", fmt.Sprintf("%q", prefix+"{}")) + } + req.URL.RawQuery = values.Encode() + + resp, err := m.Client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + results := couchDBList{} + if err := json.Unmarshal(data, &results); err != nil { + return nil, err + } + + return results.Rows, nil +} + +func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBackend, error) { + endpoint := os.Getenv("COUCHDB_ENDPOINT") + if endpoint == "" { + endpoint = 
conf["endpoint"] + } + if endpoint == "" { + return nil, fmt.Errorf("missing endpoint") + } + + username := os.Getenv("COUCHDB_USERNAME") + if username == "" { + username = conf["username"] + } + + password := os.Getenv("COUCHDB_PASSWORD") + if password == "" { + password = conf["password"] + } + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + var err error + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + } + if logger.IsDebug() { + logger.Debug("couchdb: max_parallel set", "max_parallel", maxParInt) + } + } + + return &CouchDBBackend{ + client: &couchDBClient{ + endpoint: endpoint, + username: username, + password: password, + Client: cleanhttp.DefaultPooledClient(), + }, + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), + }, nil +} + +func NewCouchDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + return buildCouchDBBackend(conf, logger) +} + +type couchDBEntry struct { + Entry *physical.Entry `json:"entry"` + Rev string `json:"_rev,omitempty"` + ID string `json:"_id"` + Deleted *bool `json:"_deleted,omitempty"` +} + +// Put is used to insert or update an entry +func (m *CouchDBBackend) Put(entry *physical.Entry) error { + m.permitPool.Acquire() + defer m.permitPool.Release() + + return m.PutInternal(entry) +} + +// Get is used to fetch an entry +func (m *CouchDBBackend) Get(key string) (*physical.Entry, error) { + m.permitPool.Acquire() + defer m.permitPool.Release() + + return m.GetInternal(key) +} + +// Delete is used to permanently delete an entry +func (m *CouchDBBackend) Delete(key string) error { + m.permitPool.Acquire() + defer m.permitPool.Release() + + return m.DeleteInternal(key) +} + +// List is used to list all the keys under a given prefix +func (m *CouchDBBackend) List(prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"couchdb", "list"}, time.Now()) + + 
m.permitPool.Acquire() + defer m.permitPool.Release() + + items, err := m.client.list(prefix) + if err != nil { + return nil, err + } + + var out []string + seen := make(map[string]interface{}) + for _, result := range items { + trimmed := strings.TrimPrefix(result.ID, prefix) + sep := strings.Index(trimmed, "/") + if sep == -1 { + out = append(out, trimmed) + } else { + trimmed = trimmed[:sep+1] + if _, ok := seen[trimmed]; !ok { + out = append(out, trimmed) + seen[trimmed] = struct{}{} + } + } + } + return out, nil +} + +// TransactionalCouchDBBackend creates a couchdb backend that forces all operations to happen +// in serial +type TransactionalCouchDBBackend struct { + CouchDBBackend +} + +func NewTransactionalCouchDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + backend, err := buildCouchDBBackend(conf, logger) + if err != nil { + return nil, err + } + backend.permitPool = physical.NewPermitPool(1) + + return &TransactionalCouchDBBackend{ + CouchDBBackend: *backend, + }, nil +} + +// GetInternal is used to fetch an entry +func (m *CouchDBBackend) GetInternal(key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"couchdb", "get"}, time.Now()) + + return m.client.get(key) +} + +// PutInternal is used to insert or update an entry +func (m *CouchDBBackend) PutInternal(entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"couchdb", "put"}, time.Now()) + + revision, _ := m.client.rev(url.PathEscape(entry.Key)) + + return m.client.put(couchDBEntry{ + Entry: entry, + Rev: revision, + ID: url.PathEscape(entry.Key), + }) +} + +// DeleteInternal is used to permanently delete an entry +func (m *CouchDBBackend) DeleteInternal(key string) error { + defer metrics.MeasureSince([]string{"couchdb", "delete"}, time.Now()) + + revision, _ := m.client.rev(url.PathEscape(key)) + deleted := true + return m.client.put(couchDBEntry{ + ID: url.PathEscape(key), + Rev: revision, + Deleted: &deleted, + }) +} diff 
--git a/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb_test.go b/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb_test.go new file mode 100644 index 0000000..de4d05d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/couchdb/couchdb_test.go @@ -0,0 +1,132 @@ +package couchdb + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" + log "github.com/mgutz/logxi/v1" + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +func TestCouchDBBackend(t *testing.T) { + cleanup, endpoint, username, password := prepareCouchdbDBTestContainer(t) + defer cleanup() + + logger := logformat.NewVaultLogger(log.LevelTrace) + + b, err := NewCouchDBBackend(map[string]string{ + "endpoint": endpoint, + "username": username, + "password": password, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) +} + +func TestTransactionalCouchDBBackend(t *testing.T) { + cleanup, endpoint, username, password := prepareCouchdbDBTestContainer(t) + defer cleanup() + + logger := logformat.NewVaultLogger(log.LevelTrace) + + b, err := NewTransactionalCouchDBBackend(map[string]string{ + "endpoint": endpoint, + "username": username, + "password": password, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) +} + +func prepareCouchdbDBTestContainer(t *testing.T) (cleanup func(), retAddress, username, password string) { + // If environment variable is set, assume caller wants to target a real + // DynamoDB. 
+ if os.Getenv("COUCHDB_ENDPOINT") != "" { + return func() {}, os.Getenv("COUCHDB_ENDPOINT"), os.Getenv("COUCHDB_USERNAME"), os.Getenv("COUCHDB_PASSWORD") + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + resource, err := pool.Run("couchdb", "1.6", []string{}) + if err != nil { + t.Fatalf("Could not start local DynamoDB: %s", err) + } + + retAddress = "http://localhost:" + resource.GetPort("5984/tcp") + cleanup = func() { + err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local DynamoDB: %s", err) + } + } + + // exponential backoff-retry, because the couchDB may not be able to accept + // connections yet + if err := pool.Retry(func() error { + var err error + resp, err := http.Get(retAddress) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("Expected couchdb to return status code 200, got (%s) instead.", resp.Status) + } + return nil + }); err != nil { + t.Fatalf("Could not connect to docker: %s", err) + } + + dbName := fmt.Sprintf("vault-test-%d", time.Now().Unix()) + { + req, err := http.NewRequest("PUT", fmt.Sprintf("%s/%s", retAddress, dbName), nil) + if err != nil { + t.Fatalf("Could not create create database request: %q", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Could not create database: %q", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusCreated { + bs, _ := ioutil.ReadAll(resp.Body) + t.Fatalf("Failed to create database: %s %s\n", resp.Status, string(bs)) + } + } + { + req, err := http.NewRequest("PUT", fmt.Sprintf("%s/_config/admins/admin", retAddress), strings.NewReader(`"admin"`)) + if err != nil { + t.Fatalf("Could not create admin user request: %q", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Could not create admin user: %q", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + bs, _ 
:= ioutil.ReadAll(resp.Body) + t.Fatalf("Failed to create admin user: %s %s\n", resp.Status, string(bs)) + } + } + + return cleanup, retAddress + "/" + dbName, "admin", "admin" +} diff --git a/vendor/github.com/hashicorp/vault/physical/dynamodb.go b/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb.go similarity index 87% rename from vendor/github.com/hashicorp/vault/physical/dynamodb.go rename to vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb.go index 4c7cefb..c0b3f3e 100644 --- a/vendor/github.com/hashicorp/vault/physical/dynamodb.go +++ b/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb.go @@ -1,10 +1,11 @@ -package physical +package dynamodb import ( "fmt" "math" + "net/http" "os" - "path/filepath" + pkgPath "path" "sort" "strconv" "strings" @@ -16,14 +17,15 @@ import ( "github.com/armon/go-metrics" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/awsutil" + "github.com/hashicorp/vault/helper/consts" + "github.com/hashicorp/vault/physical" ) const ( @@ -75,7 +77,7 @@ type DynamoDBBackend struct { recovery bool logger log.Logger haEnabled bool - permitPool *PermitPool + permitPool *physical.PermitPool } // DynamoDBRecord is the representation of a vault entry in @@ -109,9 +111,9 @@ type DynamoDBLockRecord struct { Expires int64 } -// newDynamoDBBackend constructs a DynamoDB backend. If the +// NewDynamoDBBackend constructs a DynamoDB backend. If the // configured DynamoDB table does not exist, it creates it. 
-func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, error) { +func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { table := os.Getenv("AWS_DYNAMODB_TABLE") if table == "" { table = conf["table"] @@ -166,29 +168,37 @@ func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, err if endpoint == "" { endpoint = conf["endpoint"] } - region := os.Getenv("AWS_DEFAULT_REGION") + region := os.Getenv("AWS_REGION") if region == "" { - region = conf["region"] + region = os.Getenv("AWS_DEFAULT_REGION") if region == "" { - region = DefaultDynamoDBRegion + region = conf["region"] + if region == "" { + region = DefaultDynamoDBRegion + } } } - creds := credentials.NewChainCredentials([]credentials.Provider{ - &credentials.StaticProvider{Value: credentials.Value{ - AccessKeyID: accessKey, - SecretAccessKey: secretKey, - SessionToken: sessionToken, - }}, - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, - }) + credsConfig := &awsutil.CredentialsConfig{ + AccessKey: accessKey, + SecretKey: secretKey, + SessionToken: sessionToken, + } + creds, err := credsConfig.GenerateCredentialChain() + if err != nil { + return nil, err + } + + pooledTransport := cleanhttp.DefaultPooledTransport() + pooledTransport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount awsConf := aws.NewConfig(). WithCredentials(creds). WithRegion(region). - WithEndpoint(endpoint) + WithEndpoint(endpoint). 
+ WithHTTPClient(&http.Client{ + Transport: pooledTransport, + }) client := dynamodb.New(session.New(awsConf)) if err := ensureTableExists(client, table, readCapacity, writeCapacity); err != nil { @@ -222,7 +232,7 @@ func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, err return &DynamoDBBackend{ table: table, client: client, - permitPool: NewPermitPool(maxParInt), + permitPool: physical.NewPermitPool(maxParInt), recovery: recoveryModeBool, haEnabled: haEnabledBool, logger: logger, @@ -230,7 +240,7 @@ func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, err } // Put is used to insert or update an entry -func (d *DynamoDBBackend) Put(entry *Entry) error { +func (d *DynamoDBBackend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"dynamodb", "put"}, time.Now()) record := DynamoDBRecord{ @@ -248,7 +258,7 @@ func (d *DynamoDBBackend) Put(entry *Entry) error { }, }} - for _, prefix := range prefixes(entry.Key) { + for _, prefix := range physical.Prefixes(entry.Key) { record = DynamoDBRecord{ Path: recordPathForVaultKey(prefix), Key: fmt.Sprintf("%s/", recordKeyForVaultKey(prefix)), @@ -268,7 +278,7 @@ func (d *DynamoDBBackend) Put(entry *Entry) error { } // Get is used to fetch an entry -func (d *DynamoDBBackend) Get(key string) (*Entry, error) { +func (d *DynamoDBBackend) Get(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"dynamodb", "get"}, time.Now()) d.permitPool.Acquire() @@ -294,7 +304,7 @@ func (d *DynamoDBBackend) Get(key string) (*Entry, error) { return nil, err } - return &Entry{ + return &physical.Entry{ Key: vaultKey(record), Value: record.Value, }, nil @@ -314,14 +324,14 @@ func (d *DynamoDBBackend) Delete(key string) error { }} // clean up now empty 'folders' - prefixes := prefixes(key) + prefixes := physical.Prefixes(key) sort.Sort(sort.Reverse(sort.StringSlice(prefixes))) for _, prefix := range prefixes { - items, err := d.List(prefix) + hasChildren, err 
:= d.hasChildren(prefix) if err != nil { return err } - if len(items) == 1 { + if !hasChildren { requests = append(requests, &dynamodb.WriteRequest{ DeleteRequest: &dynamodb.DeleteRequest{ Key: map[string]*dynamodb.AttributeValue{ @@ -378,15 +388,49 @@ func (d *DynamoDBBackend) List(prefix string) ([]string, error) { return keys, nil } +// hasChildren returns true if there exist items below a certain path prefix. +// To do so, the method fetches such items from DynamoDB. If there are more +// than one item (which is the "directory" item), there are children. +func (d *DynamoDBBackend) hasChildren(prefix string) (bool, error) { + prefix = strings.TrimSuffix(prefix, "/") + prefix = escapeEmptyPath(prefix) + + queryInput := &dynamodb.QueryInput{ + TableName: aws.String(d.table), + ConsistentRead: aws.Bool(true), + KeyConditions: map[string]*dynamodb.Condition{ + "Path": { + ComparisonOperator: aws.String("EQ"), + AttributeValueList: []*dynamodb.AttributeValue{{ + S: aws.String(prefix), + }}, + }, + }, + // Avoid fetching too many items from DynamoDB for performance reasons. + // We need at least two because one is the directory item, all others + // are children. + Limit: aws.Int64(2), + } + + d.permitPool.Acquire() + defer d.permitPool.Release() + + out, err := d.client.Query(queryInput) + if err != nil { + return false, err + } + return len(out.Items) > 1, nil +} + // LockWith is used for mutual exclusion based on the given key. 
-func (d *DynamoDBBackend) LockWith(key, value string) (Lock, error) { +func (d *DynamoDBBackend) LockWith(key, value string) (physical.Lock, error) { identity, err := uuid.GenerateUUID() if err != nil { return nil, err } return &DynamoDBLock{ backend: d, - key: filepath.Join(filepath.Dir(key), DynamoDBLockPrefix+filepath.Base(key)), + key: pkgPath.Join(pkgPath.Dir(key), DynamoDBLockPrefix+pkgPath.Base(key)), value: value, identity: identity, recovery: d.recovery, @@ -690,7 +734,7 @@ func ensureTableExists(client *dynamodb.DynamoDB, table string, readCapacity, wr // its last component. func recordPathForVaultKey(key string) string { if strings.Contains(key, "/") { - return filepath.Dir(key) + return pkgPath.Dir(key) } return DynamoDBEmptyPath } @@ -700,7 +744,7 @@ func recordPathForVaultKey(key string) string { // property. This path equals the the vault key's // last component. func recordKeyForVaultKey(key string) string { - return filepath.Base(key) + return pkgPath.Base(key) } // vaultKey returns the vault key for a given record @@ -711,7 +755,7 @@ func vaultKey(record *DynamoDBRecord) string { if path == "" { return record.Key } - return filepath.Join(record.Path, record.Key) + return pkgPath.Join(record.Path, record.Key) } // escapeEmptyPath is used to escape the root key's path @@ -731,15 +775,3 @@ func unescapeEmptyPath(s string) string { } return s } - -// prefixes returns all parent 'folders' for a given -// vault key. -// e.g. 
for 'foo/bar/baz', it returns ['foo', 'foo/bar'] -func prefixes(s string) []string { - components := strings.Split(s, "/") - result := []string{} - for i := 1; i < len(components); i++ { - result = append(result, strings.Join(components[:i], "/")) - } - return result -} diff --git a/vendor/github.com/hashicorp/vault/physical/dynamodb_test.go b/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb_test.go similarity index 94% rename from vendor/github.com/hashicorp/vault/physical/dynamodb_test.go rename to vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb_test.go index daac8c8..426f23f 100644 --- a/vendor/github.com/hashicorp/vault/physical/dynamodb_test.go +++ b/vendor/github.com/hashicorp/vault/physical/dynamodb/dynamodb_test.go @@ -1,4 +1,4 @@ -package physical +package dynamodb import ( "fmt" @@ -9,6 +9,7 @@ import ( "time" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" dockertest "gopkg.in/ory-am/dockertest.v3" @@ -49,20 +50,20 @@ func TestDynamoDBBackend(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("dynamodb", logger, map[string]string{ + b, err := NewDynamoDBBackend(map[string]string{ "access_key": creds.AccessKeyID, "secret_key": creds.SecretAccessKey, "session_token": creds.SessionToken, "table": table, "region": region, "endpoint": endpoint, - }) + }, logger) if err != nil { t.Fatalf("err: %s", err) } - testBackend(t, b) - testBackend_ListPrefix(t, b) + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) } func TestDynamoDBHABackend(t *testing.T) { @@ -95,30 +96,30 @@ func TestDynamoDBHABackend(t *testing.T) { }() logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("dynamodb", logger, map[string]string{ + b, err := NewDynamoDBBackend(map[string]string{ "access_key": creds.AccessKeyID, "secret_key": creds.SecretAccessKey, "session_token": creds.SessionToken, "table": table, 
"region": region, "endpoint": endpoint, - }) + }, logger) if err != nil { t.Fatalf("err: %s", err) } - ha, ok := b.(HABackend) + ha, ok := b.(physical.HABackend) if !ok { t.Fatalf("dynamodb does not implement HABackend") } - testHABackend(t, ha, ha) + physical.ExerciseHABackend(t, ha, ha) testDynamoDBLockTTL(t, ha) } // Similar to testHABackend, but using internal implementation details to // trigger the lock failure scenario by setting the lock renew period for one // of the locks to a higher value than the lock TTL. -func testDynamoDBLockTTL(t *testing.T, ha HABackend) { +func testDynamoDBLockTTL(t *testing.T, ha physical.HABackend) { // Set much smaller lock times to speed up the test. lockTTL := time.Second * 3 renewInterval := time.Second * 1 diff --git a/vendor/github.com/hashicorp/vault/physical/etcd.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd.go similarity index 92% rename from vendor/github.com/hashicorp/vault/physical/etcd.go rename to vendor/github.com/hashicorp/vault/physical/etcd/etcd.go index 01a928d..5d9c26d 100644 --- a/vendor/github.com/hashicorp/vault/physical/etcd.go +++ b/vendor/github.com/hashicorp/vault/physical/etcd/etcd.go @@ -1,4 +1,4 @@ -package physical +package etcd import ( "context" @@ -10,6 +10,7 @@ import ( "github.com/coreos/etcd/client" "github.com/coreos/go-semver/semver" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" ) @@ -22,11 +23,11 @@ var ( EtcdLockHeldError = errors.New("lock already held") EtcdLockNotHeldError = errors.New("lock not held") EtcdSemaphoreKeyRemovedError = errors.New("semaphore key removed before lock aquisition") - EtcdVersionUnknow = errors.New("etcd: unknown API version") + EtcdVersionUnknown = errors.New("etcd: unknown API version") ) -// newEtcdBackend constructs a etcd backend using a given machine address. -func newEtcdBackend(conf map[string]string, logger log.Logger) (Backend, error) { +// NewEtcdBackend constructs a etcd backend using a given machine address. 
+func NewEtcdBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { var ( apiVersion string ok bool @@ -75,7 +76,7 @@ func newEtcdBackend(conf map[string]string, logger log.Logger) (Backend, error) } return newEtcd3Backend(conf, logger) default: - return nil, EtcdVersionUnknow + return nil, EtcdVersionUnknown } } diff --git a/vendor/github.com/hashicorp/vault/physical/etcd2.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd2.go similarity index 97% rename from vendor/github.com/hashicorp/vault/physical/etcd2.go rename to vendor/github.com/hashicorp/vault/physical/etcd/etcd2.go index 4ef4b08..4e08615 100644 --- a/vendor/github.com/hashicorp/vault/physical/etcd2.go +++ b/vendor/github.com/hashicorp/vault/physical/etcd/etcd2.go @@ -1,4 +1,4 @@ -package physical +package etcd import ( "context" @@ -14,6 +14,7 @@ import ( metrics "github.com/armon/go-metrics" "github.com/coreos/etcd/client" "github.com/coreos/etcd/pkg/transport" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" ) @@ -49,12 +50,12 @@ const ( type Etcd2Backend struct { path string kAPI client.KeysAPI - permitPool *PermitPool + permitPool *physical.PermitPool logger log.Logger haEnabled bool } -func newEtcd2Backend(conf map[string]string, logger log.Logger) (Backend, error) { +func newEtcd2Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) { // Get the etcd path form the configuration. path, ok := conf["path"] if !ok { @@ -110,7 +111,7 @@ func newEtcd2Backend(conf map[string]string, logger log.Logger) (Backend, error) return &Etcd2Backend{ path: path, kAPI: kAPI, - permitPool: NewPermitPool(DefaultParallelOperations), + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), logger: logger, haEnabled: haEnabledBool, }, nil @@ -169,7 +170,7 @@ func newEtcdV2Client(conf map[string]string) (client.Client, error) { } // Put is used to insert or update an entry. 
-func (c *Etcd2Backend) Put(entry *Entry) error { +func (c *Etcd2Backend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now()) value := base64.StdEncoding.EncodeToString(entry.Value) @@ -181,7 +182,7 @@ func (c *Etcd2Backend) Put(entry *Entry) error { } // Get is used to fetch an entry. -func (c *Etcd2Backend) Get(key string) (*Entry, error) { +func (c *Etcd2Backend) Get(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now()) c.permitPool.Acquire() @@ -206,7 +207,7 @@ func (c *Etcd2Backend) Get(key string) (*Entry, error) { } // Construct and return a new entry. - return &Entry{ + return &physical.Entry{ Key: key, Value: value, }, nil @@ -290,7 +291,7 @@ func (b *Etcd2Backend) nodePathLock(key string) string { } // Lock is used for mutual exclusion based on the given key. -func (c *Etcd2Backend) LockWith(key, value string) (Lock, error) { +func (c *Etcd2Backend) LockWith(key, value string) (physical.Lock, error) { return &Etcd2Lock{ kAPI: c.kAPI, value: value, diff --git a/vendor/github.com/hashicorp/vault/physical/etcd3.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd3.go similarity index 73% rename from vendor/github.com/hashicorp/vault/physical/etcd3.go rename to vendor/github.com/hashicorp/vault/physical/etcd/etcd3.go index 6fecc73..04944e5 100644 --- a/vendor/github.com/hashicorp/vault/physical/etcd3.go +++ b/vendor/github.com/hashicorp/vault/physical/etcd/etcd3.go @@ -1,4 +1,4 @@ -package physical +package etcd import ( "errors" @@ -11,10 +11,11 @@ import ( "time" metrics "github.com/armon/go-metrics" - "github.com/coreos/etcd/client" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/clientv3/concurrency" "github.com/coreos/etcd/pkg/transport" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" "golang.org/x/net/context" ) @@ -27,16 +28,21 @@ type EtcdBackend struct { path string 
haEnabled bool - permitPool *PermitPool + permitPool *physical.PermitPool etcd *clientv3.Client } -// etcd default lease duration is 60s. set to 15s for faster recovery. -const etcd3LockTimeoutInSeconds = 15 +const ( + // etcd3 default lease duration is 60s. set to 15s for faster recovery. + etcd3LockTimeoutInSeconds = 15 + // etcd3 default request timeout is set to 5s. It should be long enough + // for most cases, even with internal retry. + etcd3RequestTimeout = 5 * time.Second +) // newEtcd3Backend constructs a etcd3 backend. -func newEtcd3Backend(conf map[string]string, logger log.Logger) (Backend, error) { +func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) { // Get the etcd path form the configuration. path, ok := conf["path"] if !ok { @@ -117,7 +123,7 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (Backend, error) } if sync { - ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout) err := etcd.Sync(ctx) cancel() if err != nil { @@ -128,29 +134,33 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (Backend, error) return &EtcdBackend{ path: path, etcd: etcd, - permitPool: NewPermitPool(DefaultParallelOperations), + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), logger: logger, haEnabled: haEnabledBool, }, nil } -func (c *EtcdBackend) Put(entry *Entry) error { +func (c *EtcdBackend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now()) c.permitPool.Acquire() defer c.permitPool.Release() - _, err := c.etcd.Put(context.Background(), path.Join(c.path, entry.Key), string(entry.Value)) + ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout) + defer cancel() + _, err := c.etcd.Put(ctx, path.Join(c.path, entry.Key), string(entry.Value)) return err } -func (c *EtcdBackend) Get(key string) 
(*Entry, error) { +func (c *EtcdBackend) Get(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now()) c.permitPool.Acquire() defer c.permitPool.Release() - resp, err := c.etcd.Get(context.Background(), path.Join(c.path, key)) + ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout) + defer cancel() + resp, err := c.etcd.Get(ctx, path.Join(c.path, key)) if err != nil { return nil, err } @@ -161,7 +171,7 @@ func (c *EtcdBackend) Get(key string) (*Entry, error) { if len(resp.Kvs) > 1 { return nil, errors.New("unexpected number of keys from a get request") } - return &Entry{ + return &physical.Entry{ Key: key, Value: resp.Kvs[0].Value, }, nil @@ -173,7 +183,9 @@ func (c *EtcdBackend) Delete(key string) error { c.permitPool.Acquire() defer c.permitPool.Release() - _, err := c.etcd.Delete(context.Background(), path.Join(c.path, key)) + ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout) + defer cancel() + _, err := c.etcd.Delete(ctx, path.Join(c.path, key)) if err != nil { return err } @@ -186,8 +198,10 @@ func (c *EtcdBackend) List(prefix string) ([]string, error) { c.permitPool.Acquire() defer c.permitPool.Release() + ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout) + defer cancel() prefix = path.Join(c.path, prefix) - resp, err := c.etcd.Get(context.Background(), prefix, clientv3.WithPrefix()) + resp, err := c.etcd.Get(ctx, prefix, clientv3.WithPrefix()) if err != nil { return nil, err } @@ -204,7 +218,7 @@ func (c *EtcdBackend) List(prefix string) ([]string, error) { if i := strings.Index(key, "/"); i == -1 { keys = append(keys, key) } else if i != -1 { - keys = appendIfMissing(keys, key[:i+1]) + keys = strutil.AppendIfMissing(keys, key[:i+1]) } } return keys, nil @@ -229,7 +243,7 @@ type EtcdLock struct { } // Lock is used for mutual exclusion based on the given key. 
-func (c *EtcdBackend) LockWith(key, value string) (Lock, error) { +func (c *EtcdBackend) LockWith(key, value string) (physical.Lock, error) { session, err := concurrency.NewSession(c.etcd, concurrency.WithTTL(etcd3LockTimeoutInSeconds)) if err != nil { return nil, err @@ -264,7 +278,10 @@ func (c *EtcdLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { } return nil, err } - if _, err := c.etcd.Put(ctx, c.etcdMu.Key(), c.value, clientv3.WithLease(c.etcdSession.Lease())); err != nil { + + pctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout) + defer cancel() + if _, err := c.etcd.Put(pctx, c.etcdMu.Key(), c.value, clientv3.WithLease(c.etcdSession.Lease())); err != nil { return nil, err } @@ -281,11 +298,16 @@ func (c *EtcdLock) Unlock() error { return EtcdLockNotHeldError } - return c.etcdMu.Unlock(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout) + defer cancel() + return c.etcdMu.Unlock(ctx) } func (c *EtcdLock) Value() (bool, string, error) { - resp, err := c.etcd.Get(context.Background(), + ctx, cancel := context.WithTimeout(context.Background(), etcd3RequestTimeout) + defer cancel() + + resp, err := c.etcd.Get(ctx, c.prefix, clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortAscend)) diff --git a/vendor/github.com/hashicorp/vault/physical/etcd3_test.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd3_test.go similarity index 66% rename from vendor/github.com/hashicorp/vault/physical/etcd3_test.go rename to vendor/github.com/hashicorp/vault/physical/etcd/etcd3_test.go index 0724091..fbd842d 100644 --- a/vendor/github.com/hashicorp/vault/physical/etcd3_test.go +++ b/vendor/github.com/hashicorp/vault/physical/etcd/etcd3_test.go @@ -1,4 +1,4 @@ -package physical +package etcd import ( "fmt" @@ -7,6 +7,7 @@ import ( "time" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" ) 
@@ -18,20 +19,20 @@ func TestEtcd3Backend(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("etcd", logger, map[string]string{ + b, err := NewEtcdBackend(map[string]string{ "path": fmt.Sprintf("/vault-%d", time.Now().Unix()), "etcd_api": "3", - }) + }, logger) if err != nil { t.Fatalf("err: %s", err) } - testBackend(t, b) - testBackend_ListPrefix(t, b) + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) - ha, ok := b.(HABackend) + ha, ok := b.(physical.HABackend) if !ok { t.Fatalf("etcd3 does not implement HABackend") } - testHABackend(t, ha, ha) + physical.ExerciseHABackend(t, ha, ha) } diff --git a/vendor/github.com/hashicorp/vault/physical/etcd_test.go b/vendor/github.com/hashicorp/vault/physical/etcd/etcd_test.go similarity index 82% rename from vendor/github.com/hashicorp/vault/physical/etcd_test.go rename to vendor/github.com/hashicorp/vault/physical/etcd/etcd_test.go index adddac2..d5c30bb 100644 --- a/vendor/github.com/hashicorp/vault/physical/etcd_test.go +++ b/vendor/github.com/hashicorp/vault/physical/etcd/etcd_test.go @@ -1,4 +1,4 @@ -package physical +package etcd import ( "fmt" @@ -7,6 +7,7 @@ import ( "time" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" "github.com/coreos/etcd/client" @@ -52,19 +53,19 @@ func TestEtcdBackend(t *testing.T) { // need to provide it explicitly. 
logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("etcd", logger, map[string]string{ + b, err := NewEtcdBackend(map[string]string{ "path": randPath, - }) + }, logger) if err != nil { t.Fatalf("err: %s", err) } - testBackend(t, b) - testBackend_ListPrefix(t, b) + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) - ha, ok := b.(HABackend) + ha, ok := b.(physical.HABackend) if !ok { t.Fatalf("etcd does not implement HABackend") } - testHABackend(t, ha, ha) + physical.ExerciseHABackend(t, ha, ha) } diff --git a/vendor/github.com/hashicorp/vault/physical/file.go b/vendor/github.com/hashicorp/vault/physical/file/file.go similarity index 74% rename from vendor/github.com/hashicorp/vault/physical/file.go rename to vendor/github.com/hashicorp/vault/physical/file/file.go index d9c5225..df05dba 100644 --- a/vendor/github.com/hashicorp/vault/physical/file.go +++ b/vendor/github.com/hashicorp/vault/physical/file/file.go @@ -1,4 +1,4 @@ -package physical +package file import ( "encoding/json" @@ -11,7 +11,9 @@ import ( log "github.com/mgutz/logxi/v1" + "github.com/hashicorp/vault/helper/consts" "github.com/hashicorp/vault/helper/jsonutil" + "github.com/hashicorp/vault/physical" ) // FileBackend is a physical backend that stores data on disk @@ -25,15 +27,15 @@ type FileBackend struct { sync.RWMutex path string logger log.Logger - permitPool *PermitPool + permitPool *physical.PermitPool } type TransactionalFileBackend struct { FileBackend } -// newFileBackend constructs a FileBackend using the given directory -func newFileBackend(conf map[string]string, logger log.Logger) (Backend, error) { +// NewFileBackend constructs a FileBackend using the given directory +func NewFileBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { path, ok := conf["path"] if !ok { return nil, fmt.Errorf("'path' must be set") @@ -42,11 +44,11 @@ func newFileBackend(conf map[string]string, logger log.Logger) (Backend, error) return 
&FileBackend{ path: path, logger: logger, - permitPool: NewPermitPool(DefaultParallelOperations), + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), }, nil } -func newTransactionalFileBackend(conf map[string]string, logger log.Logger) (Backend, error) { +func NewTransactionalFileBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { path, ok := conf["path"] if !ok { return nil, fmt.Errorf("'path' must be set") @@ -57,7 +59,7 @@ func newTransactionalFileBackend(conf map[string]string, logger log.Logger) (Bac FileBackend: FileBackend{ path: path, logger: logger, - permitPool: NewPermitPool(1), + permitPool: physical.NewPermitPool(1), }, }, nil } @@ -77,6 +79,10 @@ func (b *FileBackend) DeleteInternal(path string) error { return nil } + if err := b.validatePath(path); err != nil { + return err + } + basePath, key := b.expandPath(path) fullPath := filepath.Join(basePath, key) @@ -99,6 +105,9 @@ func (b *FileBackend) cleanupLogicalPath(path string) error { dir, err := os.Open(fullPath) if err != nil { + if dir != nil { + dir.Close() + } if os.IsNotExist(err) { return nil } else { @@ -124,7 +133,7 @@ func (b *FileBackend) cleanupLogicalPath(path string) error { return nil } -func (b *FileBackend) Get(k string) (*Entry, error) { +func (b *FileBackend) Get(k string) (*physical.Entry, error) { b.permitPool.Acquire() defer b.permitPool.Release() @@ -134,11 +143,18 @@ func (b *FileBackend) Get(k string) (*Entry, error) { return b.GetInternal(k) } -func (b *FileBackend) GetInternal(k string) (*Entry, error) { +func (b *FileBackend) GetInternal(k string) (*physical.Entry, error) { + if err := b.validatePath(k); err != nil { + return nil, err + } + path, key := b.expandPath(k) path = filepath.Join(path, key) f, err := os.Open(path) + if f != nil { + defer f.Close() + } if err != nil { if os.IsNotExist(err) { return nil, nil @@ -146,9 +162,8 @@ func (b *FileBackend) GetInternal(k string) (*Entry, error) { return nil, err } - defer 
f.Close() - var entry Entry + var entry physical.Entry if err := jsonutil.DecodeJSONFromReader(f, &entry); err != nil { return nil, err } @@ -156,7 +171,7 @@ func (b *FileBackend) GetInternal(k string) (*Entry, error) { return &entry, nil } -func (b *FileBackend) Put(entry *Entry) error { +func (b *FileBackend) Put(entry *physical.Entry) error { b.permitPool.Acquire() defer b.permitPool.Release() @@ -166,7 +181,11 @@ func (b *FileBackend) Put(entry *Entry) error { return b.PutInternal(entry) } -func (b *FileBackend) PutInternal(entry *Entry) error { +func (b *FileBackend) PutInternal(entry *physical.Entry) error { + if err := b.validatePath(entry.Key); err != nil { + return err + } + path, key := b.expandPath(entry.Key) // Make the parent tree @@ -179,10 +198,12 @@ func (b *FileBackend) PutInternal(entry *Entry) error { filepath.Join(path, key), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600) + if f != nil { + defer f.Close() + } if err != nil { return err } - defer f.Close() enc := json.NewEncoder(f) return enc.Encode(entry) } @@ -198,6 +219,10 @@ func (b *FileBackend) List(prefix string) ([]string, error) { } func (b *FileBackend) ListInternal(prefix string) ([]string, error) { + if err := b.validatePath(prefix); err != nil { + return nil, err + } + path := b.path if prefix != "" { path = filepath.Join(path, prefix) @@ -205,6 +230,9 @@ func (b *FileBackend) ListInternal(prefix string) ([]string, error) { // Read the directory contents f, err := os.Open(path) + if f != nil { + defer f.Close() + } if err != nil { if os.IsNotExist(err) { return nil, nil @@ -212,7 +240,6 @@ func (b *FileBackend) ListInternal(prefix string) ([]string, error) { return nil, err } - defer f.Close() names, err := f.Readdirnames(-1) if err != nil { @@ -237,12 +264,21 @@ func (b *FileBackend) expandPath(k string) (string, string) { return path, "_" + key } -func (b *TransactionalFileBackend) Transaction(txns []TxnEntry) error { +func (b *FileBackend) validatePath(path string) error { + switch { + 
case strings.Contains(path, ".."): + return consts.ErrPathContainsParentReferences + } + + return nil +} + +func (b *TransactionalFileBackend) Transaction(txns []physical.TxnEntry) error { b.permitPool.Acquire() defer b.permitPool.Release() b.Lock() defer b.Unlock() - return genericTransactionHandler(b, txns) + return physical.GenericTransactionHandler(b, txns) } diff --git a/vendor/github.com/hashicorp/vault/physical/file_test.go b/vendor/github.com/hashicorp/vault/physical/file/file_test.go similarity index 77% rename from vendor/github.com/hashicorp/vault/physical/file_test.go rename to vendor/github.com/hashicorp/vault/physical/file/file_test.go index 9810f4b..6438e21 100644 --- a/vendor/github.com/hashicorp/vault/physical/file_test.go +++ b/vendor/github.com/hashicorp/vault/physical/file/file_test.go @@ -1,4 +1,4 @@ -package physical +package file import ( "encoding/json" @@ -9,6 +9,7 @@ import ( "testing" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" ) @@ -21,9 +22,9 @@ func TestFileBackend_Base64URLEncoding(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("file", logger, map[string]string{ + b, err := NewFileBackend(map[string]string{ "path": backendPath, - }) + }, logger) if err != nil { t.Fatalf("err: %s", err) } @@ -39,7 +40,7 @@ func TestFileBackend_Base64URLEncoding(t *testing.T) { // Create a storage entry without base64 encoding the file name rawFullPath := filepath.Join(backendPath, "_foo") - e := &Entry{Key: "foo", Value: []byte("test")} + e := &physical.Entry{Key: "foo", Value: []byte("test")} f, err := os.OpenFile( rawFullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, @@ -131,6 +132,30 @@ func TestFileBackend_Base64URLEncoding(t *testing.T) { } } +func TestFileBackend_ValidatePath(t *testing.T) { + dir, err := ioutil.TempDir("", "vault") + if err != nil { + t.Fatalf("err: %s", err) + } + defer os.RemoveAll(dir) + + logger := 
logformat.NewVaultLogger(log.LevelTrace) + + b, err := NewFileBackend(map[string]string{ + "path": dir, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := b.Delete("foo/bar/../zip"); err == nil { + t.Fatal("expected error") + } + if err := b.Delete("foo/bar/zip"); err != nil { + t.Fatal("did not expect error") + } +} + func TestFileBackend(t *testing.T) { dir, err := ioutil.TempDir("", "vault") if err != nil { @@ -140,13 +165,13 @@ func TestFileBackend(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("file", logger, map[string]string{ + b, err := NewFileBackend(map[string]string{ "path": dir, - }) + }, logger) if err != nil { t.Fatalf("err: %s", err) } - testBackend(t, b) - testBackend_ListPrefix(t, b) + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) } diff --git a/vendor/github.com/hashicorp/vault/physical/gcs.go b/vendor/github.com/hashicorp/vault/physical/gcs/gcs.go similarity index 75% rename from vendor/github.com/hashicorp/vault/physical/gcs.go rename to vendor/github.com/hashicorp/vault/physical/gcs/gcs.go index e4d4187..5e7fc78 100644 --- a/vendor/github.com/hashicorp/vault/physical/gcs.go +++ b/vendor/github.com/hashicorp/vault/physical/gcs/gcs.go @@ -1,4 +1,4 @@ -package physical +package gcs import ( "fmt" @@ -10,6 +10,7 @@ import ( "time" "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" "cloud.google.com/go/storage" @@ -24,15 +25,14 @@ import ( type GCSBackend struct { bucketName string client *storage.Client - permitPool *PermitPool + permitPool *physical.PermitPool logger log.Logger } -// newGCSBackend constructs a Google Cloud Storage backend using a pre-existing +// NewGCSBackend constructs a Google Cloud Storage backend using a pre-existing // bucket. 
Credentials can be provided to the backend, sourced // from environment variables or a service account file -func newGCSBackend(conf map[string]string, logger log.Logger) (Backend, error) { - +func NewGCSBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { bucketName := os.Getenv("GOOGLE_STORAGE_BUCKET") if bucketName == "" { @@ -42,26 +42,14 @@ func newGCSBackend(conf map[string]string, logger log.Logger) (Backend, error) { } } - // path to service account JSON file - credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") - if credentialsFile == "" { - credentialsFile = conf["credentials_file"] - if credentialsFile == "" { - return nil, fmt.Errorf("env var GOOGLE_APPLICATION_CREDENTIALS or configuration parameter 'credentials_file' must be set") - } - } - - client, err := storage.NewClient( - context.Background(), - option.WithServiceAccountFile(credentialsFile), - ) - + ctx := context.Background() + client, err := newGCSClient(ctx, conf, logger) if err != nil { - return nil, fmt.Errorf("error establishing storage client: '%v'", err) + return nil, errwrap.Wrapf("error establishing strorage client: {{err}}", err) } // check client connectivity by getting bucket attributes - _, err = client.Bucket(bucketName).Attrs(context.Background()) + _, err = client.Bucket(bucketName).Attrs(ctx) if err != nil { return nil, fmt.Errorf("unable to access bucket '%s': '%v'", bucketName, err) } @@ -81,15 +69,38 @@ func newGCSBackend(conf map[string]string, logger log.Logger) (Backend, error) { g := GCSBackend{ bucketName: bucketName, client: client, - permitPool: NewPermitPool(maxParInt), + permitPool: physical.NewPermitPool(maxParInt), logger: logger, } return &g, nil } +func newGCSClient(ctx context.Context, conf map[string]string, logger log.Logger) (*storage.Client, error) { + // if credentials_file is configured, try to use it + // else use application default credentials + credentialsFile, ok := conf["credentials_file"] + if ok { + client, err 
:= storage.NewClient( + ctx, + option.WithServiceAccountFile(credentialsFile), + ) + + if err != nil { + return nil, fmt.Errorf("error with provided credentials: '%v'", err) + } + return client, nil + } + + client, err := storage.NewClient(ctx) + if err != nil { + return nil, errwrap.Wrapf("error with application default credentials: {{err}}", err) + } + return client, nil +} + // Put is used to insert or update an entry -func (g *GCSBackend) Put(entry *Entry) error { +func (g *GCSBackend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"gcs", "put"}, time.Now()) bucket := g.client.Bucket(g.bucketName) @@ -105,7 +116,7 @@ func (g *GCSBackend) Put(entry *Entry) error { } // Get is used to fetch an entry -func (g *GCSBackend) Get(key string) (*Entry, error) { +func (g *GCSBackend) Get(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"gcs", "get"}, time.Now()) bucket := g.client.Bucket(g.bucketName) @@ -127,7 +138,7 @@ func (g *GCSBackend) Get(key string) (*Entry, error) { return nil, fmt.Errorf("error reading object '%v': '%v'", key, err) } - ent := Entry{ + ent := physical.Entry{ Key: key, Value: value, } diff --git a/vendor/github.com/hashicorp/vault/physical/gcs_test.go b/vendor/github.com/hashicorp/vault/physical/gcs/gcs_test.go similarity index 80% rename from vendor/github.com/hashicorp/vault/physical/gcs_test.go rename to vendor/github.com/hashicorp/vault/physical/gcs/gcs_test.go index 23c4d3a..dda6eed 100644 --- a/vendor/github.com/hashicorp/vault/physical/gcs_test.go +++ b/vendor/github.com/hashicorp/vault/physical/gcs/gcs_test.go @@ -1,4 +1,4 @@ -package physical +package gcs import ( "fmt" @@ -11,16 +11,12 @@ import ( "cloud.google.com/go/storage" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" ) -var ConsistencyDelays = delays{ - beforeList: 5 * time.Second, - beforeGet: 0 * 
time.Second, -} - func TestGCSBackend(t *testing.T) { credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") @@ -54,7 +50,6 @@ func TestGCSBackend(t *testing.T) { defer func() { objects_it := bucket.Objects(context.Background(), nil) - time.Sleep(ConsistencyDelays.beforeList) // have to delete all objects before deleting bucket for { objAttrs, err := objects_it.Next() @@ -70,8 +65,6 @@ func TestGCSBackend(t *testing.T) { bucket.Object(objAttrs.Name).Delete(context.Background()) } - // not a list operation, but google lists to make sure the bucket is empty on delete - time.Sleep(ConsistencyDelays.beforeList) err := bucket.Delete(context.Background()) if err != nil { t.Fatalf("error deleting bucket '%s': '%v'", bucketName, err) @@ -80,16 +73,16 @@ func TestGCSBackend(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("gcs", logger, map[string]string{ + b, err := NewGCSBackend(map[string]string{ "bucket": bucketName, "credentials_file": credentialsFile, - }) + }, logger) if err != nil { t.Fatalf("error creating google cloud storage backend: '%s'", err) } - testEventuallyConsistentBackend(t, b, ConsistencyDelays) - testEventuallyConsistentBackend_ListPrefix(t, b, ConsistencyDelays) + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) } diff --git a/vendor/github.com/hashicorp/vault/physical/cache_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/cache_test.go similarity index 75% rename from vendor/github.com/hashicorp/vault/physical/cache_test.go rename to vendor/github.com/hashicorp/vault/physical/inmem/cache_test.go index 151cf99..c771f03 100644 --- a/vendor/github.com/hashicorp/vault/physical/cache_test.go +++ b/vendor/github.com/hashicorp/vault/physical/inmem/cache_test.go @@ -1,32 +1,39 @@ -package physical +package inmem import ( "testing" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" ) func TestCache(t 
*testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - inm := NewInmem(logger) - cache := NewCache(inm, 0, logger) - testBackend(t, cache) - testBackend_ListPrefix(t, cache) + inm, err := NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + cache := physical.NewCache(inm, 0, logger) + physical.ExerciseBackend(t, cache) + physical.ExerciseBackend_ListPrefix(t, cache) } func TestCache_Purge(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - inm := NewInmem(logger) - cache := NewCache(inm, 0, logger) + inm, err := NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + cache := physical.NewCache(inm, 0, logger) - ent := &Entry{ + ent := &physical.Entry{ Key: "foo", Value: []byte("bar"), } - err := cache.Put(ent) + err = cache.Put(ent) if err != nil { t.Fatalf("err: %v", err) } @@ -59,21 +66,24 @@ func TestCache_Purge(t *testing.T) { func TestCache_IgnoreCore(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - inm := NewInmem(logger) - cache := NewCache(inm, 0, logger) + inm, err := NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } - var ent *Entry - var err error + cache := physical.NewCache(inm, 0, logger) + + var ent *physical.Entry // First try normal handling - ent = &Entry{ + ent = &physical.Entry{ Key: "foo", Value: []byte("bar"), } if err := cache.Put(ent); err != nil { t.Fatal(err) } - ent = &Entry{ + ent = &physical.Entry{ Key: "foo", Value: []byte("foobar"), } @@ -89,14 +99,14 @@ func TestCache_IgnoreCore(t *testing.T) { } // Now try core path - ent = &Entry{ + ent = &physical.Entry{ Key: "core/foo", Value: []byte("bar"), } if err := cache.Put(ent); err != nil { t.Fatal(err) } - ent = &Entry{ + ent = &physical.Entry{ Key: "core/foo", Value: []byte("foobar"), } @@ -112,7 +122,7 @@ func TestCache_IgnoreCore(t *testing.T) { } // Now make sure looked-up values aren't added - ent = &Entry{ + ent = &physical.Entry{ Key: "core/zip", Value: []byte("zap"), } @@ -126,7 +136,7 @@ func 
TestCache_IgnoreCore(t *testing.T) { if string(ent.Value) != "zap" { t.Fatal("expected non-cached value") } - ent = &Entry{ + ent = &physical.Entry{ Key: "core/zip", Value: []byte("zipzap"), } diff --git a/vendor/github.com/hashicorp/vault/physical/inmem.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem.go similarity index 73% rename from vendor/github.com/hashicorp/vault/physical/inmem.go rename to vendor/github.com/hashicorp/vault/physical/inmem/inmem.go index 47f18eb..d4f9201 100644 --- a/vendor/github.com/hashicorp/vault/physical/inmem.go +++ b/vendor/github.com/hashicorp/vault/physical/inmem/inmem.go @@ -1,9 +1,10 @@ -package physical +package inmem import ( "strings" "sync" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" "github.com/armon/go-radix" @@ -15,7 +16,7 @@ import ( type InmemBackend struct { sync.RWMutex root *radix.Tree - permitPool *PermitPool + permitPool *physical.PermitPool logger log.Logger } @@ -24,30 +25,30 @@ type TransactionalInmemBackend struct { } // NewInmem constructs a new in-memory backend -func NewInmem(logger log.Logger) *InmemBackend { +func NewInmem(_ map[string]string, logger log.Logger) (physical.Backend, error) { in := &InmemBackend{ root: radix.New(), - permitPool: NewPermitPool(DefaultParallelOperations), + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), logger: logger, } - return in + return in, nil } // Basically for now just creates a permit pool of size 1 so only one operation // can run at a time -func NewTransactionalInmem(logger log.Logger) *TransactionalInmemBackend { +func NewTransactionalInmem(_ map[string]string, logger log.Logger) (physical.Backend, error) { in := &TransactionalInmemBackend{ InmemBackend: InmemBackend{ root: radix.New(), - permitPool: NewPermitPool(1), + permitPool: physical.NewPermitPool(1), logger: logger, }, } - return in + return in, nil } // Put is used to insert or update an entry -func (i *InmemBackend) Put(entry *Entry) error { 
+func (i *InmemBackend) Put(entry *physical.Entry) error { i.permitPool.Acquire() defer i.permitPool.Release() @@ -57,13 +58,13 @@ func (i *InmemBackend) Put(entry *Entry) error { return i.PutInternal(entry) } -func (i *InmemBackend) PutInternal(entry *Entry) error { +func (i *InmemBackend) PutInternal(entry *physical.Entry) error { i.root.Insert(entry.Key, entry) return nil } // Get is used to fetch an entry -func (i *InmemBackend) Get(key string) (*Entry, error) { +func (i *InmemBackend) Get(key string) (*physical.Entry, error) { i.permitPool.Acquire() defer i.permitPool.Release() @@ -73,9 +74,9 @@ func (i *InmemBackend) Get(key string) (*Entry, error) { return i.GetInternal(key) } -func (i *InmemBackend) GetInternal(key string) (*Entry, error) { +func (i *InmemBackend) GetInternal(key string) (*physical.Entry, error) { if raw, ok := i.root.Get(key); ok { - return raw.(*Entry), nil + return raw.(*physical.Entry), nil } return nil, nil } @@ -131,12 +132,12 @@ func (i *InmemBackend) ListInternal(prefix string) ([]string, error) { } // Implements the transaction interface -func (t *TransactionalInmemBackend) Transaction(txns []TxnEntry) error { +func (t *TransactionalInmemBackend) Transaction(txns []physical.TxnEntry) error { t.permitPool.Acquire() defer t.permitPool.Release() t.Lock() defer t.Unlock() - return genericTransactionHandler(t, txns) + return physical.GenericTransactionHandler(t, txns) } diff --git a/vendor/github.com/hashicorp/vault/physical/inmem_ha.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go similarity index 81% rename from vendor/github.com/hashicorp/vault/physical/inmem_ha.go rename to vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go index bc691c5..d322da2 100644 --- a/vendor/github.com/hashicorp/vault/physical/inmem_ha.go +++ b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go @@ -1,14 +1,15 @@ -package physical +package inmem import ( "fmt" "sync" + "github.com/hashicorp/vault/physical" log 
"github.com/mgutz/logxi/v1" ) type InmemHABackend struct { - Backend + physical.Backend locks map[string]string l sync.Mutex cond *sync.Cond @@ -16,23 +17,31 @@ type InmemHABackend struct { } type TransactionalInmemHABackend struct { - Transactional + physical.Transactional InmemHABackend } // NewInmemHA constructs a new in-memory HA backend. This is only for testing. -func NewInmemHA(logger log.Logger) *InmemHABackend { +func NewInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) { + be, err := NewInmem(nil, logger) + if err != nil { + return nil, err + } + in := &InmemHABackend{ - Backend: NewInmem(logger), + Backend: be, locks: make(map[string]string), logger: logger, } in.cond = sync.NewCond(&in.l) - return in + return in, nil } -func NewTransactionalInmemHA(logger log.Logger) *TransactionalInmemHABackend { - transInmem := NewTransactionalInmem(logger) +func NewTransactionalInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) { + transInmem, err := NewTransactionalInmem(nil, logger) + if err != nil { + return nil, err + } inmemHA := InmemHABackend{ Backend: transInmem, locks: make(map[string]string), @@ -41,14 +50,14 @@ func NewTransactionalInmemHA(logger log.Logger) *TransactionalInmemHABackend { in := &TransactionalInmemHABackend{ InmemHABackend: inmemHA, - Transactional: transInmem, + Transactional: transInmem.(physical.Transactional), } in.cond = sync.NewCond(&in.l) - return in + return in, nil } // LockWith is used for mutual exclusion based on the given key. 
-func (i *InmemHABackend) LockWith(key, value string) (Lock, error) { +func (i *InmemHABackend) LockWith(key, value string) (physical.Lock, error) { l := &InmemLock{ in: i, key: key, diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha_test.go new file mode 100644 index 0000000..8288595 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha_test.go @@ -0,0 +1,19 @@ +package inmem + +import ( + "testing" + + "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" + log "github.com/mgutz/logxi/v1" +) + +func TestInmemHA(t *testing.T) { + logger := logformat.NewVaultLogger(log.LevelTrace) + + inm, err := NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + physical.ExerciseHABackend(t, inm.(physical.HABackend), inm.(physical.HABackend)) +} diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_test.go new file mode 100644 index 0000000..998061b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_test.go @@ -0,0 +1,20 @@ +package inmem + +import ( + "testing" + + "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" + log "github.com/mgutz/logxi/v1" +) + +func TestInmem(t *testing.T) { + logger := logformat.NewVaultLogger(log.LevelTrace) + + inm, err := NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + physical.ExerciseBackend(t, inm) + physical.ExerciseBackend_ListPrefix(t, inm) +} diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/physical_view_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/physical_view_test.go new file mode 100644 index 0000000..719642a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/inmem/physical_view_test.go @@ -0,0 +1,120 @@ +package inmem + +import ( + "testing" + + "github.com/hashicorp/vault/helper/logformat" + 
"github.com/hashicorp/vault/physical" + log "github.com/mgutz/logxi/v1" +) + +func TestPhysicalView_impl(t *testing.T) { + var _ physical.Backend = new(physical.View) +} + +func newInmemTestBackend() (physical.Backend, error) { + logger := logformat.NewVaultLogger(log.LevelTrace) + return NewInmem(nil, logger) +} + +func TestPhysicalView_BadKeysKeys(t *testing.T) { + backend, err := newInmemTestBackend() + if err != nil { + t.Fatal(err) + } + view := physical.NewView(backend, "foo/") + + _, err = view.List("../") + if err == nil { + t.Fatalf("expected error") + } + + _, err = view.Get("../") + if err == nil { + t.Fatalf("expected error") + } + + err = view.Delete("../foo") + if err == nil { + t.Fatalf("expected error") + } + + le := &physical.Entry{ + Key: "../foo", + Value: []byte("test"), + } + err = view.Put(le) + if err == nil { + t.Fatalf("expected error") + } +} + +func TestPhysicalView(t *testing.T) { + backend, err := newInmemTestBackend() + if err != nil { + t.Fatal(err) + } + + view := physical.NewView(backend, "foo/") + + // Write a key outside of foo/ + entry := &physical.Entry{Key: "test", Value: []byte("test")} + if err := backend.Put(entry); err != nil { + t.Fatalf("bad: %v", err) + } + + // List should have no visibility + keys, err := view.List("") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(keys) != 0 { + t.Fatalf("bad: %v", err) + } + + // Get should have no visibility + out, err := view.Get("test") + if err != nil { + t.Fatalf("err: %v", err) + } + if out != nil { + t.Fatalf("bad: %v", out) + } + + // Try to put the same entry via the view + if err := view.Put(entry); err != nil { + t.Fatalf("err: %v", err) + } + + // Check it is nested + entry, err = backend.Get("foo/test") + if err != nil { + t.Fatalf("err: %v", err) + } + if entry == nil { + t.Fatalf("missing nested foo/test") + } + + // Delete nested + if err := view.Delete("test"); err != nil { + t.Fatalf("err: %v", err) + } + + // Check the nested key + entry, err = 
backend.Get("foo/test") + if err != nil { + t.Fatalf("err: %v", err) + } + if entry != nil { + t.Fatalf("nested foo/test should be gone") + } + + // Check the non-nested key + entry, err = backend.Get("test") + if err != nil { + t.Fatalf("err: %v", err) + } + if entry == nil { + t.Fatalf("root test missing") + } +} diff --git a/vendor/github.com/hashicorp/vault/physical/transactions_test.go b/vendor/github.com/hashicorp/vault/physical/inmem/transactions_test.go similarity index 53% rename from vendor/github.com/hashicorp/vault/physical/transactions_test.go rename to vendor/github.com/hashicorp/vault/physical/inmem/transactions_test.go index e365a95..5565fbe 100644 --- a/vendor/github.com/hashicorp/vault/physical/transactions_test.go +++ b/vendor/github.com/hashicorp/vault/physical/inmem/transactions_test.go @@ -1,4 +1,4 @@ -package physical +package inmem import ( "fmt" @@ -8,6 +8,7 @@ import ( radix "github.com/armon/go-radix" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" ) @@ -16,11 +17,11 @@ type faultyPseudo struct { faultyPaths map[string]struct{} } -func (f *faultyPseudo) Get(key string) (*Entry, error) { +func (f *faultyPseudo) Get(key string) (*physical.Entry, error) { return f.underlying.Get(key) } -func (f *faultyPseudo) Put(entry *Entry) error { +func (f *faultyPseudo) Put(entry *physical.Entry) error { return f.underlying.Put(entry) } @@ -28,14 +29,14 @@ func (f *faultyPseudo) Delete(key string) error { return f.underlying.Delete(key) } -func (f *faultyPseudo) GetInternal(key string) (*Entry, error) { +func (f *faultyPseudo) GetInternal(key string) (*physical.Entry, error) { if _, ok := f.faultyPaths[key]; ok { return nil, fmt.Errorf("fault") } return f.underlying.GetInternal(key) } -func (f *faultyPseudo) PutInternal(entry *Entry) error { +func (f *faultyPseudo) PutInternal(entry *physical.Entry) error { if _, ok := f.faultyPaths[entry.Key]; ok { return fmt.Errorf("fault") } @@ 
-53,21 +54,21 @@ func (f *faultyPseudo) List(prefix string) ([]string, error) { return f.underlying.List(prefix) } -func (f *faultyPseudo) Transaction(txns []TxnEntry) error { +func (f *faultyPseudo) Transaction(txns []physical.TxnEntry) error { f.underlying.permitPool.Acquire() defer f.underlying.permitPool.Release() f.underlying.Lock() defer f.underlying.Unlock() - return genericTransactionHandler(f, txns) + return physical.GenericTransactionHandler(f, txns) } func newFaultyPseudo(logger log.Logger, faultyPaths []string) *faultyPseudo { out := &faultyPseudo{ underlying: InmemBackend{ root: radix.New(), - permitPool: NewPermitPool(1), + permitPool: physical.NewPermitPool(1), logger: logger, }, faultyPaths: make(map[string]struct{}, len(faultyPaths)), @@ -81,68 +82,22 @@ func newFaultyPseudo(logger log.Logger, faultyPaths []string) *faultyPseudo { func TestPseudo_Basic(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) p := newFaultyPseudo(logger, nil) - testBackend(t, p) - testBackend_ListPrefix(t, p) + physical.ExerciseBackend(t, p) + physical.ExerciseBackend_ListPrefix(t, p) } func TestPseudo_SuccessfulTransaction(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) p := newFaultyPseudo(logger, nil) - txns := setupPseudo(p, t) - - if err := p.Transaction(txns); err != nil { - t.Fatal(err) - } - - keys, err := p.List("") - if err != nil { - t.Fatal(err) - } - - expected := []string{"foo", "zip"} - - sort.Strings(keys) - sort.Strings(expected) - if !reflect.DeepEqual(keys, expected) { - t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys) - } - - entry, err := p.Get("foo") - if err != nil { - t.Fatal(err) - } - if entry == nil { - t.Fatal("got nil entry") - } - if entry.Value == nil { - t.Fatal("got nil value") - } - if string(entry.Value) != "bar3" { - t.Fatal("updates did not apply correctly") - } - - entry, err = p.Get("zip") - if err != nil { - t.Fatal(err) - } - if entry == nil { - t.Fatal("got nil entry") - } - if 
entry.Value == nil { - t.Fatal("got nil value") - } - if string(entry.Value) != "zap3" { - t.Fatal("updates did not apply correctly") - } + physical.ExerciseTransactionalBackend(t, p) } func TestPseudo_FailedTransaction(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) p := newFaultyPseudo(logger, []string{"zip"}) - txns := setupPseudo(p, t) - + txns := physical.SetupTestingTransactions(t, p) if err := p.Transaction(txns); err == nil { t.Fatal("expected error during transaction") } @@ -188,67 +143,3 @@ func TestPseudo_FailedTransaction(t *testing.T) { t.Fatal("values did not rollback correctly") } } - -func setupPseudo(p *faultyPseudo, t *testing.T) []TxnEntry { - // Add a few keys so that we test rollback with deletion - if err := p.Put(&Entry{ - Key: "foo", - Value: []byte("bar"), - }); err != nil { - t.Fatal(err) - } - if err := p.Put(&Entry{ - Key: "zip", - Value: []byte("zap"), - }); err != nil { - t.Fatal(err) - } - if err := p.Put(&Entry{ - Key: "deleteme", - }); err != nil { - t.Fatal(err) - } - if err := p.Put(&Entry{ - Key: "deleteme2", - }); err != nil { - t.Fatal(err) - } - - txns := []TxnEntry{ - TxnEntry{ - Operation: PutOperation, - Entry: &Entry{ - Key: "foo", - Value: []byte("bar2"), - }, - }, - TxnEntry{ - Operation: DeleteOperation, - Entry: &Entry{ - Key: "deleteme", - }, - }, - TxnEntry{ - Operation: PutOperation, - Entry: &Entry{ - Key: "foo", - Value: []byte("bar3"), - }, - }, - TxnEntry{ - Operation: DeleteOperation, - Entry: &Entry{ - Key: "deleteme2", - }, - }, - TxnEntry{ - Operation: PutOperation, - Entry: &Entry{ - Key: "zip", - Value: []byte("zap3"), - }, - }, - } - - return txns -} diff --git a/vendor/github.com/hashicorp/vault/physical/inmem_ha_test.go b/vendor/github.com/hashicorp/vault/physical/inmem_ha_test.go deleted file mode 100644 index 102f85b..0000000 --- a/vendor/github.com/hashicorp/vault/physical/inmem_ha_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package physical - -import ( - "testing" - - 
"github.com/hashicorp/vault/helper/logformat" - log "github.com/mgutz/logxi/v1" -) - -func TestInmemHA(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) - - inm := NewInmemHA(logger) - testHABackend(t, inm, inm) -} diff --git a/vendor/github.com/hashicorp/vault/physical/inmem_test.go b/vendor/github.com/hashicorp/vault/physical/inmem_test.go deleted file mode 100644 index 7c3c788..0000000 --- a/vendor/github.com/hashicorp/vault/physical/inmem_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package physical - -import ( - "testing" - - "github.com/hashicorp/vault/helper/logformat" - log "github.com/mgutz/logxi/v1" -) - -func TestInmem(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) - - inm := NewInmem(logger) - testBackend(t, inm) - testBackend_ListPrefix(t, inm) -} diff --git a/vendor/github.com/hashicorp/vault/physical/latency.go b/vendor/github.com/hashicorp/vault/physical/latency.go new file mode 100644 index 0000000..3253036 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/latency.go @@ -0,0 +1,90 @@ +package physical + +import ( + "math/rand" + "time" + + log "github.com/mgutz/logxi/v1" +) + +const ( + // DefaultJitterPercent is used if no cache size is specified for NewCache + DefaultJitterPercent = 20 +) + +// LatencyInjector is used to add latency into underlying physical requests +type LatencyInjector struct { + backend Backend + latency time.Duration + jitterPercent int + random *rand.Rand +} + +// TransactionalLatencyInjector is the transactional version of the latency +// injector +type TransactionalLatencyInjector struct { + *LatencyInjector + Transactional +} + +// NewLatencyInjector returns a wrapped physical backend to simulate latency +func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector { + if jitter < 0 || jitter > 100 { + jitter = DefaultJitterPercent + } + logger.Info("physical/latency: creating latency injector") + + return &LatencyInjector{ + 
backend: b, + latency: latency, + jitterPercent: jitter, + random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + } +} + +// NewTransactionalLatencyInjector creates a new transactional LatencyInjector +func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector { + return &TransactionalLatencyInjector{ + LatencyInjector: NewLatencyInjector(b, latency, jitter, logger), + Transactional: b.(Transactional), + } +} + +func (l *LatencyInjector) addLatency() { + // Calculate a value between 1 +- jitter% + min := 100 - l.jitterPercent + max := 100 + l.jitterPercent + percent := l.random.Intn(max-min) + min + latencyDuration := time.Duration(int(l.latency) * percent / 100) + time.Sleep(latencyDuration) +} + +// Put is a latent put request +func (l *LatencyInjector) Put(entry *Entry) error { + l.addLatency() + return l.backend.Put(entry) +} + +// Get is a latent get request +func (l *LatencyInjector) Get(key string) (*Entry, error) { + l.addLatency() + return l.backend.Get(key) +} + +// Delete is a latent delete request +func (l *LatencyInjector) Delete(key string) error { + l.addLatency() + return l.backend.Delete(key) +} + +// List is a latent list request +func (l *LatencyInjector) List(prefix string) ([]string, error) { + l.addLatency() + return l.backend.List(prefix) +} + +// Transaction is a latent transaction request +func (l *TransactionalLatencyInjector) Transaction(txns []TxnEntry) error { + l.addLatency() + return l.Transactional.Transaction(txns) +} diff --git a/vendor/github.com/hashicorp/vault/physical/mssql.go b/vendor/github.com/hashicorp/vault/physical/mssql/mssql.go similarity index 75% rename from vendor/github.com/hashicorp/vault/physical/mssql.go rename to vendor/github.com/hashicorp/vault/physical/mssql/mssql.go index 25709a2..16228d6 100644 --- a/vendor/github.com/hashicorp/vault/physical/mssql.go +++ b/vendor/github.com/hashicorp/vault/physical/mssql/mssql.go @@ -1,25 
+1,30 @@ -package physical +package mssql import ( "database/sql" "fmt" "sort" + "strconv" "strings" "time" "github.com/armon/go-metrics" _ "github.com/denisenkom/go-mssqldb" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" ) -type MsSQLBackend struct { +type MSSQLBackend struct { dbTable string client *sql.DB statements map[string]*sql.Stmt logger log.Logger + permitPool *physical.PermitPool } -func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) { +func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { username, ok := conf["username"] if !ok { username = "" @@ -35,6 +40,21 @@ func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) return nil, fmt.Errorf("missing server") } + maxParStr, ok := conf["max_parallel"] + var maxParInt int + var err error + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + } + if logger.IsDebug() { + logger.Debug("mysql: max_parallel set", "max_parallel", maxParInt) + } + } else { + maxParInt = physical.DefaultParallelOperations + } + database, ok := conf["database"] if !ok { database = "Vault" @@ -79,6 +99,8 @@ func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) return nil, fmt.Errorf("failed to connect to mssql: %v", err) } + db.SetMaxOpenConns(maxParInt) + if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = '" + database + "') CREATE DATABASE " + database); err != nil { return nil, fmt.Errorf("failed to create mssql database: %v", err) } @@ -110,11 +132,12 @@ func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) return nil, fmt.Errorf("failed to create mssql table: %v", err) } - m := &MsSQLBackend{ + m := &MSSQLBackend{ dbTable: dbTable, client: db, statements: 
make(map[string]*sql.Stmt), logger: logger, + permitPool: physical.NewPermitPool(maxParInt), } statements := map[string]string{ @@ -134,7 +157,7 @@ func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) return m, nil } -func (m *MsSQLBackend) prepare(name, query string) error { +func (m *MSSQLBackend) prepare(name, query string) error { stmt, err := m.client.Prepare(query) if err != nil { return fmt.Errorf("failed to prepare '%s': %v", name, err) @@ -145,9 +168,12 @@ func (m *MsSQLBackend) prepare(name, query string) error { return nil } -func (m *MsSQLBackend) Put(entry *Entry) error { +func (m *MSSQLBackend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"mssql", "put"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + _, err := m.statements["put"].Exec(entry.Key, entry.Value, entry.Key, entry.Key, entry.Value) if err != nil { return err @@ -156,9 +182,12 @@ func (m *MsSQLBackend) Put(entry *Entry) error { return nil } -func (m *MsSQLBackend) Get(key string) (*Entry, error) { +func (m *MSSQLBackend) Get(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"mssql", "get"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + var result []byte err := m.statements["get"].QueryRow(key).Scan(&result) if err == sql.ErrNoRows { @@ -169,7 +198,7 @@ func (m *MsSQLBackend) Get(key string) (*Entry, error) { return nil, err } - ent := &Entry{ + ent := &physical.Entry{ Key: key, Value: result, } @@ -177,9 +206,12 @@ func (m *MsSQLBackend) Get(key string) (*Entry, error) { return ent, nil } -func (m *MsSQLBackend) Delete(key string) error { +func (m *MSSQLBackend) Delete(key string) error { defer metrics.MeasureSince([]string{"mssql", "delete"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + _, err := m.statements["delete"].Exec(key) if err != nil { return err @@ -188,12 +220,17 @@ func (m *MsSQLBackend) Delete(key string) error { return nil } 
-func (m *MsSQLBackend) List(prefix string) ([]string, error) { +func (m *MSSQLBackend) List(prefix string) ([]string, error) { defer metrics.MeasureSince([]string{"mssql", "list"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + likePrefix := prefix + "%" rows, err := m.statements["list"].Query(likePrefix) - + if err != nil { + return nil, err + } var keys []string for rows.Next() { var key string @@ -206,7 +243,7 @@ func (m *MsSQLBackend) List(prefix string) ([]string, error) { if i := strings.Index(key, "/"); i == -1 { keys = append(keys, key) } else if i != -1 { - keys = appendIfMissing(keys, string(key[:i+1])) + keys = strutil.AppendIfMissing(keys, string(key[:i+1])) } } diff --git a/vendor/github.com/hashicorp/vault/physical/mssql_test.go b/vendor/github.com/hashicorp/vault/physical/mssql/mssql_test.go similarity index 77% rename from vendor/github.com/hashicorp/vault/physical/mssql_test.go rename to vendor/github.com/hashicorp/vault/physical/mssql/mssql_test.go index 11f4684..7e1446e 100644 --- a/vendor/github.com/hashicorp/vault/physical/mssql_test.go +++ b/vendor/github.com/hashicorp/vault/physical/mssql/mssql_test.go @@ -1,16 +1,17 @@ -package physical +package mssql import ( "os" "testing" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" _ "github.com/denisenkom/go-mssqldb" ) -func TestMsSQLBackend(t *testing.T) { +func TestMSSQLBackend(t *testing.T) { server := os.Getenv("MSSQL_SERVER") if server == "" { t.SkipNow() @@ -32,27 +33,26 @@ func TestMsSQLBackend(t *testing.T) { // Run vault tests logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("mssql", logger, map[string]string{ + b, err := NewMSSQLBackend(map[string]string{ "server": server, "database": database, "table": table, "username": username, "password": password, - }) + }, logger) if err != nil { t.Fatalf("Failed to create new backend: %v", err) } defer func() { - mssql := 
b.(*MsSQLBackend) + mssql := b.(*MSSQLBackend) _, err := mssql.client.Exec("DROP TABLE " + mssql.dbTable) if err != nil { t.Fatalf("Failed to drop table: %v", err) } }() - testBackend(t, b) - testBackend_ListPrefix(t, b) - + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) } diff --git a/vendor/github.com/hashicorp/vault/physical/mysql.go b/vendor/github.com/hashicorp/vault/physical/mysql/mysql.go similarity index 81% rename from vendor/github.com/hashicorp/vault/physical/mysql.go rename to vendor/github.com/hashicorp/vault/physical/mysql/mysql.go index ce13514..87daa9a 100644 --- a/vendor/github.com/hashicorp/vault/physical/mysql.go +++ b/vendor/github.com/hashicorp/vault/physical/mysql/mysql.go @@ -1,4 +1,4 @@ -package physical +package mysql import ( "crypto/tls" @@ -8,6 +8,7 @@ import ( "io/ioutil" "net/url" "sort" + "strconv" "strings" "time" @@ -15,6 +16,9 @@ import ( "github.com/armon/go-metrics" mysql "github.com/go-sql-driver/mysql" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/physical" ) // Unreserved tls key @@ -28,11 +32,14 @@ type MySQLBackend struct { client *sql.DB statements map[string]*sql.Stmt logger log.Logger + permitPool *physical.PermitPool } -// newMySQLBackend constructs a MySQL backend using the given API client and +// NewMySQLBackend constructs a MySQL backend using the given API client and // server address and credential for accessing mysql database. -func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error) { +func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + var err error + // Get the MySQL credentials to perform read/write operations. username, ok := conf["username"] if !ok || username == "" { @@ -60,6 +67,20 @@ func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error) } dbTable := database + "." 
+ table + maxParStr, ok := conf["max_parallel"] + var maxParInt int + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + } + if logger.IsDebug() { + logger.Debug("mysql: max_parallel set", "max_parallel", maxParInt) + } + } else { + maxParInt = physical.DefaultParallelOperations + } + dsnParams := url.Values{} tlsCaFile, ok := conf["tls_ca_file"] if ok { @@ -77,6 +98,8 @@ func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error) return nil, fmt.Errorf("failed to connect to mysql: %v", err) } + db.SetMaxOpenConns(maxParInt) + // Create the required database if it doesn't exists. if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS " + database); err != nil { return nil, fmt.Errorf("failed to create mysql database: %v", err) @@ -95,6 +118,7 @@ func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error) client: db, statements: make(map[string]*sql.Stmt), logger: logger, + permitPool: physical.NewPermitPool(maxParInt), } // Prepare all the statements required @@ -110,6 +134,7 @@ func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error) return nil, err } } + return m, nil } @@ -124,9 +149,12 @@ func (m *MySQLBackend) prepare(name, query string) error { } // Put is used to insert or update an entry. -func (m *MySQLBackend) Put(entry *Entry) error { +func (m *MySQLBackend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"mysql", "put"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + _, err := m.statements["put"].Exec(entry.Key, entry.Value) if err != nil { return err @@ -135,9 +163,12 @@ func (m *MySQLBackend) Put(entry *Entry) error { } // Get is used to fetch and entry. 
-func (m *MySQLBackend) Get(key string) (*Entry, error) { +func (m *MySQLBackend) Get(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"mysql", "get"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + var result []byte err := m.statements["get"].QueryRow(key).Scan(&result) if err == sql.ErrNoRows { @@ -147,7 +178,7 @@ func (m *MySQLBackend) Get(key string) (*Entry, error) { return nil, err } - ent := &Entry{ + ent := &physical.Entry{ Key: key, Value: result, } @@ -158,6 +189,9 @@ func (m *MySQLBackend) Get(key string) (*Entry, error) { func (m *MySQLBackend) Delete(key string) error { defer metrics.MeasureSince([]string{"mysql", "delete"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + _, err := m.statements["delete"].Exec(key) if err != nil { return err @@ -170,6 +204,9 @@ func (m *MySQLBackend) Delete(key string) error { func (m *MySQLBackend) List(prefix string) ([]string, error) { defer metrics.MeasureSince([]string{"mysql", "list"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + // Add the % wildcard to the prefix to do the prefix search likePrefix := prefix + "%" rows, err := m.statements["list"].Query(likePrefix) @@ -191,7 +228,7 @@ func (m *MySQLBackend) List(prefix string) ([]string, error) { keys = append(keys, key) } else if i != -1 { // Add truncated 'folder' paths - keys = appendIfMissing(keys, string(key[:i+1])) + keys = strutil.AppendIfMissing(keys, string(key[:i+1])) } } diff --git a/vendor/github.com/hashicorp/vault/physical/mysql_test.go b/vendor/github.com/hashicorp/vault/physical/mysql/mysql_test.go similarity index 83% rename from vendor/github.com/hashicorp/vault/physical/mysql_test.go rename to vendor/github.com/hashicorp/vault/physical/mysql/mysql_test.go index 1eabd9f..ecf8431 100644 --- a/vendor/github.com/hashicorp/vault/physical/mysql_test.go +++ b/vendor/github.com/hashicorp/vault/physical/mysql/mysql_test.go @@ -1,10 +1,11 @@ -package physical 
+package mysql import ( "os" "testing" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" _ "github.com/go-sql-driver/mysql" @@ -32,13 +33,13 @@ func TestMySQLBackend(t *testing.T) { // Run vault tests logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("mysql", logger, map[string]string{ + b, err := NewMySQLBackend(map[string]string{ "address": address, "database": database, "table": table, "username": username, "password": password, - }) + }, logger) if err != nil { t.Fatalf("Failed to create new backend: %v", err) @@ -52,7 +53,6 @@ func TestMySQLBackend(t *testing.T) { } }() - testBackend(t, b) - testBackend_ListPrefix(t, b) - + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) } diff --git a/vendor/github.com/hashicorp/vault/physical/physical.go b/vendor/github.com/hashicorp/vault/physical/physical.go index b35d281..088a86b 100644 --- a/vendor/github.com/hashicorp/vault/physical/physical.go +++ b/vendor/github.com/hashicorp/vault/physical/physical.go @@ -1,7 +1,7 @@ package physical import ( - "fmt" + "strings" "sync" log "github.com/mgutz/logxi/v1" @@ -70,8 +70,8 @@ type RedirectDetect interface { } // Callback signatures for RunServiceDiscovery -type activeFunction func() bool -type sealedFunction func() bool +type ActiveFunction func() bool +type SealedFunction func() bool // ServiceDiscovery is an optional interface that an HABackend can implement. // If they do, the state of a backend is advertised to the service discovery @@ -89,7 +89,7 @@ type ServiceDiscovery interface { // Run executes any background service discovery tasks until the // shutdown channel is closed. 
- RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc activeFunction, sealedFunc sealedFunction) error + RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc ActiveFunction, sealedFunc SealedFunction) error } type Lock interface { @@ -115,46 +115,6 @@ type Entry struct { // Factory is the factory function to create a physical backend. type Factory func(config map[string]string, logger log.Logger) (Backend, error) -// NewBackend returns a new backend with the given type and configuration. -// The backend is looked up in the builtinBackends variable. -func NewBackend(t string, logger log.Logger, conf map[string]string) (Backend, error) { - f, ok := builtinBackends[t] - if !ok { - return nil, fmt.Errorf("unknown physical backend type: %s", t) - } - return f(conf, logger) -} - -// BuiltinBackends is the list of built-in physical backends that can -// be used with NewBackend. -var builtinBackends = map[string]Factory{ - "inmem": func(_ map[string]string, logger log.Logger) (Backend, error) { - return NewInmem(logger), nil - }, - "inmem_transactional": func(_ map[string]string, logger log.Logger) (Backend, error) { - return NewTransactionalInmem(logger), nil - }, - "inmem_ha": func(_ map[string]string, logger log.Logger) (Backend, error) { - return NewInmemHA(logger), nil - }, - "inmem_transactional_ha": func(_ map[string]string, logger log.Logger) (Backend, error) { - return NewTransactionalInmemHA(logger), nil - }, - "file_transactional": newTransactionalFileBackend, - "consul": newConsulBackend, - "zookeeper": newZookeeperBackend, - "file": newFileBackend, - "s3": newS3Backend, - "azure": newAzureBackend, - "dynamodb": newDynamoDBBackend, - "etcd": newEtcdBackend, - "mssql": newMsSQLBackend, - "mysql": newMySQLBackend, - "postgresql": newPostgreSQLBackend, - "swift": newSwiftBackend, - "gcs": newGCSBackend, -} - // PermitPool is used to limit maximum 
outstanding requests type PermitPool struct { sem chan int @@ -180,3 +140,15 @@ func (c *PermitPool) Acquire() { func (c *PermitPool) Release() { <-c.sem } + +// Prefixes is a shared helper function returns all parent 'folders' for a +// given vault key. +// e.g. for 'foo/bar/baz', it returns ['foo', 'foo/bar'] +func Prefixes(s string) []string { + components := strings.Split(s, "/") + result := []string{} + for i := 1; i < len(components); i++ { + result = append(result, strings.Join(components[:i], "/")) + } + return result +} diff --git a/vendor/github.com/hashicorp/vault/physical/physical_view.go b/vendor/github.com/hashicorp/vault/physical/physical_view.go new file mode 100644 index 0000000..38c16e5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/physical/physical_view.go @@ -0,0 +1,94 @@ +package physical + +import ( + "errors" + "strings" +) + +var ( + ErrRelativePath = errors.New("relative paths not supported") +) + +// View represents a prefixed view of a physical backend +type View struct { + backend Backend + prefix string +} + +// NewView takes an underlying physical backend and returns +// a view of it that can only operate with the given prefix. 
+func NewView(backend Backend, prefix string) *View { + return &View{ + backend: backend, + prefix: prefix, + } +} + +// List the contents of the prefixed view +func (v *View) List(prefix string) ([]string, error) { + if err := v.sanityCheck(prefix); err != nil { + return nil, err + } + return v.backend.List(v.expandKey(prefix)) +} + +// Get the key of the prefixed view +func (v *View) Get(key string) (*Entry, error) { + if err := v.sanityCheck(key); err != nil { + return nil, err + } + entry, err := v.backend.Get(v.expandKey(key)) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + if entry != nil { + entry.Key = v.truncateKey(entry.Key) + } + + return &Entry{ + Key: entry.Key, + Value: entry.Value, + }, nil +} + +// Put the entry into the prefix view +func (v *View) Put(entry *Entry) error { + if err := v.sanityCheck(entry.Key); err != nil { + return err + } + + nested := &Entry{ + Key: v.expandKey(entry.Key), + Value: entry.Value, + } + return v.backend.Put(nested) +} + +// Delete the entry from the prefix view +func (v *View) Delete(key string) error { + if err := v.sanityCheck(key); err != nil { + return err + } + return v.backend.Delete(v.expandKey(key)) +} + +// sanityCheck is used to perform a sanity check on a key +func (v *View) sanityCheck(key string) error { + if strings.Contains(key, "..") { + return ErrRelativePath + } + return nil +} + +// expandKey is used to expand to the full key path with the prefix +func (v *View) expandKey(suffix string) string { + return v.prefix + suffix +} + +// truncateKey is used to remove the prefix of the key +func (v *View) truncateKey(full string) string { + return strings.TrimPrefix(full, v.prefix) +} diff --git a/vendor/github.com/hashicorp/vault/physical/postgresql.go b/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql.go similarity index 77% rename from vendor/github.com/hashicorp/vault/physical/postgresql.go rename to 
vendor/github.com/hashicorp/vault/physical/postgresql/postgresql.go index 2b11d48..cb35782 100644 --- a/vendor/github.com/hashicorp/vault/physical/postgresql.go +++ b/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql.go @@ -1,11 +1,14 @@ -package physical +package postgresql import ( "database/sql" "fmt" + "strconv" "strings" "time" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" "github.com/armon/go-metrics" @@ -22,11 +25,12 @@ type PostgreSQLBackend struct { delete_query string list_query string logger log.Logger + permitPool *physical.PermitPool } -// newPostgreSQLBackend constructs a PostgreSQL backend using the given +// NewPostgreSQLBackend constructs a PostgreSQL backend using the given // API client, server address, credentials, and database. -func newPostgreSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) { +func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { // Get the PostgreSQL credentials to perform read/write operations. connURL, ok := conf["connection_url"] if !ok || connURL == "" { @@ -39,11 +43,27 @@ func newPostgreSQLBackend(conf map[string]string, logger log.Logger) (Backend, e } quoted_table := pq.QuoteIdentifier(unquoted_table) + maxParStr, ok := conf["max_parallel"] + var maxParInt int + var err error + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + } + if logger.IsDebug() { + logger.Debug("postgres: max_parallel set", "max_parallel", maxParInt) + } + } else { + maxParInt = physical.DefaultParallelOperations + } + // Create PostgreSQL handle for the database. 
db, err := sql.Open("postgres", connURL) if err != nil { return nil, fmt.Errorf("failed to connect to postgres: %v", err) } + db.SetMaxOpenConns(maxParInt) // Determine if we should use an upsert function (versions < 9.5) var upsert_required bool @@ -72,8 +92,9 @@ func newPostgreSQLBackend(conf map[string]string, logger log.Logger) (Backend, e delete_query: "DELETE FROM " + quoted_table + " WHERE path = $1 AND key = $2", list_query: "SELECT key FROM " + quoted_table + " WHERE path = $1" + "UNION SELECT DISTINCT substring(substr(path, length($1)+1) from '^.*?/') FROM " + - quoted_table + " WHERE parent_path LIKE concat($1, '%')", - logger: logger, + quoted_table + " WHERE parent_path LIKE $1 || '%'", + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), } return m, nil @@ -104,9 +125,12 @@ func (m *PostgreSQLBackend) splitKey(fullPath string) (string, string, string) { } // Put is used to insert or update an entry. -func (m *PostgreSQLBackend) Put(entry *Entry) error { +func (m *PostgreSQLBackend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"postgres", "put"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + parentPath, path, key := m.splitKey(entry.Key) _, err := m.client.Exec(m.put_query, parentPath, path, key, entry.Value) @@ -117,9 +141,12 @@ func (m *PostgreSQLBackend) Put(entry *Entry) error { } // Get is used to fetch and entry. 
-func (m *PostgreSQLBackend) Get(fullPath string) (*Entry, error) { +func (m *PostgreSQLBackend) Get(fullPath string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"postgres", "get"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + _, path, key := m.splitKey(fullPath) var result []byte @@ -131,7 +158,7 @@ func (m *PostgreSQLBackend) Get(fullPath string) (*Entry, error) { return nil, err } - ent := &Entry{ + ent := &physical.Entry{ Key: key, Value: result, } @@ -142,6 +169,9 @@ func (m *PostgreSQLBackend) Get(fullPath string) (*Entry, error) { func (m *PostgreSQLBackend) Delete(fullPath string) error { defer metrics.MeasureSince([]string{"postgres", "delete"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + _, path, key := m.splitKey(fullPath) _, err := m.client.Exec(m.delete_query, path, key) @@ -156,6 +186,9 @@ func (m *PostgreSQLBackend) Delete(fullPath string) error { func (m *PostgreSQLBackend) List(prefix string) ([]string, error) { defer metrics.MeasureSince([]string{"postgres", "list"}, time.Now()) + m.permitPool.Acquire() + defer m.permitPool.Release() + rows, err := m.client.Query(m.list_query, "/"+prefix) if err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/vault/physical/postgresql_test.go b/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql_test.go similarity index 78% rename from vendor/github.com/hashicorp/vault/physical/postgresql_test.go rename to vendor/github.com/hashicorp/vault/physical/postgresql/postgresql_test.go index 5cdaaa0..940d0e2 100644 --- a/vendor/github.com/hashicorp/vault/physical/postgresql_test.go +++ b/vendor/github.com/hashicorp/vault/physical/postgresql/postgresql_test.go @@ -1,10 +1,11 @@ -package physical +package postgresql import ( "os" "testing" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" _ "github.com/lib/pq" @@ -24,11 +25,10 @@ func TestPostgreSQLBackend(t 
*testing.T) { // Run vault tests logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("postgresql", logger, map[string]string{ + b, err := NewPostgreSQLBackend(map[string]string{ "connection_url": connURL, "table": table, - }) - + }, logger) if err != nil { t.Fatalf("Failed to create new backend: %v", err) } @@ -41,7 +41,6 @@ func TestPostgreSQLBackend(t *testing.T) { } }() - testBackend(t, b) - testBackend_ListPrefix(t, b) - + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) } diff --git a/vendor/github.com/hashicorp/vault/physical/s3.go b/vendor/github.com/hashicorp/vault/physical/s3/s3.go similarity index 76% rename from vendor/github.com/hashicorp/vault/physical/s3.go rename to vendor/github.com/hashicorp/vault/physical/s3/s3.go index 8271be7..7118e7d 100644 --- a/vendor/github.com/hashicorp/vault/physical/s3.go +++ b/vendor/github.com/hashicorp/vault/physical/s3/s3.go @@ -1,4 +1,4 @@ -package physical +package s3 import ( "bytes" @@ -22,6 +22,7 @@ import ( cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/helper/awsutil" "github.com/hashicorp/vault/helper/consts" + "github.com/hashicorp/vault/physical" ) // S3Backend is a physical backend that stores data @@ -30,14 +31,13 @@ type S3Backend struct { bucket string client *s3.S3 logger log.Logger - permitPool *PermitPool + permitPool *physical.PermitPool } -// newS3Backend constructs a S3 backend using a pre-existing +// NewS3Backend constructs a S3 backend using a pre-existing // bucket. Credentials can be provided to the backend, sourced // from the environment, AWS credential files or by IAM role. 
-func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) { - +func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) { bucket := os.Getenv("AWS_S3_BUCKET") if bucket == "" { bucket = conf["bucket"] @@ -62,11 +62,14 @@ func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) { if endpoint == "" { endpoint = conf["endpoint"] } - region := os.Getenv("AWS_DEFAULT_REGION") + region := os.Getenv("AWS_REGION") if region == "" { - region = conf["region"] + region = os.Getenv("AWS_DEFAULT_REGION") if region == "" { - region = "us-east-1" + region = conf["region"] + if region == "" { + region = "us-east-1" + } } } @@ -92,9 +95,9 @@ func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) { Region: aws.String(region), })) - _, err = s3conn.HeadBucket(&s3.HeadBucketInput{Bucket: &bucket}) + _, err = s3conn.ListObjects(&s3.ListObjectsInput{Bucket: &bucket}) if err != nil { - return nil, fmt.Errorf("unable to access bucket '%s': %v", bucket, err) + return nil, fmt.Errorf("unable to access bucket '%s' in region %s: %v", bucket, region, err) } maxParStr, ok := conf["max_parallel"] @@ -113,13 +116,13 @@ func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) { client: s3conn, bucket: bucket, logger: logger, - permitPool: NewPermitPool(maxParInt), + permitPool: physical.NewPermitPool(maxParInt), } return s, nil } // Put is used to insert or update an entry -func (s *S3Backend) Put(entry *Entry) error { +func (s *S3Backend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"s3", "put"}, time.Now()) s.permitPool.Acquire() @@ -139,7 +142,7 @@ func (s *S3Backend) Put(entry *Entry) error { } // Get is used to fetch an entry -func (s *S3Backend) Get(key string) (*Entry, error) { +func (s *S3Backend) Get(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"s3", "get"}, time.Now()) s.permitPool.Acquire() @@ -169,7 +172,7 @@ func (s 
*S3Backend) Get(key string) (*Entry, error) { return nil, err } - ent := &Entry{ + ent := &physical.Entry{ Key: key, Value: data, } @@ -205,23 +208,35 @@ func (s *S3Backend) List(prefix string) ([]string, error) { defer s.permitPool.Release() params := &s3.ListObjectsV2Input{ - Bucket: aws.String(s.bucket), - Prefix: aws.String(prefix), + Bucket: aws.String(s.bucket), + Prefix: aws.String(prefix), + Delimiter: aws.String("/"), } keys := []string{} err := s.client.ListObjectsV2Pages(params, func(page *s3.ListObjectsV2Output, lastPage bool) bool { - for _, key := range page.Contents { - key := strings.TrimPrefix(*key.Key, prefix) + if page != nil { + // Add truncated 'folder' paths + for _, commonPrefix := range page.CommonPrefixes { + // Avoid panic + if commonPrefix == nil { + continue + } - if i := strings.Index(key, "/"); i == -1 { - // Add objects only from the current 'folder' + commonPrefix := strings.TrimPrefix(*commonPrefix.Prefix, prefix) + keys = append(keys, commonPrefix) + } + // Add objects only from the current 'folder' + for _, key := range page.Contents { + // Avoid panic + if key == nil { + continue + } + + key := strings.TrimPrefix(*key.Key, prefix) keys = append(keys, key) - } else if i != -1 { - // Add truncated 'folder' paths - keys = appendIfMissing(keys, key[:i+1]) } } return true @@ -235,12 +250,3 @@ func (s *S3Backend) List(prefix string) ([]string, error) { return keys, nil } - -func appendIfMissing(slice []string, i string) []string { - for _, ele := range slice { - if ele == i { - return slice - } - } - return append(slice, i) -} diff --git a/vendor/github.com/hashicorp/vault/physical/s3_test.go b/vendor/github.com/hashicorp/vault/physical/s3/s3_test.go similarity index 75% rename from vendor/github.com/hashicorp/vault/physical/s3_test.go rename to vendor/github.com/hashicorp/vault/physical/s3/s3_test.go index 8fdb882..dbe4c93 100644 --- a/vendor/github.com/hashicorp/vault/physical/s3_test.go +++ 
b/vendor/github.com/hashicorp/vault/physical/s3/s3_test.go @@ -1,4 +1,4 @@ -package physical +package s3 import ( "fmt" @@ -7,23 +7,27 @@ import ( "testing" "time" + "github.com/hashicorp/vault/helper/awsutil" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" ) func TestS3Backend(t *testing.T) { - if os.Getenv("AWS_ACCESS_KEY_ID") == "" || os.Getenv("AWS_SECRET_ACCESS_KEY") == "" { + credsConfig := &awsutil.CredentialsConfig{} + + credsChain, err := credsConfig.GenerateCredentialChain() + if err != nil { t.SkipNow() } - creds, err := credentials.NewEnvCredentials().Get() + _, err = credsChain.Get() if err != nil { - t.Fatalf("err: %v", err) + t.SkipNow() } // If the variable is empty or doesn't exist, the default @@ -36,7 +40,7 @@ func TestS3Backend(t *testing.T) { } s3conn := s3.New(session.New(&aws.Config{ - Credentials: credentials.NewEnvCredentials(), + Credentials: credsChain, Endpoint: aws.String(endpoint), Region: aws.String(region), })) @@ -77,17 +81,14 @@ func TestS3Backend(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("s3", logger, map[string]string{ - "access_key": creds.AccessKeyID, - "secret_key": creds.SecretAccessKey, - "session_token": creds.SessionToken, - "bucket": bucket, - }) + // This uses the same logic to find the AWS credentials as we did at the beginning of the test + b, err := NewS3Backend(map[string]string{ + "bucket": bucket, + }, logger) if err != nil { t.Fatalf("err: %s", err) } - testBackend(t, b) - testBackend_ListPrefix(t, b) - + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) } diff --git a/vendor/github.com/hashicorp/vault/physical/swift.go b/vendor/github.com/hashicorp/vault/physical/swift/swift.go similarity index 87% rename from 
vendor/github.com/hashicorp/vault/physical/swift.go rename to vendor/github.com/hashicorp/vault/physical/swift/swift.go index 0ed4fe6..30d7e66 100644 --- a/vendor/github.com/hashicorp/vault/physical/swift.go +++ b/vendor/github.com/hashicorp/vault/physical/swift/swift.go @@ -1,4 +1,4 @@ -package physical +package swift import ( "fmt" @@ -13,6 +13,8 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/physical" "github.com/ncw/swift" ) @@ -22,13 +24,14 @@ type SwiftBackend struct { container string client *swift.Connection logger log.Logger - permitPool *PermitPool + permitPool *physical.PermitPool } -// newSwiftBackend constructs a Swift backend using a pre-existing +// NewSwiftBackend constructs a Swift backend using a pre-existing // container. Credentials can be provided to the backend, sourced // from the environment. -func newSwiftBackend(conf map[string]string, logger log.Logger) (Backend, error) { +func NewSwiftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + var ok bool username := os.Getenv("OS_USERNAME") if username == "" { @@ -60,11 +63,9 @@ func newSwiftBackend(conf map[string]string, logger log.Logger) (Backend, error) } project := os.Getenv("OS_PROJECT_NAME") if project == "" { - project = conf["project"] - - if project == "" { + if project, ok = conf["project"]; !ok { // Check for KeyStone naming prior to V3 - project := os.Getenv("OS_TENANT_NAME") + project = os.Getenv("OS_TENANT_NAME") if project == "" { project = conf["tenant"] } @@ -116,13 +117,13 @@ func newSwiftBackend(conf map[string]string, logger log.Logger) (Backend, error) client: &c, container: container, logger: logger, - permitPool: NewPermitPool(maxParInt), + permitPool: physical.NewPermitPool(maxParInt), } return s, nil } // Put is used to insert or update an entry -func (s *SwiftBackend) Put(entry *Entry) error { +func (s 
*SwiftBackend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"swift", "put"}, time.Now()) s.permitPool.Acquire() @@ -138,7 +139,7 @@ func (s *SwiftBackend) Put(entry *Entry) error { } // Get is used to fetch an entry -func (s *SwiftBackend) Get(key string) (*Entry, error) { +func (s *SwiftBackend) Get(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"swift", "get"}, time.Now()) s.permitPool.Acquire() @@ -161,7 +162,7 @@ func (s *SwiftBackend) Get(key string) (*Entry, error) { if err != nil { return nil, err } - ent := &Entry{ + ent := &physical.Entry{ Key: key, Value: data, } @@ -207,7 +208,7 @@ func (s *SwiftBackend) List(prefix string) ([]string, error) { keys = append(keys, key) } else if i != -1 { // Add truncated 'folder' paths - keys = appendIfMissing(keys, key[:i+1]) + keys = strutil.AppendIfMissing(keys, key[:i+1]) } } diff --git a/vendor/github.com/hashicorp/vault/physical/swift_test.go b/vendor/github.com/hashicorp/vault/physical/swift/swift_test.go similarity index 90% rename from vendor/github.com/hashicorp/vault/physical/swift_test.go rename to vendor/github.com/hashicorp/vault/physical/swift/swift_test.go index 2da37f0..5aa2ec9 100644 --- a/vendor/github.com/hashicorp/vault/physical/swift_test.go +++ b/vendor/github.com/hashicorp/vault/physical/swift/swift_test.go @@ -1,4 +1,4 @@ -package physical +package swift import ( "fmt" @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" "github.com/ncw/swift" ) @@ -66,7 +67,7 @@ func TestSwiftBackend(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("swift", logger, map[string]string{ + b, err := NewSwiftBackend(map[string]string{ "username": username, "password": password, "container": container, @@ -74,12 +75,11 @@ func TestSwiftBackend(t *testing.T) { "project": project, "domain": domain, "project-domain": projectDomain, - 
}) + }, logger) if err != nil { t.Fatalf("err: %s", err) } - testBackend(t, b) - testBackend_ListPrefix(t, b) - + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) } diff --git a/vendor/github.com/hashicorp/vault/physical/physical_test.go b/vendor/github.com/hashicorp/vault/physical/testing.go similarity index 52% rename from vendor/github.com/hashicorp/vault/physical/physical_test.go rename to vendor/github.com/hashicorp/vault/physical/testing.go index de1b9cb..69f7167 100644 --- a/vendor/github.com/hashicorp/vault/physical/physical_test.go +++ b/vendor/github.com/hashicorp/vault/physical/testing.go @@ -5,30 +5,9 @@ import ( "sort" "testing" "time" - - "github.com/hashicorp/vault/helper/logformat" - log "github.com/mgutz/logxi/v1" ) -func testNewBackend(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) - - _, err := NewBackend("foobar", logger, nil) - if err == nil { - t.Fatalf("expected error") - } - - b, err := NewBackend("inmem", logger, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if b == nil { - t.Fatalf("expected backend") - } -} - -func testBackend(t *testing.T, b Backend) { +func ExerciseBackend(t *testing.T, b Backend) { // Should be empty keys, err := b.List("") if err != nil { @@ -216,7 +195,7 @@ func testBackend(t *testing.T, b Backend) { } } -func testBackend_ListPrefix(t *testing.T, b Backend) { +func ExerciseBackend_ListPrefix(t *testing.T, b Backend) { e1 := &Entry{Key: "foo", Value: []byte("test")} e2 := &Entry{Key: "foo/bar", Value: []byte("test")} e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")} @@ -286,7 +265,7 @@ func testBackend_ListPrefix(t *testing.T, b Backend) { } } -func testHABackend(t *testing.T, b HABackend, b2 HABackend) { +func ExerciseHABackend(t *testing.T, b HABackend, b2 HABackend) { // Get the lock lock, err := b.LockWith("foo", "bar") if err != nil { @@ -362,275 +341,120 @@ func testHABackend(t *testing.T, b HABackend, b2 HABackend) { lock2.Unlock() } -type delays 
struct { - beforeGet time.Duration - beforeList time.Duration -} +func ExerciseTransactionalBackend(t *testing.T, b Backend) { + tb, ok := b.(Transactional) + if !ok { + t.Fatal("Not a transactional backend") + } -func testEventuallyConsistentBackend(t *testing.T, b Backend, d delays) { + txns := SetupTestingTransactions(t, b) + + if err := tb.Transaction(txns); err != nil { + t.Fatal(err) + } - // no delay required: nothing written to bucket - // Should be empty keys, err := b.List("") if err != nil { - t.Fatalf("err: %v", err) - } - if len(keys) != 0 { - t.Fatalf("bad: %v", keys) + t.Fatal(err) } - // Delete should work if it does not exist - err = b.Delete("foo") - if err != nil { - t.Fatalf("err: %v", err) - } + expected := []string{"foo", "zip"} - // no delay required: nothing written to bucket - // Get should fail - out, err := b.Get("foo") - if err != nil { - t.Fatalf("err: %v", err) - } - if out != nil { - t.Fatalf("bad: %v", out) - } - - // Make an entry - e := &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(e) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Get should work - time.Sleep(d.beforeGet) - out, err = b.Get("foo") - if err != nil { - t.Fatalf("err: %v", err) - } - if !reflect.DeepEqual(out, e) { - t.Fatalf("bad: %v expected: %v", out, e) - } - - // List should not be empty - time.Sleep(d.beforeList) - keys, err = b.List("") - if err != nil { - t.Fatalf("err: %v", err) - } - if len(keys) != 1 { - t.Fatalf("bad: %v", keys) - } - if keys[0] != "foo" { - t.Fatalf("bad: %v", keys) - } - - // Delete should work - err = b.Delete("foo") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should be empty - time.Sleep(d.beforeList) - keys, err = b.List("") - if err != nil { - t.Fatalf("err: %v", err) - } - if len(keys) != 0 { - t.Fatalf("bad: %v", keys) - } - - // Get should fail - time.Sleep(d.beforeGet) - out, err = b.Get("foo") - if err != nil { - t.Fatalf("err: %v", err) - } - if out != nil { - t.Fatalf("bad: %v", out) - } - - // 
Multiple Puts should work; GH-189 - e = &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(e) - if err != nil { - t.Fatalf("err: %v", err) - } - e = &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(e) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Make a nested entry - e = &Entry{Key: "foo/bar", Value: []byte("baz")} - err = b.Put(e) - if err != nil { - t.Fatalf("err: %v", err) - } - - time.Sleep(d.beforeList) - keys, err = b.List("") - if err != nil { - t.Fatalf("err: %v", err) - } - if len(keys) != 2 { - t.Fatalf("bad: %v", keys) - } sort.Strings(keys) - if keys[0] != "foo" || keys[1] != "foo/" { - t.Fatalf("bad: %v", keys) + sort.Strings(expected) + if !reflect.DeepEqual(keys, expected) { + t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys) } - // Delete with children should work - err = b.Delete("foo") + entry, err := b.Get("foo") if err != nil { - t.Fatalf("err: %v", err) + t.Fatal(err) + } + if entry == nil { + t.Fatal("got nil entry") + } + if entry.Value == nil { + t.Fatal("got nil value") + } + if string(entry.Value) != "bar3" { + t.Fatal("updates did not apply correctly") } - // Get should return the child - time.Sleep(d.beforeGet) - out, err = b.Get("foo/bar") + entry, err = b.Get("zip") if err != nil { - t.Fatalf("err: %v", err) + t.Fatal(err) } - if out == nil { - t.Fatalf("missing child") + if entry == nil { + t.Fatal("got nil entry") } - - // Removal of nested secret should not leave artifacts - e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")} - err = b.Put(e) - if err != nil { - t.Fatalf("err: %v", err) + if entry.Value == nil { + t.Fatal("got nil value") } - - err = b.Delete("foo/nested1/nested2/nested3") - if err != nil { - t.Fatalf("failed to remove nested secret: %v", err) - } - - time.Sleep(d.beforeList) - keys, err = b.List("foo/") - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(keys) != 1 { - t.Fatalf("there should be only one key left after deleting nested "+ - "secret: %v", 
keys) - } - - if keys[0] != "bar" { - t.Fatalf("bad keys after deleting nested: %v", keys) - } - - // Make a second nested entry to test prefix removal - e = &Entry{Key: "foo/zip", Value: []byte("zap")} - err = b.Put(e) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Delete should not remove the prefix - err = b.Delete("foo/bar") - if err != nil { - t.Fatalf("err: %v", err) - } - - time.Sleep(d.beforeList) - keys, err = b.List("") - if err != nil { - t.Fatalf("err: %v", err) - } - if len(keys) != 1 { - t.Fatalf("bad: %v", keys) - } - if keys[0] != "foo/" { - t.Fatalf("bad: %v", keys) - } - - // Delete should remove the prefix - err = b.Delete("foo/zip") - if err != nil { - t.Fatalf("err: %v", err) - } - - time.Sleep(d.beforeList) - keys, err = b.List("") - if err != nil { - t.Fatalf("err: %v", err) - } - if len(keys) != 0 { - t.Fatalf("bad: %v", keys) + if string(entry.Value) != "zap3" { + t.Fatal("updates did not apply correctly") } } -func testEventuallyConsistentBackend_ListPrefix(t *testing.T, b Backend, d delays) { - e1 := &Entry{Key: "foo", Value: []byte("test")} - e2 := &Entry{Key: "foo/bar", Value: []byte("test")} - e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")} - - err := b.Put(e1) - if err != nil { - t.Fatalf("err: %v", err) +func SetupTestingTransactions(t *testing.T, b Backend) []TxnEntry { + // Add a few keys so that we test rollback with deletion + if err := b.Put(&Entry{ + Key: "foo", + Value: []byte("bar"), + }); err != nil { + t.Fatal(err) } - err = b.Put(e2) - if err != nil { - t.Fatalf("err: %v", err) + if err := b.Put(&Entry{ + Key: "zip", + Value: []byte("zap"), + }); err != nil { + t.Fatal(err) } - err = b.Put(e3) - if err != nil { - t.Fatalf("err: %v", err) + if err := b.Put(&Entry{ + Key: "deleteme", + }); err != nil { + t.Fatal(err) + } + if err := b.Put(&Entry{ + Key: "deleteme2", + }); err != nil { + t.Fatal(err) } - // Scan the root - time.Sleep(d.beforeList) - keys, err := b.List("") - if err != nil { - t.Fatalf("err: 
%v", err) - } - if len(keys) != 2 { - t.Fatalf("bad: %v", keys) - } - sort.Strings(keys) - if keys[0] != "foo" { - t.Fatalf("bad: %v", keys) - } - if keys[1] != "foo/" { - t.Fatalf("bad: %v", keys) - } - - // Scan foo/ - time.Sleep(d.beforeList) - keys, err = b.List("foo/") - if err != nil { - t.Fatalf("err: %v", err) - } - if len(keys) != 2 { - t.Fatalf("bad: %v", keys) - } - sort.Strings(keys) - if keys[0] != "bar" { - t.Fatalf("bad: %v", keys) - } - if keys[1] != "bar/" { - t.Fatalf("bad: %v", keys) - } - - // Scan foo/bar/ - time.Sleep(d.beforeList) - keys, err = b.List("foo/bar/") - if err != nil { - t.Fatalf("err: %v", err) - } - sort.Strings(keys) - if len(keys) != 1 { - t.Fatalf("bad: %v", keys) - } - if keys[0] != "baz" { - t.Fatalf("bad: %v", keys) + txns := []TxnEntry{ + TxnEntry{ + Operation: PutOperation, + Entry: &Entry{ + Key: "foo", + Value: []byte("bar2"), + }, + }, + TxnEntry{ + Operation: DeleteOperation, + Entry: &Entry{ + Key: "deleteme", + }, + }, + TxnEntry{ + Operation: PutOperation, + Entry: &Entry{ + Key: "foo", + Value: []byte("bar3"), + }, + }, + TxnEntry{ + Operation: DeleteOperation, + Entry: &Entry{ + Key: "deleteme2", + }, + }, + TxnEntry{ + Operation: PutOperation, + Entry: &Entry{ + Key: "zip", + Value: []byte("zap3"), + }, + }, } + return txns } diff --git a/vendor/github.com/hashicorp/vault/physical/transactions.go b/vendor/github.com/hashicorp/vault/physical/transactions.go index b9ddffa..f8668d2 100644 --- a/vendor/github.com/hashicorp/vault/physical/transactions.go +++ b/vendor/github.com/hashicorp/vault/physical/transactions.go @@ -27,7 +27,7 @@ type PseudoTransactional interface { } // Implements the transaction interface -func genericTransactionHandler(t PseudoTransactional, txns []TxnEntry) (retErr error) { +func GenericTransactionHandler(t PseudoTransactional, txns []TxnEntry) (retErr error) { rollbackStack := make([]TxnEntry, 0, len(txns)) var dirty bool diff --git 
a/vendor/github.com/hashicorp/vault/physical/zookeeper.go b/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper.go similarity index 84% rename from vendor/github.com/hashicorp/vault/physical/zookeeper.go rename to vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper.go index 6bc9061..8ecc0d6 100644 --- a/vendor/github.com/hashicorp/vault/physical/zookeeper.go +++ b/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper.go @@ -1,4 +1,4 @@ -package physical +package zookeeper import ( "fmt" @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" metrics "github.com/armon/go-metrics" @@ -22,20 +23,20 @@ const ( ZKNodeFilePrefix = "_" ) -// ZookeeperBackend is a physical backend that stores data at specific -// prefix within Zookeeper. It is used in production situations as +// ZooKeeperBackend is a physical backend that stores data at specific +// prefix within ZooKeeper. It is used in production situations as // it allows Vault to run on multiple machines in a highly-available manner. -type ZookeeperBackend struct { +type ZooKeeperBackend struct { path string client *zk.Conn acl []zk.ACL logger log.Logger } -// newZookeeperBackend constructs a Zookeeper backend using the given API client +// NewZooKeeperBackend constructs a ZooKeeper backend using the given API client // and the prefix in the KV store. 
-func newZookeeperBackend(conf map[string]string, logger log.Logger) (Backend, error) { - // Get the path in Zookeeper +func NewZooKeeperBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + // Get the path in ZooKeeper path, ok := conf["path"] if !ok { path = "vault/" @@ -114,12 +115,12 @@ func newZookeeperBackend(conf map[string]string, logger log.Logger) (Backend, er if useAddAuth { err = client.AddAuth(schema, []byte(owner)) if err != nil { - return nil, fmt.Errorf("Zookeeper rejected authentication information provided at auth_info: %v", err) + return nil, fmt.Errorf("ZooKeeper rejected authentication information provided at auth_info: %v", err) } } // Setup the backend - c := &ZookeeperBackend{ + c := &ZooKeeperBackend{ path: path, client: client, acl: acl, @@ -131,7 +132,7 @@ func newZookeeperBackend(conf map[string]string, logger log.Logger) (Backend, er // ensurePath is used to create each node in the path hierarchy. // We avoid calling this optimistically, and invoke it when we get // an error during an operation -func (c *ZookeeperBackend) ensurePath(path string, value []byte) error { +func (c *ZooKeeperBackend) ensurePath(path string, value []byte) error { nodes := strings.Split(path, "/") fullPath := "" for index, node := range nodes { @@ -161,7 +162,7 @@ func (c *ZookeeperBackend) ensurePath(path string, value []byte) error { // cleanupLogicalPath is used to remove all empty nodes, begining with deepest one, // aborting on first non-empty one, up to top-level node. -func (c *ZookeeperBackend) cleanupLogicalPath(path string) error { +func (c *ZooKeeperBackend) cleanupLogicalPath(path string) error { nodes := strings.Split(path, "/") for i := len(nodes) - 1; i > 0; i-- { fullPath := c.path + strings.Join(nodes[:i], "/") @@ -192,12 +193,12 @@ func (c *ZookeeperBackend) cleanupLogicalPath(path string) error { } // nodePath returns an zk path based on the given key. 
-func (c *ZookeeperBackend) nodePath(key string) string { +func (c *ZooKeeperBackend) nodePath(key string) string { return filepath.Join(c.path, filepath.Dir(key), ZKNodeFilePrefix+filepath.Base(key)) } // Put is used to insert or update an entry -func (c *ZookeeperBackend) Put(entry *Entry) error { +func (c *ZooKeeperBackend) Put(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"zookeeper", "put"}, time.Now()) // Attempt to set the full path @@ -212,7 +213,7 @@ func (c *ZookeeperBackend) Put(entry *Entry) error { } // Get is used to fetch an entry -func (c *ZookeeperBackend) Get(key string) (*Entry, error) { +func (c *ZooKeeperBackend) Get(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"zookeeper", "get"}, time.Now()) // Attempt to read the full path @@ -231,7 +232,7 @@ func (c *ZookeeperBackend) Get(key string) (*Entry, error) { if value == nil { return nil, nil } - ent := &Entry{ + ent := &physical.Entry{ Key: key, Value: value, } @@ -239,7 +240,7 @@ func (c *ZookeeperBackend) Get(key string) (*Entry, error) { } // Delete is used to permanently delete an entry -func (c *ZookeeperBackend) Delete(key string) error { +func (c *ZooKeeperBackend) Delete(key string) error { defer metrics.MeasureSince([]string{"zookeeper", "delete"}, time.Now()) if key == "" { @@ -262,7 +263,7 @@ func (c *ZookeeperBackend) Delete(key string) error { // List is used ot list all the keys under a given // prefix, up to the next prefix. -func (c *ZookeeperBackend) List(prefix string) ([]string, error) { +func (c *ZooKeeperBackend) List(prefix string) ([]string, error) { defer metrics.MeasureSince([]string{"zookeeper", "list"}, time.Now()) // Query the children at the full path @@ -289,8 +290,14 @@ func (c *ZookeeperBackend) List(prefix string) ([]string, error) { // and append the slash which is what Vault depends on // for iteration if stat.DataLength > 0 && stat.NumChildren > 0 { - msgFmt := "Node %q is both of data and leaf type ??" 
- panic(fmt.Sprintf(msgFmt, childPath)) + if childPath == c.nodePath("core/lock") { + // go-zookeeper Lock() breaks Vault semantics and creates a directory + // under the lock file; just treat it like the file Vault expects + children = append(children, key[1:]) + } else { + msgFmt := "Node %q is both of data and leaf type ??" + panic(fmt.Sprintf(msgFmt, childPath)) + } } else if stat.DataLength == 0 { // No, we cannot differentiate here on number of children as node // can have all it leafs remoed, and it still is a node. @@ -304,8 +311,8 @@ func (c *ZookeeperBackend) List(prefix string) ([]string, error) { } // LockWith is used for mutual exclusion based on the given key. -func (c *ZookeeperBackend) LockWith(key, value string) (Lock, error) { - l := &ZookeeperHALock{ +func (c *ZooKeeperBackend) LockWith(key, value string) (physical.Lock, error) { + l := &ZooKeeperHALock{ in: c, key: key, value: value, @@ -315,13 +322,13 @@ func (c *ZookeeperBackend) LockWith(key, value string) (Lock, error) { // HAEnabled indicates whether the HA functionality should be exposed. // Currently always returns true. 
-func (c *ZookeeperBackend) HAEnabled() bool { +func (c *ZooKeeperBackend) HAEnabled() bool { return true } -// ZookeeperHALock is a Zookeeper Lock implementation for the HABackend -type ZookeeperHALock struct { - in *ZookeeperBackend +// ZooKeeperHALock is a ZooKeeper Lock implementation for the HABackend +type ZooKeeperHALock struct { + in *ZooKeeperBackend key string value string @@ -331,7 +338,7 @@ type ZookeeperHALock struct { zkLock *zk.Lock } -func (i *ZookeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { +func (i *ZooKeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { i.localLock.Lock() defer i.localLock.Unlock() if i.held { @@ -373,7 +380,7 @@ func (i *ZookeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) return i.leaderCh, nil } -func (i *ZookeeperHALock) attemptLock(lockpath string, didLock chan struct{}, failLock chan error, releaseCh chan bool) { +func (i *ZooKeeperHALock) attemptLock(lockpath string, didLock chan struct{}, failLock chan error, releaseCh chan bool) { // Wait to acquire the lock in ZK lock := zk.NewLock(i.in.client, lockpath, i.in.acl) err := lock.Lock() @@ -401,7 +408,7 @@ func (i *ZookeeperHALock) attemptLock(lockpath string, didLock chan struct{}, fa } } -func (i *ZookeeperHALock) monitorLock(lockeventCh <-chan zk.Event, leaderCh chan struct{}) { +func (i *ZooKeeperHALock) monitorLock(lockeventCh <-chan zk.Event, leaderCh chan struct{}) { for { select { case event := <-lockeventCh: @@ -426,7 +433,7 @@ func (i *ZookeeperHALock) monitorLock(lockeventCh <-chan zk.Event, leaderCh chan } } -func (i *ZookeeperHALock) Unlock() error { +func (i *ZooKeeperHALock) Unlock() error { i.localLock.Lock() defer i.localLock.Unlock() if !i.held { @@ -438,7 +445,7 @@ func (i *ZookeeperHALock) Unlock() error { return nil } -func (i *ZookeeperHALock) Value() (bool, string, error) { +func (i *ZooKeeperHALock) Value() (bool, string, error) { lockpath := i.in.nodePath(i.key) value, _, err := 
i.in.client.Get(lockpath) return (value != nil), string(value), err diff --git a/vendor/github.com/hashicorp/vault/physical/zookeeper_test.go b/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper_test.go similarity index 80% rename from vendor/github.com/hashicorp/vault/physical/zookeeper_test.go rename to vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper_test.go index b9969ae..a85c27c 100644 --- a/vendor/github.com/hashicorp/vault/physical/zookeeper_test.go +++ b/vendor/github.com/hashicorp/vault/physical/zookeeper/zookeeper_test.go @@ -1,4 +1,4 @@ -package physical +package zookeeper import ( "fmt" @@ -7,12 +7,13 @@ import ( "time" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/physical" log "github.com/mgutz/logxi/v1" "github.com/samuel/go-zookeeper/zk" ) -func TestZookeeperBackend(t *testing.T) { +func TestZooKeeperBackend(t *testing.T) { addr := os.Getenv("ZOOKEEPER_ADDR") if addr == "" { t.SkipNow() @@ -45,19 +46,19 @@ func TestZookeeperBackend(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("zookeeper", logger, map[string]string{ + b, err := NewZooKeeperBackend(map[string]string{ "address": addr + "," + addr, "path": randPath, - }) + }, logger) if err != nil { t.Fatalf("err: %s", err) } - testBackend(t, b) - testBackend_ListPrefix(t, b) + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) } -func TestZookeeperHABackend(t *testing.T) { +func TestZooKeeperHABackend(t *testing.T) { addr := os.Getenv("ZOOKEEPER_ADDR") if addr == "" { t.SkipNow() @@ -85,17 +86,17 @@ func TestZookeeperHABackend(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) - b, err := NewBackend("zookeeper", logger, map[string]string{ + b, err := NewZooKeeperBackend(map[string]string{ "address": addr + "," + addr, "path": randPath, - }) + }, logger) if err != nil { t.Fatalf("err: %s", err) } - ha, ok := b.(HABackend) + ha, ok := b.(physical.HABackend) if !ok { 
t.Fatalf("zookeeper does not implement HABackend") } - testHABackend(t, ha, ha) + physical.ExerciseHABackend(t, ha, ha) } diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra-database-plugin/main.go new file mode 100644 index 0000000..f9bfdeb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra-database-plugin/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/plugins/database/cassandra" +) + +func main() { + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + err := cassandra.Run(apiClientMeta.GetTLSConfig()) + if err != nil { + log.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra.go new file mode 100644 index 0000000..c0b5fd5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra.go @@ -0,0 +1,177 @@ +package cassandra + +import ( + "strings" + "time" + + "github.com/gocql/gocql" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/plugins" + "github.com/hashicorp/vault/plugins/helper/database/connutil" + "github.com/hashicorp/vault/plugins/helper/database/credsutil" + "github.com/hashicorp/vault/plugins/helper/database/dbutil" +) + +const ( + defaultUserCreationCQL = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;` + defaultUserDeletionCQL = `DROP USER '{{username}}';` + cassandraTypeName = "cassandra" +) + +// Cassandra is an implementation of Database interface +type Cassandra 
struct { + connutil.ConnectionProducer + credsutil.CredentialsProducer +} + +// New returns a new Cassandra instance +func New() (interface{}, error) { + connProducer := &cassandraConnectionProducer{} + connProducer.Type = cassandraTypeName + + credsProducer := &credsutil.SQLCredentialsProducer{ + DisplayNameLen: 15, + RoleNameLen: 15, + UsernameLen: 100, + Separator: "_", + } + + dbType := &Cassandra{ + ConnectionProducer: connProducer, + CredentialsProducer: credsProducer, + } + + return dbType, nil +} + +// Run instantiates a Cassandra object, and runs the RPC server for the plugin +func Run(apiTLSConfig *api.TLSConfig) error { + dbType, err := New() + if err != nil { + return err + } + + plugins.Serve(dbType.(*Cassandra), apiTLSConfig) + + return nil +} + +// Type returns the TypeName for this backend +func (c *Cassandra) Type() (string, error) { + return cassandraTypeName, nil +} + +func (c *Cassandra) getConnection() (*gocql.Session, error) { + session, err := c.Connection() + if err != nil { + return nil, err + } + + return session.(*gocql.Session), nil +} + +// CreateUser generates the username/password on the underlying Cassandra secret backend as instructed by +// the CreationStatement provided. 
+func (c *Cassandra) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { + // Grab the lock + c.Lock() + defer c.Unlock() + + // Get the connection + session, err := c.getConnection() + if err != nil { + return "", "", err + } + + creationCQL := statements.CreationStatements + if creationCQL == "" { + creationCQL = defaultUserCreationCQL + } + rollbackCQL := statements.RollbackStatements + if rollbackCQL == "" { + rollbackCQL = defaultUserDeletionCQL + } + + username, err = c.GenerateUsername(usernameConfig) + username = strings.Replace(username, "-", "_", -1) + if err != nil { + return "", "", err + } + // Cassandra doesn't like the uppercase usernames + username = strings.ToLower(username) + + password, err = c.GeneratePassword() + if err != nil { + return "", "", err + } + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(creationCQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + err = session.Query(dbutil.QueryHelper(query, map[string]string{ + "username": username, + "password": password, + })).Exec() + if err != nil { + for _, query := range strutil.ParseArbitraryStringSlice(rollbackCQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + session.Query(dbutil.QueryHelper(query, map[string]string{ + "username": username, + })).Exec() + } + return "", "", err + } + } + + return username, password, nil +} + +// RenewUser is not supported on Cassandra, so this is a no-op. +func (c *Cassandra) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error { + // NOOP + return nil +} + +// RevokeUser attempts to drop the specified user. 
+func (c *Cassandra) RevokeUser(statements dbplugin.Statements, username string) error { + // Grab the lock + c.Lock() + defer c.Unlock() + + session, err := c.getConnection() + if err != nil { + return err + } + + revocationCQL := statements.RevocationStatements + if revocationCQL == "" { + revocationCQL = defaultUserDeletionCQL + } + + var result *multierror.Error + for _, query := range strutil.ParseArbitraryStringSlice(revocationCQL, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + err := session.Query(dbutil.QueryHelper(query, map[string]string{ + "username": username, + })).Exec() + + result = multierror.Append(result, err) + } + + return result.ErrorOrNil() +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra_test.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra_test.go new file mode 100644 index 0000000..0f4d330 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/cassandra_test.go @@ -0,0 +1,277 @@ +package cassandra + +import ( + "os" + "strconv" + "testing" + "time" + + "fmt" + + "github.com/gocql/gocql" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +func prepareCassandraTestContainer(t *testing.T) (func(), string, int) { + if os.Getenv("CASSANDRA_HOST") != "" { + return func() {}, os.Getenv("CASSANDRA_HOST"), 0 + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + cwd, _ := os.Getwd() + cassandraMountPath := fmt.Sprintf("%s/test-fixtures/:/etc/cassandra/", cwd) + + ro := &dockertest.RunOptions{ + Repository: "cassandra", + Tag: "latest", + Env: []string{"CASSANDRA_BROADCAST_ADDRESS=127.0.0.1"}, + Mounts: []string{cassandraMountPath}, + } + resource, err := pool.RunWithOptions(ro) + if err != nil { + t.Fatalf("Could not start local cassandra docker container: %s", err) + } + + cleanup := func() { + 
err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local container: %s", err) + } + } + + port, _ := strconv.Atoi(resource.GetPort("9042/tcp")) + address := fmt.Sprintf("127.0.0.1:%d", port) + + // exponential backoff-retry + if err = pool.Retry(func() error { + clusterConfig := gocql.NewCluster(address) + clusterConfig.Authenticator = gocql.PasswordAuthenticator{ + Username: "cassandra", + Password: "cassandra", + } + clusterConfig.ProtoVersion = 4 + clusterConfig.Port = port + + session, err := clusterConfig.CreateSession() + if err != nil { + return fmt.Errorf("error creating session: %s", err) + } + defer session.Close() + return nil + }); err != nil { + cleanup() + t.Fatalf("Could not connect to cassandra docker container: %s", err) + } + return cleanup, address, port +} + +func TestCassandra_Initialize(t *testing.T) { + if os.Getenv("TRAVIS") != "true" { + t.SkipNow() + } + cleanup, address, port := prepareCassandraTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "hosts": address, + "port": port, + "username": "cassandra", + "password": "cassandra", + "protocol_version": 4, + } + + dbRaw, _ := New() + db := dbRaw.(*Cassandra) + connProducer := db.ConnectionProducer.(*cassandraConnectionProducer) + + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !connProducer.Initialized { + t.Fatal("Database should be initalized") + } + + err = db.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + // test a string protocol + connectionDetails = map[string]interface{}{ + "hosts": address, + "port": strconv.Itoa(port), + "username": "cassandra", + "password": "cassandra", + "protocol_version": "4", + } + + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestCassandra_CreateUser(t *testing.T) { + if os.Getenv("TRAVIS") != "true" { + t.SkipNow() + } + cleanup, address, port := 
prepareCassandraTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "hosts": address, + "port": port, + "username": "cassandra", + "password": "cassandra", + "protocol_version": 4, + } + + dbRaw, _ := New() + db := dbRaw.(*Cassandra) + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + CreationStatements: testCassandraRole, + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, address, port, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } +} + +func TestMyCassandra_RenewUser(t *testing.T) { + if os.Getenv("TRAVIS") != "true" { + t.SkipNow() + } + cleanup, address, port := prepareCassandraTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "hosts": address, + "port": port, + "username": "cassandra", + "password": "cassandra", + "protocol_version": 4, + } + + dbRaw, _ := New() + db := dbRaw.(*Cassandra) + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + CreationStatements: testCassandraRole, + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, address, port, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + err = db.RenewUser(statements, username, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestCassandra_RevokeUser(t *testing.T) { + if 
os.Getenv("TRAVIS") != "true" { + t.SkipNow() + } + cleanup, address, port := prepareCassandraTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "hosts": address, + "port": port, + "username": "cassandra", + "password": "cassandra", + "protocol_version": 4, + } + + dbRaw, _ := New() + db := dbRaw.(*Cassandra) + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + CreationStatements: testCassandraRole, + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, address, port, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Test default revoke statememts + err = db.RevokeUser(statements, username) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, address, port, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } +} + +func testCredsExist(t testing.TB, address string, port int, username, password string) error { + clusterConfig := gocql.NewCluster(address) + clusterConfig.Authenticator = gocql.PasswordAuthenticator{ + Username: username, + Password: password, + } + clusterConfig.ProtoVersion = 4 + clusterConfig.Port = port + + session, err := clusterConfig.CreateSession() + if err != nil { + return fmt.Errorf("error creating session: %s", err) + } + defer session.Close() + return nil +} + +const testCassandraRole = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER; +GRANT ALL PERMISSIONS ON ALL KEYSPACES TO {{username}};` diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/connection_producer.go b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/connection_producer.go 
new file mode 100644 index 0000000..44b0b7d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/connection_producer.go @@ -0,0 +1,237 @@ +package cassandra + +import ( + "crypto/tls" + "fmt" + "strings" + "sync" + "time" + + "github.com/mitchellh/mapstructure" + + "github.com/gocql/gocql" + "github.com/hashicorp/vault/helper/certutil" + "github.com/hashicorp/vault/helper/parseutil" + "github.com/hashicorp/vault/helper/tlsutil" + "github.com/hashicorp/vault/plugins/helper/database/connutil" +) + +// cassandraConnectionProducer implements ConnectionProducer and provides an +// interface for cassandra databases to make connections. +type cassandraConnectionProducer struct { + Hosts string `json:"hosts" structs:"hosts" mapstructure:"hosts"` + Port int `json:"port" structs:"port" mapstructure:"port"` + Username string `json:"username" structs:"username" mapstructure:"username"` + Password string `json:"password" structs:"password" mapstructure:"password"` + TLS bool `json:"tls" structs:"tls" mapstructure:"tls"` + InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"` + ProtocolVersion int `json:"protocol_version" structs:"protocol_version" mapstructure:"protocol_version"` + ConnectTimeoutRaw interface{} `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"` + TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"` + Consistency string `json:"consistency" structs:"consistency" mapstructure:"consistency"` + PemBundle string `json:"pem_bundle" structs:"pem_bundle" mapstructure:"pem_bundle"` + PemJSON string `json:"pem_json" structs:"pem_json" mapstructure:"pem_json"` + + connectTimeout time.Duration + certificate string + privateKey string + issuingCA string + + Initialized bool + Type string + session *gocql.Session + sync.Mutex +} + +func (c *cassandraConnectionProducer) Initialize(conf map[string]interface{}, verifyConnection bool) 
error { + c.Lock() + defer c.Unlock() + + err := mapstructure.WeakDecode(conf, c) + if err != nil { + return err + } + + if c.ConnectTimeoutRaw == nil { + c.ConnectTimeoutRaw = "0s" + } + c.connectTimeout, err = parseutil.ParseDurationSecond(c.ConnectTimeoutRaw) + if err != nil { + return fmt.Errorf("invalid connect_timeout: %s", err) + } + + switch { + case len(c.Hosts) == 0: + return fmt.Errorf("hosts cannot be empty") + case len(c.Username) == 0: + return fmt.Errorf("username cannot be empty") + case len(c.Password) == 0: + return fmt.Errorf("password cannot be empty") + } + + var certBundle *certutil.CertBundle + var parsedCertBundle *certutil.ParsedCertBundle + switch { + case len(c.PemJSON) != 0: + parsedCertBundle, err = certutil.ParsePKIJSON([]byte(c.PemJSON)) + if err != nil { + return fmt.Errorf("could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: %s", err) + } + certBundle, err = parsedCertBundle.ToCertBundle() + if err != nil { + return fmt.Errorf("Error marshaling PEM information: %s", err) + } + c.certificate = certBundle.Certificate + c.privateKey = certBundle.PrivateKey + c.issuingCA = certBundle.IssuingCA + c.TLS = true + + case len(c.PemBundle) != 0: + parsedCertBundle, err = certutil.ParsePEMBundle(c.PemBundle) + if err != nil { + return fmt.Errorf("Error parsing the given PEM information: %s", err) + } + certBundle, err = parsedCertBundle.ToCertBundle() + if err != nil { + return fmt.Errorf("Error marshaling PEM information: %s", err) + } + c.certificate = certBundle.Certificate + c.privateKey = certBundle.PrivateKey + c.issuingCA = certBundle.IssuingCA + c.TLS = true + } + + // Set initialized to true at this point since all fields are set, + // and the connection can be established at a later time. 
+ c.Initialized = true + + if verifyConnection { + if _, err := c.Connection(); err != nil { + return fmt.Errorf("error verifying connection: %s", err) + } + } + + return nil +} + +func (c *cassandraConnectionProducer) Connection() (interface{}, error) { + if !c.Initialized { + return nil, connutil.ErrNotInitialized + } + + // If we already have a DB, return it + if c.session != nil { + return c.session, nil + } + + session, err := c.createSession() + if err != nil { + return nil, err + } + + // Store the session in backend for reuse + c.session = session + + return session, nil +} + +func (c *cassandraConnectionProducer) Close() error { + // Grab the write lock + c.Lock() + defer c.Unlock() + + if c.session != nil { + c.session.Close() + } + + c.session = nil + + return nil +} + +func (c *cassandraConnectionProducer) createSession() (*gocql.Session, error) { + hosts := strings.Split(c.Hosts, ",") + clusterConfig := gocql.NewCluster(hosts...) + clusterConfig.Authenticator = gocql.PasswordAuthenticator{ + Username: c.Username, + Password: c.Password, + } + + if c.Port != 0 { + clusterConfig.Port = c.Port + } + + clusterConfig.ProtoVersion = c.ProtocolVersion + if clusterConfig.ProtoVersion == 0 { + clusterConfig.ProtoVersion = 2 + } + + clusterConfig.Timeout = c.connectTimeout + if c.TLS { + var tlsConfig *tls.Config + if len(c.certificate) > 0 || len(c.issuingCA) > 0 { + if len(c.certificate) > 0 && len(c.privateKey) == 0 { + return nil, fmt.Errorf("found certificate for TLS authentication but no private key") + } + + certBundle := &certutil.CertBundle{} + if len(c.certificate) > 0 { + certBundle.Certificate = c.certificate + certBundle.PrivateKey = c.privateKey + } + if len(c.issuingCA) > 0 { + certBundle.IssuingCA = c.issuingCA + } + + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return nil, fmt.Errorf("failed to parse certificate bundle: %s", err) + } + + tlsConfig, err = parsedCertBundle.GetTLSConfig(certutil.TLSClient) + if err 
!= nil || tlsConfig == nil { + return nil, fmt.Errorf("failed to get TLS configuration: tlsConfig:%#v err:%v", tlsConfig, err) + } + tlsConfig.InsecureSkipVerify = c.InsecureTLS + + if c.TLSMinVersion != "" { + var ok bool + tlsConfig.MinVersion, ok = tlsutil.TLSLookup[c.TLSMinVersion] + if !ok { + return nil, fmt.Errorf("invalid 'tls_min_version' in config") + } + } else { + // MinVersion was not being set earlier. Reset it to + // zero to gracefully handle upgrades. + tlsConfig.MinVersion = 0 + } + } + + clusterConfig.SslOpts = &gocql.SslOptions{ + Config: tlsConfig, + } + } + + session, err := clusterConfig.CreateSession() + if err != nil { + return nil, fmt.Errorf("error creating session: %s", err) + } + + // Set consistency + if c.Consistency != "" { + consistencyValue, err := gocql.ParseConsistencyWrapper(c.Consistency) + if err != nil { + return nil, err + } + + session.SetConsistency(consistencyValue) + } + + // Verify the info + err = session.Query(`LIST USERS`).Exec() + if err != nil { + return nil, fmt.Errorf("error validating connection info: %s", err) + } + + return session, nil +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/cassandra/test-fixtures/cassandra.yaml b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/test-fixtures/cassandra.yaml new file mode 100644 index 0000000..7c28d84 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/cassandra/test-fixtures/cassandra.yaml @@ -0,0 +1,1146 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. 
You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting on the node's initial start, +# on subsequent starts, this setting will apply even if initial token is set. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# Triggers automatic allocation of num_tokens tokens for this node. The allocation +# algorithm attempts to choose tokens in a way that optimizes replicated load over +# the nodes in the datacenter for the replication strategy used by the specified +# keyspace. +# +# The load assigned to each node will be close to proportional to its number of +# vnodes. +# +# Only supported with the Murmur3Partitioner. +# allocate_tokens_for_keyspace: KEYSPACE + +# initial_token allows you to specify tokens manually. While you can use it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +# May either be "true" or "false" to enable globally +hinted_handoff_enabled: true + +# When hinted_handoff_enabled is true, a black list of data centers that will not +# perform hinted handoff +# hinted_handoff_disabled_datacenters: +# - DC1 +# - DC2 + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours + +# Maximum throttle in KBs per second, per delivery thread. 
This will be +# reduced proportionally to the number of nodes in the cluster. (If there +# are two nodes in the cluster, each delivery thread will use the maximum +# rate; if there are three, each will throttle to half of the maximum, +# since we expect two nodes to be delivering hints simultaneously.) +hinted_handoff_throttle_in_kb: 1024 + +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# Directory where Cassandra should store hints. +# If not set, the default directory is $CASSANDRA_HOME/data/hints. +# hints_directory: /var/lib/cassandra/hints + +# How often hints should be flushed from the internal buffers to disk. +# Will *not* trigger fsync. +hints_flush_period_in_ms: 10000 + +# Maximum size for a single hints file, in megabytes. +max_hints_file_size_in_mb: 128 + +# Compression to apply to the hint files. If omitted, hints files +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +#hints_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# Maximum throttle in KBs per second, total. This will be +# reduced proportionally to the number of nodes in the cluster. +batchlog_replay_throttle_in_kb: 1024 + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. 
+# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) +authenticator: PasswordAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: CassandraAuthorizer + +# Part of the Authentication & Authorization backend, implementing IRoleManager; used +# to maintain grants and memberships between roles. +# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, +# which stores role information in the system_auth keyspace. Most functions of the +# IRoleManager require an authenticated login, so unless the configured IAuthenticator +# actually implements authentication, most of this functionality will be unavailable. +# +# - CassandraRoleManager stores role data in the system_auth keyspace. Please +# increase system_auth keyspace replication factor if you use this role manager. +role_manager: CassandraRoleManager + +# Validity period for roles cache (fetching granted roles can be an expensive +# operation depending on the role manager, CassandraRoleManager is one example) +# Granted roles are cached for authenticated sessions in AuthenticatedUser and +# after the period specified here, become eligible for (async) reload. +# Defaults to 2000, set to 0 to disable caching entirely. +# Will be disabled automatically for AllowAllAuthenticator. +roles_validity_in_ms: 2000 + +# Refresh interval for roles cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. 
If roles_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as roles_validity_in_ms. +# roles_update_interval_in_ms: 2000 + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# Validity period for credentials cache. This cache is tightly coupled to +# the provided PasswordAuthenticator implementation of IAuthenticator. If +# another IAuthenticator implementation is configured, this cache will not +# be automatically used and so the following settings will have no effect. +# Please note, credentials are cached in their encrypted form, so while +# activating this cache may reduce the number of queries made to the +# underlying table, it may not bring a significant reduction in the +# latency of individual authentication attempts. +# Defaults to 2000, set to 0 to disable credentials caching. +credentials_validity_in_ms: 2000 + +# Refresh interval for credentials cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If credentials_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as credentials_validity_in_ms. 
+# credentials_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Besides Murmur3Partitioner, partitioners included for backwards +# compatibility include RandomPartitioner, ByteOrderedPartitioner, and +# OrderPreservingPartitioner. +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. Cassandra +# will spread data evenly across them, subject to the granularity of +# the configured compaction strategy. +# If not set, the default directory is $CASSANDRA_HOME/data/data. +data_file_directories: + - /var/lib/cassandra/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# If not set, the default directory is $CASSANDRA_HOME/data/commitlog. +commitlog_directory: /var/lib/cassandra/commitlog + +# Enable / disable CDC functionality on a per-node basis. This modifies the logic used +# for write path allocation rejection (standard: never reject. cdc: reject Mutation +# containing a CDC-enabled table if at space limit in cdc_raw_directory). +cdc_enabled: false + +# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the +# segment contains mutations for a CDC-enabled table. This should be placed on a +# separate spindle than the data directories. If not set, the default directory is +# $CASSANDRA_HOME/data/cdc_raw. +# cdc_raw_directory: /var/lib/cassandra/cdc_raw + +# Policy for data disk failures: +# +# die +# shut down gossip and client transports and kill the JVM for any fs errors or +# single-sstable errors, so the node can be replaced. 
+# +# stop_paranoid +# shut down gossip and client transports even for single-sstable errors, +# kill the JVM for errors during startup. +# +# stop +# shut down gossip and client transports, leaving the node effectively dead, but +# can still be inspected via JMX, kill the JVM for errors during startup. +# +# best_effort +# stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# +# ignore +# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Policy for commit disk failures: +# +# die +# shut down gossip and Thrift and kill the JVM, so the node can be replaced. +# +# stop +# shut down gossip and Thrift, leaving the node effectively dead, but +# can still be inspected via JMX. +# +# stop_commit +# shutdown the commit log, letting writes collect but +# continuing to service reads, as in pre-2.0.5 Cassandra +# +# ignore +# ignore fatal errors and let the batches fail +commit_failure_policy: stop + +# Maximum size of the native protocol prepared statement cache +# +# Valid values are either "auto" (omitting the value) or a value greater 0. +# +# Note that specifying a too large value will result in long running GCs and possbily +# out-of-memory errors. Keep the value at a small fraction of the heap. +# +# If you constantly see "prepared statements discarded in the last minute because +# cache limit reached" messages, the first step is to investigate the root cause +# of these messages and check whether prepared statements are used correctly - +# i.e. use bind markers for variable parts. +# +# Do only change the default value, if you really have more prepared statements than +# fit in the cache. In most cases it is not neccessary to change this value. +# Constantly re-preparing statements is a performance penalty. 
+# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +prepared_statements_cache_size_mb: + +# Maximum size of the Thrift prepared statement cache +# +# If you do not use Thrift at all, it is safe to leave this value at "auto". +# +# See description of 'prepared_statements_cache_size_mb' above for more information. +# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +thrift_prepared_statements_cache_size_mb: + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must contain the entire row, +# so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the key cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Row cache implementation class name. Available implementations: +# +# org.apache.cassandra.cache.OHCProvider +# Fully off-heap row cache implementation (default). 
+#
+# org.apache.cassandra.cache.SerializingCacheProvider
+# This is the row cache implementation available
+# in previous releases of Cassandra.
+# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+
+# Maximum size of the row cache in memory.
+# Please note that OHC cache implementation requires some additional off-heap memory to manage
+# the map structures and some in-flight memory during operations before/after cache entries can be
+# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
+# Do not specify more memory than the system can afford in the worst usual situation and leave some
+# headroom for OS block level cache. Never allow your system to swap.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should save the row cache.
+# Caches are saved to saved_caches_directory as specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and are relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save.
+# Specify 0 (which is the default), meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# Maximum size of the counter cache in memory.
+#
+# Counter cache helps to reduce counter locks' contention for hot counter cells.
+# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
+# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
+# of the lock hold, helping with hot counter cell updates, but will not allow skipping
+# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
+# in memory, not the whole counter, so it's relatively cheap. 
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
+# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
+counter_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the counter cache (keys only). Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Default is 7200 or 2 hours.
+counter_cache_save_period: 7200
+
+# Number of keys from the counter cache to save
+# Disabled by default, meaning all keys are going to be saved
+# counter_cache_keys_to_save: 100
+
+# saved caches
+# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
+saved_caches_directory: /var/lib/cassandra/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables. 
+# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +# Max mutation size is also configurable via max_mutation_size_in_kb setting in +# cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. +# +# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must +# be set to at least twice the size of max_mutation_size_in_kb / 1024 +# +commitlog_segment_size_in_mb: 32 + +# Compression to apply to the commit log. If omitted, the commit log +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +# commitlog_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: ",," + - seeds: "127.0.0.1" + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. Same applies to +# "concurrent_counter_writes", since counter writes read the current +# values before incrementing and writing them back. 
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+concurrent_counter_writes: 32
+
+# For materialized view writes, as there is a read involved, this should
+# be limited by the lesser of concurrent reads or concurrent writes.
+concurrent_materialized_view_writes: 32
+
+# Maximum memory to use for sstable chunk cache and buffer pooling.
+# 32MB of this are reserved for pooling buffers, the rest is used as a
+# cache that holds uncompressed sstable chunks.
+# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap,
+# so is in addition to the memory allocated for heap. The cache also has on-heap
+# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size
+# if the default 64k chunk size is used).
+# Memory is only allocated when needed.
+# file_cache_size_in_mb: 512
+
+# Flag indicating whether to allocate on or off heap when the sstable buffer
+# pool is exhausted, that is when it has exceeded the maximum memory
+# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.

+# buffer_pool_use_heap_if_exhausted: true
+
+# The strategy for optimizing disk read
+# Possible values are:
+# ssd (for solid state disks, the default)
+# spinning (for spinning disks)
+# disk_optimization_strategy: ssd
+
+# Total permitted memory to use for memtables. Cassandra will stop
+# accepting writes when the limit is exceeded until a flush completes,
+# and will trigger a flush based on memtable_cleanup_threshold
+# If omitted, Cassandra will set both to 1/4 the size of the heap.
+# memtable_heap_space_in_mb: 2048
+# memtable_offheap_space_in_mb: 2048
+
+# Ratio of occupied non-flushing memtable size to total permitted size
+# that will trigger a flush of the largest memtable. 
Larger mct will +# mean larger flushes and hence less compaction, but also less concurrent +# flush activity which can make it difficult to keep your disks fed +# under heavy write load. +# +# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) +# memtable_cleanup_threshold: 0.11 + +# Specify the way Cassandra allocates and manages memtable memory. +# Options are: +# +# heap_buffers +# on heap nio buffers +# +# offheap_buffers +# off heap (direct) nio buffers +# +# offheap_objects +# off heap objects +memtable_allocation_type: heap_buffers + +# Total space to use for commit logs on disk. +# +# If space gets above this value, Cassandra will flush every dirty CF +# in the oldest segment and remove it. So a small total commitlog space +# will tend to cause more flush activity on less-active columnfamilies. +# +# The default value is the smaller of 8192, and 1/4 of the total space +# of the commitlog volume. +# +# commitlog_total_space_in_mb: 8192 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. +# +# memtable_flush_writers defaults to one per data_file_directory. +# +# If your data directories are backed by SSD, you can increase this, but +# avoid having memtable_flush_writers * data_file_directories > number of cores +#memtable_flush_writers: 1 + +# Total space to use for change-data-capture logs on disk. +# +# If space gets above this value, Cassandra will throw WriteTimeoutException +# on Mutations including tables with CDC enabled. A CDCCompactor is responsible +# for parsing the raw CDC logs and deleting them when parsing is completed. +# +# The default value is the min of 4096 mb and 1/8th of the total space +# of the drive where cdc_raw_directory resides. 
+# cdc_total_space_in_mb: 4096
+
+# When we hit our cdc_raw limit and the CDCCompactor is either running behind
+# or experiencing backpressure, we check at the following interval to see if any
+# new space for cdc-tracked tables has been made available. Defaults to 250ms
+# cdc_free_space_check_interval_ms: 250
+
+# A fixed memory pool size in MB for SSTable index summaries. If left
+# empty, this will default to 5% of the heap size. If the memory usage of
+# all index summaries exceeds this limit, SSTables with low read rates will
+# shrink their index summaries in order to meet this limit. However, this
+# is a best-effort process. In extreme conditions Cassandra may need to use
+# more than this amount of memory.
+index_summary_capacity_in_mb:
+
+# How frequently index summaries should be resampled. This is done
+# periodically to redistribute memory from the fixed-size pool to sstables
+# proportional to their recent read rates. Setting to -1 will disable this
+# process, leaving existing index summaries at their current sampling level.
+index_summary_resize_interval_in_minutes: 60
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+ssl_storage_port: 7001
+
+# Address or interface to bind to and tell other Cassandra nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate! 
+# +# Set listen_address OR listen_interface, not both. +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing _if_ the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting listen_address to 0.0.0.0 is always wrong. +# +listen_address: 172.17.0.5 + +# Set listen_address OR listen_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# listen_interface: eth0 + +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +# listen_interface_prefer_ipv6: false + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +broadcast_address: 127.0.0.1 + +# When using multiple physical network interfaces, set this +# to true to listen on broadcast_address in addition to +# the listen_address, allowing nodes to communicate in both +# interfaces. +# Ignore this property if the network configuration automatically +# routes between the public and private networks such as EC2. +# listen_on_broadcast_address: false + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. 
+start_native_transport: true +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +native_transport_port: 9042 +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. +# native_transport_port_ssl: 9142 +# The maximum threads for handling requests when the native transport is used. +# This is similar to rpc_max_threads though the default differs slightly (and +# there is no native_transport_min_threads, idle threads will always be stopped +# after 30 seconds). +# native_transport_max_threads: 128 +# +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. If you're changing this parameter, +# you may want to adjust max_value_size_in_mb accordingly. +# native_transport_max_frame_size_in_mb: 256 + +# The maximum number of concurrent client connections. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections: -1 + +# The maximum number of concurrent client connections per source ip. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections_per_ip: -1 + +# Whether to start the thrift rpc server. +start_rpc: false + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. 
it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +rpc_address: 0.0.0.0 + +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# rpc_interface: eth1 + +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# RPC address to broadcast to drivers and other Cassandra nodes. This cannot +# be set to 0.0.0.0. If left blank, this will be set to the value of +# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +# be set. +broadcast_rpc_address: 127.0.0.1 + +# enable or disable keepalive on rpc/native connections +rpc_keepalive: true + +# Cassandra provides two out-of-the-box options for the RPC Server: +# +# sync +# One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha +# Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). 
The rpc requests are still +# synchronous (one thread per active request). If hsha is selected then it is essential +# that rpc_max_threads is changed from the default value of unlimited. +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. +# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provides no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. 
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it, it is defined by net.ipv4.tcp_wmem
+# See also:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and 'man tcp'
+# internode_send_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it, it is defined by net.ipv4.tcp_wmem
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum message length).
+thrift_framed_transport_size_in_mb: 15
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. 
The competing goals are these:
+#
+# - a smaller granularity means more index entries are generated
+# and looking up rows within the partition by collation column
+# is faster
+# - but, Cassandra will keep the collation index in memory for hot
+# rows (as part of the key cache), so a larger granularity means
+# you can cache more hot rows
+column_index_size_in_kb: 64
+
+# Per sstable indexed key cache entries (the collation index in memory
+# mentioned above) exceeding this size will not be held on heap.
+# This means that only partition information is held on heap and the
+# index entries are read from disk.
+#
+# Note that this size refers to the size of the
+# serialized index information and not the size of the partition.
+column_index_cache_size_in_kb: 2
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#concurrent_compactors: 1
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction. 
+compaction_throughput_mb_per_sec: 16 + +# When compacting, the replacement sstable(s) can be opened before they +# are completely written, and used in place of the prior sstables for +# any range that has been written. This helps to smoothly transfer reads +# between the sstables, reducing page cache churn and keeping hot rows hot +sstable_preemptive_open_interval_in_mb: 50 + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# Throttles all streaming file transfer between the datacenters, +# this setting allows users to throttle inter dc stream throughput in addition +# to throttling all network stream traffic as configured with +# stream_throughput_outbound_megabits_per_sec +# When unset, the default is 200 Mbps or 25 MB/s +# inter_dc_stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# How long the coordinator should wait for counter writes to complete +counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) 
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, replicas will assume that requests
+# were forwarded to them instantly by the coordinator, which means that
+# under overload conditions we will waste that much extra time processing
+# already-timed-out requests.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Set socket timeout for streaming operation.
+# The stream session is failed if no data/ack is received by any of the participants
+# within that period, which means this should also be sufficient to stream a large
+# sstable or rebuild table indexes.
+# Default value is 86400000ms, which means stale streams timeout after 24 hours.
+# A value of zero means stream sockets should never time out.
+# streaming_socket_timeout_in_ms: 86400000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+#
+# - it teaches Cassandra enough about your network topology to route
+# requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Cassandra will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH
+# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. 
+# This means that if you start with the default SimpleSnitch, which +# locates every node on "rack1" in "datacenter1", your only options +# if you need to add another datacenter are GossipingPropertyFileSnitch +# (and the older PFS). From there, if you want to migrate to an +# incompatible snitch like Ec2Snitch you can do it by adding new nodes +# under Ec2Snitch (which will locate them in a new "datacenter") and +# decommissioning the old ones. +# +# Out of the box, Cassandra provides: +# +# SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# +# GossipingPropertyFileSnitch +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# +# PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# +# Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# +# Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) 
+# +# RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. 
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# +# NoScheduler +# Has no options +# +# RoundRobin +# throttle_limit +# The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# default_weight +# default_weight is optional and allows for +# overriding the default which is 1. +# weights +# Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# Enable or disable inter-node encryption +# JVM defaults for supported SSL socket protocols and cipher suites can +# be replaced using custom encryption options. This is not recommended +# unless you have policies in place that dictate certain settings, or +# need to disable vulnerable ciphers or protocols in case the JVM cannot +# be updated. +# FIPS compliant settings can be configured at JVM level and should not +# involve changing encryption settings here: +# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html +# *NOTE* No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. 
For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+server_encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+    # require_client_auth: false
+    # require_endpoint_verification: false
+
+# enable or disable client/server encryption.
+client_encryption_options:
+    enabled: false
+    # If enabled and optional is set to true encrypted and unencrypted connections are handled.
+    optional: false
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    # require_client_auth: false
+    # Set truststore and truststore_password if require_client_auth is true
+    # truststore: conf/.truststore
+    # truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# Can be:
+#
+# all
+# all traffic is compressed
+#
+# dc
+# traffic between different datacenters is compressed
+#
+# none
+# nothing is compressed.
+internode_compression: dc
+
+# Enable or disable tcp_nodelay for inter-dc communication. 
+# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: false + +# TTL for different trace types used during logging of the repair process. +tracetype_query_ttl: 86400 +tracetype_repair_ttl: 604800 + +# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level +# This threshold can be adjusted to minimize logging if necessary +# gc_log_threshold_in_ms: 200 + +# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at +# INFO level +# UDFs (user defined functions) are disabled by default. +# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. +enable_user_defined_functions: false + +# Enables scripted UDFs (JavaScript UDFs). +# Java UDFs are always enabled, if enable_user_defined_functions is true. +# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. +# This option has no effect, if enable_user_defined_functions is false. +enable_scripted_user_defined_functions: false + +# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. +# Lowering this value on Windows can provide much tighter latency and better throughput, however +# some virtualized environments may see a negative performance impact from changing this setting +# below their system default. The sysinternals 'clockres' tool can confirm your system's default +# setting. +windows_timer_interval: 1 + + +# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from +# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by +# the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys +# can still (and should!) 
be in the keystore and will be used on decrypt operations +# (to handle the case of key rotation). +# +# It is strongly recommended to download and install Java Cryptography Extension (JCE) +# Unlimited Strength Jurisdiction Policy Files for your version of the JDK. +# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) +# +# Currently, only the following file types are supported for transparent data encryption, although +# more are coming in future cassandra releases: commitlog, hints +transparent_data_encryption_options: + enabled: false + chunk_length_kb: 64 + cipher: AES/CBC/PKCS5Padding + key_alias: testing:1 + # CBC IV length for AES needs to be 16 bytes (which is also the default size) + # iv_length: 16 + key_provider: + - class_name: org.apache.cassandra.security.JKSKeyProvider + parameters: + - keystore: conf/.keystore + keystore_password: cassandra + store_type: JCEKS + key_password: cassandra + + +##################### +# SAFETY THRESHOLDS # +##################### + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +tombstone_warn_threshold: 1000 +tombstone_failure_threshold: 100000 + +# Log WARN on any batch size exceeding this value. 5kb per batch by default. +# Caution should be taken on increasing the size of this threshold as it can lead to node instability. 
+batch_size_warn_threshold_in_kb: 5 + +# Fail any batch exceeding this value. 50kb (10x warn threshold) by default. +batch_size_fail_threshold_in_kb: 50 + +# Log WARN on any batches not of type LOGGED than span across more partitions than this limit +unlogged_batch_across_partitions_warn_threshold: 10 + +# Log a warning when compacting partitions larger than this value +compaction_large_partition_warning_threshold_mb: 100 + +# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level +# Adjust the threshold based on your application throughput requirement +# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level +gc_warn_threshold_in_ms: 1000 + +# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption +# early. Any value size larger than this threshold will result into marking an SSTable +# as corrupted. +# max_value_size_in_mb: 256 diff --git a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana-database-plugin/main.go new file mode 100644 index 0000000..f995fe0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana-database-plugin/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/plugins/database/hana" +) + +func main() { + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + err := hana.Run(apiClientMeta.GetTLSConfig()) + if err != nil { + log.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana.go b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana.go new file mode 100644 index 0000000..aa2b53d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana.go @@ -0,0 +1,283 @@ +package hana + +import ( + "database/sql" + "fmt" + "strings" + "time" 
+ + _ "github.com/SAP/go-hdb/driver" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/plugins" + "github.com/hashicorp/vault/plugins/helper/database/connutil" + "github.com/hashicorp/vault/plugins/helper/database/credsutil" + "github.com/hashicorp/vault/plugins/helper/database/dbutil" +) + +const ( + hanaTypeName = "hdb" +) + +// HANA is an implementation of Database interface +type HANA struct { + connutil.ConnectionProducer + credsutil.CredentialsProducer +} + +// New implements builtinplugins.BuiltinFactory +func New() (interface{}, error) { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = hanaTypeName + + credsProducer := &credsutil.SQLCredentialsProducer{ + DisplayNameLen: 32, + RoleNameLen: 20, + UsernameLen: 128, + Separator: "_", + } + + dbType := &HANA{ + ConnectionProducer: connProducer, + CredentialsProducer: credsProducer, + } + + return dbType, nil +} + +// Run instantiates a HANA object, and runs the RPC server for the plugin +func Run(apiTLSConfig *api.TLSConfig) error { + dbType, err := New() + if err != nil { + return err + } + + plugins.Serve(dbType.(*HANA), apiTLSConfig) + + return nil +} + +// Type returns the TypeName for this backend +func (h *HANA) Type() (string, error) { + return hanaTypeName, nil +} + +func (h *HANA) getConnection() (*sql.DB, error) { + db, err := h.Connection() + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +// CreateUser generates the username/password on the underlying HANA secret backend +// as instructed by the CreationStatement provided. 
+func (h *HANA) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { + // Grab the lock + h.Lock() + defer h.Unlock() + + // Get the connection + db, err := h.getConnection() + if err != nil { + return "", "", err + } + + if statements.CreationStatements == "" { + return "", "", dbutil.ErrEmptyCreationStatement + } + + // Generate username + username, err = h.GenerateUsername(usernameConfig) + if err != nil { + return "", "", err + } + + // HANA does not allow hyphens in usernames, and highly prefers capital letters + username = strings.Replace(username, "-", "_", -1) + username = strings.ToUpper(username) + + // Generate password + password, err = h.GeneratePassword() + if err != nil { + return "", "", err + } + // Most HANA configurations have password constraints + // Prefix with A1a to satisfy these constraints. User will be forced to change upon login + password = strings.Replace(password, "-", "_", -1) + password = "A1a" + password + + // If expiration is in the role SQL, HANA will deactivate the user when time is up, + // regardless of whether vault is alive to revoke lease + expirationStr, err := h.GenerateExpiration(expiration) + if err != nil { + return "", "", err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return "", "", err + } + defer tx.Rollback() + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{ + "name": username, + "password": password, + "expiration": expirationStr, + })) + if err != nil { + return "", "", err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return "", "", err + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return "", "", err + } + + 
return username, password, nil +} + +// Renewing hana user just means altering user's valid until property +func (h *HANA) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error { + // Get connection + db, err := h.getConnection() + if err != nil { + return err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return err + } + defer tx.Rollback() + + // If expiration is in the role SQL, HANA will deactivate the user when time is up, + // regardless of whether vault is alive to revoke lease + expirationStr, err := h.GenerateExpiration(expiration) + if err != nil { + return err + } + + // Renew user's valid until property field + stmt, err := tx.Prepare("ALTER USER " + username + " VALID UNTIL " + "'" + expirationStr + "'") + if err != nil { + return err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return err + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +// Revoking hana user will deactivate user and try to perform a soft drop +func (h *HANA) RevokeUser(statements dbplugin.Statements, username string) error { + // default revoke will be a soft drop on user + if statements.RevocationStatements == "" { + return h.revokeUserDefault(username) + } + + // Get connection + db, err := h.getConnection() + if err != nil { + return err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return err + } + defer tx.Rollback() + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(statements.RevocationStatements, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{ + "name": username, + })) + if err != nil { + return err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return err + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return err + } + 
+ return nil +} + +func (h *HANA) revokeUserDefault(username string) error { + // Get connection + db, err := h.getConnection() + if err != nil { + return err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return err + } + defer tx.Rollback() + + // Disable server login for user + disableStmt, err := tx.Prepare(fmt.Sprintf("ALTER USER %s DEACTIVATE USER NOW", username)) + if err != nil { + return err + } + defer disableStmt.Close() + if _, err := disableStmt.Exec(); err != nil { + return err + } + + // Invalidates current sessions and performs soft drop (drop if no dependencies) + // if hard drop is desired, custom revoke statements should be written for role + dropStmt, err := tx.Prepare(fmt.Sprintf("DROP USER %s RESTRICT", username)) + if err != nil { + return err + } + defer dropStmt.Close() + if _, err := dropStmt.Exec(); err != nil { + return err + } + + // Commit transaction + if err := tx.Commit(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/hana/hana_test.go b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana_test.go new file mode 100644 index 0000000..7cff7f1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/hana/hana_test.go @@ -0,0 +1,167 @@ +package hana + +import ( + "database/sql" + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/plugins/helper/database/connutil" +) + +func TestHANA_Initialize(t *testing.T) { + if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + t.SkipNow() + } + connURL := os.Getenv("HANA_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*HANA) + + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer) + if 
!connProducer.Initialized { + t.Fatal("Database should be initialized") + } + + err = db.Close() + if err != nil { + t.Fatalf("err: %s", err) + } +} + +// this test will leave a lingering user on the system +func TestHANA_CreateUser(t *testing.T) { + if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + t.SkipNow() + } + connURL := os.Getenv("HANA_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*HANA) + + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test-test", + RoleName: "test-test", + } + + // Test with no configured Creation Statememt + _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Hour)) + if err == nil { + t.Fatal("Expected error when no creation statement is provided") + } + + statements := dbplugin.Statements{ + CreationStatements: testHANARole, + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Hour)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } +} + +func TestHANA_RevokeUser(t *testing.T) { + if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + t.SkipNow() + } + connURL := os.Getenv("HANA_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*HANA) + + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + CreationStatements: testHANARole, + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test-test", + RoleName: "test-test", + } + + // Test default revoke statememts + username, password, err := db.CreateUser(statements, usernameConfig, 
time.Now().Add(time.Hour)) + if err != nil { + t.Fatalf("err: %s", err) + } + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + err = db.RevokeUser(statements, username) + if err != nil { + t.Fatalf("err: %s", err) + } + if err := testCredsExist(t, connURL, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } + + // Test custom revoke statememt + username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Hour)) + if err != nil { + t.Fatalf("err: %s", err) + } + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + statements.RevocationStatements = testHANADrop + err = db.RevokeUser(statements, username) + if err != nil { + t.Fatalf("err: %s", err) + } + if err := testCredsExist(t, connURL, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } +} + +func testCredsExist(t testing.TB, connURL, username, password string) error { + // Log in with the new creds + parts := strings.Split(connURL, "@") + connURL = fmt.Sprintf("hdb://%s:%s@%s", username, password, parts[1]) + db, err := sql.Open("hdb", connURL) + if err != nil { + return err + } + defer db.Close() + return db.Ping() +} + +const testHANARole = ` +CREATE USER {{name}} PASSWORD {{password}} VALID UNTIL '{{expiration}}';` + +const testHANADrop = ` +DROP USER {{name}} CASCADE;` diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/connection_producer.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/connection_producer.go new file mode 100644 index 0000000..f802dc3 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/connection_producer.go @@ -0,0 +1,167 @@ +package mongodb + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "strconv" + "strings" + "sync" + "time" + + 
"github.com/hashicorp/vault/plugins/helper/database/connutil" + "github.com/mitchellh/mapstructure" + + "gopkg.in/mgo.v2" +) + +// mongoDBConnectionProducer implements ConnectionProducer and provides an +// interface for databases to make connections. +type mongoDBConnectionProducer struct { + ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"` + + Initialized bool + Type string + session *mgo.Session + sync.Mutex +} + +// Initialize parses connection configuration. +func (c *mongoDBConnectionProducer) Initialize(conf map[string]interface{}, verifyConnection bool) error { + c.Lock() + defer c.Unlock() + + err := mapstructure.WeakDecode(conf, c) + if err != nil { + return err + } + + if len(c.ConnectionURL) == 0 { + return fmt.Errorf("connection_url cannot be empty") + } + + // Set initialized to true at this point since all fields are set, + // and the connection can be established at a later time. + c.Initialized = true + + if verifyConnection { + if _, err := c.Connection(); err != nil { + return fmt.Errorf("error verifying connection: %s", err) + } + + if err := c.session.Ping(); err != nil { + return fmt.Errorf("error verifying connection: %s", err) + } + } + + return nil +} + +// Connection creates a database connection. +func (c *mongoDBConnectionProducer) Connection() (interface{}, error) { + if !c.Initialized { + return nil, connutil.ErrNotInitialized + } + + if c.session != nil { + return c.session, nil + } + + dialInfo, err := parseMongoURL(c.ConnectionURL) + if err != nil { + return nil, err + } + + c.session, err = mgo.DialWithInfo(dialInfo) + if err != nil { + return nil, err + } + c.session.SetSyncTimeout(1 * time.Minute) + c.session.SetSocketTimeout(1 * time.Minute) + + return nil, nil +} + +// Close terminates the database connection. 
+func (c *mongoDBConnectionProducer) Close() error { + c.Lock() + defer c.Unlock() + + if c.session != nil { + c.session.Close() + } + + c.session = nil + + return nil +} + +func parseMongoURL(rawURL string) (*mgo.DialInfo, error) { + url, err := url.Parse(rawURL) + if err != nil { + return nil, err + } + + info := mgo.DialInfo{ + Addrs: strings.Split(url.Host, ","), + Database: strings.TrimPrefix(url.Path, "/"), + Timeout: 10 * time.Second, + } + + if url.User != nil { + info.Username = url.User.Username() + info.Password, _ = url.User.Password() + } + + query := url.Query() + for key, values := range query { + var value string + if len(values) > 0 { + value = values[0] + } + + switch key { + case "authSource": + info.Source = value + case "authMechanism": + info.Mechanism = value + case "gssapiServiceName": + info.Service = value + case "replicaSet": + info.ReplicaSetName = value + case "maxPoolSize": + poolLimit, err := strconv.Atoi(value) + if err != nil { + return nil, errors.New("bad value for maxPoolSize: " + value) + } + info.PoolLimit = poolLimit + case "ssl": + // Unfortunately, mgo doesn't support the ssl parameter in its MongoDB URI parsing logic, so we have to handle that + // ourselves. 
See https://github.com/go-mgo/mgo/issues/84 + ssl, err := strconv.ParseBool(value) + if err != nil { + return nil, errors.New("bad value for ssl: " + value) + } + if ssl { + info.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) { + return tls.Dial("tcp", addr.String(), &tls.Config{}) + } + } + case "connect": + if value == "direct" { + info.Direct = true + break + } + if value == "replicaSet" { + break + } + fallthrough + default: + return nil, errors.New("unsupported connection URL option: " + key + "=" + value) + } + } + + return &info, nil +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb-database-plugin/main.go new file mode 100644 index 0000000..eedb0d0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb-database-plugin/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/plugins/database/mongodb" +) + +func main() { + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + err := mongodb.Run(apiClientMeta.GetTLSConfig()) + if err != nil { + log.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb.go new file mode 100644 index 0000000..52671da --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb.go @@ -0,0 +1,204 @@ +package mongodb + +import ( + "io" + "strings" + "time" + + "encoding/json" + + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/plugins" + "github.com/hashicorp/vault/plugins/helper/database/connutil" + 
"github.com/hashicorp/vault/plugins/helper/database/credsutil" + "github.com/hashicorp/vault/plugins/helper/database/dbutil" + "gopkg.in/mgo.v2" +) + +const mongoDBTypeName = "mongodb" + +// MongoDB is an implementation of Database interface +type MongoDB struct { + connutil.ConnectionProducer + credsutil.CredentialsProducer +} + +// New returns a new MongoDB instance +func New() (interface{}, error) { + connProducer := &mongoDBConnectionProducer{} + connProducer.Type = mongoDBTypeName + + credsProducer := &credsutil.SQLCredentialsProducer{ + DisplayNameLen: 15, + RoleNameLen: 15, + UsernameLen: 100, + Separator: "-", + } + + dbType := &MongoDB{ + ConnectionProducer: connProducer, + CredentialsProducer: credsProducer, + } + return dbType, nil +} + +// Run instantiates a MongoDB object, and runs the RPC server for the plugin +func Run(apiTLSConfig *api.TLSConfig) error { + dbType, err := New() + if err != nil { + return err + } + + plugins.Serve(dbType.(*MongoDB), apiTLSConfig) + + return nil +} + +// Type returns the TypeName for this backend +func (m *MongoDB) Type() (string, error) { + return mongoDBTypeName, nil +} + +func (m *MongoDB) getConnection() (*mgo.Session, error) { + session, err := m.Connection() + if err != nil { + return nil, err + } + + return session.(*mgo.Session), nil +} + +// CreateUser generates the username/password on the underlying secret backend as instructed by +// the CreationStatement provided. The creation statement is a JSON blob that has a db value, +// and an array of roles that accepts a role, and an optional db value pair. 
This array will +// be normalized the format specified in the mongoDB docs: +// https://docs.mongodb.com/manual/reference/command/createUser/#dbcmd.createUser +// +// JSON Example: +// { "db": "admin", "roles": [{ "role": "readWrite" }, {"role": "read", "db": "foo"}] } +func (m *MongoDB) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { + // Grab the lock + m.Lock() + defer m.Unlock() + + if statements.CreationStatements == "" { + return "", "", dbutil.ErrEmptyCreationStatement + } + + session, err := m.getConnection() + if err != nil { + return "", "", err + } + + username, err = m.GenerateUsername(usernameConfig) + if err != nil { + return "", "", err + } + + password, err = m.GeneratePassword() + if err != nil { + return "", "", err + } + + // Unmarshal statements.CreationStatements into mongodbRoles + var mongoCS mongoDBStatement + err = json.Unmarshal([]byte(statements.CreationStatements), &mongoCS) + if err != nil { + return "", "", err + } + + // Default to "admin" if no db provided + if mongoCS.DB == "" { + mongoCS.DB = "admin" + } + + if len(mongoCS.Roles) == 0 { + return "", "", fmt.Errorf("roles array is required in creation statement") + } + + createUserCmd := createUserCommand{ + Username: username, + Password: password, + Roles: mongoCS.Roles.toStandardRolesArray(), + } + + err = session.DB(mongoCS.DB).Run(createUserCmd, nil) + switch { + case err == nil: + case err == io.EOF, strings.Contains(err.Error(), "EOF"): + if err := m.ConnectionProducer.Close(); err != nil { + return "", "", errwrap.Wrapf("error closing EOF'd mongo connection: {{err}}", err) + } + session, err := m.getConnection() + if err != nil { + return "", "", err + } + err = session.DB(mongoCS.DB).Run(createUserCmd, nil) + if err != nil { + return "", "", err + } + default: + return "", "", err + } + + return username, password, nil +} + +// RenewUser is not supported on MongoDB, so 
this is a no-op. +func (m *MongoDB) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error { + // NOOP + return nil +} + +// RevokeUser drops the specified user from the authentication databse. If none is provided +// in the revocation statement, the default "admin" authentication database will be assumed. +func (m *MongoDB) RevokeUser(statements dbplugin.Statements, username string) error { + session, err := m.getConnection() + if err != nil { + return err + } + + // If no revocation statements provided, pass in empty JSON + revocationStatement := statements.RevocationStatements + if revocationStatement == "" { + revocationStatement = `{}` + } + + // Unmarshal revocation statements into mongodbRoles + var mongoCS mongoDBStatement + err = json.Unmarshal([]byte(revocationStatement), &mongoCS) + if err != nil { + return err + } + + db := mongoCS.DB + // If db is not specified, use the default authenticationDatabase "admin" + if db == "" { + db = "admin" + } + + err = session.DB(db).RemoveUser(username) + switch { + case err == nil, err == mgo.ErrNotFound: + case err == io.EOF, strings.Contains(err.Error(), "EOF"): + if err := m.ConnectionProducer.Close(); err != nil { + return errwrap.Wrapf("error closing EOF'd mongo connection: {{err}}", err) + } + session, err := m.getConnection() + if err != nil { + return err + } + err = session.DB(db).RemoveUser(username) + if err != nil { + return err + } + default: + return err + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb_test.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb_test.go new file mode 100644 index 0000000..95f6e90 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/mongodb_test.go @@ -0,0 +1,193 @@ +package mongodb + +import ( + "fmt" + "os" + "testing" + "time" + + mgo "gopkg.in/mgo.v2" + + "strings" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + 
dockertest "gopkg.in/ory-am/dockertest.v3" +) + +const testMongoDBRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }` + +func prepareMongoDBTestContainer(t *testing.T) (cleanup func(), retURL string) { + if os.Getenv("MONGODB_URL") != "" { + return func() {}, os.Getenv("MONGODB_URL") + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + resource, err := pool.Run("mongo", "latest", []string{}) + if err != nil { + t.Fatalf("Could not start local mongo docker container: %s", err) + } + + cleanup = func() { + err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local container: %s", err) + } + } + + retURL = fmt.Sprintf("mongodb://localhost:%s", resource.GetPort("27017/tcp")) + + // exponential backoff-retry + if err = pool.Retry(func() error { + var err error + dialInfo, err := parseMongoURL(retURL) + if err != nil { + return err + } + + session, err := mgo.DialWithInfo(dialInfo) + if err != nil { + return err + } + session.SetSyncTimeout(1 * time.Minute) + session.SetSocketTimeout(1 * time.Minute) + return session.Ping() + }); err != nil { + t.Fatalf("Could not connect to mongo docker container: %s", err) + } + + return +} + +func TestMongoDB_Initialize(t *testing.T) { + cleanup, connURL := prepareMongoDBTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, err := New() + if err != nil { + t.Fatalf("err: %s", err) + } + db := dbRaw.(*MongoDB) + connProducer := db.ConnectionProducer.(*mongoDBConnectionProducer) + + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !connProducer.Initialized { + t.Fatal("Database should be initialized") + } + + err = db.Close() + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestMongoDB_CreateUser(t *testing.T) { + cleanup, connURL := prepareMongoDBTestContainer(t) + defer cleanup() + + 
connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, err := New() + if err != nil { + t.Fatalf("err: %s", err) + } + db := dbRaw.(*MongoDB) + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + CreationStatements: testMongoDBRole, + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } +} + +func TestMongoDB_RevokeUser(t *testing.T) { + cleanup, connURL := prepareMongoDBTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, err := New() + if err != nil { + t.Fatalf("err: %s", err) + } + db := dbRaw.(*MongoDB) + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + CreationStatements: testMongoDBRole, + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Test default revocation statememt + err = db.RevokeUser(statements, username) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } +} + +func testCredsExist(t testing.TB, connURL, username, password string) error { + connURL = strings.Replace(connURL, "mongodb://", 
fmt.Sprintf("mongodb://%s:%s@", username, password), 1) + dialInfo, err := parseMongoURL(connURL) + if err != nil { + return err + } + + session, err := mgo.DialWithInfo(dialInfo) + if err != nil { + return err + } + session.SetSyncTimeout(1 * time.Minute) + session.SetSocketTimeout(1 * time.Minute) + return session.Ping() +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mongodb/util.go b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/util.go new file mode 100644 index 0000000..9004a3c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mongodb/util.go @@ -0,0 +1,39 @@ +package mongodb + +type createUserCommand struct { + Username string `bson:"createUser"` + Password string `bson:"pwd"` + Roles []interface{} `bson:"roles"` +} +type mongodbRole struct { + Role string `json:"role" bson:"role"` + DB string `json:"db" bson:"db"` +} + +type mongodbRoles []mongodbRole + +type mongoDBStatement struct { + DB string `json:"db"` + Roles mongodbRoles `json:"roles"` +} + +// Convert array of role documents like: +// +// [ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ] +// +// into a "standard" MongoDB roles array containing both strings and role documents: +// +// [ "readWrite", { "role": "readWrite", "db": "test" } ] +// +// MongoDB's createUser command accepts the latter. 
+func (roles mongodbRoles) toStandardRolesArray() []interface{} { + var standardRolesArray []interface{} + for _, role := range roles { + if role.DB == "" { + standardRolesArray = append(standardRolesArray, role.Role) + } else { + standardRolesArray = append(standardRolesArray, role) + } + } + return standardRolesArray +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql-database-plugin/main.go new file mode 100644 index 0000000..9201b48 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql-database-plugin/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/plugins/database/mssql" +) + +func main() { + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + err := mssql.Run(apiClientMeta.GetTLSConfig()) + if err != nil { + log.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql.go b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql.go new file mode 100644 index 0000000..7b920c8 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql.go @@ -0,0 +1,321 @@ +package mssql + +import ( + "database/sql" + "fmt" + "strings" + "time" + + _ "github.com/denisenkom/go-mssqldb" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/plugins" + "github.com/hashicorp/vault/plugins/helper/database/connutil" + "github.com/hashicorp/vault/plugins/helper/database/credsutil" + "github.com/hashicorp/vault/plugins/helper/database/dbutil" +) + +const msSQLTypeName = "mssql" + +// MSSQL is an implementation of Database interface +type MSSQL struct { + connutil.ConnectionProducer + 
credsutil.CredentialsProducer +} + +func New() (interface{}, error) { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = msSQLTypeName + + credsProducer := &credsutil.SQLCredentialsProducer{ + DisplayNameLen: 20, + RoleNameLen: 20, + UsernameLen: 128, + Separator: "-", + } + + dbType := &MSSQL{ + ConnectionProducer: connProducer, + CredentialsProducer: credsProducer, + } + + return dbType, nil +} + +// Run instantiates a MSSQL object, and runs the RPC server for the plugin +func Run(apiTLSConfig *api.TLSConfig) error { + dbType, err := New() + if err != nil { + return err + } + + plugins.Serve(dbType.(*MSSQL), apiTLSConfig) + + return nil +} + +// Type returns the TypeName for this backend +func (m *MSSQL) Type() (string, error) { + return msSQLTypeName, nil +} + +func (m *MSSQL) getConnection() (*sql.DB, error) { + db, err := m.Connection() + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +// CreateUser generates the username/password on the underlying MSSQL secret backend as instructed by +// the CreationStatement provided. 
+func (m *MSSQL) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { + // Grab the lock + m.Lock() + defer m.Unlock() + + // Get the connection + db, err := m.getConnection() + if err != nil { + return "", "", err + } + + if statements.CreationStatements == "" { + return "", "", dbutil.ErrEmptyCreationStatement + } + + username, err = m.GenerateUsername(usernameConfig) + if err != nil { + return "", "", err + } + + password, err = m.GeneratePassword() + if err != nil { + return "", "", err + } + + expirationStr, err := m.GenerateExpiration(expiration) + if err != nil { + return "", "", err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return "", "", err + } + defer tx.Rollback() + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{ + "name": username, + "password": password, + "expiration": expirationStr, + })) + if err != nil { + return "", "", err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return "", "", err + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return "", "", err + } + + return username, password, nil +} + +// RenewUser is not supported on MSSQL, so this is a no-op. +func (m *MSSQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error { + // NOOP + return nil +} + +// RevokeUser attempts to drop the specified user. It will first attempt to disable login, +// then kill pending connections from that user, and finally drop the user and login from the +// database instance. 
+func (m *MSSQL) RevokeUser(statements dbplugin.Statements, username string) error { + if statements.RevocationStatements == "" { + return m.revokeUserDefault(username) + } + + // Get connection + db, err := m.getConnection() + if err != nil { + return err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return err + } + defer tx.Rollback() + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(statements.RevocationStatements, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{ + "name": username, + })) + if err != nil { + return err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return err + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +func (m *MSSQL) revokeUserDefault(username string) error { + // Get connection + db, err := m.getConnection() + if err != nil { + return err + } + + // First disable server login + disableStmt, err := db.Prepare(fmt.Sprintf("ALTER LOGIN [%s] DISABLE;", username)) + if err != nil { + return err + } + defer disableStmt.Close() + if _, err := disableStmt.Exec(); err != nil { + return err + } + + // Query for sessions for the login so that we can kill any outstanding + // sessions. 
There cannot be any active sessions before we drop the logins + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + sessionStmt, err := db.Prepare(fmt.Sprintf( + "SELECT session_id FROM sys.dm_exec_sessions WHERE login_name = '%s';", username)) + if err != nil { + return err + } + defer sessionStmt.Close() + + sessionRows, err := sessionStmt.Query() + if err != nil { + return err + } + defer sessionRows.Close() + + var revokeStmts []string + for sessionRows.Next() { + var sessionID int + err = sessionRows.Scan(&sessionID) + if err != nil { + return err + } + revokeStmts = append(revokeStmts, fmt.Sprintf("KILL %d;", sessionID)) + } + + // Query for database users using undocumented stored procedure for now since + // it is the easiest way to get this information; + // we need to drop the database users before we can drop the login and the role + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + stmt, err := db.Prepare(fmt.Sprintf("EXEC master.dbo.sp_msloginmappings '%s';", username)) + if err != nil { + return err + } + defer stmt.Close() + + rows, err := stmt.Query() + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + var loginName, dbName, qUsername string + var aliasName sql.NullString + err = rows.Scan(&loginName, &dbName, &qUsername, &aliasName) + if err != nil { + return err + } + revokeStmts = append(revokeStmts, fmt.Sprintf(dropUserSQL, dbName, username, username)) + } + + // we do not stop on error, as we want to remove as + // many permissions as possible right now + var lastStmtError error + for _, query := range revokeStmts { + stmt, err := db.Prepare(query) + if err != nil { + lastStmtError = err + continue + } + defer stmt.Close() + _, err = stmt.Exec() + if err != nil { + lastStmtError = err + } + } + + // can't drop if not all database users are dropped + if rows.Err() != 
nil { + return fmt.Errorf("cound not generate sql statements for all rows: %s", rows.Err()) + } + if lastStmtError != nil { + return fmt.Errorf("could not perform all sql statements: %s", lastStmtError) + } + + // Drop this login + stmt, err = db.Prepare(fmt.Sprintf(dropLoginSQL, username, username)) + if err != nil { + return err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return err + } + + return nil +} + +const dropUserSQL = ` +USE [%s] +IF EXISTS + (SELECT name + FROM sys.database_principals + WHERE name = N'%s') +BEGIN + DROP USER [%s] +END +` + +const dropLoginSQL = ` +IF EXISTS + (SELECT name + FROM master.sys.server_principals + WHERE name = N'%s') +BEGIN + DROP LOGIN [%s] +END +` diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql_test.go b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql_test.go new file mode 100644 index 0000000..5a00890 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mssql/mssql_test.go @@ -0,0 +1,188 @@ +package mssql + +import ( + "database/sql" + "fmt" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/plugins/helper/database/connutil" +) + +var ( + testMSQLImagePull sync.Once +) + +func TestMSSQL_Initialize(t *testing.T) { + if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + return + } + connURL := os.Getenv("MSSQL_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*MSSQL) + + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer) + if !connProducer.Initialized { + t.Fatal("Database should be initalized") + } + + err = db.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test decoding a string value for max_open_connections + 
connectionDetails = map[string]interface{}{ + "connection_url": connURL, + "max_open_connections": "5", + } + + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestMSSQL_CreateUser(t *testing.T) { + if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + return + } + connURL := os.Getenv("MSSQL_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*MSSQL) + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + // Test with no configured Creation Statememt + _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute)) + if err == nil { + t.Fatal("Expected error when no creation statement is provided") + } + + statements := dbplugin.Statements{ + CreationStatements: testMSSQLRole, + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } +} + +func TestMSSQL_RevokeUser(t *testing.T) { + if os.Getenv("MSSQL_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + return + } + connURL := os.Getenv("MSSQL_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*MSSQL) + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + CreationStatements: testMSSQLRole, + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second)) + if err != nil { + t.Fatalf("err: 
%s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Test default revoke statememts + err = db.RevokeUser(statements, username) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } + + username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Test custom revoke statememt + statements.RevocationStatements = testMSSQLDrop + err = db.RevokeUser(statements, username) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } +} + +func testCredsExist(t testing.TB, connURL, username, password string) error { + // Log in with the new creds + parts := strings.Split(connURL, "@") + connURL = fmt.Sprintf("sqlserver://%s:%s@%s", username, password, parts[1]) + db, err := sql.Open("mssql", connURL) + if err != nil { + return err + } + defer db.Close() + return db.Ping() +} + +const testMSSQLRole = ` +CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}'; +CREATE USER [{{name}}] FOR LOGIN [{{name}}]; +GRANT SELECT, INSERT, UPDATE, DELETE ON SCHEMA::dbo TO [{{name}}];` + +const testMSSQLDrop = ` +DROP USER [{{name}}]; +DROP LOGIN [{{name}}]; +` diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-database-plugin/main.go new file mode 100644 index 0000000..917f1b3 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-database-plugin/main.go @@ -0,0 +1,21 @@ +package main + 
+import ( + "log" + "os" + + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/plugins/database/mysql" +) + +func main() { + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + err := mysql.Run(apiClientMeta.GetTLSConfig()) + if err != nil { + log.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-legacy-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-legacy-database-plugin/main.go new file mode 100644 index 0000000..2b950e0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql-legacy-database-plugin/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/plugins/database/mysql" +) + +func main() { + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + err := mysql.RunLegacy(apiClientMeta.GetTLSConfig()) + if err != nil { + log.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go new file mode 100644 index 0000000..297941c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go @@ -0,0 +1,218 @@ +package mysql + +import ( + "database/sql" + "strings" + "time" + + _ "github.com/go-sql-driver/mysql" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/plugins" + "github.com/hashicorp/vault/plugins/helper/database/connutil" + "github.com/hashicorp/vault/plugins/helper/database/credsutil" + "github.com/hashicorp/vault/plugins/helper/database/dbutil" +) + +const ( + defaultMysqlRevocationStmts = ` + REVOKE ALL PRIVILEGES, GRANT OPTION FROM 
'{{name}}'@'%'; + DROP USER '{{name}}'@'%' + ` + mySQLTypeName = "mysql" +) + +var ( + MetadataLen int = 10 + LegacyMetadataLen int = 4 + UsernameLen int = 32 + LegacyUsernameLen int = 16 +) + +type MySQL struct { + connutil.ConnectionProducer + credsutil.CredentialsProducer +} + +// New implements builtinplugins.BuiltinFactory +func New(displayNameLen, roleNameLen, usernameLen int) func() (interface{}, error) { + return func() (interface{}, error) { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = mySQLTypeName + + credsProducer := &credsutil.SQLCredentialsProducer{ + DisplayNameLen: displayNameLen, + RoleNameLen: roleNameLen, + UsernameLen: usernameLen, + Separator: "-", + } + + dbType := &MySQL{ + ConnectionProducer: connProducer, + CredentialsProducer: credsProducer, + } + + return dbType, nil + } +} + +// Run instantiates a MySQL object, and runs the RPC server for the plugin +func Run(apiTLSConfig *api.TLSConfig) error { + return runCommon(false, apiTLSConfig) +} + +// Run instantiates a MySQL object, and runs the RPC server for the plugin +func RunLegacy(apiTLSConfig *api.TLSConfig) error { + return runCommon(true, apiTLSConfig) +} + +func runCommon(legacy bool, apiTLSConfig *api.TLSConfig) error { + var f func() (interface{}, error) + if legacy { + f = New(credsutil.NoneLength, LegacyMetadataLen, LegacyUsernameLen) + } else { + f = New(MetadataLen, MetadataLen, UsernameLen) + } + dbType, err := f() + if err != nil { + return err + } + + plugins.Serve(dbType.(*MySQL), apiTLSConfig) + + return nil +} + +func (m *MySQL) Type() (string, error) { + return mySQLTypeName, nil +} + +func (m *MySQL) getConnection() (*sql.DB, error) { + db, err := m.Connection() + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +func (m *MySQL) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { + // Grab the lock + m.Lock() + defer 
m.Unlock() + + // Get the connection + db, err := m.getConnection() + if err != nil { + return "", "", err + } + + if statements.CreationStatements == "" { + return "", "", dbutil.ErrEmptyCreationStatement + } + + username, err = m.GenerateUsername(usernameConfig) + if err != nil { + return "", "", err + } + + password, err = m.GeneratePassword() + if err != nil { + return "", "", err + } + + expirationStr, err := m.GenerateExpiration(expiration) + if err != nil { + return "", "", err + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return "", "", err + } + defer tx.Rollback() + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{ + "name": username, + "password": password, + "expiration": expirationStr, + })) + if err != nil { + return "", "", err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return "", "", err + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return "", "", err + } + + return username, password, nil +} + +// NOOP +func (m *MySQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error { + return nil +} + +func (m *MySQL) RevokeUser(statements dbplugin.Statements, username string) error { + // Grab the read lock + m.Lock() + defer m.Unlock() + + // Get the connection + db, err := m.getConnection() + if err != nil { + return err + } + + revocationStmts := statements.RevocationStatements + // Use a default SQL statement for revocation if one cannot be fetched from the role + if revocationStmts == "" { + revocationStmts = defaultMysqlRevocationStmts + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return err + } + defer tx.Rollback() + + for _, query := range strutil.ParseArbitraryStringSlice(revocationStmts, 
";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + // This is not a prepared statement because not all commands are supported + // 1295: This command is not supported in the prepared statement protocol yet + // Reference https://mariadb.com/kb/en/mariadb/prepare-statement/ + query = strings.Replace(query, "{{name}}", username, -1) + _, err = tx.Exec(query) + if err != nil { + return err + } + + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql_test.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql_test.go new file mode 100644 index 0000000..851bd02 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql_test.go @@ -0,0 +1,326 @@ +package mysql + +import ( + "database/sql" + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/plugins/helper/database/connutil" + "github.com/hashicorp/vault/plugins/helper/database/credsutil" + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +func prepareMySQLTestContainer(t *testing.T) (cleanup func(), retURL string) { + if os.Getenv("MYSQL_URL") != "" { + return func() {}, os.Getenv("MYSQL_URL") + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + resource, err := pool.Run("mysql", "latest", []string{"MYSQL_ROOT_PASSWORD=secret"}) + if err != nil { + t.Fatalf("Could not start local MySQL docker container: %s", err) + } + + cleanup = func() { + err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local container: %s", err) + } + } + + retURL = fmt.Sprintf("root:secret@(localhost:%s)/mysql?parseTime=true", resource.GetPort("3306/tcp")) + + // exponential backoff-retry + if err = pool.Retry(func() error { + var err error + var db *sql.DB + db, err = 
sql.Open("mysql", retURL) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + t.Fatalf("Could not connect to MySQL docker container: %s", err) + } + + return +} + +func prepareMySQLLegacyTestContainer(t *testing.T) (cleanup func(), retURL string) { + if os.Getenv("MYSQL_URL") != "" { + return func() {}, os.Getenv("MYSQL_URL") + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + // Mysql 5.6 is the last MySQL version to limit usernames to 16 characters. + resource, err := pool.Run("mysql", "5.6", []string{"MYSQL_ROOT_PASSWORD=secret"}) + if err != nil { + t.Fatalf("Could not start local MySQL docker container: %s", err) + } + + cleanup = func() { + err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local container: %s", err) + } + } + + retURL = fmt.Sprintf("root:secret@(localhost:%s)/mysql?parseTime=true", resource.GetPort("3306/tcp")) + + // exponential backoff-retry + if err = pool.Retry(func() error { + var err error + var db *sql.DB + db, err = sql.Open("mysql", retURL) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + t.Fatalf("Could not connect to MySQL docker container: %s", err) + } + + return +} + +func TestMySQL_Initialize(t *testing.T) { + cleanup, connURL := prepareMySQLTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + f := New(MetadataLen, MetadataLen, UsernameLen) + dbRaw, _ := f() + db := dbRaw.(*MySQL) + connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer) + + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !connProducer.Initialized { + t.Fatal("Database should be initalized") + } + + err = db.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test decoding a string value for max_open_connections + connectionDetails = map[string]interface{}{ + 
"connection_url": connURL, + "max_open_connections": "5", + } + + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestMySQL_CreateUser(t *testing.T) { + cleanup, connURL := prepareMySQLTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + f := New(MetadataLen, MetadataLen, UsernameLen) + dbRaw, _ := f() + db := dbRaw.(*MySQL) + + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test-long-displayname", + RoleName: "test-long-rolename", + } + + // Test with no configured Creation Statememt + _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute)) + if err == nil { + t.Fatal("Expected error when no creation statement is provided") + } + + statements := dbplugin.Statements{ + CreationStatements: testMySQLRoleWildCard, + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Test a second time to make sure usernames don't collide + username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } +} + +func TestMySQL_CreateUser_Legacy(t *testing.T) { + cleanup, connURL := prepareMySQLLegacyTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + f := New(credsutil.NoneLength, LegacyMetadataLen, LegacyUsernameLen) + dbRaw, _ := f() + db := dbRaw.(*MySQL) + + err := 
db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test-long-displayname", + RoleName: "test-long-rolename", + } + + // Test with no configured Creation Statememt + _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute)) + if err == nil { + t.Fatal("Expected error when no creation statement is provided") + } + + statements := dbplugin.Statements{ + CreationStatements: testMySQLRoleWildCard, + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Test a second time to make sure usernames don't collide + username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } +} + +func TestMySQL_RevokeUser(t *testing.T) { + cleanup, connURL := prepareMySQLTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + f := New(MetadataLen, MetadataLen, UsernameLen) + dbRaw, _ := f() + db := dbRaw.(*MySQL) + + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + CreationStatements: testMySQLRoleWildCard, + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could 
not connect with new credentials: %s", err) + } + + // Test default revoke statememts + err = db.RevokeUser(statements, username) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } + + statements.CreationStatements = testMySQLRoleWildCard + username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Test custom revoke statements + statements.RevocationStatements = testMySQLRevocationSQL + err = db.RevokeUser(statements, username) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } +} + +func testCredsExist(t testing.TB, connURL, username, password string) error { + // Log in with the new creds + connURL = strings.Replace(connURL, "root:secret", fmt.Sprintf("%s:%s", username, password), 1) + db, err := sql.Open("mysql", connURL) + if err != nil { + return err + } + defer db.Close() + return db.Ping() +} + +const testMySQLRoleWildCard = ` +CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; +GRANT SELECT ON *.* TO '{{name}}'@'%'; +` +const testMySQLRevocationSQL = ` +REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%'; +DROP USER '{{name}}'@'%'; +` diff --git a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql-database-plugin/main.go b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql-database-plugin/main.go new file mode 100644 index 0000000..a3b1789 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql-database-plugin/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "log" + "os" + + 
"github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/plugins/database/postgresql" +) + +func main() { + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + err := postgresql.Run(apiClientMeta.GetTLSConfig()) + if err != nil { + log.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go new file mode 100644 index 0000000..93fa8a8 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go @@ -0,0 +1,372 @@ +package postgresql + +import ( + "database/sql" + "fmt" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/plugins" + "github.com/hashicorp/vault/plugins/helper/database/connutil" + "github.com/hashicorp/vault/plugins/helper/database/credsutil" + "github.com/hashicorp/vault/plugins/helper/database/dbutil" + "github.com/lib/pq" + _ "github.com/lib/pq" +) + +const ( + postgreSQLTypeName string = "postgres" + defaultPostgresRenewSQL = ` +ALTER ROLE "{{name}}" VALID UNTIL '{{expiration}}'; +` +) + +// New implements builtinplugins.BuiltinFactory +func New() (interface{}, error) { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = postgreSQLTypeName + + credsProducer := &credsutil.SQLCredentialsProducer{ + DisplayNameLen: 8, + RoleNameLen: 8, + UsernameLen: 63, + Separator: "-", + } + + dbType := &PostgreSQL{ + ConnectionProducer: connProducer, + CredentialsProducer: credsProducer, + } + + return dbType, nil +} + +// Run instantiates a PostgreSQL object, and runs the RPC server for the plugin +func Run(apiTLSConfig *api.TLSConfig) error { + dbType, err := New() + if err != nil { + return err + } + + plugins.Serve(dbType.(*PostgreSQL), 
apiTLSConfig) + + return nil +} + +type PostgreSQL struct { + connutil.ConnectionProducer + credsutil.CredentialsProducer +} + +func (p *PostgreSQL) Type() (string, error) { + return postgreSQLTypeName, nil +} + +func (p *PostgreSQL) getConnection() (*sql.DB, error) { + db, err := p.Connection() + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +func (p *PostgreSQL) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { + if statements.CreationStatements == "" { + return "", "", dbutil.ErrEmptyCreationStatement + } + + // Grab the lock + p.Lock() + defer p.Unlock() + + username, err = p.GenerateUsername(usernameConfig) + if err != nil { + return "", "", err + } + + password, err = p.GeneratePassword() + if err != nil { + return "", "", err + } + + expirationStr, err := p.GenerateExpiration(expiration) + if err != nil { + return "", "", err + } + + // Get the connection + db, err := p.getConnection() + if err != nil { + return "", "", err + + } + + // Start a transaction + tx, err := db.Begin() + if err != nil { + return "", "", err + + } + defer func() { + tx.Rollback() + }() + // Return the secret + + // Execute each query + for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{ + "name": username, + "password": password, + "expiration": expirationStr, + })) + if err != nil { + return "", "", err + + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return "", "", err + + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return "", "", err + + } + + return username, password, nil +} + +func (p *PostgreSQL) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error { + p.Lock() + defer 
p.Unlock() + + renewStmts := statements.RenewStatements + if renewStmts == "" { + renewStmts = defaultPostgresRenewSQL + } + + db, err := p.getConnection() + if err != nil { + return err + } + + tx, err := db.Begin() + if err != nil { + return err + } + defer func() { + tx.Rollback() + }() + + expirationStr, err := p.GenerateExpiration(expiration) + if err != nil { + return err + } + + for _, query := range strutil.ParseArbitraryStringSlice(renewStmts, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{ + "name": username, + "expiration": expirationStr, + })) + if err != nil { + return err + } + + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return err + } + } + + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +func (p *PostgreSQL) RevokeUser(statements dbplugin.Statements, username string) error { + // Grab the lock + p.Lock() + defer p.Unlock() + + if statements.RevocationStatements == "" { + return p.defaultRevokeUser(username) + } + + return p.customRevokeUser(username, statements.RevocationStatements) +} + +func (p *PostgreSQL) customRevokeUser(username, revocationStmts string) error { + db, err := p.getConnection() + if err != nil { + return err + } + + tx, err := db.Begin() + if err != nil { + return err + } + defer func() { + tx.Rollback() + }() + + for _, query := range strutil.ParseArbitraryStringSlice(revocationStmts, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{ + "name": username, + })) + if err != nil { + return err + } + defer stmt.Close() + + if _, err := stmt.Exec(); err != nil { + return err + } + } + + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +func (p *PostgreSQL) defaultRevokeUser(username string) error { + db, err := p.getConnection() + if err != nil { + return err + } 
+ + // Check if the role exists + var exists bool + err = db.QueryRow("SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists) + if err != nil && err != sql.ErrNoRows { + return err + } + + if exists == false { + return nil + } + + // Query for permissions; we need to revoke permissions before we can drop + // the role + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + stmt, err := db.Prepare("SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;") + if err != nil { + return err + } + defer stmt.Close() + + rows, err := stmt.Query(username) + if err != nil { + return err + } + defer rows.Close() + + const initialNumRevocations = 16 + revocationStmts := make([]string, 0, initialNumRevocations) + for rows.Next() { + var schema string + err = rows.Scan(&schema) + if err != nil { + // keep going; remove as many permissions as possible right now + continue + } + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`, + pq.QuoteIdentifier(schema), + pq.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE USAGE ON SCHEMA %s FROM %s;`, + pq.QuoteIdentifier(schema), + pq.QuoteIdentifier(username))) + } + + // for good measure, revoke all privileges and usage on schema public + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`, + pq.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + "REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM %s;", + pq.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + "REVOKE USAGE ON SCHEMA public FROM %s;", + pq.QuoteIdentifier(username))) + + // get the current database name so we can issue a REVOKE CONNECT for + // this username + var dbname 
sql.NullString + if err := db.QueryRow("SELECT current_database();").Scan(&dbname); err != nil { + return err + } + + if dbname.Valid { + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE CONNECT ON DATABASE %s FROM %s;`, + pq.QuoteIdentifier(dbname.String), + pq.QuoteIdentifier(username))) + } + + // again, here, we do not stop on error, as we want to remove as + // many permissions as possible right now + var lastStmtError error + for _, query := range revocationStmts { + stmt, err := db.Prepare(query) + if err != nil { + lastStmtError = err + continue + } + defer stmt.Close() + _, err = stmt.Exec() + if err != nil { + lastStmtError = err + } + } + + // can't drop if not all privileges are revoked + if rows.Err() != nil { + return fmt.Errorf("could not generate revocation statements for all rows: %s", rows.Err()) + } + if lastStmtError != nil { + return fmt.Errorf("could not perform all revocation statements: %s", lastStmtError) + } + + // Drop this user + stmt, err = db.Prepare(fmt.Sprintf( + `DROP ROLE IF EXISTS %s;`, pq.QuoteIdentifier(username))) + if err != nil { + return err + } + defer stmt.Close() + if _, err := stmt.Exec(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql_test.go b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql_test.go new file mode 100644 index 0000000..a74abb4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql_test.go @@ -0,0 +1,363 @@ +package postgresql + +import ( + "database/sql" + "fmt" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/plugins/helper/database/connutil" + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +var ( + testPostgresImagePull sync.Once +) + +func preparePostgresTestContainer(t *testing.T) (cleanup func(), retURL string) { + if os.Getenv("PG_URL") != 
"" { + return func() {}, os.Getenv("PG_URL") + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + resource, err := pool.Run("postgres", "latest", []string{"POSTGRES_PASSWORD=secret", "POSTGRES_DB=database"}) + if err != nil { + t.Fatalf("Could not start local PostgreSQL docker container: %s", err) + } + + cleanup = func() { + err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local container: %s", err) + } + } + + retURL = fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp")) + + // exponential backoff-retry + if err = pool.Retry(func() error { + var err error + var db *sql.DB + db, err = sql.Open("postgres", retURL) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + t.Fatalf("Could not connect to PostgreSQL docker container: %s", err) + } + + return +} + +func TestPostgreSQL_Initialize(t *testing.T) { + cleanup, connURL := preparePostgresTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + "max_open_connections": 5, + } + + dbRaw, _ := New() + db := dbRaw.(*PostgreSQL) + + connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer) + + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !connProducer.Initialized { + t.Fatal("Database should be initalized") + } + + err = db.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test decoding a string value for max_open_connections + connectionDetails = map[string]interface{}{ + "connection_url": connURL, + "max_open_connections": "5", + } + + err = db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + +} + +func TestPostgreSQL_CreateUser(t *testing.T) { + cleanup, connURL := preparePostgresTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + 
"connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*PostgreSQL) + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + // Test with no configured Creation Statememt + _, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Minute)) + if err == nil { + t.Fatal("Expected error when no creation statement is provided") + } + + statements := dbplugin.Statements{ + CreationStatements: testPostgresRole, + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + statements.CreationStatements = testPostgresReadOnlyRole + username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } +} + +func TestPostgreSQL_RenewUser(t *testing.T) { + cleanup, connURL := preparePostgresTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*PostgreSQL) + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + CreationStatements: testPostgresRole, + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + 
t.Fatalf("Could not connect with new credentials: %s", err) + } + + err = db.RenewUser(statements, username, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Sleep longer than the inital expiration time + time.Sleep(2 * time.Second) + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + statements.RenewStatements = defaultPostgresRenewSQL + username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + err = db.RenewUser(statements, username, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Sleep longer than the inital expiration time + time.Sleep(2 * time.Second) + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + +} + +func TestPostgreSQL_RevokeUser(t *testing.T) { + cleanup, connURL := preparePostgresTestContainer(t) + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + dbRaw, _ := New() + db := dbRaw.(*PostgreSQL) + err := db.Initialize(connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + statements := dbplugin.Statements{ + CreationStatements: testPostgresRole, + } + + usernameConfig := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Test default revoke statememts + err = db.RevokeUser(statements, 
username) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } + + username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(2*time.Second)) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err = testCredsExist(t, connURL, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Test custom revoke statements + statements.RevocationStatements = defaultPostgresRevocationSQL + err = db.RevokeUser(statements, username) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := testCredsExist(t, connURL, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } +} + +func testCredsExist(t testing.TB, connURL, username, password string) error { + // Log in with the new creds + connURL = strings.Replace(connURL, "postgres:secret", fmt.Sprintf("%s:%s", username, password), 1) + db, err := sql.Open("postgres", connURL) + if err != nil { + return err + } + defer db.Close() + return db.Ping() +} + +const testPostgresRole = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` + +const testPostgresReadOnlyRole = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; +GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}"; +GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}"; +` + +const testPostgresBlockStatementRole = ` +DO $$ +BEGIN + IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN + CREATE ROLE "foo-role"; + CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role"; + ALTER ROLE "foo-role" SET search_path = foo; + GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role"; + GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL TABLES IN 
SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role"; + END IF; +END +$$ + +CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; +GRANT "foo-role" TO "{{name}}"; +ALTER ROLE "{{name}}" SET search_path = foo; +GRANT CONNECT ON DATABASE "postgres" TO "{{name}}"; +` + +var testPostgresBlockStatementRoleSlice = []string{ + ` +DO $$ +BEGIN + IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN + CREATE ROLE "foo-role"; + CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role"; + ALTER ROLE "foo-role" SET search_path = foo; + GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role"; + GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role"; + END IF; +END +$$ +`, + `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`, + `GRANT "foo-role" TO "{{name}}";`, + `ALTER ROLE "{{name}}" SET search_path = foo;`, + `GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`, +} + +const defaultPostgresRevocationSQL = ` +REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{name}}"; +REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{name}}"; +REVOKE USAGE ON SCHEMA public FROM "{{name}}"; + +DROP ROLE IF EXISTS "{{name}}"; +` diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go new file mode 100644 index 0000000..d36d571 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go @@ -0,0 +1,21 @@ +package connutil + +import ( + "errors" + "sync" +) + +var ( + ErrNotInitialized = errors.New("connection has not been 
initalized") +) + +// ConnectionProducer can be used as an embeded interface in the Database +// definition. It implements the methods dealing with individual database +// connections and is used in all the builtin database types. +type ConnectionProducer interface { + Close() error + Initialize(map[string]interface{}, bool) error + Connection() (interface{}, error) + + sync.Locker +} diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go new file mode 100644 index 0000000..c325cbc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go @@ -0,0 +1,138 @@ +package connutil + +import ( + "database/sql" + "fmt" + "strings" + "sync" + "time" + + "github.com/hashicorp/vault/helper/parseutil" + "github.com/mitchellh/mapstructure" +) + +// SQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases +type SQLConnectionProducer struct { + ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"` + MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"` + MaxIdleConnections int `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"` + MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" structs:"max_connection_lifetime" mapstructure:"max_connection_lifetime"` + + Type string + maxConnectionLifetime time.Duration + Initialized bool + db *sql.DB + sync.Mutex +} + +func (c *SQLConnectionProducer) Initialize(conf map[string]interface{}, verifyConnection bool) error { + c.Lock() + defer c.Unlock() + + err := mapstructure.WeakDecode(conf, c) + if err != nil { + return err + } + + if len(c.ConnectionURL) == 0 { + return fmt.Errorf("connection_url cannot be empty") + } + + if c.MaxOpenConnections == 0 { + c.MaxOpenConnections = 2 + } 
+ + if c.MaxIdleConnections == 0 { + c.MaxIdleConnections = c.MaxOpenConnections + } + if c.MaxIdleConnections > c.MaxOpenConnections { + c.MaxIdleConnections = c.MaxOpenConnections + } + if c.MaxConnectionLifetimeRaw == nil { + c.MaxConnectionLifetimeRaw = "0s" + } + + c.maxConnectionLifetime, err = parseutil.ParseDurationSecond(c.MaxConnectionLifetimeRaw) + if err != nil { + return fmt.Errorf("invalid max_connection_lifetime: %s", err) + } + + // Set initialized to true at this point since all fields are set, + // and the connection can be established at a later time. + c.Initialized = true + + if verifyConnection { + if _, err := c.Connection(); err != nil { + return fmt.Errorf("error verifying connection: %s", err) + } + + if err := c.db.Ping(); err != nil { + return fmt.Errorf("error verifying connection: %s", err) + } + } + + return nil +} + +func (c *SQLConnectionProducer) Connection() (interface{}, error) { + if !c.Initialized { + return nil, ErrNotInitialized + } + + // If we already have a DB, test it and return + if c.db != nil { + if err := c.db.Ping(); err == nil { + return c.db, nil + } + // If the ping was unsuccessful, close it and ignore errors as we'll be + // reestablishing anyways + c.db.Close() + } + + // For mssql backend, switch to sqlserver instead + dbType := c.Type + if c.Type == "mssql" { + dbType = "sqlserver" + } + + // Otherwise, attempt to make connection + conn := c.ConnectionURL + + // Ensure timezone is set to UTC for all the conenctions + if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") { + if strings.Contains(conn, "?") { + conn += "&timezone=utc" + } else { + conn += "?timezone=utc" + } + } + + var err error + c.db, err = sql.Open(dbType, conn) + if err != nil { + return nil, err + } + + // Set some connection pool settings. We don't need much of this, + // since the request rate shouldn't be high. 
+ c.db.SetMaxOpenConns(c.MaxOpenConnections) + c.db.SetMaxIdleConns(c.MaxIdleConnections) + c.db.SetConnMaxLifetime(c.maxConnectionLifetime) + + return c.db, nil +} + +// Close attempts to close the connection +func (c *SQLConnectionProducer) Close() error { + // Grab the write lock + c.Lock() + defer c.Unlock() + + if c.db != nil { + c.db.Close() + } + + c.db = nil + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go new file mode 100644 index 0000000..8ce3b5e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go @@ -0,0 +1,87 @@ +package credsutil + +import ( + "crypto/rand" + "time" + + "fmt" + + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" +) + +// CredentialsProducer can be used as an embeded interface in the Database +// definition. It implements the methods for generating user information for a +// particular database type and is used in all the builtin database types. +type CredentialsProducer interface { + GenerateUsername(usernameConfig dbplugin.UsernameConfig) (string, error) + GeneratePassword() (string, error) + GenerateExpiration(ttl time.Time) (string, error) +} + +const ( + reqStr = `A1a-` + minStrLen = 10 +) + +// RandomAlphaNumeric returns a random string of characters [A-Za-z0-9-] +// of the provided length. The string generated takes up to 4 characters +// of space that are predefined and prepended to ensure password +// character requirements. It also requires a min length of 10 characters. 
+func RandomAlphaNumeric(length int, prependA1a bool) (string, error) { + if length < minStrLen { + return "", fmt.Errorf("minimum length of %d is required", minStrLen) + } + + var size int + var retBytes []byte + if prependA1a { + size = len(reqStr) + retBytes = make([]byte, length-size) + // Enforce alphanumeric requirements + retBytes = append([]byte(reqStr), retBytes...) + } else { + retBytes = make([]byte, length) + } + + for size < length { + // Extend the len of the random byte slice to lower odds of having to + // re-roll. + c := length + len(reqStr) + bArr := make([]byte, c) + _, err := rand.Read(bArr) + if err != nil { + return "", err + } + + for _, b := range bArr { + if size == length { + break + } + + /** + * Each byte will be in [0, 256), but we only care about: + * + * [48, 57] 0-9 + * [65, 90] A-Z + * [97, 122] a-z + * + * Which means that the highest bit will always be zero, since the last byte with high bit + * zero is 01111111 = 127 which is higher than 122. Lower our odds of having to re-roll a byte by + * dividing by two (right bit shift of 1). 
+ */ + + b = b >> 1 + // Bitwise OR to set min to 48, further reduces re-roll + b |= 0x30 + + // The byte is any of 0-9 A-Z a-z + byteIsAllowable := (b >= 48 && b <= 57) || (b >= 65 && b <= 90) || (b >= 97 && b <= 122) + if byteIsAllowable { + retBytes[size] = b + size++ + } + } + } + + return string(retBytes), nil +} diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil_test.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil_test.go new file mode 100644 index 0000000..e094719 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil_test.go @@ -0,0 +1,40 @@ +package credsutil + +import ( + "strings" + "testing" +) + +func TestRandomAlphaNumeric(t *testing.T) { + s, err := RandomAlphaNumeric(10, true) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + if len(s) != 10 { + t.Fatalf("Unexpected length of string, expected 10, got string: %s", s) + } + + s, err = RandomAlphaNumeric(20, true) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + if len(s) != 20 { + t.Fatalf("Unexpected length of string, expected 20, got string: %s", s) + } + + if !strings.Contains(s, reqStr) { + t.Fatalf("Expected %s to contain %s", s, reqStr) + } + + s, err = RandomAlphaNumeric(20, false) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + if len(s) != 20 { + t.Fatalf("Unexpected length of string, expected 20, got string: %s", s) + } + + if strings.Contains(s, reqStr) { + t.Fatalf("Expected %s not to contain %s", s, reqStr) + } +} diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go new file mode 100644 index 0000000..af9a746 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go @@ -0,0 +1,72 @@ +package credsutil + +import ( + "fmt" + "time" + + 
"github.com/hashicorp/vault/builtin/logical/database/dbplugin" +) + +const ( + NoneLength int = -1 +) + +// SQLCredentialsProducer implements CredentialsProducer and provides a generic credentials producer for most sql database types. +type SQLCredentialsProducer struct { + DisplayNameLen int + RoleNameLen int + UsernameLen int + Separator string +} + +func (scp *SQLCredentialsProducer) GenerateUsername(config dbplugin.UsernameConfig) (string, error) { + username := "v" + + displayName := config.DisplayName + if scp.DisplayNameLen > 0 && len(displayName) > scp.DisplayNameLen { + displayName = displayName[:scp.DisplayNameLen] + } else if scp.DisplayNameLen == NoneLength { + displayName = "" + } + + if len(displayName) > 0 { + username = fmt.Sprintf("%s%s%s", username, scp.Separator, displayName) + } + + roleName := config.RoleName + if scp.RoleNameLen > 0 && len(roleName) > scp.RoleNameLen { + roleName = roleName[:scp.RoleNameLen] + } else if scp.RoleNameLen == NoneLength { + roleName = "" + } + + if len(roleName) > 0 { + username = fmt.Sprintf("%s%s%s", username, scp.Separator, roleName) + } + + userUUID, err := RandomAlphaNumeric(20, false) + if err != nil { + return "", err + } + + username = fmt.Sprintf("%s%s%s", username, scp.Separator, userUUID) + username = fmt.Sprintf("%s%s%s", username, scp.Separator, fmt.Sprint(time.Now().UTC().Unix())) + if scp.UsernameLen > 0 && len(username) > scp.UsernameLen { + username = username[:scp.UsernameLen] + } + + return username, nil +} + +func (scp *SQLCredentialsProducer) GeneratePassword() (string, error) { + password, err := RandomAlphaNumeric(20, true) + if err != nil { + return "", err + } + + return password, nil +} + +func (scp *SQLCredentialsProducer) GenerateExpiration(ttl time.Time) (string, error) { + return ttl.Format("2006-01-02 15:04:05-0700"), nil +} diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go 
b/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go new file mode 100644 index 0000000..e80273b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go @@ -0,0 +1,20 @@ +package dbutil + +import ( + "errors" + "fmt" + "strings" +) + +var ( + ErrEmptyCreationStatement = errors.New("empty creation statements") +) + +// Query templates a query for us. +func QueryHelper(tpl string, data map[string]string) string { + for k, v := range data { + tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1) + } + + return tpl +} diff --git a/vendor/github.com/hashicorp/vault/plugins/serve.go b/vendor/github.com/hashicorp/vault/plugins/serve.go new file mode 100644 index 0000000..a40fc5b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/plugins/serve.go @@ -0,0 +1,31 @@ +package plugins + +import ( + "fmt" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/database/dbplugin" + "github.com/hashicorp/vault/helper/pluginutil" +) + +// Serve is used to start a plugin's RPC server. It takes an interface that must +// implement a known plugin interface to vault and an optional api.TLSConfig for +// use during the inital unwrap request to vault. The api config is particulary +// useful when vault is setup to require client cert checking. 
+func Serve(plugin interface{}, tlsConfig *api.TLSConfig) { + tlsProvider := pluginutil.VaultPluginTLSProvider(tlsConfig) + + err := pluginutil.OptionallyEnableMlock() + if err != nil { + fmt.Println(err) + return + } + + switch p := plugin.(type) { + case dbplugin.Database: + dbplugin.Serve(p, tlsProvider) + default: + fmt.Println("Unsupported plugin type") + } + +} diff --git a/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile b/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile index 7126b63..c5104f3 100644 --- a/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile +++ b/vendor/github.com/hashicorp/vault/scripts/cross/Dockerfile @@ -1,6 +1,6 @@ # Adapted from tcnksm/dockerfile-gox -- thanks! -FROM debian:jessie +FROM debian:stable RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ curl \ @@ -10,7 +10,7 @@ RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ git mercurial bzr \ && rm -rf /var/lib/apt/lists/* -ENV GOVERSION 1.8.1 +ENV GOVERSION 1.9 RUN mkdir /goroot && mkdir /gopath RUN curl https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz \ | tar xvzf - -C /goroot --strip-components=1 diff --git a/vendor/github.com/hashicorp/vault/scripts/gofmtcheck.sh b/vendor/github.com/hashicorp/vault/scripts/gofmtcheck.sh new file mode 100755 index 0000000..574f4d7 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/scripts/gofmtcheck.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +echo "==> Checking that code complies with gofmt requirements..." + +gofmt_files=$(gofmt -l `find . -name '*.go' | grep -v vendor`) +if [[ -n ${gofmt_files} ]]; then + echo 'gofmt needs running on the following files:' + echo "${gofmt_files}" + echo "You can use the command: \`make fmt\` to reformat code." 
+ exit 1 +fi diff --git a/vendor/github.com/hashicorp/vault/scripts/update_deps.sh b/vendor/github.com/hashicorp/vault/scripts/update_deps.sh index 80241fc..82b85b0 100755 --- a/vendor/github.com/hashicorp/vault/scripts/update_deps.sh +++ b/vendor/github.com/hashicorp/vault/scripts/update_deps.sh @@ -32,4 +32,8 @@ govendor init echo "Fetching deps, will take some time..." govendor fetch +missing +govendor remove github.com/Sirupsen/logrus +cd vendor +find -type f | grep '.go' | xargs sed -i -e 's/Sirupsen/sirupsen/' + echo "Done; to commit run \n\ncd ${GOPATH}/src/github.com/hashicorp/${TOOL}\n" diff --git a/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf b/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf index ba0ee3a..cef4002 100644 --- a/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf +++ b/vendor/github.com/hashicorp/vault/terraform/aws/variables.tf @@ -3,7 +3,7 @@ //------------------------------------------------------------------- variable "download-url" { - default = "https://releases.hashicorp.com/vault/0.7.0/vault_0.7.0_linux_amd64.zip" + default = "https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_linux_amd64.zip" description = "URL to download Vault" } diff --git a/vendor/github.com/hashicorp/vault/vault/acl.go b/vendor/github.com/hashicorp/vault/vault/acl.go index 550e0df..7360178 100644 --- a/vendor/github.com/hashicorp/vault/vault/acl.go +++ b/vendor/github.com/hashicorp/vault/vault/acl.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/armon/go-radix" + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/strutil" "github.com/hashicorp/vault/logical" ) @@ -51,7 +52,11 @@ func NewACL(policies []*Policy) (*ACL, error) { // Check for an existing policy raw, ok := tree.Get(pc.Prefix) if !ok { - tree.Insert(pc.Prefix, pc.Permissions) + clonedPerms, err := pc.Permissions.Clone() + if err != nil { + return nil, errwrap.Wrapf("error cloning ACL permissions: {{err}}", err) + } + tree.Insert(pc.Prefix, 
clonedPerms) continue } @@ -66,15 +71,15 @@ func NewACL(policies []*Policy) (*ACL, error) { case pc.Permissions.CapabilitiesBitmap&DenyCapabilityInt > 0: // If this new policy explicitly denies, only save the deny value - pc.Permissions.CapabilitiesBitmap = DenyCapabilityInt - pc.Permissions.AllowedParameters = nil - pc.Permissions.DeniedParameters = nil + existingPerms.CapabilitiesBitmap = DenyCapabilityInt + existingPerms.AllowedParameters = nil + existingPerms.DeniedParameters = nil goto INSERT default: // Insert the capabilities in this new policy into the existing // value - pc.Permissions.CapabilitiesBitmap = existingPerms.CapabilitiesBitmap | pc.Permissions.CapabilitiesBitmap + existingPerms.CapabilitiesBitmap = existingPerms.CapabilitiesBitmap | pc.Permissions.CapabilitiesBitmap } // Note: In these stanzas, we're preferring minimum lifetimes. So @@ -85,59 +90,58 @@ func NewACL(policies []*Policy) (*ACL, error) { // If we have an existing max, and we either don't have a current // max, or the current is greater than the previous, use the // existing. 
- if existingPerms.MaxWrappingTTL > 0 && - (pc.Permissions.MaxWrappingTTL == 0 || - existingPerms.MaxWrappingTTL < pc.Permissions.MaxWrappingTTL) { - pc.Permissions.MaxWrappingTTL = existingPerms.MaxWrappingTTL + if pc.Permissions.MaxWrappingTTL > 0 && + (existingPerms.MaxWrappingTTL == 0 || + pc.Permissions.MaxWrappingTTL < existingPerms.MaxWrappingTTL) { + existingPerms.MaxWrappingTTL = pc.Permissions.MaxWrappingTTL } // If we have an existing min, and we either don't have a current // min, or the current is greater than the previous, use the // existing - if existingPerms.MinWrappingTTL > 0 && - (pc.Permissions.MinWrappingTTL == 0 || - existingPerms.MinWrappingTTL < pc.Permissions.MinWrappingTTL) { - pc.Permissions.MinWrappingTTL = existingPerms.MinWrappingTTL + if pc.Permissions.MinWrappingTTL > 0 && + (existingPerms.MinWrappingTTL == 0 || + pc.Permissions.MinWrappingTTL < existingPerms.MinWrappingTTL) { + existingPerms.MinWrappingTTL = pc.Permissions.MinWrappingTTL } - if len(existingPerms.AllowedParameters) > 0 { - if pc.Permissions.AllowedParameters == nil { - pc.Permissions.AllowedParameters = existingPerms.AllowedParameters + if len(pc.Permissions.AllowedParameters) > 0 { + if existingPerms.AllowedParameters == nil { + existingPerms.AllowedParameters = pc.Permissions.AllowedParameters } else { - for key, value := range existingPerms.AllowedParameters { - pcValue, ok := pc.Permissions.AllowedParameters[key] + for key, value := range pc.Permissions.AllowedParameters { + pcValue, ok := existingPerms.AllowedParameters[key] // If an empty array exist it should overwrite any other // value. if len(value) == 0 || (ok && len(pcValue) == 0) { - pc.Permissions.AllowedParameters[key] = []interface{}{} + existingPerms.AllowedParameters[key] = []interface{}{} } else { // Merge the two maps, appending values on key conflict. - pc.Permissions.AllowedParameters[key] = append(value, pc.Permissions.AllowedParameters[key]...) 
+ existingPerms.AllowedParameters[key] = append(value, existingPerms.AllowedParameters[key]...) } } } } - if len(existingPerms.DeniedParameters) > 0 { - if pc.Permissions.DeniedParameters == nil { - pc.Permissions.DeniedParameters = existingPerms.DeniedParameters + if len(pc.Permissions.DeniedParameters) > 0 { + if existingPerms.DeniedParameters == nil { + existingPerms.DeniedParameters = pc.Permissions.DeniedParameters } else { - for key, value := range existingPerms.DeniedParameters { - pcValue, ok := pc.Permissions.DeniedParameters[key] + for key, value := range pc.Permissions.DeniedParameters { + pcValue, ok := existingPerms.DeniedParameters[key] // If an empty array exist it should overwrite any other // value. if len(value) == 0 || (ok && len(pcValue) == 0) { - pc.Permissions.DeniedParameters[key] = []interface{}{} + existingPerms.DeniedParameters[key] = []interface{}{} } else { // Merge the two maps, appending values on key conflict. - pc.Permissions.DeniedParameters[key] = append(value, pc.Permissions.DeniedParameters[key]...) + existingPerms.DeniedParameters[key] = append(value, existingPerms.DeniedParameters[key]...) 
} } } } INSERT: - - tree.Insert(pc.Prefix, pc.Permissions) + tree.Insert(pc.Prefix, existingPerms) } } diff --git a/vendor/github.com/hashicorp/vault/vault/acl_test.go b/vendor/github.com/hashicorp/vault/vault/acl_test.go index 7eb45b8..638fed6 100644 --- a/vendor/github.com/hashicorp/vault/vault/acl_test.go +++ b/vendor/github.com/hashicorp/vault/vault/acl_test.go @@ -2,6 +2,7 @@ package vault import ( "reflect" + "sync" "testing" "time" @@ -245,7 +246,7 @@ func TestACL_PolicyMerge(t *testing.T) { {"allow/all1", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil}, {"deny/all", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}}, {"deny/all1", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}}, - {"value/merge", nil, nil, map[string][]interface{}{"test": []interface{}{1, 2, 3, 4}}, map[string][]interface{}{"test": []interface{}{1, 2, 3, 4}}}, + {"value/merge", nil, nil, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}}, {"value/empty", nil, nil, map[string][]interface{}{"empty": []interface{}{}}, map[string][]interface{}{"empty": []interface{}{}}}, } @@ -415,6 +416,35 @@ func TestACL_ValuePermissions(t *testing.T) { } } +// NOTE: this test doesn't catch any races ATM +func TestACL_CreationRace(t *testing.T) { + policy, err := Parse(valuePermissionsPolicy) + if err != nil { + t.Fatalf("err: %v", err) + } + + var wg sync.WaitGroup + stopTime := time.Now().Add(20 * time.Second) + + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + if time.Now().After(stopTime) { + return + } + _, err := NewACL([]*Policy{policy}) + if err != nil { + t.Fatalf("err: %v", err) + } + } + }() + } + + wg.Wait() +} + var tokenCreationPolicy = ` name = "tokenCreation" path "auth/token/create*" { @@ -423,7 +453,7 @@ path "auth/token/create*" { ` var 
aclPolicy = ` -name = "dev" +name = "DeV" path "dev/*" { policy = "sudo" } @@ -452,7 +482,7 @@ path "foo/bar" { ` var aclPolicy2 = ` -name = "ops" +name = "OpS" path "dev/hide/*" { policy = "deny" } diff --git a/vendor/github.com/hashicorp/vault/vault/audit.go b/vendor/github.com/hashicorp/vault/vault/audit.go index 9391843..fccf9aa 100644 --- a/vendor/github.com/hashicorp/vault/vault/audit.go +++ b/vendor/github.com/hashicorp/vault/vault/audit.go @@ -79,6 +79,13 @@ func (c *Core) enableAudit(entry *MountEntry) error { } entry.UUID = entryUUID } + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor("audit_" + entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + } viewPath := auditBarrierPrefix + entry.UUID + "/" view := NewBarrierView(c.barrier, viewPath) @@ -201,6 +208,14 @@ func (c *Core) loadAudits() error { entry.Table = c.audit.Type needPersist = true } + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor("audit_" + entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + needPersist = true + } } if !needPersist { @@ -368,17 +383,16 @@ func (c *Core) newAuditBackend(entry *MountEntry, view logical.Storage, conf map if !ok { return nil, fmt.Errorf("unknown backend type: %s", entry.Type) } - salter, err := salt.NewSalt(view, &salt.Config{ + saltConfig := &salt.Config{ HMAC: sha256.New, HMACType: "hmac-sha256", - }) - if err != nil { - return nil, fmt.Errorf("core: unable to generate salt: %v", err) + Location: salt.DefaultLocation, } be, err := f(&audit.BackendConfig{ - Salt: salter, - Config: conf, + SaltView: view, + SaltConfig: saltConfig, + Config: conf, }) if err != nil { return nil, err @@ -397,7 +411,7 @@ func (c *Core) newAuditBackend(entry *MountEntry, view logical.Storage, conf map c.logger.Debug("audit: adding reload function", "path", entry.Path) } - c.reloadFuncs[key] = append(c.reloadFuncs[key], func(map[string]string) error { + c.reloadFuncs[key] = 
append(c.reloadFuncs[key], func(map[string]interface{}) error { if c.logger.IsInfo() { c.logger.Info("audit: reloading file audit backend", "path", entry.Path) } @@ -474,20 +488,29 @@ func (a *AuditBroker) GetHash(name string, input string) (string, error) { return "", fmt.Errorf("unknown audit backend %s", name) } - return be.backend.GetHash(input), nil + return be.backend.GetHash(input) } // LogRequest is used to ensure all the audit backends have an opportunity to // log the given request and that *at least one* succeeds. -func (a *AuditBroker) LogRequest(auth *logical.Auth, req *logical.Request, headersConfig *AuditedHeadersConfig, outerErr error) (retErr error) { +func (a *AuditBroker) LogRequest(auth *logical.Auth, req *logical.Request, headersConfig *AuditedHeadersConfig, outerErr error) (ret error) { defer metrics.MeasureSince([]string{"audit", "log_request"}, time.Now()) a.RLock() defer a.RUnlock() + + var retErr *multierror.Error + defer func() { if r := recover(); r != nil { a.logger.Error("audit: panic during logging", "request_path", req.Path, "error", r) retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log")) } + + ret = retErr.ErrorOrNil() + + if ret != nil { + metrics.IncrCounter([]string{"audit", "log_request_failure"}, 1.0) + } }() // All logged requests must have an identifier @@ -506,35 +529,49 @@ func (a *AuditBroker) LogRequest(auth *logical.Auth, req *logical.Request, heade anyLogged := false for name, be := range a.backends { req.Headers = nil - req.Headers = headersConfig.ApplyConfig(headers, be.backend.GetHash) + transHeaders, thErr := headersConfig.ApplyConfig(headers, be.backend.GetHash) + if thErr != nil { + a.logger.Error("audit: backend failed to include headers", "backend", name, "error", thErr) + continue + } + req.Headers = transHeaders start := time.Now() - err := be.backend.LogRequest(auth, req, outerErr) + lrErr := be.backend.LogRequest(auth, req, outerErr) metrics.MeasureSince([]string{"audit", name, 
"log_request"}, start) - if err != nil { - a.logger.Error("audit: backend failed to log request", "backend", name, "error", err) + if lrErr != nil { + a.logger.Error("audit: backend failed to log request", "backend", name, "error", lrErr) } else { anyLogged = true } } if !anyLogged && len(a.backends) > 0 { retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the request")) - return } - return nil + + return retErr.ErrorOrNil() } // LogResponse is used to ensure all the audit backends have an opportunity to // log the given response and that *at least one* succeeds. func (a *AuditBroker) LogResponse(auth *logical.Auth, req *logical.Request, - resp *logical.Response, headersConfig *AuditedHeadersConfig, err error) (reterr error) { + resp *logical.Response, headersConfig *AuditedHeadersConfig, err error) (ret error) { defer metrics.MeasureSince([]string{"audit", "log_response"}, time.Now()) a.RLock() defer a.RUnlock() + + var retErr *multierror.Error + defer func() { if r := recover(); r != nil { a.logger.Error("audit: panic during logging", "request_path", req.Path, "error", r) - reterr = fmt.Errorf("panic generating audit log") + retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log")) + } + + ret = retErr.ErrorOrNil() + + if ret != nil { + metrics.IncrCounter([]string{"audit", "log_response_failure"}, 1.0) } }() @@ -547,19 +584,35 @@ func (a *AuditBroker) LogResponse(auth *logical.Auth, req *logical.Request, anyLogged := false for name, be := range a.backends { req.Headers = nil - req.Headers = headersConfig.ApplyConfig(headers, be.backend.GetHash) + transHeaders, thErr := headersConfig.ApplyConfig(headers, be.backend.GetHash) + if thErr != nil { + a.logger.Error("audit: backend failed to include headers", "backend", name, "error", thErr) + continue + } + req.Headers = transHeaders start := time.Now() - err := be.backend.LogResponse(auth, req, resp, err) + lrErr := be.backend.LogResponse(auth, req, resp, err) 
metrics.MeasureSince([]string{"audit", name, "log_response"}, start) - if err != nil { - a.logger.Error("audit: backend failed to log response", "backend", name, "error", err) + if lrErr != nil { + a.logger.Error("audit: backend failed to log response", "backend", name, "error", lrErr) } else { anyLogged = true } } if !anyLogged && len(a.backends) > 0 { - return fmt.Errorf("no audit backend succeeded in logging the response") + retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the response")) + } + + return retErr.ErrorOrNil() +} + +func (a *AuditBroker) Invalidate(key string) { + // For now we ignore the key as this would only apply to salts. We just + // sort of brute force it on each one. + a.Lock() + defer a.Unlock() + for _, be := range a.backends { + be.backend.Invalidate() } - return nil } diff --git a/vendor/github.com/hashicorp/vault/vault/audit_test.go b/vendor/github.com/hashicorp/vault/vault/audit_test.go index 5e97da8..a91298d 100644 --- a/vendor/github.com/hashicorp/vault/vault/audit_test.go +++ b/vendor/github.com/hashicorp/vault/vault/audit_test.go @@ -3,6 +3,8 @@ package vault import ( "fmt" "reflect" + "strings" + "sync" "testing" "time" @@ -13,6 +15,7 @@ import ( "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/helper/jsonutil" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" log "github.com/mgutz/logxi/v1" "github.com/mitchellh/copystructure" @@ -31,6 +34,9 @@ type NoopAudit struct { RespReq []*logical.Request Resp []*logical.Response RespErrs []error + + salt *salt.Salt + saltMutex sync.RWMutex } func (n *NoopAudit) LogRequest(a *logical.Auth, r *logical.Request, err error) error { @@ -49,14 +55,44 @@ func (n *NoopAudit) LogResponse(a *logical.Auth, r *logical.Request, re *logical return n.RespErr } -func (n *NoopAudit) GetHash(data string) string { - return n.Config.Salt.GetIdentifiedHMAC(data) +func (n *NoopAudit) 
Salt() (*salt.Salt, error) { + n.saltMutex.RLock() + if n.salt != nil { + defer n.saltMutex.RUnlock() + return n.salt, nil + } + n.saltMutex.RUnlock() + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + if n.salt != nil { + return n.salt, nil + } + salt, err := salt.NewSalt(n.Config.SaltView, n.Config.SaltConfig) + if err != nil { + return nil, err + } + n.salt = salt + return salt, nil +} + +func (n *NoopAudit) GetHash(data string) (string, error) { + salt, err := n.Salt() + if err != nil { + return "", err + } + return salt.GetIdentifiedHMAC(data), nil } func (n *NoopAudit) Reload() error { return nil } +func (n *NoopAudit) Invalidate() { + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + n.salt = nil +} + func TestCore_EnableAudit(t *testing.T) { c, keys, _ := TestCoreUnsealed(t) c.auditBackends["noop"] = func(config *audit.BackendConfig) (audit.Backend, error) { @@ -184,16 +220,18 @@ func TestCore_EnableAudit_Local(t *testing.T) { Type: auditTableType, Entries: []*MountEntry{ &MountEntry{ - Table: auditTableType, - Path: "noop/", - Type: "noop", - UUID: "abcd", + Table: auditTableType, + Path: "noop/", + Type: "noop", + UUID: "abcd", + Accessor: "noop-abcd", }, &MountEntry{ - Table: auditTableType, - Path: "noop2/", - Type: "noop", - UUID: "bcde", + Table: auditTableType, + Path: "noop2/", + Type: "noop", + UUID: "bcde", + Accessor: "noop-bcde", }, }, } @@ -508,7 +546,7 @@ func TestAuditBroker_LogResponse(t *testing.T) { t.Fatalf("Bad: %#v", a.Resp[0]) } if !reflect.DeepEqual(a.RespErrs[0], respErr) { - t.Fatalf("Bad: %#v", a.RespErrs[0]) + t.Fatalf("Expected\n%v\nGot\n%#v", respErr, a.RespErrs[0]) } } @@ -522,7 +560,7 @@ func TestAuditBroker_LogResponse(t *testing.T) { // Should FAIL work with both failing backends a2.RespErr = fmt.Errorf("failed") err = b.LogResponse(auth, req, resp, headersConf, respErr) - if err.Error() != "no audit backend succeeded in logging the response" { + if !strings.Contains(err.Error(), "no audit backend succeeded in logging the 
response") { t.Fatalf("err: %v", err) } } diff --git a/vendor/github.com/hashicorp/vault/vault/audited_headers.go b/vendor/github.com/hashicorp/vault/vault/audited_headers.go index 781c035..1e1a11b 100644 --- a/vendor/github.com/hashicorp/vault/vault/audited_headers.go +++ b/vendor/github.com/hashicorp/vault/vault/audited_headers.go @@ -88,7 +88,7 @@ func (a *AuditedHeadersConfig) remove(header string) error { // ApplyConfig returns a map of approved headers and their values, either // hmac'ed or plaintext -func (a *AuditedHeadersConfig) ApplyConfig(headers map[string][]string, hashFunc func(string) string) (result map[string][]string) { +func (a *AuditedHeadersConfig) ApplyConfig(headers map[string][]string, hashFunc func(string) (string, error)) (result map[string][]string, retErr error) { // Grab a read lock a.RLock() defer a.RUnlock() @@ -110,7 +110,11 @@ func (a *AuditedHeadersConfig) ApplyConfig(headers map[string][]string, hashFunc // Optionally hmac the values if settings.HMAC { for i, el := range hVals { - hVals[i] = hashFunc(el) + hVal, err := hashFunc(el) + if err != nil { + return nil, err + } + hVals[i] = hVal } } @@ -118,7 +122,7 @@ func (a *AuditedHeadersConfig) ApplyConfig(headers map[string][]string, hashFunc } } - return + return result, nil } // Initalize the headers config by loading from the barrier view diff --git a/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go b/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go index 5e82ec7..93225cf 100644 --- a/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go +++ b/vendor/github.com/hashicorp/vault/vault/audited_headers_test.go @@ -166,9 +166,12 @@ func TestAuditedHeadersConfig_ApplyConfig(t *testing.T) { "Content-Type": []string{"json"}, } - hashFunc := func(s string) string { return "hashed" } + hashFunc := func(s string) (string, error) { return "hashed", nil } - result := conf.ApplyConfig(reqHeaders, hashFunc) + result, err := conf.ApplyConfig(reqHeaders, 
hashFunc) + if err != nil { + t.Fatal(err) + } expected := map[string][]string{ "x-test-header": []string{"foo"}, @@ -214,7 +217,7 @@ func BenchmarkAuditedHeaderConfig_ApplyConfig(b *testing.B) { b.Fatal(err) } - hashFunc := func(s string) string { return salter.GetIdentifiedHMAC(s) } + hashFunc := func(s string) (string, error) { return salter.GetIdentifiedHMAC(s), nil } // Reset the timer since we did a lot above b.ResetTimer() diff --git a/vendor/github.com/hashicorp/vault/vault/auth.go b/vendor/github.com/hashicorp/vault/vault/auth.go index 5a5e68b..5900449 100644 --- a/vendor/github.com/hashicorp/vault/vault/auth.go +++ b/vendor/github.com/hashicorp/vault/vault/auth.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + "github.com/hashicorp/errwrap" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/jsonutil" "github.com/hashicorp/vault/logical" @@ -85,13 +86,23 @@ func (c *Core) enableCredential(entry *MountEntry) error { } entry.UUID = entryUUID } - + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor("auth_" + entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + } viewPath := credentialBarrierPrefix + entry.UUID + "/" view := NewBarrierView(c.barrier, viewPath) sysView := c.mountEntrySysView(entry) + conf := make(map[string]string) + if entry.Config.PluginName != "" { + conf["plugin_name"] = entry.Config.PluginName + } // Create the new backend - backend, err := c.newCredentialBackend(entry.Type, sysView, view, nil) + backend, err := c.newCredentialBackend(entry.Type, sysView, view, conf) if err != nil { return err } @@ -99,6 +110,12 @@ func (c *Core) enableCredential(entry *MountEntry) error { return fmt.Errorf("nil backend returned from %q factory", entry.Type) } + // Check for the correct backend type + backendType := backend.Type() + if entry.Type == "plugin" && backendType != logical.TypeCredential { + return fmt.Errorf("cannot mount '%s' of type '%s' as an auth backend", entry.Config.PluginName, 
backendType) + } + if err := backend.Initialize(); err != nil { return err } @@ -125,7 +142,7 @@ func (c *Core) enableCredential(entry *MountEntry) error { // disableCredential is used to disable an existing credential backend; the // boolean indicates if it existed -func (c *Core) disableCredential(path string) (bool, error) { +func (c *Core) disableCredential(path string) error { // Ensure we end the path in a slash if !strings.HasSuffix(path, "/") { path += "/" @@ -133,29 +150,29 @@ func (c *Core) disableCredential(path string) (bool, error) { // Ensure the token backend is not affected if path == "token/" { - return true, fmt.Errorf("token credential backend cannot be disabled") + return fmt.Errorf("token credential backend cannot be disabled") } // Store the view for this backend fullPath := credentialRoutePrefix + path view := c.router.MatchingStorageView(fullPath) if view == nil { - return false, fmt.Errorf("no matching backend %s", fullPath) + return fmt.Errorf("no matching backend %s", fullPath) } // Mark the entry as tainted if err := c.taintCredEntry(path); err != nil { - return true, err + return err } // Taint the router path to prevent routing if err := c.router.Taint(fullPath); err != nil { - return true, err + return err } // Revoke credentials from this path if err := c.expiration.RevokePrefix(fullPath); err != nil { - return true, err + return err } // Call cleanup function if it exists @@ -166,24 +183,24 @@ func (c *Core) disableCredential(path string) (bool, error) { // Unmount the backend if err := c.router.Unmount(fullPath); err != nil { - return true, err + return err } // Clear the data in the view if view != nil { if err := logical.ClearView(view); err != nil { - return true, err + return err } } // Remove the mount table entry if err := c.removeCredEntry(path); err != nil { - return true, err + return err } if c.logger.IsInfo() { c.logger.Info("core: disabled credential backend", "path", path) } - return true, nil + return nil } // 
removeCredEntry is used to remove an entry in the auth table @@ -283,13 +300,21 @@ func (c *Core) loadCredentials() error { entry.Table = c.auth.Type needPersist = true } + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor("auth_" + entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + needPersist = true + } } if !needPersist { return nil } } else { - c.auth = defaultAuthTable() + c.auth = c.defaultAuthTable() } if err := c.persistAuth(c.auth, false); err != nil { @@ -373,7 +398,6 @@ func (c *Core) persistAuth(table *MountTable, localOnly bool) error { // setupCredentials is invoked after we've loaded the auth table to // initialize the credential backends and setup the router func (c *Core) setupCredentials() error { - var backend logical.Backend var view *BarrierView var err error var persistNeeded bool @@ -382,6 +406,7 @@ func (c *Core) setupCredentials() error { defer c.authLock.Unlock() for _, entry := range c.auth.Entries { + var backend logical.Backend // Work around some problematic code that existed in master for a while if strings.HasPrefix(entry.Path, credentialRoutePrefix) { entry.Path = strings.TrimPrefix(entry.Path, credentialRoutePrefix) @@ -392,21 +417,36 @@ func (c *Core) setupCredentials() error { viewPath := credentialBarrierPrefix + entry.UUID + "/" view = NewBarrierView(c.barrier, viewPath) sysView := c.mountEntrySysView(entry) + conf := make(map[string]string) + if entry.Config.PluginName != "" { + conf["plugin_name"] = entry.Config.PluginName + } // Initialize the backend - backend, err = c.newCredentialBackend(entry.Type, sysView, view, nil) + backend, err = c.newCredentialBackend(entry.Type, sysView, view, conf) if err != nil { c.logger.Error("core: failed to create credential entry", "path", entry.Path, "error", err) + if errwrap.Contains(err, ErrPluginNotFound.Error()) && entry.Type == "plugin" { + // If we encounter an error instantiating the backend due to it being missing from the catalog, + // 
skip backend initialization but register the entry to the mount table to preserve storage + // and path. + goto ROUTER_MOUNT + } return errLoadAuthFailed } if backend == nil { return fmt.Errorf("nil backend returned from %q factory", entry.Type) } + // Check for the correct backend type + if entry.Type == "plugin" && backend.Type() != logical.TypeCredential { + return fmt.Errorf("cannot mount '%s' of type '%s' as an auth backend", entry.Config.PluginName, backend.Type()) + } + if err := backend.Initialize(); err != nil { return err } - + ROUTER_MOUNT: // Mount the backend path := credentialRoutePrefix + entry.Path err = c.router.Mount(backend, path, entry, view) @@ -425,7 +465,7 @@ func (c *Core) setupCredentials() error { c.tokenStore = backend.(*TokenStore) // this is loaded *after* the normal mounts, including cubbyhole - c.router.tokenStoreSalt = c.tokenStore.salt + c.router.tokenStoreSaltFunc = c.tokenStore.Salt c.tokenStore.cubbyholeBackend = c.router.MatchingBackend("cubbyhole/").(*CubbyholeBackend) } } @@ -485,7 +525,7 @@ func (c *Core) newCredentialBackend( } // defaultAuthTable creates a default auth table -func defaultAuthTable() *MountTable { +func (c *Core) defaultAuthTable() *MountTable { table := &MountTable{ Type: credentialTableType, } @@ -493,12 +533,17 @@ func defaultAuthTable() *MountTable { if err != nil { panic(fmt.Sprintf("could not generate UUID for default auth table token entry: %v", err)) } + tokenAccessor, err := c.generateMountAccessor("auth_token") + if err != nil { + panic(fmt.Sprintf("could not generate accessor for default auth table token entry: %v", err)) + } tokenAuth := &MountEntry{ Table: credentialTableType, Path: "token/", Type: "token", Description: "token based credentials", UUID: tokenUUID, + Accessor: tokenAccessor, } table.Entries = append(table.Entries, tokenAuth) return table diff --git a/vendor/github.com/hashicorp/vault/vault/auth_test.go b/vendor/github.com/hashicorp/vault/vault/auth_test.go index bc150e9..c81b264 
100644 --- a/vendor/github.com/hashicorp/vault/vault/auth_test.go +++ b/vendor/github.com/hashicorp/vault/vault/auth_test.go @@ -99,16 +99,18 @@ func TestCore_EnableCredential_Local(t *testing.T) { Type: credentialTableType, Entries: []*MountEntry{ &MountEntry{ - Table: credentialTableType, - Path: "noop/", - Type: "noop", - UUID: "abcd", + Table: credentialTableType, + Path: "noop/", + Type: "noop", + UUID: "abcd", + Accessor: "noop-abcd", }, &MountEntry{ - Table: credentialTableType, - Path: "noop2/", - Type: "noop", - UUID: "bcde", + Table: credentialTableType, + Path: "noop2/", + Type: "noop", + UUID: "bcde", + Accessor: "noop-bcde", }, }, } @@ -215,9 +217,9 @@ func TestCore_DisableCredential(t *testing.T) { return &NoopBackend{}, nil } - existed, err := c.disableCredential("foo") - if existed || (err != nil && !strings.HasPrefix(err.Error(), "no matching backend")) { - t.Fatalf("existed: %v; err: %v", existed, err) + err := c.disableCredential("foo") + if err != nil && !strings.HasPrefix(err.Error(), "no matching backend") { + t.Fatalf("err: %v", err) } me := &MountEntry{ @@ -230,9 +232,9 @@ func TestCore_DisableCredential(t *testing.T) { t.Fatalf("err: %v", err) } - existed, err = c.disableCredential("foo") - if !existed || err != nil { - t.Fatalf("existed: %v; err: %v", existed, err) + err = c.disableCredential("foo") + if err != nil { + t.Fatalf("err: %v", err) } match := c.router.MatchingMount("auth/foo/bar") @@ -266,9 +268,9 @@ func TestCore_DisableCredential(t *testing.T) { func TestCore_DisableCredential_Protected(t *testing.T) { c, _, _ := TestCoreUnsealed(t) - existed, err := c.disableCredential("token") - if !existed || err.Error() != "token credential backend cannot be disabled" { - t.Fatalf("existed: %v; err: %v", existed, err) + err := c.disableCredential("token") + if err.Error() != "token credential backend cannot be disabled" { + t.Fatalf("err: %v", err) } } @@ -322,9 +324,9 @@ func TestCore_DisableCredential_Cleanup(t *testing.T) { } // 
Disable should cleanup - existed, err := c.disableCredential("foo") - if !existed || err != nil { - t.Fatalf("existed: %v; err: %v", existed, err) + err = c.disableCredential("foo") + if err != nil { + t.Fatalf("err: %v", err) } // Token should be revoked @@ -347,7 +349,8 @@ func TestCore_DisableCredential_Cleanup(t *testing.T) { } func TestDefaultAuthTable(t *testing.T) { - table := defaultAuthTable() + c, _, _ := TestCoreUnsealed(t) + table := c.defaultAuthTable() verifyDefaultAuthTable(t, table) } diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go index 7d575ce..ef0fe38 100644 --- a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go +++ b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm_test.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/physical" + "github.com/hashicorp/vault/physical/inmem" log "github.com/mgutz/logxi/v1" ) @@ -16,8 +17,10 @@ var ( // mockBarrier returns a physical backend, security barrier, and master key func mockBarrier(t testing.TB) (physical.Backend, SecurityBarrier, []byte) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -31,8 +34,10 @@ func mockBarrier(t testing.TB) (physical.Backend, SecurityBarrier, []byte) { } func TestAESGCMBarrier_Basic(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -41,8 +46,10 @@ func TestAESGCMBarrier_Basic(t *testing.T) { } func TestAESGCMBarrier_Rotate(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := 
NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -51,8 +58,10 @@ func TestAESGCMBarrier_Rotate(t *testing.T) { } func TestAESGCMBarrier_Upgrade(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b1, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -65,8 +74,10 @@ func TestAESGCMBarrier_Upgrade(t *testing.T) { } func TestAESGCMBarrier_Upgrade_Rekey(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b1, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -79,8 +90,10 @@ func TestAESGCMBarrier_Upgrade_Rekey(t *testing.T) { } func TestAESGCMBarrier_Rekey(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -91,8 +104,10 @@ func TestAESGCMBarrier_Rekey(t *testing.T) { // Test an upgrade from the old (0.1) barrier/init to the new // core/keyring style func TestAESGCMBarrier_BackwardsCompatible(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -171,8 +186,10 @@ func TestAESGCMBarrier_BackwardsCompatible(t *testing.T) { // Verify data sent through is encrypted func TestAESGCMBarrier_Confidential(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -209,8 +226,10 @@ func TestAESGCMBarrier_Confidential(t *testing.T) { // Verify data sent through cannot be tampered with func TestAESGCMBarrier_Integrity(t *testing.T) { - - inm := 
physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -245,8 +264,10 @@ func TestAESGCMBarrier_Integrity(t *testing.T) { // Verify data sent through cannot be moved func TestAESGCMBarrier_MoveIntegrityV1(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -275,6 +296,9 @@ func TestAESGCMBarrier_MoveIntegrityV1(t *testing.T) { pe, _ := inm.Get("test") pe.Key = "moved" err = inm.Put(pe) + if err != nil { + t.Fatalf("err: %v", err) + } // Read from the barrier _, err = b.Get("moved") @@ -284,8 +308,10 @@ func TestAESGCMBarrier_MoveIntegrityV1(t *testing.T) { } func TestAESGCMBarrier_MoveIntegrityV2(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -314,6 +340,9 @@ func TestAESGCMBarrier_MoveIntegrityV2(t *testing.T) { pe, _ := inm.Get("test") pe.Key = "moved" err = inm.Put(pe) + if err != nil { + t.Fatalf("err: %v", err) + } // Read from the barrier _, err = b.Get("moved") @@ -323,8 +352,10 @@ func TestAESGCMBarrier_MoveIntegrityV2(t *testing.T) { } func TestAESGCMBarrier_UpgradeV1toV2(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -376,8 +407,10 @@ func TestAESGCMBarrier_UpgradeV1toV2(t *testing.T) { } func TestEncrypt_Unique(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: 
%v", err) @@ -404,8 +437,10 @@ func TestEncrypt_Unique(t *testing.T) { } func TestInitialize_KeyLength(t *testing.T) { - - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) @@ -435,7 +470,13 @@ func TestInitialize_KeyLength(t *testing.T) { } func TestEncrypt_BarrierEncryptor(t *testing.T) { - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatalf("err: %v", err) + } + if err != nil { + t.Fatalf("err: %v", err) + } b, err := NewAESGCMBarrier(inm) if err != nil { t.Fatalf("err: %v", err) diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_view.go b/vendor/github.com/hashicorp/vault/vault/barrier_view.go index 0fa6f2d..3512aba 100644 --- a/vendor/github.com/hashicorp/vault/vault/barrier_view.go +++ b/vendor/github.com/hashicorp/vault/vault/barrier_view.go @@ -1,7 +1,7 @@ package vault import ( - "fmt" + "errors" "strings" "github.com/hashicorp/vault/logical" @@ -20,6 +20,10 @@ type BarrierView struct { readonly bool } +var ( + ErrRelativePath = errors.New("relative paths not supported") +) + // NewBarrierView takes an underlying security barrier and returns // a view of it that can only operate with the given prefix. 
func NewBarrierView(barrier BarrierStorage, prefix string) *BarrierView { @@ -32,7 +36,7 @@ func NewBarrierView(barrier BarrierStorage, prefix string) *BarrierView { // sanityCheck is used to perform a sanity check on a key func (v *BarrierView) sanityCheck(key string) error { if strings.Contains(key, "..") { - return fmt.Errorf("key cannot be relative path") + return ErrRelativePath } return nil } @@ -98,7 +102,6 @@ func (v *BarrierView) Delete(key string) error { return logical.ErrReadOnly } - return v.barrier.Delete(expandedKey) } diff --git a/vendor/github.com/hashicorp/vault/vault/cluster.go b/vendor/github.com/hashicorp/vault/vault/cluster.go index dc9fb65..beca4b9 100644 --- a/vendor/github.com/hashicorp/vault/vault/cluster.go +++ b/vendor/github.com/hashicorp/vault/vault/cluster.go @@ -17,13 +17,8 @@ import ( "net/http" "time" - log "github.com/mgutz/logxi/v1" - - "golang.org/x/net/http2" - "github.com/hashicorp/errwrap" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/helper/forwarding" "github.com/hashicorp/vault/helper/jsonutil" ) @@ -50,11 +45,6 @@ type clusterKeyParams struct { D *big.Int `json:"d" structs:"d" mapstructure:"d"` } -type activeConnection struct { - transport *http2.Transport - clusterAddr string -} - // Structure representing the storage entry that holds cluster information type Cluster struct { // Name of the cluster @@ -292,21 +282,11 @@ func (c *Core) setupCluster() error { return nil } -// SetClusterSetupFuncs sets the handler setup func -func (c *Core) SetClusterSetupFuncs(handler func() (http.Handler, http.Handler)) { - c.clusterHandlerSetupFunc = handler -} - // startClusterListener starts cluster request listeners during postunseal. It // is assumed that the state lock is held while this is run. Right now this // only starts forwarding listeners; it's TBD whether other request types will // be built in the same mechanism or started independently. 
func (c *Core) startClusterListener() error { - if c.clusterHandlerSetupFunc == nil { - c.logger.Error("core: cluster handler setup function has not been set when trying to start listeners") - return fmt.Errorf("cluster handler setup function has not been set") - } - if c.clusterAddr == "" { c.logger.Info("core: clustering disabled, not starting listeners") return nil @@ -418,7 +398,7 @@ func (c *Core) ClusterTLSConfig() (*tls.Config, error) { //c.logger.Trace("core: performing server config lookup") for _, v := range clientHello.SupportedProtos { switch v { - case "h2", "req_fw_sb-act_v1": + case "h2", requestForwardingALPN: default: return nil, fmt.Errorf("unknown ALPN proto %s", v) } @@ -434,6 +414,7 @@ func (c *Core) ClusterTLSConfig() (*tls.Config, error) { RootCAs: caPool, ClientCAs: caPool, NextProtos: clientHello.SupportedProtos, + CipherSuites: c.clusterCipherSuites, } switch { @@ -458,6 +439,7 @@ func (c *Core) ClusterTLSConfig() (*tls.Config, error) { GetClientCertificate: clientLookup, GetConfigForClient: serverConfigLookup, MinVersion: tls.VersionTLS12, + CipherSuites: c.clusterCipherSuites, } var localCert bytes.Buffer @@ -482,50 +464,6 @@ func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) { c.clusterListenerAddrs = addrs } -// WrapHandlerForClustering takes in Vault's HTTP handler and returns a setup -// function that returns both the original handler and one wrapped with cluster -// methods -func WrapHandlerForClustering(handler http.Handler, logger log.Logger) func() (http.Handler, http.Handler) { - return func() (http.Handler, http.Handler) { - // This mux handles cluster functions (right now, only forwarded requests) - mux := http.NewServeMux() - mux.HandleFunc("/cluster/local/forwarded-request", func(w http.ResponseWriter, req *http.Request) { - //logger.Trace("forwarding: serving h2 forwarded request") - freq, err := forwarding.ParseForwardedHTTPRequest(req) - if err != nil { - if logger != nil { - 
logger.Error("http/forwarded-request-server: error parsing forwarded request", "error", err) - } - - w.Header().Add("Content-Type", "application/json") - - // The response writer here is different from - // the one set in Vault's HTTP handler. - // Hence, set the Cache-Control explicitly. - w.Header().Set("Cache-Control", "no-store") - - w.WriteHeader(http.StatusInternalServerError) - - type errorResponse struct { - Errors []string - } - resp := &errorResponse{ - Errors: []string{ - err.Error(), - }, - } - - enc := json.NewEncoder(w) - enc.Encode(resp) - return - } - - // To avoid the risk of a forward loop in some pathological condition, - // set the no-forward header - freq.Header.Set(IntNoForwardingHeaderName, "true") - handler.ServeHTTP(w, freq) - }) - - return handler, mux - } +func (c *Core) SetClusterHandler(handler http.Handler) { + c.clusterHandler = handler } diff --git a/vendor/github.com/hashicorp/vault/vault/cluster_test.go b/vendor/github.com/hashicorp/vault/vault/cluster_test.go index d3ee512..9bc5b69 100644 --- a/vendor/github.com/hashicorp/vault/vault/cluster_test.go +++ b/vendor/github.com/hashicorp/vault/vault/cluster_test.go @@ -6,7 +6,6 @@ import ( "fmt" "net" "net/http" - "os" "testing" "time" @@ -14,6 +13,7 @@ import ( "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/physical" + "github.com/hashicorp/vault/physical/inmem" log "github.com/mgutz/logxi/v1" ) @@ -44,9 +44,17 @@ func TestClusterHAFetching(t *testing.T) { redirect := "http://127.0.0.1:8200" + inm, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } c, err := NewCore(&CoreConfig{ - Physical: physical.NewInmemHA(logger), - HAPhysical: physical.NewInmemHA(logger), + Physical: inm, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: redirect, DisableMlock: true, }) @@ -86,12 +94,12 @@ func 
TestCluster_ListenForRequests(t *testing.T) { // Make this nicer for tests manualStepDownSleepPeriod = 5 * time.Second - cores := TestCluster(t, []http.Handler{nil, nil, nil}, nil, false) - for _, core := range cores { - defer core.CloseListeners() - } - - root := cores[0].Root + cluster := NewTestCluster(t, nil, &TestClusterOptions{ + KeepStandbysSealed: true, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores // Wait for core to become active TestWaitActive(t, cores[0].Core) @@ -116,16 +124,16 @@ func TestCluster_ListenForRequests(t *testing.T) { t.Fatalf("%s not a TCP port", tcpAddr.String()) } - conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", tcpAddr.IP.String(), tcpAddr.Port+10), tlsConfig) + conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", tcpAddr.IP.String(), tcpAddr.Port+105), tlsConfig) if err != nil { if expectFail { - t.Logf("testing %s:%d unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+10) + t.Logf("testing %s:%d unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105) continue } t.Fatalf("error: %v\nlisteners are\n%#v\n%#v\n", err, cores[0].Listeners[0], cores[0].Listeners[1]) } if expectFail { - t.Fatalf("testing %s:%d not unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+10) + t.Fatalf("testing %s:%d not unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105) } err = conn.Handshake() if err != nil { @@ -138,7 +146,7 @@ func TestCluster_ListenForRequests(t *testing.T) { case connState.NegotiatedProtocol != "h2" || !connState.NegotiatedProtocolIsMutual: t.Fatal("bad protocol negotiation") } - t.Logf("testing %s:%d successful", tcpAddr.IP.String(), tcpAddr.Port+10) + t.Logf("testing %s:%d successful", tcpAddr.IP.String(), tcpAddr.Port+105) } } @@ -148,7 +156,7 @@ func TestCluster_ListenForRequests(t *testing.T) { err := cores[0].StepDown(&logical.Request{ Operation: logical.UpdateOperation, Path: "sys/step-down", - ClientToken: root, + ClientToken: cluster.RootToken, }) if err != nil { 
t.Fatal(err) @@ -163,7 +171,7 @@ func TestCluster_ListenForRequests(t *testing.T) { time.Sleep(manualStepDownSleepPeriod) checkListenersFunc(false) - err = cores[0].Seal(root) + err = cores[0].Seal(cluster.RootToken) if err != nil { t.Fatal(err) } @@ -176,51 +184,39 @@ func TestCluster_ForwardRequests(t *testing.T) { // Make this nicer for tests manualStepDownSleepPeriod = 5 * time.Second - testCluster_ForwardRequestsCommon(t, false) - testCluster_ForwardRequestsCommon(t, true) - os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "") + testCluster_ForwardRequestsCommon(t) } -func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) { - if rpc { - os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "1") - } else { - os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "") - } - - handler1 := http.NewServeMux() - handler1.HandleFunc("/core1", func(w http.ResponseWriter, req *http.Request) { +func testCluster_ForwardRequestsCommon(t *testing.T) { + cluster := NewTestCluster(t, nil, nil) + cores := cluster.Cores + cores[0].Handler.(*http.ServeMux).HandleFunc("/core1", func(w http.ResponseWriter, req *http.Request) { w.Header().Add("Content-Type", "application/json") w.WriteHeader(201) w.Write([]byte("core1")) }) - handler2 := http.NewServeMux() - handler2.HandleFunc("/core2", func(w http.ResponseWriter, req *http.Request) { + cores[1].Handler.(*http.ServeMux).HandleFunc("/core2", func(w http.ResponseWriter, req *http.Request) { w.Header().Add("Content-Type", "application/json") w.WriteHeader(202) w.Write([]byte("core2")) }) - handler3 := http.NewServeMux() - handler3.HandleFunc("/core3", func(w http.ResponseWriter, req *http.Request) { + cores[2].Handler.(*http.ServeMux).HandleFunc("/core3", func(w http.ResponseWriter, req *http.Request) { w.Header().Add("Content-Type", "application/json") w.WriteHeader(203) w.Write([]byte("core3")) }) + cluster.Start() + defer cluster.Cleanup() - cores := TestCluster(t, []http.Handler{handler1, handler2, handler3}, nil, true) - for _, core := 
range cores { - defer core.CloseListeners() - } - - root := cores[0].Root + root := cluster.RootToken // Wait for core to become active TestWaitActive(t, cores[0].Core) // Test forwarding a request. Since we're going directly from core to core // with no fallback we know that if it worked, request handling is working - testCluster_ForwardRequests(t, cores[1], "core1") - testCluster_ForwardRequests(t, cores[2], "core1") + testCluster_ForwardRequests(t, cores[1], root, "core1") + testCluster_ForwardRequests(t, cores[2], root, "core1") // // Now we do a bunch of round-robining. The point is to make sure that as @@ -245,8 +241,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) { }) time.Sleep(clusterTestPausePeriod) TestWaitActive(t, cores[1].Core) - testCluster_ForwardRequests(t, cores[0], "core2") - testCluster_ForwardRequests(t, cores[2], "core2") + testCluster_ForwardRequests(t, cores[0], root, "core2") + testCluster_ForwardRequests(t, cores[2], root, "core2") // Ensure active core is cores[2] and test err = cores[1].StepDown(&logical.Request{ @@ -265,8 +261,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) { }) time.Sleep(clusterTestPausePeriod) TestWaitActive(t, cores[2].Core) - testCluster_ForwardRequests(t, cores[0], "core3") - testCluster_ForwardRequests(t, cores[1], "core3") + testCluster_ForwardRequests(t, cores[0], root, "core3") + testCluster_ForwardRequests(t, cores[1], root, "core3") // Ensure active core is cores[0] and test err = cores[2].StepDown(&logical.Request{ @@ -285,8 +281,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) { }) time.Sleep(clusterTestPausePeriod) TestWaitActive(t, cores[0].Core) - testCluster_ForwardRequests(t, cores[1], "core1") - testCluster_ForwardRequests(t, cores[2], "core1") + testCluster_ForwardRequests(t, cores[1], root, "core1") + testCluster_ForwardRequests(t, cores[2], root, "core1") // Ensure active core is cores[1] and test err = cores[0].StepDown(&logical.Request{ 
@@ -305,8 +301,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) { }) time.Sleep(clusterTestPausePeriod) TestWaitActive(t, cores[1].Core) - testCluster_ForwardRequests(t, cores[0], "core2") - testCluster_ForwardRequests(t, cores[2], "core2") + testCluster_ForwardRequests(t, cores[0], root, "core2") + testCluster_ForwardRequests(t, cores[2], root, "core2") // Ensure active core is cores[2] and test err = cores[1].StepDown(&logical.Request{ @@ -325,11 +321,11 @@ func testCluster_ForwardRequestsCommon(t *testing.T, rpc bool) { }) time.Sleep(clusterTestPausePeriod) TestWaitActive(t, cores[2].Core) - testCluster_ForwardRequests(t, cores[0], "core3") - testCluster_ForwardRequests(t, cores[1], "core3") + testCluster_ForwardRequests(t, cores[0], root, "core3") + testCluster_ForwardRequests(t, cores[1], root, "core3") } -func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID string) { +func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, rootToken, remoteCoreID string) { standby, err := c.Standby() if err != nil { t.Fatal(err) @@ -339,8 +335,9 @@ func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID } // We need to call Leader as that refreshes the connection info - isLeader, _, err := c.Leader() + isLeader, _, _, err := c.Leader() if err != nil { + panic(err.Error()) t.Fatal(err) } if isLeader { @@ -352,7 +349,7 @@ func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID if err != nil { t.Fatal(err) } - req.Header.Add("X-Vault-Token", c.Root) + req.Header.Add("X-Vault-Token", rootToken) statusCode, header, respBytes, err := c.ForwardRequest(req) if err != nil { @@ -386,3 +383,37 @@ func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID } } } + +func TestCluster_CustomCipherSuites(t *testing.T) { + cluster := NewTestCluster(t, &CoreConfig{ + ClusterCipherSuites: 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + }, nil) + cluster.Start() + defer cluster.Cleanup() + core := cluster.Cores[0] + + // Wait for core to become active + TestWaitActive(t, core.Core) + + tlsConf, err := core.Core.ClusterTLSConfig() + if err != nil { + t.Fatal(err) + } + + conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", core.Listeners[0].Address.IP.String(), core.Listeners[0].Address.Port+105), tlsConf) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + err = conn.Handshake() + if err != nil { + t.Fatal(err) + } + if conn.ConnectionState().CipherSuite != tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 { + var availCiphers string + for _, cipher := range core.clusterCipherSuites { + availCiphers += fmt.Sprintf("%x ", cipher) + } + t.Fatalf("got bad negotiated cipher %x, core-set suites are %s", conn.ConnectionState().CipherSuite, availCiphers) + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/core.go b/vendor/github.com/hashicorp/vault/vault/core.go index 396a2bc..1259c03 100644 --- a/vendor/github.com/hashicorp/vault/vault/core.go +++ b/vendor/github.com/hashicorp/vault/vault/core.go @@ -10,6 +10,7 @@ import ( "net" "net/http" "net/url" + "path/filepath" "sync" "time" @@ -28,9 +29,12 @@ import ( "github.com/hashicorp/vault/helper/jsonutil" "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/helper/mlock" + "github.com/hashicorp/vault/helper/reload" + "github.com/hashicorp/vault/helper/tlsutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/physical" "github.com/hashicorp/vault/shamir" + cache "github.com/patrickmn/go-cache" ) const ( @@ -50,6 +54,9 @@ const ( // HA lock if an error is encountered lockRetryInterval = 10 * time.Second + // leaderCheckInterval is how often a standby checks for a new leader + leaderCheckInterval = 2500 * time.Millisecond + // keyRotateCheckInterval is how often a standby checks for a key // rotation 
taking place. keyRotateCheckInterval = 30 * time.Second @@ -98,9 +105,6 @@ var ( LastRemoteWAL = lastRemoteWALImpl ) -// ReloadFunc are functions that are called when a reload is requested. -type ReloadFunc func(map[string]string) error - // NonFatalError is an error that can be returned during NewCore that should be // displayed but not cause a program exit type NonFatalError struct { @@ -268,9 +272,9 @@ type Core struct { cachingDisabled bool // reloadFuncs is a map containing reload functions - reloadFuncs map[string][]ReloadFunc + reloadFuncs map[string][]reload.ReloadFunc - // reloadFuncsLock controlls access to the funcs + // reloadFuncsLock controls access to the funcs reloadFuncsLock sync.RWMutex // wrappingJWTKey is the key used for generating JWTs containing response @@ -282,6 +286,8 @@ type Core struct { // // Name clusterName string + // Specific cipher suites to use for clustering, if any + clusterCipherSuites []uint16 // Used to modify cluster parameters clusterParamsLock sync.RWMutex // The private key stored in the barrier used for establishing @@ -293,8 +299,8 @@ type Core struct { localClusterParsedCert *x509.Certificate // The TCP addresses we should use for clustering clusterListenerAddrs []*net.TCPAddr - // The setup function that gives us the handler to use - clusterHandlerSetupFunc func() (http.Handler, http.Handler) + // The handler to use for request forwarding + clusterHandler http.Handler // Tracks whether cluster listeners are running, e.g. it's safe to send a // shutdown down the channel clusterListenersRunning bool @@ -303,8 +309,6 @@ type Core struct { // Shutdown success channel. We need this to be done serially to ensure // that binds are removed before they might be reinstated. 
clusterListenerShutdownSuccessCh chan struct{} - // Connection info containing a client and a current active address - requestForwardingConnection *activeConnection // Write lock used to ensure that we don't have multiple connections adjust // this value at the same time requestForwardingConnectionLock sync.RWMutex @@ -313,16 +317,25 @@ type Core struct { clusterLeaderUUID string // Most recent leader redirect addr clusterLeaderRedirectAddr string + // Most recent leader cluster addr + clusterLeaderClusterAddr string // Lock for the cluster leader values clusterLeaderParamsLock sync.RWMutex + // Info on cluster members + clusterPeerClusterAddrsCache *cache.Cache // The grpc Server that handles server RPC calls rpcServer *grpc.Server + // The context for the client + rpcClientConnContext context.Context // The function for canceling the client connection rpcClientConnCancelFunc context.CancelFunc // The grpc ClientConn for RPC calls rpcClientConn *grpc.ClientConn // The grpc forwarding client - rpcForwardingClient RequestForwardingClient + rpcForwardingClient *forwardingClient + + // CORS Information + corsConfig *CORSConfig // replicationState keeps the current replication state cached for quick // lookup @@ -330,6 +343,22 @@ type Core struct { // uiEnabled indicates whether Vault Web UI is enabled or not uiEnabled bool + + // rawEnabled indicates whether the Raw endpoint is enabled + rawEnabled bool + + // pluginDirectory is the location vault will look for plugin binaries + pluginDirectory string + + // pluginCatalog is used to manage plugin configurations + pluginCatalog *PluginCatalog + + enableMlock bool + + // This can be used to trigger operations to stop running when Vault is + // going to be shut down, stepped down, or sealed + requestContext context.Context + requestContextCancelFunc context.CancelFunc } // CoreConfig is used to parameterize a core @@ -372,9 +401,16 @@ type CoreConfig struct { ClusterName string `json:"cluster_name" structs:"cluster_name" 
mapstructure:"cluster_name"` + ClusterCipherSuites string `json:"cluster_cipher_suites" structs:"cluster_cipher_suites" mapstructure:"cluster_cipher_suites"` + EnableUI bool `json:"ui" structs:"ui" mapstructure:"ui"` - ReloadFuncs *map[string][]ReloadFunc + // Enable the raw endpoint + EnableRaw bool `json:"enable_raw" structs:"enable_raw" mapstructure:"enable_raw"` + + PluginDirectory string `json:"plugin_directory" structs:"plugin_directory" mapstructure:"plugin_directory"` + + ReloadFuncs *map[string][]reload.ReloadFunc ReloadFuncsLock *sync.RWMutex } @@ -430,11 +466,30 @@ func NewCore(conf *CoreConfig) (*Core, error) { clusterName: conf.ClusterName, clusterListenerShutdownCh: make(chan struct{}), clusterListenerShutdownSuccessCh: make(chan struct{}), + clusterPeerClusterAddrsCache: cache.New(3*heartbeatInterval, time.Second), + enableMlock: !conf.DisableMlock, + rawEnabled: conf.EnableRaw, } + if conf.ClusterCipherSuites != "" { + suites, err := tlsutil.ParseCiphers(conf.ClusterCipherSuites) + if err != nil { + return nil, errwrap.Wrapf("error parsing cluster cipher suites: {{err}}", err) + } + c.clusterCipherSuites = suites + } + + c.corsConfig = &CORSConfig{core: c} + // Load CORS config and provide a value for the core field. 
+ + _, txnOK := conf.Physical.(physical.Transactional) // Wrap the physical backend in a cache layer if enabled and not already wrapped if _, isCache := conf.Physical.(*physical.Cache); !conf.DisableCache && !isCache { - c.physical = physical.NewCache(conf.Physical, conf.CacheSize, conf.Logger) + if txnOK { + c.physical = physical.NewTransactionalCache(conf.Physical, conf.CacheSize, conf.Logger) + } else { + c.physical = physical.NewCache(conf.Physical, conf.CacheSize, conf.Logger) + } } if !conf.DisableMlock { @@ -453,8 +508,15 @@ func NewCore(conf *CoreConfig) (*Core, error) { } } - // Construct a new AES-GCM barrier var err error + if conf.PluginDirectory != "" { + c.pluginDirectory, err = filepath.Abs(conf.PluginDirectory) + if err != nil { + return nil, fmt.Errorf("core setup failed, could not verify plugin directory: %v", err) + } + } + + // Construct a new AES-GCM barrier c.barrier, err = NewAESGCMBarrier(c.physical) if err != nil { return nil, fmt.Errorf("barrier setup failed: %v", err) @@ -468,7 +530,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { // the caller can share state conf.ReloadFuncsLock = &c.reloadFuncsLock c.reloadFuncsLock.Lock() - c.reloadFuncs = make(map[string][]ReloadFunc) + c.reloadFuncs = make(map[string][]reload.ReloadFunc) c.reloadFuncsLock.Unlock() conf.ReloadFuncs = &c.reloadFuncs @@ -477,13 +539,17 @@ func NewCore(conf *CoreConfig) (*Core, error) { for k, f := range conf.LogicalBackends { logicalBackends[k] = f } - _, ok := logicalBackends["generic"] + _, ok := logicalBackends["kv"] if !ok { - logicalBackends["generic"] = PassthroughBackendFactory + logicalBackends["kv"] = PassthroughBackendFactory } logicalBackends["cubbyhole"] = CubbyholeBackendFactory logicalBackends["system"] = func(config *logical.BackendConfig) (logical.Backend, error) { - return NewSystemBackend(c, config) + b := NewSystemBackend(c) + if err := b.Setup(config); err != nil { + return nil, err + } + return b, nil } c.logicalBackends = logicalBackends @@ 
-519,14 +585,27 @@ func NewCore(conf *CoreConfig) (*Core, error) { // problem. It is only used to gracefully quit in the case of HA so that failover // happens as quickly as possible. func (c *Core) Shutdown() error { - c.stateLock.Lock() - defer c.stateLock.Unlock() - if c.sealed { - return nil + c.stateLock.RLock() + // Tell any requests that know about this to stop + if c.requestContextCancelFunc != nil { + c.requestContextCancelFunc() } + c.stateLock.RUnlock() // Seal the Vault, causes a leader stepdown - return c.sealInternal() + retChan := make(chan error) + go func() { + c.stateLock.Lock() + defer c.stateLock.Unlock() + retChan <- c.sealInternal() + }() + + return <-retChan +} + +// CORSConfig returns the current CORS configuration +func (c *Core) CORSConfig() *CORSConfig { + return c.corsConfig } // LookupToken returns the properties of the token from the token store. This @@ -637,24 +716,27 @@ func (c *Core) checkToken(req *logical.Request) (*logical.Auth, *TokenEntry, err panic("unreachable code") } } + // Create the auth response + auth := &logical.Auth{ + ClientToken: req.ClientToken, + Accessor: req.ClientTokenAccessor, + Policies: te.Policies, + Metadata: te.Meta, + DisplayName: te.DisplayName, + } // Check the standard non-root ACLs. Return the token entry if it's not // allowed so we can decrement the use count. 
allowed, rootPrivs := acl.AllowOperation(req) if !allowed { - return nil, te, logical.ErrPermissionDenied + // Return auth for audit logging even if not allowed + return auth, te, logical.ErrPermissionDenied } if rootPath && !rootPrivs { - return nil, te, logical.ErrPermissionDenied + // Return auth for audit logging even if not allowed + return auth, te, logical.ErrPermissionDenied } - // Create the auth response - auth := &logical.Auth{ - ClientToken: req.ClientToken, - Policies: te.Policies, - Metadata: te.Meta, - DisplayName: te.DisplayName, - } return auth, te, nil } @@ -673,49 +755,50 @@ func (c *Core) Standby() (bool, error) { } // Leader is used to get the current active leader -func (c *Core) Leader() (isLeader bool, leaderAddr string, err error) { +func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err error) { c.stateLock.RLock() defer c.stateLock.RUnlock() // Check if sealed if c.sealed { - return false, "", consts.ErrSealed + return false, "", "", consts.ErrSealed } // Check if HA enabled if c.ha == nil { - return false, "", ErrHANotEnabled + return false, "", "", ErrHANotEnabled } // Check if we are the leader if !c.standby { - return true, c.redirectAddr, nil + return true, c.redirectAddr, c.clusterAddr, nil } // Initialize a lock lock, err := c.ha.LockWith(coreLockPath, "read") if err != nil { - return false, "", err + return false, "", "", err } // Read the value held, leaderUUID, err := lock.Value() if err != nil { - return false, "", err + return false, "", "", err } if !held { - return false, "", nil + return false, "", "", nil } c.clusterLeaderParamsLock.RLock() localLeaderUUID := c.clusterLeaderUUID localRedirAddr := c.clusterLeaderRedirectAddr + localClusterAddr := c.clusterLeaderClusterAddr c.clusterLeaderParamsLock.RUnlock() // If the leader hasn't changed, return the cached value; nothing changes // mid-leadership, and the barrier caches anyways if leaderUUID == localLeaderUUID && localRedirAddr != "" { - return false, 
localRedirAddr, nil + return false, localRedirAddr, localClusterAddr, nil } c.logger.Trace("core: found new active node information, refreshing") @@ -725,16 +808,16 @@ func (c *Core) Leader() (isLeader bool, leaderAddr string, err error) { // Validate base conditions again if leaderUUID == c.clusterLeaderUUID && c.clusterLeaderRedirectAddr != "" { - return false, localRedirAddr, nil + return false, localRedirAddr, localClusterAddr, nil } key := coreLeaderPrefix + leaderUUID entry, err := c.barrier.Get(key) if err != nil { - return false, "", err + return false, "", "", err } if entry == nil { - return false, "", nil + return false, "", "", nil } var oldAdv bool @@ -754,23 +837,24 @@ func (c *Core) Leader() (isLeader bool, leaderAddr string, err error) { // Ensure we are using current values err = c.loadLocalClusterTLS(adv) if err != nil { - return false, "", err + return false, "", "", err } // This will ensure that we both have a connection at the ready and that // the address is the current known value err = c.refreshRequestForwardingConnection(adv.ClusterAddr) if err != nil { - return false, "", err + return false, "", "", err } } // Don't set these until everything has been parsed successfully or we'll // never try again c.clusterLeaderRedirectAddr = adv.RedirectAddr + c.clusterLeaderClusterAddr = adv.ClusterAddr c.clusterLeaderUUID = leaderUUID - return false, adv.RedirectAddr, nil + return false, adv.RedirectAddr, adv.ClusterAddr, nil } // SecretProgress returns the number of keys provided so far @@ -956,13 +1040,14 @@ func (c *Core) unsealInternal(masterKey []byte) (bool, error) { func (c *Core) SealWithRequest(req *logical.Request) error { defer metrics.MeasureSince([]string{"core", "seal-with-request"}, time.Now()) - c.stateLock.Lock() - defer c.stateLock.Unlock() + c.stateLock.RLock() if c.sealed { + c.stateLock.RUnlock() return nil } + // This will unlock the read lock return c.sealInitCommon(req) } @@ -971,10 +1056,10 @@ func (c *Core) 
SealWithRequest(req *logical.Request) error { func (c *Core) Seal(token string) error { defer metrics.MeasureSince([]string{"core", "seal"}, time.Now()) - c.stateLock.Lock() - defer c.stateLock.Unlock() + c.stateLock.RLock() if c.sealed { + c.stateLock.RUnlock() return nil } @@ -984,17 +1069,19 @@ func (c *Core) Seal(token string) error { ClientToken: token, } + // This will unlock the read lock return c.sealInitCommon(req) } // sealInitCommon is common logic for Seal and SealWithRequest and is used to // re-seal the Vault. This requires the Vault to be unsealed again to perform -// any further operations. +// any further operations. Note: this function will read-unlock the state lock. func (c *Core) sealInitCommon(req *logical.Request) (retErr error) { defer metrics.MeasureSince([]string{"core", "seal-internal"}, time.Now()) if req == nil { retErr = multierror.Append(retErr, errors.New("nil request to seal")) + c.stateLock.RUnlock() return retErr } @@ -1009,9 +1096,11 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) { if c.standby { c.logger.Error("core: vault cannot seal when in standby mode; please restart instead") retErr = multierror.Append(retErr, errors.New("vault cannot seal when in standby mode; please restart instead")) + c.stateLock.RUnlock() return retErr } retErr = multierror.Append(retErr, err) + c.stateLock.RUnlock() return retErr } @@ -1026,6 +1115,7 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) { if err := c.auditBroker.LogRequest(auth, req, c.auditedHeaders, nil); err != nil { c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err) retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue")) + c.stateLock.RUnlock() return retErr } @@ -1036,11 +1126,13 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) { if err != nil { c.logger.Error("core: failed to use token", "error", err) retErr = multierror.Append(retErr, 
ErrInternalError) + c.stateLock.RUnlock() return retErr } if te == nil { // Token is no longer valid retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + c.stateLock.RUnlock() return retErr } if te.NumUses == -1 { @@ -1059,19 +1151,36 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) { allowed, rootPrivs := acl.AllowOperation(req) if !allowed { retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + c.stateLock.RUnlock() return retErr } // We always require root privileges for this operation if !rootPrivs { retErr = multierror.Append(retErr, logical.ErrPermissionDenied) + c.stateLock.RUnlock() return retErr } + // Tell any requests that know about this to stop + if c.requestContextCancelFunc != nil { + c.requestContextCancelFunc() + } + + // Unlock from the request handling + c.stateLock.RUnlock() + //Seal the Vault - err = c.sealInternal() - if err != nil { - retErr = multierror.Append(retErr, err) + retChan := make(chan error) + go func() { + c.stateLock.Lock() + defer c.stateLock.Unlock() + retChan <- c.sealInternal() + }() + + funcErr := <-retChan + if funcErr != nil { + retErr = multierror.Append(retErr, funcErr) } return retErr @@ -1086,8 +1195,8 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) { return retErr } - c.stateLock.Lock() - defer c.stateLock.Unlock() + c.stateLock.RLock() + defer c.stateLock.RUnlock() if c.sealed { return nil } @@ -1165,7 +1274,11 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) { // sealInternal is an internal method used to seal the vault. It does not do // any authorization checking. The stateLock must be held prior to calling. 
func (c *Core) sealInternal() error { - // Enable that we are sealed to prevent furthur transactions + if c.sealed { + return nil + } + + // Enable that we are sealed to prevent further transactions c.sealed = true c.logger.Debug("core: marked as sealed") @@ -1187,6 +1300,8 @@ func (c *Core) sealInternal() error { // Signal the standby goroutine to shutdown, wait for completion close(c.standbyStopCh) + c.requestContext = nil + // Release the lock while we wait to avoid deadlocking c.stateLock.Unlock() <-c.standbyDoneCh @@ -1224,6 +1339,8 @@ func (c *Core) postUnseal() (retErr error) { defer func() { if retErr != nil { c.preSeal() + } else { + c.requestContext, c.requestContextCancelFunc = context.WithCancel(context.Background()) } }() c.logger.Info("core: post-unseal setup starting") @@ -1250,16 +1367,19 @@ func (c *Core) postUnseal() (retErr error) { if err := c.ensureWrappingKey(); err != nil { return err } + if err := c.setupPluginCatalog(); err != nil { + return err + } if err := c.loadMounts(); err != nil { return err } if err := c.setupMounts(); err != nil { return err } - if err := c.startRollback(); err != nil { + if err := c.setupPolicyStore(); err != nil { return err } - if err := c.setupPolicyStore(); err != nil { + if err := c.loadCORSConfig(); err != nil { return err } if err := c.loadCredentials(); err != nil { @@ -1268,6 +1388,9 @@ func (c *Core) postUnseal() (retErr error) { if err := c.setupCredentials(); err != nil { return err } + if err := c.startRollback(); err != nil { + return err + } if err := c.setupExpiration(); err != nil { return err } @@ -1280,6 +1403,7 @@ func (c *Core) postUnseal() (retErr error) { if err := c.setupAuditedHeadersConfig(); err != nil { return err } + if c.ha != nil { if err := c.startClusterListener(); err != nil { return err @@ -1369,9 +1493,15 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) { keyRotateDone := make(chan struct{}) keyRotateStop := make(chan struct{}) go 
c.periodicCheckKeyUpgrade(keyRotateDone, keyRotateStop) + // Monitor for new leadership + checkLeaderDone := make(chan struct{}) + checkLeaderStop := make(chan struct{}) + go c.periodicLeaderRefresh(checkLeaderDone, checkLeaderStop) defer func() { close(keyRotateStop) <-keyRotateDone + close(checkLeaderStop) + <-checkLeaderDone }() for { @@ -1382,11 +1512,6 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) { default: } - // Clear forwarding clients - c.requestForwardingConnectionLock.Lock() - c.clearForwardingClients() - c.requestForwardingConnectionLock.Unlock() - // Create a lock uuid, err := uuid.GenerateUUID() if err != nil { @@ -1498,6 +1623,11 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) { c.logger.Error("core: clearing leader advertisement failed", "error", err) } + // Tell any requests that know about this to stop + if c.requestContextCancelFunc != nil { + c.requestContextCancelFunc() + } + // Attempt the pre-seal process c.stateLock.Lock() c.standby = true @@ -1520,6 +1650,22 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) { } } +// This checks the leader periodically to ensure that we switch RPC to a new +// leader pretty quickly. There is logic in Leader() already to not make this +// onerous and avoid more traffic than needed, so we just call that and ignore +// the result. 
+func (c *Core) periodicLeaderRefresh(doneCh, stopCh chan struct{}) { + defer close(doneCh) + for { + select { + case <-time.After(leaderCheckInterval): + c.Leader() + case <-stopCh: + return + } + } +} + // periodicCheckKeyUpgrade is used to watch for key rotation events as a standby func (c *Core) periodicCheckKeyUpgrade(doneCh, stopCh chan struct{}) { defer close(doneCh) @@ -1590,6 +1736,15 @@ func (c *Core) scheduleUpgradeCleanup() error { // Schedule cleanup for all of them time.AfterFunc(keyRotateGracePeriod, func() { + sealed, err := c.barrier.Sealed() + if err != nil { + c.logger.Warn("core: failed to check barrier status at upgrade cleanup time") + return + } + if sealed { + c.logger.Warn("core: barrier sealed at upgrade cleanup time") + return + } for _, upgrade := range upgrades { path := fmt.Sprintf("%s%s", keyringUpgradePrefix, upgrade) if err := c.barrier.Delete(path); err != nil { @@ -1743,11 +1898,9 @@ func (c *Core) emitMetrics(stopCh chan struct{}) { } func (c *Core) ReplicationState() consts.ReplicationState { - var state consts.ReplicationState - c.clusterParamsLock.RLock() - state = c.replicationState - c.clusterParamsLock.RUnlock() - return state + c.stateLock.RLock() + defer c.stateLock.RUnlock() + return c.replicationState } func (c *Core) SealAccess() *SealAccess { diff --git a/vendor/github.com/hashicorp/vault/vault/core_test.go b/vendor/github.com/hashicorp/vault/vault/core_test.go index ced18cd..b940254 100644 --- a/vendor/github.com/hashicorp/vault/vault/core_test.go +++ b/vendor/github.com/hashicorp/vault/vault/core_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/physical" + "github.com/hashicorp/vault/physical/inmem" log "github.com/mgutz/logxi/v1" ) @@ -23,12 +24,17 @@ var ( func TestNewCore_badRedirectAddr(t *testing.T) { logger = logformat.NewVaultLogger(log.LevelTrace) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + 
t.Fatal(err) + } + conf := &CoreConfig{ RedirectAddr: "127.0.0.1:8200", - Physical: physical.NewInmem(logger), + Physical: inm, DisableMlock: true, } - _, err := NewCore(conf) + _, err = NewCore(conf) if err == nil { t.Fatal("should error") } @@ -974,12 +980,19 @@ func TestCore_Standby_Seal(t *testing.T) { // Create the first core and initialize it logger = logformat.NewVaultLogger(log.LevelTrace) - inm := physical.NewInmem(logger) - inmha := physical.NewInmemHA(logger) + inm, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + redirectOriginal := "http://127.0.0.1:8200" core, err := NewCore(&CoreConfig{ Physical: inm, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: redirectOriginal, DisableMlock: true, }) @@ -1006,7 +1019,7 @@ func TestCore_Standby_Seal(t *testing.T) { TestWaitActive(t, core) // Check the leader is local - isLeader, advertise, err := core.Leader() + isLeader, advertise, _, err := core.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1021,7 +1034,7 @@ func TestCore_Standby_Seal(t *testing.T) { redirectOriginal2 := "http://127.0.0.1:8500" core2, err := NewCore(&CoreConfig{ Physical: inm, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: redirectOriginal2, DisableMlock: true, }) @@ -1053,7 +1066,7 @@ func TestCore_Standby_Seal(t *testing.T) { } // Check the leader is not local - isLeader, advertise, err = core2.Leader() + isLeader, advertise, _, err = core2.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1085,12 +1098,19 @@ func TestCore_StepDown(t *testing.T) { // Create the first core and initialize it logger = logformat.NewVaultLogger(log.LevelTrace) - inm := physical.NewInmem(logger) - inmha := physical.NewInmemHA(logger) + inm, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + 
t.Fatal(err) + } + redirectOriginal := "http://127.0.0.1:8200" core, err := NewCore(&CoreConfig{ Physical: inm, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: redirectOriginal, DisableMlock: true, }) @@ -1117,7 +1137,7 @@ func TestCore_StepDown(t *testing.T) { TestWaitActive(t, core) // Check the leader is local - isLeader, advertise, err := core.Leader() + isLeader, advertise, _, err := core.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1132,7 +1152,7 @@ func TestCore_StepDown(t *testing.T) { redirectOriginal2 := "http://127.0.0.1:8500" core2, err := NewCore(&CoreConfig{ Physical: inm, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: redirectOriginal2, DisableMlock: true, }) @@ -1164,7 +1184,7 @@ func TestCore_StepDown(t *testing.T) { } // Check the leader is not local - isLeader, advertise, err = core2.Leader() + isLeader, advertise, _, err = core2.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1205,7 +1225,7 @@ func TestCore_StepDown(t *testing.T) { } // Check the leader is core2 - isLeader, advertise, err = core2.Leader() + isLeader, advertise, _, err = core2.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1217,7 +1237,7 @@ func TestCore_StepDown(t *testing.T) { } // Check the leader is not local - isLeader, advertise, err = core.Leader() + isLeader, advertise, _, err = core.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1248,7 +1268,7 @@ func TestCore_StepDown(t *testing.T) { } // Check the leader is core1 - isLeader, advertise, err = core.Leader() + isLeader, advertise, _, err = core.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1260,7 +1280,7 @@ func TestCore_StepDown(t *testing.T) { } // Check the leader is not local - isLeader, advertise, err = core2.Leader() + isLeader, advertise, _, err = core2.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1276,12 +1296,19 @@ func TestCore_CleanLeaderPrefix(t *testing.T) { // Create the first core and 
initialize it logger = logformat.NewVaultLogger(log.LevelTrace) - inm := physical.NewInmem(logger) - inmha := physical.NewInmemHA(logger) + inm, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + redirectOriginal := "http://127.0.0.1:8200" core, err := NewCore(&CoreConfig{ Physical: inm, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: redirectOriginal, DisableMlock: true, }) @@ -1335,7 +1362,7 @@ func TestCore_CleanLeaderPrefix(t *testing.T) { } // Check the leader is local - isLeader, advertise, err := core.Leader() + isLeader, advertise, _, err := core.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1350,7 +1377,7 @@ func TestCore_CleanLeaderPrefix(t *testing.T) { redirectOriginal2 := "http://127.0.0.1:8500" core2, err := NewCore(&CoreConfig{ Physical: inm, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: redirectOriginal2, DisableMlock: true, }) @@ -1382,7 +1409,7 @@ func TestCore_CleanLeaderPrefix(t *testing.T) { } // Check the leader is not local - isLeader, advertise, err = core2.Leader() + isLeader, advertise, _, err = core2.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1412,7 +1439,7 @@ func TestCore_CleanLeaderPrefix(t *testing.T) { TestWaitActive(t, core2) // Check the leader is local - isLeader, advertise, err = core2.Leader() + isLeader, advertise, _, err = core2.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1438,14 +1465,27 @@ func TestCore_CleanLeaderPrefix(t *testing.T) { func TestCore_Standby(t *testing.T) { logger = logformat.NewVaultLogger(log.LevelTrace) - inmha := physical.NewInmemHA(logger) - testCore_Standby_Common(t, inmha, inmha) + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + + testCore_Standby_Common(t, inmha, inmha.(physical.HABackend)) } func TestCore_Standby_SeparateHA(t *testing.T) { logger = 
logformat.NewVaultLogger(log.LevelTrace) - testCore_Standby_Common(t, physical.NewInmemHA(logger), physical.NewInmemHA(logger)) + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + inmha2, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + + testCore_Standby_Common(t, inmha, inmha2.(physical.HABackend)) } func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical.HABackend) { @@ -1494,7 +1534,7 @@ func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical. } // Check the leader is local - isLeader, advertise, err := core.Leader() + isLeader, advertise, _, err := core.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1547,7 +1587,7 @@ func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical. } // Check the leader is not local - isLeader, advertise, err = core2.Leader() + isLeader, advertise, _, err = core2.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1593,7 +1633,7 @@ func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical. } // Check the leader is local - isLeader, advertise, err = core2.Leader() + isLeader, advertise, _, err = core2.Leader() if err != nil { t.Fatalf("err: %v", err) } @@ -1604,18 +1644,18 @@ func testCore_Standby_Common(t *testing.T, inm physical.Backend, inmha physical. 
t.Fatalf("Bad advertise: %v, orig is %v", advertise, redirectOriginal2) } - if inm.(*physical.InmemHABackend) == inmha.(*physical.InmemHABackend) { - lockSize := inm.(*physical.InmemHABackend).LockMapSize() + if inm.(*inmem.InmemHABackend) == inmha.(*inmem.InmemHABackend) { + lockSize := inm.(*inmem.InmemHABackend).LockMapSize() if lockSize == 0 { t.Fatalf("locks not used with only one HA backend") } } else { - lockSize := inmha.(*physical.InmemHABackend).LockMapSize() + lockSize := inmha.(*inmem.InmemHABackend).LockMapSize() if lockSize == 0 { t.Fatalf("locks not used with expected HA backend") } - lockSize = inm.(*physical.InmemHABackend).LockMapSize() + lockSize = inm.(*inmem.InmemHABackend).LockMapSize() if lockSize != 0 { t.Fatalf("locks used with unexpected HA backend") } @@ -1793,6 +1833,19 @@ func TestCore_RenewSameLease(t *testing.T) { if resp.Secret.LeaseID != original { t.Fatalf("lease id changed: %s %s", original, resp.Secret.LeaseID) } + + // Renew the lease (alternate path) + req = logical.TestRequest(t, logical.UpdateOperation, "sys/leases/renew/"+resp.Secret.LeaseID) + req.ClientToken = root + resp, err = c.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Verify the lease did not change + if resp.Secret.LeaseID != original { + t.Fatalf("lease id changed: %s %s", original, resp.Secret.LeaseID) + } } // Renew of a token should not create a new lease @@ -1937,7 +1990,7 @@ path "secret/*" { } // Renew the lease - req = logical.TestRequest(t, logical.UpdateOperation, "sys/renew") + req = logical.TestRequest(t, logical.UpdateOperation, "sys/leases/renew") req.Data = map[string]interface{}{ "lease_id": resp.Secret.LeaseID, } @@ -2002,12 +2055,19 @@ func TestCore_Standby_Rotate(t *testing.T) { // Create the first core and initialize it logger = logformat.NewVaultLogger(log.LevelTrace) - inm := physical.NewInmem(logger) - inmha := physical.NewInmemHA(logger) + inm, err := inmem.NewInmemHA(nil, logger) + if err != nil { + 
t.Fatal(err) + } + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + redirectOriginal := "http://127.0.0.1:8200" core, err := NewCore(&CoreConfig{ Physical: inm, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: redirectOriginal, DisableMlock: true, }) @@ -2028,7 +2088,7 @@ func TestCore_Standby_Rotate(t *testing.T) { redirectOriginal2 := "http://127.0.0.1:8500" core2, err := NewCore(&CoreConfig{ Physical: inm, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: redirectOriginal2, DisableMlock: true, }) diff --git a/vendor/github.com/hashicorp/vault/vault/cors.go b/vendor/github.com/hashicorp/vault/vault/cors.go new file mode 100644 index 0000000..f94f078 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/cors.go @@ -0,0 +1,153 @@ +package vault + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + + "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/logical" +) + +const ( + CORSDisabled uint32 = iota + CORSEnabled +) + +var StdAllowedHeaders = []string{ + "Content-Type", + "X-Requested-With", + "X-Vault-AWS-IAM-Server-ID", + "X-Vault-MFA", + "X-Vault-No-Request-Forwarding", + "X-Vault-Token", + "X-Vault-Wrap-Format", + "X-Vault-Wrap-TTL", +} + +// CORSConfig stores the state of the CORS configuration. 
+type CORSConfig struct { + sync.RWMutex `json:"-"` + core *Core + Enabled uint32 `json:"enabled"` + AllowedOrigins []string `json:"allowed_origins,omitempty"` + AllowedHeaders []string `json:"allowed_headers,omitempty"` +} + +func (c *Core) saveCORSConfig() error { + view := c.systemBarrierView.SubView("config/") + + localConfig := &CORSConfig{ + Enabled: atomic.LoadUint32(&c.corsConfig.Enabled), + } + c.corsConfig.RLock() + localConfig.AllowedOrigins = c.corsConfig.AllowedOrigins + localConfig.AllowedHeaders = c.corsConfig.AllowedHeaders + c.corsConfig.RUnlock() + + entry, err := logical.StorageEntryJSON("cors", localConfig) + if err != nil { + return fmt.Errorf("failed to create CORS config entry: %v", err) + } + + if err := view.Put(entry); err != nil { + return fmt.Errorf("failed to save CORS config: %v", err) + } + + return nil +} + +// This should only be called with the core state lock held for writing +func (c *Core) loadCORSConfig() error { + view := c.systemBarrierView.SubView("config/") + + // Load the config in + out, err := view.Get("cors") + if err != nil { + return fmt.Errorf("failed to read CORS config: %v", err) + } + if out == nil { + return nil + } + + newConfig := new(CORSConfig) + err = out.DecodeJSON(newConfig) + if err != nil { + return err + } + newConfig.core = c + + c.corsConfig = newConfig + + return nil +} + +// Enable takes either a '*' or a comma-seprated list of URLs that can make +// cross-origin requests to Vault. +func (c *CORSConfig) Enable(urls []string, headers []string) error { + if len(urls) == 0 { + return errors.New("at least one origin or the wildcard must be provided.") + } + + if strutil.StrListContains(urls, "*") && len(urls) > 1 { + return errors.New("to allow all origins the '*' must be the only value for allowed_origins") + } + + c.Lock() + c.AllowedOrigins = urls + + // Start with the standard headers to Vault accepts. + c.AllowedHeaders = append(c.AllowedHeaders, StdAllowedHeaders...) 
+ + // Allow the user to add additional headers to the list of + // headers allowed on cross-origin requests. + if len(headers) > 0 { + c.AllowedHeaders = append(c.AllowedHeaders, headers...) + } + c.Unlock() + + atomic.StoreUint32(&c.Enabled, CORSEnabled) + + return c.core.saveCORSConfig() +} + +// IsEnabled returns the value of CORSConfig.isEnabled +func (c *CORSConfig) IsEnabled() bool { + return atomic.LoadUint32(&c.Enabled) == CORSEnabled +} + +// Disable sets CORS to disabled and clears the allowed origins & headers. +func (c *CORSConfig) Disable() error { + atomic.StoreUint32(&c.Enabled, CORSDisabled) + c.Lock() + + c.AllowedOrigins = nil + c.AllowedHeaders = nil + + c.Unlock() + + return c.core.saveCORSConfig() +} + +// IsValidOrigin determines if the origin of the request is allowed to make +// cross-origin requests based on the CORSConfig. +func (c *CORSConfig) IsValidOrigin(origin string) bool { + // If we aren't enabling CORS then all origins are valid + if !c.IsEnabled() { + return true + } + + c.RLock() + defer c.RUnlock() + + if len(c.AllowedOrigins) == 0 { + return false + } + + if len(c.AllowedOrigins) == 1 && (c.AllowedOrigins)[0] == "*" { + return true + } + + return strutil.StrListContains(c.AllowedOrigins, origin) +} diff --git a/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go b/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go index 30b6a76..b5e477a 100644 --- a/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go +++ b/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go @@ -1,9 +1,14 @@ package vault import ( + "fmt" "time" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/consts" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/helper/wrapping" "github.com/hashicorp/vault/logical" ) @@ -79,11 +84,60 @@ func (d dynamicSystemView) CachingDisabled() bool { return d.core.cachingDisabled || (d.mountEntry != nil && d.mountEntry.Config.ForceNoCache) } 
-// Checks if this is a primary Vault instance. +// Checks if this is a primary Vault instance. Caller should hold the stateLock +// in read mode. func (d dynamicSystemView) ReplicationState() consts.ReplicationState { - var state consts.ReplicationState - d.core.clusterParamsLock.RLock() - state = d.core.replicationState - d.core.clusterParamsLock.RUnlock() - return state + return d.core.replicationState +} + +// ResponseWrapData wraps the given data in a cubbyhole and returns the +// token used to unwrap. +func (d dynamicSystemView) ResponseWrapData(data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "sys/wrapping/wrap", + } + + resp := &logical.Response{ + WrapInfo: &wrapping.ResponseWrapInfo{ + TTL: ttl, + }, + Data: data, + } + + if jwt { + resp.WrapInfo.Format = "jwt" + } + + _, err := d.core.wrapInCubbyhole(req, resp) + if err != nil { + return nil, err + } + + return resp.WrapInfo, nil +} + +// LookupPlugin looks for a plugin with the given name in the plugin catalog. It +// returns a PluginRunner or an error if no plugin was found. +func (d dynamicSystemView) LookupPlugin(name string) (*pluginutil.PluginRunner, error) { + if d.core == nil { + return nil, fmt.Errorf("system view core is nil") + } + if d.core.pluginCatalog == nil { + return nil, fmt.Errorf("system view core plugin catalog is nil") + } + r, err := d.core.pluginCatalog.Get(name) + if err != nil { + return nil, err + } + if r == nil { + return nil, errwrap.Wrapf(fmt.Sprintf("{{err}}: %s", name), ErrPluginNotFound) + } + + return r, nil +} + +// MlockEnabled returns the configuration setting for enabling mlock on plugins. 
+func (d dynamicSystemView) MlockEnabled() bool { + return d.core.enableMlock } diff --git a/vendor/github.com/hashicorp/vault/vault/expiration.go b/vendor/github.com/hashicorp/vault/vault/expiration.go index f0f885e..628df8e 100644 --- a/vendor/github.com/hashicorp/vault/vault/expiration.go +++ b/vendor/github.com/hashicorp/vault/vault/expiration.go @@ -2,18 +2,23 @@ package vault import ( "encoding/json" + "errors" "fmt" "path" "strings" "sync" + "sync/atomic" "time" "github.com/armon/go-metrics" log "github.com/mgutz/logxi/v1" + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/consts" "github.com/hashicorp/vault/helper/jsonutil" + "github.com/hashicorp/vault/helper/locksutil" "github.com/hashicorp/vault/logical" ) @@ -34,9 +39,6 @@ const ( // revokeRetryBase is a baseline retry time revokeRetryBase = 10 * time.Second - // minRevokeDelay is used to prevent an instant revoke on restore - minRevokeDelay = 5 * time.Second - // maxLeaseDuration is the default maximum lease duration maxLeaseTTL = 32 * 24 * time.Hour @@ -56,7 +58,16 @@ type ExpirationManager struct { logger log.Logger pending map[string]*time.Timer - pendingLock sync.Mutex + pendingLock sync.RWMutex + + tidyLock int32 + + restoreMode int32 + restoreModeLock sync.RWMutex + restoreRequestLock sync.RWMutex + restoreLocks []*locksutil.LockEntry + restoreLoaded sync.Map + quitCh chan struct{} } // NewExpirationManager creates a new ExpirationManager that is backed @@ -64,8 +75,8 @@ type ExpirationManager struct { func NewExpirationManager(router *Router, view *BarrierView, ts *TokenStore, logger log.Logger) *ExpirationManager { if logger == nil { logger = log.New("expiration_manager") - } + exp := &ExpirationManager{ router: router, idView: view.SubView(leaseViewPrefix), @@ -73,6 +84,12 @@ func NewExpirationManager(router *Router, view *BarrierView, ts *TokenStore, log tokenStore: ts, logger: logger, pending: 
make(map[string]*time.Timer), + + // new instances of the expiration manager will go immediately into + // restore mode + restoreMode: 1, + restoreLocks: locksutil.CreateLocks(), + quitCh: make(chan struct{}), } return exp } @@ -94,9 +111,14 @@ func (c *Core) setupExpiration() error { // Restore the existing state c.logger.Info("expiration: restoring leases") - if err := c.expiration.Restore(); err != nil { - return fmt.Errorf("expiration state restore failed: %v", err) + errorFunc := func() { + c.logger.Error("expiration: shutting down") + if err := c.Shutdown(); err != nil { + c.logger.Error("expiration: error shutting down core: %v", err) + } } + go c.expiration.Restore(errorFunc) + return nil } @@ -114,17 +136,165 @@ func (c *Core) stopExpiration() error { return nil } +// lockLease takes out a lock for a given lease ID +func (m *ExpirationManager) lockLease(leaseID string) { + locksutil.LockForKey(m.restoreLocks, leaseID).Lock() +} + +// unlockLease unlocks a given lease ID +func (m *ExpirationManager) unlockLease(leaseID string) { + locksutil.LockForKey(m.restoreLocks, leaseID).Unlock() +} + +// inRestoreMode returns if we are currently in restore mode +func (m *ExpirationManager) inRestoreMode() bool { + return atomic.LoadInt32(&m.restoreMode) == 1 +} + +// Tidy cleans up the dangling storage entries for leases. It scans the storage +// view to find all the available leases, checks if the token embedded in it is +// either empty or invalid and in both the cases, it revokes them. It also uses +// a token cache to avoid multiple lookups of the same token ID. It is normally +// not required to use the API that invokes this. This is only intended to +// clean up the corrupt storage due to bugs. 
+func (m *ExpirationManager) Tidy() error { + if m.inRestoreMode() { + return errors.New("cannot run tidy while restoring leases") + } + + var tidyErrors *multierror.Error + + if !atomic.CompareAndSwapInt32(&m.tidyLock, 0, 1) { + m.logger.Warn("expiration: tidy operation on leases is already in progress") + return fmt.Errorf("tidy operation on leases is already in progress") + } + + defer atomic.CompareAndSwapInt32(&m.tidyLock, 1, 0) + + m.logger.Info("expiration: beginning tidy operation on leases") + defer m.logger.Info("expiration: finished tidy operation on leases") + + // Create a cache to keep track of looked up tokens + tokenCache := make(map[string]bool) + var countLease, revokedCount, deletedCountInvalidToken, deletedCountEmptyToken int64 + + tidyFunc := func(leaseID string) { + countLease++ + if countLease%500 == 0 { + m.logger.Info("expiration: tidying leases", "progress", countLease) + } + + le, err := m.loadEntry(leaseID) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to load the lease ID %q: %v", leaseID, err)) + return + } + + if le == nil { + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("nil entry for lease ID %q: %v", leaseID, err)) + return + } + + var isValid, ok bool + revokeLease := false + if le.ClientToken == "" { + m.logger.Trace("expiration: revoking lease which has an empty token", "lease_id", leaseID) + revokeLease = true + deletedCountEmptyToken++ + goto REVOKE_CHECK + } + + isValid, ok = tokenCache[le.ClientToken] + if !ok { + saltedID, err := m.tokenStore.SaltID(le.ClientToken) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to lookup salt id: %v", err)) + return + } + lock := locksutil.LockForKey(m.tokenStore.tokenLocks, le.ClientToken) + lock.RLock() + te, err := m.tokenStore.lookupSalted(saltedID, true) + lock.RUnlock() + + if err != nil { + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to lookup token: %v", err)) + return + } + + if te == 
nil { + m.logger.Trace("expiration: revoking lease which holds an invalid token", "lease_id", leaseID) + revokeLease = true + deletedCountInvalidToken++ + tokenCache[le.ClientToken] = false + } else { + tokenCache[le.ClientToken] = true + } + goto REVOKE_CHECK + } else { + if isValid { + return + } + + m.logger.Trace("expiration: revoking lease which contains an invalid token", "lease_id", leaseID) + revokeLease = true + deletedCountInvalidToken++ + goto REVOKE_CHECK + } + + REVOKE_CHECK: + if revokeLease { + // Force the revocation and skip going through the token store + // again + err = m.revokeCommon(leaseID, true, true) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to revoke an invalid lease with ID %q: %v", leaseID, err)) + return + } + revokedCount++ + } + } + + if err := logical.ScanView(m.idView, tidyFunc); err != nil { + return err + } + + m.logger.Debug("expiration: number of leases scanned", "count", countLease) + m.logger.Debug("expiration: number of leases which had empty tokens", "count", deletedCountEmptyToken) + m.logger.Debug("expiration: number of leases which had invalid tokens", "count", deletedCountInvalidToken) + m.logger.Debug("expiration: number of leases successfully revoked", "count", revokedCount) + + return tidyErrors.ErrorOrNil() +} + // Restore is used to recover the lease states when starting. // This is used after starting the vault. -func (m *ExpirationManager) Restore() error { - m.pendingLock.Lock() - defer m.pendingLock.Unlock() +func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) { + defer func() { + // Turn off restore mode. We can do this safely without the lock because + // if restore mode finished successfully, restore mode was already + // disabled with the lock. In an error state, this will allow the + // Stop() function to shut everything down. 
+ atomic.StoreInt32(&m.restoreMode, 0) + + switch { + case retErr == nil: + case errwrap.Contains(retErr, ErrBarrierSealed.Error()): + // Don't run error func because we're likely already shutting down + m.logger.Warn("expiration: barrier sealed while restoring leases, stopping lease loading") + retErr = nil + default: + m.logger.Error("expiration: error restoring leases", "error", retErr) + if errorFunc != nil { + errorFunc() + } + } + }() // Accumulate existing leases m.logger.Debug("expiration: collecting leases") existing, err := logical.CollectKeys(m.idView) if err != nil { - return fmt.Errorf("failed to scan for leases: %v", err) + return errwrap.Wrapf("failed to scan for leases: {{err}}", err) } m.logger.Debug("expiration: leases collected", "num_existing", len(existing)) @@ -133,7 +303,7 @@ func (m *ExpirationManager) Restore() error { quit := make(chan bool) // Buffer these channels to prevent deadlocks errs := make(chan error, len(existing)) - result := make(chan *leaseEntry, len(existing)) + result := make(chan struct{}, len(existing)) // Use a wait group wg := &sync.WaitGroup{} @@ -152,18 +322,21 @@ func (m *ExpirationManager) Restore() error { return } - le, err := m.loadEntry(leaseID) + err := m.processRestore(leaseID) if err != nil { errs <- err continue } - // Write results out to the result channel - result <- le + // Send message that lease is done + result <- struct{}{} // quit early case <-quit: return + + case <-m.quitCh: + return } } }() @@ -174,7 +347,7 @@ func (m *ExpirationManager) Restore() error { go func() { defer wg.Done() for i, leaseID := range existing { - if i%500 == 0 { + if i > 0 && i%500 == 0 { m.logger.Trace("expiration: leases loading", "progress", i) } @@ -182,6 +355,9 @@ func (m *ExpirationManager) Restore() error { case <-quit: return + case <-m.quitCh: + return + default: broker <- leaseID } @@ -191,49 +367,59 @@ func (m *ExpirationManager) Restore() error { close(broker) }() - // Restore each key by pulling from the result 
chan + // Ensure all keys on the chan are processed for i := 0; i < len(existing); i++ { select { case err := <-errs: // Close all go routines close(quit) - return err - case le := <-result: + case <-m.quitCh: + close(quit) + return nil - // If there is no entry, nothing to restore - if le == nil { - continue - } - - // If there is no expiry time, don't do anything - if le.ExpireTime.IsZero() { - continue - } - - // Determine the remaining time to expiration - expires := le.ExpireTime.Sub(time.Now()) - if expires <= 0 { - expires = minRevokeDelay - } - - // Setup revocation timer - m.pending[le.LeaseID] = time.AfterFunc(expires, func() { - m.expireID(le.LeaseID) - }) + case <-result: } } // Let all go routines finish wg.Wait() - if len(m.pending) > 0 { - if m.logger.IsInfo() { - m.logger.Info("expire: leases restored", "restored_lease_count", len(m.pending)) - } + m.restoreModeLock.Lock() + m.restoreLoaded = sync.Map{} + m.restoreLocks = nil + atomic.StoreInt32(&m.restoreMode, 0) + m.restoreModeLock.Unlock() + + m.logger.Info("expiration: lease restore complete") + return nil +} + +// processRestore takes a lease and restores it in the expiration manager if it has +// not already been seen +func (m *ExpirationManager) processRestore(leaseID string) error { + m.restoreRequestLock.RLock() + defer m.restoreRequestLock.RUnlock() + + // Check if the lease has been seen + if _, ok := m.restoreLoaded.Load(leaseID); ok { + return nil } + m.lockLease(leaseID) + defer m.unlockLease(leaseID) + + // Check again with the lease locked + if _, ok := m.restoreLoaded.Load(leaseID); ok { + return nil + } + + // Load lease and restore expiration timer + _, err := m.loadEntryInternal(leaseID, true, false) + if err != nil { + return err + } return nil } @@ -241,12 +427,26 @@ func (m *ExpirationManager) Restore() error { // This must be called before sealing the view. 
func (m *ExpirationManager) Stop() error { // Stop all the pending expiration timers + m.logger.Debug("expiration: stop triggered") + defer m.logger.Debug("expiration: finished stopping") + m.pendingLock.Lock() for _, timer := range m.pending { timer.Stop() } m.pending = make(map[string]*time.Timer) m.pendingLock.Unlock() + + close(m.quitCh) + if m.inRestoreMode() { + for { + if !m.inRestoreMode() { + break + } + time.Sleep(10 * time.Millisecond) + } + } + return nil } @@ -261,6 +461,7 @@ func (m *ExpirationManager) Revoke(leaseID string) error { // during revocation and still remove entries/index/lease timers func (m *ExpirationManager) revokeCommon(leaseID string, force, skipToken bool) error { defer metrics.MeasureSince([]string{"expire", "revoke-common"}, time.Now()) + // Load the entry le, err := m.loadEntry(leaseID) if err != nil { @@ -277,10 +478,10 @@ func (m *ExpirationManager) revokeCommon(leaseID string, force, skipToken bool) if err := m.revokeEntry(le); err != nil { if !force { return err - } else { - if m.logger.IsWarn() { - m.logger.Warn("revocation from the backend failed, but in force mode so ignoring", "error", err) - } + } + + if m.logger.IsWarn() { + m.logger.Warn("revocation from the backend failed, but in force mode so ignoring", "error", err) } } } @@ -330,6 +531,7 @@ func (m *ExpirationManager) RevokePrefix(prefix string) error { // token store's revokeSalted function. 
func (m *ExpirationManager) RevokeByToken(te *TokenEntry) error { defer metrics.MeasureSince([]string{"expire", "revoke-by-token"}, time.Now()) + // Lookup the leases existing, err := m.lookupByToken(te.ID) if err != nil { @@ -338,14 +540,18 @@ func (m *ExpirationManager) RevokeByToken(te *TokenEntry) error { // Revoke all the keys for idx, leaseID := range existing { - if err := m.Revoke(leaseID); err != nil { + if err := m.revokeCommon(leaseID, false, false); err != nil { return fmt.Errorf("failed to revoke '%s' (%d / %d): %v", leaseID, idx+1, len(existing), err) } } if te.Path != "" { - tokenLeaseID := path.Join(te.Path, m.tokenStore.SaltID(te.ID)) + saltedID, err := m.tokenStore.SaltID(te.ID) + if err != nil { + return err + } + tokenLeaseID := path.Join(te.Path, saltedID) // We want to skip the revokeEntry call as that will call back into // revocation logic in the token store, which is what is running this @@ -361,6 +567,11 @@ func (m *ExpirationManager) RevokeByToken(te *TokenEntry) error { } func (m *ExpirationManager) revokePrefixCommon(prefix string, force bool) error { + if m.inRestoreMode() { + m.restoreRequestLock.Lock() + defer m.restoreRequestLock.Unlock() + } + // Ensure there is a trailing slash if !strings.HasSuffix(prefix, "/") { prefix = prefix + "/" @@ -388,6 +599,7 @@ func (m *ExpirationManager) revokePrefixCommon(prefix string, force bool) error // and a renew interval. The increment may be ignored. 
func (m *ExpirationManager) Renew(leaseID string, increment time.Duration) (*logical.Response, error) { defer metrics.MeasureSince([]string{"expire", "renew"}, time.Now()) + // Load the entry le, err := m.loadEntry(leaseID) if err != nil { @@ -399,6 +611,13 @@ func (m *ExpirationManager) Renew(leaseID string, increment time.Duration) (*log return nil, err } + if le.Secret == nil { + if le.Auth != nil { + return logical.ErrorResponse("tokens cannot be renewed through this endpoint"), logical.ErrPermissionDenied + } + return logical.ErrorResponse("lease does not correspond to a secret"), nil + } + // Attempt to renew the entry resp, err := m.renewEntry(le, increment) if err != nil { @@ -434,13 +653,57 @@ func (m *ExpirationManager) Renew(leaseID string, increment time.Duration) (*log return resp, nil } +// RestoreSaltedTokenCheck verifies that the token is not expired while running +// in restore mode. If we are not in restore mode, the lease has already been +// restored or the lease still has time left, it returns true. +func (m *ExpirationManager) RestoreSaltedTokenCheck(source string, saltedID string) (bool, error) { + defer metrics.MeasureSince([]string{"expire", "restore-token-check"}, time.Now()) + + // Return immediately if we are not in restore mode, expiration manager is + // already loaded + if !m.inRestoreMode() { + return true, nil + } + + m.restoreModeLock.RLock() + defer m.restoreModeLock.RUnlock() + + // Check again after we obtain the lock + if !m.inRestoreMode() { + return true, nil + } + + leaseID := path.Join(source, saltedID) + + m.lockLease(leaseID) + defer m.unlockLease(leaseID) + + le, err := m.loadEntryInternal(leaseID, true, true) + if err != nil { + return false, err + } + if le != nil && !le.ExpireTime.IsZero() { + expires := le.ExpireTime.Sub(time.Now()) + if expires <= 0 { + return false, nil + } + } + + return true, nil +} + // RenewToken is used to renew a token which does not need to // invoke a logical backend. 
func (m *ExpirationManager) RenewToken(req *logical.Request, source string, token string, increment time.Duration) (*logical.Response, error) { defer metrics.MeasureSince([]string{"expire", "renew-token"}, time.Now()) + // Compute the Lease ID - leaseID := path.Join(source, m.tokenStore.SaltID(token)) + saltedID, err := m.tokenStore.SaltID(token) + if err != nil { + return nil, err + } + leaseID := path.Join(source, saltedID) // Load the entry le, err := m.loadEntry(leaseID) @@ -498,8 +761,13 @@ func (m *ExpirationManager) RenewToken(req *logical.Request, source string, toke // Register is used to take a request and response with an associated // lease. The secret gets assigned a LeaseID and the management of // of lease is assumed by the expiration manager. -func (m *ExpirationManager) Register(req *logical.Request, resp *logical.Response) (string, error) { +func (m *ExpirationManager) Register(req *logical.Request, resp *logical.Response) (id string, retErr error) { defer metrics.MeasureSince([]string{"expire", "register"}, time.Now()) + + if req.ClientToken == "" { + return "", fmt.Errorf("expiration: cannot register a lease with an empty client token") + } + // Ignore if there is no leased secret if resp == nil || resp.Secret == nil { return "", nil @@ -515,8 +783,34 @@ func (m *ExpirationManager) Register(req *logical.Request, resp *logical.Respons if err != nil { return "", err } + + leaseID := path.Join(req.Path, leaseUUID) + + defer func() { + // If there is an error we want to rollback as much as possible (note + // that errors here are ignored to do as much cleanup as we can). We + // want to revoke a generated secret (since an error means we may not + // be successfully tracking it), remove indexes, and delete the entry. 
+ if retErr != nil { + revResp, err := m.router.Route(logical.RevokeRequest(req.Path, resp.Secret, resp.Data)) + if err != nil { + retErr = multierror.Append(retErr, errwrap.Wrapf("an additional internal error was encountered revoking the newly-generated secret: {{err}}", err)) + } else if revResp != nil && revResp.IsError() { + retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered revoking the newly-generated secret: {{err}}", revResp.Error())) + } + + if err := m.deleteEntry(leaseID); err != nil { + retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered deleting any lease associated with the newly-generated secret: {{err}}", err)) + } + + if err := m.removeIndexByToken(req.ClientToken, leaseID); err != nil { + retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered removing lease indexes associated with the newly-generated secret: {{err}}", err)) + } + } + }() + le := leaseEntry{ - LeaseID: path.Join(req.Path, leaseUUID), + LeaseID: leaseID, ClientToken: req.ClientToken, Path: req.Path, Data: resp.Data, @@ -548,9 +842,22 @@ func (m *ExpirationManager) Register(req *logical.Request, resp *logical.Respons func (m *ExpirationManager) RegisterAuth(source string, auth *logical.Auth) error { defer metrics.MeasureSince([]string{"expire", "register-auth"}, time.Now()) + if auth.ClientToken == "" { + return fmt.Errorf("expiration: cannot register an auth lease with an empty token") + } + + if strings.Contains(source, "..") { + return fmt.Errorf("expiration: %s", consts.ErrPathContainsParentReferences) + } + + saltedID, err := m.tokenStore.SaltID(auth.ClientToken) + if err != nil { + return err + } + // Create a lease entry le := leaseEntry{ - LeaseID: path.Join(source, m.tokenStore.SaltID(auth.ClientToken)), + LeaseID: path.Join(source, saltedID), ClientToken: auth.ClientToken, Auth: auth, Path: source, @@ -574,7 +881,11 @@ func (m *ExpirationManager) 
FetchLeaseTimesByToken(source, token string) (*lease defer metrics.MeasureSince([]string{"expire", "fetch-lease-times-by-token"}, time.Now()) // Compute the Lease ID - leaseID := path.Join(source, m.tokenStore.SaltID(token)) + saltedID, err := m.tokenStore.SaltID(token) + if err != nil { + return nil, err + } + leaseID := path.Join(source, saltedID) return m.FetchLeaseTimes(leaseID) } @@ -620,8 +931,19 @@ func (m *ExpirationManager) updatePending(le *leaseEntry, leaseTotal time.Durati // Check for an existing timer timer, ok := m.pending[le.LeaseID] + // If there is no expiry time, don't do anything + if le.ExpireTime.IsZero() { + // if the timer happened to exist, stop the time and delete it from the + // pending timers. + if ok { + timer.Stop() + delete(m.pending, le.LeaseID) + } + return + } + // Create entry if it does not exist - if !ok && leaseTotal > 0 { + if !ok { timer := time.AfterFunc(leaseTotal, func() { m.expireID(le.LeaseID) }) @@ -629,17 +951,8 @@ func (m *ExpirationManager) updatePending(le *leaseEntry, leaseTotal time.Durati return } - // Delete the timer if the expiration time is zero - if ok && leaseTotal == 0 { - timer.Stop() - delete(m.pending, le.LeaseID) - return - } - // Extend the timer by the lease total - if ok && leaseTotal > 0 { - timer.Reset(leaseTotal) - } + timer.Reset(leaseTotal) } // expireID is invoked when a given ID is expired @@ -650,17 +963,23 @@ func (m *ExpirationManager) expireID(leaseID string) { m.pendingLock.Unlock() for attempt := uint(0); attempt < maxRevokeAttempts; attempt++ { + select { + case <-m.quitCh: + m.logger.Error("expiration: shutting down, not attempting further revocation of lease", "lease_id", leaseID) + return + default: + } err := m.Revoke(leaseID) if err == nil { if m.logger.IsInfo() { - m.logger.Info("expire: revoked lease", "lease_id", leaseID) + m.logger.Info("expiration: revoked lease", "lease_id", leaseID) } return } - m.logger.Error("expire: failed to revoke lease", "lease_id", leaseID, "error", 
err) + m.logger.Error("expiration: failed to revoke lease", "lease_id", leaseID, "error", err) time.Sleep((1 << attempt) * revokeRetryBase) } - m.logger.Error("expire: maximum revoke attempts reached", "lease_id", leaseID) + m.logger.Error("expiration: maximum revoke attempts reached", "lease_id", leaseID) } // revokeEntry is used to attempt revocation of an internal entry @@ -668,7 +987,7 @@ func (m *ExpirationManager) revokeEntry(le *leaseEntry) error { // Revocation of login tokens is special since we can by-pass the // backend and directly interact with the token store if le.Auth != nil { - if err := m.tokenStore.RevokeTree(le.Auth.ClientToken); err != nil { + if err := m.tokenStore.RevokeTree(le.ClientToken); err != nil { return fmt.Errorf("failed to revoke token: %v", err) } @@ -722,6 +1041,24 @@ func (m *ExpirationManager) renewAuthEntry(req *logical.Request, le *leaseEntry, // loadEntry is used to read a lease entry func (m *ExpirationManager) loadEntry(leaseID string) (*leaseEntry, error) { + // Take out the lease locks after we ensure we are in restore mode + restoreMode := m.inRestoreMode() + if restoreMode { + m.restoreModeLock.RLock() + defer m.restoreModeLock.RUnlock() + + restoreMode = m.inRestoreMode() + if restoreMode { + m.lockLease(leaseID) + defer m.unlockLease(leaseID) + } + } + return m.loadEntryInternal(leaseID, restoreMode, true) +} + +// loadEntryInternal is used when you need to load an entry but also need to +// control the lifecycle of the restoreLock +func (m *ExpirationManager) loadEntryInternal(leaseID string, restoreMode bool, checkRestored bool) (*leaseEntry, error) { out, err := m.idView.Get(leaseID) if err != nil { return nil, fmt.Errorf("failed to read lease entry: %v", err) @@ -733,6 +1070,24 @@ func (m *ExpirationManager) loadEntry(leaseID string) (*leaseEntry, error) { if err != nil { return nil, fmt.Errorf("failed to decode lease entry: %v", err) } + + if restoreMode { + if checkRestored { + // If we have already loaded this 
lease, we don't need to update on + // load. In the case of renewal and revocation, updatePending will be + // done after making the appropriate modifications to the lease. + if _, ok := m.restoreLoaded.Load(leaseID); ok { + return le, nil + } + } + + // Update the cache of restored leases, either synchronously or through + // the lazy loaded restore process + m.restoreLoaded.Store(le.LeaseID, struct{}{}) + + // Setup revocation timer + m.updatePending(le, le.ExpireTime.Sub(time.Now())) + } return le, nil } @@ -765,8 +1120,18 @@ func (m *ExpirationManager) deleteEntry(leaseID string) error { // createIndexByToken creates a secondary index from the token to a lease entry func (m *ExpirationManager) createIndexByToken(token, leaseID string) error { + saltedID, err := m.tokenStore.SaltID(token) + if err != nil { + return err + } + + leaseSaltedID, err := m.tokenStore.SaltID(leaseID) + if err != nil { + return err + } + ent := logical.StorageEntry{ - Key: m.tokenStore.SaltID(token) + "/" + m.tokenStore.SaltID(leaseID), + Key: saltedID + "/" + leaseSaltedID, Value: []byte(leaseID), } if err := m.tokenView.Put(&ent); err != nil { @@ -777,7 +1142,17 @@ func (m *ExpirationManager) createIndexByToken(token, leaseID string) error { // indexByToken looks up the secondary index from the token to a lease entry func (m *ExpirationManager) indexByToken(token, leaseID string) (*logical.StorageEntry, error) { - key := m.tokenStore.SaltID(token) + "/" + m.tokenStore.SaltID(leaseID) + saltedID, err := m.tokenStore.SaltID(token) + if err != nil { + return nil, err + } + + leaseSaltedID, err := m.tokenStore.SaltID(leaseID) + if err != nil { + return nil, err + } + + key := saltedID + "/" + leaseSaltedID entry, err := m.tokenView.Get(key) if err != nil { return nil, fmt.Errorf("failed to look up secondary index entry") @@ -787,7 +1162,17 @@ func (m *ExpirationManager) indexByToken(token, leaseID string) (*logical.Storag // removeIndexByToken removes the secondary index from the token to 
a lease entry func (m *ExpirationManager) removeIndexByToken(token, leaseID string) error { - key := m.tokenStore.SaltID(token) + "/" + m.tokenStore.SaltID(leaseID) + saltedID, err := m.tokenStore.SaltID(token) + if err != nil { + return err + } + + leaseSaltedID, err := m.tokenStore.SaltID(leaseID) + if err != nil { + return err + } + + key := saltedID + "/" + leaseSaltedID if err := m.tokenView.Delete(key); err != nil { return fmt.Errorf("failed to delete lease index entry: %v", err) } @@ -796,8 +1181,13 @@ func (m *ExpirationManager) removeIndexByToken(token, leaseID string) error { // lookupByToken is used to lookup all the leaseID's via the func (m *ExpirationManager) lookupByToken(token string) ([]string, error) { + saltedID, err := m.tokenStore.SaltID(token) + if err != nil { + return nil, err + } + // Scan via the index for sub-leases - prefix := m.tokenStore.SaltID(token) + "/" + prefix := saltedID + "/" subKeys, err := m.tokenView.List(prefix) if err != nil { return nil, fmt.Errorf("failed to list leases: %v", err) @@ -820,9 +1210,9 @@ func (m *ExpirationManager) lookupByToken(token string) ([]string, error) { // emitMetrics is invoked periodically to emit statistics func (m *ExpirationManager) emitMetrics() { - m.pendingLock.Lock() + m.pendingLock.RLock() num := len(m.pending) - m.pendingLock.Unlock() + m.pendingLock.RUnlock() metrics.SetGauge([]string{"expire", "num_leases"}, float32(num)) } diff --git a/vendor/github.com/hashicorp/vault/vault/expiration_test.go b/vendor/github.com/hashicorp/vault/vault/expiration_test.go index ced6b42..144bd16 100644 --- a/vendor/github.com/hashicorp/vault/vault/expiration_test.go +++ b/vendor/github.com/hashicorp/vault/vault/expiration_test.go @@ -2,7 +2,6 @@ package vault import ( "fmt" - "os" "reflect" "sort" "strings" @@ -15,6 +14,7 @@ import ( "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" "github.com/hashicorp/vault/physical" + "github.com/hashicorp/vault/physical/inmem" log 
"github.com/mgutz/logxi/v1" ) @@ -33,16 +33,230 @@ func mockBackendExpiration(t testing.TB, backend physical.Backend) (*Core, *Expi return c, ts.expiration } +func TestExpiration_Tidy(t *testing.T) { + var err error + + exp := mockExpiration(t) + if err := exp.Restore(nil); err != nil { + t.Fatal(err) + } + + // Set up a count function to calculate number of leases + count := 0 + countFunc := func(leaseID string) { + count++ + } + + // Scan the storage with the count func set + if err = logical.ScanView(exp.idView, countFunc); err != nil { + t.Fatal(err) + } + + // Check that there are no leases to begin with + if count != 0 { + t.Fatalf("bad: lease count; expected:0 actual:%d", count) + } + + // Create a lease entry without a client token in it + le := &leaseEntry{ + LeaseID: "lease/with/no/client/token", + Path: "foo/bar", + } + + // Persist the invalid lease entry + if err = exp.persistEntry(le); err != nil { + t.Fatalf("error persisting entry: %v", err) + } + + count = 0 + if err = logical.ScanView(exp.idView, countFunc); err != nil { + t.Fatal(err) + } + + // Check that the storage was successful and that the count of leases is + // now 1 + if count != 1 { + t.Fatalf("bad: lease count; expected:1 actual:%d", count) + } + + // Run the tidy operation + err = exp.Tidy() + if err != nil { + t.Fatal(err) + } + + count = 0 + if err := logical.ScanView(exp.idView, countFunc); err != nil { + t.Fatal(err) + } + + // Post the tidy operation, the invalid lease entry should have been gone + if count != 0 { + t.Fatalf("bad: lease count; expected:0 actual:%d", count) + } + + // Set a revoked/invalid token in the lease entry + le.ClientToken = "invalidtoken" + + // Persist the invalid lease entry + if err = exp.persistEntry(le); err != nil { + t.Fatalf("error persisting entry: %v", err) + } + + count = 0 + if err = logical.ScanView(exp.idView, countFunc); err != nil { + t.Fatal(err) + } + + // Check that the storage was successful and that the count of leases is + // now 1 + 
if count != 1 { + t.Fatalf("bad: lease count; expected:1 actual:%d", count) + } + + // Run the tidy operation + err = exp.Tidy() + if err != nil { + t.Fatal(err) + } + + count = 0 + if err = logical.ScanView(exp.idView, countFunc); err != nil { + t.Fatal(err) + } + + // Post the tidy operation, the invalid lease entry should have been gone + if count != 0 { + t.Fatalf("bad: lease count; expected:0 actual:%d", count) + } + + // Attach an invalid token with 2 leases + if err = exp.persistEntry(le); err != nil { + t.Fatalf("error persisting entry: %v", err) + } + + le.LeaseID = "another/invalid/lease" + if err = exp.persistEntry(le); err != nil { + t.Fatalf("error persisting entry: %v", err) + } + + // Run the tidy operation + err = exp.Tidy() + if err != nil { + t.Fatal(err) + } + + count = 0 + if err = logical.ScanView(exp.idView, countFunc); err != nil { + t.Fatal(err) + } + + // Post the tidy operation, the invalid lease entry should have been gone + if count != 0 { + t.Fatalf("bad: lease count; expected:0 actual:%d", count) + } + + for i := 0; i < 1000; i++ { + req := &logical.Request{ + Operation: logical.ReadOperation, + Path: "invalid/lease/" + fmt.Sprintf("%d", i+1), + ClientToken: "invalidtoken", + } + resp := &logical.Response{ + Secret: &logical.Secret{ + LeaseOptions: logical.LeaseOptions{ + TTL: 100 * time.Millisecond, + }, + }, + Data: map[string]interface{}{ + "test_key": "test_value", + }, + } + _, err := exp.Register(req, resp) + if err != nil { + t.Fatalf("err: %v", err) + } + } + + count = 0 + if err = logical.ScanView(exp.idView, countFunc); err != nil { + t.Fatal(err) + } + + // Check that there are 1000 leases now + if count != 1000 { + t.Fatalf("bad: lease count; expected:1000 actual:%d", count) + } + + errCh1 := make(chan error) + errCh2 := make(chan error) + + // Initiate tidy of the above 1000 invalid leases in quick succession. Only + // one tidy operation can be in flight at any time. One of these requests + // should error out. 
+ go func() { + errCh1 <- exp.Tidy() + }() + + go func() { + errCh2 <- exp.Tidy() + }() + + var err1, err2 error + + for i := 0; i < 2; i++ { + select { + case err1 = <-errCh1: + case err2 = <-errCh2: + } + } + + if !(err1 != nil && err1.Error() == "tidy operation on leases is already in progress") && + !(err2 != nil && err2.Error() == "tidy operation on leases is already in progress") { + t.Fatalf("expected at least one of err1 or err2 to be set; err1: %#v\n err2:%#v\n", err1, err2) + } + + root, err := exp.tokenStore.rootToken() + if err != nil { + t.Fatal(err) + } + le.ClientToken = root.ID + + // Attach a valid token with the leases + if err = exp.persistEntry(le); err != nil { + t.Fatalf("error persisting entry: %v", err) + } + + // Run the tidy operation + err = exp.Tidy() + if err != nil { + t.Fatal(err) + } + + count = 0 + if err = logical.ScanView(exp.idView, countFunc); err != nil { + t.Fatal(err) + } + + // Post the tidy operation, the valid lease entry should not get affected + if count != 1 { + t.Fatalf("bad: lease count; expected:1 actual:%d", count) + } +} + +// To avoid pulling in deps for all users of the package, don't leave these +// uncommented in the public tree +/* func BenchmarkExpiration_Restore_Etcd(b *testing.B) { addr := os.Getenv("PHYSICAL_BACKEND_BENCHMARK_ADDR") randPath := fmt.Sprintf("vault-%d/", time.Now().Unix()) logger := logformat.NewVaultLogger(log.LevelTrace) - physicalBackend, err := physical.NewBackend("etcd", logger, map[string]string{ + physicalBackend, err := physEtcd.NewEtcdBackend(map[string]string{ "address": addr, "path": randPath, "max_parallel": "256", - }) + }, logger) if err != nil { b.Fatalf("err: %s", err) } @@ -55,21 +269,26 @@ func BenchmarkExpiration_Restore_Consul(b *testing.B) { randPath := fmt.Sprintf("vault-%d/", time.Now().Unix()) logger := logformat.NewVaultLogger(log.LevelTrace) - physicalBackend, err := physical.NewBackend("consul", logger, map[string]string{ + physicalBackend, err := 
physConsul.NewConsulBackend(map[string]string{ "address": addr, "path": randPath, "max_parallel": "256", - }) + }, logger) if err != nil { b.Fatalf("err: %s", err) } benchmarkExpirationBackend(b, physicalBackend, 10000) // 10,000 leases } +*/ func BenchmarkExpiration_Restore_InMem(b *testing.B) { logger := logformat.NewVaultLogger(log.LevelTrace) - benchmarkExpirationBackend(b, physical.NewInmem(logger), 100000) // 100,000 Leases + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + b.Fatal(err) + } + benchmarkExpirationBackend(b, inm, 100000) // 100,000 Leases } func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend, numLeases int) { @@ -80,7 +299,10 @@ func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend, if err != nil { b.Fatal(err) } - exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view) + if err != nil { + b.Fatal(err) + } // Register fake leases for i := 0; i < numLeases; i++ { @@ -90,8 +312,9 @@ func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend, } req := &logical.Request{ - Operation: logical.ReadOperation, - Path: "prod/aws/" + pathUUID, + Operation: logical.ReadOperation, + Path: "prod/aws/" + pathUUID, + ClientToken: "root", } resp := &logical.Response{ Secret: &logical.Secret{ @@ -118,7 +341,7 @@ func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend, b.ResetTimer() for i := 0; i < b.N; i++ { - err = exp.Restore() + err = exp.Restore(nil) // Restore if err != nil { b.Fatalf("err: %v", err) @@ -136,7 +359,10 @@ func TestExpiration_Restore(t *testing.T) { if err != nil { t.Fatal(err) } - exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view) + if 
err != nil { + t.Fatal(err) + } paths := []string{ "prod/aws/foo", @@ -145,8 +371,9 @@ func TestExpiration_Restore(t *testing.T) { } for _, path := range paths { req := &logical.Request{ - Operation: logical.ReadOperation, - Path: path, + Operation: logical.ReadOperation, + Path: path, + ClientToken: "foobar", } resp := &logical.Response{ Secret: &logical.Secret{ @@ -172,7 +399,7 @@ func TestExpiration_Restore(t *testing.T) { } // Restore - err = exp.Restore() + err = exp.Restore(nil) if err != nil { t.Fatalf("err: %v", err) } @@ -200,8 +427,9 @@ func TestExpiration_Restore(t *testing.T) { func TestExpiration_Register(t *testing.T) { exp := mockExpiration(t) req := &logical.Request{ - Operation: logical.ReadOperation, - Path: "prod/aws/foo", + Operation: logical.ReadOperation, + Path: "prod/aws/foo", + ClientToken: "foobar", } resp := &logical.Response{ Secret: &logical.Secret{ @@ -247,6 +475,11 @@ func TestExpiration_RegisterAuth(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } + + err = exp.RegisterAuth("auth/github/../login", auth) + if err == nil { + t.Fatal("expected error") + } } func TestExpiration_RegisterAuth_NoLease(t *testing.T) { @@ -296,11 +529,15 @@ func TestExpiration_Revoke(t *testing.T) { if err != nil { t.Fatal(err) } - exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view) + if err != nil { + t.Fatal(err) + } req := &logical.Request{ - Operation: logical.ReadOperation, - Path: "prod/aws/foo", + Operation: logical.ReadOperation, + Path: "prod/aws/foo", + ClientToken: "foobar", } resp := &logical.Response{ Secret: &logical.Secret{ @@ -338,11 +575,15 @@ func TestExpiration_RevokeOnExpire(t *testing.T) { if err != nil { t.Fatal(err) } - exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", 
UUID: meUUID, Accessor: "noop-accessor"}, view) + if err != nil { + t.Fatal(err) + } req := &logical.Request{ - Operation: logical.ReadOperation, - Path: "prod/aws/foo", + Operation: logical.ReadOperation, + Path: "prod/aws/foo", + ClientToken: "foobar", } resp := &logical.Response{ Secret: &logical.Secret{ @@ -391,7 +632,10 @@ func TestExpiration_RevokePrefix(t *testing.T) { if err != nil { t.Fatal(err) } - exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view) + if err != nil { + t.Fatal(err) + } paths := []string{ "prod/aws/foo", @@ -400,8 +644,9 @@ func TestExpiration_RevokePrefix(t *testing.T) { } for _, path := range paths { req := &logical.Request{ - Operation: logical.ReadOperation, - Path: path, + Operation: logical.ReadOperation, + Path: path, + ClientToken: "foobar", } resp := &logical.Response{ Secret: &logical.Secret{ @@ -455,7 +700,10 @@ func TestExpiration_RevokeByToken(t *testing.T) { if err != nil { t.Fatal(err) } - exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view) + if err != nil { + t.Fatal(err) + } paths := []string{ "prod/aws/foo", @@ -585,11 +833,15 @@ func TestExpiration_Renew(t *testing.T) { if err != nil { t.Fatal(err) } - exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view) + if err != nil { + t.Fatal(err) + } req := &logical.Request{ - Operation: logical.ReadOperation, - Path: "prod/aws/foo", + Operation: logical.ReadOperation, + Path: "prod/aws/foo", + ClientToken: "foobar", } resp := &logical.Response{ Secret: &logical.Secret{ @@ -651,11 +903,15 @@ func 
TestExpiration_Renew_NotRenewable(t *testing.T) { if err != nil { t.Fatal(err) } - exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view) + if err != nil { + t.Fatal(err) + } req := &logical.Request{ - Operation: logical.ReadOperation, - Path: "prod/aws/foo", + Operation: logical.ReadOperation, + Path: "prod/aws/foo", + ClientToken: "foobar", } resp := &logical.Response{ Secret: &logical.Secret{ @@ -697,11 +953,15 @@ func TestExpiration_Renew_RevokeOnExpire(t *testing.T) { if err != nil { t.Fatal(err) } - exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "prod/aws/", &MountEntry{Path: "prod/aws/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view) + if err != nil { + t.Fatal(err) + } req := &logical.Request{ - Operation: logical.ReadOperation, - Path: "prod/aws/foo", + Operation: logical.ReadOperation, + Path: "prod/aws/foo", + ClientToken: "foobar", } resp := &logical.Response{ Secret: &logical.Secret{ @@ -769,7 +1029,10 @@ func TestExpiration_revokeEntry(t *testing.T) { if err != nil { t.Fatal(err) } - exp.router.Mount(noop, "", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "foo/bar/", &MountEntry{Path: "foo/bar/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view) + if err != nil { + t.Fatal(err) + } le := &leaseEntry{ LeaseID: "foo/bar/1234", @@ -796,13 +1059,10 @@ func TestExpiration_revokeEntry(t *testing.T) { req := noop.Requests[0] if req.Operation != logical.RevokeOperation { - t.Fatalf("Bad: %v", req) - } - if req.Path != le.Path { - t.Fatalf("Bad: %v", req) + t.Fatalf("bad: operation; req: %#v", req) } if !reflect.DeepEqual(req.Data, le.Data) { - t.Fatalf("Bad: %v", req) + t.Fatalf("bad: data; req: %#v\n le: %#v\n", req, le) } } @@ -900,7 +1160,10 @@ func TestExpiration_renewEntry(t *testing.T) { if err != nil { 
t.Fatal(err) } - exp.router.Mount(noop, "", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "foo/bar/", &MountEntry{Path: "foo/bar/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view) + if err != nil { + t.Fatal(err) + } le := &leaseEntry{ LeaseID: "foo/bar/1234", @@ -933,9 +1196,6 @@ func TestExpiration_renewEntry(t *testing.T) { if req.Operation != logical.RenewOperation { t.Fatalf("Bad: %v", req) } - if req.Path != le.Path { - t.Fatalf("Bad: %v", req) - } if !reflect.DeepEqual(req.Data, le.Data) { t.Fatalf("Bad: %v", req) } @@ -966,7 +1226,10 @@ func TestExpiration_renewAuthEntry(t *testing.T) { if err != nil { t.Fatal(err) } - exp.router.Mount(noop, "auth/foo/", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "auth/foo/", &MountEntry{Path: "auth/foo/", Type: "noop", UUID: meUUID, Accessor: "noop-accessor"}, view) + if err != nil { + t.Fatal(err) + } le := &leaseEntry{ LeaseID: "auth/foo/1234", @@ -1134,9 +1397,10 @@ func TestExpiration_RevokeForce(t *testing.T) { core.logicalBackends["badrenew"] = badRenewFactory me := &MountEntry{ - Table: mountTableType, - Path: "badrenew/", - Type: "badrenew", + Table: mountTableType, + Path: "badrenew/", + Type: "badrenew", + Accessor: "badrenewaccessor", } err := core.mount(me) @@ -1207,5 +1471,10 @@ func badRenewFactory(conf *logical.BackendConfig) (logical.Backend, error) { }, } - return be.Setup(conf) + err := be.Setup(conf) + if err != nil { + return nil, err + } + + return be, nil } diff --git a/vendor/github.com/hashicorp/vault/vault/init_test.go b/vendor/github.com/hashicorp/vault/vault/init_test.go index 38d95e4..48581f7 100644 --- a/vendor/github.com/hashicorp/vault/vault/init_test.go +++ b/vendor/github.com/hashicorp/vault/vault/init_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/logical" - "github.com/hashicorp/vault/physical" + "github.com/hashicorp/vault/physical/inmem" ) func TestCore_Init(t *testing.T) { 
@@ -25,12 +25,15 @@ func TestCore_Init(t *testing.T) { func testCore_NewTestCore(t *testing.T, seal Seal) (*Core, *CoreConfig) { logger := logformat.NewVaultLogger(log.LevelTrace) - inm := physical.NewInmem(logger) + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } conf := &CoreConfig{ Physical: inm, DisableMlock: true, LogicalBackends: map[string]logical.Factory{ - "generic": LeasedPassthroughBackendFactory, + "kv": LeasedPassthroughBackendFactory, }, Seal: seal, } diff --git a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go index 76353b0..cedb241 100644 --- a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go +++ b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go @@ -46,7 +46,7 @@ func CubbyholeBackendFactory(conf *logical.BackendConfig) (logical.Backend, erro // CubbyholeBackend is used for storing secrets directly into the physical // backend. The secrets are encrypted in the durable storage. -// This differs from generic in that every token has its own private +// This differs from kv in that every token has its own private // storage view. The view is removed when the token expires. type CubbyholeBackend struct { *framework.Backend @@ -185,7 +185,7 @@ The secrets are encrypted/decrypted by Vault: they are never stored unencrypted in the backend and the backend never has an opportunity to see the unencrypted value. -This backend differs from the 'generic' backend in that it is namespaced +This backend differs from the 'kv' backend in that it is namespaced per-token. Tokens can only read and write their own values, with no sharing possible (per-token cubbyholes). 
This can be useful for implementing certain authentication workflows, as well as "scratch" areas for individual diff --git a/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go b/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go index eb52a3f..5fc013e 100644 --- a/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go +++ b/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go @@ -5,8 +5,8 @@ import ( "fmt" "strings" - "github.com/hashicorp/vault/helper/parseutil" "github.com/hashicorp/vault/helper/jsonutil" + "github.com/hashicorp/vault/helper/parseutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) @@ -17,13 +17,13 @@ func PassthroughBackendFactory(conf *logical.BackendConfig) (logical.Backend, er return LeaseSwitchedPassthroughBackend(conf, false) } -// PassthroughBackendWithLeasesFactory returns a PassthroughBackend +// LeasedPassthroughBackendFactory returns a PassthroughBackend // with leases switched on func LeasedPassthroughBackendFactory(conf *logical.BackendConfig) (logical.Backend, error) { return LeaseSwitchedPassthroughBackend(conf, true) } -// LeaseSwitchedPassthroughBackendFactory returns a PassthroughBackend +// LeaseSwitchedPassthroughBackend returns a PassthroughBackend // with leases switched on or off func LeaseSwitchedPassthroughBackend(conf *logical.BackendConfig, leases bool) (logical.Backend, error) { var b PassthroughBackend @@ -53,7 +53,7 @@ func LeaseSwitchedPassthroughBackend(conf *logical.BackendConfig, leases bool) ( b.Backend.Secrets = []*framework.Secret{ &framework.Secret{ - Type: "generic", + Type: "kv", Renew: b.handleRead, Revoke: b.handleRevoke, @@ -116,7 +116,7 @@ func (b *PassthroughBackend) handleRead( var resp *logical.Response if b.generateLeases { // Generate the response - resp = b.Secret("generic").Response(rawData, nil) + resp = b.Secret("kv").Response(rawData, nil) resp.Secret.Renewable = false } else { resp = &logical.Response{ @@ -126,14 
+126,13 @@ func (b *PassthroughBackend) handleRead( } // Check if there is a ttl key - var ttl string - ttl, _ = rawData["ttl"].(string) - if len(ttl) == 0 { - ttl, _ = rawData["lease"].(string) - } ttlDuration := b.System().DefaultLeaseTTL() - if len(ttl) != 0 { - dur, err := parseutil.ParseDurationSecond(ttl) + ttlRaw, ok := rawData["ttl"] + if !ok { + ttlRaw, ok = rawData["lease"] + } + if ok { + dur, err := parseutil.ParseDurationSecond(ttlRaw) if err == nil { ttlDuration = dur } @@ -148,6 +147,10 @@ func (b *PassthroughBackend) handleRead( return resp, nil } +func (b *PassthroughBackend) GeneratesLeases() bool { + return b.generateLeases +} + func (b *PassthroughBackend) handleWrite( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { // Check that some fields are given @@ -203,12 +206,8 @@ func (b *PassthroughBackend) handleList( return logical.ListResponse(keys), nil } -func (b *PassthroughBackend) GeneratesLeases() bool { - return b.generateLeases -} - const passthroughHelp = ` -The generic backend reads and writes arbitrary secrets to the backend. +The kv backend reads and writes arbitrary secrets to the backend. The secrets are encrypted/decrypted by Vault: they are never stored unencrypted in the backend and the backend never has an opportunity to see the unencrypted value. 
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go b/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go index bd33d65..1ccda69 100644 --- a/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go +++ b/vendor/github.com/hashicorp/vault/vault/logical_passthrough_test.go @@ -1,10 +1,12 @@ package vault import ( + "encoding/json" "reflect" "testing" "time" + "github.com/hashicorp/vault/helper/parseutil" "github.com/hashicorp/vault/logical" ) @@ -49,10 +51,19 @@ func TestPassthroughBackend_Write(t *testing.T) { } func TestPassthroughBackend_Read(t *testing.T) { - test := func(b logical.Backend, ttlType string, leased bool) { + test := func(b logical.Backend, ttlType string, ttl interface{}, leased bool) { req := logical.TestRequest(t, logical.UpdateOperation, "foo") req.Data["raw"] = "test" - req.Data[ttlType] = "1h" + var reqTTL interface{} + switch ttl.(type) { + case int64: + reqTTL = ttl.(int64) + case string: + reqTTL = ttl.(string) + default: + t.Fatal("unknown ttl type") + } + req.Data[ttlType] = reqTTL storage := req.Storage if _, err := b.HandleRequest(req); err != nil { @@ -67,16 +78,34 @@ func TestPassthroughBackend_Read(t *testing.T) { t.Fatalf("err: %v", err) } + expectedTTL, err := parseutil.ParseDurationSecond(ttl) + if err != nil { + t.Fatal(err) + } + + // What comes back if an int is passed in is a json.Number which is + // actually aliased as a string so to make the deep equal happy if it's + // actually a number we set it to an int64 + var respTTL interface{} = resp.Data[ttlType] + _, ok := respTTL.(json.Number) + if ok { + respTTL, err = respTTL.(json.Number).Int64() + if err != nil { + t.Fatal(err) + } + resp.Data[ttlType] = respTTL + } + expected := &logical.Response{ Secret: &logical.Secret{ LeaseOptions: logical.LeaseOptions{ Renewable: true, - TTL: time.Hour, + TTL: expectedTTL, }, }, Data: map[string]interface{}{ "raw": "test", - ttlType: "1h", + ttlType: reqTTL, }, } @@ -86,15 
+115,15 @@ func TestPassthroughBackend_Read(t *testing.T) { resp.Secret.InternalData = nil resp.Secret.LeaseID = "" if !reflect.DeepEqual(resp, expected) { - t.Fatalf("bad response.\n\nexpected: %#v\n\nGot: %#v", expected, resp) + t.Fatalf("bad response.\n\nexpected:\n%#v\n\nGot:\n%#v", expected, resp) } } b := testPassthroughLeasedBackend() - test(b, "lease", true) - test(b, "ttl", true) + test(b, "lease", "1h", true) + test(b, "ttl", "5", true) b = testPassthroughBackend() - test(b, "lease", false) - test(b, "ttl", false) + test(b, "lease", int64(10), false) + test(b, "ttl", "40s", false) } func TestPassthroughBackend_Delete(t *testing.T) { @@ -168,10 +197,10 @@ func TestPassthroughBackend_List(t *testing.T) { func TestPassthroughBackend_Revoke(t *testing.T) { test := func(b logical.Backend) { - req := logical.TestRequest(t, logical.RevokeOperation, "generic") + req := logical.TestRequest(t, logical.RevokeOperation, "kv") req.Secret = &logical.Secret{ InternalData: map[string]interface{}{ - "secret_type": "generic", + "secret_type": "kv", }, } diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system.go b/vendor/github.com/hashicorp/vault/vault/logical_system.go index 5fdf312..1593a1f 100644 --- a/vendor/github.com/hashicorp/vault/vault/logical_system.go +++ b/vendor/github.com/hashicorp/vault/vault/logical_system.go @@ -9,8 +9,10 @@ import ( "sync" "time" + "github.com/fatih/structs" "github.com/hashicorp/vault/helper/consts" "github.com/hashicorp/vault/helper/parseutil" + "github.com/hashicorp/vault/helper/wrapping" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" "github.com/mitchellh/mapstructure" @@ -20,7 +22,7 @@ var ( // protectedPaths cannot be accessed via the raw APIs. // This is both for security and to prevent disrupting Vault. 
protectedPaths = []string{ - "core", + keyringPath, } replicationPaths = func(b *SystemBackend) []*framework.Path { @@ -43,7 +45,7 @@ var ( } ) -func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backend, error) { +func NewSystemBackend(core *Core) *SystemBackend { b := &SystemBackend{ Core: core, } @@ -57,18 +59,23 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen "remount", "audit", "audit/*", + "raw", "raw/*", "replication/primary/secondary-token", "replication/reindex", "rotate", + "config/cors", "config/auditing/*", + "plugins/catalog/*", "revoke-prefix/*", + "revoke-force/*", "leases/revoke-prefix/*", "leases/revoke-force/*", "leases/lookup/*", }, Unauthenticated: []string{ + "wrapping/lookup", "wrapping/pubkey", "replication/status", }, @@ -97,6 +104,34 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen HelpDescription: strings.TrimSpace(sysHelp["capabilities_accessor"][1]), }, + &framework.Path{ + Pattern: "config/cors$", + + Fields: map[string]*framework.FieldSchema{ + "enable": &framework.FieldSchema{ + Type: framework.TypeBool, + Description: "Enables or disables CORS headers on requests.", + }, + "allowed_origins": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: "A comma-separated string or array of strings indicating origins that may make cross-origin requests.", + }, + "allowed_headers": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: "A comma-separated string or array of strings indicating headers that are allowed on cross-origin requests.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.handleCORSRead, + logical.UpdateOperation: b.handleCORSUpdate, + logical.DeleteOperation: b.handleCORSDelete, + }, + + HelpDescription: strings.TrimSpace(sysHelp["config/cors"][0]), + HelpSynopsis: strings.TrimSpace(sysHelp["config/cors"][1]), + }, + &framework.Path{ 
Pattern: "capabilities$", @@ -196,6 +231,10 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]), }, + "description": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["auth_desc"][0]), + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.handleAuthTuneRead, @@ -221,6 +260,10 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]), }, + "description": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["auth_desc"][0]), + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -257,6 +300,10 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen Default: false, Description: strings.TrimSpace(sysHelp["mount_local"][0]), }, + "plugin_name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["mount_plugin_name"][0]), + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -421,6 +468,17 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen HelpDescription: strings.TrimSpace(sysHelp["revoke-prefix"][1]), }, + &framework.Path{ + Pattern: "leases/tidy$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.handleTidyLeases, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["tidy_leases"][0]), + HelpDescription: strings.TrimSpace(sysHelp["tidy_leases"][1]), + }, + &framework.Path{ Pattern: "auth$", @@ -448,11 +506,19 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen Type: framework.TypeString, Description: strings.TrimSpace(sysHelp["auth_desc"][0]), }, + "config": &framework.FieldSchema{ + Type: framework.TypeMap, 
+ Description: strings.TrimSpace(sysHelp["auth_config"][0]), + }, "local": &framework.FieldSchema{ Type: framework.TypeBool, Default: false, Description: strings.TrimSpace(sysHelp["mount_local"][0]), }, + "plugin_name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["auth_plugin"][0]), + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -587,25 +653,6 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen HelpDescription: strings.TrimSpace(sysHelp["audit"][1]), }, - &framework.Path{ - Pattern: "raw/(?P.+)", - - Fields: map[string]*framework.FieldSchema{ - "path": &framework.FieldSchema{ - Type: framework.TypeString, - }, - "value": &framework.FieldSchema{ - Type: framework.TypeString, - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.handleRawRead, - logical.UpdateOperation: b.handleRawWrite, - logical.DeleteOperation: b.handleRawDelete, - }, - }, - &framework.Path{ Pattern: "key-status$", @@ -681,6 +728,7 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.handleWrappingLookup, + logical.ReadOperation: b.handleWrappingLookup, }, HelpSynopsis: strings.TrimSpace(sysHelp["wraplookup"][0]), @@ -736,27 +784,159 @@ func NewSystemBackend(core *Core, config *logical.BackendConfig) (logical.Backen HelpSynopsis: strings.TrimSpace(sysHelp["audited-headers"][0]), HelpDescription: strings.TrimSpace(sysHelp["audited-headers"][1]), }, + &framework.Path{ + Pattern: "plugins/catalog/?$", + + Fields: map[string]*framework.FieldSchema{}, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.handlePluginCatalogList, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog"][0]), + HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]), + }, + &framework.Path{ + Pattern: 
"plugins/catalog/(?P.+)", + + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["plugin-catalog_name"][0]), + }, + "sha256": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["plugin-catalog_sha-256"][0]), + }, + "sha_256": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["plugin-catalog_sha-256"][0]), + }, + "command": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["plugin-catalog_command"][0]), + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.handlePluginCatalogUpdate, + logical.DeleteOperation: b.handlePluginCatalogDelete, + logical.ReadOperation: b.handlePluginCatalogRead, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog"][0]), + HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]), + }, + &framework.Path{ + Pattern: "plugins/reload/backend$", + + Fields: map[string]*framework.FieldSchema{ + "plugin": &framework.FieldSchema{ + Type: framework.TypeString, + Description: strings.TrimSpace(sysHelp["plugin-backend-reload-plugin"][0]), + }, + "mounts": &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: strings.TrimSpace(sysHelp["plugin-backend-reload-mounts"][0]), + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.handlePluginReloadUpdate, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["plugin-reload"][0]), + HelpDescription: strings.TrimSpace(sysHelp["plugin-reload"][1]), + }, }, } b.Backend.Paths = append(b.Backend.Paths, replicationPaths(b)...) 
+ if core.rawEnabled { + b.Backend.Paths = append(b.Backend.Paths, &framework.Path{ + Pattern: "(raw/?$|raw/(?P.+))", + + Fields: map[string]*framework.FieldSchema{ + "path": &framework.FieldSchema{ + Type: framework.TypeString, + }, + "value": &framework.FieldSchema{ + Type: framework.TypeString, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.handleRawRead, + logical.UpdateOperation: b.handleRawWrite, + logical.DeleteOperation: b.handleRawDelete, + logical.ListOperation: b.handleRawList, + }, + }) + } + b.Backend.Invalidate = b.invalidate - return b.Backend.Setup(config) + return b } // SystemBackend implements logical.Backend and is used to interact with // the core of the system. This backend is hardcoded to exist at the "sys" // prefix. Conceptually it is similar to procfs on Linux. type SystemBackend struct { - Core *Core - Backend *framework.Backend + *framework.Backend + Core *Core +} + +// handleCORSRead returns the current CORS configuration +func (b *SystemBackend) handleCORSRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + corsConf := b.Core.corsConfig + + enabled := corsConf.IsEnabled() + + resp := &logical.Response{ + Data: map[string]interface{}{ + "enabled": enabled, + }, + } + + if enabled { + corsConf.RLock() + resp.Data["allowed_origins"] = corsConf.AllowedOrigins + resp.Data["allowed_headers"] = corsConf.AllowedHeaders + corsConf.RUnlock() + } + + return resp, nil +} + +// handleCORSUpdate sets the list of origins that are allowed to make +// cross-origin requests and sets the CORS enabled flag to true +func (b *SystemBackend) handleCORSUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + origins := d.Get("allowed_origins").([]string) + headers := d.Get("allowed_headers").([]string) + + return nil, b.Core.corsConfig.Enable(origins, headers) +} + +// handleCORSDelete sets the CORS enabled flag to false and clears the list of +// 
allowed origins & headers. +func (b *SystemBackend) handleCORSDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return nil, b.Core.corsConfig.Disable() +} + +func (b *SystemBackend) handleTidyLeases(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + err := b.Core.expiration.Tidy() + if err != nil { + b.Backend.Logger().Error("sys: failed to tidy leases", "error", err) + return handleError(err) + } + return nil, err } func (b *SystemBackend) invalidate(key string) { if b.Core.logger.IsTrace() { - b.Core.logger.Trace("sys: invaliding key", "key", key) + b.Core.logger.Trace("sys: invalidating key", "key", key) } switch { case strings.HasPrefix(key, policySubPath): @@ -768,6 +948,107 @@ func (b *SystemBackend) invalidate(key string) { } } +func (b *SystemBackend) handlePluginCatalogList(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + plugins, err := b.Core.pluginCatalog.List() + if err != nil { + return nil, err + } + + return logical.ListResponse(plugins), nil +} + +func (b *SystemBackend) handlePluginCatalogUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + pluginName := d.Get("name").(string) + if pluginName == "" { + return logical.ErrorResponse("missing plugin name"), nil + } + + sha256 := d.Get("sha256").(string) + if sha256 == "" { + sha256 = d.Get("sha_256").(string) + if sha256 == "" { + return logical.ErrorResponse("missing SHA-256 value"), nil + } + } + + command := d.Get("command").(string) + if command == "" { + return logical.ErrorResponse("missing command value"), nil + } + + sha256Bytes, err := hex.DecodeString(sha256) + if err != nil { + return logical.ErrorResponse("Could not decode SHA-256 value from Hex"), err + } + + err = b.Core.pluginCatalog.Set(pluginName, command, sha256Bytes) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *SystemBackend) handlePluginCatalogRead(req *logical.Request, d 
*framework.FieldData) (*logical.Response, error) { + pluginName := d.Get("name").(string) + if pluginName == "" { + return logical.ErrorResponse("missing plugin name"), nil + } + plugin, err := b.Core.pluginCatalog.Get(pluginName) + if err != nil { + return nil, err + } + if plugin == nil { + return nil, nil + } + + // Create a map of data to be returned and remove sensitive information from it + data := structs.New(plugin).Map() + + return &logical.Response{ + Data: data, + }, nil +} + +func (b *SystemBackend) handlePluginCatalogDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + pluginName := d.Get("name").(string) + if pluginName == "" { + return logical.ErrorResponse("missing plugin name"), nil + } + err := b.Core.pluginCatalog.Delete(pluginName) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *SystemBackend) handlePluginReloadUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + pluginName := d.Get("plugin").(string) + pluginMounts := d.Get("mounts").([]string) + + if pluginName != "" && len(pluginMounts) > 0 { + return logical.ErrorResponse("plugin and mounts cannot be set at the same time"), nil + } + if pluginName == "" && len(pluginMounts) == 0 { + return logical.ErrorResponse("plugin or mounts must be provided"), nil + } + + if pluginName != "" { + err := b.Core.reloadMatchingPlugin(pluginName) + if err != nil { + return nil, err + } + } else if len(pluginMounts) > 0 { + err := b.Core.reloadMatchingPluginMounts(pluginMounts) + if err != nil { + return nil, err + } + } + + return nil, nil +} + // handleAuditedHeaderUpdate creates or overwrites a header entry func (b *SystemBackend) handleAuditedHeaderUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { header := d.Get("header").(string) @@ -832,7 +1113,7 @@ func (b *SystemBackend) handleAuditedHeadersRead(req *logical.Request, d *framew }, nil } -// handleCapabilitiesreturns the ACL 
capabilities of the token for a given path +// handleCapabilities returns the ACL capabilities of the token for a given path func (b *SystemBackend) handleCapabilities(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { token := d.Get("token").(string) if token == "" { @@ -850,15 +1131,15 @@ func (b *SystemBackend) handleCapabilities(req *logical.Request, d *framework.Fi }, nil } -// handleCapabilitiesAccessor returns the ACL capabilities of the token associted -// with the given accessor for a given path. +// handleCapabilitiesAccessor returns the ACL capabilities of the +// token associted with the given accessor for a given path. func (b *SystemBackend) handleCapabilitiesAccessor(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { accessor := d.Get("accessor").(string) if accessor == "" { return logical.ErrorResponse("missing accessor"), nil } - aEntry, err := b.Core.tokenStore.lookupByAccessor(accessor) + aEntry, err := b.Core.tokenStore.lookupByAccessor(accessor, false) if err != nil { return nil, err } @@ -962,17 +1243,17 @@ func (b *SystemBackend) handleMountTable( } for _, entry := range b.Core.mounts.Entries { + // Populate mount info + structConfig := structs.New(entry.Config).Map() + structConfig["default_lease_ttl"] = int64(structConfig["default_lease_ttl"].(time.Duration).Seconds()) + structConfig["max_lease_ttl"] = int64(structConfig["max_lease_ttl"].(time.Duration).Seconds()) info := map[string]interface{}{ "type": entry.Type, "description": entry.Description, - "config": map[string]interface{}{ - "default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()), - "max_lease_ttl": int64(entry.Config.MaxLeaseTTL.Seconds()), - "force_no_cache": entry.Config.ForceNoCache, - }, - "local": entry.Local, + "accessor": entry.Accessor, + "config": structConfig, + "local": entry.Local, } - resp.Data[entry.Path] = info } @@ -982,12 +1263,10 @@ func (b *SystemBackend) handleMountTable( // handleMount is used to 
mount a new path func (b *SystemBackend) handleMount( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - b.Core.clusterParamsLock.RLock() repState := b.Core.replicationState - b.Core.clusterParamsLock.RUnlock() local := data.Get("local").(bool) - if !local && repState == consts.ReplicationSecondary { + if !local && repState.HasState(consts.ReplicationPerformanceSecondary) { return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil } @@ -995,16 +1274,13 @@ func (b *SystemBackend) handleMount( path := data.Get("path").(string) logicalType := data.Get("type").(string) description := data.Get("description").(string) + pluginName := data.Get("plugin_name").(string) path = sanitizeMountPath(path) var config MountConfig + var apiConfig APIMountConfig - var apiConfig struct { - DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` - ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` - } configMap := data.Get("config").(map[string]interface{}) if configMap != nil && len(configMap) != 0 { err := mapstructure.Decode(configMap, &apiConfig) @@ -1053,6 +1329,21 @@ func (b *SystemBackend) handleMount( logical.ErrInvalidRequest } + // Only set plugin-name if mount is of type plugin, with apiConfig.PluginName + // option taking precedence. 
+ if logicalType == "plugin" { + switch { + case apiConfig.PluginName != "": + config.PluginName = apiConfig.PluginName + case pluginName != "": + config.PluginName = pluginName + default: + return logical.ErrorResponse( + "plugin_name must be provided for plugin backend"), + logical.ErrInvalidRequest + } + } + // Copy over the force no cache if set if apiConfig.ForceNoCache { config.ForceNoCache = true @@ -1097,25 +1388,25 @@ func handleError( // handleUnmount is used to unmount a path func (b *SystemBackend) handleUnmount( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - b.Core.clusterParamsLock.RLock() + path := data.Get("path").(string) + path = sanitizeMountPath(path) + repState := b.Core.replicationState - b.Core.clusterParamsLock.RUnlock() - - suffix := strings.TrimPrefix(req.Path, "mounts/") - if len(suffix) == 0 { - return logical.ErrorResponse("path cannot be blank"), logical.ErrInvalidRequest - } - - suffix = sanitizeMountPath(suffix) - - entry := b.Core.router.MatchingMountEntry(suffix) - if entry != nil && !entry.Local && repState == consts.ReplicationSecondary { + entry := b.Core.router.MatchingMountEntry(path) + if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { return logical.ErrorResponse("cannot unmount a non-local mount on a replication secondary"), nil } + // We return success when the mount does not exists to not expose if the + // mount existed or not + match := b.Core.router.MatchingMount(path) + if match == "" || path != match { + return nil, nil + } + // Attempt unmount - if existed, err := b.Core.unmount(suffix); existed && err != nil { - b.Backend.Logger().Error("sys: unmount failed", "path", suffix, "error", err) + if err := b.Core.unmount(path); err != nil { + b.Backend.Logger().Error("sys: unmount failed", "path", path, "error", err) return handleError(err) } @@ -1125,9 +1416,7 @@ func (b *SystemBackend) handleUnmount( // handleRemount is used to remount a path 
func (b *SystemBackend) handleRemount( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - b.Core.clusterParamsLock.RLock() repState := b.Core.replicationState - b.Core.clusterParamsLock.RUnlock() // Get the paths fromPath := data.Get("from").(string) @@ -1142,7 +1431,7 @@ func (b *SystemBackend) handleRemount( toPath = sanitizeMountPath(toPath) entry := b.Core.router.MatchingMountEntry(fromPath) - if entry != nil && !entry.Local && repState == consts.ReplicationSecondary { + if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { return logical.ErrorResponse("cannot remount a non-local mount on a replication secondary"), nil } @@ -1238,9 +1527,7 @@ func (b *SystemBackend) handleMountTuneWrite( // handleTuneWriteCommon is used to set config settings on a path func (b *SystemBackend) handleTuneWriteCommon( path string, data *framework.FieldData) (*logical.Response, error) { - b.Core.clusterParamsLock.RLock() repState := b.Core.replicationState - b.Core.clusterParamsLock.RUnlock() path = sanitizeMountPath(path) @@ -1257,7 +1544,7 @@ func (b *SystemBackend) handleTuneWriteCommon( b.Backend.Logger().Error("sys: tune failed: no mount entry found", "path", path) return handleError(fmt.Errorf("sys: tune of path '%s' failed: no mount entry found", path)) } - if mountEntry != nil && !mountEntry.Local && repState == consts.ReplicationSecondary { + if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { return logical.ErrorResponse("cannot tune a non-local mount on a replication secondary"), nil } @@ -1269,40 +1556,52 @@ func (b *SystemBackend) handleTuneWriteCommon( lock = &b.Core.mountsLock } + lock.Lock() + defer lock.Unlock() + + // Check again after grabbing the lock + mountEntry = b.Core.router.MatchingMountEntry(path) + if mountEntry == nil { + b.Backend.Logger().Error("sys: tune failed: no mount entry found", "path", path) + return 
handleError(fmt.Errorf("sys: tune of path '%s' failed: no mount entry found", path)) + } + if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { + return logical.ErrorResponse("cannot tune a non-local mount on a replication secondary"), nil + } + // Timing configuration parameters { - var newDefault, newMax *time.Duration + var newDefault, newMax time.Duration defTTL := data.Get("default_lease_ttl").(string) switch defTTL { case "": + newDefault = mountEntry.Config.DefaultLeaseTTL case "system": - tmpDef := time.Duration(0) - newDefault = &tmpDef + newDefault = time.Duration(0) default: tmpDef, err := parseutil.ParseDurationSecond(defTTL) if err != nil { return handleError(err) } - newDefault = &tmpDef + newDefault = tmpDef } maxTTL := data.Get("max_lease_ttl").(string) switch maxTTL { case "": + newMax = mountEntry.Config.MaxLeaseTTL case "system": - tmpMax := time.Duration(0) - newMax = &tmpMax + newMax = time.Duration(0) default: tmpMax, err := parseutil.ParseDurationSecond(maxTTL) if err != nil { return handleError(err) } - newMax = &tmpMax + newMax = tmpMax } - if newDefault != nil || newMax != nil { - lock.Lock() - defer lock.Unlock() + if newDefault != mountEntry.Config.DefaultLeaseTTL || + newMax != mountEntry.Config.MaxLeaseTTL { if err := b.tuneMountTTLs(path, mountEntry, newDefault, newMax); err != nil { b.Backend.Logger().Error("sys: tuning failed", "path", path, "error", err) @@ -1311,6 +1610,28 @@ func (b *SystemBackend) handleTuneWriteCommon( } } + description := data.Get("description").(string) + if description != "" { + oldDesc := mountEntry.Description + mountEntry.Description = description + + // Update the mount table + var err error + switch { + case strings.HasPrefix(path, "auth/"): + err = b.Core.persistAuth(b.Core.auth, mountEntry.Local) + default: + err = b.Core.persistMounts(b.Core.mounts, mountEntry.Local) + } + if err != nil { + mountEntry.Description = oldDesc + return handleError(err) 
+ } + if b.Core.logger.IsInfo() { + b.Core.logger.Info("core: mount tuning of description successful", "path", path) + } + } + return nil, nil } @@ -1461,6 +1782,7 @@ func (b *SystemBackend) handleAuthTable( info := map[string]interface{}{ "type": entry.Type, "description": entry.Description, + "accessor": entry.Accessor, "config": map[string]interface{}{ "default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()), "max_lease_ttl": int64(entry.Config.MaxLeaseTTL.Seconds()), @@ -1475,12 +1797,10 @@ func (b *SystemBackend) handleAuthTable( // handleEnableAuth is used to enable a new credential backend func (b *SystemBackend) handleEnableAuth( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - b.Core.clusterParamsLock.RLock() repState := b.Core.replicationState - b.Core.clusterParamsLock.RUnlock() local := data.Get("local").(bool) - if !local && repState == consts.ReplicationSecondary { + if !local && repState.HasState(consts.ReplicationPerformanceSecondary) { return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil } @@ -1488,6 +1808,35 @@ func (b *SystemBackend) handleEnableAuth( path := data.Get("path").(string) logicalType := data.Get("type").(string) description := data.Get("description").(string) + pluginName := data.Get("plugin_name").(string) + + var config MountConfig + var apiConfig APIMountConfig + + configMap := data.Get("config").(map[string]interface{}) + if configMap != nil && len(configMap) != 0 { + err := mapstructure.Decode(configMap, &apiConfig) + if err != nil { + return logical.ErrorResponse( + "unable to convert given auth config information"), + logical.ErrInvalidRequest + } + } + + // Only set plugin name if mount is of type plugin, with apiConfig.PluginName + // option taking precedence. 
+ if logicalType == "plugin" { + switch { + case apiConfig.PluginName != "": + config.PluginName = apiConfig.PluginName + case pluginName != "": + config.PluginName = pluginName + default: + return logical.ErrorResponse( + "plugin_name must be provided for plugin backend"), + logical.ErrInvalidRequest + } + } if logicalType == "" { return logical.ErrorResponse( @@ -1503,6 +1852,7 @@ func (b *SystemBackend) handleEnableAuth( Path: path, Type: logicalType, Description: description, + Config: config, Local: local, } @@ -1517,16 +1867,26 @@ func (b *SystemBackend) handleEnableAuth( // handleDisableAuth is used to disable a credential backend func (b *SystemBackend) handleDisableAuth( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - suffix := strings.TrimPrefix(req.Path, "auth/") - if len(suffix) == 0 { - return logical.ErrorResponse("path cannot be blank"), logical.ErrInvalidRequest + path := data.Get("path").(string) + path = sanitizeMountPath(path) + fullPath := credentialRoutePrefix + path + + repState := b.Core.replicationState + entry := b.Core.router.MatchingMountEntry(fullPath) + if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { + return logical.ErrorResponse("cannot unmount a non-local mount on a replication secondary"), nil } - suffix = sanitizeMountPath(suffix) + // We return success when the mount does not exists to not expose if the + // mount existed or not + match := b.Core.router.MatchingMount(fullPath) + if match == "" || fullPath != match { + return nil, nil + } // Attempt disable - if existed, err := b.Core.disableCredential(suffix); existed && err != nil { - b.Backend.Logger().Error("sys: disable auth mount failed", "path", suffix, "error", err) + if err := b.Core.disableCredential(path); err != nil { + b.Backend.Logger().Error("sys: disable auth mount failed", "path", path, "error", err) return handleError(err) } return nil, nil @@ -1564,7 +1924,7 @@ func (b *SystemBackend) 
handlePolicyRead( return &logical.Response{ Data: map[string]interface{}{ - "name": name, + "name": policy.Name, "rules": policy.Raw, }, }, nil @@ -1574,7 +1934,16 @@ func (b *SystemBackend) handlePolicyRead( func (b *SystemBackend) handlePolicySet( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { name := data.Get("name").(string) - rules := data.Get("rules").(string) + + rulesRaw, ok := data.GetOk("rules") + if !ok { + return logical.ErrorResponse("'rules' parameter not supplied"), nil + } + + rules := rulesRaw.(string) + if rules == "" { + return logical.ErrorResponse("'rules' parameter empty"), nil + } // Validate the rules parse parse, err := Parse(rules) @@ -1582,8 +1951,9 @@ func (b *SystemBackend) handlePolicySet( return handleError(err) } - // Override the name - parse.Name = strings.ToLower(name) + if name != "" { + parse.Name = name + } // Update the policy if err := b.Core.policyStore.SetPolicy(parse); err != nil { @@ -1652,12 +2022,10 @@ func (b *SystemBackend) handleAuditHash( // handleEnableAudit is used to enable a new audit backend func (b *SystemBackend) handleEnableAudit( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - b.Core.clusterParamsLock.RLock() repState := b.Core.replicationState - b.Core.clusterParamsLock.RUnlock() local := data.Get("local").(bool) - if !local && repState == consts.ReplicationSecondary { + if !local && repState.HasState(consts.ReplicationPerformanceSecondary) { return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil } @@ -1779,6 +2147,29 @@ func (b *SystemBackend) handleRawDelete( return nil, nil } +// handleRawList is used to list directly from the barrier +func (b *SystemBackend) handleRawList( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := data.Get("path").(string) + if path != "" && !strings.HasSuffix(path, "/") { + path = path + "/" + } + + // Prevent access of protected paths + 
for _, p := range protectedPaths { + if strings.HasPrefix(path, p) { + err := fmt.Sprintf("cannot list '%s'", path) + return logical.ErrorResponse(err), logical.ErrInvalidRequest + } + } + + keys, err := b.Core.barrier.List(path) + if err != nil { + return handleError(err) + } + return logical.ListResponse(keys), nil +} + // handleKeyStatus returns status information about the backend key func (b *SystemBackend) handleKeyStatus( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { @@ -1800,10 +2191,8 @@ func (b *SystemBackend) handleKeyStatus( // handleRotate is used to trigger a key rotation func (b *SystemBackend) handleRotate( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - b.Core.clusterParamsLock.RLock() repState := b.Core.replicationState - b.Core.clusterParamsLock.RUnlock() - if repState == consts.ReplicationSecondary { + if repState.HasState(consts.ReplicationPerformanceSecondary) { return logical.ErrorResponse("cannot rotate on a replication secondary"), nil } @@ -1942,10 +2331,14 @@ func (b *SystemBackend) handleWrappingUnwrap( func (b *SystemBackend) handleWrappingLookup( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // This ordering of lookups has been validated already in the wrapping + // validation func, we're just doing this for a safety check token := data.Get("token").(string) - if token == "" { - return logical.ErrorResponse("missing \"token\" value in input"), logical.ErrInvalidRequest + token = req.ClientToken + if token == "" { + return logical.ErrorResponse("missing \"token\" value in input"), logical.ErrInvalidRequest + } } cubbyReq := &logical.Request{ @@ -1969,6 +2362,7 @@ func (b *SystemBackend) handleWrappingLookup( creationTTLRaw := cubbyResp.Data["creation_ttl"] creationTime := cubbyResp.Data["creation_time"] + creationPath := cubbyResp.Data["creation_path"] resp := &logical.Response{ Data: map[string]interface{}{}, @@ -1984,6 +2378,9 @@ func (b 
*SystemBackend) handleWrappingLookup( // This was JSON marshaled so it's already a string in RFC3339 format resp.Data["creation_time"] = cubbyResp.Data["creation_time"] } + if creationPath != nil { + resp.Data["creation_path"] = cubbyResp.Data["creation_path"] + } return resp, nil } @@ -2043,6 +2440,13 @@ func (b *SystemBackend) handleWrappingRewrap( return nil, fmt.Errorf("error reading creation_ttl value from wrapping information: %v", err) } + // Get creation_path to return as the response later + creationPathRaw := cubbyResp.Data["creation_path"] + if creationPathRaw == nil { + return nil, fmt.Errorf("creation_path value in wrapping information was nil") + } + creationPath := creationPathRaw.(string) + // Fetch the original response and return it as the data for the new response cubbyReq = &logical.Request{ Operation: logical.ReadOperation, @@ -2074,8 +2478,9 @@ func (b *SystemBackend) handleWrappingRewrap( Data: map[string]interface{}{ "response": response, }, - WrapInfo: &logical.ResponseWrapInfo{ - TTL: time.Duration(creationTTL), + WrapInfo: &wrapping.ResponseWrapInfo{ + TTL: time.Duration(creationTTL), + CreationPath: creationPath, }, }, nil } @@ -2100,6 +2505,21 @@ as well as perform core operations. // sysHelp is all the help text for the sys backend. var sysHelp = map[string][2]string{ + "config/cors": { + "Configures or returns the current configuration of CORS settings.", + ` +This path responds to the following HTTP methods. + + GET / + Returns the configuration of the CORS setting. + + POST / + Sets the comma-separated list of origins that can make cross-origin requests. + + DELETE / + Clears the CORS configuration and disables acceptance of CORS requests. 
+ `, + }, "init": { "Initializes or returns the initialization status of the Vault.", ` @@ -2218,6 +2638,11 @@ and max_lease_ttl.`, and is unaffected by replication.`, }, + "mount_plugin_name": { + `Name of the plugin to mount based from the name registered +in the plugin catalog.`, + }, + "tune_default_lease_ttl": { `The default lease TTL for this mount.`, }, @@ -2354,6 +2779,15 @@ Example: you might have an OAuth backend for GitHub, and one for Google Apps. "", }, + "auth_config": { + `Configuration for this mount, such as plugin_name.`, + }, + + "auth_plugin": { + `Name of the auth plugin to use based from the name in the plugin catalog.`, + "", + }, + "policy-list": { `List the configured access control policies.`, ` @@ -2478,6 +2912,15 @@ Enable a new audit backend or disable an existing backend. on a given path.`, }, + "tidy_leases": { + `This endpoint performs cleanup tasks that can be run if certain error +conditions have occurred.`, + `This endpoint performs cleanup tasks that can be run to clean up the +lease entries after certain error conditions. Usually running this is not +necessary, and is only required if upgrade notes or support personnel suggest +it.`, + }, + "wrap": { "Response-wraps an arbitrary JSON object.", `Round trips the given input data into a response-wrapped token.`, @@ -2524,7 +2967,38 @@ This path responds to the following HTTP methods. "Lists the headers configured to be audited.", `Returns a list of headers that have been configured to be audited.`, }, + "plugin-catalog": { + "Configures the plugins known to vault", + ` +This path responds to the following HTTP methods. + LIST / + Returns a list of names of configured plugins. + GET / + Retrieve the metadata for the named plugin. + + PUT / + Add or update plugin. + + DELETE / + Delete the plugin with the given name. + `, + }, + "plugin-catalog_name": { + "The name of the plugin", + "", + }, + "plugin-catalog_sha-256": { + `The SHA256 sum of the executable used in the +command field. 
This should be HEX encoded.`, + "", + }, + "plugin-catalog_command": { + `The command used to start the plugin. The +executable defined in this command must exist in vault's +plugin directory.`, + "", + }, "leases": { `View or list lease metadata.`, ` @@ -2542,4 +3016,19 @@ This path responds to the following HTTP methods. `The path to list leases under. Example: "aws/creds/deploy"`, "", }, + "plugin-reload": { + "Reload mounts that use a particular backend plugin.", + `Reload mounts that use a particular backend plugin. Either the plugin name + or the desired plugin backend mounts must be provided, but not both. In the + case that the plugin name is provided, all mounted paths that use that plugin + backend will be reloaded.`, + }, + "plugin-backend-reload-plugin": { + `The name of the plugin to reload, as registered in the plugin catalog.`, + "", + }, + "plugin-backend-reload-mounts": { + `The mount paths of the plugin backends to reload.`, + "", + }, } diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go b/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go index 809ebb9..929159e 100644 --- a/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go +++ b/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go @@ -7,61 +7,31 @@ import ( ) // tuneMount is used to set config on a mount point -func (b *SystemBackend) tuneMountTTLs(path string, me *MountEntry, newDefault, newMax *time.Duration) error { - meConfig := &me.Config +func (b *SystemBackend) tuneMountTTLs(path string, me *MountEntry, newDefault, newMax time.Duration) error { + zero := time.Duration(0) - if newDefault == nil && newMax == nil { - return nil - } - if newDefault == nil && newMax != nil && - *newMax == meConfig.MaxLeaseTTL { - return nil - } - if newMax == nil && newDefault != nil && - *newDefault == meConfig.DefaultLeaseTTL { - return nil - } - if newMax != nil && newDefault != nil && - *newDefault == meConfig.DefaultLeaseTTL && - 
*newMax == meConfig.MaxLeaseTTL { - return nil - } + switch { + case newDefault == zero && newMax == zero: + // No checks needed - if newMax != nil && newDefault != nil && *newMax < *newDefault { - return fmt.Errorf("new backend max lease TTL of %d less than new backend default lease TTL of %d", - int(newMax.Seconds()), int(newDefault.Seconds())) - } + case newDefault == zero && newMax != zero: + // No default/max conflict, no checks needed - if newMax != nil && newDefault == nil { - if meConfig.DefaultLeaseTTL != 0 && *newMax < meConfig.DefaultLeaseTTL { - return fmt.Errorf("new backend max lease TTL of %d less than backend default lease TTL of %d", - int(newMax.Seconds()), int(meConfig.DefaultLeaseTTL.Seconds())) + case newDefault != zero && newMax == zero: + // No default/max conflict, no checks needed + + case newDefault != zero && newMax != zero: + if newMax < newDefault { + return fmt.Errorf("backend max lease TTL of %d would be less than backend default lease TTL of %d", + int(newMax.Seconds()), int(newDefault.Seconds())) } } - if newDefault != nil { - if meConfig.MaxLeaseTTL == 0 { - if newMax == nil && *newDefault > b.Core.maxLeaseTTL { - return fmt.Errorf("new backend default lease TTL of %d greater than system max lease TTL of %d", - int(newDefault.Seconds()), int(b.Core.maxLeaseTTL.Seconds())) - } - } else { - if newMax == nil && *newDefault > meConfig.MaxLeaseTTL { - return fmt.Errorf("new backend default lease TTL of %d greater than backend max lease TTL of %d", - int(newDefault.Seconds()), int(meConfig.MaxLeaseTTL.Seconds())) - } - } - } + origMax := me.Config.MaxLeaseTTL + origDefault := me.Config.DefaultLeaseTTL - origMax := meConfig.MaxLeaseTTL - origDefault := meConfig.DefaultLeaseTTL - - if newMax != nil { - meConfig.MaxLeaseTTL = *newMax - } - if newDefault != nil { - meConfig.DefaultLeaseTTL = *newDefault - } + me.Config.MaxLeaseTTL = newMax + me.Config.DefaultLeaseTTL = newDefault // Update the mount table var err error @@ -72,13 +42,12 @@ 
func (b *SystemBackend) tuneMountTTLs(path string, me *MountEntry, newDefault, n err = b.Core.persistMounts(b.Core.mounts, me.Local) } if err != nil { - meConfig.MaxLeaseTTL = origMax - meConfig.DefaultLeaseTTL = origDefault + me.Config.MaxLeaseTTL = origMax + me.Config.DefaultLeaseTTL = origDefault return fmt.Errorf("failed to update mount table, rolling back TTL changes") } - if b.Core.logger.IsInfo() { - b.Core.logger.Info("core: mount tuning successful", "path", path) + b.Core.logger.Info("core: mount tuning of leases successful", "path", path) } return nil diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_integ_test.go b/vendor/github.com/hashicorp/vault/vault/logical_system_integ_test.go new file mode 100644 index 0000000..60eab6b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/logical_system_integ_test.go @@ -0,0 +1,467 @@ +package vault_test + +import ( + "fmt" + "os" + "testing" + "time" + + "github.com/hashicorp/vault/builtin/plugin" + "github.com/hashicorp/vault/helper/pluginutil" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/logical" + lplugin "github.com/hashicorp/vault/logical/plugin" + "github.com/hashicorp/vault/logical/plugin/mock" + "github.com/hashicorp/vault/vault" +) + +func TestSystemBackend_Plugin_secret(t *testing.T) { + cluster := testSystemBackendMock(t, 1, 1, logical.TypeLogical) + defer cluster.Cleanup() + + core := cluster.Cores[0] + + // Make a request to lazy load the plugin + req := logical.TestRequest(t, logical.ReadOperation, "mock-0/internal") + req.ClientToken = core.Client.Token() + resp, err := core.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil { + t.Fatalf("bad: response should not be nil") + } + + // Seal the cluster + cluster.EnsureCoresSealed(t) + + // Unseal the cluster + barrierKeys := cluster.BarrierKeys + for _, core := range cluster.Cores { + for _, key := range barrierKeys { + _, err := 
core.Unseal(vault.TestKeyCopy(key)) + if err != nil { + t.Fatal(err) + } + } + sealed, err := core.Sealed() + if err != nil { + t.Fatalf("err checking seal status: %s", err) + } + if sealed { + t.Fatal("should not be sealed") + } + // Wait for active so post-unseal takes place + // If it fails, it means unseal process failed + vault.TestWaitActive(t, core.Core) + } +} + +func TestSystemBackend_Plugin_auth(t *testing.T) { + cluster := testSystemBackendMock(t, 1, 1, logical.TypeCredential) + defer cluster.Cleanup() + + core := cluster.Cores[0] + + // Make a request to lazy load the plugin + req := logical.TestRequest(t, logical.ReadOperation, "auth/mock-0/internal") + req.ClientToken = core.Client.Token() + resp, err := core.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil { + t.Fatalf("bad: response should not be nil") + } + + // Seal the cluster + cluster.EnsureCoresSealed(t) + + // Unseal the cluster + barrierKeys := cluster.BarrierKeys + for _, core := range cluster.Cores { + for _, key := range barrierKeys { + _, err := core.Unseal(vault.TestKeyCopy(key)) + if err != nil { + t.Fatal(err) + } + } + sealed, err := core.Sealed() + if err != nil { + t.Fatalf("err checking seal status: %s", err) + } + if sealed { + t.Fatal("should not be sealed") + } + // Wait for active so post-unseal takes place + // If it fails, it means unseal process failed + vault.TestWaitActive(t, core.Core) + } +} + +func TestSystemBackend_Plugin_MismatchType(t *testing.T) { + cluster := testSystemBackendMock(t, 1, 1, logical.TypeLogical) + defer cluster.Cleanup() + + core := cluster.Cores[0] + + // Replace the plugin with a credential backend + vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMainCredentials") + + // Make a request to lazy load the now-credential plugin + // and expect an error + req := logical.TestRequest(t, logical.ReadOperation, "mock-0/internal") + req.ClientToken = core.Client.Token() + _, err := 
core.HandleRequest(req) + if err == nil { + t.Fatalf("expected error due to mismatch on error type: %s", err) + } + + // Sleep a bit before cleanup is called + time.Sleep(1 * time.Second) +} + +func TestSystemBackend_Plugin_CatalogRemoved(t *testing.T) { + t.Run("secret", func(t *testing.T) { + testPlugin_CatalogRemoved(t, logical.TypeLogical, false) + }) + + t.Run("auth", func(t *testing.T) { + testPlugin_CatalogRemoved(t, logical.TypeCredential, false) + }) + + t.Run("secret-mount-existing", func(t *testing.T) { + testPlugin_CatalogRemoved(t, logical.TypeLogical, true) + }) + + t.Run("auth-mount-existing", func(t *testing.T) { + testPlugin_CatalogRemoved(t, logical.TypeCredential, true) + }) +} + +func testPlugin_CatalogRemoved(t *testing.T, btype logical.BackendType, testMount bool) { + cluster := testSystemBackendMock(t, 1, 1, btype) + defer cluster.Cleanup() + + core := cluster.Cores[0] + + // Remove the plugin from the catalog + req := logical.TestRequest(t, logical.DeleteOperation, "sys/plugins/catalog/mock-plugin") + req.ClientToken = core.Client.Token() + resp, err := core.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Seal the cluster + cluster.EnsureCoresSealed(t) + + // Unseal the cluster + barrierKeys := cluster.BarrierKeys + for _, core := range cluster.Cores { + for _, key := range barrierKeys { + _, err := core.Unseal(vault.TestKeyCopy(key)) + if err != nil { + t.Fatal(err) + } + } + sealed, err := core.Sealed() + if err != nil { + t.Fatalf("err checking seal status: %s", err) + } + if sealed { + t.Fatal("should not be sealed") + } + // Wait for active so post-unseal takes place + // If it fails, it means unseal process failed + vault.TestWaitActive(t, core.Core) + } + + if testMount { + // Add plugin back to the catalog + vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMainLogical") + + // Mount the plugin at the same path after plugin is re-added 
to the catalog + // and expect an error due to existing path. + var err error + switch btype { + case logical.TypeLogical: + _, err = core.Client.Logical().Write("sys/mounts/mock-0", map[string]interface{}{ + "type": "plugin", + "config": map[string]interface{}{ + "plugin_name": "mock-plugin", + }, + }) + case logical.TypeCredential: + _, err = core.Client.Logical().Write("sys/auth/mock-0", map[string]interface{}{ + "type": "plugin", + "plugin_name": "mock-plugin", + }) + } + if err == nil { + t.Fatal("expected error when mounting on existing path") + } + } +} + +func TestSystemBackend_Plugin_autoReload(t *testing.T) { + cluster := testSystemBackendMock(t, 1, 1, logical.TypeLogical) + defer cluster.Cleanup() + + core := cluster.Cores[0] + + // Update internal value + req := logical.TestRequest(t, logical.UpdateOperation, "mock-0/internal") + req.ClientToken = core.Client.Token() + req.Data["value"] = "baz" + resp, err := core.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp != nil { + t.Fatalf("bad: %v", resp) + } + + // Call errors/rpc endpoint to trigger reload + req = logical.TestRequest(t, logical.ReadOperation, "mock-0/errors/rpc") + req.ClientToken = core.Client.Token() + resp, err = core.HandleRequest(req) + if err == nil { + t.Fatalf("expected error from error/rpc request") + } + + // Check internal value to make sure it's reset + req = logical.TestRequest(t, logical.ReadOperation, "mock-0/internal") + req.ClientToken = core.Client.Token() + resp, err = core.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil { + t.Fatalf("bad: response should not be nil") + } + if resp.Data["value"].(string) == "baz" { + t.Fatal("did not expect backend internal value to be 'baz'") + } +} + +func TestSystemBackend_Plugin_SealUnseal(t *testing.T) { + cluster := testSystemBackendMock(t, 1, 1, logical.TypeLogical) + defer cluster.Cleanup() + + // Seal the cluster + cluster.EnsureCoresSealed(t) + + // Unseal the 
cluster + barrierKeys := cluster.BarrierKeys + for _, core := range cluster.Cores { + for _, key := range barrierKeys { + _, err := core.Unseal(vault.TestKeyCopy(key)) + if err != nil { + t.Fatal(err) + } + } + sealed, err := core.Sealed() + if err != nil { + t.Fatalf("err checking seal status: %s", err) + } + if sealed { + t.Fatal("should not be sealed") + } + // Wait for active so post-unseal takes place + // If it fails, it means unseal process failed + vault.TestWaitActive(t, core.Core) + } +} + +func TestSystemBackend_Plugin_reload(t *testing.T) { + data := map[string]interface{}{ + "plugin": "mock-plugin", + } + t.Run("plugin", func(t *testing.T) { testSystemBackend_PluginReload(t, data) }) + + data = map[string]interface{}{ + "mounts": "mock-0/,mock-1/", + } + t.Run("mounts", func(t *testing.T) { testSystemBackend_PluginReload(t, data) }) +} + +// Helper func to test different reload methods on plugin reload endpoint +func testSystemBackend_PluginReload(t *testing.T, reqData map[string]interface{}) { + cluster := testSystemBackendMock(t, 1, 2, logical.TypeLogical) + defer cluster.Cleanup() + + core := cluster.Cores[0] + client := core.Client + + for i := 0; i < 2; i++ { + // Update internal value in the backend + resp, err := client.Logical().Write(fmt.Sprintf("mock-%d/internal", i), map[string]interface{}{ + "value": "baz", + }) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp != nil { + t.Fatalf("bad: %v", resp) + } + } + + // Perform plugin reload + resp, err := client.Logical().Write("sys/plugins/reload/backend", reqData) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp != nil { + t.Fatalf("bad: %v", resp) + } + + for i := 0; i < 2; i++ { + // Ensure internal backed value is reset + resp, err := client.Logical().Read(fmt.Sprintf("mock-%d/internal", i)) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil { + t.Fatalf("bad: response should not be nil") + } + if resp.Data["value"].(string) == "baz" { + t.Fatal("did not 
expect backend internal value to be 'baz'") + } + } +} + +// testSystemBackendMock returns a systemBackend with the desired number +// of mounted mock plugin backends +func testSystemBackendMock(t *testing.T, numCores, numMounts int, backendType logical.BackendType) *vault.TestCluster { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "plugin": plugin.Factory, + }, + CredentialBackends: map[string]logical.Factory{ + "plugin": plugin.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + KeepStandbysSealed: true, + NumCores: numCores, + }) + cluster.Start() + + core := cluster.Cores[0] + vault.TestWaitActive(t, core.Core) + client := core.Client + + os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) + + switch backendType { + case logical.TypeLogical: + vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMainLogical") + for i := 0; i < numMounts; i++ { + // Alternate input styles for plugin_name on every other mount + options := map[string]interface{}{ + "type": "plugin", + } + if (i+1)%2 == 0 { + options["config"] = map[string]interface{}{ + "plugin_name": "mock-plugin", + } + } else { + options["plugin_name"] = "mock-plugin" + } + resp, err := client.Logical().Write(fmt.Sprintf("sys/mounts/mock-%d", i), options) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp != nil { + t.Fatalf("bad: %v", resp) + } + } + case logical.TypeCredential: + vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMainCredentials") + for i := 0; i < numMounts; i++ { + // Alternate input styles for plugin_name on every other mount + options := map[string]interface{}{ + "type": "plugin", + } + if (i+1)%2 == 0 { + options["config"] = map[string]interface{}{ + "plugin_name": "mock-plugin", + } + } else { + options["plugin_name"] = "mock-plugin" + } + resp, err := client.Logical().Write(fmt.Sprintf("sys/auth/mock-%d", i), 
options) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp != nil { + t.Fatalf("bad: %v", resp) + } + } + default: + t.Fatal("unknown backend type provided") + } + + return cluster +} + +func TestBackend_PluginMainLogical(t *testing.T) { + args := []string{} + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadaModeEnv) != "true" { + return + } + + caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv) + if caPEM == "" { + t.Fatal("CA cert not passed in") + } + args = append(args, fmt.Sprintf("--ca-cert=%s", caPEM)) + + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(args) + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig) + + factoryFunc := mock.FactoryType(logical.TypeLogical) + + err := lplugin.Serve(&lplugin.ServeOpts{ + BackendFactoryFunc: factoryFunc, + TLSProviderFunc: tlsProviderFunc, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestBackend_PluginMainCredentials(t *testing.T) { + args := []string{} + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadaModeEnv) != "true" { + return + } + + caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv) + if caPEM == "" { + t.Fatal("CA cert not passed in") + } + args = append(args, fmt.Sprintf("--ca-cert=%s", caPEM)) + + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(args) + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig) + + factoryFunc := mock.FactoryType(logical.TypeCredential) + + err := lplugin.Serve(&lplugin.ServeOpts{ + BackendFactoryFunc: factoryFunc, + TLSProviderFunc: tlsProviderFunc, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_test.go b/vendor/github.com/hashicorp/vault/vault/logical_system_test.go index 4f3f70f..3f9243b 100644 --- 
a/vendor/github.com/hashicorp/vault/vault/logical_system_test.go +++ b/vendor/github.com/hashicorp/vault/vault/logical_system_test.go @@ -2,6 +2,11 @@ package vault import ( "crypto/sha256" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "path/filepath" "reflect" "strings" "testing" @@ -9,6 +14,8 @@ import ( "github.com/fatih/structs" "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/helper/pluginutil" "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" "github.com/mitchellh/mapstructure" @@ -20,12 +27,16 @@ func TestSystemBackend_RootPaths(t *testing.T) { "remount", "audit", "audit/*", + "raw", "raw/*", "replication/primary/secondary-token", "replication/reindex", "rotate", + "config/cors", "config/auditing/*", + "plugins/catalog/*", "revoke-prefix/*", + "revoke-force/*", "leases/revoke-prefix/*", "leases/revoke-force/*", "leases/lookup/*", @@ -38,6 +49,62 @@ func TestSystemBackend_RootPaths(t *testing.T) { } } +func TestSystemConfigCORS(t *testing.T) { + b := testSystemBackend(t) + _, barrier, _ := mockBarrier(t) + view := NewBarrierView(barrier, "") + b.(*SystemBackend).Core.systemBarrierView = view + + req := logical.TestRequest(t, logical.UpdateOperation, "config/cors") + req.Data["allowed_origins"] = "http://www.example.com" + req.Data["allowed_headers"] = "X-Custom-Header" + _, err := b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + expected := &logical.Response{ + Data: map[string]interface{}{ + "enabled": true, + "allowed_origins": []string{"http://www.example.com"}, + "allowed_headers": append(StdAllowedHeaders, "X-Custom-Header"), + }, + } + + req = logical.TestRequest(t, logical.ReadOperation, "config/cors") + actual, err := b.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } + + req = logical.TestRequest(t, logical.DeleteOperation, "config/cors") + 
_, err = b.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + + req = logical.TestRequest(t, logical.ReadOperation, "config/cors") + actual, err = b.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + + expected = &logical.Response{ + Data: map[string]interface{}{ + "enabled": false, + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("DELETE FAILED -- bad: %#v", actual) + } + +} + func TestSystemBackend_mounts(t *testing.T) { b := testSystemBackend(t) req := logical.TestRequest(t, logical.ReadOperation, "mounts") @@ -50,8 +117,9 @@ func TestSystemBackend_mounts(t *testing.T) { // copy what's given exp := map[string]interface{}{ "secret/": map[string]interface{}{ - "type": "generic", - "description": "generic secret storage", + "type": "kv", + "description": "key/value secret storage", + "accessor": resp.Data["secret/"].(map[string]interface{})["accessor"], "config": map[string]interface{}{ "default_lease_ttl": resp.Data["secret/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64), "max_lease_ttl": resp.Data["secret/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64), @@ -62,6 +130,7 @@ func TestSystemBackend_mounts(t *testing.T) { "sys/": map[string]interface{}{ "type": "system", "description": "system endpoints used for control, policy and debugging", + "accessor": resp.Data["sys/"].(map[string]interface{})["accessor"], "config": map[string]interface{}{ "default_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64), "max_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64), @@ -72,6 +141,7 @@ func TestSystemBackend_mounts(t *testing.T) { "cubbyhole/": map[string]interface{}{ "description": "per-token private secret storage", "type": "cubbyhole", + "accessor": 
resp.Data["cubbyhole/"].(map[string]interface{})["accessor"], "config": map[string]interface{}{ "default_lease_ttl": resp.Data["cubbyhole/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64), "max_lease_ttl": resp.Data["cubbyhole/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64), @@ -89,7 +159,7 @@ func TestSystemBackend_mount(t *testing.T) { b := testSystemBackend(t) req := logical.TestRequest(t, logical.UpdateOperation, "mounts/prod/secret/") - req.Data["type"] = "generic" + req.Data["type"] = "kv" resp, err := b.HandleRequest(req) if err != nil { @@ -104,7 +174,7 @@ func TestSystemBackend_mount_force_no_cache(t *testing.T) { core, b, _ := testCoreSystemBackend(t) req := logical.TestRequest(t, logical.UpdateOperation, "mounts/prod/secret/") - req.Data["type"] = "generic" + req.Data["type"] = "kv" req.Data["config"] = map[string]interface{}{ "force_no_cache": true, } @@ -353,7 +423,7 @@ func TestSystemBackend_leases(t *testing.T) { t.Fatalf("err: %v", err) } if resp.Data["renewable"] == nil || resp.Data["renewable"].(bool) { - t.Fatal("generic leases are not renewable") + t.Fatal("kv leases are not renewable") } // Invalid lease @@ -919,7 +989,8 @@ func TestSystemBackend_revokePrefixAuth(t *testing.T) { MaxLeaseTTLVal: time.Hour * 24 * 32, }, } - b, err := NewSystemBackend(core, bc) + b := NewSystemBackend(core) + err := b.Backend.Setup(bc) if err != nil { t.Fatal(err) } @@ -982,7 +1053,8 @@ func TestSystemBackend_revokePrefixAuth_origUrl(t *testing.T) { MaxLeaseTTLVal: time.Hour * 24 * 32, }, } - b, err := NewSystemBackend(core, bc) + b := NewSystemBackend(core) + err := b.Backend.Setup(bc) if err != nil { t.Fatal(err) } @@ -1048,6 +1120,7 @@ func TestSystemBackend_authTable(t *testing.T) { "token/": map[string]interface{}{ "type": "token", "description": "token based credentials", + "accessor": resp.Data["token/"].(map[string]interface{})["accessor"], "config": map[string]interface{}{ 
"default_lease_ttl": int64(0), "max_lease_ttl": int64(0), @@ -1163,8 +1236,16 @@ func TestSystemBackend_policyCRUD(t *testing.T) { // Read, and make sure that case has been normalized req = logical.TestRequest(t, logical.ReadOperation, "policy/Foo") resp, err = b.HandleRequest(req) - if resp != nil { - t.Fatalf("err: expected nil response, got %#v", *resp) + if err != nil { + t.Fatalf("err: %v", err) + } + + exp = map[string]interface{}{ + "name": "foo", + "rules": rules, + } + if !reflect.DeepEqual(resp.Data, exp) { + t.Fatalf("got: %#v expect: %#v", resp.Data, exp) } // List the policies @@ -1246,13 +1327,11 @@ func TestSystemBackend_auditHash(t *testing.T) { Key: "salt", Value: []byte("foo"), }) - var err error - config.Salt, err = salt.NewSalt(view, &salt.Config{ + config.SaltView = view + config.SaltConfig = &salt.Config{ HMAC: sha256.New, HMACType: "hmac-sha256", - }) - if err != nil { - t.Fatalf("error getting new salt: %v", err) + Location: salt.DefaultLocation, } return &NoopAudit{ Config: config, @@ -1369,7 +1448,7 @@ func TestSystemBackend_disableAudit(t *testing.T) { } func TestSystemBackend_rawRead_Protected(t *testing.T) { - b := testSystemBackend(t) + b := testSystemBackendRaw(t) req := logical.TestRequest(t, logical.ReadOperation, "raw/"+keyringPath) _, err := b.HandleRequest(req) @@ -1379,7 +1458,7 @@ func TestSystemBackend_rawRead_Protected(t *testing.T) { } func TestSystemBackend_rawWrite_Protected(t *testing.T) { - b := testSystemBackend(t) + b := testSystemBackendRaw(t) req := logical.TestRequest(t, logical.UpdateOperation, "raw/"+keyringPath) _, err := b.HandleRequest(req) @@ -1389,7 +1468,7 @@ func TestSystemBackend_rawWrite_Protected(t *testing.T) { } func TestSystemBackend_rawReadWrite(t *testing.T) { - c, b, _ := testCoreSystemBackend(t) + c, b, _ := testCoreSystemBackendRaw(t) req := logical.TestRequest(t, logical.UpdateOperation, "raw/sys/policy/test") req.Data["value"] = `path "secret/" { policy = "read" }` @@ -1425,7 +1504,7 @@ func 
TestSystemBackend_rawReadWrite(t *testing.T) { } func TestSystemBackend_rawDelete_Protected(t *testing.T) { - b := testSystemBackend(t) + b := testSystemBackendRaw(t) req := logical.TestRequest(t, logical.DeleteOperation, "raw/"+keyringPath) _, err := b.HandleRequest(req) @@ -1435,7 +1514,7 @@ func TestSystemBackend_rawDelete_Protected(t *testing.T) { } func TestSystemBackend_rawDelete(t *testing.T) { - c, b, _ := testCoreSystemBackend(t) + c, b, _ := testCoreSystemBackendRaw(t) // set the policy! p := &Policy{Name: "test"} @@ -1511,6 +1590,25 @@ func TestSystemBackend_rotate(t *testing.T) { func testSystemBackend(t *testing.T) logical.Backend { c, _, _ := TestCoreUnsealed(t) + return testSystemBackendInternal(t, c) +} + +func testSystemBackendRaw(t *testing.T) logical.Backend { + c, _, _ := TestCoreUnsealedRaw(t) + return testSystemBackendInternal(t, c) +} + +func testCoreSystemBackend(t *testing.T) (*Core, logical.Backend, string) { + c, _, root := TestCoreUnsealed(t) + return c, testSystemBackendInternal(t, c), root +} + +func testCoreSystemBackendRaw(t *testing.T) (*Core, logical.Backend, string) { + c, _, root := TestCoreUnsealedRaw(t) + return c, testSystemBackendInternal(t, c), root +} + +func testSystemBackendInternal(t *testing.T, c *Core) logical.Backend { bc := &logical.BackendConfig{ Logger: c.logger, System: logical.StaticSystemView{ @@ -1519,7 +1617,8 @@ func testSystemBackend(t *testing.T) logical.Backend { }, } - b, err := NewSystemBackend(c, bc) + b := NewSystemBackend(c) + err := b.Backend.Setup(bc) if err != nil { t.Fatal(err) } @@ -1527,19 +1626,88 @@ func testSystemBackend(t *testing.T) logical.Backend { return b } -func testCoreSystemBackend(t *testing.T) (*Core, logical.Backend, string) { - c, _, root := TestCoreUnsealed(t) - bc := &logical.BackendConfig{ - Logger: c.logger, - System: logical.StaticSystemView{ - DefaultLeaseTTLVal: time.Hour * 24, - MaxLeaseTTLVal: time.Hour * 24 * 32, - }, +func TestSystemBackend_PluginCatalog_CRUD(t 
*testing.T) { + c, b, _ := testCoreSystemBackend(t) + // Bootstrap the pluginCatalog + sym, err := filepath.EvalSymlinks(os.TempDir()) + if err != nil { + t.Fatalf("error: %v", err) + } + c.pluginCatalog.directory = sym + + req := logical.TestRequest(t, logical.ListOperation, "plugins/catalog/") + resp, err := b.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) } - b, err := NewSystemBackend(c, bc) + if len(resp.Data["keys"].([]string)) != len(builtinplugins.Keys()) { + t.Fatalf("Wrong number of plugins, got %d, expected %d", len(resp.Data["keys"].([]string)), len(builtinplugins.Keys())) + } + + req = logical.TestRequest(t, logical.ReadOperation, "plugins/catalog/mysql-database-plugin") + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + actualRespData := resp.Data + + expectedBuiltin := &pluginutil.PluginRunner{ + Name: "mysql-database-plugin", + Builtin: true, + } + expectedRespData := structs.New(expectedBuiltin).Map() + + if !reflect.DeepEqual(actualRespData, expectedRespData) { + t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", actualRespData, expectedRespData) + } + + // Set a plugin + file, err := ioutil.TempFile(os.TempDir(), "temp") if err != nil { t.Fatal(err) } - return c, b, root + defer file.Close() + + command := fmt.Sprintf("%s --test", filepath.Base(file.Name())) + req = logical.TestRequest(t, logical.UpdateOperation, "plugins/catalog/test-plugin") + req.Data["sha_256"] = hex.EncodeToString([]byte{'1'}) + req.Data["command"] = command + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + + req = logical.TestRequest(t, logical.ReadOperation, "plugins/catalog/test-plugin") + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + actual := resp.Data + + expectedRunner := &pluginutil.PluginRunner{ + Name: "test-plugin", + Command: filepath.Join(sym, filepath.Base(file.Name())), + Args: []string{"--test"}, + Sha256: []byte{'1'}, 
+ Builtin: false, + } + expected := structs.New(expectedRunner).Map() + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", actual, expected) + } + + // Delete plugin + req = logical.TestRequest(t, logical.DeleteOperation, "plugins/catalog/test-plugin") + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatalf("err: %v", err) + } + + req = logical.TestRequest(t, logical.ReadOperation, "plugins/catalog/test-plugin") + resp, err = b.HandleRequest(req) + if resp != nil || err != nil { + t.Fatalf("expected nil response, plugin not deleted correctly got resp: %v, err: %v", resp, err) + } } diff --git a/vendor/github.com/hashicorp/vault/vault/mount.go b/vendor/github.com/hashicorp/vault/vault/mount.go index d428eee..41aece9 100644 --- a/vendor/github.com/hashicorp/vault/vault/mount.go +++ b/vendor/github.com/hashicorp/vault/vault/mount.go @@ -9,8 +9,11 @@ import ( "strings" "time" + "github.com/hashicorp/errwrap" "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/consts" "github.com/hashicorp/vault/helper/jsonutil" + "github.com/hashicorp/vault/helper/strutil" "github.com/hashicorp/vault/logical" ) @@ -60,9 +63,30 @@ var ( singletonMounts = []string{ "cubbyhole", "system", + "token", } + + // mountAliases maps old backend names to new backend names, allowing us + // to move/rename backends but maintain backwards compatibility + mountAliases = map[string]string{"generic": "kv"} ) +func (c *Core) generateMountAccessor(entryType string) (string, error) { + var accessor string + for { + randBytes, err := uuid.GenerateRandomBytes(4) + if err != nil { + return "", err + } + accessor = fmt.Sprintf("%s_%s", entryType, fmt.Sprintf("%08x", randBytes[0:4])) + if entry := c.router.MatchingMountByAccessor(accessor); entry == nil { + break + } + } + + return accessor, nil +} + // MountTable is used to represent the internal mount table type MountTable struct { Type string `json:"type"` @@ -136,6 
+160,7 @@ type MountEntry struct { Type string `json:"type"` // Logical backend Type Description string `json:"description"` // User-provided description UUID string `json:"uuid"` // Barrier view UUID + Accessor string `json:"accessor"` // Unique but more human-friendly ID. Does not change, not used for any sensitive things (like as a salt, which the UUID sometimes is). Config MountConfig `json:"config"` // Configuration related to this mount (but not backend-derived) Options map[string]string `json:"options"` // Backend options Local bool `json:"local"` // Local mounts are not replicated or affected by replication @@ -147,25 +172,15 @@ type MountConfig struct { DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` // Override for global default MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` // Override for global default ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` // Override for global default + PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` } -// Returns a deep copy of the mount entry -func (e *MountEntry) Clone() *MountEntry { - optClone := make(map[string]string) - for k, v := range e.Options { - optClone[k] = v - } - return &MountEntry{ - Table: e.Table, - Path: e.Path, - Type: e.Type, - Description: e.Description, - UUID: e.UUID, - Config: e.Config, - Options: optClone, - Local: e.Local, - Tainted: e.Tainted, - } +// APIMountConfig is an embedded struct of api.MountConfigInput +type APIMountConfig struct { + DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` + PluginName string 
`json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` } // Mount is used to mount a new backend to the mount table. @@ -205,11 +220,22 @@ func (c *Core) mount(entry *MountEntry) error { } entry.UUID = entryUUID } + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor(entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + } viewPath := backendBarrierPrefix + entry.UUID + "/" view := NewBarrierView(c.barrier, viewPath) sysView := c.mountEntrySysView(entry) + conf := make(map[string]string) + if entry.Config.PluginName != "" { + conf["plugin_name"] = entry.Config.PluginName + } - backend, err := c.newLogicalBackend(entry.Type, sysView, view, nil) + backend, err := c.newLogicalBackend(entry.Type, sysView, view, conf) if err != nil { return err } @@ -217,8 +243,14 @@ func (c *Core) mount(entry *MountEntry) error { return fmt.Errorf("nil backend of type %q returned from creation function", entry.Type) } + // Check for the correct backend type + backendType := backend.Type() + if entry.Type == "plugin" && backendType != logical.TypeLogical { + return fmt.Errorf("cannot mount '%s' of type '%s' as a logical backend", entry.Config.PluginName, backendType) + } + // Call initialize; this takes care of init tasks that must be run after - // the ignore paths are collected + // the ignore paths are collected. if err := backend.Initialize(); err != nil { return err } @@ -243,7 +275,7 @@ func (c *Core) mount(entry *MountEntry) error { // Unmount is used to unmount a path. The boolean indicates whether the mount // was found. 
-func (c *Core) unmount(path string) (bool, error) { +func (c *Core) unmount(path string) error { // Ensure we end the path in a slash if !strings.HasSuffix(path, "/") { path += "/" @@ -252,14 +284,14 @@ func (c *Core) unmount(path string) (bool, error) { // Prevent protected paths from being unmounted for _, p := range protectedMounts { if strings.HasPrefix(path, p) { - return true, fmt.Errorf("cannot unmount '%s'", path) + return fmt.Errorf("cannot unmount '%s'", path) } } // Verify exact match of the route match := c.router.MatchingMount(path) if match == "" || path != match { - return false, fmt.Errorf("no matching mount") + return fmt.Errorf("no matching mount") } // Get the view for this backend @@ -267,23 +299,23 @@ func (c *Core) unmount(path string) (bool, error) { // Mark the entry as tainted if err := c.taintMountEntry(path); err != nil { - return true, err + return err } // Taint the router path to prevent routing. Note that in-flight requests // are uncertain, right now. if err := c.router.Taint(path); err != nil { - return true, err + return err } // Invoke the rollback manager a final time if err := c.rollback.Rollback(path); err != nil { - return true, err + return err } // Revoke all the dynamic keys if err := c.expiration.RevokePrefix(path); err != nil { - return true, err + return err } // Call cleanup function if it exists @@ -294,22 +326,22 @@ func (c *Core) unmount(path string) (bool, error) { // Unmount the backend entirely if err := c.router.Unmount(path); err != nil { - return true, err + return err } // Clear the data in the view if err := logical.ClearView(view); err != nil { - return true, err + return err } // Remove the mount table entry if err := c.removeMountEntry(path); err != nil { - return true, err + return err } if c.logger.IsInfo() { c.logger.Info("core: successfully unmounted", "path", path) } - return true, nil + return nil } // removeMountEntry is used to remove an entry from the mount table @@ -501,7 +533,7 @@ func (c 
*Core) loadMounts() error { needPersist = true } - for _, requiredMount := range requiredMountTable().Entries { + for _, requiredMount := range c.requiredMountTable().Entries { foundRequired := false for _, coreMount := range c.mounts.Entries { if coreMount.Type == requiredMount.Type { @@ -509,7 +541,14 @@ func (c *Core) loadMounts() error { break } } - if !foundRequired { + // In a replication scenario we will let sync invalidation take + // care of creating a new required mount that doesn't exist yet. + // This should only happen in the upgrade case where a new one is + // introduced on the primary; otherwise initial bootstrapping will + // ensure this comes over. If we upgrade first, we simply don't + // create the mount, so we won't conflict when we sync. If this is + // local (e.g. cubbyhole) we do still add it. + if !foundRequired && (c.replicationState.HasState(consts.ReplicationPerformanceSecondary) || requiredMount.Local) { c.mounts.Entries = append(c.mounts.Entries, requiredMount) needPersist = true } @@ -525,6 +564,14 @@ func (c *Core) loadMounts() error { entry.Table = c.mounts.Type needPersist = true } + if entry.Accessor == "" { + accessor, err := c.generateMountAccessor(entry.Type) + if err != nil { + return err + } + entry.Accessor = accessor + needPersist = true + } } // Done if we have restored the mount table and we don't need @@ -534,7 +581,7 @@ func (c *Core) loadMounts() error { } } else { // Create and persist the default mount table - c.mounts = defaultMountTable() + c.mounts = c.defaultMountTable() } if err := c.persistMounts(c.mounts, false); err != nil { @@ -621,11 +668,12 @@ func (c *Core) setupMounts() error { c.mountsLock.Lock() defer c.mountsLock.Unlock() - var backend logical.Backend var view *BarrierView var err error for _, entry := range c.mounts.Entries { + var backend logical.Backend + // Initialize the backend, special casing for system barrierPath := backendBarrierPrefix + entry.UUID + "/" if entry.Type == "system" { @@ 
-635,17 +683,32 @@ func (c *Core) setupMounts() error { // Create a barrier view using the UUID view = NewBarrierView(c.barrier, barrierPath) sysView := c.mountEntrySysView(entry) - // Initialize the backend + // Set up conf to pass in plugin_name + conf := make(map[string]string) + if entry.Config.PluginName != "" { + conf["plugin_name"] = entry.Config.PluginName + } // Create the new backend - backend, err = c.newLogicalBackend(entry.Type, sysView, view, nil) + backend, err = c.newLogicalBackend(entry.Type, sysView, view, conf) if err != nil { c.logger.Error("core: failed to create mount entry", "path", entry.Path, "error", err) + if errwrap.Contains(err, ErrPluginNotFound.Error()) && entry.Type == "plugin" { + // If we encounter an error instantiating the backend due to it being missing from the catalog, + // skip backend initialization but register the entry to the mount table to preserve storage + // and path. + goto ROUTER_MOUNT + } return errLoadMountsFailed } if backend == nil { return fmt.Errorf("created mount entry of type %q is nil", entry.Type) } + // Check for the correct backend type + if entry.Type == "plugin" && backend.Type() != logical.TypeLogical { + return fmt.Errorf("cannot mount '%s' of type '%s' as a logical backend", entry.Config.PluginName, backend.Type()) + } + if err := backend.Initialize(); err != nil { return err } @@ -658,16 +721,15 @@ func (c *Core) setupMounts() error { ch.saltUUID = entry.UUID ch.storageView = view } - + ROUTER_MOUNT: // Mount the backend err = c.router.Mount(backend, entry.Path, entry, view) if err != nil { c.logger.Error("core: failed to mount entry", "path", entry.Path, "error", err) return errLoadMountsFailed - } else { - if c.logger.IsInfo() { - c.logger.Info("core: successfully mounted backend", "type", entry.Type, "path", entry.Path) - } + } + if c.logger.IsInfo() { + c.logger.Info("core: successfully mounted backend", "type", entry.Type, "path", entry.Path) } // Ensure the path is tainted if set in the mount 
table @@ -702,6 +764,9 @@ func (c *Core) unloadMounts() error { // newLogicalBackend is used to create and configure a new logical backend by name func (c *Core) newLogicalBackend(t string, sysView logical.SystemView, view logical.Storage, conf map[string]string) (logical.Backend, error) { + if alias, ok := mountAliases[t]; ok { + t = alias + } f, ok := c.logicalBackends[t] if !ok { return nil, fmt.Errorf("unknown backend type: %s", t) @@ -735,29 +800,34 @@ func (c *Core) mountEntrySysView(entry *MountEntry) logical.SystemView { } // defaultMountTable creates a default mount table -func defaultMountTable() *MountTable { +func (c *Core) defaultMountTable() *MountTable { table := &MountTable{ Type: mountTableType, } mountUUID, err := uuid.GenerateUUID() if err != nil { - panic(fmt.Sprintf("could not create default mount table UUID: %v", err)) + panic(fmt.Sprintf("could not create default secret mount UUID: %v", err)) } - genericMount := &MountEntry{ + mountAccessor, err := c.generateMountAccessor("kv") + if err != nil { + panic(fmt.Sprintf("could not generate default secret mount accessor: %v", err)) + } + kvMount := &MountEntry{ Table: mountTableType, Path: "secret/", - Type: "generic", - Description: "generic secret storage", + Type: "kv", + Description: "key/value secret storage", UUID: mountUUID, + Accessor: mountAccessor, } - table.Entries = append(table.Entries, genericMount) - table.Entries = append(table.Entries, requiredMountTable().Entries...) + table.Entries = append(table.Entries, kvMount) + table.Entries = append(table.Entries, c.requiredMountTable().Entries...) 
return table } // requiredMountTable() creates a mount table with entries required // to be available -func requiredMountTable() *MountTable { +func (c *Core) requiredMountTable() *MountTable { table := &MountTable{ Type: mountTableType, } @@ -765,12 +835,17 @@ func requiredMountTable() *MountTable { if err != nil { panic(fmt.Sprintf("could not create cubbyhole UUID: %v", err)) } + cubbyholeAccessor, err := c.generateMountAccessor("cubbyhole") + if err != nil { + panic(fmt.Sprintf("could not generate cubbyhole accessor: %v", err)) + } cubbyholeMount := &MountEntry{ Table: mountTableType, Path: "cubbyhole/", Type: "cubbyhole", Description: "per-token private secret storage", UUID: cubbyholeUUID, + Accessor: cubbyholeAccessor, Local: true, } @@ -778,14 +853,48 @@ func requiredMountTable() *MountTable { if err != nil { panic(fmt.Sprintf("could not create sys UUID: %v", err)) } + sysAccessor, err := c.generateMountAccessor("system") + if err != nil { + panic(fmt.Sprintf("could not generate sys accessor: %v", err)) + } sysMount := &MountEntry{ Table: mountTableType, Path: "sys/", Type: "system", Description: "system endpoints used for control, policy and debugging", UUID: sysUUID, + Accessor: sysAccessor, } table.Entries = append(table.Entries, cubbyholeMount) table.Entries = append(table.Entries, sysMount) return table } + +// This function returns tables that are singletons. The main usage of this is +// for replication, so we can send over mount info (especially, UUIDs of +// mounts, which are used for salts) for mounts that may not be able to be +// handled normally. After saving these values on the secondary, we let normal +// sync invalidation do its thing. Because of its use for replication, we +// exclude local mounts. 
+func (c *Core) singletonMountTables() (mounts, auth *MountTable) { + mounts = &MountTable{} + auth = &MountTable{} + + c.mountsLock.RLock() + for _, entry := range c.mounts.Entries { + if strutil.StrListContains(singletonMounts, entry.Type) && !entry.Local { + mounts.Entries = append(mounts.Entries, entry) + } + } + c.mountsLock.RUnlock() + + c.authLock.RLock() + for _, entry := range c.auth.Entries { + if strutil.StrListContains(singletonMounts, entry.Type) && !entry.Local { + auth.Entries = append(auth.Entries, entry) + } + } + c.authLock.RUnlock() + + return +} diff --git a/vendor/github.com/hashicorp/vault/vault/mount_test.go b/vendor/github.com/hashicorp/vault/vault/mount_test.go index 4e8d25f..cf24e18 100644 --- a/vendor/github.com/hashicorp/vault/vault/mount_test.go +++ b/vendor/github.com/hashicorp/vault/vault/mount_test.go @@ -47,7 +47,7 @@ func TestCore_Mount(t *testing.T) { me := &MountEntry{ Table: mountTableType, Path: "foo", - Type: "generic", + Type: "kv", } err := c.mount(me) if err != nil { @@ -93,16 +93,18 @@ func TestCore_Mount_Local(t *testing.T) { Type: mountTableType, Entries: []*MountEntry{ &MountEntry{ - Table: mountTableType, - Path: "noop/", - Type: "generic", - UUID: "abcd", + Table: mountTableType, + Path: "noop/", + Type: "kv", + UUID: "abcd", + Accessor: "kv-abcd", }, &MountEntry{ - Table: mountTableType, - Path: "noop2/", - Type: "generic", - UUID: "bcde", + Table: mountTableType, + Path: "noop2/", + Type: "kv", + UUID: "bcde", + Accessor: "kv-bcde", }, }, } @@ -162,7 +164,7 @@ func TestCore_Mount_Local(t *testing.T) { compEntries := c.mounts.Entries[:0] // Filter out required mounts for _, v := range c.mounts.Entries { - if v.Type == "generic" { + if v.Type == "kv" { compEntries = append(compEntries, v) } } @@ -179,9 +181,9 @@ func TestCore_Mount_Local(t *testing.T) { func TestCore_Unmount(t *testing.T) { c, keys, _ := TestCoreUnsealed(t) - existed, err := c.unmount("secret") - if !existed || err != nil { - t.Fatalf("existed: %v; 
err: %v", existed, err) + err := c.unmount("secret") + if err != nil { + t.Fatalf("err: %v", err) } match := c.router.MatchingMount("secret/foo") @@ -270,8 +272,8 @@ func TestCore_Unmount_Cleanup(t *testing.T) { } // Unmount, this should cleanup - if existed, err := c.unmount("test/"); !existed || err != nil { - t.Fatalf("existed: %v; err: %v", existed, err) + if err := c.unmount("test/"); err != nil { + t.Fatalf("err: %v", err) } // Rollback should be invoked @@ -426,7 +428,8 @@ func TestCore_Remount_Protected(t *testing.T) { } func TestDefaultMountTable(t *testing.T) { - table := defaultMountTable() + c, _, _ := TestCoreUnsealed(t) + table := c.defaultMountTable() verifyDefaultTable(t, table) } @@ -606,7 +609,7 @@ func verifyDefaultTable(t *testing.T, table *MountTable) { if entry.Path != "secret/" { t.Fatalf("bad: %v", entry) } - if entry.Type != "generic" { + if entry.Type != "kv" { t.Fatalf("bad: %v", entry) } case 2: @@ -628,3 +631,28 @@ func verifyDefaultTable(t *testing.T, table *MountTable) { } } } + +func TestSingletonMountTableFunc(t *testing.T) { + c, _, _ := TestCoreUnsealed(t) + + mounts, auth := c.singletonMountTables() + + if len(mounts.Entries) != 1 { + t.Fatal("length of mounts is wrong") + } + for _, entry := range mounts.Entries { + switch entry.Type { + case "system": + default: + t.Fatalf("unknown type %s", entry.Type) + } + } + + if len(auth.Entries) != 1 { + t.Fatal("length of auth is wrong") + } + + if auth.Entries[0].Type != "token" { + t.Fatal("unexpected entry type for auth") + } +} diff --git a/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go b/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go new file mode 100644 index 0000000..3e2466f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go @@ -0,0 +1,189 @@ +package vault + +import ( + "encoding/json" + "errors" + "fmt" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/hashicorp/vault/helper/builtinplugins" + 
"github.com/hashicorp/vault/helper/consts" + "github.com/hashicorp/vault/helper/jsonutil" + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/logical" +) + +var ( + pluginCatalogPath = "core/plugin-catalog/" + ErrDirectoryNotConfigured = errors.New("could not set plugin, plugin directory is not configured") + ErrPluginNotFound = errors.New("plugin not found in the catalog") +) + +// PluginCatalog keeps a record of plugins known to vault. External plugins need +// to be registered to the catalog before they can be used in backends. Builtin +// plugins are automatically detected and included in the catalog. +type PluginCatalog struct { + catalogView *BarrierView + directory string + + lock sync.RWMutex +} + +func (c *Core) setupPluginCatalog() error { + c.pluginCatalog = &PluginCatalog{ + catalogView: NewBarrierView(c.barrier, pluginCatalogPath), + directory: c.pluginDirectory, + } + + if c.logger.IsInfo() { + c.logger.Info("core: successfully setup plugin catalog", "plugin-directory", c.pluginDirectory) + } + + return nil +} + +// Get retrieves a plugin with the specified name from the catalog. It first +// looks for external plugins with this name and then looks for builtin plugins. +// It returns a PluginRunner or an error if no plugin was found. +func (c *PluginCatalog) Get(name string) (*pluginutil.PluginRunner, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + // If the directory isn't set only look for builtin plugins. 
+ if c.directory != "" { + // Look for external plugins in the barrier + out, err := c.catalogView.Get(name) + if err != nil { + return nil, fmt.Errorf("failed to retrieve plugin \"%s\": %v", name, err) + } + if out != nil { + entry := new(pluginutil.PluginRunner) + if err := jsonutil.DecodeJSON(out.Value, entry); err != nil { + return nil, fmt.Errorf("failed to decode plugin entry: %v", err) + } + + // prepend the plugin directory to the command + entry.Command = filepath.Join(c.directory, entry.Command) + + return entry, nil + } + } + // Look for builtin plugins + if factory, ok := builtinplugins.Get(name); ok { + return &pluginutil.PluginRunner{ + Name: name, + Builtin: true, + BuiltinFactory: factory, + }, nil + } + + return nil, nil +} + +// Set registers a new external plugin with the catalog, or updates an existing +// external plugin. It takes the name, command and SHA256 of the plugin. +func (c *PluginCatalog) Set(name, command string, sha256 []byte) error { + if c.directory == "" { + return ErrDirectoryNotConfigured + } + + switch { + case strings.Contains(name, ".."): + fallthrough + case strings.Contains(command, ".."): + return consts.ErrPathContainsParentReferences + } + + c.lock.Lock() + defer c.lock.Unlock() + + parts := strings.Split(command, " ") + + // Best effort check to make sure the command isn't breaking out of the + // configured plugin directory. 
+ commandFull := filepath.Join(c.directory, parts[0]) + sym, err := filepath.EvalSymlinks(commandFull) + if err != nil { + return fmt.Errorf("error while validating the command path: %v", err) + } + symAbs, err := filepath.Abs(filepath.Dir(sym)) + if err != nil { + return fmt.Errorf("error while validating the command path: %v", err) + } + + if symAbs != c.directory { + return errors.New("can not execute files outside of configured plugin directory") + } + + entry := &pluginutil.PluginRunner{ + Name: name, + Command: parts[0], + Args: parts[1:], + Sha256: sha256, + Builtin: false, + } + + buf, err := json.Marshal(entry) + if err != nil { + return fmt.Errorf("failed to encode plugin entry: %v", err) + } + + logicalEntry := logical.StorageEntry{ + Key: name, + Value: buf, + } + if err := c.catalogView.Put(&logicalEntry); err != nil { + return fmt.Errorf("failed to persist plugin entry: %v", err) + } + return nil +} + +// Delete is used to remove an external plugin from the catalog. Builtin plugins +// can not be deleted. +func (c *PluginCatalog) Delete(name string) error { + c.lock.Lock() + defer c.lock.Unlock() + + return c.catalogView.Delete(name) +} + +// List returns a list of all the known plugin names. If an external and builtin +// plugin share the same name, only one instance of the name will be returned. +func (c *PluginCatalog) List() ([]string, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + // Collect keys for external plugins in the barrier. 
+ keys, err := logical.CollectKeys(c.catalogView) + if err != nil { + return nil, err + } + + // Get the keys for builtin plugins + builtinKeys := builtinplugins.Keys() + + // Use a map to unique the two lists + mapKeys := make(map[string]bool) + + for _, plugin := range keys { + mapKeys[plugin] = true + } + + for _, plugin := range builtinKeys { + mapKeys[plugin] = true + } + + retList := make([]string, len(mapKeys)) + i := 0 + for k := range mapKeys { + retList[i] = k + i++ + } + // sort for consistent ordering of builtin pluings + sort.Strings(retList) + + return retList, nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/plugin_catalog_test.go b/vendor/github.com/hashicorp/vault/vault/plugin_catalog_test.go new file mode 100644 index 0000000..6cfacda --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/plugin_catalog_test.go @@ -0,0 +1,176 @@ +package vault + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "testing" + + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/helper/pluginutil" +) + +func TestPluginCatalog_CRUD(t *testing.T) { + core, _, _ := TestCoreUnsealed(t) + + sym, err := filepath.EvalSymlinks(os.TempDir()) + if err != nil { + t.Fatalf("error: %v", err) + } + core.pluginCatalog.directory = sym + + // Get builtin plugin + p, err := core.pluginCatalog.Get("mysql-database-plugin") + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + expectedBuiltin := &pluginutil.PluginRunner{ + Name: "mysql-database-plugin", + Builtin: true, + } + expectedBuiltin.BuiltinFactory, _ = builtinplugins.Get("mysql-database-plugin") + + if &(p.BuiltinFactory) == &(expectedBuiltin.BuiltinFactory) { + t.Fatal("expected BuiltinFactory did not match actual") + } + expectedBuiltin.BuiltinFactory = nil + p.BuiltinFactory = nil + if !reflect.DeepEqual(p, expectedBuiltin) { + t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", p, expectedBuiltin) + } + + // Set a plugin, 
test overwriting a builtin plugin + file, err := ioutil.TempFile(os.TempDir(), "temp") + if err != nil { + t.Fatal(err) + } + defer file.Close() + + command := fmt.Sprintf("%s --test", filepath.Base(file.Name())) + err = core.pluginCatalog.Set("mysql-database-plugin", command, []byte{'1'}) + if err != nil { + t.Fatal(err) + } + + // Get the plugin + p, err = core.pluginCatalog.Get("mysql-database-plugin") + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + expected := &pluginutil.PluginRunner{ + Name: "mysql-database-plugin", + Command: filepath.Join(sym, filepath.Base(file.Name())), + Args: []string{"--test"}, + Sha256: []byte{'1'}, + Builtin: false, + } + + if !reflect.DeepEqual(p, expected) { + t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", p, expected) + } + + // Delete the plugin + err = core.pluginCatalog.Delete("mysql-database-plugin") + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + + // Get builtin plugin + p, err = core.pluginCatalog.Get("mysql-database-plugin") + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + expectedBuiltin = &pluginutil.PluginRunner{ + Name: "mysql-database-plugin", + Builtin: true, + } + expectedBuiltin.BuiltinFactory, _ = builtinplugins.Get("mysql-database-plugin") + + if &(p.BuiltinFactory) == &(expectedBuiltin.BuiltinFactory) { + t.Fatal("expected BuiltinFactory did not match actual") + } + expectedBuiltin.BuiltinFactory = nil + p.BuiltinFactory = nil + if !reflect.DeepEqual(p, expectedBuiltin) { + t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", p, expectedBuiltin) + } + +} + +func TestPluginCatalog_List(t *testing.T) { + core, _, _ := TestCoreUnsealed(t) + + sym, err := filepath.EvalSymlinks(os.TempDir()) + if err != nil { + t.Fatalf("error: %v", err) + } + core.pluginCatalog.directory = sym + + // Get builtin plugins and sort them + builtinKeys := builtinplugins.Keys() + sort.Strings(builtinKeys) + + // List only builtin plugins + plugins, err := 
core.pluginCatalog.List() + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + if len(plugins) != len(builtinKeys) { + t.Fatalf("unexpected length of plugin list, expected %d, got %d", len(builtinKeys), len(plugins)) + } + + for i, p := range builtinKeys { + if !reflect.DeepEqual(plugins[i], p) { + t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", plugins[i], p) + } + } + + // Set a plugin, test overwriting a builtin plugin + file, err := ioutil.TempFile(os.TempDir(), "temp") + if err != nil { + t.Fatal(err) + } + defer file.Close() + + command := fmt.Sprintf("%s --test", filepath.Base(file.Name())) + err = core.pluginCatalog.Set("mysql-database-plugin", command, []byte{'1'}) + if err != nil { + t.Fatal(err) + } + + // Set another plugin + err = core.pluginCatalog.Set("aaaaaaa", command, []byte{'1'}) + if err != nil { + t.Fatal(err) + } + + // List the plugins + plugins, err = core.pluginCatalog.List() + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + if len(plugins) != len(builtinKeys)+1 { + t.Fatalf("unexpected length of plugin list, expected %d, got %d", len(builtinKeys)+1, len(plugins)) + } + + // verify the first plugin is the one we just created. 
+ if !reflect.DeepEqual(plugins[0], "aaaaaaa") { + t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", plugins[0], "aaaaaaa") + } + + // verify the builtin pluings are correct + for i, p := range builtinKeys { + if !reflect.DeepEqual(plugins[i+1], p) { + t.Fatalf("expected did not match actual, got %#v\n expected %#v\n", plugins[i+1], p) + } + } + +} diff --git a/vendor/github.com/hashicorp/vault/vault/plugin_reload.go b/vendor/github.com/hashicorp/vault/vault/plugin_reload.go new file mode 100644 index 0000000..eaff18b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/vault/plugin_reload.go @@ -0,0 +1,125 @@ +package vault + +import ( + "fmt" + "strings" + + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/logical" +) + +// reloadPluginMounts reloads provided mounts, regardless of +// plugin name, as long as the backend type is plugin. +func (c *Core) reloadMatchingPluginMounts(mounts []string) error { + c.mountsLock.Lock() + defer c.mountsLock.Unlock() + + var errors error + for _, mount := range mounts { + entry := c.router.MatchingMountEntry(mount) + if entry == nil { + errors = multierror.Append(errors, fmt.Errorf("cannot fetch mount entry on %s", mount)) + continue + // return fmt.Errorf("cannot fetch mount entry on %s", mount) + } + + var isAuth bool + fullPath := c.router.MatchingMount(mount) + if strings.HasPrefix(fullPath, credentialRoutePrefix) { + isAuth = true + } + + if entry.Type == "plugin" { + err := c.reloadPluginCommon(entry, isAuth) + if err != nil { + errors = multierror.Append(errors, fmt.Errorf("cannot reload plugin on %s: %v", mount, err)) + continue + } + c.logger.Info("core: successfully reloaded plugin", "plugin", entry.Config.PluginName, "path", entry.Path) + } + } + return errors +} + +// reloadPlugin reloads all mounted backends that are of +// plugin pluginName (name of the plugin as registered in +// the plugin catalog). 
+func (c *Core) reloadMatchingPlugin(pluginName string) error { + c.mountsLock.Lock() + defer c.mountsLock.Unlock() + + // Filter mount entries that only matches the plugin name + for _, entry := range c.mounts.Entries { + if entry.Config.PluginName == pluginName && entry.Type == "plugin" { + err := c.reloadPluginCommon(entry, false) + if err != nil { + return err + } + c.logger.Info("core: successfully reloaded plugin", "plugin", pluginName, "path", entry.Path) + } + } + + // Filter auth mount entries that ony matches the plugin name + for _, entry := range c.auth.Entries { + if entry.Config.PluginName == pluginName && entry.Type == "plugin" { + err := c.reloadPluginCommon(entry, true) + if err != nil { + return err + } + c.logger.Info("core: successfully reloaded plugin", "plugin", pluginName, "path", entry.Path) + } + } + + return nil +} + +// reloadPluginCommon is a generic method to reload a backend provided a +// MountEntry. entry.Type should be checked by the caller to ensure that +// it's a "plugin" type. 
+func (c *Core) reloadPluginCommon(entry *MountEntry, isAuth bool) error { + path := entry.Path + + // Fast-path out if the backend doesn't exist + raw, ok := c.router.root.Get(path) + if !ok { + return nil + } + + // Call backend's Cleanup routine + re := raw.(*routeEntry) + re.backend.Cleanup() + + view := re.storageView + + sysView := c.mountEntrySysView(entry) + conf := make(map[string]string) + if entry.Config.PluginName != "" { + conf["plugin_name"] = entry.Config.PluginName + } + + var backend logical.Backend + var err error + if !isAuth { + // Dispense a new backend + backend, err = c.newLogicalBackend(entry.Type, sysView, view, conf) + } else { + backend, err = c.newCredentialBackend(entry.Type, sysView, view, conf) + } + if err != nil { + return err + } + if backend == nil { + return fmt.Errorf("nil backend of type %q returned from creation function", entry.Type) + } + + // Call initialize; this takes care of init tasks that must be run after + // the ignore paths are collected. 
+ if err := backend.Initialize(); err != nil { + return err + } + + // Set the backend back + re.backend = backend + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/vault/policy.go b/vendor/github.com/hashicorp/vault/vault/policy.go index c808c2a..79f3b56 100644 --- a/vendor/github.com/hashicorp/vault/vault/policy.go +++ b/vendor/github.com/hashicorp/vault/vault/policy.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/vault/helper/parseutil" + "github.com/mitchellh/copystructure" ) const ( @@ -84,6 +85,40 @@ type Permissions struct { DeniedParameters map[string][]interface{} } +func (p *Permissions) Clone() (*Permissions, error) { + ret := &Permissions{ + CapabilitiesBitmap: p.CapabilitiesBitmap, + MinWrappingTTL: p.MinWrappingTTL, + MaxWrappingTTL: p.MaxWrappingTTL, + } + + switch { + case p.AllowedParameters == nil: + case len(p.AllowedParameters) == 0: + ret.AllowedParameters = make(map[string][]interface{}) + default: + clonedAllowed, err := copystructure.Copy(p.AllowedParameters) + if err != nil { + return nil, err + } + ret.AllowedParameters = clonedAllowed.(map[string][]interface{}) + } + + switch { + case p.DeniedParameters == nil: + case len(p.DeniedParameters) == 0: + ret.DeniedParameters = make(map[string][]interface{}) + default: + clonedDenied, err := copystructure.Copy(p.DeniedParameters) + if err != nil { + return nil, err + } + ret.DeniedParameters = clonedDenied.(map[string][]interface{}) + } + + return ret, nil +} + // Parse is used to parse the specified ACL rules into an // intermediary set of policies, before being compiled into // the ACL diff --git a/vendor/github.com/hashicorp/vault/vault/policy_store.go b/vendor/github.com/hashicorp/vault/vault/policy_store.go index 0768f76..986dcda 100644 --- a/vendor/github.com/hashicorp/vault/vault/policy_store.go +++ b/vendor/github.com/hashicorp/vault/vault/policy_store.go @@ -25,9 +25,7 @@ const ( responseWrappingPolicyName 
= "response-wrapping" // responseWrappingPolicy is the policy that ensures cubbyhole response - // wrapping can always succeed. Note that sys/wrapping/lookup isn't - // contained here because using it would revoke the token anyways, so there - // isn't much point. + // wrapping can always succeed. responseWrappingPolicy = ` path "cubbyhole/response" { capabilities = ["create", "read"] @@ -147,7 +145,7 @@ func (c *Core) setupPolicyStore() error { sysView := &dynamicSystemView{core: c} c.policyStore = NewPolicyStore(view, sysView) - if sysView.ReplicationState() == consts.ReplicationSecondary { + if c.replicationState.HasState(consts.ReplicationPerformanceSecondary) { // Policies will sync from the primary return nil } @@ -202,6 +200,8 @@ func (ps *PolicyStore) SetPolicy(p *Policy) error { if p.Name == "" { return fmt.Errorf("policy name missing") } + // Policies are normalized to lower-case + p.Name = strings.ToLower(strings.TrimSpace(p.Name)) if strutil.StrListContains(immutablePolicies, p.Name) { return fmt.Errorf("cannot update %s policy", p.Name) } @@ -232,6 +232,7 @@ func (ps *PolicyStore) setPolicyInternal(p *Policy) error { // GetPolicy is used to fetch the named policy func (ps *PolicyStore) GetPolicy(name string) (*Policy, error) { defer metrics.MeasureSince([]string{"policy", "get_policy"}, time.Now()) + if ps.lru != nil { // Check for cached policy if raw, ok := ps.lru.Get(name); ok { @@ -239,6 +240,9 @@ func (ps *PolicyStore) GetPolicy(name string) (*Policy, error) { } } + // Policies are normalized to lower-case + name = strings.ToLower(strings.TrimSpace(name)) + // Special case the root policy if name == "root" { p := &Policy{Name: "root"} @@ -322,6 +326,9 @@ func (ps *PolicyStore) ListPolicies() ([]string, error) { // DeletePolicy is used to delete the named policy func (ps *PolicyStore) DeletePolicy(name string) error { defer metrics.MeasureSince([]string{"policy", "delete_policy"}, time.Now()) + + // Policies are normalized to lower-case + name = 
strings.ToLower(strings.TrimSpace(name)) if strutil.StrListContains(immutablePolicies, name) { return fmt.Errorf("cannot delete %s policy", name) } diff --git a/vendor/github.com/hashicorp/vault/vault/policy_store_test.go b/vendor/github.com/hashicorp/vault/vault/policy_store_test.go index dafca34..97107f1 100644 --- a/vendor/github.com/hashicorp/vault/vault/policy_store_test.go +++ b/vendor/github.com/hashicorp/vault/vault/policy_store_test.go @@ -61,7 +61,7 @@ func TestPolicyStore_CRUD(t *testing.T) { func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) { // Get should return nothing - p, err := ps.GetPolicy("dev") + p, err := ps.GetPolicy("Dev") if err != nil { t.Fatalf("err: %v", err) } @@ -70,7 +70,7 @@ func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) { } // Delete should be no-op - err = ps.DeletePolicy("dev") + err = ps.DeletePolicy("deV") if err != nil { t.Fatalf("err: %v", err) } @@ -92,7 +92,7 @@ func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) { } // Get should work - p, err = ps.GetPolicy("dev") + p, err = ps.GetPolicy("dEv") if err != nil { t.Fatalf("err: %v", err) } @@ -110,13 +110,13 @@ func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) { } // Delete should be clear the entry - err = ps.DeletePolicy("dev") + err = ps.DeletePolicy("Dev") if err != nil { t.Fatalf("err: %v", err) } // Get should fail - p, err = ps.GetPolicy("dev") + p, err = ps.GetPolicy("deV") if err != nil { t.Fatalf("err: %v", err) } diff --git a/vendor/github.com/hashicorp/vault/vault/rekey_test.go b/vendor/github.com/hashicorp/vault/vault/rekey_test.go index c463325..e6453ad 100644 --- a/vendor/github.com/hashicorp/vault/vault/rekey_test.go +++ b/vendor/github.com/hashicorp/vault/vault/rekey_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/physical" + "github.com/hashicorp/vault/physical/inmem" ) func TestCore_Rekey_Lifecycle(t *testing.T) { @@ -372,12 +373,19 @@ func TestCore_Standby_Rekey(t 
*testing.T) { // Create the first core and initialize it logger := logformat.NewVaultLogger(log.LevelTrace) - inm := physical.NewInmem(logger) - inmha := physical.NewInmemHA(logger) + inm, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + redirectOriginal := "http://127.0.0.1:8200" core, err := NewCore(&CoreConfig{ Physical: inm, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: redirectOriginal, DisableMlock: true, DisableCache: true, @@ -399,7 +407,7 @@ func TestCore_Standby_Rekey(t *testing.T) { redirectOriginal2 := "http://127.0.0.1:8500" core2, err := NewCore(&CoreConfig{ Physical: inm, - HAPhysical: inmha, + HAPhysical: inmha.(physical.HABackend), RedirectAddr: redirectOriginal2, DisableMlock: true, DisableCache: true, diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding.go index 62cbf44..0433fec 100644 --- a/vendor/github.com/hashicorp/vault/vault/request_forwarding.go +++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding.go @@ -1,14 +1,13 @@ package vault import ( - "bytes" "crypto/tls" "crypto/x509" "fmt" "net" "net/http" "net/url" - "os" + "runtime" "sync" "sync/atomic" "time" @@ -17,10 +16,13 @@ import ( "golang.org/x/net/context" "golang.org/x/net/http2" "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" ) const ( clusterListenerAcceptDeadline = 500 * time.Millisecond + heartbeatInterval = 30 * time.Second + requestForwardingALPN = "req_fw_sb-act_v1" ) // Starts the listeners and servers necessary to handle forwarded requests @@ -36,10 +38,6 @@ func (c *Core) startForwarding() error { // Resolve locally to avoid races ha := c.ha != nil - // Get our base handler (for our RPC server) and our wrapped handler (for - // straight HTTP/2 forwarding) - baseHandler, wrappedHandler := c.clusterHandlerSetupFunc() - // Get our 
TLS config tlsConfig, err := c.ClusterTLSConfig() if err != nil { @@ -48,7 +46,7 @@ func (c *Core) startForwarding() error { } // The server supports all of the possible protos - tlsConfig.NextProtos = []string{"h2", "req_fw_sb-act_v1"} + tlsConfig.NextProtos = []string{"h2", requestForwardingALPN} // Create our RPC server and register the request handler server c.clusterParamsLock.Lock() @@ -58,12 +56,16 @@ func (c *Core) startForwarding() error { return nil } - c.rpcServer = grpc.NewServer() + c.rpcServer = grpc.NewServer( + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: 2 * heartbeatInterval, + }), + ) - if ha { + if ha && c.clusterHandler != nil { RegisterRequestForwardingServer(c.rpcServer, &forwardedRequestRPCServer{ core: c, - handler: baseHandler, + handler: c.clusterHandler, }) } c.clusterParamsLock.Unlock() @@ -143,24 +145,13 @@ func (c *Core) startForwarding() error { } switch tlsConn.ConnectionState().NegotiatedProtocol { - case "h2": + case requestForwardingALPN: if !ha { conn.Close() continue } - c.logger.Trace("core: got h2 connection") - go fws.ServeConn(conn, &http2.ServeConnOpts{ - Handler: wrappedHandler, - }) - - case "req_fw_sb-act_v1": - if !ha { - conn.Close() - continue - } - - c.logger.Trace("core: got req_fw_sb-act_v1 connection") + c.logger.Trace("core: got request forwarding connection") go fws.ServeConn(conn, &http2.ServeConnOpts{ Handler: c.rpcServer, }) @@ -231,37 +222,31 @@ func (c *Core) refreshRequestForwardingConnection(clusterAddr string) error { return err } - switch os.Getenv("VAULT_USE_GRPC_REQUEST_FORWARDING") { - case "": - // Set up normal HTTP forwarding handling - tlsConfig, err := c.ClusterTLSConfig() - if err != nil { - c.logger.Error("core: error fetching cluster tls configuration when trying to create connection", "error", err) - return err - } - tp := &http2.Transport{ - TLSClientConfig: tlsConfig, - } - c.requestForwardingConnection = &activeConnection{ - transport: tp, - clusterAddr: clusterAddr, - } - - 
default: - // Set up grpc forwarding handling - // It's not really insecure, but we have to dial manually to get the - // ALPN header right. It's just "insecure" because GRPC isn't managing - // the TLS state. - - ctx, cancelFunc := context.WithCancel(context.Background()) - c.rpcClientConnCancelFunc = cancelFunc - c.rpcClientConn, err = grpc.DialContext(ctx, clusterURL.Host, grpc.WithDialer(c.getGRPCDialer("req_fw_sb-act_v1", "", nil)), grpc.WithInsecure()) - if err != nil { - c.logger.Error("core: err setting up forwarding rpc client", "error", err) - return err - } - c.rpcForwardingClient = NewRequestForwardingClient(c.rpcClientConn) + // Set up grpc forwarding handling + // It's not really insecure, but we have to dial manually to get the + // ALPN header right. It's just "insecure" because GRPC isn't managing + // the TLS state. + ctx, cancelFunc := context.WithCancel(context.Background()) + c.rpcClientConn, err = grpc.DialContext(ctx, clusterURL.Host, + grpc.WithDialer(c.getGRPCDialer(requestForwardingALPN, "", nil)), + grpc.WithInsecure(), // it's not, we handle it in the dialer + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 2 * heartbeatInterval, + })) + if err != nil { + cancelFunc() + c.logger.Error("core: err setting up forwarding rpc client", "error", err) + return err } + c.rpcClientConnContext = ctx + c.rpcClientConnCancelFunc = cancelFunc + c.rpcForwardingClient = &forwardingClient{ + RequestForwardingClient: NewRequestForwardingClient(c.rpcClientConn), + core: c, + echoTicker: time.NewTicker(heartbeatInterval), + echoContext: ctx, + } + c.rpcForwardingClient.startHeartbeat() return nil } @@ -270,11 +255,6 @@ func (c *Core) clearForwardingClients() { c.logger.Trace("core: clearing forwarding clients") defer c.logger.Trace("core: done clearing forwarding clients") - if c.requestForwardingConnection != nil { - c.requestForwardingConnection.transport.CloseIdleConnections() - c.requestForwardingConnection = nil - } - if 
c.rpcClientConnCancelFunc != nil { c.rpcClientConnCancelFunc() c.rpcClientConnCancelFunc = nil @@ -283,6 +263,8 @@ func (c *Core) clearForwardingClients() { c.rpcClientConn.Close() c.rpcClientConn = nil } + + c.rpcClientConnContext = nil c.rpcForwardingClient = nil } @@ -292,70 +274,36 @@ func (c *Core) ForwardRequest(req *http.Request) (int, http.Header, []byte, erro c.requestForwardingConnectionLock.RLock() defer c.requestForwardingConnectionLock.RUnlock() - switch os.Getenv("VAULT_USE_GRPC_REQUEST_FORWARDING") { - case "": - if c.requestForwardingConnection == nil { - return 0, nil, nil, ErrCannotForward - } + if c.rpcForwardingClient == nil { + return 0, nil, nil, ErrCannotForward + } - if c.requestForwardingConnection.clusterAddr == "" { - return 0, nil, nil, ErrCannotForward - } + freq, err := forwarding.GenerateForwardedRequest(req) + if err != nil { + c.logger.Error("core: error creating forwarding RPC request", "error", err) + return 0, nil, nil, fmt.Errorf("error creating forwarding RPC request") + } + if freq == nil { + c.logger.Error("core: got nil forwarding RPC request") + return 0, nil, nil, fmt.Errorf("got nil forwarding RPC request") + } + resp, err := c.rpcForwardingClient.ForwardRequest(c.rpcClientConnContext, freq) + if err != nil { + c.logger.Error("core: error during forwarded RPC request", "error", err) + return 0, nil, nil, fmt.Errorf("error during forwarding RPC request") + } - freq, err := forwarding.GenerateForwardedHTTPRequest(req, c.requestForwardingConnection.clusterAddr+"/cluster/local/forwarded-request") - if err != nil { - c.logger.Error("core/ForwardRequest: error creating forwarded request", "error", err) - return 0, nil, nil, fmt.Errorf("error creating forwarding request") - } - - //resp, err := c.requestForwardingConnection.Do(freq) - resp, err := c.requestForwardingConnection.transport.RoundTrip(freq) - if err != nil { - return 0, nil, nil, err - } - defer resp.Body.Close() - - // Read the body into a buffer so we can write it 
back out to the - // original requestor - buf := bytes.NewBuffer(nil) - _, err = buf.ReadFrom(resp.Body) - if err != nil { - return 0, nil, nil, err - } - return resp.StatusCode, resp.Header, buf.Bytes(), nil - - default: - if c.rpcForwardingClient == nil { - return 0, nil, nil, ErrCannotForward - } - - freq, err := forwarding.GenerateForwardedRequest(req) - if err != nil { - c.logger.Error("core/ForwardRequest: error creating forwarding RPC request", "error", err) - return 0, nil, nil, fmt.Errorf("error creating forwarding RPC request") - } - if freq == nil { - c.logger.Error("core/ForwardRequest: got nil forwarding RPC request") - return 0, nil, nil, fmt.Errorf("got nil forwarding RPC request") - } - resp, err := c.rpcForwardingClient.ForwardRequest(context.Background(), freq, grpc.FailFast(true)) - if err != nil { - c.logger.Error("core/ForwardRequest: error during forwarded RPC request", "error", err) - return 0, nil, nil, fmt.Errorf("error during forwarding RPC request") - } - - var header http.Header - if resp.HeaderEntries != nil { - header = make(http.Header) - for k, v := range resp.HeaderEntries { - for _, j := range v.Values { - header.Add(k, j) - } + var header http.Header + if resp.HeaderEntries != nil { + header = make(http.Header) + for k, v := range resp.HeaderEntries { + for _, j := range v.Values { + header.Add(k, j) } } - - return int(resp.StatusCode), header, resp.Body, nil } + + return int(resp.StatusCode), header, resp.Body, nil } // getGRPCDialer is used to return a dialer that has the correct TLS @@ -406,12 +354,23 @@ func (s *forwardedRequestRPCServer) ForwardRequest(ctx context.Context, freq *fo // meets the interface requirements. 
w := forwarding.NewRPCResponseWriter() - s.handler.ServeHTTP(w, req) + resp := &forwarding.Response{} - resp := &forwarding.Response{ - StatusCode: uint32(w.StatusCode()), - Body: w.Body().Bytes(), + runRequest := func() { + defer func() { + // Logic here comes mostly from the Go source code + if err := recover(); err != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + s.core.logger.Error("forwarding: panic serving request", "path", req.URL.Path, "error", err, "stacktrace", buf) + } + }() + s.handler.ServeHTTP(w, req) } + runRequest() + resp.StatusCode = uint32(w.StatusCode()) + resp.Body = w.Body().Bytes() header := w.Header() if header != nil { @@ -425,3 +384,66 @@ func (s *forwardedRequestRPCServer) ForwardRequest(ctx context.Context, freq *fo return resp, nil } + +func (s *forwardedRequestRPCServer) Echo(ctx context.Context, in *EchoRequest) (*EchoReply, error) { + if in.ClusterAddr != "" { + s.core.clusterPeerClusterAddrsCache.Set(in.ClusterAddr, nil, 0) + } + return &EchoReply{ + Message: "pong", + }, nil +} + +type forwardingClient struct { + RequestForwardingClient + + core *Core + + echoTicker *time.Ticker + echoContext context.Context +} + +// NOTE: we also take advantage of gRPC's keepalive bits, but as we send data +// with these requests it's useful to keep this as well +func (c *forwardingClient) startHeartbeat() { + go func() { + tick := func() { + c.core.stateLock.RLock() + clusterAddr := c.core.clusterAddr + c.core.stateLock.RUnlock() + + ctx, cancel := context.WithTimeout(c.echoContext, 2*time.Second) + resp, err := c.RequestForwardingClient.Echo(ctx, &EchoRequest{ + Message: "ping", + ClusterAddr: clusterAddr, + }) + cancel() + if err != nil { + c.core.logger.Debug("forwarding: error sending echo request to active node", "error", err) + return + } + if resp == nil { + c.core.logger.Debug("forwarding: empty echo response from active node") + return + } + if resp.Message != "pong" { + 
c.core.logger.Debug("forwarding: unexpected echo response from active node", "message", resp.Message) + return + } + c.core.logger.Trace("forwarding: successful heartbeat") + } + + tick() + + for { + select { + case <-c.echoContext.Done(): + c.echoTicker.Stop() + c.core.logger.Trace("forwarding: stopping heartbeating") + return + case <-c.echoTicker.C: + tick() + } + } + }() +} diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go index cae684d..add7bf3 100644 --- a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go +++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: request_forwarding_service.proto -// DO NOT EDIT! /* Package vault is a generated protocol buffer package. @@ -9,6 +8,8 @@ It is generated from these files: request_forwarding_service.proto It has these top-level messages: + EchoRequest + EchoReply */ package vault @@ -33,6 +34,59 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +type EchoRequest struct { + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` + ClusterAddr string `protobuf:"bytes,2,opt,name=cluster_addr,json=clusterAddr" json:"cluster_addr,omitempty"` +} + +func (m *EchoRequest) Reset() { *m = EchoRequest{} } +func (m *EchoRequest) String() string { return proto.CompactTextString(m) } +func (*EchoRequest) ProtoMessage() {} +func (*EchoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *EchoRequest) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *EchoRequest) GetClusterAddr() string { + if m != nil { + return m.ClusterAddr + } + return "" +} + +type EchoReply struct { + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` + ClusterAddrs []string `protobuf:"bytes,2,rep,name=cluster_addrs,json=clusterAddrs" json:"cluster_addrs,omitempty"` +} + +func (m *EchoReply) Reset() { *m = EchoReply{} } +func (m *EchoReply) String() string { return proto.CompactTextString(m) } +func (*EchoReply) ProtoMessage() {} +func (*EchoReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *EchoReply) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *EchoReply) GetClusterAddrs() []string { + if m != nil { + return m.ClusterAddrs + } + return nil +} + +func init() { + proto.RegisterType((*EchoRequest)(nil), "vault.EchoRequest") + proto.RegisterType((*EchoReply)(nil), "vault.EchoReply") +} + // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context var _ grpc.ClientConn @@ -45,6 +99,7 @@ const _ = grpc.SupportPackageIsVersion4 type RequestForwardingClient interface { ForwardRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error) + Echo(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoReply, error) } type requestForwardingClient struct { @@ -64,10 +119,20 @@ func (c *requestForwardingClient) ForwardRequest(ctx context.Context, in *forwar return out, nil } +func (c *requestForwardingClient) Echo(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoReply, error) { + out := new(EchoReply) + err := grpc.Invoke(ctx, "/vault.RequestForwarding/Echo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // Server API for RequestForwarding service type RequestForwardingServer interface { ForwardRequest(context.Context, *forwarding.Request) (*forwarding.Response, error) + Echo(context.Context, *EchoRequest) (*EchoReply, error) } func RegisterRequestForwardingServer(s *grpc.Server, srv RequestForwardingServer) { @@ -92,6 +157,24 @@ func _RequestForwarding_ForwardRequest_Handler(srv interface{}, ctx context.Cont return interceptor(ctx, in, info, handler) } +func _RequestForwarding_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EchoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RequestForwardingServer).Echo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vault.RequestForwarding/Echo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RequestForwardingServer).Echo(ctx, req.(*EchoRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _RequestForwarding_serviceDesc = grpc.ServiceDesc{ ServiceName: "vault.RequestForwarding", 
HandlerType: (*RequestForwardingServer)(nil), @@ -100,6 +183,10 @@ var _RequestForwarding_serviceDesc = grpc.ServiceDesc{ MethodName: "ForwardRequest", Handler: _RequestForwarding_ForwardRequest_Handler, }, + { + MethodName: "Echo", + Handler: _RequestForwarding_Echo_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "request_forwarding_service.proto", @@ -108,15 +195,21 @@ var _RequestForwarding_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("request_forwarding_service.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 151 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x28, 0x4a, 0x2d, 0x2c, - 0x4d, 0x2d, 0x2e, 0x89, 0x4f, 0xcb, 0x2f, 0x2a, 0x4f, 0x2c, 0x4a, 0xc9, 0xcc, 0x4b, 0x8f, 0x2f, - 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4b, - 0x2c, 0xcd, 0x29, 0x91, 0xb2, 0x48, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, - 0xcf, 0x48, 0x2c, 0xce, 0xc8, 0x4c, 0xce, 0x2f, 0x2a, 0xd0, 0x07, 0xcb, 0xe9, 0x67, 0xa4, 0xe6, - 0x14, 0xa4, 0x16, 0xe9, 0x23, 0x8c, 0xd0, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0x86, 0x18, 0x60, 0x14, - 0xc4, 0x25, 0x18, 0x04, 0xb1, 0xc4, 0x0d, 0xae, 0x40, 0xc8, 0x96, 0x8b, 0x0f, 0xca, 0x83, 0xca, - 0x09, 0x09, 0xeb, 0x21, 0xf4, 0xeb, 0x41, 0x05, 0xa5, 0x44, 0x50, 0x05, 0x8b, 0x0b, 0xf2, 0xf3, - 0x8a, 0x53, 0x95, 0x18, 0x92, 0xd8, 0xc0, 0x46, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x81, - 0xce, 0x3f, 0x7f, 0xbf, 0x00, 0x00, 0x00, + // 254 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0x3d, 0x4f, 0xc3, 0x30, + 0x10, 0x86, 0xdb, 0xf2, 0xa5, 0xb8, 0x05, 0x81, 0x61, 0x88, 0x32, 0x85, 0xb0, 0x74, 0x72, 0x24, + 0x58, 0x58, 0x18, 0x18, 0x60, 0xe8, 0x98, 0x3f, 0x10, 0xb9, 0xf6, 0x11, 0x47, 0x72, 0x6b, 0x73, + 0xe7, 0x14, 0x65, 0xe5, 0x97, 0x23, 0x92, 0x94, 0xa6, 0x0b, 0xe3, 0xbd, 0x27, 0x3d, 0xf7, 0xdc, + 0xcb, 0x52, 0x84, 0xcf, 
0x06, 0x28, 0x94, 0x1f, 0x0e, 0xbf, 0x24, 0xea, 0x7a, 0x5b, 0x95, 0x04, + 0xb8, 0xab, 0x15, 0x08, 0x8f, 0x2e, 0x38, 0x7e, 0xb6, 0x93, 0x8d, 0x0d, 0xc9, 0x73, 0x55, 0x07, + 0xd3, 0xac, 0x85, 0x72, 0x9b, 0xdc, 0x48, 0x32, 0xb5, 0x72, 0xe8, 0xf3, 0x6e, 0x97, 0x1b, 0xb0, + 0x1e, 0x30, 0x3f, 0x20, 0xf2, 0xd0, 0x7a, 0xa0, 0x1e, 0x90, 0xad, 0xd8, 0xfc, 0x4d, 0x19, 0x57, + 0xf4, 0x87, 0x78, 0xcc, 0x2e, 0x36, 0x40, 0x24, 0x2b, 0x88, 0xa7, 0xe9, 0x74, 0x19, 0x15, 0xfb, + 0x91, 0xdf, 0xb3, 0x85, 0xb2, 0x0d, 0x05, 0xc0, 0x52, 0x6a, 0x8d, 0xf1, 0xac, 0x5b, 0xcf, 0x87, + 0xec, 0x55, 0x6b, 0xcc, 0x56, 0x2c, 0xea, 0x59, 0xde, 0xb6, 0xff, 0x90, 0x1e, 0xd8, 0xe5, 0x98, + 0x44, 0xf1, 0x2c, 0x3d, 0x59, 0x46, 0xc5, 0x62, 0x84, 0xa2, 0xc7, 0xef, 0x29, 0xbb, 0x19, 0xa4, + 0xde, 0xff, 0xcc, 0xf9, 0x0b, 0xbb, 0x1a, 0xa6, 0xbd, 0xf0, 0xad, 0x38, 0x3c, 0x26, 0x86, 0x30, + 0xb9, 0x3b, 0x0e, 0xc9, 0xbb, 0x2d, 0x41, 0x36, 0xe1, 0x82, 0x9d, 0xfe, 0x0a, 0x72, 0x2e, 0xba, + 0x6a, 0xc4, 0xe8, 0xf3, 0xe4, 0xfa, 0x28, 0xf3, 0xb6, 0xcd, 0x26, 0xeb, 0xf3, 0xae, 0xa3, 0xa7, + 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x13, 0x7f, 0xc2, 0x88, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto index 4ab32c1..0018bb4 100644 --- a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto +++ b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto @@ -4,6 +4,17 @@ import "github.com/hashicorp/vault/helper/forwarding/types.proto"; package vault; +message EchoRequest { + string message = 1; + string cluster_addr = 2; +} + +message EchoReply { + string message = 1; + repeated string cluster_addrs = 2; +} + service RequestForwarding { rpc ForwardRequest(forwarding.Request) returns (forwarding.Response) {} + rpc Echo(EchoRequest) returns (EchoReply) {} } diff --git a/vendor/github.com/hashicorp/vault/vault/request_handling.go 
b/vendor/github.com/hashicorp/vault/vault/request_handling.go index ad37b5a..b003b3f 100644 --- a/vendor/github.com/hashicorp/vault/vault/request_handling.go +++ b/vendor/github.com/hashicorp/vault/vault/request_handling.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/vault/helper/jsonutil" "github.com/hashicorp/vault/helper/policyutil" "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/helper/wrapping" "github.com/hashicorp/vault/logical" ) @@ -26,7 +27,7 @@ func (c *Core) HandleRequest(req *logical.Request) (resp *logical.Response, err } // Allowing writing to a path ending in / makes it extremely difficult to - // understand user intent for the filesystem-like backends (generic, + // understand user intent for the filesystem-like backends (kv, // cubbyhole) -- did they want a key named foo/ or did they want to write // to a directory foo/ with no (or forgotten) key, or...? It also affects // lookup, because paths ending in / are considered prefixes by some @@ -76,8 +77,8 @@ func (c *Core) HandleRequest(req *logical.Request) (resp *logical.Response, err } else { wrappingResp := &logical.Response{ WrapInfo: resp.WrapInfo, + Warnings: resp.Warnings, } - wrappingResp.CloneWarnings(resp) resp = wrappingResp } } @@ -170,7 +171,10 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r if errType != nil { retErr = multierror.Append(retErr, errType) } - return logical.ErrorResponse(ctErr.Error()), nil, retErr + if ctErr == ErrInternalError { + return nil, auth, retErr + } + return logical.ErrorResponse(ctErr.Error()), auth, retErr } // Attach the display name @@ -188,7 +192,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r if resp != nil { // If wrapping is used, use the shortest between the request and response var wrapTTL time.Duration - var wrapFormat string + var wrapFormat, creationPath string // Ensure no wrap info information is set other than, possibly, the TTL if 
resp.WrapInfo != nil { @@ -196,6 +200,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r wrapTTL = resp.WrapInfo.TTL } wrapFormat = resp.WrapInfo.Format + creationPath = resp.WrapInfo.CreationPath resp.WrapInfo = nil } @@ -216,16 +221,18 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r } if wrapTTL > 0 { - resp.WrapInfo = &logical.ResponseWrapInfo{ - TTL: wrapTTL, - Format: wrapFormat, + resp.WrapInfo = &wrapping.ResponseWrapInfo{ + TTL: wrapTTL, + Format: wrapFormat, + CreationPath: creationPath, } } } // If there is a secret, we must register it with the expiration manager. // We exclude renewal of a lease, since it does not need to be re-registered - if resp != nil && resp.Secret != nil && !strings.HasPrefix(req.Path, "sys/renew") { + if resp != nil && resp.Secret != nil && !strings.HasPrefix(req.Path, "sys/renew") && + !strings.HasPrefix(req.Path, "sys/leases/renew") { // Get the SystemView for the mount sysView := c.router.MatchingSystemView(req.Path) if sysView == nil { @@ -245,12 +252,12 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r resp.Secret.TTL = maxTTL } - // Generic mounts should return the TTL but not register + // KV mounts should return the TTL but not register // for a lease as this provides a massive slowdown registerLease := true matchingBackend := c.router.MatchingBackend(req.Path) if matchingBackend == nil { - c.logger.Error("core: unable to retrieve generic backend from router") + c.logger.Error("core: unable to retrieve kv backend from router") retErr = multierror.Append(retErr, ErrInternalError) return nil, auth, retErr } @@ -288,10 +295,11 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r if err != nil { c.logger.Error("core: failed to look up token", "error", err) retErr = multierror.Append(retErr, ErrInternalError) - return nil, nil, retErr + return nil, auth, retErr } if err := 
c.expiration.RegisterAuth(te.Path, resp.Auth); err != nil { + c.tokenStore.Revoke(te.ID) c.logger.Error("core: failed to register token lease", "request_path", req.Path, "error", err) retErr = multierror.Append(retErr, ErrInternalError) return nil, auth, retErr @@ -335,7 +343,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log if resp != nil { // If wrapping is used, use the shortest between the request and response var wrapTTL time.Duration - var wrapFormat string + var wrapFormat, creationPath string // Ensure no wrap info information is set other than, possibly, the TTL if resp.WrapInfo != nil { @@ -343,6 +351,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log wrapTTL = resp.WrapInfo.TTL } wrapFormat = resp.WrapInfo.Format + creationPath = resp.WrapInfo.CreationPath resp.WrapInfo = nil } @@ -361,9 +370,10 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log } if wrapTTL > 0 { - resp.WrapInfo = &logical.ResponseWrapInfo{ - TTL: wrapTTL, - Format: wrapFormat, + resp.WrapInfo = &wrapping.ResponseWrapInfo{ + TTL: wrapTTL, + Format: wrapFormat, + CreationPath: creationPath, } } } @@ -439,6 +449,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log // Register with the expiration manager if err := c.expiration.RegisterAuth(te.Path, auth); err != nil { + c.tokenStore.Revoke(te.ID) c.logger.Error("core: failed to register token lease", "request_path", req.Path, "error", err) return nil, auth, ErrInternalError } diff --git a/vendor/github.com/hashicorp/vault/vault/request_handling_test.go b/vendor/github.com/hashicorp/vault/vault/request_handling_test.go index c966b04..5f148c8 100644 --- a/vendor/github.com/hashicorp/vault/vault/request_handling_test.go +++ b/vendor/github.com/hashicorp/vault/vault/request_handling_test.go @@ -12,14 +12,14 @@ import ( func TestRequestHandling_Wrapping(t *testing.T) { core, _, root := TestCoreUnsealed(t) - 
core.logicalBackends["generic"] = PassthroughBackendFactory + core.logicalBackends["kv"] = PassthroughBackendFactory meUUID, _ := uuid.GenerateUUID() err := core.mount(&MountEntry{ Table: mountTableType, UUID: meUUID, Path: "wraptest", - Type: "generic", + Type: "kv", }) if err != nil { t.Fatalf("err: %v", err) diff --git a/vendor/github.com/hashicorp/vault/vault/rollback.go b/vendor/github.com/hashicorp/vault/vault/rollback.go index 9ace6b3..1ee6d9f 100644 --- a/vendor/github.com/hashicorp/vault/vault/rollback.go +++ b/vendor/github.com/hashicorp/vault/vault/rollback.go @@ -113,8 +113,15 @@ func (m *RollbackManager) triggerRollbacks() { for _, e := range backends { path := e.Path if e.Table == credentialTableType { - path = "auth/" + path + path = credentialRoutePrefix + path } + + // When the mount is filtered, the backend will be nil + backend := m.router.MatchingBackend(path) + if backend == nil { + continue + } + m.inflightLock.RLock() _, ok := m.inflight[path] m.inflightLock.RUnlock() diff --git a/vendor/github.com/hashicorp/vault/vault/rollback_test.go b/vendor/github.com/hashicorp/vault/vault/rollback_test.go index 797993a..f050df7 100644 --- a/vendor/github.com/hashicorp/vault/vault/rollback_test.go +++ b/vendor/github.com/hashicorp/vault/vault/rollback_test.go @@ -29,7 +29,7 @@ func mockRollback(t *testing.T) (*RollbackManager, *NoopBackend) { if err != nil { t.Fatal(err) } - if err := router.Mount(backend, "foo", &MountEntry{UUID: meUUID}, view); err != nil { + if err := router.Mount(backend, "foo", &MountEntry{UUID: meUUID, Accessor: "noopaccessor"}, view); err != nil { t.Fatalf("err: %s", err) } diff --git a/vendor/github.com/hashicorp/vault/vault/router.go b/vendor/github.com/hashicorp/vault/vault/router.go index 5a90dfa..f05e207 100644 --- a/vendor/github.com/hashicorp/vault/vault/router.go +++ b/vendor/github.com/hashicorp/vault/vault/router.go @@ -14,21 +14,25 @@ import ( // Router is used to do prefix based routing of a request to a logical 
backend type Router struct { - l sync.RWMutex - root *radix.Tree - tokenStoreSalt *salt.Salt + l sync.RWMutex + root *radix.Tree + mountUUIDCache *radix.Tree + mountAccessorCache *radix.Tree + tokenStoreSaltFunc func() (*salt.Salt, error) // storagePrefix maps the prefix used for storage (ala the BarrierView) // to the backend. This is used to map a key back into the backend that owns it. - // For example, logical/uuid1/foobar -> secrets/ (generic backend) + foobar + // For example, logical/uuid1/foobar -> secrets/ (kv backend) + foobar storagePrefix *radix.Tree } // NewRouter returns a new router func NewRouter() *Router { r := &Router{ - root: radix.New(), - storagePrefix: radix.New(), + root: radix.New(), + storagePrefix: radix.New(), + mountUUIDCache: radix.New(), + mountAccessorCache: radix.New(), } return r } @@ -60,9 +64,12 @@ func (r *Router) Mount(backend logical.Backend, prefix string, mountEntry *Mount } // Build the paths - paths := backend.SpecialPaths() - if paths == nil { - paths = new(logical.Paths) + paths := new(logical.Paths) + if backend != nil { + specialPaths := backend.SpecialPaths() + if specialPaths != nil { + paths = specialPaths + } } // Create a mount entry @@ -74,8 +81,22 @@ func (r *Router) Mount(backend logical.Backend, prefix string, mountEntry *Mount rootPaths: pathsToRadix(paths.Root), loginPaths: pathsToRadix(paths.Unauthenticated), } + + switch { + case prefix == "": + return fmt.Errorf("missing prefix to be used for router entry; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type) + case storageView.prefix == "": + return fmt.Errorf("missing storage view prefix; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type) + case re.mountEntry.UUID == "": + return fmt.Errorf("missing mount identifier; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type) + case re.mountEntry.Accessor == "": + return fmt.Errorf("missing mount accessor; mount_path: %q, mount_type: %q", 
re.mountEntry.Path, re.mountEntry.Type) + } + r.root.Insert(prefix, re) r.storagePrefix.Insert(storageView.prefix, re) + r.mountUUIDCache.Insert(re.mountEntry.UUID, re.mountEntry) + r.mountAccessorCache.Insert(re.mountEntry.Accessor, re.mountEntry) return nil } @@ -98,6 +119,9 @@ func (r *Router) Unmount(prefix string) error { // Purge from the radix trees r.root.Delete(prefix) r.storagePrefix.Delete(re.storageView.prefix) + r.mountUUIDCache.Delete(re.mountEntry.UUID) + r.mountAccessorCache.Delete(re.mountEntry.Accessor) + return nil } @@ -141,6 +165,39 @@ func (r *Router) Untaint(path string) error { return nil } +func (r *Router) MatchingMountByUUID(mountID string) *MountEntry { + if mountID == "" { + return nil + } + + r.l.RLock() + defer r.l.RUnlock() + + _, raw, ok := r.mountUUIDCache.LongestPrefix(mountID) + if !ok { + return nil + } + + return raw.(*MountEntry) +} + +// MatchingMountByAccessor returns the MountEntry by accessor lookup +func (r *Router) MatchingMountByAccessor(mountAccessor string) *MountEntry { + if mountAccessor == "" { + return nil + } + + r.l.RLock() + defer r.l.RUnlock() + + _, raw, ok := r.mountAccessorCache.LongestPrefix(mountAccessor) + if !ok { + return nil + } + + return raw.(*MountEntry) +} + // MatchingMount returns the mount prefix that would be used for a path func (r *Router) MatchingMount(path string) string { r.l.RLock() @@ -152,7 +209,7 @@ func (r *Router) MatchingMount(path string) string { return mount } -// MatchingView returns the view used for a path +// MatchingStorageView returns the storageView used for a path func (r *Router) MatchingStorageView(path string) *BarrierView { r.l.RLock() _, raw, ok := r.root.LongestPrefix(path) @@ -174,7 +231,7 @@ func (r *Router) MatchingMountEntry(path string) *MountEntry { return raw.(*routeEntry).mountEntry } -// MatchingMountEntry returns the MountEntry used for a path +// MatchingBackend returns the backend used for a path func (r *Router) MatchingBackend(path string) 
logical.Backend { r.l.RLock() _, raw, ok := r.root.LongestPrefix(path) @@ -210,6 +267,12 @@ func (r *Router) MatchingStoragePrefix(path string) (string, string, bool) { re := raw.(*routeEntry) mountPath := re.mountEntry.Path prefix := re.storageView.prefix + + // Add back the prefix for credential backends + if strings.HasPrefix(path, credentialBarrierPrefix) { + mountPath = credentialRoutePrefix + mountPath + } + return mountPath, prefix, true } @@ -228,17 +291,19 @@ func (r *Router) RouteExistenceCheck(req *logical.Request) (bool, bool, error) { func (r *Router) routeCommon(req *logical.Request, existenceCheck bool) (*logical.Response, bool, bool, error) { // Find the mount point r.l.RLock() - mount, raw, ok := r.root.LongestPrefix(req.Path) - if !ok { + adjustedPath := req.Path + mount, raw, ok := r.root.LongestPrefix(adjustedPath) + if !ok && !strings.HasSuffix(adjustedPath, "/") { // Re-check for a backend by appending a slash. This lets "foo" mean // "foo/" at the root level which is almost always what we want. 
- req.Path += "/" - mount, raw, ok = r.root.LongestPrefix(req.Path) + adjustedPath += "/" + mount, raw, ok = r.root.LongestPrefix(adjustedPath) } r.l.RUnlock() if !ok { return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath } + req.Path = adjustedPath defer metrics.MeasureSince([]string{"route", string(req.Operation), strings.Replace(mount, "/", "-", -1)}, time.Now()) re := raw.(*routeEntry) @@ -273,7 +338,11 @@ func (r *Router) routeCommon(req *logical.Request, existenceCheck bool) (*logica case strings.HasPrefix(originalPath, "cubbyhole/"): // In order for the token store to revoke later, we need to have the same // salted ID, so we double-salt what's going to the cubbyhole backend - req.ClientToken = re.SaltID(r.tokenStoreSalt.SaltID(req.ClientToken)) + salt, err := r.tokenStoreSaltFunc() + if err != nil { + return nil, false, false, err + } + req.ClientToken = re.SaltID(salt.SaltID(req.ClientToken)) default: req.ClientToken = re.SaltID(req.ClientToken) } diff --git a/vendor/github.com/hashicorp/vault/vault/router_test.go b/vendor/github.com/hashicorp/vault/vault/router_test.go index e5de72e..acf4fcc 100644 --- a/vendor/github.com/hashicorp/vault/vault/router_test.go +++ b/vendor/github.com/hashicorp/vault/vault/router_test.go @@ -2,13 +2,17 @@ package vault import ( "fmt" + "io/ioutil" + "reflect" "strings" "sync" "testing" "time" "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/logical" + log "github.com/mgutz/logxi/v1" ) type NoopBackend struct { @@ -62,10 +66,26 @@ func (n *NoopBackend) InvalidateKey(k string) { n.Invalidations = append(n.Invalidations, k) } +func (n *NoopBackend) Setup(config *logical.BackendConfig) error { + return nil +} + +func (n *NoopBackend) Logger() log.Logger { + return logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelOff) +} + func (n *NoopBackend) Initialize() error { return nil } +func (n 
*NoopBackend) Type() logical.BackendType { + return logical.TypeLogical +} + +func (n *NoopBackend) RegisterLicense(license interface{}) error { + return nil +} + func TestRouter_Mount(t *testing.T) { r := NewRouter() _, barrier, _ := mockBarrier(t) @@ -75,8 +95,15 @@ func TestRouter_Mount(t *testing.T) { if err != nil { t.Fatal(err) } + + mountEntry := &MountEntry{ + Path: "prod/aws/", + UUID: meUUID, + Accessor: "awsaccessor", + } + n := &NoopBackend{} - err = r.Mount(n, "prod/aws/", &MountEntry{Path: "prod/aws/", UUID: meUUID}, view) + err = r.Mount(n, "prod/aws/", mountEntry, view) if err != nil { t.Fatalf("err: %v", err) } @@ -85,6 +112,7 @@ func TestRouter_Mount(t *testing.T) { if err != nil { t.Fatal(err) } + err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view) if !strings.Contains(err.Error(), "cannot mount under existing mount") { t.Fatalf("err: %v", err) @@ -106,6 +134,11 @@ func TestRouter_Mount(t *testing.T) { t.Fatalf("bad: %v", v) } + mountEntryFetched := r.MatchingMountByUUID(mountEntry.UUID) + if mountEntryFetched == nil || !reflect.DeepEqual(mountEntry, mountEntryFetched) { + t.Fatalf("failed to fetch mount entry using its ID; expected: %#v\n actual: %#v\n", mountEntry, mountEntryFetched) + } + mount, prefix, ok := r.MatchingStoragePrefix("logical/foo") if !ok { t.Fatalf("missing storage prefix") @@ -131,6 +164,84 @@ func TestRouter_Mount(t *testing.T) { } } +func TestRouter_MountCredential(t *testing.T) { + r := NewRouter() + _, barrier, _ := mockBarrier(t) + view := NewBarrierView(barrier, credentialBarrierPrefix) + + meUUID, err := uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + + mountEntry := &MountEntry{ + Path: "aws", + UUID: meUUID, + Accessor: "awsaccessor", + } + + n := &NoopBackend{} + err = r.Mount(n, "auth/aws/", mountEntry, view) + if err != nil { + t.Fatalf("err: %v", err) + } + + meUUID, err = uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + + err = r.Mount(n, "auth/aws/", &MountEntry{UUID: meUUID}, 
view) + if !strings.Contains(err.Error(), "cannot mount under existing mount") { + t.Fatalf("err: %v", err) + } + + if path := r.MatchingMount("auth/aws/foo"); path != "auth/aws/" { + t.Fatalf("bad: %s", path) + } + + if v := r.MatchingStorageView("auth/aws/foo"); v != view { + t.Fatalf("bad: %v", v) + } + + if path := r.MatchingMount("auth/stage/aws/foo"); path != "" { + t.Fatalf("bad: %s", path) + } + + if v := r.MatchingStorageView("auth/stage/aws/foo"); v != nil { + t.Fatalf("bad: %v", v) + } + + mountEntryFetched := r.MatchingMountByUUID(mountEntry.UUID) + if mountEntryFetched == nil || !reflect.DeepEqual(mountEntry, mountEntryFetched) { + t.Fatalf("failed to fetch mount entry using its ID; expected: %#v\n actual: %#v\n", mountEntry, mountEntryFetched) + } + + mount, prefix, ok := r.MatchingStoragePrefix("auth/foo") + if !ok { + t.Fatalf("missing storage prefix") + } + if mount != "auth/aws" || prefix != credentialBarrierPrefix { + t.Fatalf("Bad: %v - %v", mount, prefix) + } + + req := &logical.Request{ + Path: "auth/aws/foo", + } + resp, err := r.Route(req) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp != nil { + t.Fatalf("bad: %v", resp) + } + + // Verify the path + if len(n.Paths) != 1 || n.Paths[0] != "foo" { + t.Fatalf("bad: %v", n.Paths) + } +} + func TestRouter_Unmount(t *testing.T) { r := NewRouter() _, barrier, _ := mockBarrier(t) @@ -141,7 +252,7 @@ func TestRouter_Unmount(t *testing.T) { t.Fatal(err) } n := &NoopBackend{} - err = r.Mount(n, "prod/aws/", &MountEntry{Path: "prod/aws/", UUID: meUUID}, view) + err = r.Mount(n, "prod/aws/", &MountEntry{Path: "prod/aws/", UUID: meUUID, Accessor: "awsaccessor"}, view) if err != nil { t.Fatalf("err: %v", err) } @@ -174,7 +285,7 @@ func TestRouter_Remount(t *testing.T) { t.Fatal(err) } n := &NoopBackend{} - me := &MountEntry{Path: "prod/aws/", UUID: meUUID} + me := &MountEntry{Path: "prod/aws/", UUID: meUUID, Accessor: "awsaccessor"} err = r.Mount(n, "prod/aws/", me, view) if err != nil { 
t.Fatalf("err: %v", err) @@ -237,7 +348,7 @@ func TestRouter_RootPath(t *testing.T) { "policy/*", }, } - err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID, Accessor: "awsaccessor"}, view) if err != nil { t.Fatalf("err: %v", err) } @@ -279,7 +390,7 @@ func TestRouter_LoginPath(t *testing.T) { "oauth/*", }, } - err = r.Mount(n, "auth/foo/", &MountEntry{UUID: meUUID}, view) + err = r.Mount(n, "auth/foo/", &MountEntry{UUID: meUUID, Accessor: "authfooaccessor"}, view) if err != nil { t.Fatalf("err: %v", err) } @@ -314,7 +425,7 @@ func TestRouter_Taint(t *testing.T) { t.Fatal(err) } n := &NoopBackend{} - err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID, Accessor: "awsaccessor"}, view) if err != nil { t.Fatalf("err: %v", err) } @@ -357,7 +468,7 @@ func TestRouter_Untaint(t *testing.T) { t.Fatal(err) } n := &NoopBackend{} - err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = r.Mount(n, "prod/aws/", &MountEntry{UUID: meUUID, Accessor: "awsaccessor"}, view) if err != nil { t.Fatalf("err: %v", err) } diff --git a/vendor/github.com/hashicorp/vault/vault/seal_testing.go b/vendor/github.com/hashicorp/vault/vault/seal_testing.go index f74b140..27271cf 100644 --- a/vendor/github.com/hashicorp/vault/vault/seal_testing.go +++ b/vendor/github.com/hashicorp/vault/vault/seal_testing.go @@ -107,7 +107,7 @@ func (d *TestSeal) SetRecoveryKey(key []byte) error { func testCoreUnsealedWithConfigs(t *testing.T, barrierConf, recoveryConf *SealConfig) (*Core, [][]byte, [][]byte, string) { seal := &TestSeal{} - core := TestCoreWithSeal(t, seal) + core := TestCoreWithSeal(t, seal, false) result, err := core.Initialize(&InitParams{ BarrierConfig: barrierConf, RecoveryConfig: recoveryConf, diff --git a/vendor/github.com/hashicorp/vault/vault/testing.go b/vendor/github.com/hashicorp/vault/vault/testing.go index b567fe7..3e500c2 100644 --- 
a/vendor/github.com/hashicorp/vault/vault/testing.go +++ b/vendor/github.com/hashicorp/vault/vault/testing.go @@ -2,16 +2,26 @@ package vault import ( "bytes" + "crypto/ecdsa" + "crypto/elliptic" "crypto/rand" "crypto/sha256" "crypto/tls" "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" "encoding/pem" "fmt" + "io" + "io/ioutil" + "math/big" + mathrand "math/rand" "net" "net/http" + "os" "os/exec" - "testing" + "path/filepath" + "sync" "time" log "github.com/mgutz/logxi/v1" @@ -25,10 +35,14 @@ import ( "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/reload" "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" "github.com/hashicorp/vault/physical" + "github.com/mitchellh/go-testing-interface" + + physInmem "github.com/hashicorp/vault/physical/inmem" ) // This file contains a number of methods that are useful for unit @@ -70,24 +84,37 @@ oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F ) // TestCore returns a pure in-memory, uninitialized core for testing. -func TestCore(t testing.TB) *Core { - return TestCoreWithSeal(t, nil) +func TestCore(t testing.T) *Core { + return TestCoreWithSeal(t, nil, false) } -// TestCoreNewSeal returns an in-memory, ininitialized core with the new seal -// configuration. -func TestCoreNewSeal(t testing.TB) *Core { - return TestCoreWithSeal(t, &TestSeal{}) +// TestCoreRaw returns a pure in-memory, uninitialized core for testing. The raw +// storage endpoints are enabled with this core. +func TestCoreRaw(t testing.T) *Core { + return TestCoreWithSeal(t, nil, true) +} + +// TestCoreNewSeal returns a pure in-memory, uninitialized core with +// the new seal configuration. 
+func TestCoreNewSeal(t testing.T) *Core { + return TestCoreWithSeal(t, &TestSeal{}, false) } // TestCoreWithSeal returns a pure in-memory, uninitialized core with the // specified seal for testing. -func TestCoreWithSeal(t testing.TB, testSeal Seal) *Core { +func TestCoreWithSeal(t testing.T, testSeal Seal, enableRaw bool) *Core { logger := logformat.NewVaultLogger(log.LevelTrace) - physicalBackend := physical.NewInmem(logger) + physicalBackend, err := physInmem.NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } conf := testCoreConfig(t, physicalBackend, logger) + if enableRaw { + conf.EnableRaw = true + } + if testSeal != nil { conf.Seal = testSeal } @@ -100,7 +127,7 @@ func TestCoreWithSeal(t testing.TB, testSeal Seal) *Core { return c } -func testCoreConfig(t testing.TB, physicalBackend physical.Backend, logger log.Logger) *CoreConfig { +func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Logger) *CoreConfig { noopAudits := map[string]audit.Factory{ "noop": func(config *audit.BackendConfig) (audit.Backend, error) { view := &logical.InmemStorage{} @@ -108,14 +135,11 @@ func testCoreConfig(t testing.TB, physicalBackend physical.Backend, logger log.L Key: "salt", Value: []byte("foo"), }) - var err error - config.Salt, err = salt.NewSalt(view, &salt.Config{ + config.SaltConfig = &salt.Config{ HMAC: sha256.New, HMACType: "hmac-sha256", - }) - if err != nil { - t.Fatalf("error getting new salt: %v", err) } + config.SaltView = view return &noopAudit{ Config: config, }, nil @@ -134,7 +158,7 @@ func testCoreConfig(t testing.TB, physicalBackend physical.Backend, logger log.L for backendName, backendFactory := range noopBackends { logicalBackends[backendName] = backendFactory } - logicalBackends["generic"] = LeasedPassthroughBackendFactory + logicalBackends["kv"] = LeasedPassthroughBackendFactory for backendName, backendFactory := range testLogicalBackends { logicalBackends[backendName] = backendFactory } @@ -153,13 +177,13 @@ func 
testCoreConfig(t testing.TB, physicalBackend physical.Backend, logger log.L // TestCoreInit initializes the core with a single key, and returns // the key that must be used to unseal the core and a root token. -func TestCoreInit(t testing.TB, core *Core) ([][]byte, string) { - return TestCoreInitClusterWrapperSetup(t, core, nil, func() (http.Handler, http.Handler) { return nil, nil }) +func TestCoreInit(t testing.T, core *Core) ([][]byte, string) { + return TestCoreInitClusterWrapperSetup(t, core, nil, nil) } -func TestCoreInitClusterWrapperSetup(t testing.TB, core *Core, clusterAddrs []*net.TCPAddr, handlerSetupFunc func() (http.Handler, http.Handler)) ([][]byte, string) { +func TestCoreInitClusterWrapperSetup(t testing.T, core *Core, clusterAddrs []*net.TCPAddr, handler http.Handler) ([][]byte, string) { core.SetClusterListenerAddrs(clusterAddrs) - core.SetClusterSetupFuncs(handlerSetupFunc) + core.SetClusterHandler(handler) result, err := core.Initialize(&InitParams{ BarrierConfig: &SealConfig{ SecretShares: 3, @@ -177,14 +201,24 @@ func TestCoreInitClusterWrapperSetup(t testing.TB, core *Core, clusterAddrs []*n } func TestCoreUnseal(core *Core, key []byte) (bool, error) { - core.SetClusterSetupFuncs(func() (http.Handler, http.Handler) { return nil, nil }) return core.Unseal(key) } // TestCoreUnsealed returns a pure in-memory core that is already // initialized and unsealed. -func TestCoreUnsealed(t testing.TB) (*Core, [][]byte, string) { +func TestCoreUnsealed(t testing.T) (*Core, [][]byte, string) { core := TestCore(t) + return testCoreUnsealed(t, core) +} + +// TestCoreUnsealedRaw returns a pure in-memory core that is already +// initialized, unsealed, and with raw endpoints enabled. 
+func TestCoreUnsealedRaw(t testing.T) (*Core, [][]byte, string) { + core := TestCoreRaw(t) + return testCoreUnsealed(t, core) +} + +func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) { keys, token := TestCoreInit(t, core) for _, key := range keys { if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil { @@ -203,7 +237,7 @@ func TestCoreUnsealed(t testing.TB) (*Core, [][]byte, string) { return core, keys, token } -func TestCoreUnsealedBackend(t testing.TB, backend physical.Backend) (*Core, [][]byte, string) { +func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]byte, string) { logger := logformat.NewVaultLogger(log.LevelTrace) conf := testCoreConfig(t, backend, logger) conf.Seal = &TestSeal{} @@ -231,7 +265,7 @@ func TestCoreUnsealedBackend(t testing.TB, backend physical.Backend) (*Core, [][ return core, keys, token } -func testTokenStore(t testing.TB, c *Core) *TokenStore { +func testTokenStore(t testing.T, c *Core) *TokenStore { me := &MountEntry{ Table: credentialTableType, Path: "token/", @@ -254,21 +288,23 @@ func testTokenStore(t testing.TB, c *Core) *TokenStore { } ts := tokenstore.(*TokenStore) - router := NewRouter() - router.Mount(ts, "auth/token/", &MountEntry{Table: credentialTableType, UUID: ""}, ts.view) + err = c.router.Unmount("auth/token/") + if err != nil { + t.Fatal(err) + } + err = c.router.Mount(ts, "auth/token/", &MountEntry{Table: credentialTableType, UUID: "authtokenuuid", Path: "auth/token", Accessor: "authtokenaccessor"}, ts.view) + if err != nil { + t.Fatal(err) + } - subview := c.systemBarrierView.SubView(expirationSubPath) - logger := logformat.NewVaultLogger(log.LevelTrace) - - exp := NewExpirationManager(router, subview, ts, logger) - ts.SetExpirationManager(exp) + ts.SetExpirationManager(c.expiration) return ts } // TestCoreWithTokenStore returns an in-memory core that has a token store // mounted, so that logical token functions can be used -func TestCoreWithTokenStore(t 
testing.TB) (*Core, *TokenStore, [][]byte, string) { +func TestCoreWithTokenStore(t testing.T) (*Core, *TokenStore, [][]byte, string) { c, keys, root := TestCoreUnsealed(t) ts := testTokenStore(t, c) @@ -278,7 +314,7 @@ func TestCoreWithTokenStore(t testing.TB) (*Core, *TokenStore, [][]byte, string) // TestCoreWithBackendTokenStore returns a core that has a token store // mounted and used the provided physical backend, so that logical token // functions can be used -func TestCoreWithBackendTokenStore(t testing.TB, backend physical.Backend) (*Core, *TokenStore, [][]byte, string) { +func TestCoreWithBackendTokenStore(t testing.T, backend physical.Backend) (*Core, *TokenStore, [][]byte, string) { c, keys, root := TestCoreUnsealedBackend(t, backend) ts := testTokenStore(t, c) @@ -293,6 +329,51 @@ func TestKeyCopy(key []byte) []byte { return result } +func TestDynamicSystemView(c *Core) *dynamicSystemView { + me := &MountEntry{ + Config: MountConfig{ + DefaultLeaseTTL: 24 * time.Hour, + MaxLeaseTTL: 2 * 24 * time.Hour, + }, + } + + return &dynamicSystemView{c, me} +} + +func TestAddTestPlugin(t testing.T, c *Core, name, testFunc string) { + file, err := os.Open(os.Args[0]) + if err != nil { + t.Fatal(err) + } + defer file.Close() + + hash := sha256.New() + + _, err = io.Copy(hash, file) + if err != nil { + t.Fatal(err) + } + + sum := hash.Sum(nil) + + // Determine plugin directory path + fullPath, err := filepath.EvalSymlinks(os.Args[0]) + if err != nil { + t.Fatal(err) + } + directoryPath := filepath.Dir(fullPath) + + // Set core's plugin directory and plugin catalog directory + c.pluginDirectory = directoryPath + c.pluginCatalog.directory = directoryPath + + command := fmt.Sprintf("%s --test.run=%s", filepath.Base(os.Args[0]), testFunc) + err = c.pluginCatalog.Set(name, command, sum) + if err != nil { + t.Fatal(err) + } +} + var testLogicalBackends = map[string]logical.Factory{} // Starts the test server which responds to SSH authentication. 
@@ -400,11 +481,17 @@ func AddTestLogicalBackend(name string, factory logical.Factory) error { } type noopAudit struct { - Config *audit.BackendConfig + Config *audit.BackendConfig + salt *salt.Salt + saltMutex sync.RWMutex } -func (n *noopAudit) GetHash(data string) string { - return n.Config.Salt.GetIdentifiedHMAC(data) +func (n *noopAudit) GetHash(data string) (string, error) { + salt, err := n.Salt() + if err != nil { + return "", err + } + return salt.GetIdentifiedHMAC(data), nil } func (n *noopAudit) LogRequest(a *logical.Auth, r *logical.Request, e error) error { @@ -419,6 +506,32 @@ func (n *noopAudit) Reload() error { return nil } +func (n *noopAudit) Invalidate() { + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + n.salt = nil +} + +func (n *noopAudit) Salt() (*salt.Salt, error) { + n.saltMutex.RLock() + if n.salt != nil { + defer n.saltMutex.RUnlock() + return n.salt, nil + } + n.saltMutex.RUnlock() + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + if n.salt != nil { + return n.salt, nil + } + salt, err := salt.NewSalt(n.Config.SaltView, n.Config.SaltConfig) + if err != nil { + return nil, err + } + n.salt = salt + return salt, nil +} + type rawHTTP struct{} func (n *rawHTTP) HandleRequest(req *logical.Request) (*logical.Response, error) { @@ -446,6 +559,10 @@ func (n *rawHTTP) System() logical.SystemView { } } +func (n *rawHTTP) Logger() log.Logger { + return logformat.NewVaultLogger(log.LevelTrace) +} + func (n *rawHTTP) Cleanup() { // noop } @@ -459,6 +576,19 @@ func (n *rawHTTP) InvalidateKey(string) { // noop } +func (n *rawHTTP) Setup(config *logical.BackendConfig) error { + // noop + return nil +} + +func (n *rawHTTP) Type() logical.BackendType { + return logical.TypeUnknown +} + +func (n *rawHTTP) RegisterLicense(license interface{}) error { + return nil +} + func GenerateRandBytes(length int) ([]byte, error) { if length < 0 { return nil, fmt.Errorf("length must be >= 0") @@ -480,7 +610,8 @@ func GenerateRandBytes(length int) ([]byte, error) 
{ return buf, nil } -func TestWaitActive(t testing.TB, core *Core) { +func TestWaitActive(t testing.T, core *Core) { + t.Helper() start := time.Now() var standby bool var err error @@ -498,6 +629,83 @@ func TestWaitActive(t testing.TB, core *Core) { } } +type TestCluster struct { + BarrierKeys [][]byte + CACert *x509.Certificate + CACertBytes []byte + CACertPEM []byte + CACertPEMFile string + CAKey *ecdsa.PrivateKey + CAKeyPEM []byte + Cores []*TestClusterCore + ID string + RootToken string + RootCAs *x509.CertPool + TempDir string +} + +func (c *TestCluster) Start() { + for _, core := range c.Cores { + if core.Server != nil { + for _, ln := range core.Listeners { + go core.Server.Serve(ln) + } + } + } +} + +func (c *TestCluster) EnsureCoresSealed(t testing.T) { + t.Helper() + if err := c.ensureCoresSealed(); err != nil { + t.Fatal(err) + } +} + +func (c *TestCluster) Cleanup() { + // Close listeners + for _, core := range c.Cores { + if core.Listeners != nil { + for _, ln := range core.Listeners { + ln.Close() + } + } + } + + // Seal the cores + c.ensureCoresSealed() + + // Remove any temp dir that exists + if c.TempDir != "" { + os.RemoveAll(c.TempDir) + } + + // Give time to actually shut down/clean up before the next test + time.Sleep(time.Second) +} + +func (c *TestCluster) ensureCoresSealed() error { + for _, core := range c.Cores { + if err := core.Shutdown(); err != nil { + return err + } + timeout := time.Now().Add(60 * time.Second) + for { + if time.Now().After(timeout) { + return fmt.Errorf("timeout waiting for core to seal") + } + sealed, err := core.Sealed() + if err != nil { + return err + } + if sealed { + break + } + time.Sleep(250 * time.Millisecond) + } + } + return nil +} + type TestListener struct { net.Listener Address *net.TCPAddr @@ -505,189 +713,283 @@ type TestListener struct { type TestClusterCore struct { *Core - Listeners []*TestListener - Root string - BarrierKeys [][]byte - CACertBytes []byte - CACert *x509.Certificate - TLSConfig 
*tls.Config - ClusterID string - Client *api.Client + Client *api.Client + Handler http.Handler + Listeners []*TestListener + ReloadFuncs *map[string][]reload.ReloadFunc + ReloadFuncsLock *sync.RWMutex + Server *http.Server + ServerCert *x509.Certificate + ServerCertBytes []byte + ServerCertPEM []byte + ServerKey *ecdsa.PrivateKey + ServerKeyPEM []byte + TLSConfig *tls.Config } -func (t *TestClusterCore) CloseListeners() { - if t.Listeners != nil { - for _, ln := range t.Listeners { - ln.Close() +type TestClusterOptions struct { + KeepStandbysSealed bool + SkipInit bool + HandlerFunc func(*Core) http.Handler + BaseListenAddress string + NumCores int +} + +var DefaultNumCores = 3 + +type certInfo struct { + cert *x509.Certificate + certPEM []byte + certBytes []byte + key *ecdsa.PrivateKey + keyPEM []byte +} + +// NewTestCluster creates a new test cluster based on the provided core config +// and test cluster options. +func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster { + var numCores int + if opts == nil || opts.NumCores == 0 { + numCores = DefaultNumCores + } else { + numCores = opts.NumCores + } + + certIPs := []net.IP{ + net.IPv6loopback, + net.ParseIP("127.0.0.1"), + } + var baseAddr *net.TCPAddr + if opts != nil && opts.BaseListenAddress != "" { + var err error + baseAddr, err = net.ResolveTCPAddr("tcp", opts.BaseListenAddress) + if err != nil { + t.Fatal("could not parse given base IP") } - } - // Give time to actually shut down/clean up before the next test - time.Sleep(time.Second) -} - -func TestCluster(t testing.TB, handlers []http.Handler, base *CoreConfig, unsealStandbys bool) []*TestClusterCore { - if handlers == nil || len(handlers) != 3 { - t.Fatal("handlers must be size 3") + certIPs = append(certIPs, baseAddr.IP) } - // - // TLS setup - // - block, _ := pem.Decode([]byte(TestClusterCACert)) - if block == nil { - t.Fatal("error decoding cluster CA cert") + var testCluster TestCluster + tempDir, err := 
ioutil.TempDir("", "vault-test-cluster-") + if err != nil { + t.Fatal(err) + } + testCluster.TempDir = tempDir + + caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + testCluster.CAKey = caKey + caCertTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + DNSNames: []string{"localhost"}, + IPAddresses: certIPs, + KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign), + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, caKey.Public(), caKey) + if err != nil { + t.Fatal(err) } - caBytes := block.Bytes caCert, err := x509.ParseCertificate(caBytes) if err != nil { t.Fatal(err) } - - serverCert, err := tls.X509KeyPair([]byte(TestClusterServerCert), []byte(TestClusterServerKey)) + testCluster.CACert = caCert + testCluster.CACertBytes = caBytes + testCluster.RootCAs = x509.NewCertPool() + testCluster.RootCAs.AddCert(caCert) + caCertPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + testCluster.CACertPEM = pem.EncodeToMemory(caCertPEMBlock) + testCluster.CACertPEMFile = filepath.Join(testCluster.TempDir, "ca_cert.pem") + err = ioutil.WriteFile(testCluster.CACertPEMFile, testCluster.CACertPEM, 0755) + if err != nil { + t.Fatal(err) + } + marshaledCAKey, err := x509.MarshalECPrivateKey(caKey) + if err != nil { + t.Fatal(err) + } + caKeyPEMBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledCAKey, + } + testCluster.CAKeyPEM = pem.EncodeToMemory(caKeyPEMBlock) + err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "ca_key.pem"), testCluster.CAKeyPEM, 0755) if err != nil { t.Fatal(err) } - rootCAs := x509.NewCertPool() - rootCAs.AppendCertsFromPEM([]byte(TestClusterCACert)) - tlsConfig := &tls.Config{ - Certificates: 
[]tls.Certificate{serverCert}, - RootCAs: rootCAs, - ClientCAs: rootCAs, - ClientAuth: tls.VerifyClientCertIfGiven, - } - tlsConfig.BuildNameToCertificate() + var certInfoSlice []*certInfo - // Sanity checking - block, _ = pem.Decode([]byte(TestClusterServerCert)) - if block == nil { - t.Fatal(err) - } - parsedServerCert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - t.Fatal(err) - } - chains, err := parsedServerCert.Verify(x509.VerifyOptions{ - DNSName: "127.0.0.1", - Roots: rootCAs, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - }) - if err != nil { - t.Fatal(err) - } - if chains == nil || len(chains) == 0 { - t.Fatal("no verified chains for server auth") - } - chains, err = parsedServerCert.Verify(x509.VerifyOptions{ - DNSName: "127.0.0.1", - Roots: rootCAs, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - }) - if err != nil { - t.Fatal(err) - } - if chains == nil || len(chains) == 0 { - t.Fatal("no verified chains for chains auth") - } + // + // Certs generation + // + for i := 0; i < numCores; i++ { + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + DNSNames: []string{"localhost"}, + IPAddresses: certIPs, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, caCert, key.Public(), caKey) + if err != nil { + t.Fatal(err) + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + t.Fatal(err) + } + certPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + } + certPEM := 
pem.EncodeToMemory(certPEMBlock) + marshaledKey, err := x509.MarshalECPrivateKey(key) + if err != nil { + t.Fatal(err) + } + keyPEMBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledKey, + } + keyPEM := pem.EncodeToMemory(keyPEMBlock) - logger := logformat.NewVaultLogger(log.LevelTrace) + certInfoSlice = append(certInfoSlice, &certInfo{ + cert: cert, + certPEM: certPEM, + certBytes: certBytes, + key: key, + keyPEM: keyPEM, + }) + } // // Listener setup // - ln, err := net.ListenTCP("tcp", &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 0, - }) - if err != nil { - t.Fatal(err) - } - c1lns := []*TestListener{&TestListener{ - Listener: tls.NewListener(ln, tlsConfig), - Address: ln.Addr().(*net.TCPAddr), - }, - } - ln, err = net.ListenTCP("tcp", &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 0, - }) - if err != nil { - t.Fatal(err) - } - c1lns = append(c1lns, &TestListener{ - Listener: tls.NewListener(ln, tlsConfig), - Address: ln.Addr().(*net.TCPAddr), - }) - server1 := &http.Server{ - Handler: handlers[0], - } - if err := http2.ConfigureServer(server1, nil); err != nil { - t.Fatal(err) - } - for _, ln := range c1lns { - go server1.Serve(ln) + logger := logformat.NewVaultLogger(log.LevelTrace) + ports := make([]int, numCores) + if baseAddr != nil { + for i := 0; i < numCores; i++ { + ports[i] = baseAddr.Port + i + } + } else { + baseAddr = &net.TCPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: 0, + } } - ln, err = net.ListenTCP("tcp", &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 0, - }) - if err != nil { - t.Fatal(err) - } - c2lns := []*TestListener{&TestListener{ - Listener: tls.NewListener(ln, tlsConfig), - Address: ln.Addr().(*net.TCPAddr), - }, - } - server2 := &http.Server{ - Handler: handlers[1], - } - if err := http2.ConfigureServer(server2, nil); err != nil { - t.Fatal(err) - } - for _, ln := range c2lns { - go server2.Serve(ln) + listeners := [][]*TestListener{} + servers := []*http.Server{} + handlers := []http.Handler{} + 
tlsConfigs := []*tls.Config{} + certGetters := []*reload.CertificateGetter{} + for i := 0; i < numCores; i++ { + baseAddr.Port = ports[i] + ln, err := net.ListenTCP("tcp", baseAddr) + if err != nil { + t.Fatal(err) + } + certFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_cert.pem", i+1, ln.Addr().(*net.TCPAddr).Port)) + keyFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_key.pem", i+1, ln.Addr().(*net.TCPAddr).Port)) + err = ioutil.WriteFile(certFile, certInfoSlice[i].certPEM, 0755) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(keyFile, certInfoSlice[i].keyPEM, 0755) + if err != nil { + t.Fatal(err) + } + tlsCert, err := tls.X509KeyPair(certInfoSlice[i].certPEM, certInfoSlice[i].keyPEM) + if err != nil { + t.Fatal(err) + } + certGetter := reload.NewCertificateGetter(certFile, keyFile) + certGetters = append(certGetters, certGetter) + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{tlsCert}, + RootCAs: testCluster.RootCAs, + ClientCAs: testCluster.RootCAs, + ClientAuth: tls.VerifyClientCertIfGiven, + NextProtos: []string{"h2", "http/1.1"}, + GetCertificate: certGetter.GetCertificate, + } + tlsConfig.BuildNameToCertificate() + tlsConfigs = append(tlsConfigs, tlsConfig) + lns := []*TestListener{&TestListener{ + Listener: tls.NewListener(ln, tlsConfig), + Address: ln.Addr().(*net.TCPAddr), + }, + } + listeners = append(listeners, lns) + var handler http.Handler = http.NewServeMux() + handlers = append(handlers, handler) + server := &http.Server{ + Handler: handler, + } + servers = append(servers, server) + if err := http2.ConfigureServer(server, nil); err != nil { + t.Fatal(err) + } } - ln, err = net.ListenTCP("tcp", &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 0, - }) - if err != nil { - t.Fatal(err) - } - c3lns := []*TestListener{&TestListener{ - Listener: tls.NewListener(ln, tlsConfig), - Address: ln.Addr().(*net.TCPAddr), - }, - } - server3 := &http.Server{ - Handler: handlers[2], - 
} - if err := http2.ConfigureServer(server3, nil); err != nil { - t.Fatal(err) - } - for _, ln := range c3lns { - go server3.Serve(ln) - } - - // Create three cores with the same physical and different redirect/cluster addrs + // Create three cores with the same physical and different redirect/cluster + // addrs. // N.B.: On OSX, instead of random ports, it assigns new ports to new // listeners sequentially. Aside from being a bad idea in a security sense, // it also broke tests that assumed it was OK to just use the port above - // the redirect addr. This has now been changed to 10 ports above, but if + // the redirect addr. This has now been changed to 105 ports above, but if // we ever do more than three nodes in a cluster it may need to be bumped. + // Note: it's 105 so that we don't conflict with a running Consul by + // default. coreConfig := &CoreConfig{ LogicalBackends: make(map[string]logical.Factory), CredentialBackends: make(map[string]logical.Factory), AuditBackends: make(map[string]audit.Factory), - RedirectAddr: fmt.Sprintf("https://127.0.0.1:%d", c1lns[0].Address.Port), - ClusterAddr: fmt.Sprintf("https://127.0.0.1:%d", c1lns[0].Address.Port+10), + RedirectAddr: fmt.Sprintf("https://127.0.0.1:%d", listeners[0][0].Address.Port), + ClusterAddr: fmt.Sprintf("https://127.0.0.1:%d", listeners[0][0].Address.Port+105), DisableMlock: true, + EnableUI: true, } if base != nil { + coreConfig.DisableCache = base.DisableCache + coreConfig.EnableUI = base.EnableUI + coreConfig.DefaultLeaseTTL = base.DefaultLeaseTTL + coreConfig.MaxLeaseTTL = base.MaxLeaseTTL + coreConfig.CacheSize = base.CacheSize + coreConfig.PluginDirectory = base.PluginDirectory + coreConfig.Seal = base.Seal + coreConfig.DevToken = base.DevToken + + if !coreConfig.DisableMlock { + base.DisableMlock = false + } + if base.Physical != nil { coreConfig.Physical = base.Physical } @@ -723,36 +1025,43 @@ func TestCluster(t testing.TB, handlers []http.Handler, base *CoreConfig, unseal if base.Logger != 
nil { coreConfig.Logger = base.Logger } + + coreConfig.ClusterCipherSuites = base.ClusterCipherSuites + + coreConfig.DisableCache = base.DisableCache + + coreConfig.DevToken = base.DevToken } if coreConfig.Physical == nil { - coreConfig.Physical = physical.NewInmem(logger) + coreConfig.Physical, err = physInmem.NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } } if coreConfig.HAPhysical == nil { - coreConfig.HAPhysical = physical.NewInmemHA(logger) + haPhys, err := physInmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + coreConfig.HAPhysical = haPhys.(physical.HABackend) } - c1, err := NewCore(coreConfig) - if err != nil { - t.Fatalf("err: %v", err) - } - - coreConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", c2lns[0].Address.Port) - if coreConfig.ClusterAddr != "" { - coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", c2lns[0].Address.Port+10) - } - c2, err := NewCore(coreConfig) - if err != nil { - t.Fatalf("err: %v", err) - } - - coreConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", c3lns[0].Address.Port) - if coreConfig.ClusterAddr != "" { - coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", c3lns[0].Address.Port+10) - } - c3, err := NewCore(coreConfig) - if err != nil { - t.Fatalf("err: %v", err) + cores := []*Core{} + for i := 0; i < numCores; i++ { + coreConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port) + if coreConfig.ClusterAddr != "" { + coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port+105) + } + c, err := NewCore(coreConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + cores = append(cores, c) + if opts != nil && opts.HandlerFunc != nil { + handlers[i] = opts.HandlerFunc(c) + servers[i].Handler = handlers[i] + } } // @@ -763,72 +1072,97 @@ func TestCluster(t testing.TB, handlers []http.Handler, base *CoreConfig, unseal for i, ln := range lns { ret[i] = &net.TCPAddr{ IP: ln.Address.IP, - Port: ln.Address.Port 
+ 10, + Port: ln.Address.Port + 105, } } return ret } - c2.SetClusterListenerAddrs(clusterAddrGen(c2lns)) - c2.SetClusterSetupFuncs(WrapHandlerForClustering(handlers[1], logger)) - c3.SetClusterListenerAddrs(clusterAddrGen(c3lns)) - c3.SetClusterSetupFuncs(WrapHandlerForClustering(handlers[2], logger)) - keys, root := TestCoreInitClusterWrapperSetup(t, c1, clusterAddrGen(c1lns), WrapHandlerForClustering(handlers[0], logger)) - for _, key := range keys { - if _, err := c1.Unseal(TestKeyCopy(key)); err != nil { - t.Fatalf("unseal err: %s", err) + if numCores > 1 { + for i := 1; i < numCores; i++ { + cores[i].SetClusterListenerAddrs(clusterAddrGen(listeners[i])) + cores[i].SetClusterHandler(handlers[i]) } } - // Verify unsealed - sealed, err := c1.Sealed() - if err != nil { - t.Fatalf("err checking seal status: %s", err) - } - if sealed { - t.Fatal("should not be sealed") - } + if opts == nil || !opts.SkipInit { + keys, root := TestCoreInitClusterWrapperSetup(t, cores[0], clusterAddrGen(listeners[0]), handlers[0]) + barrierKeys, _ := copystructure.Copy(keys) + testCluster.BarrierKeys = barrierKeys.([][]byte) + testCluster.RootToken = root - TestWaitActive(t, c1) - - if unsealStandbys { - for _, key := range keys { - if _, err := c2.Unseal(TestKeyCopy(key)); err != nil { - t.Fatalf("unseal err: %s", err) + // Write root token and barrier keys + err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(root), 0755) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + for i, key := range testCluster.BarrierKeys { + buf.Write([]byte(base64.StdEncoding.EncodeToString(key))) + if i < len(testCluster.BarrierKeys)-1 { + buf.WriteRune('\n') } } + err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "barrier_keys"), buf.Bytes(), 0755) + if err != nil { + t.Fatal(err) + } + + // Unseal first core for _, key := range keys { - if _, err := c3.Unseal(TestKeyCopy(key)); err != nil { + if _, err := cores[0].Unseal(TestKeyCopy(key)); err != nil { 
t.Fatalf("unseal err: %s", err) } } - // Let them come fully up to standby - time.Sleep(2 * time.Second) + // Verify unsealed + sealed, err := cores[0].Sealed() + if err != nil { + t.Fatalf("err checking seal status: %s", err) + } + if sealed { + t.Fatal("should not be sealed") + } - // Ensure cluster connection info is populated - isLeader, _, err := c2.Leader() + TestWaitActive(t, cores[0]) + + // Unseal other cores unless otherwise specified + if (opts == nil || !opts.KeepStandbysSealed) && numCores > 1 { + for i := 1; i < numCores; i++ { + for _, key := range keys { + if _, err := cores[i].Unseal(TestKeyCopy(key)); err != nil { + t.Fatalf("unseal err: %s", err) + } + } + } + + // Let them come fully up to standby + time.Sleep(2 * time.Second) + + // Ensure cluster connection info is populated. + // Other cores should not come up as leaders. + for i := 1; i < numCores; i++ { + isLeader, _, _, err := cores[i].Leader() + if err != nil { + t.Fatal(err) + } + if isLeader { + t.Fatalf("core[%d] should not be leader", i) + } + } + } + + // + // Set test cluster core(s) and test cluster + // + cluster, err := cores[0].Cluster() if err != nil { t.Fatal(err) } - if isLeader { - t.Fatal("c2 should not be leader") - } - isLeader, _, err = c3.Leader() - if err != nil { - t.Fatal(err) - } - if isLeader { - t.Fatal("c3 should not be leader") - } + testCluster.ID = cluster.ID } - cluster, err := c1.Cluster() - if err != nil { - t.Fatal(err) - } - - getAPIClient := func(port int) *api.Client { + getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client { transport := cleanhttp.DefaultPooledTransport() transport.TLSClientConfig = tlsConfig client := &http.Client{ @@ -845,151 +1179,35 @@ func TestCluster(t testing.TB, handlers []http.Handler, base *CoreConfig, unseal if err != nil { t.Fatal(err) } - apiClient.SetToken(root) + if opts == nil || !opts.SkipInit { + apiClient.SetToken(testCluster.RootToken) + } return apiClient } var ret []*TestClusterCore - keyCopies, _ := 
copystructure.Copy(keys) - ret = append(ret, &TestClusterCore{ - Core: c1, - Listeners: c1lns, - Root: root, - BarrierKeys: keyCopies.([][]byte), - CACertBytes: caBytes, - CACert: caCert, - TLSConfig: tlsConfig, - ClusterID: cluster.ID, - Client: getAPIClient(c1lns[0].Address.Port), - }) + for i := 0; i < numCores; i++ { + tcc := &TestClusterCore{ + Core: cores[i], + ServerKey: certInfoSlice[i].key, + ServerKeyPEM: certInfoSlice[i].keyPEM, + ServerCert: certInfoSlice[i].cert, + ServerCertBytes: certInfoSlice[i].certBytes, + ServerCertPEM: certInfoSlice[i].certPEM, + Listeners: listeners[i], + Handler: handlers[i], + Server: servers[i], + TLSConfig: tlsConfigs[i], + Client: getAPIClient(listeners[i][0].Address.Port, tlsConfigs[i]), + } + tcc.ReloadFuncs = &cores[i].reloadFuncs + tcc.ReloadFuncsLock = &cores[i].reloadFuncsLock + tcc.ReloadFuncsLock.Lock() + (*tcc.ReloadFuncs)["listener|tcp"] = []reload.ReloadFunc{certGetters[i].Reload} + tcc.ReloadFuncsLock.Unlock() + ret = append(ret, tcc) + } - keyCopies, _ = copystructure.Copy(keys) - ret = append(ret, &TestClusterCore{ - Core: c2, - Listeners: c2lns, - Root: root, - BarrierKeys: keyCopies.([][]byte), - CACertBytes: caBytes, - CACert: caCert, - TLSConfig: tlsConfig, - ClusterID: cluster.ID, - Client: getAPIClient(c2lns[0].Address.Port), - }) - - keyCopies, _ = copystructure.Copy(keys) - ret = append(ret, &TestClusterCore{ - Core: c3, - Listeners: c3lns, - Root: root, - BarrierKeys: keyCopies.([][]byte), - CACertBytes: caBytes, - CACert: caCert, - TLSConfig: tlsConfig, - ClusterID: cluster.ID, - Client: getAPIClient(c3lns[0].Address.Port), - }) - - return ret + testCluster.Cores = ret + return &testCluster } - -const ( - TestClusterCACert = `-----BEGIN CERTIFICATE----- -MIIDPjCCAiagAwIBAgIUfIKsF2VPT7sdFcKOHJH2Ii6K4MwwDQYJKoZIhvcNAQEL -BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwNTQyWhgPMjA2 -NjA0MjAxNjA2MTJaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG 
-9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdS -xz9hfymuJb+cN8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP -67HDzVZhGBHlHTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xE -JsHQPYS9ASe2eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUb -cCcIZyk4QVFZ1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SY -WrCONRw61A5Zwx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABo4GBMH8w -DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOuKvPiU -G06iHkRXAOeMiUdBfHFyMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRXAOeMiUdBfHFy -MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB -AQBcN/UdAMzc7UjRdnIpZvO+5keBGhL/vjltnGM1dMWYHa60Y5oh7UIXF+P1RdNW -n7g80lOyvkSR15/r1rDkqOK8/4oruXU31EcwGhDOC4hU6yMUy4ltV/nBoodHBXNh -MfKiXeOstH1vdI6G0P6W93Bcww6RyV1KH6sT2dbETCw+iq2VN9CrruGIWzd67UT/ -spe/kYttr3UYVV3O9kqgffVVgVXg/JoRZ3J7Hy2UEXfh9UtWNanDlRuXaZgE9s/d -CpA30CHpNXvKeyNeW2ktv+2nAbSpvNW+e6MecBCTBIoDSkgU8ShbrzmDKVwNN66Q -5gn6KxUPBKHEtNzs5DgGM7nq ------END CERTIFICATE-----` - - TestClusterCAKey = `-----BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdSxz9hfymuJb+c -N8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP67HDzVZhGBHl -HTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xEJsHQPYS9ASe2 -eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUbcCcIZyk4QVFZ -1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SYWrCONRw61A5Z -wx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABAoIBAG3bCo7ljMQb6tel -CAUjL5Ilqz5a9ebOsONABRYLOclq4ePbatxawdJF7/sSLwZxKkIJnZtvr2Hkubxg -eOO8KC0YbVS9u39Rjc2QfobxHfsojpbWSuCJl+pvwinbkiUAUxXR7S/PtCPJKat/ -fGdYCiMQ/tqnynh4vR4+/d5o12c0KuuQ22/MdEf3GOadUamRXS1ET9iJWqla1pJW -TmzrlkGAEnR5PPO2RMxbnZCYmj3dArxWAnB57W+bWYla0DstkDKtwg2j2ikNZpXB -nkZJJpxR76IYD1GxfwftqAKxujKcyfqB0dIKCJ0UmfOkauNWjexroNLwaAOC3Nud -XIxppAECgYEA1wJ9EH6A6CrSjdzUocF9LtQy1LCDHbdiQFHxM5/zZqIxraJZ8Gzh -Q0d8JeOjwPdG4zL9pHcWS7+x64Wmfn0+Qfh6/47Vy3v90PIL0AeZYshrVZyJ/s6X -YkgFK80KEuWtacqIZ1K2UJyCw81u/ynIl2doRsIbgkbNeN0opjmqVTMCgYEA3CkW 
-2fETWK1LvmgKFjG1TjOotVRIOUfy4iN0kznPm6DK2PgTF5DX5RfktlmA8i8WPmB7 -YFOEdAWHf+RtoM/URa7EAGZncCWe6uggAcWqznTS619BJ63OmncpSWov5Byg90gJ -48qIMY4wDjE85ypz1bmBc2Iph974dtWeDtB7dsECgYAyKZh4EquMfwEkq9LH8lZ8 -aHF7gbr1YeWAUB3QB49H8KtacTg+iYh8o97pEBUSXh6hvzHB/y6qeYzPAB16AUpX -Jdu8Z9ylXsY2y2HKJRu6GjxAewcO9bAH8/mQ4INrKT6uIdx1Dq0OXZV8jR9KVLtB -55RCfeLhIBesDR0Auw9sVQKBgB0xTZhkgP43LF35Ca1btgDClNJGdLUztx8JOIH1 -HnQyY/NVIaL0T8xO2MLdJ131pGts+68QI/YGbaslrOuv4yPCQrcS3RBfzKy1Ttkt -TrLFhtoy7T7HqyeMOWtEq0kCCs3/PWB5EIoRoomfOcYlOOrUCDg2ge9EP4nyVVz9 -hAGBAoGBAJXw/ufevxpBJJMSyULmVWYr34GwLC1OhSE6AVVt9JkIYnc5L4xBKTHP -QNKKJLmFmMsEqfxHUNWmpiHkm2E0p37Zehui3kywo+A4ybHPTua70ZWQfZhKxLUr -PvJa8JmwiCM7kO8zjOv+edY1mMWrbjAZH1YUbfcTHmST7S8vp0F3 ------END RSA PRIVATE KEY-----` - - TestClusterServerCert = `-----BEGIN CERTIFICATE----- -MIIDtzCCAp+gAwIBAgIUBLqh6ctGWVDUxFhxJX7m6S/bnrcwDQYJKoZIhvcNAQEL -BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwOTI2WhgPMjA2 -NjA0MjAxNTA5NTZaMBsxGTAXBgNVBAMTEGNlcnQubXl2YXVsdC5jb20wggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDY3gPB29kkdbu0mPO6J0efagQhSiXB -9OyDuLf5sMk6CVDWVWal5hISkyBmw/lXgF7qC2XFKivpJOrcGQd5Ep9otBqyJLzI -b0IWdXuPIrVnXDwcdWr86ybX2iC42zKWfbXgjzGijeAVpl0UJLKBj+fk5q6NvkRL -5FUL6TRV7Krn9mrmnrV9J5IqV15pTd9W2aVJ6IqWvIPCACtZKulqWn4707uy2X2W -1Stq/5qnp1pDshiGk1VPyxCwQ6yw3iEcgecbYo3vQfhWcv7Q8LpSIM9ZYpXu6OmF -+czqRZS9gERl+wipmmrN1MdYVrTuQem21C/PNZ4jo4XUk1SFx6JrcA+lAgMBAAGj -gfUwgfIwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSe -Cl9WV3BjGCwmS/KrDSLRjfwyqjAfBgNVHSMEGDAWgBTrirz4lBtOoh5EVwDnjIlH -QXxxcjA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAKGH2h0dHA6Ly8xMjcuMC4w -LjE6ODIwMC92MS9wa2kvY2EwIQYDVR0RBBowGIIQY2VydC5teXZhdWx0LmNvbYcE -fwAAATAxBgNVHR8EKjAoMCagJKAihiBodHRwOi8vMTI3LjAuMC4xOjgyMDAvdjEv -cGtpL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAWGholPN8buDYwKbUiDavbzjsxUIX -lU4MxEqOHw7CD3qIYIauPboLvB9EldBQwhgOOy607Yvdg3rtyYwyBFwPhHo/hK3Z -6mn4hc6TF2V+AUdHBvGzp2dbYLeo8noVoWbQ/lBulggwlIHNNF6+a3kALqsqk1Ch -f/hzsjFnDhAlNcYFgG8TgfE2lE/FckvejPqBffo7Q3I+wVAw0buqiz5QL81NOT+D 
-Y2S9LLKLRaCsWo9wRU1Az4Rhd7vK5SEMh16jJ82GyEODWPvuxOTI1MnzfnbWyLYe -TTp6YBjGMVf1I6NEcWNur7U17uIOiQjMZ9krNvoMJ1A/cxCoZ98QHgcIPg== ------END CERTIFICATE-----` - - TestClusterServerKey = `-----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA2N4DwdvZJHW7tJjzuidHn2oEIUolwfTsg7i3+bDJOglQ1lVm -peYSEpMgZsP5V4Be6gtlxSor6STq3BkHeRKfaLQasiS8yG9CFnV7jyK1Z1w8HHVq -/Osm19oguNsyln214I8xoo3gFaZdFCSygY/n5Oaujb5ES+RVC+k0Veyq5/Zq5p61 -fSeSKldeaU3fVtmlSeiKlryDwgArWSrpalp+O9O7stl9ltUrav+ap6daQ7IYhpNV -T8sQsEOssN4hHIHnG2KN70H4VnL+0PC6UiDPWWKV7ujphfnM6kWUvYBEZfsIqZpq -zdTHWFa07kHpttQvzzWeI6OF1JNUhceia3APpQIDAQABAoIBAQCH3vEzr+3nreug -RoPNCXcSJXXY9X+aeT0FeeGqClzIg7Wl03OwVOjVwl/2gqnhbIgK0oE8eiNwurR6 -mSPZcxV0oAJpwiKU4T/imlCDaReGXn86xUX2l82KRxthNdQH/VLKEmzij0jpx4Vh -bWx5SBPdkbmjDKX1dmTiRYWIn/KjyNPvNvmtwdi8Qluhf4eJcNEUr2BtblnGOmfL -FdSu+brPJozpoQ1QdDnbAQRgqnh7Shl0tT85whQi0uquqIj1gEOGVjmBvDDnL3GV -WOENTKqsmIIoEzdZrql1pfmYTk7WNaD92bfpN128j8BF7RmAV4/DphH0pvK05y9m -tmRhyHGxAoGBAOV2BBocsm6xup575VqmFN+EnIOiTn+haOvfdnVsyQHnth63fOQx -PNtMpTPR1OMKGpJ13e2bV0IgcYRsRkScVkUtoa/17VIgqZXffnJJ0A/HT67uKBq3 -8o7RrtyK5N20otw0lZHyqOPhyCdpSsurDhNON1kPVJVYY4N1RiIxfut/AoGBAPHz -HfsJ5ZkyELE9N/r4fce04lprxWH+mQGK0/PfjS9caXPhj/r5ZkVMvzWesF3mmnY8 -goE5S35TuTvV1+6rKGizwlCFAQlyXJiFpOryNWpLwCmDDSzLcm+sToAlML3tMgWU -jM3dWHx3C93c3ft4rSWJaUYI9JbHsMzDW6Yh+GbbAoGBANIbKwxh5Hx5XwEJP2yu -kIROYCYkMy6otHLujgBdmPyWl+suZjxoXWoMl2SIqR8vPD+Jj6mmyNJy9J6lqf3f -DRuQ+fEuBZ1i7QWfvJ+XuN0JyovJ5Iz6jC58D1pAD+p2IX3y5FXcVQs8zVJRFjzB -p0TEJOf2oqORaKWRd6ONoMKvAoGALKu6aVMWdQZtVov6/fdLIcgf0pn7Q3CCR2qe -X3Ry2L+zKJYIw0mwvDLDSt8VqQCenB3n6nvtmFFU7ds5lvM67rnhsoQcAOaAehiS -rl4xxoJd5Ewx7odRhZTGmZpEOYzFo4odxRSM9c30/u18fqV1Mm0AZtHYds4/sk6P -aUj0V+kCgYBMpGrJk8RSez5g0XZ35HfpI4ENoWbiwB59FIpWsLl2LADEh29eC455 -t9Muq7MprBVBHQo11TMLLFxDIjkuMho/gcKgpYXCt0LfiNm8EZehvLJUXH+3WqUx -we6ywrbFCs6LaxaOCtTiLsN+GbZCatITL0UJaeBmTAbiw0KQjUuZPQ== ------END RSA PRIVATE KEY-----` -) diff --git a/vendor/github.com/hashicorp/vault/vault/token_store.go 
b/vendor/github.com/hashicorp/vault/vault/token_store.go index 46614ed..2708e48 100644 --- a/vendor/github.com/hashicorp/vault/vault/token_store.go +++ b/vendor/github.com/hashicorp/vault/vault/token_store.go @@ -3,13 +3,19 @@ package vault import ( "encoding/json" "fmt" + "sync" + "sync/atomic" + "regexp" "strings" "time" + log "github.com/mgutz/logxi/v1" + "github.com/armon/go-metrics" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/consts" "github.com/hashicorp/vault/helper/jsonutil" "github.com/hashicorp/vault/helper/locksutil" "github.com/hashicorp/vault/helper/parseutil" @@ -79,7 +85,6 @@ type TokenStore struct { *framework.Backend view *BarrierView - salt *salt.Salt expiration *ExpirationManager @@ -90,6 +95,14 @@ type TokenStore struct { tokenLocks []*locksutil.LockEntry cubbyholeDestroyer func(*TokenStore, string) error + + logger log.Logger + + saltLock sync.RWMutex + salt *salt.Salt + saltConfig *salt.Config + + tidyLock int64 } // NewTokenStore is used to construct a token store that is @@ -102,14 +115,15 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error) t := &TokenStore{ view: view, cubbyholeDestroyer: destroyCubbyhole, + logger: c.logger, + tokenLocks: locksutil.CreateLocks(), + saltLock: sync.RWMutex{}, } if c.policyStore != nil { t.policyLookupFunc = c.policyStore.GetPolicy } - t.tokenLocks = locksutil.CreateLocks() - // Setup the framework endpoints t.Backend = &framework.Backend{ AuthRenew: t.authRenew, @@ -126,7 +140,7 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error) lookupPrefix, accessorPrefix, parentPrefix, - "salt", + salt.DefaultLocation, }, }, @@ -469,18 +483,50 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error) } func (ts *TokenStore) Initialize() error { - // Setup the salt - salt, err := salt.NewSalt(ts.view, &salt.Config{ + ts.saltLock.Lock() + + // Setup the salt config + 
ts.saltConfig = &salt.Config{ HashFunc: salt.SHA1Hash, - }) - if err != nil { - return err + Location: salt.DefaultLocation, } - ts.salt = salt + ts.salt = nil + ts.saltLock.Unlock() return nil } +func (ts *TokenStore) Invalidate(key string) { + ts.logger.Trace("token: invalidating key", "key", key) + + switch key { + case tokenSubPath + salt.DefaultLocation: + ts.saltLock.Lock() + ts.salt = nil + ts.saltLock.Unlock() + } +} + +func (ts *TokenStore) Salt() (*salt.Salt, error) { + ts.saltLock.RLock() + if ts.salt != nil { + defer ts.saltLock.RUnlock() + return ts.salt, nil + } + ts.saltLock.RUnlock() + ts.saltLock.Lock() + defer ts.saltLock.Unlock() + if ts.salt != nil { + return ts.salt, nil + } + salt, err := salt.NewSalt(ts.view, ts.saltConfig) + if err != nil { + return nil, err + } + ts.salt = salt + return salt, nil +} + // TokenEntry is used to represent a given token type TokenEntry struct { // ID of this entry, generally a random UUID @@ -581,8 +627,13 @@ func (ts *TokenStore) SetExpirationManager(exp *ExpirationManager) { } // SaltID is used to apply a salt and hash to an ID to make sure its not reversible -func (ts *TokenStore) SaltID(id string) string { - return ts.salt.SaltID(id) +func (ts *TokenStore) SaltID(id string) (string, error) { + s, err := ts.Salt() + if err != nil { + return "", err + } + + return s.SaltID(id), nil } // RootToken is used to generate a new token with root privileges and no parent @@ -610,7 +661,7 @@ func (ts *TokenStore) tokenStoreAccessorList( ret := make([]string, 0, len(entries)) for _, entry := range entries { - aEntry, err := ts.lookupBySaltedAccessor(entry) + aEntry, err := ts.lookupBySaltedAccessor(entry, false) if err != nil { resp.AddWarning("Found an accessor entry that could not be successfully decoded") continue @@ -641,7 +692,11 @@ func (ts *TokenStore) createAccessor(entry *TokenEntry) error { entry.Accessor = accessorUUID // Create index entry, mapping the accessor to the token ID - path := accessorPrefix + 
ts.SaltID(entry.Accessor) + saltID, err := ts.SaltID(entry.Accessor) + if err != nil { + return err + } + path := accessorPrefix + saltID aEntry := &accessorEntry{ TokenID: entry.ID, @@ -672,9 +727,18 @@ func (ts *TokenStore) create(entry *TokenEntry) error { entry.ID = entryUUID } + saltedId, err := ts.SaltID(entry.ID) + if err != nil { + return err + } + exist, _ := ts.lookupSalted(saltedId, true) + if exist != nil { + return fmt.Errorf("cannot create a token with a duplicate ID") + } + entry.Policies = policyutil.SanitizePolicies(entry.Policies, policyutil.DoNotAddDefaultPolicy) - err := ts.createAccessor(entry) + err = ts.createAccessor(entry) if err != nil { return err } @@ -692,7 +756,10 @@ func (ts *TokenStore) store(entry *TokenEntry) error { // storeCommon handles the actual storage of an entry, possibly generating // secondary indexes func (ts *TokenStore) storeCommon(entry *TokenEntry, writeSecondary bool) error { - saltedId := ts.SaltID(entry.ID) + saltedId, err := ts.SaltID(entry.ID) + if err != nil { + return err + } // Marshal the entry enc, err := json.Marshal(entry) @@ -716,7 +783,11 @@ func (ts *TokenStore) storeCommon(entry *TokenEntry, writeSecondary bool) error } // Create the index entry - path := parentPrefix + ts.SaltID(entry.Parent) + "/" + saltedId + parentSaltedID, err := ts.SaltID(entry.Parent) + if err != nil { + return err + } + path := parentPrefix + parentSaltedID + "/" + saltedId le := &logical.StorageEntry{Key: path} if err := ts.view.Put(le); err != nil { return fmt.Errorf("failed to persist entry: %v", err) @@ -756,7 +827,12 @@ func (ts *TokenStore) UseToken(te *TokenEntry) (*TokenEntry, error) { defer lock.Unlock() // Call lookupSalted instead of Lookup to avoid deadlocking since Lookup grabs a read lock - te, err := ts.lookupSalted(ts.SaltID(te.ID), false) + saltedID, err := ts.SaltID(te.ID) + if err != nil { + return nil, err + } + + te, err = ts.lookupSalted(saltedID, false) if err != nil { return nil, fmt.Errorf("failed to 
refresh entry: %v", err) } @@ -808,15 +884,19 @@ func (ts *TokenStore) Lookup(id string) (*TokenEntry, error) { lock.RLock() defer lock.RUnlock() - return ts.lookupSalted(ts.SaltID(id), false) + saltedID, err := ts.SaltID(id) + if err != nil { + return nil, err + } + return ts.lookupSalted(saltedID, false) } // lookupSalted is used to find a token given its salted ID. If tainted is // true, entries that are in some revocation state (currently, indicated by num // uses < 0), the entry will be returned anyways -func (ts *TokenStore) lookupSalted(saltedId string, tainted bool) (*TokenEntry, error) { +func (ts *TokenStore) lookupSalted(saltedID string, tainted bool) (*TokenEntry, error) { // Lookup token - path := lookupPrefix + saltedId + path := lookupPrefix + saltedID raw, err := ts.view.Get(path) if err != nil { return nil, fmt.Errorf("failed to read entry: %v", err) @@ -838,6 +918,19 @@ func (ts *TokenStore) lookupSalted(saltedId string, tainted bool) (*TokenEntry, return nil, nil } + // If we are still restoring the expiration manager, we want to ensure the + // token is not expired + if ts.expiration == nil { + return nil, nil + } + check, err := ts.expiration.RestoreSaltedTokenCheck(entry.Path, saltedID) + if err != nil { + return nil, fmt.Errorf("failed to check token in restore mode: %v", err) + } + if !check { + return nil, nil + } + persistNeeded := false // Upgrade the deprecated fields @@ -891,7 +984,11 @@ func (ts *TokenStore) Revoke(id string) error { return fmt.Errorf("cannot revoke blank token") } - return ts.revokeSalted(ts.SaltID(id)) + saltedID, err := ts.SaltID(id) + if err != nil { + return err + } + return ts.revokeSalted(saltedID) } // revokeSalted is used to invalidate a given salted token, @@ -981,7 +1078,12 @@ func (ts *TokenStore) revokeSalted(saltedId string) (ret error) { // Clear the secondary index if any if entry.Parent != "" { - path := parentPrefix + ts.SaltID(entry.Parent) + "/" + saltedId + parentSaltedID, err := 
ts.SaltID(entry.Parent) + if err != nil { + return err + } + + path := parentPrefix + parentSaltedID + "/" + saltedId if err = ts.view.Delete(path); err != nil { return fmt.Errorf("failed to delete entry: %v", err) } @@ -989,7 +1091,12 @@ func (ts *TokenStore) revokeSalted(saltedId string) (ret error) { // Clear the accessor index if any if entry.Accessor != "" { - path := accessorPrefix + ts.SaltID(entry.Accessor) + accessorSaltedID, err := ts.SaltID(entry.Accessor) + if err != nil { + return err + } + + path := accessorPrefix + accessorSaltedID if err = ts.view.Delete(path); err != nil { return fmt.Errorf("failed to delete entry: %v", err) } @@ -1014,7 +1121,10 @@ func (ts *TokenStore) RevokeTree(id string) error { } // Get the salted ID - saltedId := ts.SaltID(id) + saltedId, err := ts.SaltID(id) + if err != nil { + return err + } // Nuke the entire tree recursively if err := ts.revokeTreeSalted(saltedId); err != nil { @@ -1064,11 +1174,15 @@ func (ts *TokenStore) handleCreateAgainstRole( return ts.handleCreateCommon(req, d, false, roleEntry) } -func (ts *TokenStore) lookupByAccessor(accessor string) (accessorEntry, error) { - return ts.lookupBySaltedAccessor(ts.SaltID(accessor)) +func (ts *TokenStore) lookupByAccessor(accessor string, tainted bool) (accessorEntry, error) { + saltedID, err := ts.SaltID(accessor) + if err != nil { + return accessorEntry{}, err + } + return ts.lookupBySaltedAccessor(saltedID, tainted) } -func (ts *TokenStore) lookupBySaltedAccessor(saltedAccessor string) (accessorEntry, error) { +func (ts *TokenStore) lookupBySaltedAccessor(saltedAccessor string, tainted bool) (accessorEntry, error) { entry, err := ts.view.Get(accessorPrefix + saltedAccessor) var aEntry accessorEntry @@ -1082,8 +1196,12 @@ func (ts *TokenStore) lookupBySaltedAccessor(saltedAccessor string) (accessorEnt err = jsonutil.DecodeJSON(entry.Value, &aEntry) // If we hit an error, assume it's a pre-struct straight token ID if err != nil { - aEntry.TokenID = 
string(entry.Value) - te, err := ts.lookupSalted(ts.SaltID(aEntry.TokenID), false) + saltedID, err := ts.SaltID(string(entry.Value)) + if err != nil { + return accessorEntry{}, err + } + + te, err := ts.lookupSalted(saltedID, tainted) if err != nil { return accessorEntry{}, fmt.Errorf("failed to look up token using accessor index: %s", err) } @@ -1093,6 +1211,7 @@ func (ts *TokenStore) lookupBySaltedAccessor(saltedAccessor string) (accessorEnt // on lookup is nil, not an error, so we keep that behavior here to be // safe...the token ID is simply not filled in. if te != nil { + aEntry.TokenID = te.ID aEntry.AccessorID = te.Accessor } } @@ -1103,49 +1222,80 @@ func (ts *TokenStore) lookupBySaltedAccessor(saltedAccessor string) (accessorEnt // handleTidy handles the cleaning up of leaked accessor storage entries and // cleaning up of leases that are associated to tokens that are expired. func (ts *TokenStore) handleTidy(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + var tidyErrors *multierror.Error + + if !atomic.CompareAndSwapInt64(&ts.tidyLock, 0, 1) { + ts.logger.Warn("token: tidy operation on tokens is already in progress") + return nil, fmt.Errorf("tidy operation on tokens is already in progress") + } + + defer atomic.CompareAndSwapInt64(&ts.tidyLock, 1, 0) + + ts.logger.Info("token: beginning tidy operation on tokens") + defer ts.logger.Info("token: finished tidy operation on tokens") + // List out all the accessors saltedAccessorList, err := ts.view.List(accessorPrefix) if err != nil { - return nil, fmt.Errorf("failed to fetch accessor entries: %v", err) + return nil, fmt.Errorf("failed to fetch accessor index entries: %v", err) } - var tidyErrors *multierror.Error - // First, clean up secondary index entries that are no longer valid parentList, err := ts.view.List(parentPrefix) if err != nil { return nil, fmt.Errorf("failed to fetch secondary index entries: %v", err) } + var countParentList, deletedCountParentList int64 + // 
Scan through the secondary index entries; if there is an entry // with the token's salt ID at the end, remove it for _, parent := range parentList { - children, err := ts.view.List(parentPrefix + parent + "/") + children, err := ts.view.List(parentPrefix + parent) if err != nil { - tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read child index entry: %v", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read secondary index: %v", err)) continue } for _, child := range children { + countParentList++ + if countParentList%500 == 0 { + ts.logger.Info("token: checking validity of tokens in secondary index list", "progress", countParentList) + } + // Look up tainted entries so we can be sure that if this isn't - // found, it doesn't exist + // found, it doesn't exist. Doing the following without locking + // since appropriate locks cannot be held with salted token IDs. te, _ := ts.lookupSalted(child, true) if te == nil { - err = ts.view.Delete(parentPrefix + parent + "/" + child) + index := parentPrefix + parent + child + ts.logger.Trace("token: deleting invalid secondary index", "index", index) + err = ts.view.Delete(index) if err != nil { - tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete secondary index entry: %v", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete secondary index: %v", err)) } + deletedCountParentList++ } } } + var countAccessorList, + deletedCountAccessorEmptyToken, + deletedCountAccessorInvalidToken, + deletedCountInvalidTokenInAccessor int64 + // For each of the accessor, see if the token ID associated with it is // a valid one. If not, delete the leases associated with that token // and delete the accessor as well. 
for _, saltedAccessor := range saltedAccessorList { - accessorEntry, err := ts.lookupBySaltedAccessor(saltedAccessor) + countAccessorList++ + if countAccessorList%500 == 0 { + ts.logger.Info("token: checking if accessors contain valid tokens", "progress", countAccessorList) + } + + accessorEntry, err := ts.lookupBySaltedAccessor(saltedAccessor, true) if err != nil { - tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read the accessor entry: %v", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read the accessor index: %v", err)) continue } @@ -1153,25 +1303,43 @@ func (ts *TokenStore) handleTidy(req *logical.Request, data *framework.FieldData // in it. If not, it is an invalid accessor entry and needs to // be deleted. if accessorEntry.TokenID == "" { + index := accessorPrefix + saltedAccessor // If deletion of accessor fails, move on to the next // item since this is just a best-effort operation - err = ts.view.Delete(accessorPrefix + saltedAccessor) + err = ts.view.Delete(index) if err != nil { - tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete the accessor entry: %v", err)) + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete the accessor index: %v", err)) continue } + deletedCountAccessorEmptyToken++ } - saltedId := ts.SaltID(accessorEntry.TokenID) + lock := locksutil.LockForKey(ts.tokenLocks, accessorEntry.TokenID) + lock.RLock() // Look up tainted variants so we only find entries that truly don't // exist + saltedId, err := ts.SaltID(accessorEntry.TokenID) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to read salt id: %v", err)) + lock.RUnlock() + continue + } te, err := ts.lookupSalted(saltedId, true) + if err != nil { + tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to lookup tainted ID: %v", err)) + lock.RUnlock() + continue + } + + lock.RUnlock() // If token entry is not found assume that the token is not valid any // more 
and conclude that accessor, leases, and secondary index entries // for this token should not exist as well. if te == nil { + ts.logger.Info("token: deleting token with nil entry", "salted_token", saltedId) + // RevokeByToken expects a '*TokenEntry'. For the // purposes of tidying, it is sufficient if the token // entry only has ID set. @@ -1186,26 +1354,31 @@ func (ts *TokenStore) handleTidy(req *logical.Request, data *framework.FieldData tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to revoke leases of expired token: %v", err)) continue } + deletedCountInvalidTokenInAccessor++ + + index := accessorPrefix + saltedAccessor // If deletion of accessor fails, move on to the next item since // this is just a best-effort operation. We do this last so that on // next run if something above failed we still have the accessor // entry to try again. - err = ts.view.Delete(accessorPrefix + saltedAccessor) + err = ts.view.Delete(index) if err != nil { tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete accessor entry: %v", err)) continue } + deletedCountAccessorInvalidToken++ } } - // Later request handling code seems to check if the type is multierror so - // if we haven't added any errors we need to just return a normal nil error - if tidyErrors == nil { - return nil, nil - } + ts.logger.Debug("token: number of tokens scanned in parent index list", "count", countParentList) + ts.logger.Debug("token: number of tokens revoked in parent index list", "count", deletedCountParentList) + ts.logger.Debug("token: number of accessors scanned", "count", countAccessorList) + ts.logger.Debug("token: number of deleted accessors which had empty tokens", "count", deletedCountAccessorEmptyToken) + ts.logger.Debug("token: number of revoked tokens which were invalid but present in accessors", "count", deletedCountInvalidTokenInAccessor) + ts.logger.Debug("token: number of deleted accessors which had invalid tokens", "count", deletedCountAccessorInvalidToken) 
- return nil, tidyErrors + return nil, tidyErrors.ErrorOrNil() } // handleUpdateLookupAccessor handles the auth/token/lookup-accessor path for returning @@ -1221,7 +1394,7 @@ func (ts *TokenStore) handleUpdateLookupAccessor(req *logical.Request, data *fra urlaccessor = true } - aEntry, err := ts.lookupByAccessor(accessor) + aEntry, err := ts.lookupByAccessor(accessor, false) if err != nil { return nil, err } @@ -1275,7 +1448,7 @@ func (ts *TokenStore) handleUpdateRevokeAccessor(req *logical.Request, data *fra urlaccessor = true } - aEntry, err := ts.lookupByAccessor(accessor) + aEntry, err := ts.lookupByAccessor(accessor, true) if err != nil { return nil, err } @@ -1833,7 +2006,10 @@ func (ts *TokenStore) handleLookup( defer lock.RUnlock() // Lookup the token - saltedId := ts.SaltID(id) + saltedId, err := ts.SaltID(id) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } out, err := ts.lookupSalted(saltedId, true) if err != nil { @@ -2194,6 +2370,10 @@ func (ts *TokenStore) tokenStoreRoleCreateUpdate( entry.PathSuffix = data.Get("path_suffix").(string) } + if strings.Contains(entry.PathSuffix, "..") { + return logical.ErrorResponse(fmt.Sprintf("error registering path suffix: %s", consts.ErrPathContainsParentReferences)), nil + } + allowedPoliciesStr, ok := data.GetOk("allowed_policies") if ok { entry.AllowedPolicies = policyutil.SanitizePolicies(strings.Split(allowedPoliciesStr.(string), ","), policyutil.DoNotAddDefaultPolicy) diff --git a/vendor/github.com/hashicorp/vault/vault/token_store_test.go b/vendor/github.com/hashicorp/vault/vault/token_store_test.go index 7a84fe7..ca4cbb2 100644 --- a/vendor/github.com/hashicorp/vault/vault/token_store_test.go +++ b/vendor/github.com/hashicorp/vault/vault/token_store_test.go @@ -3,6 +3,7 @@ package vault import ( "encoding/json" "fmt" + "path" "reflect" "sort" "strings" @@ -11,6 +12,7 @@ import ( "time" "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/locksutil" 
"github.com/hashicorp/vault/logical" ) @@ -54,7 +56,10 @@ func TestTokenStore_TokenEntryUpgrade(t *testing.T) { t.Fatal(err) } - saltedId := ts.SaltID(entry.ID) + saltedId, err := ts.SaltID(entry.ID) + if err != nil { + t.Fatal(err) + } path := lookupPrefix + saltedId le := &logical.StorageEntry{ Key: path, @@ -240,7 +245,7 @@ func TestTokenStore_AccessorIndex(t *testing.T) { t.Fatalf("bad: %#v", out) } - aEntry, err := ts.lookupByAccessor(out.Accessor) + aEntry, err := ts.lookupByAccessor(out.Accessor, false) if err != nil { t.Fatalf("err: %s", err) } @@ -294,7 +299,11 @@ func TestTokenStore_HandleRequest_ListAccessors(t *testing.T) { } // Revoke root to make the number of accessors match - ts.revokeSalted(ts.SaltID(root)) + salted, err := ts.SaltID(root) + if err != nil { + t.Fatal(err) + } + ts.revokeSalted(salted) req := logical.TestRequest(t, logical.ListOperation, "accessors") @@ -312,20 +321,24 @@ func TestTokenStore_HandleRequest_ListAccessors(t *testing.T) { if len(keys) != len(testKeys) { t.Fatalf("wrong number of accessors found") } - if len(resp.Warnings()) != 0 { - t.Fatalf("got warnings:\n%#v", resp.Warnings()) + if len(resp.Warnings) != 0 { + t.Fatalf("got warnings:\n%#v", resp.Warnings) } // Test upgrade from old struct method of accessor storage (of token id) for _, accessor := range keys { - aEntry, err := ts.lookupByAccessor(accessor) + aEntry, err := ts.lookupByAccessor(accessor, false) if err != nil { t.Fatal(err) } if aEntry.TokenID == "" || aEntry.AccessorID == "" { t.Fatalf("error, accessor entry looked up is empty, but no error thrown") } - path := accessorPrefix + ts.SaltID(accessor) + salted, err := ts.SaltID(accessor) + if err != nil { + t.Fatal(err) + } + path := accessorPrefix + salted le := &logical.StorageEntry{Key: path, Value: []byte(aEntry.TokenID)} if err := ts.view.Put(le); err != nil { t.Fatalf("failed to persist accessor index entry: %v", err) @@ -347,12 +360,12 @@ func TestTokenStore_HandleRequest_ListAccessors(t *testing.T) 
{ if len(keys) != len(testKeys) { t.Fatalf("wrong number of accessors found") } - if len(resp.Warnings()) != 0 { - t.Fatalf("got warnings:\n%#v", resp.Warnings()) + if len(resp.Warnings) != 0 { + t.Fatalf("got warnings:\n%#v", resp.Warnings) } for _, accessor := range keys2 { - aEntry, err := ts.lookupByAccessor(accessor) + aEntry, err := ts.lookupByAccessor(accessor, false) if err != nil { t.Fatal(err) } @@ -437,6 +450,8 @@ func TestTokenStore_CreateLookup(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } + ts2.SetExpirationManager(c.expiration) + if err := ts2.Initialize(); err != nil { t.Fatalf("err: %v", err) } @@ -465,6 +480,9 @@ func TestTokenStore_CreateLookup_ProvidedID(t *testing.T) { if ent.ID != "foobarbaz" { t.Fatalf("bad: ent.ID: expected:\"foobarbaz\"\n actual:%s", ent.ID) } + if err := ts.create(ent); err == nil { + t.Fatal("expected error creating token with the same ID") + } out, err := ts.Lookup(ent.ID) if err != nil { @@ -479,6 +497,8 @@ func TestTokenStore_CreateLookup_ProvidedID(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } + ts2.SetExpirationManager(c.expiration) + if err := ts2.Initialize(); err != nil { t.Fatalf("err: %v", err) } @@ -493,6 +513,73 @@ func TestTokenStore_CreateLookup_ProvidedID(t *testing.T) { } } +func TestTokenStore_CreateLookup_ExpirationInRestoreMode(t *testing.T) { + _, ts, _, _ := TestCoreWithTokenStore(t) + + ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}} + if err := ts.create(ent); err != nil { + t.Fatalf("err: %v", err) + } + if ent.ID == "" { + t.Fatalf("missing ID") + } + + // Replace the lease with a lease with an expire time in the past + saltedID, err := ts.SaltID(ent.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Create a lease entry + leaseID := path.Join(ent.Path, saltedID) + le := &leaseEntry{ + LeaseID: leaseID, + ClientToken: ent.ID, + Path: ent.Path, + IssueTime: time.Now(), + ExpireTime: time.Now().Add(1 * time.Hour), + } + if err := 
ts.expiration.persistEntry(le); err != nil { + t.Fatalf("err: %v", err) + } + + out, err := ts.Lookup(ent.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if !reflect.DeepEqual(out, ent) { + t.Fatalf("bad: expected:%#v\nactual:%#v", ent, out) + } + + // Set to expired lease time + le.ExpireTime = time.Now().Add(-1 * time.Hour) + if err := ts.expiration.persistEntry(le); err != nil { + t.Fatalf("err: %v", err) + } + + err = ts.expiration.Stop() + if err != nil { + t.Fatal(err) + } + + // Reset expiration manager to restore mode + ts.expiration.restoreModeLock.Lock() + ts.expiration.restoreMode = 1 + ts.expiration.restoreLocks = locksutil.CreateLocks() + ts.expiration.quitCh = make(chan struct{}) + ts.expiration.restoreModeLock.Unlock() + + // Test that the token lookup does not return the token entry due to the + // expired lease + out, err = ts.Lookup(ent.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if out != nil { + t.Fatalf("lease expired, no token expected: %#v", out) + } +} + func TestTokenStore_UseToken(t *testing.T) { _, ts, _, root := TestCoreWithTokenStore(t) @@ -605,7 +692,10 @@ func TestTokenStore_Revoke_Leases(t *testing.T) { // Mount a noop backend noop := &NoopBackend{} - ts.expiration.router.Mount(noop, "", &MountEntry{UUID: ""}, view) + err := ts.expiration.router.Mount(noop, "noop/", &MountEntry{UUID: "noopuuid", Accessor: "noopaccessor"}, view) + if err != nil { + t.Fatal(err) + } ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}} if err := ts.create(ent); err != nil { @@ -615,7 +705,7 @@ func TestTokenStore_Revoke_Leases(t *testing.T) { // Register a lease req := &logical.Request{ Operation: logical.ReadOperation, - Path: "secret/foo", + Path: "noop/foo", ClientToken: ent.ID, } resp := &logical.Response{ @@ -2347,7 +2437,7 @@ func TestTokenStore_RoleExplicitMaxTTL(t *testing.T) { if err != nil { t.Fatalf("expected an error") } - if len(resp.Warnings()) == 0 { + if len(resp.Warnings) == 0 { t.Fatalf("expected a 
warning") } @@ -2513,9 +2603,14 @@ func TestTokenStore_RoleExplicitMaxTTL(t *testing.T) { t.Fatalf("expected error") } + time.Sleep(2 * time.Second) + req.Operation = logical.ReadOperation req.Path = "auth/token/lookup-self" resp, err = core.HandleRequest(req) + if resp != nil && err == nil { + t.Fatalf("expected error, response is %#v", *resp) + } if err == nil { t.Fatalf("expected error") } @@ -3098,7 +3193,10 @@ func TestTokenStore_RevokeUseCountToken(t *testing.T) { } tut := resp.Auth.ClientToken - saltTut := ts.SaltID(tut) + saltTut, err := ts.SaltID(tut) + if err != nil { + t.Fatal(err) + } te, err := ts.lookupSalted(saltTut, false) if err != nil { t.Fatal(err) @@ -3293,7 +3391,10 @@ func TestTokenStore_HandleTidyCase1(t *testing.T) { // cubbyhole and by not deleting its secondary index, its accessor and // associated leases. - saltedTut := ts.SaltID(tut) + saltedTut, err := ts.SaltID(tut) + if err != nil { + t.Fatal(err) + } _, err = ts.lookupSalted(saltedTut, true) if err != nil { t.Fatalf("failed to lookup token: %v", err) @@ -3363,7 +3464,10 @@ func TestTokenStore_TidyLeaseRevocation(t *testing.T) { if err != nil { t.Fatal(err) } - exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID}, view) + err = exp.router.Mount(noop, "prod/aws/", &MountEntry{UUID: meUUID, Accessor: "awsaccessor"}, view) + if err != nil { + t.Fatal(err) + } // Create new token root, err := ts.rootToken() @@ -3429,7 +3533,10 @@ func TestTokenStore_TidyLeaseRevocation(t *testing.T) { } // Now, delete the token entry. The leases should still exist. 
- saltedTut := ts.SaltID(tut) + saltedTut, err := ts.SaltID(tut) + if err != nil { + t.Fatal(err) + } te, err := ts.lookupSalted(saltedTut, true) if err != nil { t.Fatalf("failed to lookup token: %v", err) diff --git a/vendor/github.com/hashicorp/vault/vault/wrapping.go b/vendor/github.com/hashicorp/vault/vault/wrapping.go index 46409c3..5171593 100644 --- a/vendor/github.com/hashicorp/vault/vault/wrapping.go +++ b/vendor/github.com/hashicorp/vault/vault/wrapping.go @@ -115,6 +115,10 @@ func (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*l resp.WrapInfo.Token = te.ID resp.WrapInfo.CreationTime = creationTime + // If this is not a rewrap, store the request path as creation_path + if req.Path != "sys/wrapping/rewrap" { + resp.WrapInfo.CreationPath = req.Path + } // This will only be non-nil if this response contains a token, so in that // case put the accessor in the wrap info. @@ -200,6 +204,12 @@ func (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*l "creation_ttl": resp.WrapInfo.TTL, "creation_time": creationTime, } + // Store creation_path if not a rewrap + if req.Path != "sys/wrapping/rewrap" { + cubbyReq.Data["creation_path"] = req.Path + } else { + cubbyReq.Data["creation_path"] = resp.WrapInfo.CreationPath + } cubbyResp, err = c.router.Route(cubbyReq) if err != nil { // Revoke since it's not yet being tracked for expiration @@ -233,6 +243,7 @@ func (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*l return nil, nil } +// ValidateWrappingToken checks whether a token is a wrapping token. 
func (c *Core) ValidateWrappingToken(req *logical.Request) (bool, error) { if req == nil { return false, fmt.Errorf("invalid request") diff --git a/vendor/github.com/hashicorp/vault/version/version_base.go b/vendor/github.com/hashicorp/vault/version/version_base.go index bd1d2ca..07173e6 100644 --- a/vendor/github.com/hashicorp/vault/version/version_base.go +++ b/vendor/github.com/hashicorp/vault/version/version_base.go @@ -4,7 +4,7 @@ package version func init() { // The main version number that is being run at the moment. - Version = "0.7.0" + Version = "0.8.3" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/vendor/github.com/hashicorp/vault/website/Gemfile b/vendor/github.com/hashicorp/vault/website/Gemfile index 405a8c9..a2c4d15 100644 --- a/vendor/github.com/hashicorp/vault/website/Gemfile +++ b/vendor/github.com/hashicorp/vault/website/Gemfile @@ -1,3 +1,3 @@ source "https://rubygems.org" -gem "middleman-hashicorp", "0.3.22" +gem "middleman-hashicorp", "0.3.28" diff --git a/vendor/github.com/hashicorp/vault/website/Gemfile.lock b/vendor/github.com/hashicorp/vault/website/Gemfile.lock index 229218a..dea1709 100644 --- a/vendor/github.com/hashicorp/vault/website/Gemfile.lock +++ b/vendor/github.com/hashicorp/vault/website/Gemfile.lock @@ -6,7 +6,7 @@ GEM minitest (~> 5.1) thread_safe (~> 0.3, >= 0.3.4) tzinfo (~> 1.1) - autoprefixer-rails (6.7.7.1) + autoprefixer-rails (7.1.1.2) execjs bootstrap-sass (3.3.7) autoprefixer-rails (>= 5.2.1) @@ -42,14 +42,15 @@ GEM eventmachine (1.2.3) execjs (2.7.0) ffi (1.9.18) - haml (4.0.7) + haml (5.0.1) + temple (>= 0.8.0) tilt hike (1.2.3) hooks (0.4.1) uber (~> 0.0.14) http_parser.rb (0.6.0) i18n (0.7.0) - json (2.0.3) + json (2.1.0) kramdown (1.13.2) listen (3.0.8) rb-fsevent (~> 0.9, >= 0.9.4) @@ -77,7 +78,7 @@ GEM rack (>= 1.4.5, < 2.0) thor (>= 0.15.2, < 2.0) tilt (~> 1.4.1, < 2.0) - middleman-hashicorp 
(0.3.22) + middleman-hashicorp (0.3.28) bootstrap-sass (~> 3.3) builder (~> 3.2) middleman (~> 3.4) @@ -100,28 +101,28 @@ GEM mime-types (3.1) mime-types-data (~> 3.2015) mime-types-data (3.2016.0521) - mini_portile2 (2.1.0) - minitest (5.10.1) + mini_portile2 (2.2.0) + minitest (5.10.2) multi_json (1.12.1) - nokogiri (1.7.1) - mini_portile2 (~> 2.1.0) + nokogiri (1.8.0) + mini_portile2 (~> 2.2.0) padrino-helpers (0.12.8.1) i18n (~> 0.6, >= 0.6.7) padrino-support (= 0.12.8.1) tilt (~> 1.4.1) padrino-support (0.12.8.1) activesupport (>= 3.1) - rack (1.6.5) + rack (1.6.8) rack-livereload (0.3.16) rack rack-test (0.6.3) rack (>= 1.0) rb-fsevent (0.9.8) - rb-inotify (0.9.8) - ffi (>= 0.5.0) + rb-inotify (0.9.10) + ffi (>= 0.5.0, < 2) redcarpet (3.4.0) - rouge (2.0.7) - sass (3.4.23) + rouge (2.1.1) + sass (3.4.24) sprockets (2.12.4) hike (~> 1.2) multi_json (~> 1.0) @@ -132,26 +133,27 @@ GEM sprockets-sass (1.3.1) sprockets (~> 2.0) tilt (~> 1.1) + temple (0.8.0) thor (0.19.4) thread_safe (0.3.6) tilt (1.4.1) turbolinks (5.0.1) turbolinks-source (~> 5) - turbolinks-source (5.0.0) + turbolinks-source (5.0.3) tzinfo (1.2.3) thread_safe (~> 0.1) uber (0.0.15) uglifier (2.7.2) execjs (>= 0.3.0) json (>= 1.8.0) - xpath (2.0.0) + xpath (2.1.0) nokogiri (~> 1.3) PLATFORMS ruby DEPENDENCIES - middleman-hashicorp (= 0.3.22) + middleman-hashicorp (= 0.3.28) BUNDLED WITH - 1.14.6 + 1.15.1 diff --git a/vendor/github.com/hashicorp/vault/website/Makefile b/vendor/github.com/hashicorp/vault/website/Makefile index d7620d1..4d3d361 100644 --- a/vendor/github.com/hashicorp/vault/website/Makefile +++ b/vendor/github.com/hashicorp/vault/website/Makefile @@ -1,4 +1,4 @@ -VERSION?="0.3.22" +VERSION?="0.3.28" build: @echo "==> Starting build in Docker..." 
diff --git a/vendor/github.com/hashicorp/vault/website/config.rb b/vendor/github.com/hashicorp/vault/website/config.rb index 1ca9c0c..a961753 100644 --- a/vendor/github.com/hashicorp/vault/website/config.rb +++ b/vendor/github.com/hashicorp/vault/website/config.rb @@ -2,7 +2,7 @@ set :base_url, "https://www.vaultproject.io/" activate :hashicorp do |h| h.name = "vault" - h.version = "0.7.0" + h.version = "0.8.3" h.github_slug = "hashicorp/vault" h.website_root = "website" end diff --git a/vendor/github.com/hashicorp/vault/website/data/news.yml b/vendor/github.com/hashicorp/vault/website/data/news.yml new file mode 100644 index 0000000..1958547 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/data/news.yml @@ -0,0 +1,23 @@ +default_link_text: "Read more" +posts: + - + title: "Vault 0.8.1 released" + body: >- + We are proud to announce the release of HashiCorp Vault 0.8.1. This + version includes Google Cloud IAM authentication, Oracle database + backends, self-reloading plugins, and much more! + link_url: "https://www.hashicorp.com/blog/vault-0-8-1/" + - + title: "Vault 0.8.0 released" + body: >- + We are proud to announce the release of HashiCorp Vault 0.8. This version + of Vault brings secure plugins, disaster recovery, mount filters for + replication, and MFA on paths. + link_url: "https://www.hashicorp.com/blog/vault-0-8/" + - + title: "Why New Relic uses Vault for secrets management" + body: >- + As New Relic's systems and infrastructure grew, they faced challenges with + securely storing and managing credentials. Vault provides them with a + consistent approach to manage secrets and credentials. 
+ link_url: "https://www.hashicorp.com/blog/hashicorp-vault-helps-new-relic-manage-secrets-for-their-digital-intelligence-platform/" diff --git a/vendor/github.com/hashicorp/vault/website/packer.json b/vendor/github.com/hashicorp/vault/website/packer.json index 35de632..fd2618f 100644 --- a/vendor/github.com/hashicorp/vault/website/packer.json +++ b/vendor/github.com/hashicorp/vault/website/packer.json @@ -8,17 +8,14 @@ "builders": [ { "type": "docker", - "image": "hashicorp/middleman-hashicorp:0.3.22", + "image": "hashicorp/middleman-hashicorp:0.3.28", "discard": "true", - "run_command": ["-d", "-i", "-t", "{{ .Image }}", "/bin/sh"] + "volumes": { + "{{ pwd }}": "/website" + } } ], "provisioners": [ - { - "type": "file", - "source": ".", - "destination": "/website" - }, { "type": "shell", "environment_vars": [ @@ -30,7 +27,7 @@ "inline": [ "bundle check || bundle install", "bundle exec middleman build", - "/bin/sh ./scripts/deploy.sh" + "/bin/bash ./scripts/deploy.sh" ] } ] diff --git a/vendor/github.com/hashicorp/vault/website/redirects.txt b/vendor/github.com/hashicorp/vault/website/redirects.txt new file mode 100644 index 0000000..fe5d039 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/redirects.txt @@ -0,0 +1,100 @@ +# +# REDIRECTS FILE +# +# This is a sample redirect file. Redirects allow individual projects to add +# their own redirect rules in a declarative manner using Fastly edge +# dictionaries. +# +# FORMAT +# +# Redirects are in the format. There must be at least one space between the +# original path and the new path, and there must be exactly two entries per +# line. +# +# /original-path /new-path +# +# GLOB MATCHING +# +# Because of the way lookup tables work, there is no support for glob matching. +# Fastly does not provide a way to iterate through the lookup table, so it is +# not possible to run through the table and find anything that matches. As such +# URLs must match directly. 
+# +# More complex redirects are possible, but must be added directly to the +# configuration. Please contact the release engineering team for assistance. +# +# DELETING +# +# Deleting items is not supported at this time. To delete an item, contact the +# release engineering team and they will delete the dictionary item. +# +# MISC +# +# - Blank lines are ignored +# - Comments are hash-style +# - URLs are limited to 256 characters +# - Items are case-sensitive (please use all lowercase) +# + +/api/secret/generic/index.html /api/secret/kv/index.html +/api/system/renew.html /api/system/leases.html +/api/system/revoke.html /api/system/leases.html +/api/system/revoke-force.html /api/system/leases.html +/api/system/revoke-prefix.html /api/system/leases.html +/docs/config/index.html /docs/configuration/index.html +/docs/auth/aws-ec2.html /docs/auth/aws.html +/docs/install/install.html /docs/install/index.html +/docs/install/upgrade.html /guides/upgrading/index.html +/docs/install/upgrade-to-0.5.html /guides/upgrading/upgrade-to-0.5.0.html +/docs/install/upgrade-to-0.5.1.html /guides/upgrading/upgrade-to-0.5.1.html +/docs/install/upgrade-to-0.6.html /guides/upgrading/upgrade-to-0.6.0.html +/docs/install/upgrade-to-0.6.1.html /guides/upgrading/upgrade-to-0.6.1.html +/docs/install/upgrade-to-0.6.2.html /guides/upgrading/upgrade-to-0.6.2.html +/docs/http/sys-init.html /api/system/init.html +/docs/http/sys-seal-status.html /api/system/seal-status.html +/docs/http/sys-seal.html /api/system/seal.html +/docs/http/sys-unseal.html /api/system/unseal.html +/docs/http/sys-mounts.html /api/system/mounts.html +/docs/http/sys-remount.html /api/system/remount.html +/docs/http/sys-auth.html /api/system/auth.html +/docs/http/sys-policy.html /api/system/policy.html +/docs/http/sys-audit.html /api/system/audit.html +/docs/http/sys-renew.html /api/system/leases.html +/docs/http/sys-revoke.html /api/system/leases.html +/docs/http/sys-revoke-prefix.html /api/system/leases.html 
+/docs/http/sys-leader.html /api/system/leader.html +/docs/http/sys-key-status.html /api/system/key-status.html +/docs/http/sys-rekey.html /api/system/rekey.html +/docs/http/sys-rotate.html /api/system/rotate.html +/docs/http/sys-raw.html /api/system/raw.html +/docs/http/sys-health.html /api/system/health.html +/docs/guides/generate-root.html /guides/generate-root.html +/docs/guides/index.html /guides/index.html +/docs/guides/production.html /guides/production.html +/docs/guides/replication.html /guides/replication.html +/docs/guides/upgrading/index.html /guides/upgrading/index.html +/docs/guides/upgrading/upgrade-to-0.5.0.html /guides/upgrading/upgrade-to-0.5.0.html +/docs/guides/upgrading/upgrade-to-0.5.1.html /guides/upgrading/upgrade-to-0.5.1.html +/docs/guides/upgrading/upgrade-to-0.6.0.html /guides/upgrading/upgrade-to-0.6.0.html +/docs/guides/upgrading/upgrade-to-0.6.1.html /guides/upgrading/upgrade-to-0.6.1.html +/docs/guides/upgrading/upgrade-to-0.6.2.html /guides/upgrading/upgrade-to-0.6.2.html +/docs/guides/upgrading/upgrade-to-0.6.3.html /guides/upgrading/upgrade-to-0.6.3.html +/docs/guides/upgrading/upgrade-to-0.6.4.html /guides/upgrading/upgrade-to-0.6.4.html +/docs/guides/upgrading/upgrade-to-0.7.0.html /guides/upgrading/upgrade-to-0.7.0.html +/docs/secrets/custom.html /docs/plugin/index.html +/docs/secrets/generic/index.html /docs/secrets/kv/index.html +/intro/getting-started/acl.html /intro/getting-started/policies.html + +/docs/vault-enterprise/index.html /docs/enterprise/index.html +/docs/vault-enterprise/replication/index.html /docs/enterprise/replication/index.html +/docs/vault-enterprise/hsm/index.html /docs/enterprise/hsm/index.html +/docs/vault-enterprise/hsm/behavior.html /docs/enterprise/hsm/behavior.html +/docs/vault-enterprise/hsm/configuration.html /docs/enterprise/hsm/configuration.html +/docs/vault-enterprise/hsm/security.html /docs/enterprise/hsm/security.html +/docs/vault-enterprise/ui/index.html /docs/enterprise/ui/index.html 
+/docs/vault-enterprise/identity/index.html /docs/enterprise/identity/index.html +/docs/vault-enterprise/mfa/index.html /docs/enterprise/mfa/index.html +/docs/vault-enterprise/mfa/mfa-duo.html /docs/enterprise/mfa/mfa-duo.html +/docs/vault-enterprise/mfa/mfa-okta.html /docs/enterprise/mfa/mfa-okta.html +/docs/vault-enterprise/mfa/mfa-pingid.html /docs/enterprise/mfa/mfa-pingid.html +/docs/vault-enterprise/mfa/mfa-totp.html /docs/enterprise/mfa/mfa-totp.html diff --git a/vendor/github.com/hashicorp/vault/website/scripts/deploy.sh b/vendor/github.com/hashicorp/vault/website/scripts/deploy.sh index 383ad8a..689ab1c 100755 --- a/vendor/github.com/hashicorp/vault/website/scripts/deploy.sh +++ b/vendor/github.com/hashicorp/vault/website/scripts/deploy.sh @@ -1,9 +1,10 @@ -#!/bin/bash +#!/usr/bin/env bash set -e PROJECT="vault" PROJECT_URL="www.vaultproject.io" FASTLY_SERVICE_ID="7GrxRJP3PVBuqQbyxYQ0MV" +FASTLY_DICTIONARY_ID="4uTFhCUtoa1cV9DuXeC1Fo" # Ensure the proper AWS environment variables are set if [ -z "$AWS_ACCESS_KEY_ID" ]; then @@ -93,6 +94,75 @@ if [ -z "$NO_UPLOAD" ]; then modify "s3://hc-sites/$PROJECT/latest/" fi +# Add redirects if they exist +if [ -z "$NO_REDIRECTS" ] || [ ! test -f "./redirects.txt" ]; then + echo "Adding redirects..." + fields=() + while read -r line; do + [[ "$line" =~ ^#.* ]] && continue + [[ -z "$line" ]] && continue + + # Read fields + IFS=" " read -ra parts <<<"$line" + fields+=("${parts[@]}") + done < "./redirects.txt" + + # Check we have pairs + if [ $((${#fields[@]} % 2)) -ne 0 ]; then + echo "Bad redirects (not an even number)!" + exit 1 + fi + + # Check we don't have more than 1000 entries (yes, it says 2000 below, but that + # is because we've split into multiple lines). + if [ "${#fields}" -gt 2000 ]; then + echo "More than 1000 entries!" + exit 1 + fi + + # Validations + for field in "${fields[@]}"; do + if [ "${#field}" -gt 256 ]; then + echo "'$field' is > 256 characters!" 
+ exit 1 + fi + + if [ "${field:0:1}" != "/" ]; then + echo "'$field' does not start with /!" + exit 1 + fi + done + + # Build the payload for single-request updates. + jq_args=() + jq_query="." + for (( i=0; i<${#fields[@]}; i+=2 )); do + original="${fields[i]}" + redirect="${fields[i+1]}" + echo "Redirecting ${original} -> ${redirect}" + jq_args+=(--arg "key$((i/2))" "${original}") + jq_args+=(--arg "value$((i/2))" "${redirect}") + jq_query+="| .items |= (. + [{op: \"upsert\", item_key: \$key$((i/2)), item_value: \$value$((i/2))}])" + done + + # Do not post empty items (the API gets sad) + if [ "${#jq_args[@]}" -ne 0 ]; then + json="$(jq "${jq_args[@]}" "${jq_query}" <<<'{"items": []}')" + + # Post the JSON body + curl \ + --fail \ + --silent \ + --output /dev/null \ + --request "PATCH" \ + --header "Fastly-Key: $FASTLY_API_KEY" \ + --header "Content-type: application/json" \ + --header "Accept: application/json" \ + --data "$json"\ + "https://api.fastly.com/service/$FASTLY_SERVICE_ID/dictionary/$FASTLY_DICTIONARY_ID/items" + fi +fi + # Perform a purge of the surrogate key. if [ -z "$NO_PURGE" ]; then echo "Purging Fastly cache..." 
@@ -118,8 +188,13 @@ if [ -z "$NO_WARM" ]; then echo "wget --recursive --delete-after https://$PROJECT_URL/" echo "" wget \ - --recursive \ --delete-after \ - --quiet \ + --level inf \ + --no-directories \ + --no-host-directories \ + --no-verbose \ + --page-requisites \ + --recursive \ + --spider \ "https://$PROJECT_URL/" fi diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/app-id/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/app-id/index.html.md new file mode 100644 index 0000000..4d509f0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/app-id/index.html.md @@ -0,0 +1,17 @@ +--- +layout: "api" +page_title: "App ID Auth Backend - HTTP API" +sidebar_current: "docs-http-auth-appid" +description: |- + This is the API documentation for the Vault App ID authentication backend. +--- + +# App ID Auth Backend HTTP API (DEPRECATED) + +This is the API documentation for the Vault App ID authentication backend. For +general information about the usage and operation of the App ID backend, please +see the [Vault App ID backend documentation](/docs/auth/app-id.html). + +This documentation assumes the App ID backend is mounted at the `/auth/app-id` +path in Vault. Since it is possible to mount auth backends at any location, +please update your API calls accordingly. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/approle/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/approle/index.html.md new file mode 100644 index 0000000..2e10bee --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/approle/index.html.md @@ -0,0 +1,632 @@ +--- +layout: "api" +page_title: "AppRole Auth Backend - HTTP API" +sidebar_current: "docs-http-auth-approle" +description: |- + This is the API documentation for the Vault AppRole authentication backend. 
+--- + +# AppRole Auth Backend HTTP API + +This is the API documentation for the Vault AppRole authentication backend. For +general information about the usage and operation of the AppRole backend, please +see the [Vault AppRole backend documentation](/docs/auth/approle.html). + +This documentation assumes the AppRole backend is mounted at the `/auth/approle` +path in Vault. Since it is possible to mount auth backends at any location, +please update your API calls accordingly. + +## List Roles + +This endpoint returns a list the existing AppRoles in the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/approle/role` | `200 application/json` | +| `GET` | `/auth/approle/role?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/approle/role +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "keys": [ + "dev", + "prod", + "test" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Create New AppRole + +Creates a new AppRole or updates an existing AppRole. This endpoint +supports both `create` and `update` capabilities. There can be one or more +constraints enabled on the role. It is required to have at least one of them +enabled while creating or updating a role. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/approle/role/:role_name` | `204 (empty body)` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. +- `bind_secret_id` `(bool: true)` - Require `secret_id` to be presented when + logging in using this AppRole. +- `bound_cidr_list` `(array: [])` - Comma-separated list of CIDR blocks; if set, + specifies blocks of IP addresses which can perform the login operation. 
+- `policies` `(array: [])` - Comma-separated list of policies set on tokens + issued via this AppRole. +- `secret_id_num_uses` `(integer: 0)` - Number of times any particular SecretID + can be used to fetch a token from this AppRole, after which the SecretID will + expire. A value of zero will allow unlimited uses. +- `secret_id_ttl` `(string: "")` - Duration in either an integer number of + seconds (`3600`) or an integer time unit (`60m`) after which any SecretID + expires. +- `token_num_uses` `(integer: 0)` - Number of times issued tokens can be used. + A value of 0 means unlimited uses. +- `token_ttl` `(string: "")` - Duration in either an integer number of seconds + (`3600`) or an integer time unit (`60m`) to set as the TTL for issued tokens + and at renewal time. +- `token_max_ttl` `(string: "")` - Duration in either an integer number of + seconds (`3600`) or an integer time unit (`60m`) after which the issued token + can no longer be renewed. +- `period` `(string: "")` - Duration in either an integer number of seconds + (`3600`) or an integer time unit (`60m`). If set, the token generated using + this AppRole is a _periodic_ token; so long as it is renewed it never expires, + but the TTL set on the token at each renewal is fixed to the value specified + here. If this value is modified, the token will pick up the new value at its + next renewal. + +### Sample Payload + +```json +{ + "token_ttl": "10m", + "token_max_ttl": "15m", + "policies": [ + "default" + ], + "period": 0, + "bind_secret_id": true +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/approle/role/application1 +``` + +## Read AppRole + +Reads the properties of an existing AppRole. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/approle/role/:role_name` | `200 application/json` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/approle/role/application1 +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "token_ttl": 1200, + "token_max_ttl": 1800, + "secret_id_ttl": 600, + "secret_id_num_uses": 40, + "policies": [ + "default" + ], + "period": 0, + "bind_secret_id": true, + "bound_cidr_list": "" + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Delete AppRole + +Deletes an existing AppRole from the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/approle/role/:role_name` | `204 (empty body)` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/approle/role/application1 +``` + +## Read AppRole Role ID + +Reads the RoleID of an existing AppRole. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/approle/role/:role_name/role-id` | `200 application/json` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + https://vault.rocks/v1/auth/approle/role/application1/role-id +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "role_id": "e5a7b66e-5d08-da9c-7075-71984634b882" + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Update AppRole Role ID + +Updates the RoleID of an existing AppRole to a custom value. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/approle/role/:role_name/role-id` | `204 (empty body)` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. +- `role_id` `(string: )` - Value to be set as RoleID. + +### Sample Payload + +```json +{ + "role_id": "custom-role-id" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/approle/role/application1/role-id +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "role_id": "e5a7b66e-5d08-da9c-7075-71984634b882" + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Generate New Secret ID + +Generates and issues a new SecretID on an existing AppRole. Similar to +tokens, the response will also contain a `secret_id_accessor` value which can +be used to read the properties of the SecretID without divulging the SecretID +itself, and also to delete the SecretID from the AppRole. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/approle/role/:role_name/secret-id` | `200 application/json` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. +- `metadata` `(map: {})` - Metadata to be tied to the SecretID. This should be + a JSON-formatted string containing the metadata in key-value pairs. 
This + metadata will be set on tokens issued with this SecretID, and is logged in + audit logs _in plaintext_. +- `cidr_list` `(string: "")` - Comma separated list of CIDR blocks enforcing + secret IDs to be used from specific set of IP addresses. If 'bound_cidr_list' + is set on the role, then the list of CIDR blocks listed here should be a + subset of the CIDR blocks listed on the role. + +### Sample Payload + +```json +{ + "metadata": { + "tag1": "production" + } +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/approle/role/application1/secret-id +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780", + "secret_id": "841771dc-11c9-bbc7-bcac-6a3945a69cd9" + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## List Secret ID Accessors + +Lists the accessors of all the SecretIDs issued against the AppRole. +This includes the accessors for "custom" SecretIDs as well. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/approle/role/:role_name/secret-id` | `200 application/json` | +| `GET` | `/auth/approle/role/:role_name/secret-id?list=true` | `200 application/json` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request LIST \ + https://vault.rocks/v1/auth/approle/role/application1/secret-id +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "keys": [ + "ce102d2a-8253-c437-bf9a-aceed4241491", + "a1c8dee4-b869-e68d-3520-2040c1a0849a", + "be83b7e2-044c-7244-07e1-47560ca1c787", + "84896a0c-1347-aa90-a4f6-aca8b7558780", + "239b1328-6523-15e7-403a-a48038cdc45a" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Read AppRole Secret ID + +Reads out the properties of a SecretID. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/approle/role/:role_name/secret-id/lookup` | `200 application/json` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. +- `secret_id` `(string: )` - Secret ID attached to the role. + +### Sample Payload + +```json +{ + "secret_id": "84896a0c-1347-aa90-a4f6-aca8b7558780" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --payload @payload.json \ + https://vault.rocks/v1/auth/approle/role/application1/secret-id/lookup +``` + +## Destroy AppRole Secret ID + +Destroy an AppRole secret ID. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/approle/role/:role_name/secret-id/destroy` | `204 (empty body)` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. +- `secret_id` `(string: )` - Secret ID attached to the role. + +### Sample Payload + +```json +{ + "secret_id": "84896a0c-1347-aa90-a4f6-aca8b7558780" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --payload @payload.json \ + https://vault.rocks/v1/auth/approle/role/application1/secret-id/destroy +``` + +## Read AppRole Secret ID Accessor + +Reads out the properties of a SecretID. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/approle/role/:role_name/secret-id-accessor/lookup` | `200 application/json` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. +- `secret_id_accessor` `(string: )` - Secret ID accessor attached to the role. + +### Sample Payload + +```json +{ + "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --payload @payload.json \ + https://vault.rocks/v1/auth/approle/role/application1/secret-id-accessor/lookup +``` + +## Destroy AppRole Secret ID Accessor + +Destroy an AppRole secret ID by its accessor. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/approle/role/:role_name/secret-id-accessor/destroy` | `204 (empty body)` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. +- `secret_id_accessor` `(string: )` - Secret ID accessor attached to the role. + +### Sample Payload + +```json +{ + "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --payload @payload.json \ + https://vault.rocks/v1/auth/approle/role/application1/secret-id-accessor/destroy +``` + +## Create Custom AppRole Secret ID + +Assigns a "custom" SecretID against an existing AppRole. This is used in the +"Push" model of operation. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/approle/role/:role_name/custom-secret-id` | `200 application/json` | + +### Parameters + +- `role_name` `(string: )` - Name of the AppRole. +- `secret_id` `(string: )` - SecretID to be attached to the Role. +- `metadata` `(map: {})` - Metadata to be tied to the SecretID. 
This should be + a JSON-formatted string containing the metadata in key-value pairs. This + metadata will be set on tokens issued with this SecretID, and is logged in + audit logs _in plaintext_. +- `cidr_list` `(string: "")` - Comma separated list of CIDR blocks enforcing + secret IDs to be used from ppecific set of IP addresses. If 'bound_cidr_list' + is set on the role, then the list of CIDR blocks listed here should be a + subset of the CIDR blocks listed on the role. + +### Sample Payload + +```json +{ + "secret-id": "testsecretid" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/approle/role/application1/custom-secret-id +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780", + "secret_id": "testsecretid" + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Login With AppRole + +Issues a Vault token based on the presented credentials. `role_id` is always +required; if `bind_secret_id` is enabled (the default) on the AppRole, +`secret_id` is required too. Any other bound authentication values on the +AppRole (such as client IP CIDR) are also evaluated. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/approle/login` | `200 application/json` | + +### Parameters + +- `role_id` `(string: )` - RoleID of the AppRole. +- `secret_id` `(string: )` - SecretID belonging to AppRole. 
+ +### Sample Payload + +```json +{ + "role_id": "59d6d1ca-47bb-4e7e-a40b-8be3bc5a0ba8", + "secret_id": "84896a0c-1347-aa90-a4f6-aca8b7558780" +} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/approle/login +``` + +### Sample Response + +```json +{ + "auth": { + "renewable": true, + "lease_duration": 1200, + "metadata": null, + "policies": [ + "default" + ], + "accessor": "fd6c9a00-d2dc-3b11-0be5-af7ae0e1d374", + "client_token": "5b1a0318-679c-9c45-e5c6-d1b9a9035d49" + }, + "warnings": null, + "wrap_info": null, + "data": null, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Read, Update, or Delete AppRole Properties + +Updates the respective property in the existing AppRole. All of these +parameters of the AppRole can be updated using the `/auth/approle/role/:role_name` +endpoint directly. The endpoints for each field is provided separately +to be able to delegate specific endpoints using Vault's ACL system. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET/POST/DELETE` | `/auth/approle/role/:role_name/policies` | `200/204` | +| `GET/POST/DELETE` | `/auth/approle/role/:role_name/secret-id-num-uses` | `200/204` | +| `GET/POST/DELETE` | `/auth/approle/role/:role_name/secret-id-ttl` | `200/204` | +| `GET/POST/DELETE` | `/auth/approle/role/:role_name/token-ttl` | `200/204` | +| `GET/POST/DELETE` | `/auth/approle/role/:role_name/token-max-ttl` | `200/204` | +| `GET/POST/DELETE` | `/auth/approle/role/:role_name/bind-secret-id` | `200/204` | +| `GET/POST/DELETE` | `/auth/approle/role/:role_name/bound-cidr-list` | `200/204` | +| `GET/POST/DELETE` | `/auth/approle/role/:role_name/period` | `200/204` | + +Refer to `/auth/approle/role/:role_name` endpoint. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/aws/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/aws/index.html.md new file mode 100644 index 0000000..8ba4671 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/aws/index.html.md @@ -0,0 +1,1264 @@ +--- +layout: "api" +page_title: "AWS Auth Backend - HTTP API" +sidebar_current: "docs-http-auth-aws" +description: |- + This is the API documentation for the Vault AWS authentication backend. +--- + +# AWS Auth Backend HTTP API + +This is the API documentation for the Vault AWS authentication backend. For +general information about the usage and operation of the AWS backend, please +see the [Vault AWS backend documentation](/docs/auth/aws.html). + +This documentation assumes the AWS backend is mounted at the `/auth/aws` +path in Vault. Since it is possible to mount auth backends at any location, +please update your API calls accordingly. + +## Configure Client + +Configures the credentials required to perform API calls to AWS as well as +custom endpoints to talk to AWS APIs. The instance identity document +fetched from the PKCS#7 signature will provide the EC2 instance ID. The +credentials configured using this endpoint will be used to query the status +of the instances via DescribeInstances API. If static credentials are not +provided using this endpoint, then the credentials will be retrieved from +the environment variables `AWS_ACCESS_KEY`, `AWS_SECRET_KEY` and +`AWS_REGION` respectively. If the credentials are still not found and if the +backend is configured on an EC2 instance with metadata querying +capabilities, the credentials are fetched automatically. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/aws/config/client` | `204 (empty body)` | + +### Parameters + +- `access_key` `(string: "")` - AWS Access key with permissions to query AWS + APIs. 
The permissions required depend on the specific configurations. If using + the `iam` auth method without inferencing, then no credentials are necessary. + If using the `ec2` auth method or using the `iam` auth method with + inferencing, then these credentials need access to `ec2:DescribeInstances`. If + additionally a `bound_iam_role` is specified, then these credentials also need + access to `iam:GetInstanceProfile`. If, however, an alternate sts + configuration is set for the target account, then the credentials must be + permissioned to call `sts:AssumeRole` on the configured role, and that role + must have the permissions described here. +- `secret_key` `(string: "")` - AWS Secret key with permissions to query AWS + APIs. +- `endpoint` `(string: "")` - URL to override the default generated endpoint for + making AWS EC2 API calls. +- `iam_endpoint` `(string: "")` - URL to override the default generated endpoint + for making AWS IAM API calls. +- `sts_endpoint` `(string: "")` - URL to override the default generated endpoint + for making AWS STS API calls. +- `iam_server_id_header_value` `(string: "")` - The value to require in the + `X-Vault-AWS-IAM-Server-ID` header as part of GetCallerIdentity requests that + are used in the iam auth method. If not set, then no value is required or + validated. If set, clients must include an X-Vault-AWS-IAM-Server-ID header in + the headers of login requests, and further this header must be among the + signed headers validated by AWS. This is to protect against different types of + replay attacks, for example a signed request sent to a dev server being resent + to a production server. Consider setting this to the Vault server's DNS name. + +### Sample Payload + +```json +{ + "access_key": "VKIAJBRHKH6EVTTNXDHA", + "secret_key": "vCtSM8ZUEQ3mOFVlYPBQkf2sO6F/W7a5TVzrl3Oj" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/aws/config/client +``` + +## Read Config + +Returns the previously configured AWS access credentials. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/aws/config/client` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/aws/config/client +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "secret_key": "vCtSM8ZUEQ3mOFVlYPBQkf2sO6F/W7a5TVzrl3Oj", + "access_key": "VKIAJBRHKH6EVTTNXDHA" + "endpoint" "", + "iam_endpoint" "", + "sts_endpoint" "", + "iam_server_id_header_value" "", + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Delete Config + +Deletes the previously configured AWS access credentials. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/aws/config/client` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/aws/config/client +``` + +## Create Certificate Configuration + +Registers an AWS public key to be used to verify the instance identity +documents. While the PKCS#7 signature of the identity documents have DSA +digest, the identity signature will have RSA digest, and hence the public +keys for each type varies respectively. Indicate the type of the public key +using the "type" parameter. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/aws/config/certificate/:cert_name` | `204 (empty body)` | + +### Parameters + +- `cert_name` `(string: )` - Name of the certificate. +- `aws_public_cert` `(string: )` - AWS Public key required to verify + PKCS7 signature of the EC2 instance metadata. 
+- `type` `(string: "pkcs7")` - Takes the value of either "pkcs7" or "identity", + indicating the type of document which can be verified using the given + certificate. The PKCS#7 document will have a DSA digest and the identity + signature will have an RSA signature, and accordingly the public certificates + to verify those also vary. Defaults to "pkcs7". + +### Sample Payload + +```json +{ + "aws_public_cert": "-----BEGIN CERTIFICATE-----\nMIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw\nFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD\nVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z\nODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u\nIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl\ncnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e\nih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3\nVyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P\nhviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j\nk+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U\nhhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF\nlRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf\nMNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW\nMXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw\nvSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw\n7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K\n-----END CERTIFICATE-----\n" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/aws/config/certificate/test-cert +``` + +## Read Certificate Configuration + +Returns the previously configured AWS public key. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/aws/config/certificate/:cert_name` | `200 application/json` | + +### Parameters + +- `cert_name` `(string: )` - Name of the certificate. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/aws/config/certificate/test-cert +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "aws_public_cert": "-----BEGIN CERTIFICATE-----\nMIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw\nFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD\nVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z\nODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u\nIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl\ncnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e\nih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3\nVyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P\nhviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j\nk+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U\nhhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF\nlRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf\nMNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW\nMXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw\nvSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw\n7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K\n-----END CERTIFICATE-----\n" + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## List Certificate Configurations + +Lists all the AWS public certificates that are registered with the backend. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/aws/config/certificates` | `200 application/json` | +| `GET` | `/auth/aws/config/certificates?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/aws/config/certificates +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "keys": [ + "cert1" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Create STS Role + +Allows the explicit association of STS roles to satellite AWS accounts +(i.e. those which are not the account in which the Vault server is +running.) Login attempts from EC2 instances running in these accounts will +be verified using credentials obtained by assumption of these STS roles. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/aws/config/sts/:account_id` | `204 (empty body)` | + +### Parameters + +- `account_id` `(string: )` - AWS account ID to be associated with + STS role. If set, Vault will use assumed credentials to verify any login + attempts from EC2 instances in this account. +- `sts_role` `(string: )` - AWS ARN for STS role to be assumed when + interacting with the account specified. The Vault server must have + permissions to assume this role. + +### Sample Payload + +```json +{ + "sts_role": "arn:aws:iam:111122223333:role/myRole" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/aws/config/sts/111122223333 +``` + +## Read STS Role + +Returns the previously configured STS role. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/aws/config/sts/:account_id` | `200 application/json` | + +### Parameters + +- `account_id` `(string: )` - AWS account ID to be associated with + STS role. If set, Vault will use assumed credentials to verify any login + attempts from EC2 instances in this account. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/aws/config/sts/111122223333 +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "sts_role ": "arn:aws:iam:111122223333:role/myRole" + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## List STS Roles + +Lists all the AWS Account IDs for which an STS role is registered. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/aws/config/sts` | `200 application/json` | +| `GET` | `/auth/aws/config/sts?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/aws/config/sts +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "keys": [ + "111122223333", + "999988887777" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Delete STS Role + +Deletes a previously configured AWS account/STS role association. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/aws/config/sts` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/aws/config/sts +``` + +## Configure Identity Whitelist Tidy Operation + +Configures the periodic tidying operation of the whitelisted identity entries. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/aws/config/tidy/identity-whitelist` | `204 (empty body)` | + +### Parameters + +- `safety_buffer` `(string: "72h")` - The amount of extra time that must have + passed beyond the `roletag` expiration, before it is removed from the backend + storage. Defaults to 72h. +- `disable_periodic_tidy` `(bool: false)` - If set to 'true', disables the + periodic tidying of the `identity-whitelist/` entries. + +### Sample Payload + +```json +{ + "safety_buffer": "48h" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/aws/config/tidy/identity-whitelist +``` + +## Read Identity Whitelist Tidy Settings + +Returns the previously configured periodic whitelist tidying settings. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/aws/config/tidy/identity-whitelist` | `200 applicaiton/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/aws/config/tidy/identity-whitelist +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "safety_buffer": 600, + "disable_periodic_tidy": false + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Delete Identity Whitelist Tidy Settings + +Deletes the previously configured periodic whitelist tidying settings. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/aws/config/tidy/identity-whitelist` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request DELETE \ + https://vault.rocks/v1/auth/aws/config/tidy/identity-whitelist +``` + +## Configure Role Tag Blacklist Tidy Operation + +Configures the periodic tidying operation of the blacklisted role tag entries. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/aws/config/tidy/roletag-blacklist` | `204 (empty body)` | + +### Parameters + +- `safety_buffer` `(string: "72h")` - The amount of extra time that must have + passed beyond the `roletag` expiration, before it is removed from the backend + storage. Defaults to 72h. +- `disable_periodic_tidy` `(bool: false)` - If set to 'true', disables the + periodic tidying of the `roletag-blacklist/` entries. + +### Sample Payload + +```json +{ + "safety_buffer": "48h" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/aws/config/tidy/roletag-blacklist +``` + +## Read Role Tag Blackist Tidy Settings + +Returns the previously configured periodic blacklist tidying settings. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/aws/config/tidy/roletag-blacklist` | `200 applicaiton/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/aws/config/tidy/roletag-blacklist +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "safety_buffer": 600, + "disable_periodic_tidy": false + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Delete Role Tag Blackist Tidy Settings + +Deletes the previously configured periodic blacklist tidying settings. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/aws/config/tidy/roletag-blacklist` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/aws/config/tidy/roletag-blacklist +``` + +## Create Role + +Registers a role in the backend. Only those instances or principals which +are using the role registered using this endpoint, will be able to perform +the login operation. Contraints can be specified on the role, that are +applied on the instances or principals attempting to login. At least one +constraint should be specified on the role. The available constraints you +can choose are dependent on the `auth_type` of the role and, if the +`auth_type` is `iam`, then whether inferencing is enabled. A role will not +let you configure a constraint if it is not checked by the `auth_type` and +inferencing configuration of that role. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/aws/role/:role` | `204 (empty body)` | + +### Parameters + +- `role` `(string: )` - Name of the role. +- `auth_type` `(string: "iam")` - The auth type permitted for this role. Valid + choices are "ec2" or "iam". If no value is specified, then it will default to + "iam" (except for legacy `aws-ec2` auth types, for which it will default to + "ec2"). Only those bindings applicable to the auth type chosen will be allowed + to be configured on the role. +- `bound_ami_id` `(string: "")` - If set, defines a constraint on the EC2 + instances that they should be using the AMI ID specified by this parameter. + This constraint is checked during ec2 auth as well as the iam auth method only + when inferring an EC2 instance. 
+- `bound_account_id` `(string: "")` - If set, defines a constraint on the EC2 + instances that the account ID in its identity document to match the one + specified by this parameter. This constraint is checked during ec2 auth as + well as the iam auth method only when inferring an EC2 instance. +- `bound_region` `(string: "")` - If set, defines a constraint on the EC2 + instances that the region in its identity document must match the one + specified by this parameter. This constraint is only checked by the ec2 auth + method as well as the iam auth method only when inferring an ec2 instance. +- `bound_vpc_id` `(string: "")` - If set, defines a constraint on the EC2 + instance to be associated with the VPC ID that matches the value specified by + this parameter. This constraint is only checked by the ec2 auth method as well + as the iam auth method only when inferring an ec2 instance. +- `bound_subnet_id` `(string: "")` - If set, defines a constraint on the EC2 + instance to be associated with the subnet ID that matches the value specified + by this parameter. This constraint is only checked by the ec2 auth method as + well as the iam auth method only when inferring an ec2 instance. +- `bound_iam_role_arn` `(string: "")` - If set, defines a constraint on the + authenticating EC2 instance that it must match the IAM role ARN specified by + this parameter. The value is refix-matched (as though it were a glob ending + in `*`). The configured IAM user or EC2 instance role must be allowed to + execute the `iam:GetInstanceProfile` action if this is specified. This + constraint is checked by the ec2 auth method as well as the iam auth method + only when inferring an EC2 instance. +- `bound_iam_instance_profile_arn` `(string: "")` - If set, defines a constraint + on the EC2 instances to be associated with an IAM instance profile ARN which + has a prefix that matches the value specified by this parameter. 
The value is + prefix-matched (as though it were a glob ending in `*`). This constraint is + checked by the ec2 auth method as well as the iam auth method only when + inferring an ec2 instance. +- `role_tag` `(string: "")` - If set, enables the role tags for this role. The + value set for this field should be the 'key' of the tag on the EC2 instance. + The 'value' of the tag should be generated using `role//tag` endpoint. + Defaults to an empty string, meaning that role tags are disabled. This + constraint is valid only with the ec2 auth method and is not allowed when an + auth_type is iam. +- `bound_iam_principal_arn` `(string: "")` - Defines the IAM principal that must + be authenticated using the iam auth method. It should look like + "arn:aws:iam::123456789012:user/MyUserName" or + "arn:aws:iam::123456789012:role/MyRoleName". Wildcards are supported at the + end of the ARN, e.g., "arn:aws:iam::123456789012:\*" will match any IAM + principal in the AWS account 123456789012. This constraint is only checked by + the iam auth method. Wildcards are supported at the end of the ARN, e.g., + "arn:aws:iam::123456789012:role/\*" will match all roles in the AWS account. +- `inferred_entity_type` `(string: "")` - When set, instructs Vault to turn on + inferencing. The only current valid value is "ec2\_instance" instructing Vault + to infer that the role comes from an EC2 instance in an IAM instance profile. + This only applies to the iam auth method. If you set this on an existing role + where it had not previously been set, tokens that had been created prior will + not be renewable; clients will need to get a new token. +- `inferred_aws_region` `(string: "")` - When role inferencing is activated, the + region to search for the inferred entities (e.g., EC2 instances). Required if + role inferencing is activated. This only applies to the iam auth method. 
+- `resolve_aws_unique_ids` `(bool: false)` - When set, resolves the + `bound_iam_principal_arn` to the + [AWS Unique ID](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-unique-ids) + for the bound principal ARN. This field is ignored when + `bound_iam_principal_arn` ends with a wildcard character. + This requires Vault to be able to call `iam:GetUser` or `iam:GetRole` on the + `bound_iam_principal_arn` that is being bound. Resolving to internal AWS IDs + more closely mimics the behavior of AWS services in that if an IAM user or + role is deleted and a new one is recreated with the same name, those new users + or roles won't get access to roles in Vault that were permissioned to the + prior principals of the same name. The default value for new roles is true, + while the default value for roles that existed prior to this option existing + is false (you can check the value for a given role using the GET method on the + role). Any authentication tokens created prior to this being supported won't + verify the unique ID upon token renewal. When this is changed from false to + true on an existing role, Vault will attempt to resolve the role's bound IAM + ARN to the unique ID and, if unable to do so, will fail to enable this option. + Changing this from `true` to `false` is not supported; if absolutely + necessary, you would need to delete the role and recreate it explicitly + setting it to `false`. However; the instances in which you would want to do + this should be rare. If the role creation (or upgrading to use this) succeed, + then Vault has already been able to resolve internal IDs, and it doesn't need + any further IAM permissions to authenticate users. 
If a role has been deleted + and recreated, and Vault has cached the old unique ID, you should just call + this endpoint specifying the same `bound_iam_principal_arn` and, as long as + Vault still has the necessary IAM permissions to resolve the unique ID, Vault + will update the unique ID. (If it does not have the necessary permissions to + resolve the unique ID, then it will fail to update.) If this option is set to + false, then you MUST leave out the path component in bound_iam_principal_arn + for **roles** only, but not IAM users. That is, if your IAM role ARN is of the + form `arn:aws:iam::123456789012:role/some/path/to/MyRoleName`, you **must** + specify a bound_iam_principal_arn of + `arn:aws:iam::123456789012:role/MyRoleName` for authentication to work. +- `ttl` `(string: "")` - The TTL period of tokens issued using this role, + provided as "1h", where hour is the largest suffix. +- `max_ttl` `(string: "")` - The maximum allowed lifetime of tokens issued using + this role. +- `period` `(string: "")` - If set, indicates that the token generated using + this role should never expire. The token should be renewed within the duration + specified by this value. At each renewal, the token's TTL will be set to the + value of this parameter. The maximum allowed lifetime of tokens issued using + this role. +- `policies` `(array: [])` - Policies to be set on tokens issued using this + role. +- `allow_instance_migration` `(bool: false)` - If set, allows migration of the + underlying instance where the client resides. This keys off of pendingTime in + the metadata document, so essentially, this disables the client nonce check + whenever the instance is migrated to a new host and pendingTime is newer than + the previously-remembered time. Use with caution. This only applies to + authentications via the ec2 auth method. +- `disallow_reauthentication` `(bool: false)` - If set, only allows a single + token to be granted per instance ID. 
In order to perform a fresh login, the + entry in whitelist for the instance ID needs to be cleared using + 'auth/aws/identity-whitelist/' endpoint. Defaults to 'false'. + This only applies to authentications via the ec2 auth method. + +### Sample Payload + +```json +{ + "bound_ami_id": "ami-fce36987", + "role_tag": "", + "policies": [ + "default", + "dev", + "prod" + ], + "max_ttl": 1800000, + "disallow_reauthentication": false, + "allow_instance_migration": false +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/aws/role/dev-role +``` + +## Read Role + +Returns the previously registered role configuration. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/aws/role/:role` | `200 application/json` | + +### Parameters + +- `role` `(string: )` - Name of the role. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/aws/role/dev-role +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "bound_ami_id": "ami-fce36987", + "role_tag": "", + "policies": [ + "default", + "dev", + "prod" + ], + "max_ttl": 1800000, + "disallow_reauthentication": false, + "allow_instance_migration": false + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## List Roles + +Lists all the roles that are registered with the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/aws/roles` | `200 application/json` | +| `GET` | `/auth/aws/roles?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request LIST \ + https://vault.rocks/v1/auth/aws/roles +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "keys": [ + "dev-role", + "prod-role" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Delete Role + +Deletes the previously registered role. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/aws/role/:role` | `204 (empty body)` | + +### Parameters + +- `role` `(string: )` - Name of the role. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/aws/role/dev-role +``` + +## Create Role Tags + +Creates a role tag on the role, which help in restricting the capabilities +that are set on the role. Role tags are not tied to any specific ec2 +instance unless specified explicitly using the `instance_id` parameter. By +default, role tags are designed to be used across all instances that +satisfies the constraints on the role. Regardless of which instances have +role tags on them, capabilities defined in a role tag must be a strict +subset of the given role's capabilities. Note that, since adding and +removing a tag is often a widely distributed privilege, care needs to be +taken to ensure that the instances are attached with correct tags to not +let them gain more privileges than what were intended. If a role tag is +changed, the capabilities inherited by the instance will be those defined +on the new role tag. Since those must be a subset of the role +capabilities, the role should never provide more capabilities than any +given instance can be allowed to gain in a worst-case scenario. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/aws/role/:role/tag` | `200 application/json` | + +### Parameters + +- `role` `(string: )` - Name of the role. 
+- `policies` `(array: [])` - Policies to be associated with the tag. If set, + must be a subset of the role's policies. If set, but set to an empty value, + only the 'default' policy will be given to issued tokens. +- `max_ttl` `(string: "")` - The maximum allowed lifetime of tokens issued using + this role. +- `instance_id` `(string: "")` - Instance ID for which this tag is intended for. + If set, the created tag can only be used by the instance with the given ID. +- `allow_instance_migration` `(bool: false)` - If set, allows migration of the + underlying instance where the client resides. This keys off of pendingTime in + the metadata document, so essentially, this disables the client nonce check + whenever the instance is migrated to a new host and pendingTime is newer than + the previously-remembered time. Use with caution. Defaults to 'false'. +- `disallow_reauthentication` `(bool: false)` - If set, only allows a single + token to be granted per instance ID. This can be cleared with the + auth/aws/identity-whitelist endpoint. Defaults to 'false'. + +### Sample Payload + +```json +{ + "policies": ["default", "prod"] +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/aws/role/dev-role/tag +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "tag_value": "v1:09Vp0qGuyB8=:r=dev-role:p=default,prod:d=false:t=300h0m0s:uPLKCQxqsefRhrp1qmVa1wsQVUXXJG8UZP/pJIdVyOI=", + "tag_key": "VaultRole" + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Login + +Fetch a token. This endpoint verifies the pkcs7 signature of the instance +identity document or the signature of the signed GetCallerIdentity request. +With the ec2 auth method, or when inferring an EC2 instance, verifies that +the instance is actually in a running state. 
Cross checks the constraints +defined on the role with which the login is being performed. With the ec2 +auth method, as an alternative to pkcs7 signature, the identity document +along with its RSA digest can be supplied to this endpoint. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/aws/login` | `200 application/json` | + +### Sample Payload + +- `role` `(string: "")` - Name of the role against which the login is being + attempted. If `role` is not specified, then the login endpoint looks for a + role bearing the name of the AMI ID of the EC2 instance that is trying to + login if using the ec2 auth method, or the "friendly name" (i.e., role name or + username) of the IAM principal authenticated. If a matching role is not found, + login fails. +- `identity` `(string: )` - Base64 encoded EC2 instance identity + document. This needs to be supplied along with the `signature` parameter. If + using `curl` for fetching the identity document, consider using the option + `-w 0` while piping the output to `base64` binary. +- `signature` `(string: )` - Base64 encoded SHA256 RSA signature of + the instance identity document. This needs to be supplied along with + `identity` parameter when using the ec2 auth method. +- `pkcs7` `(string: )` - PKCS7 signature of the identity document with + all `\n` characters removed. Either this needs to be set *OR* both `identity` + and `signature` need to be set when using the ec2 auth method. +- `nonce` `(string: "")` - The nonce to be used for subsequent login requests. + If this parameter is not specified at all and if reauthentication is allowed, + then the backend will generate a random nonce, attaches it to the instance's + identity-whitelist entry and returns the nonce back as part of auth metadata. + This value should be used with further login requests, to establish client + authenticity. 
Clients can choose to set a custom nonce if preferred, in which + case, it is recommended that clients provide a strong nonce. If a nonce is + provided but with an empty value, it indicates intent to disable + reauthentication. Note that, when `disallow_reauthentication` option is + enabled on either the role or the role tag, the `nonce` holds no significance. + This is ignored unless using the ec2 auth method. +- `iam_http_request_method` `(string: )` - HTTP method used in the + signed request. Currently only POST is supported, but other methods may be + supported in the future. This is required when using the iam auth method. +- `iam_request_url` `(string: )` - Base64-encoded HTTP URL used in + the signed request. Most likely just `aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8=` + (base64-encoding of `https://sts.amazonaws.com/`) as most requests will + probably use POST with an empty URI. This is required when using the iam auth + method. +- `iam_request_body` `(string: )` - Base64-encoded body of the + signed request. Most likely + `QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==` which is the + base64 encoding of `Action=GetCallerIdentity&Version=2011-06-15`. This is + required when using the iam auth method. +- `iam_request_headers` `(string: )` - Base64-encoded, + JSON-serialized representation of the sts:GetCallerIdentity HTTP request + headers. The JSON serialization assumes that each header key maps to either a + string value or an array of string values (though the length of that array + will probably only be one). If the `iam_server_id_header_value` is configured + in Vault for the aws auth mount, then the headers must include the + X-Vault-AWS-IAM-Server-ID header, its value must match the value configured, + and the header must be included in the signed headers. This is required when + using the iam auth method. 
+ + +### Sample Payload + +```json +{} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/aws/login +``` + +### Sample Response + +```json +{ + "auth": { + "renewable": true, + "lease_duration": 1800000, + "metadata": { + "role_tag_max_ttl": "0", + "instance_id": "i-de0f1344", + "ami_id": "ami-fce36983", + "role": "dev-role", + "auth_type": "ec2" + }, + "policies": [ + "default", + "dev" + ], + "accessor": "20b89871-e6f2-1160-fb29-31c2f6d4645e", + "client_token": "c9368254-3f21-aded-8a6f-7c818e81b17a" + }, + "warnings": null, + "data": null, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Place Role Tags in Blacklist + +Places a valid role tag in a blacklist. This ensures that the role tag +cannot be used by any instance to perform a login operation again. Note +that if the role tag was previously used to perform a successful login, +placing the tag in the blacklist does not invalidate the already issued +token. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/aws/roletag-blacklist/:role_tag` | `204 (empty body)` | + +### Parameters + +- `role_tag` `(string: )` - Role tag to be blacklisted. The tag can be + supplied as-is. In order to avoid any encoding problems, it can be base64 + encoded. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + https://vault.rocks/v1/auth/aws/roletag-blacklist/djE6MDlWcDBxR3V5Qjg9OmE9YW1pLWZjZTNjNjk2OnA9ZGVmYXVsdCxwcm9kOmQ9ZmFsc2U6dD0zMDBoMG0wczp1UExLQ1F4cXNlZlJocnAxcW1WYTF3c1FWVVhYSkc4VVpQLwo= +``` + +### Read Role Tag Blacklist Information + +Returns the blacklist entry of a previously blacklisted role tag.
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/aws/roletag-blacklist/:role_tag` | `200 application/json` | + +### Parameters + +- `role_tag` `(string: )` - Role tag to be blacklisted. The tag can be + supplied as-is. In order to avoid any encoding problems, it can be base64 + encoded. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/aws/roletag-blacklist/djE6MDlWcDBxR3V5Qjg9OmE9YW1pLWZjZTNjNjk2OnA9ZGVmYXVsdCxwcm9kOmQ9ZmFsc2U6dD0zMDBoMG0wczp1UExLQ1F4cXNlZlJocnAxcW1WYTF3c1FWVVhYSkc4VVpQLwo= +``` + + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "expiration_time": "2016-04-25T10:35:20.127058773-04:00", + "creation_time": "2016-04-12T22:35:01.178348124-04:00" + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## List Blacklist Tags + +Lists all the role tags that are blacklisted. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/aws/roletag-blacklist` | `200 application/json` | +| `GET` | `/auth/aws/roletag-blacklist?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/aws/roletag-blacklist +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "keys": [ + "v1:09Vp0qGuyB8=:a=ami-fce3c696:p=default,prod:d=false:t=300h0m0s:uPLKCQxqsefRhrp1qmVa1wsQVUXXJG8UZP/" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Delete Blacklist Tags + +Deletes a blacklisted role tag. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/aws/roletag-blacklist/:role_tag` | `204 (empty body)` | + +### Parameters + +- `role_tag` `(string: )` - Role tag to be blacklisted. 
The tag can be + supplied as-is. In order to avoid any encoding problems, it can be base64 + encoded. + + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/aws/roletag-blacklist/djE6MDlWcDBxR3V5Qjg9OmE9YW1pLWZjZTNjNjk2OnA9ZGVmYXVsdCxwcm9kOmQ9ZmFsc2U6dD0zMDBoMG0wczp1UExLQ1F4cXNlZlJocnAxcW1WYTF3c1FWVVhYSkc4VVpQLwo= +``` + +## Tidy Blacklist Tags + +Cleans up the entries in the blacklist based on expiration time on the entry and +`safety_buffer`. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/aws/tidy/roletag-blacklist` | `204 (empty body)` | + +### Parameters + +- `safety_buffer` `(string: "72h")` - The amount of extra time that must have + passed beyond the `roletag` expiration, before it is removed from the backend + storage. Defaults to 72h. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + https://vault.rocks/v1/auth/aws/tidy/roletag-blacklist +``` + +### Read Identity Whitelist Information + +Returns an entry in the whitelist. An entry will be created/updated by every +successful login. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/aws/identity-whitelist/:instance_id` | `200 application/json` | + +### Parameters + +- `instance_id` `(string: )` - EC2 instance ID. A successful login + operation from an EC2 instance gets cached in this whitelist, keyed off of + instance ID. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + https://vault.rocks/v1/auth/aws/identity-whitelist/i-aab47d37 +``` + + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "pending_time": "2016-04-14T01:01:41Z", + "expiration_time": "2016-05-05 10:09:16.67077232 +0000 UTC", + "creation_time": "2016-04-14 14:09:16.67077232 +0000 UTC", + "client_nonce": "5defbf9e-a8f9-3063-bdfc-54b7a42a1f95", + "role": "dev-role" + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## List Identity Whitelist Entries + + Lists all the instance IDs that are in the whitelist of successful logins. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/aws/identity-whitelist` | `200 application/json` | +| `GET` | `/auth/aws/identity-whitelist?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/aws/identity-whitelist +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "data": { + "keys": [ + "i-aab47d37" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Delete Identity Whitelist Entries + +Deletes a cache of the successful login from an instance. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/aws/identity-whitelist/:instance_id` | `204 (empty body)` | + +### Parameters + +- `instance_id` `(string: )` - EC2 instance ID. A successful login + operation from an EC2 instance gets cached in this whitelist, keyed off of + instance ID. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/aws/identity-whitelist/i-aab47d37 +``` + +## Tidy Identity Whitelist Entries + +Cleans up the entries in the whitelist based on expiration time and +`safety_buffer`.
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/aws/tidy/identity-whitelist` | `204 (empty body)` | + +### Parameters + +- `safety_buffer` `(string: "72h")` - The amount of extra time that must have + passed beyond the `roletag` expiration, before it is removed from the backend + storage. Defaults to 72h. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + https://vault.rocks/v1/auth/aws/tidy/identity-whitelist +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/cert/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/cert/index.html.md new file mode 100644 index 0000000..0de838e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/cert/index.html.md @@ -0,0 +1,330 @@ +--- +layout: "api" +page_title: "TLS Certificate Auth Backend - HTTP API" +sidebar_current: "docs-http-auth-cert" +description: |- + This is the API documentation for the Vault TLS Certificate authentication + backend. +--- + +# TLS Certificate Auth Backend HTTP API + +This is the API documentation for the Vault TLS Certificate authentication +backend. For general information about the usage and operation of the TLS +Certificate backend, please see the [Vault TLS Certificate backend documentation](/docs/auth/cert.html). + +This documentation assumes the TLS Certificate backend is mounted at the +`/auth/cert` path in Vault. Since it is possible to mount auth backends at any +location, please update your API calls accordingly. + +## Create CA Certificate Role + +Sets a CA cert and associated parameters in a role name. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/cert/certs/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` - The name of the certificate role. +- `certificate` `(string: )` - The PEM-format CA certificate. 
+- `allowed_names` `(string: "")` - Constrain the Common and Alternative Names in + the client certificate with a + [globbed pattern](https://github.com/ryanuber/go-glob/blob/master/README.md#example). Value is + a comma-separated list of patterns. Authentication requires at least one Name matching at least one pattern. If not set, defaults to allowing all names. +- `policies` `(string: "")` - A comma-separated list of policies to set on tokens + issued when authenticating against this CA certificate. +- `display_name` `(string: "")` - The `display_name` to set on tokens issued + when authenticating against this CA certificate. If not set, defaults to the + name of the role. +- `ttl` `(string: "")` - The TTL period of the token, provided as a number of + seconds. If not provided, the token is valid for the mount or system + default TTL time, in that order. + +### Sample Payload + +```json +{ + "certificate": "-----BEGIN CERTIFICATE-----\nMIIEtzCCA5+.......ZRtAfQ6r\nwlW975rYa1ZqEdA=\n-----END CERTIFICATE-----", + "display_name": "test" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json + https://vault.rocks/v1/auth/cert/certs/test-ca +``` + +## Read CA Certificate Role + +Gets information associated with the named role. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/cert/certs/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` - The name of the certificate role. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..."
\ + https://vault.rocks/v1/auth/cert/certs/test-ca +``` + +### Sample Response + +```json +{ + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "certificate": "-----BEGIN CERTIFICATE-----\nMIIEtzCCA5+.......ZRtAfQ6r\nwlW975rYa1ZqEdA=\n-----END CERTIFICATE-----", + "display_name": "test", + "policies": "", + "allowed_names": "", + "ttl": 2764800 + }, + "warnings": null, + "auth": null +} +``` + +## List Certificate Roles + +Lists configured certificate names. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/cert/certs` | `200 application/json` | +| `GET` | `/auth/cert/certs?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/cert/certs + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "keys": [ + "cert1", + "cert2" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Delete Certificate Role + +Deletes the named role and CA cert from the backend mount. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/cert/certs/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` - The name of the certificate role. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/cert/certs/cert1 +``` + +## Create CRL + +Sets a named CRL. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/cert/crls/:name` | `204 (empty body)` | + + +### Parameters + +- `name` `(string: )` - The name of the CRL. +- `crl` `(string: )` - The PEM format CRL. 
+ + +### Sample Payload + +```json +{ + "crl": "-----BEGIN X509 CRL-----\n...\n-----END X509 CRL-----" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/cert/crls/custom-crl +``` + +## Read CRL + +Gets information associated with the named CRL (currently, the serial +numbers contained within). As the serials can be integers up to an +arbitrary size, these are returned as strings. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/cert/crls/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` - The name of the CRL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/cert/crls/custom-crl +``` + +### Sample Response + +```json +{ + "auth": null, + "data": { + "serials": { + "13": {} + } + }, + "lease_duration": 0, + "lease_id": "", + "renewable": false, + "warnings": null +} +``` + +## Delete CRL + +Deletes the named CRL from the backend mount. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/cert/crls/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` - The name of the CRL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/cert/crls/cert1 +``` + +## Configure TLS Certificate Backend + +Configuration options for the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/cert/config` | `204 (empty body)` | + +### Parameters + +- `disable_binding` `(boolean: false)` - If set, during renewal, skips the + matching of presented client identity with the client identity used during + login.
+ + +### Sample Payload + +```json +{ + "disable_binding": true +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/cert/config +``` + +## Login with TLS Certificate Backend + +Log in and fetch a token. If there is a valid chain to a CA configured in the +backend and all role constraints are matched, a token will be issued. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/cert/login` | `200 application/json` | + +### Parameters + +- `name` `(string: "")` - Authenticate against only the named certificate role, + returning its policy list if successful. If not set, defaults to trying all + certificate roles and returning any one that matches. + +### Sample Payload + +```json +{ + "name": "cert1" +} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/cert/login +``` + +### Sample Response + +```json +{ + "auth": { + "client_token": "cf95f87d-f95b-47ff-b1f5-ba7bff850425", + "policies": [ + "web", + "stage" + ], + "lease_duration": 3600, + "renewable": true + } +} +``` \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/gcp/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/gcp/index.html.md new file mode 100644 index 0000000..cfedc24 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/gcp/index.html.md @@ -0,0 +1,482 @@ +--- +layout: "api" +page_title: "Google Cloud Platform Auth Plugin Backend - HTTP API" +sidebar_current: "docs-http-auth-gcp" +description: |- + This is the API documentation for the Vault GCP authentication + backend plugin. +--- + +# GCP Auth Plugin HTTP API + +This is the API documentation for the Vault GCP authentication backend +plugin.
To learn more about the usage and operation, see the +[Vault GCP backend documentation](/docs/auth/gcp.html). + +This documentation assumes the plugin backend is mounted at the +`/auth/gcp` path in Vault. Since it is possible to mount auth backends +at any location, please update your API calls accordingly. + +## Configure + +Configures the credentials required for the plugin to perform API calls +to GCP. These credentials will be used to query the status of IAM +entities and get service account or other Google public certificates +to confirm signed JWTs passed in during login. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/gcp/config` | `204 (empty body)` | + +### Parameters + +- `credentials` `(string: "")` - A marshaled JSON string that is the content + of a GCP credentials file. If you would rather specify a file, you can use + `credentials="@path/to/creds.json`. The GCP permissions + Vault currently requires are: + - `iam.serviceAccounts.get` + - `iam.serviceAccountKeys.get` + + If this value is not specified or if it is explicitly set to empty, + Vault will attempt to use [Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials) + for that server's machine. + +- `google_certs_endpoint` `(string: "")`: The Google OAuth2 endpoint to obtain public certificates for. This is used + primarily for testing and should generally not be set. If not set, will default to the [Google public certs + endpoint](https://www.googleapis.com/oauth2/v3/certs) + +### Sample Payload + +```json +{ + "credentials": "{ \"type\": \"service_account\", \"project_id\": \"project-123456\",...}" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/gcp/config +``` + +## Read Config + +Returns the previously configured config, including credentials. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/gcp/config` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/gcp/config +``` + +### Sample Response + +```json +{ + "data":{ + "client_email":"serviceaccount1@project-123456.iam.gserviceaccount.com", + "client_id":"...", + "private_key":"-----BEGIN PRIVATE KEY-----...-----END PRIVATE KEY-----\n", + "private_key_id":"...", + "project_id":"project-123456", + "google_certs_url": "" + }, + ... +} + +``` + +## Delete Config + +Deletes the previously configured GCP config and credentials. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/gcp/config` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/gcp/config +``` + +## Create Role + +Registers a role in the backend. Role types have specific entities +that can perform login operations against this endpoint. Constraints specific +to the role type must be set on the role. These are applied to the authenticated +entities attempting to login. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/gcp/role/:name` | `204 (empty body)` | + +### Parameters +- `name` `(string: )` - Name of the role. +- `type` `(string: )` - The type of this role. Only the + restrictions applicable to this role type will be allowed to + be configured on the role (see below). Valid choices are: `iam`. +- `project_id` `(string: "")` - Required. Only entities belonging to this + project can login for this role. +- `ttl` `(string: "")` - The TTL period of tokens issued using this role in + seconds. +- `max_ttl` `(string: "")` - The maximum allowed lifetime of tokens + issued in seconds using this role. 
+- `period` `(string: "")` - If set, indicates that the token generated using + this role should never expire. The token should be renewed within the duration + specified by this value. At each renewal, the token's TTL will be set to the + value of this parameter. +- `policies` `(array: [])` - Policies to be set on tokens issued using this + role. +- `bound_service_accounts` `(array: [])` - Required for `iam` roles. + A comma-separated list of service account emails or ids. + Defines the service accounts that login is restricted to. If set to `\*`, all + service accounts are allowed (role will still be bound by project). Will be + inferred from service account used to issue metadata token for GCE instances. + +**`iam`-only params**: + +- `max_jwt_exp` `(string: "")` - Optional, defaults to 900 (15min). + Number of seconds past the time of authentication that the login param JWT + must expire within. For example, if a user attempts to login with a token + that expires within an hour and this is set to 15 minutes, Vault will return + an error prompting the user to create a new signed JWT with a shorter `exp`. + The GCE metadata tokens currently do not allow the `exp` claim to be customized. + +- `allow_gce_inference` `(bool: true)` - A flag to determine if this role should + allow GCE instances to authenticate by inferring service accounts from the + GCE identity metadata token. + +- `service_accounts` `(array: [])` - Required for `iam` roles. + A comma-separated list of service account emails or ids. + Defines the service accounts that login is restricted to. If set to `*`, all + service accounts are allowed (role will still be bound by project). + +**`gce`-only params**: + +- `bound_zone` `(string: "")`: If set, determines the zone that a GCE instance must belong to. + If bound_instance_group is provided, it is assumed to be a zonal group and the group must belong to this zone. 
+ +- `bound_region` `(string: "")`: If set, determines the region that a GCE instance must belong to. + If bound_instance_group is provided, it is assumed to be a regional group and the group must belong to this region. + **If bound_zone is provided, region will be ignored.** + +- `bound_instance_group` `(string: "")`: If set, determines the instance group that an authorized instance must belong to. + bound_zone or bound_region must also be set if bound_instance_group is set. + +- `bound_labels` `(array: [])`: A comma-separated list of Google Cloud Platform labels formatted as "$key:$value" strings that + must be set on authorized GCE instances. Because GCP labels are not currently ACL'd, we recommend that this be used in + conjunction with other restrictions. + +### Sample Payload + +Example `iam` Role: + +```json +{ + "type": "iam", + "project": "project-123456", + "policies": [ + "default", + "dev", + "prod" + ], + "max_ttl": 1800000, + "max_jwt_exp": 10000, + "service_accounts": [ + "dev-1@project-123456.iam.gserviceaccount.com", + "dev-2@project-123456.iam.gserviceaccount.com", + "123456789" + ], + "allow_instance_migration": false +} +``` + +Example `gce` Role: + +```json +{ + "type": "gce", + "project": "project-123456", + "policies": [ + "default", + "dev", + "prod" + ], + "max_ttl": 1800000, + "max_jwt_exp": 10000, + "service_accounts": [ + "dev-1@project-123456.iam.gserviceaccount.com", + "dev-2@project-123456.iam.gserviceaccount.com", + "123456789" + ], + "allow_instance_migration": false +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/gcp/role/dev-role +``` + +## Edit Service Accounts For IAM Role + +Edit service accounts for an existing IAM role in the backend. +This allows you to add or remove service accounts from the list of +service accounts on the role. 
+ +| Method | Path | Produces | +| :------- | :---------------------------------------| :------------------| +| `POST` | `/auth/gcp/role/:name/service-accounts` | `204 (empty body)` | + +### Parameters +- `name` `(string: )` - Name of an existing `iam` role. + Returns error if role is not an `iam` role. +- `add` `(array: [])` - List of service accounts to add to the role's + service accounts +- `remove` `(array: [])` - List of service accounts to remove from the + role's service accounts + +### Sample Payload + +```json +{ + "add": [ + "dev-1@project-123456.iam.gserviceaccount.com", + "123456789" + ], + "remove": [ + "dev-2@project-123456.iam.gserviceaccount.com" + ] +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/gcp/role/dev-role +``` + +## Edit Labels For GCE Role + +Edit service accounts for an existing IAM role in the backend. +This allows you to add or remove service accounts from the list of +service accounts on the role. + +| Method | Path | Produces | +| :------- | :---------------------------------------| :------------------| +| `POST` | `/auth/gcp/role/:name/labels` | `204 (empty body)` | + +### Parameters +- `name` `(string: )` - Name of an existing `gce` role. Returns error if role is not an `gce` role. +- `add` `(array: [])` - List of `$key:$value` labels to add to the GCE role's bound labels. +- `remove` `(array: [])` - List of label keys to remove from the role's bound labels. + +### Sample Payload + +```json +{ + "add": [ + "foo:bar", + "env:dev", + "key:value" + ], + "remove": [ + "keyInLabel1, keyInLabel2" + ] +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/gcp/role/dev-role +``` + +## Read Role + +Returns the previously registered role configuration. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/gcp/role/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` - Name of the role. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/gcp/role/dev-role +``` + +### Sample Response + +```json +{ + "data":{ + "max_jwt_exp": 900, + "max_ttl": 0, + "ttl":0, + "period": 0, + "policies":[ + "default", + "dev", + "prod" + ], + "project_id":"project-123456", + "role_type":"iam", + "service_accounts": [ + "dev-1@project-123456.iam.gserviceaccount.com", + "dev-2@project-123456.iam.gserviceaccount.com", + "123456789", + ] + }, + ... +} + +``` + +## List Roles + +Lists all the roles that are registered with the plugin. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/gcp/roles` | `200 application/json` | +| `GET` | `/auth/gcp/roles?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/gcp/roles +``` + +### Sample Response + +```json +{ + "data": { + "keys": [ + "dev-role", + "prod-role" + ] + }, + ... +} +``` + +## Delete Role + +Deletes the previously registered role. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/gcp/role/:role` | `204 (empty body)` | + +### Parameters + +- `role` `(string: )` - Name of the role. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/gcp/role/dev-role +``` + +## Login + +Fetch a token. This endpoint takes a signed JSON Web Token (JWT) and +a role name for some entity. It verifies the JWT signature to authenticate that +entity and then authorizes the entity for the given role. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/gcp/login` | `200 application/json` | + +### Sample Payload + +- `role` `(string: "")` - Name of the role against which the login is being + attempted. +- `jwt` `(string: "")` - Signed [JSON Web Token](https://tools.ietf.org/html/rfc7519) (JWT). + For `iam`, this is a JWT generated using the IAM API method + [signJwt](https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/signJwt) + or a self-signed JWT. For `gce`, this is an [identity metadata token](https://cloud.google.com/compute/docs/instances/verifying-instance-identity#request_signature). + + +### Sample Payload + +```json +{ + "role": "dev-role", + "jwt": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." +} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/gcp/login +``` + +### Sample Response + +```json +{ + "auth":{ + "client_token":"f33f8c72-924e-11f8-cb43-ac59d697597c", + "accessor":"0e9e354a-520f-df04-6867-ee81cae3d42d", + "policies":[ + "default", + "dev", + "prod" + ], + "metadata":{ + "role": "dev-role", + "service_account_email": "dev1@project-123456.iam.gserviceaccount.com", + "service_account_id": "111111111111111111111" + }, + "lease_duration":2764800, + "renewable":true + }, + ... +} +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/github/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/github/index.html.md new file mode 100644 index 0000000..8020c73 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/github/index.html.md @@ -0,0 +1,139 @@ +--- +layout: "api" +page_title: "Github Auth Backend - HTTP API" +sidebar_current: "docs-http-auth-github" +description: |- + This is the API documentation for the Vault Github authentication backend. 
+--- + +# Github Auth Backend HTTP API + +This is the API documentation for the Vault Github authentication backend. For +general information about the usage and operation of the Github backend, please +see the [Vault Github backend documentation](/docs/auth/github.html). + +This documentation assumes the Github backend is mounted at the `/auth/github` +path in Vault. Since it is possible to mount auth backends at any location, +please update your API calls accordingly. + +## Configure Backend + +Configures the connection parameters for GitHub. This path honors the +distinction between the `create` and `update` capabilities inside ACL policies. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/github/config` | `204 (empty body)` | + +### Parameters + +- `organization` `(string: <required>)` - The organization users must be part + of. +- `base_url` `(string: "")` - The API endpoint to use. Useful if you are running + GitHub Enterprise or an API-compatible authentication server. +- `ttl` `(string: "")` - Duration after which authentication will be expired. +- `max_ttl` `(string: "")` - Maximum duration after which authentication will + be expired. + +### Sample Payload + +```json +{ + "organization": "acme-org" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/github/config +``` + +## Read Configuration + +Reads the GitHub configuration. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/github/config` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + https://vault.rocks/v1/auth/github/config +``` + +### Sample Response + +```json +{ + "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + "data": { + "organization": "acme-org", + "base_url": "", + "ttl": "", + "max_ttl": "" + }, + "warnings": null +} +``` + +## Login + +Login using GitHub access token. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/github/login` | `200 application/json` | + +### Parameters + +- `token` `(string: )` - GitHub personal API token. + +### Sample Payload + +```json +{ + "token": "ABC123..." +} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + https://vault.rocks/v1/auth/github/login +``` + +### Sample Response + +```javascript +{ + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": null, + "warnings": null, + "auth": { + "client_token": "64d2a8f2-2a2f-5688-102b-e6088b76e344", + "accessor": "18bb8f89-826a-56ee-c65b-1736dc5ea27d", + "policies": ["default"], + "metadata": { + "username": "fred", + "org": "acme-org" + }, + }, + "lease_duration": 7200, + "renewable": true +} + ``` \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/index.html.md new file mode 100644 index 0000000..b89d31c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/index.html.md @@ -0,0 +1,19 @@ +--- +layout: "api" +page_title: "HTTP API" +sidebar_current: "docs-http-auth" +description: |- + Each authentication backend publishes its own set of API paths and methods. + These endpoints are documented in this section. +--- + +# Authentication Backends + +Each authentication backend publishes its own set of API paths and methods. +These endpoints are documented in this section. 
Authentication backends are +mounted at a path, but the documentation will assume the default mount points for +simplicity. If you are mounting at a different path, you should adjust your API +calls accordingly. + +For the API documentation for a specific authentication backend, please choose an +authentication backend from the navigation. diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/kubernetes/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/kubernetes/index.html.md new file mode 100644 index 0000000..3963361 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/kubernetes/index.html.md @@ -0,0 +1,293 @@ +--- +layout: "api" +page_title: "Kubernetes Auth Plugin Backend - HTTP API" +sidebar_current: "docs-http-auth-kubernetes" +description: |- + This is the API documentation for the Vault Kubernetes authentication + backend plugin. +--- + +# Kubernetes Auth Plugin HTTP API + +This is the API documentation for the Vault Kubernetes authentication backend +plugin. To learn more about the usage and operation, see the +[Vault Kubernetes backend documentation](/docs/auth/kubernetes.html). + +This documentation assumes the backend is mounted at the +`/auth/kubernetes` path in Vault. Since it is possible to mount auth backends +at any location, please update your API calls accordingly. + +## Configure + +The Kubernetes Auth backend validates service account JWTs and verifies their +existence with the Kubernetes TokenReview API. This endpoint configures the +public key used to validate the JWT signature and the necessary information to +access the Kubernetes API. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/kubernetes/config` | `204 (empty body)` | + +### Parameters + - `pem_keys` `(array: <required>)` - List of PEM-formatted public keys or certificates + used to verify the signatures of kubernetes service account + JWTs. 
If a certificate is given, its public key will be + extracted. + - `kubernetes_host` `(string: )` - Host must be a host string, a host:port pair, or a URL to the base of the Kubernetes API server. + - `kubernetes_ca_cert` `(string: "")` - PEM encoded CA cert for use by the TLS client used to talk with the API. + +### Sample Payload + +```json +{ + "pem_keys": "-----BEGIN CERTIFICATE-----.....-----END CERTIFICATE-----", + "kubernetes_host": "https://192.168.99.100:8443", + "kubernetes_ca_cert": "-----BEGIN CERTIFICATE-----.....-----END CERTIFICATE-----" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/kubernetes/config +``` + +## Read Config + +Returns the previously configured config, including credentials. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/kubernetes/config` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/kubernetes/config +``` + +### Sample Response + +```json +{ + "data":{ + "pem_keys": "-----BEGIN CERTIFICATE-----.....-----END CERTIFICATE-----", + "kubernetes_host": "https://192.168.99.100:8443", + "kubernetes_ca_cert": "-----BEGIN CERTIFICATE-----.....-----END CERTIFICATE-----" + }, + ... +} + +``` + +## Create Role + +Registers a role in the backend. Role types have specific entities +that can perform login operations against this endpoint. Constraints specific +to the role type must be set on the role. These are applied to the authenticated +entities attempting to login. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/kubernetes/role/:name`| `204 (empty body)` | + +### Parameters +- `name` `(string: )` - Name of the role. 
+- `bound_service_account_names` `(array: )` - List of service account + names able to access this role. If set to "\*" all names are allowed, both this + and bound_service_account_namespaces can not be "\*". +- `bound_service_account_namespaces` `(array: )` - List of namespaces + allowed to access this role. If set to "\*" all namespaces are allowed, both + this and bound_service_account_names can not be set to "\*". +- `ttl` `(string: "")` - The TTL period of tokens issued using this role in + seconds. +- `max_ttl` `(string: "")` - The maximum allowed lifetime of tokens + issued in seconds using this role. +- `period` `(string: "")` - If set, indicates that the token generated using + this role should never expire. The token should be renewed within the duration + specified by this value. At each renewal, the token's TTL will be set to the + value of this parameter. +- `policies` `(array: [])` - Policies to be set on tokens issued using this + role. + +### Sample Payload + +```json +{ + "bound_service_account_names": "vault-auth", + "bound_service_account_namespaces": "default", + "policies": [ + "dev", + "prod" + ], + "max_ttl": 1800000, +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/kubernetes/role/dev-role +``` +## Read Role + +Returns the previously registered role configuration. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/kubernetes/role/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` - Name of the role. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + https://vault.rocks/v1/auth/kubernetes/role/dev-role +``` + +### Sample Response + +```json +{ + "data":{ + "bound_service_account_names": "vault-auth", + "bound_service_account_namespaces": "default", + "max_ttl": 1800000, + "ttl":0, + "period": 0, + "policies":[ + "dev", + "prod" + ] + }, + ... +} + +``` + +## List Roles + +Lists all the roles that are registered with the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/kubernetes/roles` | `200 application/json` | +| `GET` | `/auth/kubernetes/roles?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/kubernetes/roles +``` + +### Sample Response + +```json +{ + "data": { + "keys": [ + "dev-role", + "prod-role" + ] + }, + ... +} +``` + +## Delete Role + +Deletes the previously registered role. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/kubernetes/role/:role`| `204 (empty body)` | + +### Parameters + +- `role` `(string: <required>)` - Name of the role. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/kubernetes/role/dev-role +``` + +## Login + +Fetch a token. This endpoint takes a signed JSON Web Token (JWT) and +a role name for some entity. It verifies the JWT signature to authenticate that +entity and then authorizes the entity for the given role. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/kubernetes/login` | `200 application/json` | + +### Parameters + +- `role` `(string: <required>)` - Name of the role against which the login is being + attempted. +- `jwt` `(string: <required>)` - Signed [JSON Web + Token](https://tools.ietf.org/html/rfc7519) (JWT) for authenticating a service + account. 
+ +### Sample Payload + +```json +{ + "role": "dev-role", + "jwt": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." +} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/kubernetes/login +``` + +### Sample Response + +```json +{ + "auth": { + "client_token": "62b858f9-529c-6b26-e0b8-0457b6aacdb4", + "accessor": "afa306d0-be3d-c8d2-b0d7-2676e1c0d9b4", + "policies": [ + "default" + ], + "metadata": { + "role": "test", + "service_account_name": "vault-auth", + "service_account_namespace": "default", + "service_account_secret_name": "vault-auth-token-pd21c", + "service_account_uid": "aa9aa8ff-98d0-11e7-9bb7-0800276d99bf" + }, + "lease_duration": 2764800, + "renewable": true + } + ... +} +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/ldap/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/ldap/index.html.md new file mode 100644 index 0000000..b0ff4ec --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/ldap/index.html.md @@ -0,0 +1,452 @@ +--- +layout: "api" +page_title: "LDAP Auth Backend - HTTP API" +sidebar_current: "docs-http-auth-ldap" +description: |- + This is the API documentation for the Vault LDAP authentication backend. +--- + +# LDAP Auth Backend HTTP API + +This is the API documentation for the Vault LDAP authentication backend. For +general information about the usage and operation of the LDAP backend, please +see the [Vault LDAP backend documentation](/docs/auth/ldap.html). + +This documentation assumes the LDAP backend is mounted at the `/auth/ldap` +path in Vault. Since it is possible to mount auth backends at any location, +please update your API calls accordingly. + +## Configure LDAP Backend + +This endpoint configures the LDAP authentication backend. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/ldap/config` | `204 (empty body)` | + +### Parameters + +- `url` `(string: )` – The LDAP server to connect to. Examples: + `ldap://ldap.myorg.com`, `ldaps://ldap.myorg.com:636` +- `starttls` `(bool: false)` – If true, issues a `StartTLS` command after + establishing an unencrypted connection. +- `tls_min_version` `(string: tls12)` – Minimum TLS version to use. Accepted + values are `tls10`, `tls11` or `tls12`. +- `tls_max_version` `(string: tls12)` – Maximum TLS version to use. Accepted + values are `tls10`, `tls11` or `tls12`. +- `insecure_tls` `(bool: false)` – If true, skips LDAP server SSL certificate + verification - insecure, use with caution! +- `certificate` `(string: "")` – CA certificate to use when verifying LDAP server + certificate, must be x509 PEM encoded. +- `binddn` `(string: "")` – Distinguished name of object to bind when performing + user search. Example: `cn=vault,ou=Users,dc=example,dc=com` +- `bindpass` `(string: "")` – Password to use along with `binddn` when performing + user search. +- `userdn` `(string: "")` – Base DN under which to perform user search. Example: + `ou=Users,dc=example,dc=com` +- `userattr` `(string: "")` – Attribute on user attribute object matching the + username passed when authenticating. Examples: `sAMAccountName`, `cn`, `uid` +- `discoverdn` `(bool: false)` – Use anonymous bind to discover the bind DN of a + user. +- `deny_null_bind` `(bool: true)` – This option prevents users from bypassing + authentication when providing an empty password. +- `upndomain` `(string: "")` – The userPrincipalDomain used to construct the UPN + string for the authenticating user. The constructed UPN will appear as + `[username]@UPNDomain`. Example: `example.com`, which will cause vault to bind + as `username@example.com`. +- `groupfilter` `(string: "")` – Go template used when constructing the group + membership query. 
The template can access the following context variables: + \[`UserDN`, `Username`\]. The default is + `(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`, + which is compatible with several common directory schemas. To support + nested group resolution for Active Directory, instead use the following + query: `(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))`. +- `groupdn` `(string: "")` – LDAP search base to use for group membership + search. This can be the root containing either groups or users. Example: + `ou=Groups,dc=example,dc=com` +- `groupattr` `(string: "")` – LDAP attribute to follow on objects returned by + `groupfilter` in order to enumerate user group membership. Examples: for + groupfilter queries returning _group_ objects, use: `cn`. For queries + returning _user_ objects, use: `memberOf`. The default is `cn`. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/ldap/config +``` + +### Sample Payload + +```json +{ + "binddn": "cn=vault,ou=Users,dc=example,dc=com", + "deny_null_bind": true, + "discoverdn": false, + "groupattr": "cn", + "groupdn": "ou=Groups,dc=example,dc=com", + "groupfilter": "(\u0026(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))", + "insecure_tls": false, + "starttls": false, + "tls_max_version": "tls12", + "tls_min_version": "tls12", + "url": "ldaps://ldap.myorg.com:636", + "userattr": "samaccountname", + "userdn": "ou=Users,dc=example,dc=com" +} +``` + +## Read LDAP Configuration + +This endpoint retrieves the LDAP configuration for the authentication backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/ldap/config` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + https://vault.rocks/v1/auth/ldap/config +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "binddn": "cn=vault,ou=Users,dc=example,dc=com", + "bindpass": "", + "certificate": "", + "deny_null_bind": true, + "discoverdn": false, + "groupattr": "cn", + "groupdn": "ou=Groups,dc=example,dc=com", + "groupfilter": "(\u0026(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))", + "insecure_tls": false, + "starttls": false, + "tls_max_version": "tls12", + "tls_min_version": "tls12", + "upndomain": "", + "url": "ldaps://ldap.myorg.com:636", + "userattr": "samaccountname", + "userdn": "ou=Users,dc=example,dc=com" + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## List LDAP Groups + +This endpoint returns a list of existing groups in the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/ldap/groups` | `200 application/json` | +| `GET` | `/auth/ldap/groups?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/ldap/groups +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "keys": [ + "scientists", + "engineers" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Read LDAP Group + +This endpoint returns the policies associated with a LDAP group. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/ldap/groups/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – The name of the LDAP group + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + https://vault.rocks/v1/auth/ldap/groups/admins +``` + +### Sample Response + +```json +{ + "data": { + "policies": "admin,default" + }, + "renewable": false, + "lease_id": "" + "lease_duration": 0, + "warnings": null +} +``` + +## Create/Update LDAP Group + +This endpoint creates or updates LDAP group policies. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/ldap/groups/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – The name of the LDAP group +- `policies` `(string: "")` – Comma-separated list of policies associated to the + group. + +### Sample Payload + +```json +{ + "policies": "admin,default" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/ldap/groups/admins +``` + +## Delete LDAP Group + +This endpoint deletes the LDAP group and policy association. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/ldap/groups/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – The name of the LDAP group + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/ldap/groups/admins +``` + +## List LDAP Users + +This endpoint returns a list of existing users in the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/ldap/users` | `200 application/json` | +| `GET` | `/auth/ldap/users?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request LIST \ + https://vault.rocks/v1/auth/ldap/users +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "keys": [ + "mitchellh", + "armon" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Read LDAP User + +This endpoint returns the policies associated with a LDAP user. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/ldap/users/:username` | `200 application/json` | + +### Parameters + +- `username` `(string: )` – The username of the LDAP user + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/ldap/users/mitchellh +``` + +### Sample Response + +```json +{ + "data": { + "policies": "admin,default", + "groups": "" + }, + "renewable": false, + "lease_id": "" + "lease_duration": 0, + "warnings": null +} +``` + +## Create/Update LDAP User + +This endpoint creates or updates LDAP users policies and group associations. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/ldap/users/:username` | `204 (empty body)` | + +### Parameters + +- `username` `(string: )` – The username of the LDAP user +- `policies` `(string: "")` – Comma-separated list of policies associated to the + user. +- `groups` `(string: "")` – Comma-separated list of groups associated to the + user. + +### Sample Payload + +```json +{ + "policies": "admin,default" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/ldap/users/mitchellh +``` + +## Delete LDAP User + +This endpoint deletes the LDAP user and policy association. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/ldap/users/:username` | `204 (empty body)` | + +### Parameters + +- `username` `(string: )` – The username of the LDAP user + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/ldap/users/mitchellh +``` + +## Login with LDAP User + +This endpoint allows you to log in with LDAP credentials + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/ldap/login/:username` | `200 application/json` | + +### Parameters + +- `username` `(string: )` – The username of the LDAP user +- `password` `(string: )` – The password for the LDAP user + +### Sample Payload + +```json +{ + "password": "MyPassword1" +} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/ldap/login/mitchellh +``` + +### Sample Response + +```json +{ + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": null, + "auth": { + "client_token": "c4f280f6-fdb2-18eb-89d3-589e2e834cdb", + "policies": [ + "admins", + "default" + ], + "metadata": { + "username": "mitchellh" + }, + "lease_duration": 0, + "renewable": false + } +} +``` + diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/okta/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/okta/index.html.md new file mode 100644 index 0000000..cff51af --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/okta/index.html.md @@ -0,0 +1,395 @@ +--- +layout: "api" +page_title: "Okta Auth Backend - HTTP API" +sidebar_current: "docs-http-auth-okta" +description: |- + This is the API documentation for the Vault Okta authentication backend. +--- + +# Okta Auth Backend HTTP API + +This is the API documentation for the Vault Okta authentication backend. 
For +general information about the usage and operation of the Okta backend, please +see the [Vault Okta backend documentation](/docs/auth/okta.html). + +This documentation assumes the Okta backend is mounted at the `/auth/okta` +path in Vault. Since it is possible to mount auth backends at any location, +please update your API calls accordingly. + +## Create Configuration + +Configures the connection parameters for Okta. This path honors the +distinction between the `create` and `update` capabilities inside ACL policies. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/okta/config` | `204 (empty body)` | + +### Parameters + +- `org_name` `(string: )` - Name of the organization to be used in the + Okta API. +- `api_token` `(string: "")` - Okta API token. This is required to query Okta + for user group membership. If this is not supplied only locally configured + groups will be enabled. +- `base_url` `(string: "")` - If set, will be used as the base domain + for API requests. Examples are okta.com, oktapreview.com, and okta-emea.com. +- `ttl` `(string: "")` - Duration after which authentication will be expired. +- `max_ttl` `(string: "")` - Maximum duration after which authentication will + be expired. + +### Sample Payload + +```json +{ + "org_name": "example", + "api_token": "abc123" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/okta/config +``` + +## Read Configuration + +Reads the Okta configuration. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/okta/config` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + https://vault.rocks/v1/auth/okta/config +``` + +### Sample Response + +```json +{ + "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + "data": { + "org_name": "example", + "api_token": "abc123", + "base_url": "okta.com", + "ttl": "", + "max_ttl": "" + }, + "warnings": null +} +``` + +## List Users + +List the users configurated in the Okta backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/okta/users` | `200 application/json` | +| `GET` | `/auth/okta/users?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/okta/users +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "keys": [ + "fred", + "jane" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Register User + +Registers a new user and maps a set of policies to it. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/okta/users/:username` | `204 (empty body)` | + +### Parameters + +- `username` `(string: )` - Name of the user. +- `groups` `(string: "")` - Comma-separated list of groups associated with the + user. +- `policies` `(string: "")` - Comma-separated list of policies associated with + the user. + +```json +{ + "policies": "dev,prod", +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/okta/users/fred +``` + +## Read User + +Reads the properties of an existing username. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/okta/users/:username` | `200 application/json` | + +### Parameters + +- `username` `(string: )` - Username for this user. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/okta/users/test-user +``` + +### Sample Response + +```json +{ + "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + "data": { + "policies": "default,dev", + "groups": "" + }, + "warnings": null +} +``` + +## Delete User + +Deletes an existing username from the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/okta/users/:username` | `204 (empty body)` | + +### Parameters + +- `username` `(string: )` - Username for this user. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/okta/users/test-user +``` + +## List Groups + +List the groups configurated in the Okta backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/okta/groups` | `200 application/json` | +| `GET` | `/auth/okta/groups?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/okta/groups +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "keys": [ + "admins", + "dev-users" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Register Group + +Registers a new group and maps a set of policies to it. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/okta/groups/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` - The name of the group. +- `policies` `(string: "")` - Comma-separated list of policies associated with + the group. + +```json +{ + "policies": "dev,prod", +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/okta/groups/admins +``` + +## Read Group + +Reads the properties of an existing group. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/okta/groups/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` - The name for the group. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/okta/groups/admins +``` + +### Sample Response + +```json +{ + "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + "data": { + "policies": "default,admin" + }, + "warnings": null +} +``` + +## Delete Group + +Deletes an existing group from the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/okta/groups/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` - The name for the group. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/okta/users/test-user +``` + +## Login + +Login with the username and password. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/okta/login/:username` | `200 application/json` | + +### Parameters + +- `username` `(string: )` - Username for this user. 
+- `password` `(string: )` - Password for the autheticating user. + +### Sample Payload + +```json +{ + "password": "Password!" +} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/okta/login/fred +``` + +### Sample Response + +```javascript +{ + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": null, + "warnings": null, + "auth": { + "client_token": "64d2a8f2-2a2f-5688-102b-e6088b76e344", + "accessor": "18bb8f89-826a-56ee-c65b-1736dc5ea27d", + "policies": ["default"], + "metadata": { + "username": "fred", + "policies": "default" + }, + }, + "lease_duration": 7200, + "renewable": true +} + ``` \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/radius/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/radius/index.html.md new file mode 100644 index 0000000..9132e0a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/radius/index.html.md @@ -0,0 +1,237 @@ +--- +layout: "api" +page_title: "RADIUS Auth Backend - HTTP API" +sidebar_current: "docs-http-auth-radius" +description: |- + This is the API documentation for the Vault RADIUS authentication backend. +--- + +# RADIUS Auth Backend HTTP API + +This is the API documentation for the Vault RADIUS authentication backend. For +general information about the usage and operation of the RADIUS backend, please +see the [Vault RADIUS backend documentation](/docs/auth/radius.html). + +This documentation assumes the RADIUS backend is mounted at the `/auth/radius` +path in Vault. Since it is possible to mount auth backends at any location, +please update your API calls accordingly. + +## Configure RADIUS + +Configures the connection parameters and shared secret used to communicate with +RADIUS. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/radius/config` | `204 (empty body)` | + +### Parameters + +- `host` `(string: )` - The RADIUS server to connect to. Examples: + `radius.myorg.com`, `127.0.0.1` +- `port` `(integer: 1812)` - The UDP port where the RADIUS server is listening + on. Default is 1812. +- `secret` `(string: )` - The RADIUS shared secret. +- `unregistered_user_policies` `(string: "")` - A comma-separated list of + policies to be granted to unregistered users. +- `dial_timeout` `(integer: 10)` - Number of seconds to wait for a backend + connection before timing out. Default is 10. +- `nas_port` `(integer: 10)` - The NAS-Port attribute of the RADIUS request. + Default is 10. + +### Sample Payload + +```json +{ + "host": "radius.myorg.com", + "port": 1812, + "secret": "mySecret" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/radius/config +``` + +## Register User + +Registers a new user and maps a set of policies to it. This path honors the +distinction between the `create` and `update` capabilities inside ACL policies. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/radius/users/:username` | `204 (empty body)` | + +### Parameters + +- `username` `(string: )` - Username for this user. +- `policies` `(string: "")` - Comma-separated list of policies. If set to + empty string, only the `default` policy will be applicable to the user. + +```json +{ + "policies": "dev,prod" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/radius/users/test-user +``` + +## Read User + +Reads the properties of an existing username. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/radius/users/:username` | `200 application/json` | + +### Parameters + +- `username` `(string: )` - Username for this user. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/radius/users/test-user +``` + +### Sample Response + +```json +{ + "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + "data": { + "policies": "default,dev" + }, + "warnings": null +} +``` + +## Delete User + +Deletes an existing username from the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/radius/users/:username` | `204 (empty body)` | + +### Parameters + +- `username` `(string: )` - Username for this user. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/radius/users/test-user +``` + +## List Users + +List the users registered with the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/radius/users` | `200 application/json` | +| `GET` | `/auth/radius/users?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/radius/users +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "keys": [ + "devuser", + "produser" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Login + +Login with the username and password. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/radius/login` | `200 application/json` | +| `POST` | `/auth/radius/login/:username` | `200 application/json` | + +### Parameters + +- `username` `(string: )` - Username for this user. +- `password` `(string: )` - Password for the authenticating user. + +### Sample Payload + +```json +{ + "password": "Password!" +} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/radius/login/test-user +``` + +### Sample Response + +```javascript +{ + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": null, + "warnings": null, + "auth": { + "client_token": "64d2a8f2-2a2f-5688-102b-e6088b76e344", + "accessor": "18bb8f89-826a-56ee-c65b-1736dc5ea27d", + "policies": ["default"], + "metadata": { + "username": "vishal" + }, + }, + "lease_duration": 7200, + "renewable": true +} + ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/token/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/token/index.html.md new file mode 100644 index 0000000..10c88a6 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/token/index.html.md @@ -0,0 +1,704 @@ +--- +layout: "api" +page_title: "Token Auth Backend - HTTP API" +sidebar_current: "docs-http-auth-token" +description: |- + This is the API documentation for the Vault token authentication backend. +--- + +# Token Auth Backend HTTP API + +This is the API documentation for the Vault token authentication backend. For +general information about the usage and operation of the token backend, please +see the [Vault Token backend documentation](/docs/auth/token.html). + +## List Accessors + +This endpoint lists token accessors. 
This requires `sudo` capability, and access +to it should be tightly controlled as the accessors can be used to revoke very +large numbers of tokens and their associated leases at once. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/token/accessors` | `200 application/json` | +| `GET` | `/auth/token/accessors?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/auth/token/accessors +``` + +### Sample Response + +```json +{ + "auth": null, + "warnings": null, + "wrap_info": null, + "data": { + "keys": [ + "476ea048-ded5-4d07-eeea-938c6b4e43ec", + "bb00c093-b7d3-b0e9-69cc-c4d85081165b" + ] + }, + "lease_duration": 0, + "renewable": false, + "lease_id": "" +} +``` + +## Create Token + +Creates a new token. Certain options are only available when called by a +root token. If used via the `/auth/token/create-orphan` endpoint, a root +token is not required to create an orphan token (otherwise set with the +`no_parent` option). If used with a role name in the path, the token will +be created against the specified role name; this may override options set +during this call. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/token/create` | `200 application/json` | +| `POST` | `/auth/token/create-orphan` | `200 application/json` | +| `POST` | `/auth/token/create/:role_name` | `200 application/json` | + +### Parameters + +- `id` `(string: "")` – The ID of the client token. Can only be specified by a + root token. Otherwise, the token ID is a randomly generated UUID. +- `role_name` `(string: "")` – The name of the token role. +- `policies` `(array: "")` – A list of policies for the token. This must be a + subset of the policies belonging to the token making the request, unless root. 
+ If not specified, defaults to all the policies of the calling token. +- `meta` `(map: {})` – A map of string to string valued metadata. This is + passed through to the audit backends. +- `no_parent` `(bool: false)` - If true and set by a root caller, the token will + not have the parent token of the caller. This creates a token with no parent. +- `no_default_policy` `(bool: false)` - If true the `default` policy will not be + contained in this token's policy set. +- `renewable` `(bool: true)` - Set to `false` to disable the ability of the token + to be renewed past its initial TTL. Setting the value to `true` will allow + the token to be renewable up to the system/mount maximum TTL. +- `lease` `(string: "")` - DEPRECATED; use `ttl` instead +- `ttl` `(string: "")` - The TTL period of the token, provided as "1h", where + hour is the largest suffix. If not provided, the token is valid for the + [default lease TTL](/docs/configuration/index.html), or indefinitely if the + root policy is used. +- `explicit_max_ttl` `(string: "")` - If set, the token will have an explicit + max TTL set upon it. This maximum token TTL *cannot* be changed later, and + unlike with normal tokens, updates to the system/mount max TTL value will + have no effect at renewal time -- the token will never be able to be renewed + or used past the value set at issue time. +- `display_name` `(string: "token")` - The display name of the token. +- `num_uses` `(integer: 0)` - The maximum uses for the given token. This can be + used to create a one-time-token or limited use token. The value of 0 has no + limit to the number of uses. +- `period` `(string: "")` - If specified, the token will be periodic; it will have + no maximum TTL (unless an "explicit-max-ttl" is also set) but every renewal + will use the given period. Requires a root/sudo token to use. 
+ +### Sample Payload + +```json +{ + "policies": [ + "web", + "stage" + ], + "metadata": { + "user": "armon" + }, + "ttl": "1h", + "renewable": true +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/token/create +``` + +### Sample Response + +```json +{ + "auth": { + "client_token": "ABCD", + "policies": [ + "web", + "stage" + ], + "metadata": { + "user": "armon" + }, + "lease_duration": 3600, + "renewable": true, + } +} +``` + +## Lookup a Token + +Returns information about the client token. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/token/lookup` | `200 application/json` | +| `GET` | `/auth/token/lookup/:token` | `200 application/json` | + +### Parameters + +- `token` `(string: )` - Token to lookup. + +### Sample Payload + +```json +{ + "token": "ClientToken" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/token/lookup +``` + +### Sample Response + +```json +{ + "data": { + "id": "ClientToken", + "policies": [ + "web", + "stage" + ], + "path": "auth/github/login", + "meta": { + "user": "armon", + "organization": "hashicorp" + }, + "display_name": "github-armon", + "num_uses": 0, + } +} +``` + +## Lookup a Token (Self) + +Returns information about the current client token. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/token/lookup-self` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + https://vault.rocks/v1/auth/token/lookup-self +``` + +### Sample Response + +```json +{ + "data": { + "id": "ClientToken", + "policies": [ + "web", + "stage" + ], + "path": "auth/github/login", + "meta": { + "user": "armon", + "organization": "hashicorp" + }, + "display_name": "github-armon", + "num_uses": 0, + } +} +``` + +## Lookup a Token Accessor + +Returns information about the client token from the accessor. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/token/lookup-accessor` | `200 application/json` | +| `GET` | `/auth/token/lookup-accessor/:accessor` | `200 application/json` | + +### Parameters + +- `accessor` `(string: )` - Token accessor to lookup. + +### Sample Payload + +```json +{ + "accessor": "2c84f488-2133-4ced-87b0-570f93a76830" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/token/lookup-accessor +``` + +### Sample Response + +```json +{ + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "creation_time": 1457533232, + "creation_ttl": 2764800, + "display_name": "token", + "meta": null, + "num_uses": 0, + "orphan": false, + "path": "auth/token/create", + "policies": [ + "default", + "web" + ], + "ttl": 2591976 + }, + "warnings": null, + "auth": null +} +``` + +## Renew a Token + +Renews a lease associated with a token. This is used to prevent the expiration +of a token, and the automatic revocation of it. Token renewal is possible only +if there is a lease associated with it. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/token/renew` | `200 application/json` | +| `POST` | `/auth/token/renew/:token` | `200 application/json` | + +### Parameters + +- `token` `(string: )` - Token to renew. This can be part of the URL + or the body. 
+- `increment` `(string: "")` - An optional requested lease increment can be + provided. This increment may be ignored. + +### Sample Payload + +```json +{ + "token": "ClientToken" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/token/renew +``` + +### Sample Response + +```json +{ + "auth": { + "client_token": "ABCD", + "policies": [ + "web", + "stage" + ], + "metadata": { + "user": "armon" + }, + "lease_duration": 3600, + "renewable": true, + } +} +``` + +## Renew a Token (Self) + +Renews a lease associated with the calling token. This is used to prevent the +expiration of a token, and the automatic revocation of it. Token renewal is +possible only if there is a lease associated with it. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/token/renew-self` | `200 application/json` | + +### Parameters + +- `increment` `(string: "")` - An optional requested lease increment can be + provided. This increment may be ignored. + +### Sample Payload + +```json +{ + "increment": "1h" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/token/renew-self +``` + +### Sample Response + +```json +{ + "auth": { + "client_token": "ABCD", + "policies": [ + "web", + "stage" + ], + "metadata": { + "user": "armon" + }, + "lease_duration": 3600, + "renewable": true, + } +} +``` + +## Revoke a Token + +Revokes a token and all child tokens. When the token is revoked, all secrets +generated with it are also revoked. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/token/revoke` | `204 (empty body)` | + +### Parameters + +- `token` `(string: )` - Token to revoke. 
+ +### Sample Payload + +```json +{ + "token": "ClientToken" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/token/revoke +``` + +## Revoke a Token (Self) + +Revokes the token used to call it and all child tokens. When the token is +revoked, all dynamic secrets generated with it are also revoked. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/token/revoke-self` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + https://vault.rocks/v1/auth/token/revoke-self +``` + +## Revoke a Token Accessor + +Revoke the token associated with the accessor and all the child tokens. This is +meant for purposes where there is no access to token ID but there is need to +revoke a token and its children. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/token/revoke-accessor` | `204 (empty body)` | + +### Parameters + +- `accessor` `(string: )` - Accessor of the token. + +### Sample Payload + +```json +{ + "accessor": "2c84f488-2133-4ced-87b0-570f93a76830" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/token/revoke-accessor +``` + +## Revoke Token and Orphan Children + +Revokes a token but not its child tokens. When the token is revoked, all secrets +generated with it are also revoked. All child tokens are orphaned, but can be +revoked subsequently using `/auth/token/revoke/`. This is a root-protected +endpoint. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/token/revoke-orphan` | `204 (empty body)` | +| `POST` | `/auth/token/revoke-orphan/:token` | `204 (empty body)` | + +### Parameters + +- `token` `(string: )` - Token to revoke. This can be part of the URL + or the body. + +### Sample Payload + +```json +{ + "token": "ClientToken" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/token/revoke-orphan +``` + +## Read Token Role + +Fetches the named role configuration. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/token/roles/:role_name`| `200 application/json` | + +### Parameters + +- `role_name` `(string: )` - The name of the token role. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/token/roles/nomad +``` + +### Sample Response + +```javascript +{ + "request_id": "075a19cd-4e56-a3ca-d956-7609819831ec", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + "data": { + "allowed_policies": [ + "dev" + ], + "disallowed_policies": [], + "explicit_max_ttl": 0, + "name": "nomad", + "orphan": false, + "path_suffix": "", + "period": 0, + "renewable": true + }, + "warnings": null +} +``` + +## List Token Roles + +List available token roles. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/token/roles` | `200 application/json` | +| `GET` | `/auth/token/roles?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST + https://vault.rocks/v1/auth/token/roles +``` + +### Sample Response + +```json +{ + "data": { + "keys": [ + "role1", + "role2" + ] + } +} +``` + +## Create/Update Token Role + +Creates (or replaces) the named role. 
Roles enforce specific behavior when +creating tokens that allow token functionality that is otherwise not +available or would require `sudo`/root privileges to access. Role +parameters, when set, override any provided options to the `create` +endpoints. The role name is also included in the token path, allowing all +tokens created against a role to be revoked using the `sys/revoke-prefix` +endpoint. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/token/roles/:role_name` | `204 (empty body)` | + +### Parameters + +- `role_name` `(string: )` – The name of the token role. +- `allowed_policies` `(list: [])` – If set, tokens can be created with any + subset of the policies in this list, rather than the normal semantics of + tokens being a subset of the calling token's policies. The parameter is a + comma-delimited string of policy names. If at creation time + `no_default_policy` is not set and `"default"` is not contained in + `disallowed_policies`, the `"default"` policy will be added to the created + token automatically. +- `disallowed_policies` `(list: [])` – If set, successful token creation via + this role will require that no policies in the given list are requested. The + parameter is a comma-delimited string of policy names. Adding `"default"` to + this list will prevent `"default"` from being added automatically to created + tokens. +- `orphan` `(bool: true)` - If `true`, tokens created against this policy will + be orphan tokens (they will have no parent). As such, they will not be + automatically revoked by the revocation of any other token. +- `period` `(string: "")` - If specified, the token will be periodic; it will have + no maximum TTL (unless an "explicit-max-ttl" is also set) but every renewal + will use the given period. Requires a root/sudo token to use. +- `renewable` `(bool: true)` - Set to `false` to disable the ability of the token + to be renewed past its initial TTL. 
Setting the value to `true` will allow + the token to be renewable up to the system/mount maximum TTL. +- `explicit_max_ttl` `(string: "")` - If set, the token will have an explicit + max TTL set upon it. This maximum token TTL *cannot* be changed later, and + unlike with normal tokens, updates to the system/mount max TTL value will + have no effect at renewal time -- the token will never be able to be renewed + or used past the value set at issue time. +- `path_suffix` `(string: "")` - If set, tokens created against this role will + have the given suffix as part of their path in addition to the role name. This + can be useful in certain scenarios, such as keeping the same role name in the + future but revoking all tokens created against it before some point in time. + The suffix can be changed, allowing new callers to have the new suffix as part + of their path, and then tokens with the old suffix can be revoked via + `sys/revoke-prefix`. + +### Sample Payload + +```json + "allowed_policies": [ + "dev" + ], + "name": "nomad", + "orphan": false, + "renewable": true +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST + --data @payload.json + https://vault.rocks/v1/auth/token/roles/nomad +``` + +## Delete Token Role + +This endpoint deletes the named token role. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/token/roles/:role_name` | `204 (empty body)` | + +### Parameters + +- `role_name` `(string: )` - The name of the token role. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/token/roles/admins +``` + +## Tidy Tokens + +Performs some maintenance tasks to clean up invalid entries that may remain +in the token store. Generally, running this is not needed unless upgrade +notes or support personnel suggest it. 
This may perform a lot of I/O to the +storage backend so should be used sparingly. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/token/tidy` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + https://vault.rocks/v1/auth/token/tidy +``` \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/api/auth/userpass/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/auth/userpass/index.html.md new file mode 100644 index 0000000..42a35d9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/auth/userpass/index.html.md @@ -0,0 +1,254 @@ +--- +layout: "api" +page_title: "Userpass Auth Backend - HTTP API" +sidebar_current: "docs-http-auth-userpass" +description: |- + This is the API documentation for the Vault username and password + authentication backend. +--- + +# Username & Password Auth Backend HTTP API + +This is the API documentation for the Vault Username & Password authentication backend. For +general information about the usage and operation of the Username and Password backend, please +see the [Vault Userpass backend documentation](/docs/auth/userpass.html). + +This documentation assumes the Username & Password backend is mounted at the `/auth/userpass` +path in Vault. Since it is possible to mount auth backends at any location, +please update your API calls accordingly. + +## Create/Update User + +Create a new user or update an existing user. This path honors the distinction between the `create` and `update` capabilities inside ACL policies. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/userpass/users/:username` | `204 (empty body)` | + +### Parameters + +- `username` `(string: )` – The username for the user. +- `password` `(string: )` - The password for the user. 
Only required + when creating the user. +- `policies` `(string: "")` – Comma-separated list of policies. If set to empty + string, only the `default` policy will be applicable to the user. +- `ttl` `(string: "")` - The lease duration which decides login expiration. +- `max_ttl` `(string: "")` - Maximum duration after which login should expire. + +### Sample Payload + +```json +{ + "password": "superSecretPassword", + "policies": "admin,default" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/userpass/users/mitchellh +``` + +## Read User + +Reads the properties of an existing username. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/auth/userpass/users/:username` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/auth/userpass/users/mitchellh +``` + +### Sample Response + +```json +{ + "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + "data": { + "max_ttl": 0, + "policies": "default,dev", + "ttl": 0 + }, + "warnings": null +} +``` + +## Delete User + +This endpoint deletes the user from the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/auth/userpass/users/:username` | `204 (empty body)` | + +### Parameters + +- `username` `(string: )` - The username for the user. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/auth/userpass/users/mitchellh +``` + +## Update Password on User + +Update password for an existing user. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/userpass/users/:username/password` | `204 (empty body)` | + +### Parameters + +- `username` `(string: )` – The username for the user. +- `password` `(string: )` - The password for the user. + +### Sample Payload + +```json +{ + "password": "superSecretPassword2", +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/userpass/users/mitchellh/password +``` + +## Update Policies on User + +Update policies for an existing user. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/userpass/users/:username/policies` | `204 (empty body)` | + +### Parameters + +- `username` `(string: )` – The username for the user. +- `policies` `(string: "")` – Comma-separated list of policies. If set to empty + +### Sample Payload + +```json +{ + "policies": "policy1,policy2", +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/userpass/users/mitchellh/policies +``` + +## List Users + +List available userpass users. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/auth/userpass/users` | `200 application/json` | +| `GET` | `/auth/userpass/users?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST + https://vault.rocks/v1/auth/userpass/users +``` + +### Sample Response + +```json +{ + "data": { + "keys": [ + "mitchellh", + "armon" + ] + } +} +``` + +## Login + +Login with the username and password. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/auth/userpass/login/:username` | `200 application/json` | + +### Parameters + +- `username` `(string: )` – The username for the user. +- `password` `(string: )` - The password for the user. + +### Sample Payload + +```json +{ + "password": "superSecretPassword2", +} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/auth/userpass/login/mitchellh +``` + +### Sample Response + +```json +{ + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": null, + "warnings": null, + "auth": { + "client_token": "64d2a8f2-2a2f-5688-102b-e6088b76e344", + "accessor": "18bb8f89-826a-56ee-c65b-1736dc5ea27d", + "policies": ["default"], + "metadata": { + "username": "mitchellh" + }, + "lease_duration": 7200, + "renewable": true + } +} +``` \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/api/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/index.html.md index 719b471..144bb0c 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/index.html.md @@ -52,7 +52,7 @@ via the `X-Vault-Token` header for future requests. ## Reading, Writing, and Listing Secrets Different backends implement different APIs according to their functionality. -The examples below are created with the `generic` backend, which acts like a +The examples below are created with the `kv` backend, which acts like a Key/Value store. Read the documentation for a particular backend for detailed information on its API; this simply provides a general overview. @@ -64,7 +64,7 @@ following URL: ``` This maps to `secret/foo` where `foo` is the key in the `secret/` mount, which -is mounted by default on a fresh Vault install and is of type `generic`. 
+is mounted by default on a fresh Vault install and is of type `kv`. Here is an example of reading a secret using cURL: @@ -76,7 +76,7 @@ $ curl \ ``` You can list secrets as well. To do this, either issue a GET with the query -parameter `list=true`, or you can use the LIST HTTP verb. For the `generic` +parameter `list=true`, or you can use the LIST HTTP verb. For the `kv` backend, listing is allowed on directories only, and returns the keys in the given directory: @@ -154,10 +154,11 @@ The following HTTP status codes are used throughout the API. - `200` - Success with data. - `204` - Success, no data returned. -- `400` - Invalid request, missing or invalid data. See the - "validation" section for more details on the error response. +- `400` - Invalid request, missing or invalid data. - `403` - Forbidden, your authentication details are either - incorrect or you don't have access to this feature. + incorrect, you don't have access to this feature, or - if CORS is + enabled - you made a cross-origin request from an origin that is + not allowed to make such requests. - `404` - Invalid path. This can both mean that the path truly doesn't exist or that you don't have permission to view a specific path. We use 404 in some cases to avoid state leakage. diff --git a/vendor/github.com/hashicorp/vault/website/source/api/libraries.html.md b/vendor/github.com/hashicorp/vault/website/source/api/libraries.html.md index 3e35be5..acfaff2 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/libraries.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/libraries.html.md @@ -37,6 +37,7 @@ These libraries are provided by the community. ### Ansible +* [ansible-vault](https://github.com/jhaals/ansible-vault) lookup plugin without third-party dependencies. 
* [Ansible Modules Hashivault](https://pypi.python.org/pypi/ansible-modules-hashivault) ```shell @@ -84,7 +85,6 @@ $ cabal install vault-tool ### Java * [Spring Vault](https://github.com/spring-projects/spring-vault) -* [vault-java](https://github.com/jhaals/vault-java) * [vault-java-driver](https://github.com/BetterCloud/vault-java-driver) ### Kotlin @@ -119,6 +119,20 @@ $ composer require jippi/vault-php-sdk $ composer require violuke/vault-php-sdk ``` +* [vault-php](https://github.com/CSharpRU/vault-php) + +```shell +$ composer require csharpru/vault-php +``` + +### PowerShell + +* [Zyborg.Vault](https://github.com/zyborg/Zyborg.Vault) + +```PowerShell +Install-Module Zyborg.Vault +``` + ### Python * [HVAC](https://github.com/ianunruh/hvac) diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/aws/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/aws/index.html.md index 25dc268..5e9236c 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/aws/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/aws/index.html.md @@ -23,13 +23,17 @@ are multiple ways to pass root IAM credentials to the Vault server, specified below with the highest precedence first. If credentials already exist, this will overwrite them. +The official AWS SDK is used for sourcing credentials from env vars, shared +files, or IAM/ECS instances. + - Static credentials provided to the API as a payload - Credentials in the `AWS_ACCESS_KEY`, `AWS_SECRET_KEY`, and `AWS_REGION` environment variables **on the server** -- Querying the EC2 metadata service if the **Vault server** is on EC2 and has - querying capabilities +- Shared credentials files + +- Assigned IAM role or ECS task role credentials At present, this endpoint does not confirm that the provided AWS credentials are valid AWS credentials with proper permissions. @@ -44,7 +48,9 @@ valid AWS credentials with proper permissions. 
- `secret_key` `(string: )` – Specifies the AWS secret access key. -- `region` `(string: )` – Specifies the AWS region. +- `region` `(string: )` – Specifies the AWS region. If not set it + will use the `AWS_REGION` env var, `AWS_DEFAULT_REGION` env var, or + `us-east-1` in that order. ### Sample Payload @@ -230,6 +236,7 @@ This endpoint lists all existing roles in the backend. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | | `LIST` | `/aws/roles` | `200 application/json` | +| `GET` | `/aws/roles?list=true` | `200 application/json` | ### Sample Request diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/cassandra/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/cassandra/index.html.md index 9f9471c..5bfabb5 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/cassandra/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/cassandra/index.html.md @@ -8,6 +8,11 @@ description: |- # Cassandra Secret Backend HTTP API +~> **Deprecation Note:** This backend is deprecated in favor of the +combined databases backend added in v0.7.1. See the API documentation for +the new implementation of this backend at +[Cassandra Database Plugin HTTP API](/api/secret/databases/cassandra.html). + This is the API documentation for the Vault Cassandra secret backend. For general information about the usage and operation of the Cassandra backend, please see the diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/consul/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/consul/index.html.md index b91e956..3d8a3d2 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/consul/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/consul/index.html.md @@ -152,6 +152,7 @@ This endpoint lists all existing roles in the backend. 
| Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | | `LIST` | `/consul/roles` | `200 application/json` | +| `GET` | `/consul/roles?list=true` | `200 application/json` | ### Sample Request diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/cubbyhole/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/cubbyhole/index.html.md index 903baaa..3178af0 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/cubbyhole/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/cubbyhole/index.html.md @@ -60,7 +60,8 @@ not return a value. The values themselves are not accessible via this command. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `List` | `/cubbyhole/:path` | `200 application/json` | +| `LIST` | `/cubbyhole/:path` | `200 application/json` | +| `GET` | `/cubbyhole/:path?list=true` | `200 application/json` | ### Parameters diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/cassandra.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/cassandra.html.md new file mode 100644 index 0000000..5b60b27 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/cassandra.html.md @@ -0,0 +1,132 @@ +--- +layout: "api" +page_title: "Cassandra Database Plugin - HTTP API" +sidebar_current: "docs-http-secret-databases-cassandra" +description: |- + The Cassandra plugin for Vault's Database backend generates database credentials to access Cassandra servers. +--- + +# Cassandra Database Plugin HTTP API + +The Cassandra Database Plugin is one of the supported plugins for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the Cassandra database. 
+ +## Configure Connection + +In addition to the parameters defined by the [Database +Backend](/api/secret/databases/index.html#configure-connection), this plugin +has a number of parameters to further configure a connection. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/database/config/:name` | `204 (empty body)` | + +### Parameters +- `hosts` `(string: )` – Specifies a set of comma-delineated Cassandra + hosts to connect to. + +- `port` `(int: 9042)` – Specifies the default port to use if none is provided + as part of the host URI. Defaults to Cassandra's default transport port, 9042. + +- `username` `(string: )` – Specifies the username to use for + superuser access. + +- `password` `(string: )` – Specifies the password corresponding to + the given username. + +- `tls` `(bool: true)` – Specifies whether to use TLS when connecting to + Cassandra. + +- `insecure_tls` `(bool: false)` – Specifies whether to skip verification of the + server certificate when using TLS. + +- `pem_bundle` `(string: "")` – Specifies concatenated PEM blocks containing a + certificate and private key; a certificate, private key, and issuing CA + certificate; or just a CA certificate. + +- `pem_json` `(string: "")` – Specifies JSON containing a certificate and + private key; a certificate, private key, and issuing CA certificate; or just a + CA certificate. For convenience format is the same as the output of the + `issue` command from the `pki` backend; see + [the pki documentation](/docs/secrets/pki/index.html). + +- `protocol_version` `(int: 2)` – Specifies the CQL protocol version to use. + +- `connect_timeout` `(string: "5s")` – Specifies the connection timeout to use. 
+ +TLS works as follows: + +- If `tls` is set to true, the connection will use TLS; this happens + automatically if `pem_bundle`, `pem_json`, or `insecure_tls` is set + +- If `insecure_tls` is set to true, the connection will not perform verification + of the server certificate; this also sets `tls` to true + +- If only `issuing_ca` is set in `pem_json`, or the only certificate in + `pem_bundle` is a CA certificate, the given CA certificate will be used for + server certificate verification; otherwise the system CA certificates will be + used + +- If `certificate` and `private_key` are set in `pem_bundle` or `pem_json`, + client auth will be turned on for the connection + +`pem_bundle` should be a PEM-concatenated bundle of a private key + client +certificate, an issuing CA certificate, or both. `pem_json` should contain the +same information; for convenience, the JSON format is the same as that output by +the issue command from the PKI backend. + +### Sample Payload + +```json +{ + "plugin_name": "cassandra-database-plugin", + "allowed_roles": "readonly", + "hosts": "cassandra1.local", + "username": "user", + "password": "pass" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/cassandra/config/connection +``` + +## Statements + +Statements are configured during role creation and are used by the plugin to +determine what is sent to the database on user creation, renewing, and +revocation. For more information on configuring roles see the [Role +API](/api/secret/databases/index.html#create-role) in the Database Backend docs. + +### Parameters + +The following are the statements used by this plugin. If not mentioned in this +list the plugin does not support that statement type. + +- `creation_statements` `(string: "")` – Specifies the database + statements executed to create and configure a user. 
Must be a + semicolon-separated string, a base64-encoded semicolon-separated string, a + serialized JSON string array, or a base64-encoded serialized JSON string + array. The '{{name}}' and '{{password}}' values will be substituted. If not + provided, defaults to a generic create user statements that creates a + non-superuser. + +- `revocation_statements` `(string: "")` – Specifies the database statements to + be executed to revoke a user. Must be a semicolon-separated string, a + base64-encoded semicolon-separated string, a serialized JSON string array, or + a base64-encoded serialized JSON string array. The '{{name}}' value will be + substituted. If not provided defaults to a generic drop user statement. + +- `rollback_statements` `(string: "")` – Specifies the database statements to be + executed to rollback a create operation in the event of an error. Must be a + semicolon-separated string, a base64-encoded semicolon-separated string, a + serialized JSON string array, or a base64-encoded serialized JSON string + array. The '{{name}}' value will be substituted. If not provided, defaults to + a generic drop user statement diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/hanadb.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/hanadb.html.md new file mode 100644 index 0000000..e108815 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/hanadb.html.md @@ -0,0 +1,87 @@ +--- +layout: "api" +page_title: "HANA Database Plugin - HTTP API" +sidebar_current: "docs-http-secret-databases-hana" +description: |- + The HANA plugin for Vault's Database backend generates database credentials to access HANA servers. +--- + +# HANA Database Plugin HTTP API + +The HANA Database Plugin is one of the supported plugins for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the HANA database. 
+ +## Configure Connection + +In addition to the parameters defined by the [Database +Backend](/api/secret/databases/index.html#configure-connection), this plugin +has a number of parameters to further configure a connection. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/database/config/:name` | `204 (empty body)` | + +### Parameters +- `connection_url` `(string: )` - Specifies the HANA DSN. + +- `max_open_connections` `(int: 2)` - Specifies the maximum number of open + connections to the database. + +- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle + connections to the database. A zero uses the value of `max_open_connections` + and a negative value disables idle connections. If larger than + `max_open_connections` it will be reduced to be equal. + +- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of + time a connection may be reused. If <= 0s connections are reused forever. + +### Sample Payload + +```json +{ + "plugin_name": "hana-database-plugin", + "allowed_roles": "readonly", + "connection_url": "hdb://username:password@localhost:1433", + "max_open_connections": 5, + "max_connection_lifetime": "5s", +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/database/config/hana +``` + +## Statements + +Statements are configured during role creation and are used by the plugin to +determine what is sent to the datatabse on user creation, renewing, and +revocation. For more information on configuring roles see the [Role +API](/api/secret/databases/index.html#create-role) in the Database Backend docs. + +### Parameters + +The following are the statements used by this plugin. If not mentioned in this +list the plugin does not support that statement type. 
+ +- `creation_statements` `(string: )` – Specifies the database + statements executed to create and configure a user. Must be a + semicolon-separated string, a base64-encoded semicolon-separated string, a + serialized JSON string array, or a base64-encoded serialized JSON string + array. The '{{name}}', '{{password}}', and '{{expiration}}' values will be + substituted. + - The expiration time will be HANA server time plus the role's `default_ttl`. + If `default_ttl` is 0 or not set, a SQL HdbError 438 will be returned. + +- `revocation_statements` `(string: "")` – Specifies the database statements to + be executed to revoke a user. Must be a semicolon-separated string, a + base64-encoded semicolon-separated string, a serialized JSON string array, or + a base64-encoded serialized JSON string array. The '{{name}}' value will be + substituted. If not provided, defaults to dropping the user only if they have + no dependent objects. diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/index.html.md new file mode 100644 index 0000000..55b8fcb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/index.html.md @@ -0,0 +1,337 @@ +--- +layout: "api" +page_title: "Databases - HTTP API" +sidebar_current: "docs-http-secret-databases" +description: |- + Top page for database secret backend information +--- + +# Database Secret Backend HTTP API + +This is the API documentation for the Vault Database secret backend. For +general information about the usage and operation of the Database backend, +please see the +[Vault Database backend documentation](/docs/secrets/databases/index.html). + +This documentation assumes the Database backend is mounted at the +`/database` path in Vault. Since it is possible to mount secret backends at +any location, please update your API calls accordingly. 
+ +## Configure Connection + +This endpoint configures the connection string used to communicate with the +desired database. In addition to the parameters listed here, each Database +plugin has additional, database-plugin-specific parameters for this endpoint. +Please read the HTTP API for the plugin you wish to configure to see the full +list of additional parameters. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/database/config/:name` | `204 (empty body)` | + +### Parameters +- `name` `(string: )` – Specifies the name for this database + connection. This is specified as part of the URL. + +- `plugin_name` `(string: )` - Specifies the name of the plugin to use + for this connection. + +- `verify_connection` `(bool: true)` – Specifies if the connection is verified + during initial configuration. Defaults to true. + +- `allowed_roles` `(slice: [])` - Array or comma separated string of the roles + allowed to use this connection. Defaults to empty (no roles), if contains a + "*" any role can use this connection. + +### Sample Payload + +```json +{ + "plugin_name": "mysql-database-plugin", + "allowed_roles": "readonly", + "connection_url": "root:mysql@tcp(127.0.0.1:3306)/" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/database/config/mysql +``` + +## Read Connection + +This endpoint returns the configuration settings for a connection. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/database/config/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the connection to read. + This is specified as part of the URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request GET \ + https://vault.rocks/v1/database/config/mysql +``` + +### Sample Response + +```json +{ + "data": { + "allowed_roles": [ + "readonly" + ], + "connection_details": { + "connection_url": "root:mysql@tcp(127.0.0.1:3306)/" + }, + "plugin_name": "mysql-database-plugin" + } +} +``` + +## Delete Connection + +This endpoint deletes a connection. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/database/config/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the connection to delete. + This is specified as part of the URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/database/config/mysql +``` + +## Reset Connection + +This endpoint closes a connection and its underlying plugin and restarts it +with the configuration stored in the barrier. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/database/reset/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the connection to reset. + This is specified as part of the URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + https://vault.rocks/v1/database/reset/mysql +``` + +## Create Role + +This endpoint creates or updates a role definition. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/database/roles/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the role to create. This + is specified as part of the URL. + +- `db_name` `(string: )` - The name of the database connection to use + for this role. + +- `default_ttl` `(string/int: 0)` - Specifies the TTL for the leases + associated with this role. 
Accepts time suffixed strings ("1h") or an integer + number of seconds. Defaults to system/backend default TTL time. + +- `max_ttl` `(string/int: 0)` - Specifies the maximum TTL for the leases + associated with this role. Accepts time suffixed strings ("1h") or an integer + number of seconds. Defaults to system/backend default TTL time. + +- `creation_statements` `(string: )` – Specifies the database + statements executed to create and configure a user. See the plugin's API page + for more information on support and formatting for this parameter. + +- `revocation_statements` `(string: "")` – Specifies the database statements to + be executed to revoke a user. See the plugin's API page for more information + on support and formatting for this parameter. + +- `rollback_statements` `(string: "")` – Specifies the database statements to be + executed to rollback a create operation in the event of an error. Not every + plugin type will support this functionality. See the plugin's API page for + more information on support and formatting for this parameter. + +- `renew_statements` `(string: "")` – Specifies the database statements to be + executed to renew a user. Not every plugin type will support this + functionality. See the plugin's API page for more information on support and + formatting for this parameter. + + + +### Sample Payload + +```json +{ + "db_name": "mysql", + "creation_statements": "CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';", + "default_ttl": "1h", + "max_ttl": "24h" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/database/roles/my-role +``` + +## Read Role + +This endpoint queries the role definition. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/database/roles/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the role to read. This + is specified as part of the URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/database/roles/my-role +``` + +### Sample Response + +```json +{ + "data": { + "creation_statements": "CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"{{name}}\";", + "db_name": "mysql", + "default_ttl": 3600, + "max_ttl": 86400, + "renew_statements": "", + "revocation_statements": "", + "rollback_statements": "" + }, +} +``` + +## List Roles + +This endpoint returns a list of available roles. Only the role names are +returned, not any values. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/database/roles` | `200 application/json` | +| `GET` | `/database/roles?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/database/roles +``` + +### Sample Response + +```json +{ + "auth": null, + "data": { + "keys": ["dev", "prod"] + }, + "lease_duration": 2764800, + "lease_id": "", + "renewable": false +} +``` + +## Delete Role + +This endpoint deletes the role definition. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/database/roles/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the role to delete. This + is specified as part of the URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request DELETE \ + https://vault.rocks/v1/database/roles/my-role +``` + +## Generate Credentials + +This endpoint generates a new set of dynamic credentials based on the named +role. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/database/creds/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the role to create + credentials against. This is specified as part of the URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/database/creds/my-role +``` + +### Sample Response + +```json +{ + "data": { + "username": "root-1430158508-126", + "password": "132ae3ef-5a64-7499-351e-bfe59f3a2a21" + } +} +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mongodb.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mongodb.html.md new file mode 100644 index 0000000..48a8ae2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mongodb.html.md @@ -0,0 +1,87 @@ +--- +layout: "api" +page_title: "MongoDB Database Plugin - HTTP API" +sidebar_current: "docs-http-secret-databases-mongodb" +description: |- + The MongoDB plugin for Vault's Database backend generates database credentials to access MongoDB servers. +--- + +# MongoDB Database Plugin HTTP API + +The MongoDB Database Plugin is one of the supported plugins for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the MongoDB database. + +## Configure Connection + +In addition to the parameters defined by the [Database +Backend](/api/secret/databases/index.html#configure-connection), this plugin +has a number of parameters to further configure a connection. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/database/config/:name` | `204 (empty body)` | + +### Parameters +- `connection_url` `(string: )` – Specifies the MongoDB standard connection string (URI). + +### Sample Payload + +```json +{ + "plugin_name": "mongodb-database-plugin", + "allowed_roles": "readonly", + "connection_url": "mongodb://admin:Password!@mongodb.acme.com:27017/admin?ssl=true" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/database/config/mongodb +``` + +## Statements + +Statements are configured during role creation and are used by the plugin to +determine what is sent to the datatabse on user creation, renewing, and +revocation. For more information on configuring roles see the [Role +API](/api/secret/databases/index.html#create-role) in the Database Backend docs. + +### Parameters + +The following are the statements used by this plugin. If not mentioned in this +list the plugin does not support that statement type. + +- `creation_statements` `(string: )` – Specifies the database + statements executed to create and configure a user. Must be a + serialized JSON object, or a base64-encoded serialized JSON object. + The object can optionally contain a "db" string for session connection, + and must contain a "roles" array. This array contains objects that holds + a "role", and an optional "db" value, and is similar to the BSON document that + is accepted by MongoDB's `roles` field. Vault will transform this array into + such format. For more information regarding the `roles` field, refer to + [MongoDB's documentation](https://docs.mongodb.com/manual/reference/method/db.createUser/). + +- `revocation_statements` `(string: "")` – Specifies the database statements to + be executed to revoke a user. Must be a serialized JSON object, or a base64-encoded + serialized JSON object. 
The object can optionally contain a "db" string. If no + "db" value is provided, it defaults to the "admin" database. + +### Sample Creation Statement + +```json +{ + "db": "admin", + "roles": [ + { + "role": "read", + "db": "foo", + } + ] +} +``` \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mssql.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mssql.html.md new file mode 100644 index 0000000..42d7546 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mssql.html.md @@ -0,0 +1,83 @@ +--- +layout: "api" +page_title: "MSSQL Database Plugin - HTTP API" +sidebar_current: "docs-http-secret-databases-mssql" +description: |- + The MSSQL plugin for Vault's Database backend generates database credentials to access MSSQL servers. +--- + +# MSSQL Database Plugin HTTP API + +The MSSQL Database Plugin is one of the supported plugins for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the MSSQL database. + +## Configure Connection + +In addition to the parameters defined by the [Database +Backend](/api/secret/databases/index.html#configure-connection), this plugin +has a number of parameters to further configure a connection. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/database/config/:name` | `204 (empty body)` | + +### Parameters +- `connection_url` `(string: )` - Specifies the MSSQL DSN. + +- `max_open_connections` `(int: 2)` - Specifies the maximum number of open + connections to the database. + +- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle + connections to the database. A zero uses the value of `max_open_connections` + and a negative value disables idle connections. If larger than + `max_open_connections` it will be reduced to be equal. 
+ +- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of + time a connection may be reused. If <= 0s connections are reused forever. + +### Sample Payload + +```json +{ + "plugin_name": "mssql-database-plugin", + "allowed_roles": "readonly", + "connection_url": "sqlserver://sa:yourStrong(!)Password@localhost:1433", + "max_open_connections": 5, + "max_connection_lifetime": "5s", +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/database/config/mssql +``` + +## Statements + +Statements are configured during role creation and are used by the plugin to +determine what is sent to the datatabse on user creation, renewing, and +revocation. For more information on configuring roles see the [Role +API](/api/secret/databases/index.html#create-role) in the Database Backend docs. + +### Parameters + +The following are the statements used by this plugin. If not mentioned in this +list the plugin does not support that statement type. + +- `creation_statements` `(string: )` – Specifies the database + statements executed to create and configure a user. Must be a + semicolon-separated string, a base64-encoded semicolon-separated string, a + serialized JSON string array, or a base64-encoded serialized JSON string + array. The '{{name}}' and '{{password}}' values will be substituted. + +- `revocation_statements` `(string: "")` – Specifies the database statements to + be executed to revoke a user. Must be a semicolon-separated string, a + base64-encoded semicolon-separated string, a serialized JSON string array, or + a base64-encoded serialized JSON string array. The '{{name}}' value will be + substituted. If not provided defaults to a generic drop user statement. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mysql-maria.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mysql-maria.html.md new file mode 100644 index 0000000..0a64ab4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/mysql-maria.html.md @@ -0,0 +1,83 @@ +--- +layout: "api" +page_title: "MySQL/MariaDB Database Plugin - HTTP API" +sidebar_current: "docs-http-secret-databases-mysql-maria" +description: |- + The MySQL/MariaDB plugin for Vault's Database backend generates database credentials to access MySQL and MariaDB servers. +--- + +# MySQL/MariaDB Database Plugin HTTP API + +The MySQL Database Plugin is one of the supported plugins for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the MySQL database. + +## Configure Connection + +In addition to the parameters defined by the [Database +Backend](/api/secret/databases/index.html#configure-connection), this plugin +has a number of parameters to further configure a connection. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/database/config/:name` | `204 (empty body)` | + +### Parameters +- `connection_url` `(string: )` - Specifies the MySQL DSN. + +- `max_open_connections` `(int: 2)` - Specifies the maximum number of open + connections to the database. + +- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle + connections to the database. A zero uses the value of `max_open_connections` + and a negative value disables idle connections. If larger than + `max_open_connections` it will be reduced to be equal. + +- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of + time a connection may be reused. If <= 0s connections are reused forever. 
+ +### Sample Payload + +```json +{ + "plugin_name": "mysql-database-plugin", + "allowed_roles": "readonly", + "connection_url": "root:mysql@tcp(127.0.0.1:3306)/", + "max_open_connections": 5, + "max_connection_lifetime": "5s", +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/database/config/mysql +``` + +## Statements + +Statements are configured during role creation and are used by the plugin to +determine what is sent to the datatabse on user creation, renewing, and +revocation. For more information on configuring roles see the [Role +API](/api/secret/databases/index.html#create-role) in the Database Backend docs. + +### Parameters + +The following are the statements used by this plugin. If not mentioned in this +list the plugin does not support that statement type. + +- `creation_statements` `(string: )` – Specifies the database + statements executed to create and configure a user. Must be a + semicolon-separated string, a base64-encoded semicolon-separated string, a + serialized JSON string array, or a base64-encoded serialized JSON string + array. The '{{name}}' and '{{password}}' values will be substituted. + +- `revocation_statements` `(string: "")` – Specifies the database statements to + be executed to revoke a user. Must be a semicolon-separated string, a + base64-encoded semicolon-separated string, a serialized JSON string array, or + a base64-encoded serialized JSON string array. The '{{name}}' value will be + substituted. If not provided defaults to a generic drop user statement. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/oracle.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/oracle.html.md new file mode 100644 index 0000000..5a6f543 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/oracle.html.md @@ -0,0 +1,83 @@ +--- +layout: "api" +page_title: "Oracle Database Plugin - HTTP API" +sidebar_current: "docs-http-secret-databases-oracle-maria" +description: |- + The Oracle plugin for Vault's Database backend generates database credentials to access Oracle servers. +--- + +# Oracle Database Plugin HTTP API + +The Oracle Database Plugin is one of the supported plugins for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the Oracle database. + +## Configure Connection + +In addition to the parameters defined by the [Database +Backend](/api/secret/databases/index.html#configure-connection), this plugin +has a number of parameters to further configure a connection. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/database/config/:name` | `204 (empty body)` | + +### Parameters +- `connection_url` `(string: )` - Specifies the Oracle DSN. + +- `max_open_connections` `(int: 2)` - Specifies the maximum number of open + connections to the database. + +- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle + connections to the database. A zero uses the value of `max_open_connections` + and a negative value disables idle connections. If larger than + `max_open_connections` it will be reduced to be equal. + +- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of + time a connection may be reused. If <= 0s connections are reused forever. 
+ +### Sample Payload + +```json +{ + "plugin_name": "oracle-database-plugin", + "allowed_roles": "readonly", + "connection_url": "system/Oracle@localhost:1521/OraDoc.localhost", + "max_open_connections": 5, + "max_connection_lifetime": "5s" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/database/config/oracle +``` + +## Statements + +Statements are configured during role creation and are used by the plugin to +determine what is sent to the database on user creation, renewing, and +revocation. For more information on configuring roles see the [Role +API](/api/secret/databases/index.html#create-role) in the Database Backend docs. + +### Parameters + +The following are the statements used by this plugin. If not mentioned in this +list the plugin does not support that statement type. + +- `creation_statements` `(string: <required>)` – Specifies the database + statements executed to create and configure a user. Must be a + semicolon-separated string, a base64-encoded semicolon-separated string, a + serialized JSON string array, or a base64-encoded serialized JSON string + array. The '{{name}}' and '{{password}}' values will be substituted. + +- `revocation_statements` `(string: "")` – Specifies the database statements to + be executed to revoke a user. Must be a semicolon-separated string, a + base64-encoded semicolon-separated string, a serialized JSON string array, or + a base64-encoded serialized JSON string array. The '{{name}}' value will be + substituted. If not provided defaults to a generic drop user statement. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/postgresql.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/postgresql.html.md new file mode 100644 index 0000000..bb58a52 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/databases/postgresql.html.md @@ -0,0 +1,98 @@ +--- +layout: "api" +page_title: "PostgreSQL Database Plugin - HTTP API" +sidebar_current: "docs-http-secret-databases-postgresql" +description: |- + The PostgreSQL plugin for Vault's Database backend generates database credentials to access PostgreSQL servers. +--- + +# PostgreSQL Database Plugin HTTP API + +The PostgreSQL Database Plugin is one of the supported plugins for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the PostgreSQL database. + +## Configure Connection + +In addition to the parameters defined by the [Database +Backend](/api/secret/databases/index.html#configure-connection), this plugin +has a number of parameters to further configure a connection. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/database/config/:name` | `204 (empty body)` | + +### Parameters +- `connection_url` `(string: )` - Specifies the PostgreSQL DSN. + +- `max_open_connections` `(int: 2)` - Specifies the maximum number of open + connections to the database. + +- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle + connections to the database. A zero uses the value of `max_open_connections` + and a negative value disables idle connections. If larger than + `max_open_connections` it will be reduced to be equal. + +- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of + time a connection may be reused. If <= 0s connections are reused forever. 
+ +### Sample Payload + +```json +{ + "plugin_name": "postgresql-database-plugin", + "allowed_roles": "readonly", + "connection_url": "postgresql://root:root@localhost:5432/postgres", + "max_open_connections": 5, + "max_connection_lifetime": "5s" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/database/config/postgresql +``` + +## Statements + +Statements are configured during role creation and are used by the plugin to +determine what is sent to the database on user creation, renewing, and +revocation. For more information on configuring roles see the [Role +API](/api/secret/databases/index.html#create-role) in the Database Backend docs. + +### Parameters + +The following are the statements used by this plugin. If not mentioned in this +list the plugin does not support that statement type. + +- `creation_statements` `(string: <required>)` – Specifies the database + statements executed to create and configure a user. Must be a + semicolon-separated string, a base64-encoded semicolon-separated string, a + serialized JSON string array, or a base64-encoded serialized JSON string + array. The '{{name}}', '{{password}}' and '{{expiration}}' values will be + substituted. + +- `revocation_statements` `(string: "")` – Specifies the database statements to + be executed to revoke a user. Must be a semicolon-separated string, a + base64-encoded semicolon-separated string, a serialized JSON string array, or + a base64-encoded serialized JSON string array. The '{{name}}' value will be + substituted. If not provided defaults to a generic drop user statement. + +- `rollback_statements` `(string: "")` – Specifies the database statements to be + executed to rollback a create operation in the event of an error. Not every + plugin type will support this functionality. 
Must be a semicolon-separated + string, a base64-encoded semicolon-separated string, a serialized JSON string + array, or a base64-encoded serialized JSON string array. The '{{name}}' value + will be substituted. + +- `renew_statements` `(string: "")` – Specifies the database statements to be + executed to renew a user. Not every plugin type will support this + functionality. Must be a semicolon-separated string, a base64-encoded + semicolon-separated string, a serialized JSON string array, or a + base64-encoded serialized JSON string array. The '{{name}}' and + '{{expiration}}' values will be substituted. diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/identity/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/identity/index.html.md new file mode 100644 index 0000000..df87993 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/identity/index.html.md @@ -0,0 +1,421 @@ +--- +layout: "api" +page_title: "Identity Secret Backend - HTTP API" +sidebar_current: "docs-http-secret-identity" +description: |- + This is the API documentation for the Vault Identity secret backend. +--- + +# Identity Secret Backend HTTP API + +This is the API documentation for the Vault Identity secret backend. For +general information about the usage and operation of the Identity backend, +please see the +[Vault Identity backend documentation](/docs/secrets/identity/index.html). + +## Register Entity + +This endpoint creates or updates an Entity. + +| Method | Path | Produces | +| :------- | :------------------ | :----------------------| +| `POST` | `/identity/entity` | `200 application/json` | + +### Parameters + +- `name` `(string: entity-<UUID>)` – Name of the entity. + +- `metadata` `(list of strings: [])` – Metadata to be associated with the entity. Format should be a list of `key=value` pairs. + +- `policies` `(list of strings: [])` – Policies to be tied to the entity. Comma separated list of strings. 
+ +### Sample Payload + +```json +{ + "metadata": ["organization=hashicorp", "team=vault"], + "policies": ["eng-dev", "infra-dev"] +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/identity/entity +``` + +### Sample Response + +```json +{ + "data": { + "id": "8d6a45e5-572f-8f13-d226-cd0d1ec57297", + "personas": null + } +} +``` + +## Read Entity by ID + +This endpoint queries the entity by its identifier. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/identity/entity/id/:id` | `200 application/json` | + +### Parameters + +- `id` `(string: )` – Specifies the identifier of the entity. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/identity/entity/id/8d6a45e5-572f-8f13-d226-cd0d1ec57297 +``` + +### Sample Response + +```json +{ + "data": { + "bucket_key_hash": "177553e4c58987f4cc5d7e530136c642", + "creation_time": "2017-07-25T20:29:22.614756844Z", + "id": "8d6a45e5-572f-8f13-d226-cd0d1ec57297", + "last_update_time": "2017-07-25T20:29:22.614756844Z", + "metadata": { + "organization": "hashicorp", + "team": "vault" + }, + "name": "entity-c323de27-2ad2-5ded-dbf3-0c7ef98bc613", + "personas": [], + "policies": [ + "eng-dev", + "infra-dev" + ] + } +} +``` + +## Update Entity by ID + +This endpoint is used to update an existing entity. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/identity/entity/id/:id` | `200 application/json` | + +### Parameters + +- `id` `(string: )` – Specifies the identifier of the entity. + +- `name` `(string: entity-)` – Name of the entity. + +- `metadata` `(list of strings: [])` – Metadata to be associated with the entity. Format should be a list of `key=value` pairs. + +- `policies` `(list of strings: [])` – Policies to be tied to the entity. 
Comma separated list of strings. + + +### Sample Payload + +```json +{ + "name":"updatedEntityName", + "metadata": ["organization=hashi", "team=nomad"], + "policies": ["eng-developers", "infra-developers"] +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/identity/entity/id/8d6a45e5-572f-8f13-d226-cd0d1ec57297 +``` + +### Sample Response + +``` +{ + "data": { + "id": "8d6a45e5-572f-8f13-d226-cd0d1ec57297", + "personas": null + } +} +``` + +## Delete Entity by ID + +This endpoint deletes an entity and all its associated personas. + +| Method | Path | Produces | +| :--------- | :-------------------------- | :----------------------| +| `DELETE` | `/identity/entity/id/:id` | `204 (empty body)` | + +### Parameters + +- `id` `(string: <required>)` – Specifies the identifier of the entity. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/identity/entity/id/8d6a45e5-572f-8f13-d226-cd0d1ec57297 +``` + +## List Entities by ID + +This endpoint returns a list of available entities by their identifiers. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/identity/entity/id` | `200 application/json` | +| `GET` | `/identity/entity/id?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request LIST \ + https://vault.rocks/v1/identity/entity/id +``` + +### Sample Response + +```json +{ + "data": { + "keys": [ + "02fe5a88-912b-6794-62ed-db873ef86a95", + "3bf81bc9-44df-8138-57f9-724a9ae36d04", + "627fba68-98c9-c012-71ba-bfb349585ce1", + "6c4c805b-b384-3d0e-4d51-44d349887b96", + "70a72feb-35d1-c775-0813-8efaa8b4b9b5", + "f1092a67-ce34-48fd-161d-c13a367bc1cd", + "faedd89a-0d82-c197-c8f9-93a3e6cf0cd0" + ] + } +} +``` + +## Register Persona + +This endpoint creates a new persona and attaches it to the entity with the +given identifier. + +| Method | Path | Produces | +| :------- | :------------------ | :----------------------| +| `POST` | `/identity/persona` | `200 application/json` | + +### Parameters + +- `name` (string: Required) - Name of the persona. Name should be the + identifier of the client in the authentication source. For example, if the + persona belongs to userpass backend, the name should be a valid username + within userpass backend. If persona belongs to GitHub, it should be the + GitHub username. + +- `entity_id` (string: required) - Entity ID to which this persona belongs to. + +- `mount_accessor` (string: required) - Accessor of the mount to which the + persona should belong to. + +- `metadata` `(list of strings: [])` – Metadata to be associated with the persona. Format should be a list of `key=value` pairs. + +### Sample Payload + +``` +{ + "name": "testuser", + "metadata": ["group=san_francisco", "region=west"], + "entity_id": "404e57bc-a0b1-a80f-0a73-b6e92e8a52d3", + "mount_accessor": "auth_userpass_e50b1a44" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/identity/persona +``` + +### Sample Response + +``` +{ + "data": { + "entity_id": "404e57bc-a0b1-a80f-0a73-b6e92e8a52d3", + "id": "34982d3d-e3ce-5d8b-6e5f-b9bb34246c31" + } +} +``` + +## Read Persona by ID + +This endpoint queries the persona by its identifier. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/identity/persona/id/:id` | `200 application/json` | + +### Parameters + +- `id` `(string: )` – Specifies the identifier of the persona. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/identity/persona/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31 +``` + +### Sample Response + +``` +{ + "data": { + "creation_time": "2017-07-25T21:41:09.820717636Z", + "entity_id": "404e57bc-a0b1-a80f-0a73-b6e92e8a52d3", + "id": "34982d3d-e3ce-5d8b-6e5f-b9bb34246c31", + "last_update_time": "2017-07-25T21:41:09.820717636Z", + "metadata": { + "group": "san_francisco", + "region": "west" + }, + "mount_accessor": "auth_userpass_e50b1a44", + "mount_path": "userpass/", + "mount_type": "userpass", + "name": "testuser" + } +} +``` + +## Update Persona by ID + +This endpoint is used to update an existing persona. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/identity/persona/id/:id` | `200 application/json` | + +### Parameters + +- `id` `(string: )` – Specifies the identifier of the entity. + +- `name` (string: Required) - Name of the persona. Name should be the + identifier of the client in the authentication source. For example, if the + persona belongs to userpass backend, the name should be a valid username + within userpass backend. If persona belongs to GitHub, it should be the + GitHub username. + +- `entity_id` (string: required) - Entity ID to which this persona belongs to. + +- `mount_accessor` (string: required) - Accessor of the mount to which the + persona should belong to. + +- `metadata` `(list of strings: [])` – Metadata to be associated with the + persona. Format should be a list of `key=value` pairs. 
+ +### Sample Payload + +``` +{ + "name": "testuser", + "metadata": ["group=philadelphia", "region=east"], + "entity_id": "404e57bc-a0b1-a80f-0a73-b6e92e8a52d3", + "mount_accessor": "auth_userpass_e50b1a44" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/identity/persona/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31 +``` + +### Sample Response + +``` +{ + "data": { + "entity_id": "404e57bc-a0b1-a80f-0a73-b6e92e8a52d3", + "id": "34982d3d-e3ce-5d8b-6e5f-b9bb34246c31" + } +} +``` + +## Delete Persona by ID + +This endpoint deletes a persona from its corresponding entity. + +| Method | Path | Produces | +| :--------- | :-------------------------- | :----------------------| +| `DELETE` | `/identity/persona/id/:id` | `204 (empty body)` | + +### Parameters + +- `id` `(string: <required>)` – Specifies the identifier of the persona. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/identity/persona/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31 +``` + +## List Personas by ID + +This endpoint returns a list of available personas by their identifiers. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/identity/persona/id` | `200 application/json` | +| `GET` | `/identity/persona/id?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request LIST \ + https://vault.rocks/v1/identity/persona/id +``` + +### Sample Response + +``` +{ + "data": { + "keys": [ + "2e8217fa-8cb6-8aec-9e22-3196d74ca2ba", + "91ebe973-ec86-84db-3c7c-f760415326de", + "92308b08-4139-3ec6-7af2-8e98166b4e0c", + "a3b042e6-5cc1-d5a9-8874-d53a51954de2", + "d5844921-017f-e496-2a9a-23d4a2f3e8a3" + ] + } +} +``` + diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/generic/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/kv/index.html.md similarity index 78% rename from vendor/github.com/hashicorp/vault/website/source/api/secret/generic/index.html.md rename to vendor/github.com/hashicorp/vault/website/source/api/secret/kv/index.html.md index be00171..ffa784e 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/generic/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/kv/index.html.md @@ -1,18 +1,18 @@ --- layout: "api" -page_title: "Generic Secret Backend - HTTP API" -sidebar_current: "docs-http-secret-generic" +page_title: "Key/Value Secret Backend - HTTP API" +sidebar_current: "docs-http-secret-kv" description: |- - This is the API documentation for the Vault Generic secret backend. + This is the API documentation for the Vault Key/Value secret backend. --- -# Generic Secret Backend HTTP API +# Key/Value Secret Backend HTTP API -This is the API documentation for the Vault Generic secret backend. For general -information about the usage and operation of the Generic backend, please see -the [Vault Generic backend documentation](/docs/secrets/generic/index.html). +This is the API documentation for the Vault Key/Value secret backend. For general +information about the usage and operation of the Key/Value backend, please see +the [Vault Key/Value backend documentation](/docs/secrets/kv/index.html). 
-This documentation assumes the Generic backend is mounted at the `/secret` +This documentation assumes the Key/Value backend is mounted at the `/secret` path in Vault. Since it is possible to mount secret backends at any location, please update your API calls accordingly. @@ -51,6 +51,12 @@ $ curl \ } ``` +_Note_: the `lease_duration` field (which on the CLI shows as +`refresh_interval`) is advisory. No lease is created. This is a way for writers +to indicate how often a given value should be re-read by the client. See the +[Vault Key/Value backend documentation](/docs/secrets/kv/index.html) for +more details. + ## List Secrets This endpoint returns a list of key names at the specified location. Folders are @@ -62,6 +68,7 @@ this command. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | | `LIST` | `/secret/:path` | `200 application/json` | +| `GET` | `/secret/:path?list=true` | `200 application/json` | ### Parameters @@ -114,8 +121,9 @@ policy granting the `update` capability. - `:key` `(string: "")` – Specifies a key, paired with an associated value, to be held at the given location. Multiple key/value pairs can be specified, and - all will be returned on a read operation. A key called `ttl` will trigger some - special behavior; see above for details. + all will be returned on a read operation. A key called `ttl` will trigger + some special behavior; see the [Vault Key/Value backend + documentation](/docs/secrets/kv/index.html) for details. 
### Sample Payload diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/mongodb/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/mongodb/index.html.md index e833b5c..763027e 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/mongodb/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/mongodb/index.html.md @@ -8,6 +8,11 @@ description: |- # MongoDB Secret Backend HTTP API +~> **Deprecation Note:** This backend is deprecated in favor of the +combined databases backend added in v0.7.1. See the API documentation for +the new implementation of this backend at +[MongoDB Database Plugin HTTP API](/api/secret/databases/mongodb.html). + This is the API documentation for the Vault MongoDB secret backend. For general information about the usage and operation of the MongoDB backend, please see the [Vault MongoDB backend documentation](/docs/secrets/mongodb/index.html). diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/mssql/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/mssql/index.html.md index 678eea5..e340012 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/mssql/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/mssql/index.html.md @@ -8,6 +8,11 @@ description: |- # MSSQL Secret Backend HTTP API +~> **Deprecation Note:** This backend is deprecated in favor of the +combined databases backend added in v0.7.1. See the API documentation for +the new implementation of this backend at +[MSSQL Database Plugin HTTP API](/api/secret/databases/mssql.html). + This is the API documentation for the Vault MSSQL secret backend. For general information about the usage and operation of the MSSQL backend, please see the [Vault MSSQL backend documentation](/docs/secrets/mssql/index.html). @@ -164,6 +169,7 @@ returned, not any values. 
| Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | | `LIST` | `/mssql/roles` | `200 application/json` | +| `GET` | `/mssql/roles?list=true` | `200 application/json` | ### Sample Request diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/mysql/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/mysql/index.html.md index 1d4bb90..8f0d55a 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/mysql/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/mysql/index.html.md @@ -8,6 +8,11 @@ description: |- # MySQL Secret Backend HTTP API +~> **Deprecation Note:** This backend is deprecated in favor of the +combined databases backend added in v0.7.1. See the API documentation for +the new implementation of this backend at +[MySQL/MariaDB Database Plugin HTTP API](/api/secret/databases/mysql-maria.html). + This is the API documentation for the Vault MySQL secret backend. For general information about the usage and operation of the MySQL backend, please see the [Vault MySQL backend documentation](/docs/secrets/mysql/index.html). @@ -185,6 +190,7 @@ returned, not any values. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | | `LIST` | `/mysql/roles` | `200 application/json` | +| `GET` | `/mysql/roles?list=true` | `200 application/json` | ### Sample Request diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/pki/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/pki/index.html.md index 37eadb0..ade89d3 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/pki/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/pki/index.html.md @@ -16,11 +16,42 @@ This documentation assumes the PKI backend is mounted at the `/pki` path in Vault. 
Since it is possible to mount secret backends at any location, please update your API calls accordingly. +## Table of Contents + +* [Read CA Certificate](#read-ca-certificate) +* [Read CA Certificate Chain](#read-ca-certificate-chain) +* [Read Certificate](#read-certificate) +* [List Certificates](#list-certificates) +* [Submit CA Information](#submit-ca-information) +* [Read CRL Configuration](#read-crl-configuration) +* [Set CRL Configuration](#set-crl-configuration) +* [Read URLs](#read-urls) +* [Set URLs](#set-urls) +* [Read CRL](#read-crl) +* [Rotate CRLs](#rotate-crls) +* [Generate Intermediate](#generate-intermediate) +* [Set Signed Intermediate](#set-signed-intermediate) +* [Read Certificate](#read-certificate) +* [Generate Certificate](#generate-certificate) +* [Revoke Certificate](#revoke-certificate) +* [Create/Update Role](#create-update-role) +* [Read Role](#read-role) +* [List Roles](#list-roles) +* [Delete Role](#delete-role) +* [Generate Root](#generate-root) +* [Delete Root](#delete-root) +* [Sign Intermediate](#sign-intermediate) +* [Sign Self-Issued](#sign-self-issued) +* [Sign Certificate](#sign-certificate) +* [Sign Verbatim](#sign-verbatim) +* [Tidy](#tidy) + ## Read CA Certificate This endpoint retrieves the CA certificate *in raw DER-encoded form*. This is a -bare endpoint that does not return a standard Vault data structure. If `/pem` is -added to the endpoint, the CA certificate is returned in PEM format. +bare endpoint that does not return a standard Vault data structure and cannot +be read by the Vault CLI. If `/pem` is added to the endpoint, the CA +certificate is returned in PEM format. This is an unauthenticated endpoint. @@ -45,7 +76,7 @@ $ curl \ This endpoint retrieves the CA certificate chain, including the CA _in PEM format_. This is a bare endpoint that does not return a standard Vault data -structure. +structure and cannot be read by the Vault CLI. This is an unauthenticated endpoint. 
@@ -110,6 +141,7 @@ This endpoint returns a list of the current certificates by serial number only. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | | `LIST` | `/pki/certs` | `200 application/json` | +| `GET` | `/pki/certs?list=true` | `200 application/json` | ### Sample Request @@ -432,8 +464,6 @@ $ curl \ https://vault.rocks/v1/pki/intermediate/generate/internal ``` -### Sample Response - ```json { "lease_id": "", @@ -805,6 +835,7 @@ returned, not any values. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | | `LIST` | `/pki/roles` | `200 application/json` | +| `GET` | `/pki/roles?list=true` | `200 application/json` | ### Sample Request @@ -854,14 +885,18 @@ $ curl \ ## Generate Root -This endpoint generates a new self-signed CA certificate and private key. _This -will overwrite any previously-existing private key and certificate._ If the path -ends with `exported`, the private key will be returned in the response; if it is -`internal` the private key will not be returned and *cannot be retrieved later*. -Distribution points use the values set via `config/urls`. +This endpoint generates a new self-signed CA certificate and private key. If +the path ends with `exported`, the private key will be returned in the +response; if it is `internal` the private key will not be returned and *cannot +be retrieved later*. Distribution points use the values set via `config/urls`. -As with other issued certificates, Vault will automatically revoke the generated -root at the end of its lease period; the CA certificate will sign its own CRL. +As with other issued certificates, Vault will automatically revoke the +generated root at the end of its lease period; the CA certificate will sign its +own CRL. + +As of Vault 0.8.1, if a CA cert/key already exists within the backend, this +function will return a 204 and will not overwrite it. 
Previous versions of +Vault would overwrite the existing cert/key with new values. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | @@ -912,6 +947,12 @@ root at the end of its lease period; the CA certificate will sign its own CRL. Useful if the CN is not a hostname or email address, but is instead some human-readable identifier. +- `permitted_dns_domains` `(string: "")` – A comma separated string (or, string + array) containing DNS domains for which certificates are allowed to be issued + or signed by this CA certificate. Supports subdomains via a `.` in front of + the domain, as per + [RFC](https://tools.ietf.org/html/rfc5280#section-4.2.1.10). + ### Sample Payload ```json @@ -946,6 +987,26 @@ $ curl \ } ``` +## Delete Root + +This endpoint deletes the current CA key (the old CA certificate will still be +accessible for reading until a new certificate/key are generated or uploaded). +_This endpoint requires sudo/root privileges._ + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/pki/root` | `204 (empty body)` | + + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/pki/root +``` + ## Sign Intermediate This endpoint uses the configured CA certificate to issue a certificate with @@ -974,7 +1035,8 @@ verbatim. - `ttl` `(string: "")` – Specifies the requested Time To Live (after which the certificate will be expired). This cannot be larger than the mount max (or, if - not set, the system max). + not set, the system max). However, this can be after the expiration of the + signing CA. - `format` `(string: "pem")` – Specifies the format for returned data. Can be `pem`, `der`, or `pem_bundle`. If `der`, the output is base64 encoded. If @@ -1001,13 +1063,18 @@ verbatim. path; 3) Extensions requested in the CSR will be copied into the issued certificate. 
+- `permitted_dns_domains` `(string: "")` – A comma separated string (or, string + array) containing DNS domains for which certificates are allowed to be issued + or signed by this CA certificate. Supports subdomains via a `.` in front of + the domain, as per + [RFC](https://tools.ietf.org/html/rfc5280#section-4.2.1.10). + ### Sample Payload ```json { "csr": "...", "common_name": "example.com" - } ``` @@ -1037,6 +1104,65 @@ $ curl \ "auth": null } ``` +## Sign Self-Issued + +This endpoint uses the configured CA certificate to sign a self-issued +certificate (which will usually be a self-signed certificate as well). + +**_This is an extremely privileged endpoint_**. The given certificate will be +signed as-is with only minimal validation performed (is it a CA cert, and is it +actually self-issued). The only values that will be changed will be the +authority key ID, the issuer DN, and, if set, any distribution points. + +This is generally only needed for root certificate rolling in cases where you +don't want/can't get access to a CSR (such as if it's a root stored in Vault +where the key is not exposed). If you don't know whether you need this +endpoint, you most likely should be using a different endpoint (such as +`sign-intermediate`). + +This endpoint requires `sudo` capability. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/pki/root/sign-self-issued` | `200 application/json` | + +### Parameters + +- `certificate` `(string: )` – Specifies the PEM-encoded self-issued certificate. + +### Sample Payload + +```json +{ + "certificate": "..." +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/pki/root/sign-self-issued +``` + +### Sample Response + +```json +{ + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "certificate": "-----BEGIN CERTIFICATE-----\nMIIDzDCCAragAwIBAgIUOd0ukLcjH43TfTHFG9qE0FtlMVgwCwYJKoZIhvcNAQEL\n...\numkqeYeO30g1uYvDuWLXVA==\n-----END CERTIFICATE-----\n", + "issuing_ca": "-----BEGIN CERTIFICATE-----\nMIIDUTCCAjmgAwIBAgIJAKM+z4MSfw2mMA0GCSqGSIb3DQEBCwUAMBsxGTAXBgNV\n...\nG/7g4koczXLoUM3OQXd5Aq2cs4SS1vODrYmgbioFsQ3eDHd1fg==\n-----END CERTIFICATE-----\n", + }, + "auth": null +} +``` + ## Sign Certificate diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/postgresql/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/postgresql/index.html.md index 7c3e2b6..e974ffe 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/postgresql/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/postgresql/index.html.md @@ -8,6 +8,11 @@ description: |- # PostgreSQL Secret Backend HTTP API +~> **Deprecation Note:** This backend is deprecated in favor of the +combined databases backend added in v0.7.1. See the API documentation for +the new implementation of this backend at +[PostgreSQL Database Plugin HTTP API](/api/secret/databases/postgresql.html). + This is the API documentation for the Vault PostgreSQL secret backend. For general information about the usage and operation of the PostgreSQL backend, please see the @@ -179,6 +184,7 @@ returned, not any values. 
| Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | | `LIST` | `/postgresql/roles` | `200 application/json` | +| `GET` | `/postgresql/roles?list=true` | `200 application/json` | ### Sample Request diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/rabbitmq/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/rabbitmq/index.html.md index 3948928..e5dffb5 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/rabbitmq/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/rabbitmq/index.html.md @@ -61,8 +61,7 @@ $ curl \ ## Configure Lease -This endpoint configures the lease settings for generated credentials. This is -endpoint requires sudo privileges. +This endpoint configures the lease settings for generated credentials. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | @@ -168,7 +167,7 @@ This endpoint deletes the role definition. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `DELETE` | `/rabbitmq/roles/:namer` | `204 (empty body)` | +| `DELETE` | `/rabbitmq/roles/:name` | `204 (empty body)` | ### Parameters diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/ssh/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/ssh/index.html.md index 03133ad..37da8a4 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/ssh/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/ssh/index.html.md @@ -144,7 +144,7 @@ This endpoint creates or updates a named role. `allow_subdomains`. - `key_option_specs` `(string: "")` – Specifies a aomma separated option - specification which will be prefixed to RSA keys in the remote host's + specification which will be prefixed to RSA keys in the remote host's authorized_keys file. 
N.B.: Vault does not check this string for validity. - `ttl` `(string: "")` – Specifies the Time To Live value provided as a string @@ -195,6 +195,13 @@ This endpoint creates or updates a named role. will always be the token display name. The key ID is logged by the SSH server and can be useful for auditing. +- `key_id_format` `(string: "")` – When supplied, this value specifies a custom + format for the key id of a signed certificate. The following variables are + availble for use: '{{token_display_name}}' - The display name of the token used + to make the request. '{{role_name}}' - The name of the role signing the request. + '{{public_key_hash}}' - A SHA256 checksum of the public key that is being signed. + e.g. "custom-keyid-{{token_display_name}}", + ### Sample Payload ```json @@ -286,6 +293,7 @@ returned, not any values. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | | `LIST` | `/ssh/roles` | `200 application/json` | +| `GET` | `/ssh/roles?list=true` | `200 application/json` | ### Sample Request @@ -612,7 +620,7 @@ overridden._ - `public_key` `(string: "")` – Specifies the public key part of the SSH CA key pair; required if `generate_signing_key` is false. -- `generate_signing_key` `(bool: false)` – Specifies if Vault should generate +- `generate_signing_key` `(bool: true)` – Specifies if Vault should generate the signing key pair internally. The generated public key will be returned so you can add it to your configuration. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/totp/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/totp/index.html.md new file mode 100644 index 0000000..0ed35d6 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/totp/index.html.md @@ -0,0 +1,273 @@ +--- +layout: "api" +page_title: "TOTP Secret Backend - HTTP API" +sidebar_current: "docs-http-secret-totp" +description: |- + This is the API documentation for the Vault TOTP secret backend. +--- + +# TOTP Secret Backend HTTP API + +This is the API documentation for the Vault TOTP secret backend. For +general information about the usage and operation of the TOTP backend, +please see the +[Vault TOTP backend documentation](/docs/secrets/totp/index.html). + +This documentation assumes the TOTP backend is mounted at the +`/totp` path in Vault. Since it is possible to mount secret backends at +any location, please update your API calls accordingly. + +## Create Key + +This endpoint creates or updates a key definition. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------------------------------------------------------------------------------- | +| `POST` | `/totp/keys/:name` | if generating a key and exported is true: `200 application/json` else: `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the key to create. This is specified as part of the URL. + +- `generate` `(bool: false)` – Specifies if a key should be generated by Vault or if a key is being passed from another service. + +- `exported` `(bool: true)` – Specifies if a QR code and url are returned upon generating a key. Only used if generate is true. + +- `key_size` `(int: 20)` – Specifies the size in bytes of the Vault generated key. Only used if generate is true. + +- `url` `(string: "")` – Specifies the TOTP key url string that can be used to configure a key. Only used if generate is false. 
+ +- `key` `(string: )` – Specifies the master key used to generate a TOTP code. Only used if generate is false. + +- `issuer` `(string: "" )` – Specifies the name of the key’s issuing organization. + +- `account_name` `(string: "" )` – Specifies the name of the account associated with the key. + +- `period` `(int or duration format string: 30)` – Specifies the length of time in seconds used to generate a counter for the TOTP code calculation. + +- `algorithm` `(string: "SHA1")` – Specifies the hashing algorithm used to generate the TOTP code. Options include "SHA1", "SHA256" and "SHA512". + +- `digits` `(int: 6)` – Specifies the number of digits in the generated TOTP code. This value can be set to 6 or 8. + +- `skew` `(int: 1)` – Specifies the number of delay periods that are allowed when validating a TOTP code. This value can be either 0 or 1. Only used if generate is true. + +- `qr_size` `(int: 200)` – Specifies the pixel size of the square QR code when generating a new key. Only used if generate is true and exported is true. If this value is 0, a QR code will not be returned. + +### Sample Payload + +```json +{ + "url": "otpauth://totp/Google:test@gmail.com?secret=Y64VEVMBTSXCYIWRSHRNDZW62MPGVU2G&issuer=Google" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/totp/keys/my-key +``` + +### Sample Payload + +```json +{ + "generate": true, + "issuer": "Google", + "account_name": "test@gmail.com", +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/totp/keys/my-key +``` + +### Sample Response + +```json +{ + "data": { + "barcode": "iVBORw0KGgoAAAANSUhEUgAAAMgAAADIEAAAAADYoy0BAAAGXklEQVR4nOyd4Y4iOQyEmRPv/8p7upX6BJm4XbbDbK30fT9GAtJJhpLjdhw3z1+/HmDEP396AvDO878/X1+9i1frWvu5Po/6Xz+P2kft1nFVa1f7z+YdjT/5PrEQMxDEDAQx4/n6orsGr6z9ZP1mviMbP/MBav/R6/U61Ud0vk8sxAwEMQNBzHju3lTvv6P2ajwS9Ve9zz+9pkfjRp+r/SjzwULMQBAzEMSMrQ/pUo0bouun7dW9LXVvrBq/TMBCzEAQMxDEjKM+JFqT17W4mu9Y+49eq/OL3r/GVX3CJ7KtWIgZCGIGgpix9SHTtXGa4476qfoa1adVc+HV/6/yfWIhZiCIGQhixpsP6Z4nulD3lqavV7q+Yvo6G7/zfWIhZiCIGQhixteJ/Rh1Da3e71d9RjRul2ocdeK7xELMQBAzEMSM3z6ku6dTrdOo1l9M6y5O7clVx5n4SCzEDAQxA0HMuN3L+qlavqj9itpePY+VtVdrHqfzeQULMQNBzEAQM97ikAv1vr/brltTeCp/svarcjLe2F1PnbohCGIGgphRqjG8mJ6PmtYMVnP363Vqv6d8qZrzf2AhfiCIGQhixm0c8n+jQ8+7+jZ4cY3PrlfHO/1Ml+45st18sRAzEMQMBDHjdxyixgPqs0lWsvvwqH00zrSO41R80p3XXXssxAwEMQNBzJCeuaieo6pedzGtb1/76fqgLH6ofg+dZ65gIWYgiBkIYsbbs9/V+/EVde1V+62eh1I/r/qIrs+Ixo2uYy/LGAQxA0HMeNvLilDX1OraXc2jVNtPzxJXr6v+HzuwEDMQxAwEMWNbp95d21WmzzBR6066e07dPMq0XoW9LEMQxAwEMUOqUz+1p9ONd07Xz586u6yifp/4EEMQxAwEMUPay7rIcthqTrx6v1/NTX+qZrIbF63v34GFmIEgZiCIGdvfU++e1a3GM2oOPjtvpfbfjS+qeZFJXgcLMQNBzEAQM6Tn9p7OLVdrFqP5TFF9ZXTdqfqTV7AQMxDEDAQx482HdPMPGdN8SjeHr6710zzJidrCB/kQTxDEDAQxY7uXdTGNC9S9pK6vqs6nWzdyej53PhELMQNBzEAQM0o59YtTz/xQfVO3jmOdl0rmE6f5ort5YSFmIIgZCGLGbU69eka3ep+v5sCzcbp5jZXMR0zr+aPPqVM3BkHMQBAzRs/tjejmwj9d05ihzq96nQr5EEMQxAwEMWPrQy6q9/fdevFTcVA0v+n5K7U/tf4lGhcfYgiCmIEgZtw+6+RCXUurvkKlepZ2vS5i+oyTaby0GxcLMQNBzEAQM0r5kKnv6K6xK9X4R13zu+eyJnXpazssxAwEMQNBzNj+fkg3nqjGK9laPz1vleXwq2v+p+vciUMMQRAzEMSM298xrOYDVqrtpmtzt59uHqc6v2zcBxbiB4KYgSBmbOvUV7q577VdOIliXqLr87p7Tere2YnrsRAzEMQMBDFj+zuGar3Gp+rNp3kUtR5lmj/Jxo/GvZsvFmIGgpiBIGbcPi/rW+MPPaeqOs407xL1E1E9lzWpg8FCzEAQMxDEDOk3qC66a7f6fsSn1uz18+o8P+GzsBAzEMQMBDFjm1Ov7L3s3p+2/6lcfoa6ZxaNm50DWyEOMQRBzEAQM7Zne6PX3XilW5M3zbd0c/3ZHpvqY6P+7j7HQsxAEDMQxIxRPqRaT6Kuzemkh7WJ3RrJbJxq7eOuPyzEDAQxA0HMKJ3t/XbxobW/Gmdka/PpPMxPgoWYgSBmIIgZ0m9QrXTP1mb9Ru2y+/hsD2xaM9jN
5UfjEIf8RSCIGQhiRus3qLp7ONU6jK4vynxMdn10XdY+m4/SHxZiBoKYgSBm3MYhGdl9/qkzvN18ilpDqF6nxiPVGs3Xz7EQMxDEDAQx4/ZcVoR6fqobZ6h7Vtm81TVejZdWuvHNXXssxAwEMQNBzHju3pyujdO68Ky9Wm+h9qPGJVG/6nyU+WIhZiCIGQhixtaHdFF9hlqLeOrcVPcMQDeOmtTNYyFmIIgZCGLGUR/SPQs73QuL5tGtiVznlc1X/T8iXtthIWYgiBkIYsbWh3T3nNS1dXqe6tReW8S0Hr1b5/LAQvxAEDMQxIw3H9I9nzU9R6XGHdn41dx4d4+rGp9En7OX9ReAIGYgiBlff6IWG2KwEDP+DQAA//+TDHXGhqE4+AAAAABJRU5ErkJggg==", + "url" : "otpauth://totp/Google:test@gmail.com?algorithm=SHA1&digits=6&issuer=Google&period=30&secret=HTXT7KJFVNAJUPYWQRWMNVQE5AF5YZI2", + } +} +``` + +If a QR code is returned, it consists of base64-formatted PNG bytes. You can embed it in a web page by including the base64 string in an `img` tag with the prefix `data:image/png;base64` + +``` + +``` + +## Read Key + +This endpoint queries the key definition. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/totp/keys/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the key to read. This is specified as part of the URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/totp/keys/my-key +``` + +### Sample Response + +```json +{ + "data": { + "account_name": "test@gmail.com", + "algorithm" : "SHA1", + "digits" : 6, + "issuer": "Google", + "period" : 30, + } +} +``` + +## List Keys + +This endpoint returns a list of available keys. Only the key names are +returned, not any values. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/totp/keys` | `200 application/json` | +| `GET` | `/totp/keys?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request LIST \ + https://vault.rocks/v1/totp/keys +``` + +### Sample Response + +```json +{ + "auth": null, + "data": { + "keys": ["my-key"] + }, + "lease_duration": 0, + "lease_id": "", + "renewable": false +} +``` + +## Delete Key + +This endpoint deletes the key definition. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/totp/keys/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the key to delete. This + is specified as part of the URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/totp/keys/my-key +``` + +## Generate Code + +This endpoint generates a new time-based one-time use password based on the named +key. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/totp/code/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the key to create + credentials against. This is specified as part of the URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/totp/code/my-key +``` + +### Sample Response + +```json +{ + "data": { + "code": "810920", + } +} +``` + +## Validate Code + +This endpoint validates a time-based one-time use password generated from the named +key. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/totp/code/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the key used to generate the password. This is specified as part of the URL. + +- `code` `(string: )` – Specifies the password you want to validate. + +### Sample Payload + +```json +{ + "code": "123802" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/totp/code/my-key +``` + +### Sample Response + +```json +{ + "data": { + "valid": true, + } +} +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/secret/transit/index.html.md b/vendor/github.com/hashicorp/vault/website/source/api/secret/transit/index.html.md index 37c7a7a..9437b01 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/secret/transit/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/secret/transit/index.html.md @@ -48,8 +48,10 @@ values set here cannot be changed after key creation. - `type` `(string: "aes256-gcm96")` – Specifies the type of key to create. The currently-supported types are: - - `aes256-gcm96` – AES-256 wrapped with GCM using a 12-byte nonce size (symmetric) + - `aes256-gcm96` – AES-256 wrapped with GCM using a 12-byte nonce size + (symmetric, supports derivation) - `ecdsa-p256` – ECDSA using the P-256 elliptic curve (asymmetric) + - `ed25519` – ED25519 (asymmetric, supports derivation) ### Sample Payload @@ -107,7 +109,8 @@ $ curl \ "keys": { "1": 1442851412 }, - "min_decryption_version": 0, + "min_decryption_version": 1, + "min_encryption_version": 0, "name": "foo", "supports_encryption": true, "supports_decryption": true, @@ -125,6 +128,7 @@ actual keys themselves). | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | | `LIST` | `/transit/keys` | `200 application/json` | +| `GET` | `/transit/keys?list=true` | `200 application/json` | ### Sample Request @@ -173,7 +177,7 @@ $ curl \ https://vault.rocks/v1/transit/keys/my-key ``` -#### Update Key Configuration +## Update Key Configuration This endpoint allows tuning configuration values for a given key. (These values are returned during a read operation on the named key.) @@ -189,8 +193,12 @@ are returned during a read operation on the named key.) 
policy can prevent old copies of ciphertext from being decrypted, should they fall into the wrong hands. For signatures, this value controls the minimum version of signature that can be verified against. For HMACs, this controls - the minimum version of a key allowed to be used as the key for the HMAC - function. + the minimum version of a key allowed to be used as the key for verification. + +- `min_encryption_version` `(int: 0)` – Specifies the minimum version of the + key that can be used to encrypt plaintext, sign payloads, or generate HMACs. + Must be `0` (which will use the latest version) or a value greater or equal + to `min_decryption_version`. - `deletion_allowed` `(bool: false)`- Specifies if the key is allowed to be deleted. @@ -234,7 +242,7 @@ $ curl \ https://vault.rocks/v1/transit/keys/my-key/rotate ``` -## Read Key +## Export Key This endpoint returns the named key. The `keys` object shows the value of the key for each version. If `version` is specified, the specific version will be @@ -259,9 +267,9 @@ be valid. - `name` `(string: )` – Specifies the name of the key to read information about. This is specified as part of the URL. -- `version` `(int: "")` – Specifies the version of the key to read. If omitted, +- `version` `(string: "")` – Specifies the version of the key to read. If omitted, all versions of the key will be returned. This is specified as part of the - URL. + URL. If the version is set to `latest`, the current key will be returned. ### Sample Request @@ -310,6 +318,10 @@ the key does not exist, an error will be returned. - `context` `(string: "")` – Specifies the **base64 encoded** context for key derivation. This is required if key derivation is enabled for this key. +- `key_version` `(int: 0)` – Specifies the version of the key to use for + encryption. If not set, uses the latest version. Must be greater than or + equal to the key's `min_encryption_version`, if set. 
+ - `nonce` `(string: "")` – Specifies the **base64 encoded** nonce value. This must be provided if convergent encryption is enabled for this key and the key was generated with Vault 0.6.1. Not required for keys created in 0.6.2+. The @@ -338,7 +350,7 @@ the key does not exist, an error will be returned. - `type` `(string: "aes256-gcm96")` –This parameter is required when encryption key is expected to be created. When performing an upsert operation, the type of key to create. Currently, "aes256-gcm96" (symmetric) is the only type - supported. + supported. - `convergent_encryption` `(string: "")` – This parameter will only be used when a key is expected to be created. Whether to support convergent encryption. @@ -468,6 +480,10 @@ functionality to untrusted users or scripts. - `context` `(string: "")` – Specifies the **base64 encoded** context for key derivation. This is required if key derivation is enabled. +- `key_version` `(int: 0)` – Specifies the version of the key to use for the + operation. If not set, uses the latest version. Must be greater than or equal + to the key's `min_encryption_version`, if set. + - `nonce` `(string: "")` – Specifies a base64 encoded nonce value used during encryption. Must be provided if convergent encryption is enabled for this key and the key was generated with Vault 0.6.1. Not required for keys created in @@ -680,7 +696,7 @@ $ curl \ } ``` -## Generate HMAC with Key +## Generate HMAC This endpoint returns the digest of given data using the specified hash algorithm and the named key. The key can be of any type supported by `transit`; @@ -697,6 +713,10 @@ be used. - `name` `(string: )` – Specifies the name of the encryption key to generate hmac against. This is specified as part of the URL. +- `key_version` `(int: 0)` – Specifies the version of the key to use for the + operation. If not set, uses the latest version. Must be greater than or equal + to the key's `min_encryption_version`, if set. 
+ - `algorithm` `(string: "sha2-256")` – Specifies the hash algorithm to use. This can also be specified as part of the URL. Currently-supported algorithms are: @@ -707,9 +727,6 @@ be used. - `input` `(string: )` – Specifies the **base64 encoded** input data. -- `format` `(string: "hex")` – Specifies the output encoding. This can be either - `hex` or `base64`. - ### Sample Payload ```json @@ -738,7 +755,7 @@ $ curl \ } ``` -## Sign Data with Key +## Sign Data This endpoint returns the cryptographic signature of the given data using the named key and the specified hash algorithm. The key must be of a type that @@ -751,10 +768,16 @@ supports signing. ### Parameters - `name` `(string: )` – Specifies the name of the encryption key to - generate hmac against. This is specified as part of the URL. + use for signing. This is specified as part of the URL. -- `algorithm` `(string: "sha2-256")` – Specifies the hash algorithm to use. This - can also be specified as part of the URL. Currently-supported algorithms are: +- `key_version` `(int: 0)` – Specifies the version of the key to use for + signing. If not set, uses the latest version. Must be greater than or equal + to the key's `min_encryption_version`, if set. + +- `algorithm` `(string: "sha2-256")` – Specifies the hash algorithm to use for + supporting key types (notably, not including `ed25519` which specifies its + own hash algorithm). This can also be specified as part of the URL. + Currently-supported algorithms are: - `sha2-224` - `sha2-256` @@ -763,9 +786,6 @@ supports signing. - `input` `(string: )` – Specifies the **base64 encoded** input data. -- `format` `(string: "hex")` – Specifies the output encoding. This can be either - `hex` or `base64`. - ### Sample Payload ```json @@ -794,7 +814,7 @@ $ curl \ } ``` -## Verify Data with Key +## Verify Signed Data This endpoint returns whether the provided signature is valid for the given data. @@ -805,8 +825,8 @@ data. 
### Parameters -- `name` `(string: )` – Specifies the name of the encryption key to - generate hmac against. This is specified as part of the URL. +- `name` `(string: )` – Specifies the name of the encryption key that + was used to generate the signature or HMAC. - `algorithm` `(string: "sha2-256")` – Specifies the hash algorithm to use. This can also be specified as part of the URL. Currently-supported algorithms are: @@ -818,9 +838,6 @@ data. - `input` `(string: )` – Specifies the **base64 encoded** input data. -- `format` `(string: "hex")` – Specifies the output encoding. This can be either - `hex` or `base64`. - - `signature` `(string: "")` – Specifies the signature output from the `/transit/sign` function. Either this must be supplied or `hmac` must be supplied. diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/auth.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/auth.html.md index eb9a61d..36ca664 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/auth.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/auth.html.md @@ -74,10 +74,22 @@ For example, mounting the "foo" auth backend will make it accessible at - `type` `(string: )` – Specifies the name of the authentication backend type, such as "github" or "token". -Additionally, the following options are allowed in Vault open-source, but +- `config` `(map: nil)` – Specifies configuration options for + this mount. These are the possible values: + + - `plugin_name` + + The plugin_name can be provided in the config map or as a top-level option, + with the former taking precedence. + +- `plugin_name` `(string: "")` – Specifies the name of the auth plugin to + use based from the name in the plugin catalog. Applies only to plugin + backends. 
+ +Additionally, the following options are allowed in Vault open-source, but relevant functionality is only supported in Vault Enterprise: -- `local` `(bool: false)` – Specifies if the auth backend is a local mount +- `local` `(bool: false)` – Specifies if the auth backend is a local mount only. Local mounts are not replicated nor (if a secondary) removed by replication. diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-accessor.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-accessor.html.md index 70b4494..06a2bf3 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-accessor.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-accessor.html.md @@ -44,7 +44,7 @@ for the given path. $ curl \ --header "X-Vault-Token: ..." \ --request POST \ - --data payload.json \ + --data @payload.json \ https://vault.rocks/v1/sys/capabilities-accessor ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-self.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-self.html.md index 175c2b3..4adfb96 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-self.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/capabilities-self.html.md @@ -41,7 +41,7 @@ client token is the Vault token with which this API call is made. $ curl \ --header "X-Vault-Token: ..." 
\ --request POST \ - --data payload.json \ + --data @payload.json \ https://vault.rocks/v1/sys/capabilities-self ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/config-auditing.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/config-auditing.html.md index 62a0fbf..83fac0a 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/config-auditing.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/config-auditing.html.md @@ -50,7 +50,7 @@ This endpoint lists the information for the given request header. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `POST` | `/sys/config/auditing/request-headers/:name` | `200 application/json` | +| `GET` | `/sys/config/auditing/request-headers/:name` | `200 application/json` | ### Parameters @@ -105,7 +105,7 @@ This endpoint enables auditing of a header. $ curl \ --header "X-Vault-Token: ..." \ --request PUT \ - --data payload.json \ + --data @payload.json \ https://vault.rocks/v1/sys/config/auditing/request-headers/my-header ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/config-cors.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/config-cors.html.md new file mode 100644 index 0000000..26c5b42 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/config-cors.html.md @@ -0,0 +1,99 @@ +--- +layout: "api" +page_title: "/sys/config/cors - HTTP API" +sidebar_current: "docs-http-system-config-cors" +description: |- + The '/sys/config/cors' endpoint configures how the Vault server responds to cross-origin requests. +--- + +# `/sys/config/cors` + +The `/sys/config/cors` endpoint is used to configure CORS settings. + +- **`sudo` required** – All CORS endpoints require `sudo` capability in + addition to any path-specific capabilities. + +## Read CORS Settings + +This endpoint returns the current CORS configuration. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/sys/config/cors` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/sys/config/cors +``` + +### Sample Response + +```json +{ + "enabled": true, + "allowed_origins": ["http://www.example.com"], + "allowed_headers": [ + "Content-Type", + "X-Requested-With", + "X-Vault-AWS-IAM-Server-ID", + "X-Vault-No-Request-Forwarding", + "X-Vault-Token", + "X-Vault-Wrap-Format", + "X-Vault-Wrap-TTL" + ] +} +``` + +## Configure CORS Settings + +This endpoint allows configuring the origins that are permitted to make +cross-origin requests, as well as headers that are allowed on cross-origin requests. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `PUT` | `/sys/config/cors` | `204 (empty body)` | + +### Parameters + +- `allowed_origins` `(string or string array: )` – A wildcard (`*`), comma-delimited string, or array of strings specifying the origins that are permitted to make cross-origin requests. + +- `allowed_headers` `(string or string array: "" or [])` – A comma-delimited string or array of strings specifying headers that are permitted to be on cross-origin requests. Headers set via this parameter will be appended to the list of headers that Vault allows by default. + +### Sample Payload + +```json +{ + "allowed_origins": "*", + "allowed_headers": "X-Custom-Header" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request PUT \ + --data @payload.json \ + https://vault.rocks/v1/sys/config/cors +``` + +## Delete CORS Settings + +This endpoint removes any CORS configuration. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/sys/config/cors` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request DELETE \ + https://vault.rocks/v1/sys/config/cors +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/generate-root.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/generate-root.html.md index e6d71f3..54be70e 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/generate-root.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/generate-root.html.md @@ -81,7 +81,7 @@ generation attempt can take place at a time. One (and only one) of `otp` or ``` $ curl \ --request PUT \ - --data payload.json \ + --data @payload.json \ https://vault.rocks/v1/sys/generate-root/attempt ``` @@ -139,7 +139,7 @@ nonce must be provided with each call. ```json { "key": "acbd1234", - "nonce": "ad235", + "nonce": "ad235" } ``` @@ -148,7 +148,7 @@ nonce must be provided with each call. ``` $ curl \ --request PUT \ - --data payload.json \ + --data @payload.json \ https://vault.rocks/v1/sys/generate-root/update ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/init.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/init.html.md index e9de30a..ad3b7e6 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/init.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/init.html.md @@ -94,7 +94,7 @@ Additionally, the following options are only supported on Vault Pro/Enterprise: ``` $ curl \ --request PUT \ - --data payload.json \ + --data @payload.json \ https://vault.rocks/v1/sys/init ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/leader.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/leader.html.md index 5e20ca9..358fffb 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/leader.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/leader.html.md @@ -34,6 +34,7 @@ $ curl \ { "ha_enabled": true, "is_self": false, - 
"leader_address": "https://127.0.0.1:8200/" + "leader_address": "https://127.0.0.1:8200/", + "leader_cluster_address": "https://127.0.0.1:8201/" } ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/leases.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/leases.html.md index ec51664..7d20d76 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/leases.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/leases.html.md @@ -62,6 +62,7 @@ This endpoint returns a list of lease ids. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | | `LIST` | `/sys/leases/lookup/:prefix` | `200 application/json` | +| `GET` | `/sys/leases/lookup/:prefix?list=true` | `200 application/json` | ### Sample Request diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-duo.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-duo.html.md new file mode 100644 index 0000000..db081fb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-duo.html.md @@ -0,0 +1,119 @@ +--- +layout: "api" +page_title: "/sys/mfa/method/duo - HTTP API" +sidebar_current: "docs-http-system-mfa-duo" +description: |- + The '/sys/mfa/method/duo' endpoint focuses on managing Duo MFA behaviors in Vault Enterprise. +--- + +## Configure Duo MFA Method + +This endpoint defines a MFA method of type Duo. + +| Method | Path | Produces | +| :------- | :----------------------------- | :--------------------- | +| `POST` | `/sys/mfa/method/duo/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Name of the MFA method. + +- `mount_accessor` `(string: )` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Personas associated with this mount as the username in the mapping. 
+ +- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{persona.name}}@example.com"`. If blank, the Persona's Name field will be used as-is. Currently-supported mappings: + - persona.name: The name returned by the mount configured via the `mount_accessor` parameter + - entity.name: The name configured for the Entity + - persona.metadata.``: The value of the Persona's metadata parameter + - entity.metadata.``: The value of the Entity's metadata parameter + +- `secret_key` `(string)` - Secret key for Duo. + +- `integration_key` `(string)` - Integration key for Duo. + +- `api_hostname` `(string)` - API hostname for Duo. + +- `push_info` `(string)` - Push information for Duo. + +### Sample Payload + +```json +{ + "mount_accessor": "auth_userpass_1793464a", + "secret_key": "BIACEUEAXI20BNWTEYXT", + "integration_key":"8C7THtrIigh2rPZQMbguugt8IUftWhMRCOBzbuyz", + "api_hostname":"api-2b5c39f5.duosecurity.com" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/mfa/method/duo/my_duo +``` + +## Read Duo MFA Method + +This endpoint queries the MFA configuration of Duo type for a given method +name. + +| Method | Path | Produces | +| :------- | :----------------------------- | :----------------------- | +| `GET` | `/sys/mfa/method/duo/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Name of the MFA method. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request GET \ + https://vault.rocks/v1/sys/mfa/method/duo/my_duo + +``` + +### Sample Response + +```json +{ + "data": { + "api_hostname": "api-2b5c39f5.duosecurity.com", + "id": "0ad21b78-e9bb-64fa-88b8-1e38db217bde", + "integration_key": "8C7THtrIigh2rPZQMbguugt8IUftWhMRCOBzbuyz", + "mount_accessor": "auth_userpass_1793464a", + "name": "my_duo", + "pushinfo": "", + "secret_key": "BIACEUEAXI20BNWTEYXT", + "type": "duo", + "username_format": "" + } +} +``` +## Delete Duo MFA Method + +This endpoint deletes a Duo MFA method. + +| Method | Path | Produces | +| :------- | :----------------------------- | :----------------------- | +| `DELETE` | `/sys/mfa/method/duo/:name` | `204 (empty body)` | + + +### Parameters + +- `name` `(string: )` - Name of the MFA method. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/sys/mfa/method/duo/my_duo + +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-okta.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-okta.html.md new file mode 100644 index 0000000..1b82370 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-okta.html.md @@ -0,0 +1,115 @@ +--- +layout: "api" +page_title: "/sys/mfa/method/okta - HTTP API" +sidebar_current: "docs-http-system-mfa-okta" +description: |- + The '/sys/mfa/method/okta' endpoint focuses on managing Okta MFA behaviors in Vault Enterprise. +--- + +## Configure Okta MFA Method + +This endpoint defines a MFA method of type Okta. + +| Method | Path | Produces | +| :------- | :----------------------------- | :--------------------- | +| `POST` | `/sys/mfa/method/okta/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Name of the MFA method. + +- `mount_accessor` `(string: )` - The mount to tie this method to for use in automatic mappings. 
The mapping will use the Name field of Personas associated with this mount as the username in the mapping. + +- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{persona.name}}@example.com"`. If blank, the Persona's Name field will be used as-is. Currently-supported mappings: + - persona.name: The name returned by the mount configured via the `mount_accessor` parameter + - entity.name: The name configured for the Entity + - persona.metadata.``: The value of the Persona's metadata parameter + - entity.metadata.``: The value of the Entity's metadata parameter + +- `org_name` `(string)` - Name of the organization to be used in the Okta API. + +- `api_token` `(string)` - Okta API key. + +- `base_url` `(string)` - If set, will be used as the base domain for API requests. Examples are okta.com, oktapreview.com, and okta-emea.com. + +### Sample Payload + +```json +{ + "mount_accessor": "auth_userpass_1793464a", + "org_name": "dev-262778", + "api_token": "0081u7KrReNkzmABZJAP2oDyIXccveqx9vIOEyCZDC" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/mfa/method/okta/my_okta +``` + +## Read Okta MFA Method + +This endpoint queries the MFA configuration of Okta type for a given method +name. + +| Method | Path | Produces | +| :------- | :----------------------------- | :----------------------- | +| `GET` | `/sys/mfa/method/okta/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Name of the MFA method. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request GET \ + https://vault.rocks/v1/sys/mfa/method/okta/my_okta + +``` + +### Sample Response + +```json +{ + "data": { + "api_token": "0081u7KrReNkzmABZJAP2oDyIXccveqx9vIOEyCZDC", + "id": "e39f08a1-a42d-143d-5b87-15c61d89c15a", + "mount_accessor": "auth_userpass_1793464a", + "name": "my_okta", + "org_name": "dev-262778", + "production": true, + "type": "okta", + "username_format": "" + } +} +``` +## Delete Okta MFA Method + +This endpoint deletes an Okta MFA method. + +| Method | Path | Produces | +| :------- | :----------------------------- | :----------------------- | +| `DELETE` | `/sys/mfa/method/okta/:name` | `204 (empty body)` | + + +### Parameters + +- `name` `(string: )` - Name of the MFA method. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/sys/mfa/method/okta/my_okta + +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-pingid.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-pingid.html.md new file mode 100644 index 0000000..a519f87 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-pingid.html.md @@ -0,0 +1,111 @@ +--- +layout: "api" +page_title: "/sys/mfa/method/pingid - HTTP API" +sidebar_current: "docs-http-system-mfa-pingid" +description: |- + The '/sys/mfa/method/pingid' endpoint focuses on managing PingID MFA behaviors in Vault Enterprise. +--- + +## Configure PingID MFA Method + +This endpoint defines a MFA method of type PingID. + +| Method | Path | Produces | +| :------- | :----------------------------- | :--------------------- | +| `POST` | `/sys/mfa/method/pingid/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Name of the MFA method. + +- `mount_accessor` `(string: )` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Personas associated with this mount as the username in the mapping. 
+ +- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{persona.name}}@example.com"`. If blank, the Persona's Name field will be used as-is. Currently-supported mappings: + - persona.name: The name returned by the mount configured via the `mount_accessor` parameter + - entity.name: The name configured for the Entity + - persona.metadata.``: The value of the Persona's metadata parameter + - entity.metadata.``: The value of the Entity's metadata parameter + +- `settings_file_base64` `(string)` - A base64-encoded third-party settings file retrieved from PingID's configuration page. + +### Sample Payload + +```json +{ + "mount_accessor": "auth_userpass_1793464a", + "settings_file_base64": "AA8owj3..." +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/mfa/method/pingid/ping +``` + +## Read PingID MFA Method + +This endpoint queries the MFA configuration of PingID type for a given method +name. + +| Method | Path | Produces | +| :------- | :----------------------------- | :----------------------- | +| `GET` | `/sys/mfa/method/pingid/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Name of the MFA method. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request GET \ + https://vault.rocks/v1/sys/mfa/method/pingid/ping + +``` + +### Sample Response + +```json +{ + "data": { + "use_signature": true, + "idp_url": "https://idpxnyl3m.pingidentity.com/pingid", + "admin_url": "https://idpxnyl3m.pingidentity.com/pingid", + "authenticator_url": "https://authenticator.pingone.com/pingid/ppm", + "mount_accessor": "auth_userpass_1793464a", + "name": "ping", + "org_alias": "181459b0-9fb1-4938-8c86...", + "type": "pingid", + "username_format": "" + } +} +``` +## Delete PingID MFA Method + +This endpoint deletes a PingID MFA method. + +| Method | Path | Produces | +| :------- | :----------------------------- | :----------------------- | +| `DELETE` | `/sys/mfa/method/pingid/:name` | `204 (empty body)` | + + +### Parameters + +- `name` `(string: )` - Name of the MFA method. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/sys/mfa/method/pingid/ping + +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-totp.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-totp.html.md new file mode 100644 index 0000000..6e1d622 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa-totp.html.md @@ -0,0 +1,235 @@ +--- +layout: "api" +page_title: "/sys/mfa/method/totp - HTTP API" +sidebar_current: "docs-http-system-mfa-totp" +description: |- + The '/sys/mfa/method/totp' endpoint focuses on managing TOTP MFA behaviors in Vault Enterprise. +--- + +## Configure TOTP MFA Method + +This endpoint defines a MFA method of type TOTP. + +| Method | Path | Produces | +| :------- | :----------------------------- | :--------------------- | +| `POST` | `/sys/mfa/method/totp/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Name of the MFA method. + +- `issuer` `(string: )` - The name of the key's issuing organization. 
+ +- `period` `(int or duration format string: 30)` - The length of time used to generate a counter for the TOTP token calculation. + +- `key_size` `(int: 20)` – Specifies the size in bytes of the generated key. + +- `qr_size` `(int: 200)` - The pixel size of the generated square QR code. + +- `algorithm` `(string: "SHA1")` – Specifies the hashing algorithm used to generate the TOTP code. Options include "SHA1", "SHA256" and "SHA512". + +- `digits` `(int: 6)` - The number of digits in the generated TOTP token. This value can either be 6 or 8. + +- `skew` `(int: 1)` - The number of delay periods that are allowed when validating a TOTP token. This value can either be 0 or 1. + + +### Sample Payload + +```json +{ + "issuer": "vault" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/mfa/method/totp/my_totp +``` + +## Read TOTP MFA Method + +This endpoint queries the MFA configuration of TOTP type for a given method +name. + +| Method | Path | Produces | +| :------- | :----------------------------- | :----------------------- | +| `GET` | `/sys/mfa/method/totp/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Name of the MFA method. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request GET \ + https://vault.rocks/v1/sys/mfa/method/totp/my_totp + +``` + +### Sample Response + +```json +{ + "data": { + "algorithm": "SHA1", + "digits": 6, + "id": "865587ba-6229-7f2a-6da0-609d5370af70", + "issuer": "vault", + "key_size": 20, + "name": "my_totp", + "period": 30, + "qr_size": 200, + "skew": 1, + "type": "totp" + } +} +``` + +## Delete TOTP MFA Method + +This endpoint deletes a TOTP MFA method. 
+ +| Method | Path | Produces | +| :------- | :----------------------------- | :----------------------- | +| `DELETE` | `/sys/mfa/method/totp/:name` | `204 (empty body)` | + + +### Parameters + +- `name` `(string: )` - Name of the MFA method. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/sys/mfa/method/totp/my_totp + +``` + +## Generate a TOTP MFA Secret + +This endpoint generates an MFA secret in the entity of the calling token, if it +doesn't exist already, using the configuration stored under the given MFA +method name. + +| Method | Path | Produces | +| :------- | :------------------------------------ | :----------------------- | +| `GET` | `/sys/mfa/method/totp/:name/generate` | `200 application/json` | + +### Parameters + +- `name` `(string: )` - Name of the MFA method. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request GET \ + https://vault.rocks/v1/sys/mfa/method/totp/my_totp/generate +``` + +### Sample Response + +```json +{ + "data": { + "barcode": 
"iVBORw0KGgoAAAANSUhEUgAAAMgAAADIEAAAAADYoy0BAAAGc0lEQVR4nOyd244bOQxEZxbz/7+cRQI4sLWiyCLlTU1wzkMAu1uXTIGSxUv3148fH2DEP396AvDK189/Pj97jR/W9Wi/fs7uz/pZya5H92dk40fzjeY1+XtiIWYgiBkIYsbX84dba7O6B0z3hmgPqO5Z6/WsH3VvynjuDwsxA0HMQBAzvnZfZueI6Pvs93r2+z6ax7qWZ+2zPSP7PutfPW8of08sxAwEMQNBzNjuISqRLyc7X6jnkex8UPVBqXtJxk2PORZiBoKYgSBmXNlDsriIeq6J+umeG1RfVjav7L4JWIgZCGIGgpix3UO6a6MaU4/OD9Fnda2P2qnxl2ge1fbKOFiIGQhiBoKY8bKHdH052V5Q3VPUz9n42XhR++zzStWXVgELMQNBzEAQM37tIbd9MtX4Qvc8EI13q32210Sfb8wHCzEDQcxAEDM+f6532e/86nmiPYk31X2sZP1Pz0PVGP+pPRZiBoKYgSBmbPeQ/9xUvP6geg7p9leNj3RrH7v1K+reRm7vNwBBzEAQM471IVmt3oN31V9E93Xr3KNx1BrD7t+jMl8sxAwEMQNBzDjm9lZj5d04gZoLHPX3rjjFu3J5T/8/LMQMBDEDQcyQ6kPUHNsH1TU+Gi/qr+rLmo6zfq6eTzr9YiFmIIgZCGLG5/M69y5fzfr9Ol613bQ/NaYe9bui5gKczmNYiBkIYgaCmHF85mK01t2KO2Q1h9l43dzgbhxG7b+zZ2EhZiCIGQhiRuvZ77dygNVY+3q9es5Qv1+vT2sIlb0OCzEDQcxAEDMkX1bXN7S2z8brxl2q40b3rXR9bxn4sr4RCGIGgpjxUh8S0a2feFdNYrd/Ndad9Xsrpv/cHgsxA0HMQBAzPk/reXdN7fqA3ln/PZnndE9SxsFCzEAQMxDEjJd4iJqrqtYiPlBr9qZE81znk7V/F8TUjUEQMxDEjO1ze9U1PqtFzO5X87VW1H6i+XXqyneQl/UXgSBmIIgZpdzebgxdPWdMfVnr/dHn23XsWb18VpP4DBZiBoKYgSBmbPOyukzr2Lvnlu781FzkaF7deezAQsxAEDMQxIxjTP33TcN8JpXqOOp9qg8tm586n8qehYWYgSBmIIgZrfcYZvGPW2tztZ0aj8nGzb7Prnfr5z+wED8QxAwEMaP0PvVpzV63zru6pld//6t7SvRZzXmO5rPrBwsxA0HMQBAzpGeddH08WT/VNTv6vZ/NJxp/Wh8S9ZvN5/T/x0LMQBAzEMSMY0y9mpdV3YPU+pNsnGm+1v9dyxhBjaExCGIGgpixPYdUzwUdf/8JNV+qu3dE/aj9Z/Un0XzWcTiHGIMgZiCIGaX3qf+++fLe0f0dP83FVfO0VNS9jXOIMQhiBoKY8esc0vUBqb4ttY49Q13js/uzPKsuSr4XFmIGgpiBIGaM8rKmz0bJxo36nZ5Xov6zcbvnFqWWEQsxA0HMQBAzSs9cXKneH8ULpnUV0/lle0Y3DqOOv2uPhZiBIGYgiBnbOvVunfdKNyf2to9L9UV1Y/Tr/ep5iXiIIQhiBoKYcczLys4P1b2lGwNf23dr99Q8rqnvrOvr+sBC/EAQMxDEjG1M/UHXFxWhxAUq31evd5nmjWU+MvKyvgEIYgaCmLF9F+7Ul7TSrbdQzw/qeOv9K7f+v0o7LMQMBDEDQcw4vj8kYlpf3vUFZe2jeVbbVX1Y1fE6eyAWYgaCmIEgZmyfdVL9XT7NAb5F9xyh7n3Tc1IlToKFmIEgZiCIGaV3UHXrv6P23fyubj1K1l80zwg17yq6vhsfCzE DQcxAEDOOz+2troUZaq1hNP40lr/eP61TicbP5nO6joWYgSBmIIgZx7ysiNu+ruj6dB5q7D4ii5Oo82EP+UYgiBkIYob0/pCV2/Uda7/TunX1PJHNq9qvGvN/HgcLMQNBzEAQM1p5WdM6kI 
mv5zReNo9uvtet+WTz+sBC/EAQMxDEjO0zF99dA9it+6jOM7qe+dKqde7V/qP5nP5eWIgZCGIGgpix3UNUbsXkq/Xd2Thd35zqE5v66Hb9YCFmIIgZCGLGlT3kwS1fUbcGsVq3HvUXnY/U+ ExEZW/DQsxAEDMQxIzS+0Mybq3REd1c3ur5qBs7z/a4zjNWsBAzEMQMBDHjZQ+Z+oAeqGv42o9aq5j1m5HN51ZdfWX+WIgZCGIGgpixfX8I/DmwEDP+DQAA//9kwGH4xZewMgAAAABJRU5E rkJggg==", + "url": "otpauth://totp/vault:4746fb81-028c-cd4e-026b-7dd18fe4c2f4?algorithm=SHA1&digits=6&issuer=vault&period=30&secret=XVE7TOZWJVEWQOATOD7 U53IEAJG72Z2I" + } +} +``` + +## Administratively Generate a TOTP MFA Secret + +This endpoint can be used to generate a TOTP MFA secret. Unlike the `generate` +API which stores the generated secret on the entity ID of the calling token, +the `admin-generate` API stores the generated secret on the given entity ID. + +| Method | Path | Produces | +| :------- | :------------------------------------------- | :----------------------- | +| `POST` | `/sys/mfa/method/totp/:name/admin-generate` | `200 application/json` | + +### Parameters + +- `name` `(string: )` - Name of the MFA method. + +- `entity_id` `(string: )` - Entity ID on which the generated secret + needs to get stored. + +### Sample Payload + +```json +{ + "entity_id":"4746fb81-028c-cd4e-026b-7dd18fe4c2f4" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request POST \ + --data @payload.json + https://vault.rocks/v1/sys/mfa/method/totp/my_totp/admin-generate +``` + +### Sample Response + +```json +{ + "data": { + "barcode": "iVBORw0KGgoAAAANSUhEUgAAAMgAAADIEAAAAADYoy0BAAAGZElEQVR4nOyd7W4jNwxFkyLv/8pbpMAAHnUo8pJyc1Oc82OB2KOP9QVFSyTlrz9/PsCIv356AnDn6/ufz89e48i6ov6u59f319ezfqtWnf2/snHX19XnVL7bYyFmIIgZCGLG5/e61V2b37WWXkRr9+lxonEvqv1XfeHK6/NYiBkIYgaCmPH19GK2j4ieX9/P9h0R2T6l+pzqYzKfmM0zQvk8sRAzEMQMBDHj0YdMqa6Z1TOvyDdF/VTPnqLnVzIfePLEHAsxA0HMQBAz3uJDqvuVbO1Vv/934yzReNV4ykmwEDMQxAwEMePRh5xeG6fxjNXnVM+Y1HHVM7Tq56R8nliIGQhiBoKYcfMhp2LT6hp7UY2hV8dTY/KZT1N9SufzxELMQBAzEMSMf3zI6X2HembUXbuz8dT9g+qLIiafJxZiBoKYgSBm3OpDunlMartTubrdeEUWI1dzAqa5wK9gIWYgiBkIYsaReIi6P1jbqd/7qznDqm9SfcKJs6sVLMQMBDEDQcz43K3z3fqLaf336fiFymQf8dRe8aVYiBkIYgaCmHHzIdW8pGo8QT1rOuULTvkc9a6TqD+lTgULMQNBzEAQMx7jIRdqjV7ULvpbrReJxl/nodYodseLxu/GWz6wED8QxAwEMeN2X9ZK9/4neRLDNfrUc919yPp8t/8PLMQPBDEDQcyQ6tTVO0JWqvGT6PWu75jub6L+T/kg4iHGIIgZCGLGKB5yKn5SpVuDWKXrI0/4jgssxAwEMQNBzNieZWV0v/erZ0Knxp3OYxoXqoyPhZiBIGYgiBmPub1q7m7G6fiB2n+3niSbZwT7kP8RCGIGgpjxeF9W5iPU8/+ofUY1r6q6ZkfzmtbJV2skozw04iHGIIgZCGKGlJeVvZ61V+MX07OiqJ9oftE41fGm9TD4EEMQxAwEMaN010lW1zGNoVfX+OrZl/rcfx0P2c0XCzEDQcxAEDMe87K68YBubH7tdxonmcb0q+2qZ2DK/gkLMQNBzEAQM2516lXUs6GIbA3v+qruPinrX/Ud0Xx288JCzEAQMxDEjG2NYXf/odztsXuuEoOu9BO93933RONm86n0h4WYgSBmIIgZt9+gqtaDq7m3K2rObnc+UT/Z36f2MZ28MCzEDAQxA0HMaJ1lTevBo/5OxRsysv1O9Fx1nO4Z3QcW4geCmIEgZki/H1Klu+Z369PVeVXHexfEQ34RCGIGgpjxWGN40c1VXdtXaxW79SLTOvqonVpDqdavkNv7C0AQMxDEjNK9vdP4gFq3ntV1Z+NEf099ZEZ1vrv+sBAzEMQMBDFjW2PYjT9U4xqqT1D7/ak4j5rn9QoWYgaCmIEgZpTu7e2u5Vk/4aQO3XFSHUc9s1rnNfVdr/1hIWYgiBkIYsZtH6LGA9Z20d8Z3fuq1HyqUznC1fZVX0c8xBgEMQNBzNje2xvxrlzYan1K1H82fjXOUfU12XzW5yufBxZiBoKYgSBmSL+FezE9s1lR9yHq2VvUT7fefOpjd58fFmIGgpiBIGZId52sr6u5u9M7RtTn1Hmr8ZzqnSvK2R4WYgaCmIEgZmzjIdHr3Rzfi278I3pdvStFrWVUfU52d8quPRZiBoKYgSBmbOvU//Xwobr1U7FpNV8qatetm4+onvU9jYuFmIEgZiCIGbf7srLv1d01vLsWqzmzapymeyaWfQ5qXtsrWIgZCGIGgphR+g2q9f0V9Y6SartsXtP8qOmZWDTe5AwMCzEDQcxAEDMeY+rVfUj0fLddN9Y+3feo
dfPd+pnKvLAQMxDEDAQxY1unPo11q+2i8adxCXV+an/qfHbzwELMQBAzEMQMKaYediL6FDVmruaBTWsU1bMwdT67/zcWYgaCmIEgZrR+g+piPata18Tu9/Zurq5aTxLNV833qs6nMg8sxAwEMQNBzHisD8lQ6y3W/qt3kpyuS8/ez3xiNL/qfMjL+oUgiBkIYkYppn7R9THd56s5tBGn60myfcSJ3GMsxAwEMQNBzGjdlxXRja1068mj9tla3c3DysaLxon+fgILMQNBzEAQM476kG5c5aJ7BnS6bqX6+jS/jLysXwCCmIEgZmx/x7DK6btCurF31WdMawPfkXOMhZiBIGYgiBmPvx+iUo2RV/vp3llSpZuXpfqgbNwnsBAzEMQMBDHjSH0InAMLMePvAAAA//8x2VnbmmL6HQAAAABJRU5ErkJggg==", + "url": "otpauth://totp/vault:4746fb81-028c-cd4e-026b-7dd18fe4c2f4?algorithm=SHA1&digits=6&issuer=vault&period=30&secret=6HQ4RZ7GM6MMLRKVDCI23LXNZF7UDZ2U" + } +} +``` + +### Administratively Destroy TOTP MFA Secret + +This endpoint deletes a TOTP MFA secret from the given entity ID. + +Note that in order to overwrite a secret on the entity, it is required to +explicitly delete the secret first. This API can be used to delete the secret +and the `generate` or `admin-generate` APIs should be used to regenerate a new +secret. + +| Method | Path | Produces | +| :------- | :-------------------------------------- | :--------------------- | +| `POST` | `/sys/mfa/method/:name/admin-destroy` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Name of the MFA method. + +- `entity_id` `(string: )` - Entity ID from which the MFA secret + should be removed. + +### Sample Payload + +```json +{ + "entity_id": "4746fb81-028c-cd4e-026b-7dd18fe4c2f4" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/mfa/method/totp/my_totp/admin-destroy +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/mfa.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa.html.md new file mode 100644 index 0000000..fb7bd4f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/mfa.html.md @@ -0,0 +1,19 @@ +--- +layout: "api" +page_title: "/sys/mfa - HTTP API" +sidebar_current: "docs-http-system-mfa" +description: |- + The '/sys/mfa' endpoint focuses on managing MFA behaviors in Vault Enterprise MFA. +--- + +# `/sys/mfa` + +~> **Enterprise Only** – These endpoints require Vault Enterprise. + +## Supported MFA types. + +- [TOTP](/api/system/mfa-totp.html) + +- [Okta](/api/system/mfa-okta.html) + +- [Duo](/api/system/mfa-duo.html) diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/mounts.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/mounts.html.md index 8b485f5..46e1b22 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/mounts.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/mounts.html.md @@ -74,15 +74,22 @@ This endpoint mounts a new secret backend at the given path. mount. - `config` `(map: nil)` – Specifies configuration options for - this mount. This is an object with three possible values: + this mount. This is an object with four possible values: - `default_lease_ttl` - `max_lease_ttl` - `force_no_cache` + - `plugin_name` - These control the default and maximum lease time-to-live, and force - disabling backend caching respectively. If set on a specific mount, this - overrides the global defaults. + These control the default and maximum lease time-to-live, force + disabling backend caching, and optional plugin name for plugin backends + respectively. The first three options override the global defaults if + set on a specific mount. 
The plugin_name can be provided in the config + map or as a top-level option, with the former taking precedence. + +- `plugin_name` `(string: "")` – Specifies the name of the plugin to + use based from the name in the plugin catalog. Applies only to plugin + backends. Additionally, the following options are allowed in Vault open-source, but relevant functionality is only supported in Vault Enterprise: @@ -108,7 +115,7 @@ relevant functionality is only supported in Vault Enterprise: $ curl \ --header "X-Vault-Token: ..." \ --request POST \ - --data payload.json \ + --data @payload.json \ https://vault.rocks/v1/sys/mounts/my-mount ``` @@ -190,6 +197,6 @@ This endpoint tunes configuration parameters for a given mount point. $ curl \ --header "X-Vault-Token: ..." \ --request POST \ - --data payload.json \ + --data @payload.json \ https://vault.rocks/v1/sys/mounts/my-mount/tune ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/plugins-catalog.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/plugins-catalog.html.md new file mode 100644 index 0000000..0c63456 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/plugins-catalog.html.md @@ -0,0 +1,154 @@ +--- +layout: "api" +page_title: "/sys/plugins/catalog - HTTP API" +sidebar_current: "docs-http-system-plugins-catalog" +description: |- + The `/sys/plugins/catalog` endpoint is used to manage plugins. +--- + +# `/sys/plugins/catalog` + +The `/sys/plugins/catalog` endpoint is used to list, register, update, and +remove plugins in Vault's catalog. Plugins must be registered before use, and +once registered backends can use the plugin by querying the catalog. + +## List Plugins + +This endpoint lists the plugins in the catalog. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/sys/plugins/catalog` | `200 application/json` | +| `GET` | `/sys/plugins/catalog?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST + https://vault.rocks/v1/sys/plugins/catalog +``` + +### Sample Response + +```javascript +{ + "data": { + "keys": [ + "cassandra-database-plugin", + "mssql-database-plugin", + "mysql-database-plugin", + "postgresql-database-plugin" + ] + } +} +``` + +## Register Plugin + +This endpoint registers a new plugin, or updates an existing one with the +supplied name. + +- **`sudo` required** – This endpoint requires `sudo` capability in addition to + any path-specific capabilities. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `PUT` | `/sys/plugins/catalog/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Specifies the name for this plugin. The name + is what is used to look up plugins in the catalog. This is part of the request + URL. + +- `sha256` `(string: )` – This is the SHA256 sum of the plugin's + binary. Before a plugin is run it's SHA will be checked against this value, if + they do not match the plugin can not be run. + +- `command` `(string: )` – Specifies the command used to execute the + plugin. This is relative to the plugin directory. e.g. `"myplugin + --my_flag=1"` + +### Sample Payload + +```json +{ + "sha_256": "d130b9a0fbfddef9709d8ff92e5e6053ccd246b78632fc03b8548457026961e9", + "command": "mysql-database-plugin" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request PUT \ + --data @payload.json \ + https://vault.rocks/v1/sys/plugins/catalog/example-plugin +``` + +## Read Plugin + +This endpoint returns the configuration data for the plugin with the given name. 
+ +- **`sudo` required** – This endpoint requires `sudo` capability in addition to + any path-specific capabilities. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/sys/plugins/catalog/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the plugin to retrieve. + This is part of the request URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request GET \ + https://vault.rocks/v1/sys/plugins/catalog/example-plugin +``` + +### Sample Response + +```javascript +{ + "data": { + "args": [], + "builtin": false, + "command": "/tmp/vault-plugins/mysql-database-plugin", + "name": "example-plugin", + "sha256": "0TC5oPv93vlwnY/5Ll5gU8zSRreGMvwDuFSEVwJpYek=" + } +} +``` +## Remove Plugin from Catalog + +This endpoint removes the plugin with the given name. + +- **`sudo` required** – This endpoint requires `sudo` capability in addition to + any path-specific capabilities. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/sys/plugins/catalog/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the plugin to delete. + This is part of the request URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request DELETE \ + https://vault.rocks/v1/sys/plugins/catalog/example-plugin +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/plugins-reload-backend.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/plugins-reload-backend.html.md new file mode 100644 index 0000000..ea4bd28 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/plugins-reload-backend.html.md @@ -0,0 +1,47 @@ +--- +layout: "api" +page_title: "/sys/plugins/reload/backend - HTTP API" +sidebar_current: "docs-http-system-plugins-reload-backend" +description: |- + The `/sys/plugins/reload/backend` endpoint is used to reload plugin backends. +--- + +# `/sys/plugins/reload/backend` + +The `/sys/plugins/reload/backend` endpoint is used to reload mounted plugin +backends. Either the plugin name (`plugin`) or the desired plugin backend mounts +(`mounts`) must be provided, but not both. In the case that the plugin name is +provided, all mounted paths that use that plugin backend will be reloaded. + +## Reload Plugins + +This endpoint reloads mounted plugin backends. + +| Method | Path - | Produces | +| :------- | :---------------------------- | :--------------------- | +| `PUT` | `/sys/plugins/reload/backend` | `204 (empty body)` | + +### Parameters + +- `plugin` `(string: "")` – The name of the plugin to reload, as + registered in the plugin catalog. + +- `mounts` `(slice: [])` – Array or comma-separated string mount paths + of the plugin backends to reload. + +### Sample Payload + +```json +{ + "plugin": "mock-plugin" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request PUT + https://vault.rocks/v1/sys/plugins/reload/backend +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/policy.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/policy.html.md index 7f82fee..e4a3723 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/policy.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/policy.html.md @@ -93,7 +93,7 @@ updated, it takes effect immediately to all associated users. $ curl \ --header "X-Vault-Token: ..." \ --request PUT \ - --data payload.json \ + --data @payload.json \ https://vault.rocks/v1/sys/policy/my-policy ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/raw.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/raw.html.md index 041c358..7963dbb 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/raw.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/raw.html.md @@ -10,6 +10,10 @@ description: |- The `/sys/raw` endpoint is access the raw underlying store in Vault. +This endpont is off by default. See the +[Vault configuration documentation](/docs/configuration/index.html) to +enable. + ## Read Raw This endpoint reads the value of the key at the given path. This is the raw path @@ -76,6 +80,41 @@ $ curl \ https://vault.rocks/v1/sys/raw/secret/foo ``` +## List Raw + +This endpoint returns a list keys for a given path prefix. + +**This endpoint requires 'sudo' capability.** + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/sys/raw/:prefix` | `200 application/json` | +| `GET` | `/sys/raw/:prefix?list=true` | `200 application/json` | + + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request LIST \ + https://vault.rocks/v1/sys/raw/logical +``` + +### Sample Response + +```json +{ + "data":{ + "keys":[ + "abcd-1234...", + "efgh-1234...", + "ijkl-1234..." + ] + } +} +``` + ## Delete Raw This endpoint deletes the key with given path. This is the raw path in the diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/rekey.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/rekey.html.md index bf41dde..d721d29 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/rekey.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/rekey.html.md @@ -74,9 +74,9 @@ and starting a new rekey, which will also provide a new nonce. array must be the same as `secret_shares`. - `backup` `(bool: false)` – Specifies if using PGP-encrypted keys, whether - Vault should also back them up to `core/unseal-keys-backup` in the physical - storage backend. These can then be retrieved and removed via the - `sys/rekey/backup` endpoint. + Vault should also store a plaintext backup of the PGP-encrypted keys at + `core/unseal-keys-backup` in the physical storage backend. These can then + be retrieved and removed via the `sys/rekey/backup` endpoint. ### Sample Payload diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/replication-dr.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/replication-dr.html.md new file mode 100644 index 0000000..46e0ef5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/replication-dr.html.md @@ -0,0 +1,329 @@ +--- +layout: "api" +page_title: "/sys/replication - HTTP API" +sidebar_current: "docs-http-system-replication-dr" +description: |- + The '/sys/replication/dr' endpoint focuses on managing general operations in Vault Enterprise Disaster Recovery replication +--- + +# `/sys/replication/dr` + +~> **Enterprise Only** – These endpoints require Vault Enterprise. 
+ +## Check DR Status + +This endpoint prints information about the status of replication (mode, +sync progress, etc). + +This is an authenticated endpoint. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/sys/replication/dr/status` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + https://vault.rocks/v1/sys/replication/dr/status +``` + +### Sample Response + +The printed status of the replication environment. As an example, for a +primary, it will look something like: + +```json +{ + "mode": "dr-primary", + "cluster_id": "d4095d41-3aee-8791-c421-9bc7f88f7c3e", + "known_secondaries": [], + "last_wal": 0, + "merkle_root": "c3260c4c682ff2d6eb3c8bfd877134b3cec022d1", + "request_id": "009ea98c-06cd-6dc3-74f2-c4904b22e535", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "cluster_id": "d4095d41-3aee-8791-c421-9bc7f88f7c3e", + "known_secondaries": [], + "last_wal": 0, + "merkle_root": "c3260c4c682ff2d6eb3c8bfd877134b3cec022d1", + "mode": "primary" + }, + "wrap_info": null, + "warnings": null, + "auth": null +} +``` + +## Enable DR Primary Replication + +This endpoint enables DR replication in primary mode. This is used when DR replication +is currently disabled on the cluster (if the cluster is already a secondary, it +must be promoted). + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/dr/primary/enable` | `204 (empty body)` | + +### Parameters + +- `primary_cluster_addr` `(string: "")` – Specifies the cluster address that the + primary gives to secondary nodes. Useful if the primary's cluster address is + not directly accessible and must be accessed via an alternate path/address, + such as through a TCP-based load balancer. + +### Sample Payload + +```json +{} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/replication/dr/primary/enable +``` + +## Demote DR Primary + +This endpoint demotes a DR primary cluster to a secondary. This DR secondary cluster +will not attempt to connect to a primary (see the update-primary call), but will +maintain knowledge of its cluster ID and can be reconnected to the same +DR replication set without wiping local storage. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/dr/primary/demote` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + https://vault.rocks/v1/sys/replication/dr/primary/demote +``` + +## Disable DR Primary + +This endpoint disables DR replication entirely on the cluster. Any secondaries will +no longer be able to connect. Caution: re-enabling this node as a primary or +secondary will change its cluster ID; in the secondary case this means a wipe of +the underlying storage when connected to a primary, and in the primary case, +secondaries connecting back to the cluster (even if they have connected before) +will require a wipe of the underlying storage. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/dr/primary/disable` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + https://vault.rocks/v1/sys/replication/dr/primary/disable +``` + +## Generate DR Secondary Token + +This endpoint generates a DR secondary activation token for the +cluster with the given opaque identifier, which must be unique. This +identifier can later be used to revoke a DR secondary's access. 
+ +**This endpoint requires 'sudo' capability.** + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/dr/primary/secondary-token` | `200 application/json` | + +### Parameters + +- `id` `(string: )` – Specifies an opaque identifier, e.g. 'us-east' + +- `ttl` `(string: "30m")` – Specifies the TTL for the secondary activation + token. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/sys/replication/dr/primary/secondary-token?id=us-east-1 +``` + +### Sample Response + +```json +{ + "request_id": "", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + "data": null, + "warnings": null, + "wrap_info": { + "token": "fb79b9d3-d94e-9eb6-4919-c559311133d6", + "ttl": 300, + "creation_time": "2016-09-28T14:41:00.56961496-04:00", + "wrapped_accessor": "" + } +} +``` + +## Revoke DR Secondary Token + +This endpoint revokes a DR secondary's ability to connect to the DR primary cluster; +the DR secondary will immediately be disconnected and will not be allowed to +connect again unless given a new activation token. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/dr/primary/revoke-secondary` | `204 (empty body)` | + +### Parameters + +- `id` `(string: )` – Specifies an opaque identifier, e.g. 'us-east' + +### Sample Payload + +```json +{ + "id": "us-east" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/replication/dr/primary/revoke-secondary +``` + +## Enable DR Secondary + +This endpoint enables replication on a DR secondary using a DR secondary activation +token. + +!> This will immediately clear all data in the secondary cluster! 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/dr/secondary/enable` | `204 (empty body)` | + +### Parameters + +- `token` `(string: )` – Specifies the secondary activation token fetched from the primary. + +- `primary_api_addr` `(string: "")` – Set this to the API address (normal Vault + address) to override the value embedded in the token. This can be useful if + the primary's redirect address is not accessible directly from this cluster + (e.g. through a load balancer). + +- `ca_file` `(string: "")` – Specifies the path to a CA root file (PEM format) + that the secondary can use when unwrapping the token from the primary. If this + and ca_path are not given, defaults to system CA roots. + +- `ca_path` `(string: "")` – Specifies the path to a CA root directory + containing PEM-format files that the secondary can use when unwrapping the + token from the primary. If this and ca_file are not given, defaults to system + CA roots. + +### Sample Payload + +```json +{ + "token": "..." +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/replication/dr/secondary/enable +``` + +## Promote DR Secondary + +This endpoint promotes the DR secondary cluster to DR primary. For data safety and +security reasons, new secondary tokens will need to be issued to other +secondaries, and there should never be more than one primary at a time. + +If the DR secondary's primary cluster is also in a performace replication set, +the DR secondary will be promoted into that replication set. Care should be +taken when promoting to ensure multiple performance primary clusters are not +activate at the same time. + +If the DR secondary's primary cluster is a performance secondary, the promoted +cluster will attempt to connect to the performance primary cluster using the +same secondary token. 
+ +!> Only one performance primary should be active at a given time. Multiple primaries may +result in data loss! + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/dr/secondary/promote` | `200 application/json` | + +### Parameters + +- `key` `(string "")` - Specifies a single master key share. This is required unless reset is true. +- `reset` `(bool false) - Specifies if previously-provided unseal keys are discarded and the promote process is reset. +- `primary_cluster_addr` `(string: "")` – Specifies the cluster address that the + primary gives to secondary nodes. Useful if the primary's cluster address is + not directly accessible and must be accessed via an alternate path/address + (e.g. through a load balancer). + +### Sample Payload + +```json +{ + "key": "ijH8tphEHaBtgx+IvPfxDsSi2LV4j9k+Lad6eqT5cJw=" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/replication/dr/secondary/promote +``` + +### Sample Response + +```json +{ + "progress": 0, + "required": 1, + "complete": false, + "request_id": "ad8f9074-0e24-d30e-83cd-595c9652ff89", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "complete": false, + "progress": 0, + "required": 1 + }, + "wrap_info": null, + "warnings": null, + "auth": null +} +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/replication-performance.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/replication-performance.html.md new file mode 100644 index 0000000..bba8263 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/replication-performance.html.md @@ -0,0 +1,469 @@ +--- +layout: "api" +page_title: "/sys/replication - HTTP API" +sidebar_current: "docs-http-system-replication-performance" +description: |- + The '/sys/replication/performance' endpoint 
focuses on managing general operations in Vault Enterprise Performance Replication +--- + +# `/sys/replication/performance` + +~> **Enterprise Only** – These endpoints require Vault Enterprise. + +## Check Performance Status + +This endpoint prints information about the status of replication (mode, +sync progress, etc). + +This is an authenticated endpoint. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/sys/replication/performance/status` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + https://vault.rocks/v1/sys/replication/performance/status +``` + +### Sample Response + +The printed status of the replication environment. As an example, for a +primary, it will look something like: + +```json +{ + "mode": "perf-primary", + "cluster_id": "d4095d41-3aee-8791-c421-9bc7f88f7c3e", + "known_secondaries": [], + "last_wal": 0, + "merkle_root": "c3260c4c682ff2d6eb3c8bfd877134b3cec022d1", + "request_id": "009ea98c-06cd-6dc3-74f2-c4904b22e535", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "cluster_id": "d4095d41-3aee-8791-c421-9bc7f88f7c3e", + "known_secondaries": [], + "last_wal": 0, + "merkle_root": "c3260c4c682ff2d6eb3c8bfd877134b3cec022d1", + "mode": "primary" + }, + "wrap_info": null, + "warnings": null, + "auth": null +} +``` + +## Enable Performance Primary Replication + +This endpoint enables replication in primary mode. This is used when replication +is currently disabled on the cluster (if the cluster is already a secondary, it +must be promoted). + +!> Only one primary should be active at a given time. Multiple primaries may +result in data loss! 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/performance/primary/enable` | `204 (empty body)` | + +### Parameters + +- `primary_cluster_addr` `(string: "")` – Specifies the cluster address that the + primary gives to secondary nodes. Useful if the primary's cluster address is + not directly accessible and must be accessed via an alternate path/address, + such as through a TCP-based load balancer. If not set, uses vault's configured + cluster address. + +### Sample Payload + +```json +{} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/replication/performance/primary/enable +``` + +## Demote Performance Primary + +This endpoint demotes a performance primary cluster to a performance secondary. +This secondary cluster will not attempt to connect to a primary (see the update-primary call), +but will maintain knowledge of its cluster ID and can be reconnected to the same +replication set without wiping local storage. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/performance/primary/demote` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + https://vault.rocks/v1/sys/replication/performance/primary/demote +``` + +## Disable Performance Primary + +This endpoint disables performance replication entirely on the cluster. Any +performance secondaries will no longer be able to connect. Caution: re-enabling +this node as a primary or secondary will change its cluster ID; in the secondary +case this means a wipe of the underlying storage when connected to a primary, +and in the primary case, secondaries connecting back to the cluster (even if +they have connected before) will require a wipe of the underlying storage. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/performance/primary/disable` | `204 (empty body)` | + + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + https://vault.rocks/v1/sys/replication/performance/primary/disable +``` + +## Generate Performance Secondary Token + +This endpoint generates a performance secondary activation token for the +cluster with the given opaque identifier, which must be unique. This +identifier can later be used to revoke a secondary's access. + +**This endpoint requires 'sudo' capability.** + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/performance/primary/secondary-token` | `200 application/json` | + +### Parameters + +- `id` `(string: )` – Specifies an opaque identifier, e.g. 'us-east' + +- `ttl` `(string: "30m")` – Specifies the TTL for the secondary activation + token. + +### Sample Payload + +```json +{ + "id": "us-east-1" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/replication/performance/primary/secondary-token +``` + +### Sample Response + +```json +{ + "request_id": "", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + "data": null, + "warnings": null, + "wrap_info": { + "token": "fb79b9d3-d94e-9eb6-4919-c559311133d6", + "ttl": 300, + "creation_time": "2016-09-28T14:41:00.56961496-04:00", + "wrapped_accessor": "" + } +} +``` + +## Revoke Performance Secondary Token + +This endpoint revokes a performance secondary's ability to connect to the +performance primary cluster; the secondary will immediately be disconnected and +will not be allowed to connect again unless given a new activation token. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/performance/primary/revoke-secondary` | `204 (empty body)` | + +### Parameters + +- `id` `(string: )` – Specifies an opaque identifier, e.g. 'us-east' + +### Sample Payload + +```json +{ + "id": "us-east" +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/replication/performance/primary/revoke-secondary +``` + +## Create Mounts Filter + +This endpoint is used to modify the mounts that are filtered to a secondary. +Filtering can be specified in whitelist mode or blacklist mode. In whitelist +mode the secret and auth mounts that are specified are included to the +selected secondary. In blacklist mode, the mount paths are excluded. + +| Method | Path | Produces | +| :------- | :------------------------------------------------------- | :--------------------- | +| `POST` | `/sys/replication/performance/primary/mount-filter/:id` | `204 (empty body)` | + +### Parameters + +- `id` `(string: )` – Specifies an opaque identifier, e.g. 'us-east' + +- `mode` `(string: "whitelist")` – Specifies the filtering mode. Available values + are "whitelist" and blacklist". + +- `paths` `(array: [])` – The list of mount paths that are filtered. + +### Sample Payload + +```json +{ + "mode": "whitelist", + "paths": ["secret/"] +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/replication/performance/primary/mount-filter/us-east-1 +``` + +## Read Mounts Filter + +This endpoint is used to read the mode and the mount paths that are filtered +for a secondary. 
+ +| Method | Path | Produces | +| :------- | :------------------------------------------------------- | :--------------------- | +| `GET` | `/sys/replication/performance/primary/mount-filter/:id` | `200 (empty body)` | + +### Parameters + +- `id` `(string: )` – Specifies an opaque identifier, e.g. 'us-east' + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/sys/replication/performance/primary/mount-filter/us-east-1 +``` + +### Sample Response + +```json +{ + "mode": "whitelist", + "paths": ["secret/"] +} +``` + +## Delete Mounts Filter + +This endpoint is used to delete the mount filters for a secondary. + +| Method | Path | Produces | +| :------- | :------------------------------------------------------- | :--------------------- | +| `DELETE` | `/sys/replication/performance/primary/mount-filter/:id` | `204 (empty body)` | + +### Parameters + +- `id` `(string: )` – Specifies an opaque identifier, e.g. 'us-east' + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/sys/replication/performance/primary/mount-filter/us-east-1 +``` + +## Enable Performance Secondary + +This endpoint enables performance replication on a secondary using a secondary activation +token. + +!> This will immediately clear all data in the secondary cluster! + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/performance/secondary/enable` | `204 (empty body)` | + +### Parameters + +- `token` `(string: )` – Specifies the secondary activation token fetched from the primary. + +- `primary_api_addr` `(string: "")` – Set this to the API address (normal Vault + address) to override the value embedded in the token. This can be useful if + the primary's redirect address is not accessible directly from this cluster + (e.g. through a load balancer). 
+ +- `ca_file` `(string: "")` – Specifies the path to a CA root file (PEM format) + that the secondary can use when unwrapping the token from the primary. If this + and ca_path are not given, defaults to system CA roots. + +- `ca_path` `(string: "")` – Specifies the path to a CA root directory + containing PEM-format files that the secondary can use when unwrapping the + token from the primary. If this and ca_file are not given, defaults to system + CA roots. + +### Sample Payload + +```json +{ + "token": "..." +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/replication/performance/secondary/enable +``` + +## Promote Performance Secondary + +This endpoint promotes the performance secondary cluster to performance primary. +For data safety and security reasons, new secondary tokens will need to be +issued to other secondaries, and there should never be more than one performance +primary at a time. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/performance/secondary/promote` | `204 (empty body)` | + +### Parameters + +- `primary_cluster_addr` `(string: "")` – Specifies the cluster address that the + primary gives to secondary nodes. Useful if the primary's cluster address is + not directly accessible and must be accessed via an alternate path/address + (e.g. through a load balancer). + +### Sample Payload + +```json +{} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/replication/performance/secondary/promote +``` + +## Disable Performance Secondary + +This endpoint disables performance replication entirely on the cluster. The cluster will no +longer be able to connect to the performance primary. 
+ +!> Re-enabling this node as a performance primary or secondary will change its cluster ID; +in the secondary case this means a wipe of the underlying storage when connected +to a primary, and in the primary case, secondaries connecting back to the +cluster (even if they have connected before) will require a wipe of the +underlying storage. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/performance/secondary/disable` | `204 (empty body)` | + + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + https://vault.rocks/v1/sys/replication/performance/secondary/disable +``` + +## Update Performance Secondary's Primary + +This endpoint changes a performance secondary cluster's assigned primary cluster using a +secondary activation token. This does not wipe all data in the cluster. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/sys/replication/performance/secondary/update-primary` | `204 (empty body)` | + +### Parameters + +- `token` `(string: )` – Specifies the secondary activation token + fetched from the primary. If you set this to a blank string, the cluster will + stay a secondary but clear its knowledge of any past primary (and thus not + attempt to connect to the previous primary). This can be useful if the primary + is down to stop the secondary from trying to reconnect to it. + +- `primary_api_addr` `(string: )` – Specifies the API address (normal Vault + address) to override the value embedded in the token. This can be useful if + the primary's redirect address is not accessible directly from this cluster. + +- `ca_file` `(string: "")` – Specifies the path to a CA root file (PEM format) + that the secondary can use when unwrapping the token from the primary. If this + and ca_path are not given, defaults to system CA roots. 
+ +- `ca_path` `string: ()` – Specifies the path to a CA root directory containing + PEM-format files that the secondary can use when unwrapping the token from the + primary. If this and ca_file are not given, defaults to system CA roots. + +### Sample Payload + +```json +{ + "token": "..." +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/sys/replication/performance/secondary/update-primary +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/replication.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/replication.html.md index 55ecdc3..6fca991 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/replication.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/replication.html.md @@ -3,7 +3,7 @@ layout: "api" page_title: "/sys/replication - HTTP API" sidebar_current: "docs-http-system-replication" description: |- - The '/sys/replication' endpoint focuses on managing general operations in Vault Enterprise replication sets + The '/sys/replication' endpoint focuses on managing general operations in Vault Enterprise replication --- # `/sys/replication` @@ -87,331 +87,32 @@ primary, it will look something like: ```json { - "mode": "primary", - "cluster_id": "d4095d41-3aee-8791-c421-9bc7f88f7c3e", - "known_secondaries": [], - "last_wal": 0, - "merkle_root": "c3260c4c682ff2d6eb3c8bfd877134b3cec022d1", - "request_id": "009ea98c-06cd-6dc3-74f2-c4904b22e535", - "lease_id": "", - "renewable": false, - "lease_duration": 0, - "data": { - "cluster_id": "d4095d41-3aee-8791-c421-9bc7f88f7c3e", - "known_secondaries": [], - "last_wal": 0, - "merkle_root": "c3260c4c682ff2d6eb3c8bfd877134b3cec022d1", - "mode": "primary" - }, - "wrap_info": null, - "warnings": null, - "auth": null + "request_id": "d13e9665-d610-fea0-357f-8d652aa308cb", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + 
"data": { + "dr": { + "cluster_id": "a876f38b-7577-25ac-6007-277528c99a1a", + "known_secondaries": [ + "2" + ], + "last_wal": 43, + "merkle_root": "86d67839f47045f7d24beb4f39b14504d15a146c", + "mode": "dr-primary", + "primary_cluster_addr": "" + }, + "performance": { + "cluster_id": "11ab01df-32ea-1d79-b4bc-8bc973c1b749", + "known_secondaries": [ + "1" + ], + "last_wal": 43, + "merkle_root": "e0531d566b23403101b0868e85b63d6774ba0ef2", + "mode": "perf-primary", + "primary_cluster_addr": "" + } + }, + "warnings": null } ``` - -## Enable Primary Replication - -This endpoint enables replication in primary mode. This is used when replication -is currently disabled on the cluster (if the cluster is already a secondary, it -must be promoted). - -!> Only one primary should be active at a given time. Multiple primaries may -result in data loss! - -| Method | Path | Produces | -| :------- | :--------------------------- | :--------------------- | -| `POST` | `/sys/replication/primary/enable` | `204 (empty body)` | - -### Parameters - -- `primary_cluster_addr` `(string: "")` – Specifies the cluster address that the - primary gives to secondary nodes. Useful if the primary's cluster address is - not directly accessible and must be accessed via an alternate path/address, - such as through a TCP-based load balancer. - -### Sample Payload - -```json -{} -``` - -### Sample Request - -``` -$ curl \ - --header "X-Vault-Token: ..." \ - --request POST \ - --data @payload.json \ - https://vault.rocks/v1/sys/replication/primary/enable -``` - -## Demote Primary - -This endpoint demotes a primary cluster to a secondary. This secondary cluster -will not attempt to connect to a primary (see the update-primary call), but will -maintain knowledge of its cluster ID and can be reconnected to the same -replication set without wiping local storage. 
- -| Method | Path | Produces | -| :------- | :--------------------------- | :--------------------- | -| `POST` | `/sys/replication/primary/demote` | `204 (empty body)` | - -### Sample Request - -``` -$ curl \ - --header "X-Vault-Token: ..." \ - --request POST \ - https://vault.rocks/v1/sys/replication/primary/demote -``` - -## Disable Primary - -This endptoin disables replication entirely on the cluster. Any secondaries will -no longer be able to connect. Caution: re-enabling this node as a primary or -secondary will change its cluster ID; in the secondary case this means a wipe of -the underlying storage when connected to a primary, and in the primary case, -secondaries connecting back to the cluster (even if they have connected before) -will require a wipe of the underlying storage. - -| Method | Path | Produces | -| :------- | :--------------------------- | :--------------------- | -| `POST` | `/sys/replication/primary/disable` | `204 (empty body)` | - -### Sample Request - -``` -$ curl \ - --header "X-Vault-Token: ..." \ - --request POST \ - https://vault.rocks/v1/sys/replication/primary/disable -``` - -## Generate Secondary Token - -This endpoint generates a secondary activation token for the -cluster with the given opaque identifier, which must be unique. This -identifier can later be used to revoke a secondary's access. - -**This endpoint requires 'sudo' capability.** - -| Method | Path | Produces | -| :------- | :--------------------------- | :--------------------- | -| `GET` | `/sys/replication/primary/secondary-token` | `200 application/json` | - -### Parameters - -- `id` `(string: )` – Specifies an opaque identifier, e.g. 'us-east' - -- `ttl` `(string: "30m")` – Specifies the TTL for the secondary activation - token. - -### Sample Request - -``` -$ curl \ - --header "X-Vault-Token: ..." 
\ - https://vault.rocks/v1/sys/replication/primary/secondary-token?id=us-east-1 -``` - -### Sample Response - -```json -{ - "request_id": "", - "lease_id": "", - "lease_duration": 0, - "renewable": false, - "data": null, - "warnings": null, - "wrap_info": { - "token": "fb79b9d3-d94e-9eb6-4919-c559311133d6", - "ttl": 300, - "creation_time": "2016-09-28T14:41:00.56961496-04:00", - "wrapped_accessor": "" - } -} -``` - -## Revoke Secondary Token - -This endpoint revokes a secondary's ability to connect to the primary cluster; -the secondary will immediately be disconnected and will not be allowed to -connect again unless given a new activation token. - -| Method | Path | Produces | -| :------- | :--------------------------- | :--------------------- | -| `POST` | `/sys/replication/secondary/revoke-secondary` | `204 (empty body)` | - -### Parameters - -- `id` `(string: )` – Specifies an opaque identifier, e.g. 'us-east' - -### Sample Payload - -```json -{ - "id": "us-east" -} -``` - -### Sample Request - -``` -$ curl \ - --header "X-Vault-Token: ..." \ - --request POST \ - --data @payload.json \ - https://vault.rocks/v1/sys/replication/secondary/revoke-secondary -``` - -## Enable Secondary - -This endpoint enables replication on a secondary using a secondary activation -token. - -!> This will immediately clear all data in the secondary cluster! - -| Method | Path | Produces | -| :------- | :--------------------------- | :--------------------- | -| `POST` | `/sys/replication/secondary/enable` | `204 (empty body)` | - -### Parameters - -- `token` `(string: )` – Specifies the secondary activation token fetched from the primary. - -- `primary_api_addr` `(string: "")` – Specifies Set this to the API address - (normal Vault address) to override the value embedded in the token. This can - be useful if the primary's redirect address is not accessible directly from - this cluster (e.g. through a load balancer). 
- -- `ca_file` `(string: "")` – Specifies the path to a CA root file (PEM format) - that the secondary can use when unwrapping the token from the primary. If this - and ca_path are not given, defaults to system CA roots. - -- `ca_path` `(string: "")` – Specifies the path to a CA root directory - containing PEM-format files that the secondary can use when unwrapping the - token from the primary. If this and ca_file are not given, defaults to system - CA roots. - -### Sample Payload - -```json -{ - "token": "..." -} -``` - -### Sample Request - -``` -$ curl \ - --header "X-Vault-Token: ..." \ - --request POST \ - --data @payload.json \ - https://vault.rocks/v1/sys/replication/secondary/enable -``` - -## Promote Secondary - -This endpoint promotes the secondary cluster to primary. For data safety and -security reasons, new secondary tokens will need to be issued to other -secondaries, and there should never be more than one primary at a time. - -| Method | Path | Produces | -| :------- | :--------------------------- | :--------------------- | -| `POST` | `/sys/replication/secondary/promote` | `204 (empty body)` | - -### Parameters - -- `primary_cluster_addr` `(string: "")` – Specifies the cluster address that the - primary gives to secondary nodes. Useful if the primary's cluster address is - not directly accessible and must be accessed via an alternate path/address - (e.g. through a load balancer). - -### Sample Payload - -```json -{} -``` - -### Sample Request - -``` -$ curl \ - --header "X-Vault-Token: ..." \ - --request POST \ - --data @payload.json \ - https://vault.rocks/v1/sys/replication/secondary/promote -``` - -## Disable Secondary - -This endpoint disables replication entirely on the cluster. The cluster will no -longer be able to connect to the primary. 
- -!> Re-enabling this node as a primary or secondary will change its cluster ID; -in the secondary case this means a wipe of the underlying storage when connected -to a primary, and in the primary case, secondaries connecting back to the -cluster (even if they have connected before) will require a wipe of the -underlying storage. - -| Method | Path | Produces | -| :------- | :--------------------------- | :--------------------- | -| `POST` | `/sys/replication/secondary/disable` | `204 (empty body)` | - -### Sample Request - -``` -$ curl \ - --header "X-Vault-Token: ..." \ - --request POST \ - https://vault.rocks/v1/sys/replication/secondary/disable -``` - -## Update Secondary's Primary - -This endpoint changes a secondary cluster's assigned primary cluster using a -secondary activation token. This does not wipe all data in the cluster. - -| Method | Path | Produces | -| :------- | :--------------------------- | :--------------------- | -| `POST` | `/sys/replication/secondary/update-primary` | `204 (empty body)` | - -### Parameters - -- `token` `(string: )` – Specifies the secondary activation token - fetched from the primary. If you set this to a blank string, the cluster will - stay a secondary but clear its knowledge of any past primary (and thus not - attempt to connect to the previous primary). This can be useful if the primary - is down to stop the secondary from trying to reconnect to it. - -- `primary_api_addr` `(string: )` – Specifies the API address (normal Vault - address) to override the value embedded in the token. This can be useful if - the primary's redirect address is not accessible directly from this cluster. - -- `ca_file` `(string: "")` – Specifies the path to a CA root file (PEM format) - that the secondary can use when unwrapping the token from the primary. If this - and ca_path are not given, defaults to system CA roots. 
- -- `ca_path` `string: ()` – Specifies the path to a CA root directory containing - PEM-format files that the secondary can use when unwrapping the token from the - primary. If this and ca_file are not given, defaults to system CA roots. - -### Sample Payload - -```json -{ - "token": "..." -} -``` - -### Sample Request - -``` -$ curl \ - --header "X-Vault-Token: ..." \ - --request POST \ - --data @payload.json \ - https://vault.rocks/v1/sys/replication/secondary/update-primary -``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/unseal.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/unseal.html.md index 9ba369e..3fd7d43 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/unseal.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/unseal.html.md @@ -1,9 +1,9 @@ --- layout: "api" -page_title: "/sys/seal-unseal - HTTP API" +page_title: "/sys/unseal - HTTP API" sidebar_current: "docs-http-system-unseal" description: |- - The `/sys/seal-unseal` endpoint is used to unseal the Vault. + The `/sys/unseal` endpoint is used to unseal the Vault. 
--- # `/sys/unseal` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-lookup.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-lookup.html.md index b54dbd2..b2b50e2 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-lookup.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-lookup.html.md @@ -49,9 +49,12 @@ $ curl \ "lease_duration": 0, "renewable": false, "data": { + "creation_path": "sys/wrapping/wrap", "creation_time": "2016-09-28T14:16:13.07103516-04:00", "creation_ttl": 300 }, - "warnings": null + "wrap_info": null, + "warnings": null, + "auth": null } ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-rewrap.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-rewrap.html.md index 2f86658..ebe6a10 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-rewrap.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-rewrap.html.md @@ -58,7 +58,7 @@ $ curl \ "token": "3b6f1193-0707-ac17-284d-e41032e74d1f", "ttl": 300, "creation_time": "2016-09-28T14:22:26.486186607-04:00", - "wrapped_accessor": "" + "creation_path": "sys/wrapping/wrap" } } ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-wrap.html.md b/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-wrap.html.md index c3c4065..18b704f 100644 --- a/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-wrap.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/api/system/wrapping-wrap.html.md @@ -61,7 +61,7 @@ $ curl \ "token": "fb79b9d3-d94e-9eb6-4919-c559311133d6", "ttl": 300, "creation_time": "2016-09-28T14:41:00.56961496-04:00", - "wrapped_accessor": "" + "creation_path": "sys/wrapping/wrap", } } ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/assets/images/keys.png 
b/vendor/github.com/hashicorp/vault/website/source/assets/images/keys.png deleted file mode 100644 index 43ca359..0000000 Binary files a/vendor/github.com/hashicorp/vault/website/source/assets/images/keys.png and /dev/null differ diff --git a/vendor/github.com/hashicorp/vault/website/source/assets/images/news/webinar-register-now.png b/vendor/github.com/hashicorp/vault/website/source/assets/images/news/webinar-register-now.png new file mode 100644 index 0000000..7999c9f Binary files /dev/null and b/vendor/github.com/hashicorp/vault/website/source/assets/images/news/webinar-register-now.png differ diff --git a/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-auth-workflow.svg b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-auth-workflow.svg new file mode 100644 index 0000000..0cdd611 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-auth-workflow.svg @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-aws-ec2-auth-flow.png b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-aws-ec2-auth-flow.png new file mode 100644 index 0000000..a98fb91 Binary files /dev/null and b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-aws-ec2-auth-flow.png differ diff --git a/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-gcp-gce-auth-workflow.svg b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-gcp-gce-auth-workflow.svg new file mode 100644 index 0000000..0a376bb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-gcp-gce-auth-workflow.svg @@ -0,0 +1,170 @@ + + + + Sequence Diagram 1@3x + Created with Sketch. 
+ + + + + + + + + + + + + + + + + + + + Instance + + + Compute Engine + + + + + + + + + + + + + + + + + + + + + + + + Metadata Server + + + Compute Engine + + + + + + + + + + + + + + + + + + + + + + + + Google + OAuth2 API + + + + + + + + + Hashicorp Vault + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-gcp-iam-auth-workflow.svg b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-gcp-iam-auth-workflow.svg new file mode 100644 index 0000000..cd2fa18 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-gcp-iam-auth-workflow.svg @@ -0,0 +1,134 @@ + + + + Sequence Diagram 2@3x + Created with Sketch. + + + + + + + + + + + + + + + + + + + Hashicorp + Vault + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Cloud IAM + + + + + + + + + + + + + + Developer + + + + + + \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-policy-workflow.svg b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-policy-workflow.svg new file mode 100644 index 0000000..4aba65e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-policy-workflow.svg @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-rekey-vs-rotate.svg b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-rekey-vs-rotate.svg new file mode 100644 index 0000000..e3a7ade --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-rekey-vs-rotate.svg @@ 
-0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-shamir-secret-sharing.svg b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-shamir-secret-sharing.svg new file mode 100644 index 0000000..b23fc60 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/assets/images/vault-shamir-secret-sharing.svg @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/hashicorp/vault/website/source/assets/stylesheets/_latest.scss b/vendor/github.com/hashicorp/vault/website/source/assets/stylesheets/_latest.scss index 6f34e6b..47eded4 100644 --- a/vendor/github.com/hashicorp/vault/website/source/assets/stylesheets/_latest.scss +++ b/vendor/github.com/hashicorp/vault/website/source/assets/stylesheets/_latest.scss @@ -15,6 +15,11 @@ border-bottom: none; } + img { + margin-bottom: 20px; + max-width: 100%; + } + h3 { padding: 0 0 0 15px; margin: 0; diff --git a/vendor/github.com/hashicorp/vault/website/source/assets/stylesheets/application.scss b/vendor/github.com/hashicorp/vault/website/source/assets/stylesheets/application.scss index ff6dec0..3af5a28 100755 --- a/vendor/github.com/hashicorp/vault/website/source/assets/stylesheets/application.scss +++ b/vendor/github.com/hashicorp/vault/website/source/assets/stylesheets/application.scss @@ -35,3 +35,21 @@ // Demo @import '_demo'; + + +// Docs - visual separation for parameter names and flags +span.param { + font-weight: 800; +} +span.param:after { + content: ":"; +} +span.param-flags { + font-style: italic; +} +span.param-flags:before { + content: "("; +} +span.param-flags:after { + content: ")"; +} diff --git a/vendor/github.com/hashicorp/vault/website/source/community.html.erb b/vendor/github.com/hashicorp/vault/website/source/community.html.erb index 7c8227f..05f6d0d 100644 --- 
a/vendor/github.com/hashicorp/vault/website/source/community.html.erb +++ b/vendor/github.com/hashicorp/vault/website/source/community.html.erb @@ -8,117 +8,28 @@ description: |-

Community

-Vault is an open source project with a growing community. There are -active, dedicated users willing to help you through various mediums. + Vault is an open source project with a growing community. There are active, + dedicated users willing to help you through various mediums.

-IRC: #vault-tool on Freenode + IRC: #vault-tool on Freenode

-Announcement list: -HashiCorp Announcement Google Group + Announcement list: + HashiCorp Announcement Google Group

-Discussion list: -Vault Google Group + Discussion list: + Vault Google Group

-Bug Tracker: -Issue tracker + Bug Tracker: + Issue tracker on GitHub. Please only use this for reporting bugs. Do not ask -for general help here. Use IRC or the mailing list for that. + for general help here. Use IRC or the mailing list for that.

-Training: -Paid HashiCorp training courses -are also available in a city near you. Private training courses are also available. + Training: + Paid HashiCorp training courses + are also available in a city near you. Private training courses are also available.

- -

People

-

-The following people are some of the faces behind Vault. They each -contribute to Vault in some core way. Over time, faces may appear and -disappear from this list as contributors come and go. In addition to -the faces below, Vault is a project by -HashiCorp, so many HashiCorp -employees actively contribute to Vault. -

-
-
- -
-

Mitchell Hashimoto (@mitchellh)

-

- Mitchell Hashimoto is the creator of Vault and works on all - layers of Vault from the core to backends. In addition to Vault, - Mitchell is the creator of - Vagrant, - Packer, - Consul, and - Terraform. -

-
-
- -
- -
-

Armon Dadgar (@armon)

-

- Armon Dadgar is a creator of Vault. He works on all aspects of Vault, - focusing on core architecture and security. Armon is also the creator of - Consul, - Serf, - Terraform, - Statsite, and - Bloomd. -

-
-
- -
- -
-

Jeff Mitchell (@jefferai)

-

- Jeff Mitchell is a core contributor to Vault. He works on all layers of - Vault, from the core to backends. Jeff is an employee of HashiCorp and - has also contributed to - Consul and - Terraform, - as well as many other open-source projects. -

-
-
- -
- -
-

Vishal Nayak (@vishalnayak)

-

- Vishal Nayak is a core contributor to Vault. He works on all layers - of Vault, from the core to backends. Vishal is an employee of - HashiCorp. -

-
-
- -
- -
-

Jack Pearkes (@pearkes)

-

- Jack Pearkes is the creator of the online interactive demo of Vault. - He maintains this demo as well as the design and interaction of the - Vault website. Jack is an employee of HashiCorp and a primary engineer - behind Atlas. - He is also a core committer to - Packer, - Consul, and - Terraform. -

-
-
- -
-
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/audit/file.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/audit/file.html.md index 087b377..c4f76a2 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/audit/file.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/audit/file.html.md @@ -56,7 +56,7 @@ Following are the configuration options available for the backend. file_path required The path to where the audit log will be written. If this - path exists, the audit backend will append to it. + path exists, the audit backend will append to it. Specify `"stdout"` to write audit log to standard output; specify `"discard"` to discard output (useful in testing scenarios).
  • log_raw diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/approle.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/approle.html.md index fbfc265..9234921 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/auth/approle.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/approle.html.md @@ -20,7 +20,7 @@ An AppRole represents a set of Vault policies and login constraints that must be met to receive a token with those policies. The scope can be as narrow or broad as desired -- an AppRole can be created for a particular machine, or even a particular user on that machine, or a service spread across machines. The -credentials required for successful login depend upon on the constraints set on +credentials required for successful login depend upon the constraints set on the AppRole associated with the credentials. ## Credentials/Constraints @@ -219,733 +219,7 @@ $ curl -X POST \ ``` ## API -### /auth/approle/role -#### LIST/GET -
    -
    Description
    -
    - Lists the existing AppRoles in the backend -
    -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/approle/role` (LIST) or `/auth/approle/role?list=true` (GET)
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript - { - "auth": null, - "warnings": null, - "wrap_info": null, - "data": { - "keys": [ - "dev", - "prod", - "test" - ] - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" - } - ``` - -
    -
    - - -### /auth/approle/role/[role_name] -#### POST -
    -
    Description
    -
    - Creates a new AppRole or updates an existing AppRole. This endpoint - supports both `create` and `update` capabilities. There can be one or more - constraints enabled on the role. It is required to have at least one of them - enabled while creating or updating a role. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/approle/role/[role_name]`
    - -
    Parameters
    -
    -
      -
    • - role_name - required - Name of the AppRole. -
    • -
    -
      -
    • - bind_secret_id - optional - Require `secret_id` to be presented when logging in using this AppRole. - Defaults to 'true'. -
    • -
    -
      -
    • - bound_cidr_list - optional - Comma-separated list of CIDR blocks; if set, specifies blocks of IP - addresses which can perform the login operation. -
    • -
    -
      -
    • - policies - optional - Comma-separated list of policies set on tokens issued via this AppRole. -
    • -
    -
      -
    • - secret_id_num_uses - optional - Number of times any particular SecretID can be used to fetch a token - from this AppRole, after which the SecretID will expire. -
    • -
    -
      -
    • - secret_id_ttl - optional - Duration in either an integer number of seconds (`3600`) or an integer - time unit (`60m`) after which any SecretID expires. -
    • -
    -
      -
    • - token_num_uses - optional - Number of times issued tokens can be used. -
    • -
    -
      -
    • - token_ttl - optional - Duration in either an integer number of seconds (`3600`) or an integer - time unit (`60m`) to set as the TTL for issued tokens and at renewal - time. -
    • -
    -
      -
    • - token_max_ttl - optional - Duration in either an integer number of seconds (`3600`) or an integer - time unit (`60m`) after which the issued token can no longer be - renewed. -
    • -
    -
      -
    • - period - optional - Duration in either an integer number of seconds (`3600`) or an integer - time unit (`60m`). If set, the token generated using this AppRole is a - _periodic_ token; so long as it is renewed it never expires, but the - TTL set on the token at each renewal is fixed to the value specified - here. If this value is modified, the token will pick up the new value - at its next renewal. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -#### GET -
    -
    Description
    -
    - Reads the properties of an existing AppRole. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/approle/role/[role_name]`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - - ```javascript - { - "auth": null, - "warnings": null, - "wrap_info": null, - "data": { - "token_ttl": 1200, - "token_max_ttl": 1800, - "secret_id_ttl": 600, - "secret_id_num_uses": 40, - "policies": [ - "default" - ], - "period": 0, - "bind_secret_id": true, - "bound_cidr_list": "" - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" - } - ``` - -
    -
    - -#### DELETE -
    -
    Description
    -
    - Deletes an existing AppRole from the backend. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/approle/role/[role_name]`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -### /auth/approle/role/[role_name]/role-id -#### GET -
    -
    Description
    -
    - Reads the RoleID of an existing AppRole. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/approle/role/[role_name]/role-id`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - - ```javascript - { - "auth": null, - "warnings": null, - "wrap_info": null, - "data": { - "role_id": "e5a7b66e-5d08-da9c-7075-71984634b882" - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" - } - ``` - -
    -
    - -#### POST -
    -
    Description
    -
    - Updates the RoleID of an existing AppRole to a custom value. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/approle/role/[role_name]/role-id`
    - -
    Parameters
    -
    -
      -
    • - role_id - required - Value to be set as RoleID. -
    • -
    -
    - -
    Returns
    -
    - `204` response code. -
    -
    - - - -### /auth/approle/role/[role_name]/secret-id -#### POST -
    -
    Description
    -
    - Generates and issues a new SecretID on an existing AppRole. Similar to - tokens, the response will also contain a `secret_id_accessor` value which can - be used to read the properties of the SecretID without divulging the SecretID - itself, and also to delete the SecretID from the AppRole. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/approle/role/[role_name]/secret-id`
    - -
    Parameters
    -
    -
      -
    • - metadata - optional - Metadata to be tied to the SecretID. This should be a JSON-formatted - string containing the metadata in key-value pairs. This metadata will - be set on tokens issued with this SecretID, and is logged in audit logs - _in plaintext_. -
    • -
    -
      -
    • - cidr_list - optional -Comma separated list of CIDR blocks enforcing secret IDs to be used from -specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the -list of CIDR blocks listed here should be a subset of the CIDR blocks listed on -the role. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "auth": null, - "warnings": null, - "wrap_info": null, - "data": { - "secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780", - "secret_id": "841771dc-11c9-bbc7-bcac-6a3945a69cd9" - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" - } - ``` - -
    -
    - -#### LIST -
    -
    Description
    -
    - Lists the accessors of all the SecretIDs issued against the AppRole. - This includes the accessors for "custom" SecretIDs as well. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/approle/role/[role_name]/secret-id` (LIST) or `/auth/approle/role/[role_name]/secret-id?list=true` (GET)
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript - { - "auth": null, - "warnings": null, - "wrap_info": null, - "data": { - "keys": [ - "ce102d2a-8253-c437-bf9a-aceed4241491", - "a1c8dee4-b869-e68d-3520-2040c1a0849a", - "be83b7e2-044c-7244-07e1-47560ca1c787", - "84896a0c-1347-aa90-a4f6-aca8b7558780", - "239b1328-6523-15e7-403a-a48038cdc45a" - ] - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" - } - ``` - -
    -
    - -### /auth/approle/role/[role_name]/secret-id/lookup -#### POST -
    -
    Description
    -
    - Reads out the properties of a SecretID. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/approle/role/[role_name]/secret-id/lookup`
    - -
    Parameters
    -
    -
      -
    • - secret_id - required -Secret ID attached to the role -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "request_id": "0d25d8ec-0d16-2842-1dda-c28c25aefd4b", - "lease_id": "", - "lease_duration": 0, - "renewable": false, - "data": { - "cidr_list": null, - "creation_time": "2016-09-28T21:00:46.760570318-04:00", - "expiration_time": "0001-01-01T00:00:00Z", - "last_updated_time": "2016-09-28T21:00:46.760570318-04:00", - "metadata": {}, - "secret_id_accessor": "b4bea6b2-0214-9f7f-33cf-e732155feadb", - "secret_id_num_uses": 10, - "secret_id_ttl": 0 - } - } - ``` - -
    -
    - -### /auth/approle/role/[role_name]/secret-id/destroy -#### POST -
    -
    Description
    -
    - Deletes a SecretID. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/approle/role/[role_name]/secret-id/destroy`
    - -
    Parameters
    -
    -
      -
    • - secret_id - required -Secret ID attached to the role -
    • -
    -
    - -
    Returns
    -
    - `204` response code. -
    -
    - -### /auth/approle/role/[role_name]/secret-id-accessor/lookup -#### POST -
    -
    Description
    -
    - Reads out the properties of the SecretID associated with the supplied - accessor. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/approle/role/[role_name]/secret-id-accessor/lookup`
    - -
    Parameters
    -
    -
      -
    • - secret_id_accessor - required -Accessor of the secret ID -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "request_id": "2132237e-d1b6-d298-6117-b54a2d938d00", - "lease_id": "", - "lease_duration": 0, - "renewable": false, - "data": { - "cidr_list": null, - "creation_time": "2016-09-28T22:09:02.834238344-04:00", - "expiration_time": "0001-01-01T00:00:00Z", - "last_updated_time": "2016-09-28T22:09:02.834238344-04:00", - "metadata": {}, - "secret_id_accessor": "54ba219d-b539-ac4f-e3cf-763c02f351fb", - "secret_id_num_uses": 10, - "secret_id_ttl": 0 - } - } - ``` - -
    -
    - -### /auth/approle/role/[role_name]/secret-id-accessor/destroy -#### POST -
    -
    Description
    -
    - Deletes the SecretID associated with the given accessor. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/approle/role/[role_name]/secret-id-accessor/destroy`
    - -
    Parameters
    -
    -
      -
    • - secret_id_accessor - required -Accessor of the secret ID -
    • -
    -
    - -
    Returns
    -
    - `204` response code. -
    -
    - - -### /auth/approle/role/[role_name]/custom-secret-id -#### POST -
    -
    Description
    -
    - Assigns a "custom" SecretID against an existing AppRole. This is used in the - "Push" model of operation. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/approle/role/[role_name]/custom-secret-id`
    - -
    Parameters
    -
    -
      -
    • - secret_id - required - SecretID to be attached to the Role. -
    • -
    -
      -
    • - metadata - optional - Metadata to be tied to the SecretID. This should be a JSON-formatted - string containing the metadata in key-value pairs. This metadata will - be set on tokens issued with this SecretID, and is logged in audit logs - _in plaintext_. -
    • -
    -
      -
    • - cidr_list - optional -Comma separated list of CIDR blocks enforcing secret IDs to be used from -specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the -list of CIDR blocks listed here should be a subset of the CIDR blocks listed on -the role. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "auth": null, - "warnings": null, - "wrap_info": null, - "data": { - "secret_id_accessor": "a109dc4a-1fd3-6df6-feda-0ca28b2d4a81", - "secret_id": "testsecretid" - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" - } - ``` - -
    -
    - - -### /auth/approle/login -#### POST -
    -
    Description
    -
    - Issues a Vault token based on the presented credentials. `role_id` is always - required; if `bind_secret_id` is enabled (the default) on the AppRole, - `secret_id` is required too. Any other bound authentication values on the - AppRole (such as client IP CIDR) are also evaluated. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/approle/login`
    - -
    Parameters
    -
    -
      -
    • - role_id - required - RoleID of the AppRole. -
    • -
    -
      -
    • - secret_id - required when `bind_secret_id` is enabled - SecretID belonging to AppRole. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "auth": { - "renewable": true, - "lease_duration": 1200, - "metadata": null, - "policies": [ - "default" - ], - "accessor": "fd6c9a00-d2dc-3b11-0be5-af7ae0e1d374", - "client_token": "5b1a0318-679c-9c45-e5c6-d1b9a9035d49" - }, - "warnings": null, - "wrap_info": null, - "data": null, - "lease_duration": 0, - "renewable": false, - "lease_id": "" - } - ``` - -
    -
    - -### /auth/approle/role/[role_name]/policies -### /auth/approle/role/[role_name]/secret-id-num-uses -### /auth/approle/role/[role_name]/secret-id-ttl -### /auth/approle/role/[role_name]/token-ttl -### /auth/approle/role/[role_name]/token-max-ttl -### /auth/approle/role/[role_name]/bind-secret-id -### /auth/approle/role/[role_name]/bound-cidr-list -### /auth/approle/role/[role_name]/period -#### POST/GET/DELETE -
    -
    Description
    -
    - Updates the respective property in the existing AppRole. All of these - parameters of the AppRole can be updated using the `/auth/approle/role/[role_name]` - endpoint directly. The endpoints for each field is provided separately - to be able to delegate specific endpoints using Vault's ACL system. -
    - -
    Method
    -
    POST/GET/DELETE
    - -
    URL
    -
    `/auth/approle/role/[role_name]/[field_name]`
    - -
    Parameters
    -
    - Refer to `/auth/approle/role/[role_name]` endpoint. -
    - -
    Returns
    -
    - Refer to `/auth/approle/role/[role_name]` endpoint. -
    -
    +The AppRole authentication backend has a full HTTP API. Please see the +[AppRole API](/api/auth/approle/index.html) for more +details. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/aws.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/aws.html.md index 107ad2e..cce67fd 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/auth/aws.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/aws.html.md @@ -33,22 +33,32 @@ for your use cases. ### EC2 Authentication Method -EC2 instances have access to metadata describing the instance. (For those not -familiar with instance metadata, details can be found -[here](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html).) +Amazon EC2 instances have access to metadata which describes the instance. The +Vault EC2 authentication method leverages this components of this metadata to +authenticate and distribute an initial Vault token to an EC2 instance. The data +flow (which is also represented in the graphic below) is as follows: -One piece of "dynamic metadata" available to the EC2 instance, is the instance -identity document, a JSON representation of a collection of instance metadata. -AWS also provides PKCS#7 signature of the instance metadata document, and -publishes the public keys (grouped by region) which can be used to verify the -signature. Details on the instance identity document and the signature can be -found -[here](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html). +[![Vault AWS EC2 Authentication Flow](/assets/images/vault-aws-ec2-auth-flow.png)](/assets/images/vault-aws-ec2-auth-flow.png) -During login, the backend verifies the signature on the PKCS#7 document, -ensuring that the information contained within, is certified accurate by AWS. 
-Before succeeding the login attempt and returning a Vault token, the backend -verifies the current running status of the instance via the EC2 API. +1. An AWS EC2 instance fetches its [AWS Instance Identity Document][aws-iid] +from the [EC2 Metadata Service][aws-ec2-mds]. In addition to data itself, AWS +also provides the PKCS#7 signature of the data, and publishes the public keys +(by region) which can be used to verify the signature. + +1. The AWS EC2 instance makes a request to Vault with the Instance Identity +Document and the PKCS#7 signature of the document. + +1. Vault verifies the signature on the PKCS#7 document, ensuring the information +is certified accurate by AWS. This process validates both the validity and +integrity of the document data. As an added security measure, Vault verifies +that the instance is currently running using the public EC2 API endpoint. + +1. Provided all steps are successful, Vault returns the initial Vault token to +the EC2 instance. This token is mapped to any configured policies based on the +instance metadata. + +[aws-iid]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html +[aws-ec2-mds]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html There are various modifications to this workflow that provide more or less security, as detailed later in this documentation. @@ -87,7 +97,7 @@ and relies upon AWS to authenticate that signature. While AWS API endpoints support both signed GET and POST requests, for simplicity, the aws backend supports only POST requests. It also does not support `presigned` requests, i.e., requests with `X-Amz-Credential`, -`X-Amz-signature`, and `X-Amz-SignedHeaders` GET query parameters containing the +`X-Amz-Signature`, and `X-Amz-SignedHeaders` GET query parameters containing the authenticating information. It's also important to note that Amazon does NOT appear to include any sort @@ -109,6 +119,17 @@ are to be met during the login. 
For example, one such constraint that is supported is to bind against AMI ID. A role which is bound to a specific AMI, can only be used for login by EC2 instances that are deployed on the same AMI. +The iam authentication method allows you to specify a bound IAM principal ARN. +Clients authenticating to Vault must have an ARN that matches the ARN bound to +the role they are attempting to login to. The bound ARN allows specifying a +wildcard at the end of the bound ARN. For example, if the bound ARN were +`arn:aws:iam::123456789012:*` it would allow any principal in AWS account +123456789012 to login to it. Similarly, if it were +`arn:aws:iam::123456789012:role/*` it would allow any IAM role in the AWS +account to login to it. If you wish to specify a wildcard, you must give Vault +`iam:GetUser` and `iam:GetRole` permissions to properly resolve the full user +path. + In general, role bindings that are specific to an EC2 instance are only checked when the ec2 auth method is used to login, while bindings specific to IAM principals are only checked when the iam auth method is used to login. However, @@ -253,6 +274,60 @@ comparison of the two authentication methods. to, or make use of inferencing. If you need to make use of role tags, then you will need to use the ec2 auth method. +## Recommended Vault IAM Policy + +This specifies the recommended IAM policy needed by the AWS auth backend. Note +that if you are using the same credentials for the AWS auth and secret backends +(e.g., if you're running Vault on an EC2 instance in an IAM instance profile), +then you will need to add additional permissions as required by the AWS secret +backend. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "iam:GetInstanceProfile", + "iam:GetUser", + "iam:GetRole" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": ["sts:AssumeRole"], + "Resource": [ + "arn:aws:iam::role/" + ] + } + ] +} +``` + +Here are some of the scenarios in which Vault would need to use each of these +permissions. This isn't intended to be an exhaustive list of all the scenarios +in which Vault might make an AWS API call, but rather illustrative of why these +are needed. + +* `ec2:DescribeInstances` is necessary when you are using the `ec2` auth method + or when you are inferring an `ec2_instance` entity type to validate the EC2 + instance meets binding requirements of the role +* `iam:GetInstanceProfile` is used when you have a `bound_iam_role_arn` in the + ec2 auth method. Vault needs determine which IAM role is attached to the + instance profile. +* `iam:GetUser` and `iam:GetRole` are used when using the iam auth method and + binding to an IAM user or role principal to determine the unique AWS user ID + or when using a wildcard on the bound ARN to resolve the full ARN of the user + or role. +* The `sts:AssumeRole` stanza is necessary when you are using [Cross Account + Access](#cross-account-access). The `Resource`s specified should be a list of + all the roles for which you have configured cross-account access, and each of + those roles should have this IAM policy attached (except for the + `sts:AssumeRole` statement). + ## Client Nonce Note: this only applies to the ec2 authentication method. @@ -507,7 +582,7 @@ $ vault write auth/aws/config/client secret_key=vCtSM8ZUEQ3mOFVlYPBQkf2sO6F/W7a5 #### Configure the policies on the role. 
``` -$ vault write auth/aws/role/dev-role bound_ami_id=ami-fce3c696 policies=prod,dev max_ttl=500h +$ vault write auth/aws/role/dev-role auth_type=ec2 bound_ami_id=ami-fce3c696 policies=prod,dev max_ttl=500h $ vault write auth/aws/role/dev-role-iam auth_type=iam \ bound_iam_principal_arn=arn:aws:iam::123456789012:role/MyRole policies=prod,dev max_ttl=500h @@ -516,7 +591,7 @@ $ vault write auth/aws/role/dev-role-iam auth_type=iam \ #### Configure a required X-Vault-AWS-IAM-Server-ID Header (recommended) ``` -$ vault write auth/aws/client/config iam_auth_header_vaule=vault.example.xom +$ vault write auth/aws/config/client iam_server_id_header_value=vault.example.com ``` @@ -536,10 +611,10 @@ $ vault auth -method=aws header_value=vault.example.com role=dev-role-iam This assumes you have AWS credentials configured in the standard locations AWS SDKs search for credentials (environment variables, ~/.aws/credentials, IAM -instance profile in that order). If you do not have IAM credentials available at -any of these locations, you can explicitly pass them in on the command line -(though this is not recommended), omitting `aws_security_token` if not -applicable . +instance profile, or ECS task role, in that order). If you do not have IAM +credentials available at any of these locations, you can explicitly pass them +in on the command line (though this is not recommended), omitting +`aws_security_token` if not applicable. ``` $ vault auth -method=aws header_value=vault.example.com role=dev-role-iam \ @@ -626,1559 +701,7 @@ The response will be in JSON. For example: ``` ## API -### /auth/aws/config/client -#### POST -
    -
    Description
    -
    - Configures the credentials required to perform API calls to AWS as well as - custom endpoints to talk to AWS APIs. The instance identity document - fetched from the PKCS#7 signature will provide the EC2 instance ID. The - credentials configured using this endpoint will be used to query the status - of the instances via DescribeInstances API. If static credentials are not - provided using this endpoint, then the credentials will be retrieved from - the environment variables `AWS_ACCESS_KEY`, `AWS_SECRET_KEY` and - `AWS_REGION` respectively. If the credentials are still not found and if the - backend is configured on an EC2 instance with metadata querying - capabilities, the credentials are fetched automatically. -
    -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/aws/config/client`
    - -
    Parameters
    -
    -
      -
    • - access_key - optional - AWS Access key with permissions to query AWS APIs. The permissions - required depend on the specific configurations. If using the `iam` auth - method without inferencing, then no credentials are necessary. If using - the `ec2` auth method or using the `iam` auth method with inferencing, - then these credentials need access to `ec2:DescribeInstances`. If - additionally a `bound_iam_role` is specified, then these credentials - also need access to `iam:GetInstanceProfile`. If, however, an alternate - sts configuration is set for the target account, then the credentials - must be permissioned to call `sts:AssumeRole` on the configured role, - and that role must have the permissions described here. -
    • -
    -
      -
    • - secret_key - optional - AWS Secret key with permissions to query AWS APIs. -
    • -
    -
      -
    • - endpoint - optional - URL to override the default generated endpoint for making AWS EC2 API calls. -
    • -
    -
      -
    • - iam_endpoint - optional - URL to override the default generated endpoint for making AWS IAM API calls. -
    • -
    -
      -
    • - sts_endpoint - optional - URL to override the default generated endpoint for making AWS STS API calls. -
    • -
    -
      -
    • - iam_server_id_header_value - optional - The value to require in the `X-Vault-AWS-IAM-Server-ID` header as part of - GetCallerIdentity requests that are used in the iam auth method. If not - set, then no value is required or validated. If set, clients must - include an X-Vault-AWS-IAM-Server-ID header in the headers of login - requests, and further this header must be among the signed headers - validated by AWS. This is to protect against different types of replay - attacks, for example a signed request sent to a dev server being resent - to a production server. Consider setting this to the Vault server's DNS - name. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -#### GET -
    -
    Description
    - Returns the previously configured AWS access credentials. -
    - -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/aws/config/client`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -``` -{ - "auth": null, - "warnings": null, - "data": { - "secret_key": "vCtSM8ZUEQ3mOFVlYPBQkf2sO6F/W7a5TVzrl3Oj", - "access_key": "VKIAJBRHKH6EVTTNXDHA", - "endpoint": "", - "iam_endpoint": "", - "sts_endpoint": "", - "iam_server_id_header_value": "" - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - - -#### DELETE -
    -
    Description
    -
    - Deletes the previously configured AWS access credentials. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/aws/config/client`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -### /auth/aws/config/certificate/ -#### POST -
    -
    Description
    -
    - Registers an AWS public key to be used to verify the instance identity - documents. While the PKCS#7 signature of the identity documents have DSA - digest, the identity signature will have RSA digest, and hence the public - keys for each type varies respectively. Indicate the type of the public key - using the "type" parameter. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/aws/config/certificate/`
    - -
    Parameters
    -
    -
      -
    • - cert_name - required - Name of the certificate. -
    • -
    -
      -
    • - aws_public_cert - required - AWS Public key required to verify PKCS7 signature of the EC2 instance metadata. -
    • -
    -
      -
    • - type - optional - Takes the value of either "pkcs7" or "identity", indicating the type of - document which can be verified using the given certificate. The PKCS#7 - document will have a DSA digest and the identity signature will have an - RSA signature, and accordingly the public certificates to verify those - also vary. Defaults to "pkcs7". -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -#### GET -
    -
    Description
    -
    - Returns the previously configured AWS public key. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/aws/config/certificate/`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "aws_public_cert": "-----BEGIN CERTIFICATE-----\nMIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw\nFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD\nVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z\nODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u\nIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl\ncnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e\nih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3\nVyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P\nhviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j\nk+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U\nhhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF\nlRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf\nMNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW\nMXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw\nvSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw\n7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K\n-----END CERTIFICATE-----\n" - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - -#### LIST -
    -
    Description
    -
    - Lists all the AWS public certificates that are registered with the backend. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/aws/config/certificates` (LIST) or `/auth/aws/config/certificates?list=true` (GET)
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "keys": [ - "cert1" - ] - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - - -### /auth/aws/config/sts/ -#### POST -
    -
    Description
    -
    - Allows the explicit association of STS roles to satellite AWS accounts - (i.e. those which are not the account in which the Vault server is - running.) Login attempts from EC2 instances running in these accounts will - be verified using credentials obtained by assumption of these STS roles. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/aws/config/sts/`
    - -
    Parameters
    -
    -
      -
    • - account_id - required - AWS account ID to be associated with STS role. If set, Vault will use - assumed credentials to verify any login attempts from EC2 instances in - this account. -
    • -
    -
      -
    • - sts_role - required - AWS ARN for STS role to be assumed when interacting with the account - specified. The Vault server must have permissions to assume this role. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -#### GET -
    -
    Description
    -
    - Returns the previously configured STS role. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/aws/config/sts/`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "sts_role ": "arn:aws:iam::role/myRole" - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - -#### LIST -
    -
    Description
    -
    - Lists all the AWS Account IDs for which an STS role is registered -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/aws/config/sts` (LIST) or `/auth/aws/config/sts?list=true` (GET)
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "keys": [ - "", - "" - ] - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - -#### DELETE -
    -
    Description
    -
    - Deletes a previously configured AWS account/STS role association -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/aws/config/sts/`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - -### /auth/aws/config/tidy/identity-whitelist -##### POST -
    -
    Description
    -
    - Configures the periodic tidying operation of the whitelisted identity entries. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/aws/config/tidy/identity-whitelist`
    - -
    Parameters
    -
    -
      -
    • - safety_buffer - optional - The amount of extra time that must have passed beyond the identity - whitelist entry expiration, before it is removed from the backend - storage. Defaults to - 72h. -
    • -
    -
      -
    • - disable_periodic_tidy - optional - If set to 'true', disables the periodic tidying of the - 'identity-whitelist/' entries. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -#### GET -
    -
    Description
    -
    - Returns the previously configured periodic whitelist tidying settings. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/aws/config/tidy/identity-whitelist`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "safety_buffer": 60, - "disable_periodic_tidy": false - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - -#### DELETE -
    -
    Description
    -
    - Deletes the previously configured periodic whitelist tidying settings. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/aws/config/tidy/identity-whitelist`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - - - -### /auth/aws/config/tidy/roletag-blacklist -##### POST -
    -
    Description
    -
    - Configures the periodic tidying operation of the blacklisted role tag entries. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/aws/config/tidy/roletag-blacklist`
    - -
    Parameters
    -
    -
      -
    • - safety_buffer - optional - The amount of extra time that must have passed beyond the `roletag` - expiration, before it is removed from the backend storage. Defaults to - 72h. -
    • -
    -
      -
    • - disable_periodic_tidy - optional - If set to 'true', disables the periodic tidying of the - 'roletag-blacklist/' entries. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -#### GET -
    -
    Description
    -
    - Returns the previously configured periodic blacklist tidying settings. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/aws/config/tidy/roletag-blacklist`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "safety_buffer": 60, - "disable_periodic_tidy": false - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - -#### DELETE -
    -
    Description
    -
    - Deletes the previously configured periodic blacklist tidying settings. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/aws/config/tidy/roletag-blacklist`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - - - -### /auth/aws/role/[role] -#### POST -
    -
    Description
    -
    - Registers a role in the backend. Only those instances or principals which - are using the role registered using this endpoint, will be able to perform - the login operation. Constraints can be specified on the role, that are - applied on the instances or principals attempting to login. At least one - constraint should be specified on the role. The available constraints you - can choose are dependent on the `auth_type` of the role and, if the - `auth_type` is `iam`, then whether inferencing is enabled. A role will not - let you configure a constraint if it is not checked by the `auth_type` and - inferencing configuration of that role. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/aws/role/`
    - -
    Parameters
    -
    -
      -
    • - role - required - Name of the role. -
    • -
    -
      -
    • - auth_type - optional - The auth type permitted for this role. Valid choices are "ec2" or "iam". - If no value is specified, then it will default to "iam" (except for - legacy `aws-ec2` auth types, for which it will default to "ec2"). Only - those bindings applicable to the auth type chosen will be allowed to be - configured on the role. -
    • -
    -
      -
    • - bound_ami_id - optional - If set, defines a constraint on the EC2 instances that they should be - using the AMI ID specified by this parameter. This constraint is checked - during ec2 auth as well as the iam auth method only when inferring an - EC2 instance. -
    • -
    -
      -
    • - bound_account_id - optional - If set, defines a constraint on the EC2 instances that the account ID in - its identity document to match the one specified by this parameter. This - constraint is checked during ec2 auth as well as the iam auth method - only when inferring an EC2 instance. -
    • -
    -
      -
    • - bound_region - optional - If set, defines a constraint on the EC2 instances that the region in - its identity document must match the one specified by this parameter. This - constraint is only checked by the ec2 auth method as well as the iam - auth method only when inferring an ec2 instance. -
    • -
    -
      -
    • - bound_vpc_id - optional - If set, defines a constraint on the EC2 instance to be associated with - the VPC ID that matches the value specified by this parameter. This - constraint is only checked by the ec2 auth method as well as the iam - auth method only when inferring an ec2 instance. -
    • -
    -
      -
    • - bound_subnet_id - optional - If set, defines a constraint on the EC2 instance to be associated with - the subnet ID that matches the value specified by this parameter. This - constraint is only checked by the ec2 auth method as well as the iam - auth method only when inferring an ec2 instance. -
    • -
    -
      -
    • - bound_iam_role_arn - optional - If set, defines a constraint on the authenticating EC2 instance that it must - match the IAM role ARN specified by this parameter. The value is - prefix-matched (as though it were a glob ending in `*`). The configured IAM - user or EC2 instance role must be allowed to execute the - `iam:GetInstanceProfile` action if this is specified. This constraint is - checked by the ec2 auth method as well as the iam auth method only when - inferring an EC2 instance. -
    • -
    -
      -
    • - bound_iam_instance_profile_arn - optional - If set, defines a constraint on the EC2 instances to be associated with - an IAM instance profile ARN which has a prefix that matches the value - specified by this parameter. The value is prefix-matched (as though it - were a glob ending in `*`). This constraint is checked by the ec2 auth - method as well as the iam auth method only when inferring an ec2 - instance. -
    • -
    -
      -
    • - role_tag - optional - If set, enables the role tags for this role. The value set for this - field should be the 'key' of the tag on the EC2 instance. The 'value' - of the tag should be generated using `role//tag` endpoint. - Defaults to an empty string, meaning that role tags are disabled. This - constraint is valid only with the ec2 auth method and is not allowed - when an auth_type is iam. -
    • -
    -
      -
    • - bound_iam_principal_arn - optional - Defines the IAM principal that must be authenticated using the iam - auth method. It should look like - "arn:aws:iam::123456789012:user/MyUserName" or - "arn:aws:iam::123456789012:role/MyRoleName". This constraint is only - checked by the iam auth method. -
    • -
    -
      -
    • - inferred_entity_type - optional - When set, instructs Vault to turn on inferencing. The only current valid - value is "ec2_instance" instructing Vault to infer that the role comes - from an EC2 instance in an IAM instance profile. This only applies to - the iam auth method. -
    • -
    -
      -
    • - inferred_aws_region - optional - When role inferencing is activated, the region to search for the - inferred entities (e.g., EC2 instances). Required if role inferencing is - activated. This only applies to the iam auth method. -
    • -
    -
      -
    • - ttl - optional - The TTL period of tokens issued using this role, provided as "1h", - where hour is the largest suffix. -
    • -
    -
      -
    • - max_ttl - optional - The maximum allowed lifetime of tokens issued using this role. -
    • -
    -
      -
    • - period - optional - If set, indicates that the token generated using this role should never - expire. The token should be renewed within the duration specified by - this value. At each renewal, the token's TTL will be set to the value - of this parameter. The maximum allowed lifetime of tokens issued using - this role. -
    • -
    -
      -
    • - policies - optional - Policies to be set on tokens issued using this role. -
    • -
    -
      -
    • - allow_instance_migration - optional - If set, allows migration of the underlying instance where the client - resides. This keys off of pendingTime in the metadata document, so - essentially, this disables the client nonce check whenever the instance - is migrated to a new host and pendingTime is newer than the - previously-remembered time. Use with caution. This only applies to - authentications via the ec2 auth method. -
    • -
    -
      -
    • - disallow_reauthentication - optional - If set, only allows a single token to be granted per instance ID. In - order to perform a fresh login, the entry in whitelist for the instance - ID needs to be cleared using - 'auth/aws/identity-whitelist/' endpoint. Defaults to - 'false'. This only applies to authentications via the ec2 auth method. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -#### GET -
    -
    Description
    -
    - Returns the previously registered role configuration. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/aws/role/`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "bound_ami_id": "ami-fce36987", - "role_tag": "", - "policies": [ - "default", - "dev", - "prod" - ], - "max_ttl": 1800000, - "disallow_reauthentication": false, - "allow_instance_migration": false - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - - -#### LIST -
    -
    Description
    -
    - Lists all the roles that are registered with the backend. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/aws/roles` (LIST) or `/auth/aws/roles?list=true` (GET)
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "keys": [ - "dev-role", - "prod-role" - ] - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - - -#### DELETE -
    -
    Description
    -
    - Deletes the previously registered role. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/aws/role/`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -### /auth/aws/role/[role]/tag -#### POST -
    -
    Description
    -
    - Creates a role tag on the role, which help in restricting the capabilities - that are set on the role. Role tags are not tied to any specific ec2 - instance unless specified explicitly using the `instance_id` parameter. By - default, role tags are designed to be used across all instances that - satisfies the constraints on the role. Regardless of which instances have - role tags on them, capabilities defined in a role tag must be a strict - subset of the given role's capabilities. Note that, since adding and - removing a tag is often a widely distributed privilege, care needs to be - taken to ensure that the instances are attached with correct tags to not - let them gain more privileges than what were intended. If a role tag is - changed, the capabilities inherited by the instance will be those defined - on the new role tag. Since those must be a subset of the role - capabilities, the role should never provide more capabilities than any - given instance can be allowed to gain in a worst-case scenario. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/aws/role/<role>/tag`
    - -
    Parameters
    -
    -
      -
    • - role - required - Name of the role. -
    • -
    -
      -
    • - policies - optional - Policies to be associated with the tag. If set, must be a subset of the - role's policies. If set, but set to an empty value, only the 'default' - policy will be given to issued tokens. -
    • -
    -
      -
    • - max_ttl - optional - If set, specifies the maximum allowed token lifetime. -
    • -
    -
      -
    • - instance_id - optional - Instance ID for which this tag is intended for. If set, the created tag - can only be used by the instance with the given ID. -
    • -
    -
      -
    • - disallow_reauthentication - optional - If set, only allows a single token to be granted per instance ID. This - can be cleared with the auth/aws/identity-whitelist endpoint. - Defaults to 'false'. -
    • -
    -
      -
    • - allow_instance_migration - optional - If set, allows migration of the underlying instance where the client - resides. This keys off of pendingTime in the metadata document, so - essentially, this disables the client nonce check whenever the instance - is migrated to a new host and pendingTime is newer than the - previously-remembered time. Use with caution. Defaults to 'false'. -
    • -
    -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "tag_value": "v1:09Vp0qGuyB8=:r=dev-role:p=default,prod:d=false:t=300h0m0s:uPLKCQxqsefRhrp1qmVa1wsQVUXXJG8UZP/pJIdVyOI=", - "tag_key": "VaultRole" - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - - -### /auth/aws/login -#### POST -
    -
    Description
    -
    - Fetch a token. This endpoint verifies the pkcs7 signature of the instance - identity document or the signature of the signed GetCallerIdentity request. - With the ec2 auth method, or when inferring an EC2 instance, verifies that - the instance is actually in a running state. Cross checks the constraints - defined on the role with which the login is being performed. With the ec2 - auth method, as an alternative to pkcs7 signature, the identity document - along with its RSA digest can be supplied to this endpoint. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/aws/login`
    - -
    Parameters
    -
    -
      -
    • - role - optional - Name of the role against which the login is being attempted. - If `role` is not specified, then the login endpoint looks for a role - bearing the name of the AMI ID of the EC2 instance that is trying to - login if using the ec2 auth method, or the "friendly name" (i.e., role - name or username) of the IAM principal authenticated. - If a matching role is not found, login fails. -
    • -
    -
      -
    • - identity - required - Base64 encoded EC2 instance identity document. This needs to be - supplied along with the `signature` parameter. If using `curl` for - fetching the identity document, consider using the option `-w 0` while - piping the output to `base64` binary. -
    • -
    -
      -
    • - signature - required - Base64 encoded SHA256 RSA signature of the instance identity document. - This needs to be supplied along with `identity` parameter when using the - ec2 auth method. -
    • -
    -
      -
    • - pkcs7 - required - PKCS7 signature of the identity document with all `\n` characters - removed. Either this needs to be set *OR* both `identity` and - `signature` need to be set when using the ec2 auth method. -
    • -
    -
      -
    • - nonce - optional - The nonce to be used for subsequent login requests. If this parameter - is not specified at all and if reauthentication is allowed, then the - backend will generate a random nonce, attaches it to the instance's - identity-whitelist entry and returns the nonce back as part of auth - metadata. This value should be used with further login requests, to - establish client authenticity. Clients can choose to set a custom nonce - if preferred, in which case, it is recommended that clients provide a - strong nonce. If a nonce is provided but with an empty value, it - indicates intent to disable reauthentication. Note that, when - `disallow_reauthentication` option is enabled on either the role or the - role tag, the `nonce` holds no significance. This is ignored unless - using the ec2 auth method. -
    • -
    -
      -
    • - iam_http_request_method - required - HTTP method used in the signed request. Currently only POST is - supported, but other methods may be supported in the future. This is - required when using the iam auth method. -
    • -
    -
      -
    • - iam_request_url - required - Base64-encoded HTTP URL used in the signed request. Most likely just - `aHR0cHM6Ly9zdHMuYW1hem9uYXdzLmNvbS8=` (base64-encoding of - `https://sts.amazonaws.com/`) as most requests will probably use POST - with an empty URI. This is required when using the iam auth method. -
    • -
    -
      -
    • - iam_request_body - required - Base64-encoded body of the signed request. Most likely - `QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNQ==` - which is the base64 encoding of - `Action=GetCallerIdentity&Version=2011-06-15`. This is required - when using the iam auth method. -
    • -
    -
      -
    • - iam_request_headers - required - Base64-encoded, JSON-serialized representation of the HTTP request - headers. The JSON serialization assumes that each header key maps to an - array of string values (though the length of that array will probably - only be one). If the `iam_server_id_header_value` is configured in Vault - for the aws auth mount, then the headers must include the - X-Vault-AWS-IAM-Server-ID header, its value must match the value - configured, and the header must be included in the signed headers. This - is required when using the iam auth method. -
    • -
    -
    - -
    Returns
    -
    - -```javascript -{ - "auth": { - "renewable": true, - "lease_duration": 1800000, - "metadata": { - "role_tag_max_ttl": "0", - "instance_id": "i-de0f1344", - "ami_id": "ami-fce36983", - "role": "dev-role", - "auth_type": "ec2" - }, - "policies": [ - "default", - "dev" - ], - "accessor": "20b89871-e6f2-1160-fb29-31c2f6d4645e", - "client_token": "c9368254-3f21-aded-8a6f-7c818e81b17a" - }, - "warnings": null, - "data": null, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - - -### /auth/aws/roletag-blacklist/ -#### POST -
    -
    Description
    -
    - Places a valid role tag in a blacklist. This ensures that the role tag - cannot be used by any instance to perform a login operation again. Note - that if the role tag was previously used to perform a successful login, - placing the tag in the blacklist does not invalidate the already issued - token. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/aws/roletag-blacklist/<role_tag>`
    - -
    Parameters
    -
    -
      -
    • - role_tag - required - Role tag to be blacklisted. The tag can be supplied as-is. In order to - avoid any encoding problems, it can be base64 encoded. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -#### GET -
    -
    Description
    -
    - Returns the blacklist entry of a previously blacklisted role tag. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/aws/roletag-blacklist/<role_tag>`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "expiration_time": "2016-04-25T10:35:20.127058773-04:00", - "creation_time": "2016-04-12T22:35:01.178348124-04:00" - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - - -#### LIST -
    -
    Description
    -
    - Lists all the role tags that are blacklisted. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/aws/roletag-blacklist` (LIST) or `/auth/aws/roletag-blacklist?list=true` (GET)
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "keys": [ - "v1:09Vp0qGuyB8=:a=ami-fce3c696:p=default,prod:d=false:t=300h0m0s:uPLKCQxqsefRhrp1qmVa1wsQVUXXJG8UZP/" - ] - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - - -#### DELETE -
    -
    Description
    -
    - Deletes a blacklisted role tag. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/aws/roletag-blacklist/<role_tag>`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -### /auth/aws/tidy/roletag-blacklist -#### POST -
    -
    Description
    -
    - Cleans up the entries in the blacklist based on expiration time on the - entry and `safety_buffer`. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/aws/tidy/roletag-blacklist`
    - -
    Parameters
    -
    -
      -
    • - safety_buffer - optional - The amount of extra time that must have passed beyond the `roletag` - expiration, before it is removed from the backend storage. Defaults to - 72h. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -### /auth/aws/identity-whitelist/ -#### GET -
    -
    Description
    -
    - Returns an entry in the whitelist. An entry will be created/updated by - every successful login. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/aws/identity-whitelist/<instance_id>`
    - -
    Parameters
    -
    -
      -
    • - instance_id - required - EC2 instance ID. A successful login operation from an EC2 instance gets - cached in this whitelist, keyed off of instance ID. -
    • -
    -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "pending_time": "2016-04-14T01:01:41Z", - "expiration_time": "2016-05-05 10:09:16.67077232 +0000 UTC", - "creation_time": "2016-04-14 14:09:16.67077232 +0000 UTC", - "client_nonce": "5defbf9e-a8f9-3063-bdfc-54b7a42a1f95", - "role": "dev-role" - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - - -#### LIST -
    -
    Description
    -
    - Lists all the instance IDs that are in the whitelist of successful logins. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/aws/identity-whitelist` (LIST) or `/auth/aws/identity-whitelist?list=true` (GET)
    -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "auth": null, - "warnings": null, - "data": { - "keys": [ - "i-aab47d37" - ] - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" -} -``` - -
    -
    - - -#### DELETE -
    -
    Description
    -
    - Deletes a cache of the successful login from an instance. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/aws/identity-whitelist/<instance_id>`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -### /auth/aws/tidy/identity-whitelist -#### POST -
    -
    Description
    -
    - Cleans up the entries in the whitelist based on expiration time and `safety_buffer`. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/aws/tidy/identity-whitelist`
    - -
    Parameters
    -
    -
      -
    • - safety_buffer - optional - The amount of extra time that must have passed beyond the identity - expiration, before it is removed from the backend storage. Defaults to - 72h. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    +The AWS authentication backend has a full HTTP API. Please see the +[AWS Auth API](/api/auth/aws/index.html) for more +details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/cert.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/cert.html.md index 4f09405..0bfd90c 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/auth/cert.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/cert.html.md @@ -124,350 +124,5 @@ of the header should be "X-Vault-Token" and the value should be the token. ## API -### /auth/cert/certs - -#### DELETE - -
    -
    Description
    -
    - Deletes the named role and CA cert from the backend mount. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/cert/certs/<name>`
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - A `204` response code. -
    -
    - -#### GET - -
    -
    Description
    -
    - Gets information associated with the named role. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/cert/certs/<name>`
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript - { - "lease_id": "", - "renewable": false, - "lease_duration": 0, - "data": { - "certificate": "-----BEGIN CERTIFICATE-----\nMIIEtzCCA5+.......ZRtAfQ6r\nwlW975rYa1ZqEdA=\n-----END CERTIFICATE-----", - "display_name": "test", - "policies": "", - "allowed_names": "", - "ttl": 2764800 - }, - "warnings": null, - "auth": null - } - ``` - -
    -
    - -#### LIST - -
    -
    Description
    -
    - Lists configured certificate names. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/cert/certs` (LIST) or `/auth/cert/certs?list=true` (GET)
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript - { - "lease_id": "", - "renewable": false, - "lease_duration": 0, - "data": { - "keys": ["cert1", "cert2"] - }, - "warnings": null, - "auth": null - } - ``` - -
    -
    - -#### POST - -
    -
    Description
    -
    - Sets a CA cert and associated parameters in a role name. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/cert/certs/<name>`
    - -
    Parameters
    -
    -
      -
    • - certificate - required - The PEM-format CA certificate. -
    • -
    • - allowed_names - optional - Constrain the Common and Alternative Names in the client certificate - with a [globbed pattern](https://github.com/ryanuber/go-glob/blob/master/README.md#example). - Value is a comma-separated list of patterns. - Authentication requires at least one Name matching at least one pattern. - If not set, defaults to allowing all names. -
    • -
    • - policies - optional - A comma-separated list of policies to set on tokens issued when - authenticating against this CA certificate. -
    • -
    • - display_name - optional - The `display_name` to set on tokens issued when authenticating - against this CA certificate. If not set, defaults to the name - of the role. -
    • -
    • - ttl - optional - The TTL period of the token, provided as a number of seconds. If not - provided, the token is valid for the mount or system default TTL - time, in that order. -
    • -
    -
    - -
    Returns
    -
    - A `204` response code. -
    -
    - -### /auth/cert/crls - -#### DELETE - -
    -
    Description
    -
    - Deletes the named CRL from the backend mount. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/cert/crls/<name>`
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - A `204` response code. -
    -
    - -#### GET - -
    -
    Description
    -
    - Gets information associated with the named CRL (currently, the serial - numbers contained within). As the serials can be integers up to an - arbitrary size, these are returned as strings. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/cert/crls/<name>`
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript - { - "auth": null, - "data": { - "serials": { - "13": {} - } - }, - "lease_duration": 0, - "lease_id": "", - "renewable": false, - "warnings": null - } - - ``` - -
    -
    - -#### POST - -
    -
    Description
    -
    - Sets a named CRL. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/cert/crls/<name>`
    - -
    Parameters
    -
    -
      -
    • - crl - required - The PEM-format CRL. -
    • -
    -
    - -
    Returns
    -
    - A `204` response code. -
    -
    - -### /auth/cert/login - -#### POST - -
    -
    Description
    -
    - Log in and fetch a token. If there is a valid chain to a CA configured in - the backend and all role constraints are matched, a token will be issued. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/cert/login`
    - -
    Parameters
    -
    -
      -
    • - name - optional - Authenticate against only the named certificate role, returning its - policy list if successful. If not set, defaults to trying all - certificate roles and returning any one that matches. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "auth": { - "client_token": "ABCD", - "policies": ["web", "stage"], - "lease_duration": 3600, - "renewable": true, - } - } - ``` - -
    -
    - -### /auth/cert/config - -#### POST - -
    -
    Description
    -
    - Configuration options for the backend. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/cert/config`
    - -
    Parameters
    -
    -
      -
    • - disable_binding - optional - If set, during renewal, skips the matching of presented client identity with the client identity used during login. Defaults to false. -
    • -
    -
    - -
    Returns
    -
    - A `204` response code. -
    -
    +The TLS Certificate authentication backend has a full HTTP API. Please see the +[TLS Certificate API](/api/auth/cert/index.html) for more details. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/gcp.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/gcp.html.md new file mode 100644 index 0000000..030ecb6 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/gcp.html.md @@ -0,0 +1,385 @@ +--- +layout: "docs" +page_title: "Auth Plugin Backend: GCP" +sidebar_current: "docs-auth-gcp" +description: |- + The gcp backend plugin allows automated authentication of AWS entities. +--- + +# Auth Plugin Backend: gcp + +The `gcp` plugin backend allows authentication against Vault using +Google credentials. It treats GCP as a Trusted Third Party and expects a +[JSON Web Token (JWT)](https://tools.ietf.org/html/rfc7519) signed by Google +credentials from the authenticating entity. This token can be generated through +different GCP APIs depending on the type of entity. + +Currently supports authentication for: + + * GCP IAM service accounts (`iam`) + * GCE IAM service accounts (`gce`) + +We will update the documentation as we introduce more supported entities. + +The following documentation assumes that the backend has been +[mounted](/docs/plugin/index.html) at `auth/gcp`. + +~> Note: The `gcp` backend is implemented as a +[Vault plugin](/docs/internals/plugins.html) backend. You must be using Vault +v0.8.0+ to use plugins. + +## Authentication Workflow + +### IAM + +The Vault authentication workflow for IAM service accounts is as follows: + + 1. A client with IAM service account credentials generates a signed JWT using the IAM [projects.serviceAccounts.signJwt](https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/signJwt) method. See [usage](#the-iam-authentication-token) for the expected format and example code. + 2. 
The client sends this JWT to Vault in a login request with a role name. This role should have type `iam`. + 3. Vault grabs the `kid` header value, which contains the ID of the key-pair used to generate the JWT, and the `sub` ID/email to find the service account key. If the service account does not exist or the key is not linked to the service account, Vault will deny authentication. + 4. Vault authorizes the confirmed service account against the given role. See [authorization section](#authorization-workflow) to see how each type of role handles authorization. + +[![IAM Login Workflow](/assets/images/vault-gcp-iam-auth-workflow.svg)](/assets/images/vault-gcp-iam-auth-workflow.svg) + +#### The `iam` Authentication Token + +The expected format of the JWT payload is as follows: + +```json +{ + "sub" : "[SERVICE ACCOUNT IDENTIFIER]", + "aud" : "vault/[ROLE NAME]", + "exp" : "[EXPIRATION]" +} +``` + +* `[SERVICE ACCOUNT ID OR EMAIL]`: Either the email or the unique ID of a service account. +* `[ROLE NAME]`: Name of the role that this token will be used to login against. The full expected `aud` string should end in "vault/$roleName". +* `[EXPIRATION]` : A [NumericDate](https://tools.ietf.org/html/rfc7519#section-2) value (seconds from Epoch). This value must be before the max JWT expiration allowed for a role (see `max_jwt_exp` parameter for creating a role). This defaults to 15 minutes and cannot be more than a hour. + +**Note:** By default, we enforce a shorter `exp` period than the default length +for a given token (1 hour) in order to make reuse of tokens difficult. You can +customize this value for a given role but it will be capped at an hour. + +To generate this token, we use the Google IAM API method [projects.serviceAccounts.signJwt](https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/signJwt). +See an [example of how to generate this token](#generating-iam-token). 
+ +### GCE + +The Vault authentication workflow for GCE instances is as follows: + + 1. A client logins into a GCE instances and [obtains an instance identity metadata token](https://cloud.google.com/compute/docs/instances/verifying-instance-identity). + 2. The client request to login using this token (a JWT) and gives a role name to Vault. + 3. Vault uses the `kid` header value, which contains the ID of the key-pair used to generate the JWT, to find the OAuth2 public cert + to verify this JWT. + 4. Vault authorizes the confirmed instance against the given role. See the [authorization section](#authorization-workflow) to see how each type of role handles authorization. + +[![GCE Login Workflow](/assets/images/vault-gcp-gce-auth-workflow.svg)](/assets/images/vault-gcp-gce-auth-workflow.svg) + +#### The `gce` Authentication Token + +The token can be obtained from the `service-accounts/default/identity` endpoint for a instance's +[metadata server](https://cloud.google.com/compute/docs/storing-retrieving-metadata). You can use the +[example of how to obtain an instance metadata token](#generating-gce-token) to get started. + +Learn more about the JWT format from the +[documentation](https://cloud.google.com/compute/docs/instances/verifying-instance-identity#token_format) +for the identity metadata token. The params the user provides are: + +* `[AUD]`: The full expected `aud` string should end in "vault/$roleName". Note that Google requires the `aud` + claim to contain a scheme or authority but Vault will only check for a suffix. +* `[FORMAT]`: MUST BE `full` for Vault. Format of the metadata token generated (`standard` or `full`). + +### Examples for Obtaining Auth Tokens + +#### Generating IAM Token + +**HTTP Request Example** + +This uses [Google API HTTP annotation](https://github.com/googleapis/googleapis/blob/master/google/api/http.proto). +Note the `$PAYLOAD` must be a marshaled JSON string with escaped double quotes. 
+ +```sh +#!/bin/sh +# [START PARAMS] +ROLE="test-role" +PROJECT="project-123456" +SERVICE_ACCOUNT="my-account@project-123456.iam.gserviceaccount.com" +OAUTH_TOKEN=$(oauth2l header cloud-platform) +# [END PARAMS] + + +PAYLOAD=$(echo "{ \"aud\": \"vault/$ROLE\", \"sub\": \"$SERVICE_ACCOUNT\"}" | sed -e 's/"/\\&/g') +curl -H "$OAUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -X POST -d "{\"payload\":\"$PAYLOAD\"}" https://iam.googleapis.com/v1/projects/$PROJECT/serviceAccounts/$SERVICE_ACCOUNT:signJwt``` +``` + +**Golang Example** + +We use the Go OAuth2 libraries, GCP IAM API, and Vault API. The example generates a token valid for the `dev-role` role (as indicated by the `aud` field of `jwtPayload`). + +```go +// Abbreviated imports to show libraries. +import ( + vaultapi "github.com/hashicorp/vault/api" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/iam/v1" + ... +) + +func main() { + // Start [PARAMS] + project := "project-123456" + serviceAccount := "myserviceaccount@project-123456.iam.gserviceaccount.com" + credsPath := "path/to/creds.json" + + os.Setenv("VAULT_ADDR", "https://vault.mycompany.com") + defer os.Setenv("VAULT_ADDR", "") + // End [PARAMS] + + // Start [GCP IAM Setup] + jsonBytes, err := ioutil.ReadFile(credsPath) + if err != nil { + log.Fatal(err) + } + config, err := google.JWTConfigFromJSON(jsonBytes, iam.CloudPlatformScope) + if err != nil { + log.Fatal(err) + } + + httpClient := config.Client(oauth2.NoContext) + iamClient, err := iam.New(httpClient) + if err != nil { + log.Fatal(err) + } + // End [GCP IAM Setup] + + // 1. Generate signed JWT using IAM. 
+ resourceName := fmt.Sprintf("projects/%s/serviceAccounts/%s", project, serviceAccount) + jwtPayload := map[string]interface{}{ + "aud": "vault/dev-role", + "sub": serviceAccount, + "exp": time.Now().Add(time.Minute * 10).Unix(), + } + + payloadBytes, err := json.Marshal(jwtPayload) + if err != nil { + log.Fatal(err) + } + signJwtReq := &iam.SignJwtRequest{ + Payload: string(payloadBytes), + } + + resp, err := iamClient.Projects.ServiceAccounts.SignJwt(resourceName, signJwtReq).Do() + if err != nil { + log.Fatal(err) + } + + // 2. Send signed JWT in login request to Vault. + vaultClient, err := vaultapi.NewClient(vaultapi.DefaultConfig()) + if err != nil { + log.Fatal(err) + } + + vaultResp, err := vaultClient.Logical().Write( + "auth/gcp/login", + map[string]interface{}{ + "role": "test", + "jwt": resp.SignedJwt, + }) + + if err != nil { + log.Fatal(err) + } + + // 3. Use auth token from response. + log.Println("Access token: %s", vaultResp.Auth.ClientToken) + vaultClient.SetToken(vaultResp.Auth.ClientToken) + // ... +} +``` + +#### Generating GCE Token + +**HTTP Request Example** + +This uses [Google API HTTP annotation](https://github.com/googleapis/googleapis/blob/master/google/api/http.proto) +and must be run on a GCE instance. + +```sh +# [START PARAMS] +VAULT_ADDR="https://127.0.0.1:8200/" +ROLE="my-gce-role" +SERVICE_ACCOUNT="default" # replace with an instance's service account if needed +# [END PARAMS] + +curl -H "Metadata-Flavor: Google"\ + -G + --data-urlencode "audience=$VAULT_ADDR/vault/$ROLE"\ + --data-urlencode "format=full" \ + "http://metadata/computeMetadata/v1/instance/service-accounts/$SERVICE_ACCOUNT/identity" +``` + +## Authorization Workflow + +For `gcp`, login is per-role. Each role has a specific set of restrictions that +an authorized entity must fit in order to login. These restrictions are specific +to the role type. 
+ +Currently supported role types are: + +* `iam` (Supports both IAM and inference for GCE tokens) +* `gce` (Only supports GCE tokens) + +Vault validates an authenticated entity against the role and uses the role to +determine information about the lease, including Vault policies assigned and +TTLs. For a full list of accepted restrictions, see [role API docs](/api/auth/gcp/index.html#create-role). + +If a GCE token is provided for login under an `iam` role, the service account associated with the token +(`sub` claim) is inferred and used to login. + +## Usage + +### Via the CLI. + +#### Enable GCP authentication in Vault + +``` +$ vault auth-enable gcp +``` + +#### Configure the GCP authentication backend + +``` +$ vault write auth/gcp/config credentials=@path/to/creds.json +``` + +**Configuration**: This includes GCP credentials Vault will use these to make calls to +GCP APIs. If credentials are not configured or if the user explicitly sets the +config with no credentials, the Vault server will attempt to use +[Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials) +as set on the Vault server. + +See [API documentation](/api/auth/gcp/index.html#configure) +to learn more about parameters. + +#### Create a role + +``` +$ vault write auth/gcp/role/dev-role \ + type="iam" \ + project_id="project-123456" \ + policies="prod,dev" \ + bound_service_accounts="serviceaccount1@project1234.iam.gserviceaccount.com,uuid123,..." + ... +``` + +**Roles**: Roles are associated with an authentication type/entity and a set of +Vault [policies](/docs/concepts/policies.html). Roles are configured with constraints +specific to the authentication type, as well as overall constraints and +configuration for the generated auth tokens. + +We also expose a helper path for updating the service accounts attached to an existing `iam` role: + + ```sh + vault write auth/gcp/role/iam-role/service-accounts \ + add='serviceAccountToAdd,...' 
\ + remove='serviceAccountToRemove,...' \ + ``` + +and for updating the labels attached to an existing `gce` role: + + ```sh + vault write auth/gcp/role/gce-role/labels \ + add='label1:value1,foo:bar,...' \ + remove='key1,key2,...' \ + ``` + + +See [API docs](/api/auth/gcp/index.html#create-role) to view +parameters for role creation and updates. + +#### Login to get a Vault Token + +Once the backend is setup and roles are registered with the backend, +the user can login against a specific role. + +``` +$ vault write auth/gcp/login role='dev-role' jwt='eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...' +``` + +The `role` and `jwt` parameters are required. These map to the name of the +role to login against, and the signed JWT token for authenticating a role +respectively. The format of the provided JWT differs depending on the +authenticating entity. + +### Via the API + +#### Enable GCP authentication in Vault + +``` +$ curl $VAULT_ADDR/v1/sys/auth/gcp -d '{ "type": "gcp" }' +``` + +#### Configure the GCP authentication backend + +``` +$ curl $VAULT_ADDR/v1/auth/gcp/config \ +-d '{ "credentials": "{...}" }' +``` + +#### Create a role + +``` +$ curl $VAULT_ADDR/v1/auth/gcp/role/dev-role \ +-d '{ "type": "iam", "project_id": "project-123456", ...}' +``` + +#### Login to get a Vault Token + +The endpoint for the GCP login is `auth/gcp/login`. + +The `gcp` mountpoint value in the url is the default mountpoint value. +If you have mounted the `gcp` backend with a different mountpoint, use that value. + +The `role` and `jwt` should be sent in the POST body encoded as JSON. + +``` +$ curl $VAULT_ADDR/v1/auth/gcp/login \ + -d '{ "role": "dev-role", "jwt": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." }' +``` + +The response will be in JSON. 
For example: + +```json +{ + "auth":{ + "client_token":"f33f8c72-924e-11f8-cb43-ac59d697597c", + "accessor":"0e9e354a-520f-df04-6867-ee81cae3d42d", + "policies":[ + "default", + "dev", + "prod" + ], + "metadata":{ + "role": "dev-role", + "service_account_email": "dev1@project-123456.iam.gserviceaccount.com", + "service_account_id": "111111111111111111111" + }, + "lease_duration":2764800, + "renewable":true + }, + ... +} +``` + +## Contributing + +This plugin is developed in a separate Github repository: [`hashicorp/vault-plugin-auth-gcp`](https://github.com/hashicorp/vault-plugin-auth-gcp). Please file all feature requests, bugs, and pull requests specific to the GCP plugin under that repository. + +## API + +The GCP Auth Plugin has a full HTTP API. Please see the +[API docs](/api/auth/gcp/index.html) for more details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/github.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/github.html.md index eb678fd..aff60e2 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/auth/github.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/github.html.md @@ -14,6 +14,16 @@ The GitHub auth backend can be used to authenticate with Vault using a GitHub personal access token. This method of authentication is most useful for humans: operators or developers using Vault directly via the CLI. +**N.B.**: Vault does not support an OAuth workflow to generate GitHub tokens, +so does not act as a GitHub application. As a result, this backend uses +personal access tokens. An important consequence is that any valid GitHub +access token with the `read:org` scope can be used for authentication. If such +a token is stolen from a third party service, and the attacker is able to make +network calls to Vault, they will be able to log in as the user that generated +the access token. 
When using this backend it is a good idea to ensure that +access to Vault is restricted at a network level rather than public. If these +risks are unacceptable to you, you should use a different backend. + ## Authentication #### Via the CLI @@ -148,3 +158,9 @@ token_policies: [default dev-policy] Clients can use this token to perform an allowed set of operations on all the paths contained by the policy set. + +## API + +The GitHub authentication backend has a full HTTP API. Please see the +[GitHub Auth API](/api/auth/github/index.html) for more +details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/kubernetes.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/kubernetes.html.md new file mode 100644 index 0000000..1f7c1ba --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/kubernetes.html.md @@ -0,0 +1,159 @@ +--- +layout: "docs" +page_title: "Auth Plugin Backend: Kubernetes" +sidebar_current: "docs-auth-kubernetes" +description: |- + The Kubernetes auth backend allows automated authentication of Kubernetes + Service Accounts. +--- + +# Auth Backend: Kubernetes + +Name: `kubernetes` + +The Kubernetes auth backend can be used to authenticate with Vault using a +Kubernetes Service Account Token. This method of authentication makes it easy to +introduce a Vault token into a Kubernetes Pod. + +## Authentication + +#### Via the CLI + +``` +$ vault write auth/kubernetes/login role=demo jwt=... 
+ +Key Value +--- ----- +token 1a445c6a-1ff5-7085-18f7-eca12210981d +token_accessor fa82afb3-298b-41b0-6593-8b861bd3dc12 +token_duration 768h0m0s +token_renewable true +token_policies [default] +token_meta_service_account_secret_name "vault-auth-token-pd21c" +token_meta_service_account_uid "aa9aa8ff-98d0-11e7-9bb7-0800276d99bf" +token_meta_role "demo" +token_meta_service_account_name "vault-auth" +token_meta_service_account_namespace "default" +``` + +#### Via the API + +The endpoint for the kubernetes login is `auth/kubernetes/login`. + +The `kubernetes` mountpoint value in the url is the default mountpoint value. +If you have mounted the `kubernetes` backend with a different mountpoint, use that value. + +```shell +$ curl $VAULT_ADDR/v1/auth/kubernetes/login \ + -d '{ "jwt": "your_service_account_jwt", "role": "demo" }' +``` + +The response will be in JSON. For example: + +```javascript +{ + "request_id": "e344f8c2-fffc-c3e0-d118-e3a2e5de2d0d", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + "data": null, + "warnings": null, + "auth": { + "client_token": "38fe9691-e623-7238-f618-c94d4e7bc674", + "accessor": "78e87a38-84ed-2692-538f-ca8b9f400ab3", + "policies": [ + "default" + ], + "metadata": { + "role": "test", + "service_account_name": "vault-auth", + "service_account_namespace": "default", + "service_account_secret_name": "vault-auth-token-pd21c", + "service_account_uid": "aa9aa8ff-98d0-11e7-9bb7-0800276d99bf" + }, + "lease_duration": 2764800, + "renewable": true + } +} +``` + +## Configuration + +First, you must enable the Kubernetes auth backend: + +``` +$ vault auth-enable kubernetes +Successfully enabled 'kubernetes' at 'kubernetes'! +``` + +Now when you run `vault auth -methods`, the Kubernetes backend is available: + +``` +Path Type Description +kubernetes/ kubernetes +token/ token token based credentials +``` + +Prior to using the Kubernetes auth backend, it must be configured. To +configure it, use the `/config` endpoint. 
+ +``` +$ vault write auth/kubernetes/config \ + pem_keys=@signingkey.crt \ + kubernetes_host=https://192.168.99.100:8443 \ + kubernetes_ca_cert=@ca.crt +``` + +## Creating a Role + +Authentication with this backend is role based. Before a token can be used to +login it first must be configured in a role. + +``` +vault write auth/kubernetes/role/demo \ + bound_service_account_names=vault-auth \ + bound_service_account_namespaces=default \ + policies=default \ + ttl=1h +``` + +This role authorizes the vault-auth service account in the default namespace and +it gives it the default policy. + +## Configuring Kubernetes + +This backend accesses the [Kubernetes TokenReview +API](https://kubernetes.io/docs/api-reference/v1.7/#tokenreview-v1-authentication) +to validate the provided JWT is still valid. Kubernetes should be running with +`--service-account-lookup`. This is defaulted to true in Kubernetes 1.7, but any +versions prior should ensure the Kubernetes API server is started with this +setting. Otherwise deleted tokens in Kubernetes will not be properly revoked and +will be able to authenticate to this backend. + +Service Accounts used in this backend will need to have access to the +TokenReview API. If Kubernetes is configured to use RBAC roles the Service +Account should be granted permissions to access this API. The following +example ClusterRoleBinding could be used to grant these permissions: + +``` +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: role-tokenreview-binding + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: vault-auth + namespace: default +``` + +## API + +The Kubernetes Auth Plugin has a full HTTP API. Please see the +[API docs](/api/auth/kubernetes/index.html) for more details. 
+ + diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/ldap.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/ldap.html.md index 4397a3a..b136014 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/auth/ldap.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/ldap.html.md @@ -263,524 +263,7 @@ default, foobar, zoobar It should be noted that user -> policy mapping happens at token creation time. And changes in group membership on the LDAP server will not affect tokens that have already been provisioned. To see these changes, old tokens should be revoked and the user should be asked to reauthenticate. ## API -### /auth/ldap/config -#### POST -
    -
    Description
    -
    - Configures the LDAP authentication backend. -
    -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/ldap/config`
    - -
    Parameters
    -
    -
      -
    • - url - required - The LDAP server to connect to. Examples: `ldap://ldap.myorg.com`, - `ldaps://ldap.myorg.com:636` -
    • -
    -
      -
    • - starttls - optional - If true, issues a `StartTLS` command after establishing an unencrypted - connection. Defaults to `false`. -
    • -
    -
      -
    • - tls_min_version - optional - Minimum TLS version to use. Accepted values are `tls10`, `tls11` or - `tls12`. Defaults to `tls12`. -
    • -
    -
      -
    • - tls_max_version - optional - Maximum TLS version to use. Accepted values are `tls10`, `tls11` or - `tls12`. Defaults to `tls12`. -
    • -
    -
      -
    • - insecure_tls - optional - If true, skips LDAP server SSL certificate verification - insecure, use - with caution! Defaults to `false`. -
    • -
    -
      -
    • - certificate - optional - CA certificate to use when verifying LDAP server certificate, must be - x509 PEM encoded. -
    • -
    -
      -
    • - binddn - optional - Distinguished name of object to bind when performing user search. - Example: `cn=vault,ou=Users,dc=example,dc=com` -
    • -
    -
      -
    • - bindpass - optional - Password to use along with `binddn` when performing user search. -
    • -
    -
      -
    • - userdn - optional - Base DN under which to perform user search. Example: - `ou=Users,dc=example,dc=com` -
    • -
    -
      -
    • - userattr - optional - Attribute on user attribute object matching the username passed when - authenticating. Examples: `sAMAccountName`, `cn`, `uid` -
    • -
    -
      -
    • - discoverdn - optional - Use anonymous bind to discover the bind DN of a user. Defaults to - `false`. -
    • -
    -
      -
    • - deny_null_bind - optional - This option prevents users from bypassing authentication when providing - an empty password. Defaults to `true`. -
    • -
    -
      -
    • - upndomain - optional - userPrincipalDomain used to construct the UPN string for the - authenticating user. The constructed UPN will appear as - `[username]@UPNDomain`. Example: `example.com`, which will cause - vault to bind as `username@example.com`. -
    • -
    -
      -
    • - groupfilter - optional - Go template used when constructing the group membership query. The - template can access the following context variables: - \[`UserDN`, `Username`\]. The default is `(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`, - which is compatible with several common directory schemas. To support - nested group resolution for Active Directory, instead use the following - query: `(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))`. -
    • -
    -
      -
    • - groupdn - optional - LDAP search base to use for group membership search. This can be the - root containing either groups or users. - Example: `ou=Groups,dc=example,dc=com` -
    • -
    -
      -
    • - groupattr - optional - LDAP attribute to follow on objects returned by `groupfilter` in order - to enumerate user group membership. Examples: for groupfilter queries - returning _group_ objects, use: `cn`. For queries returning _user_ - objects, use: `memberOf`. The default is `cn`. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -#### GET -
    -
    Description
    -
    - Retrieves the LDAP configuration. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/ldap/config`
    - -
    Parameters
    - None. - - -
    Returns
    -
    - - ```javascript - { - "auth": null, - "warnings": null, - "wrap_info": null, - "data": { - "binddn": "cn=vault,ou=Users,dc=example,dc=com", - "bindpass": "", - "certificate": "", - "deny_null_bind": true, - "discoverdn": false, - "groupattr": "cn", - "groupdn": "ou=Groups,dc=example,dc=com", - "groupfilter": "(\u0026(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))", - "insecure_tls": false, - "starttls": false, - "tls_max_version": "tls12", - "tls_min_version": "tls12", - "upndomain": "", - "url": "ldaps://ldap.myorg.com:636", - "userattr": "samaccountname", - "userdn": "ou=Users,dc=example,dc=com" - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" - } - ``` - -
    -
    - -### /auth/ldap/groups -#### LIST -
    -
    Description
    -
    - Lists the existing groups in the backend. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/ldap/groups` (LIST) or `/auth/ldap/groups?list=true` (GET)
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript - { - "auth": null, - "warnings": null, - "wrap_info": null, - "data": { - "keys": [ - "scientists", - "engineers" - ] - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" - } - ``` - -
    -
    - -### /auth/ldap/groups/[group_name] -#### POST -
    -
    Description
    -
    - Creates and updates the LDAP group policy associations. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/ldap/groups/[group_name]`
    - -
    Parameters
    -
    -
      -
    • - policies - required - Comma-separated list of policies associated to the group. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -#### GET -
    -
    Description
    -
    - Reads the LDAP group policy mappings. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/ldap/groups/[group_name]`
    - -
    Returns
    -
    - - ```javascript - { - "data": { - "policies": "admin,default" - }, - "renewable": false, - "lease_id": "" - "lease_duration": 0, - "warnings": null - } - ``` - -
    -
    - -#### DELETE -
    -
    Description
    -
    - Deletes an LDAP group. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/ldap/groups/[group_name]`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - -### /auth/ldap/users -#### LIST -
    -
    Description
    -
    - Lists the existing users in the backend. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/ldap/users` (LIST) or `/auth/ldap/users?list=true` (GET)
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript - { - "auth": null, - "warnings": null, - "wrap_info": null, - "data": { - "keys": [ - "tesla" - ] - }, - "lease_duration": 0, - "renewable": false, - "lease_id": "" - } - ``` - -
    -
    - -### /auth/ldap/users/[username] -#### POST -
    -
    Description
    -
    - Creates and updates the LDAP user group and policy mappings. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/ldap/users/[username]`
    - -
    Parameters
    -
    -
      -
    • - groups - optional - Comma-separated list of groups associated to the user. -
    • -
    -
      -
    • - policies - optional - Comma-separated list of policies associated to the user. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -#### GET -
    -
    Description
    -
    - Reads the LDAP user. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/ldap/users/[username]`
    - -
    Returns
    -
    - - ```javascript - { - "data": { - "policies": "admins,default", - "groups": "" - }, - "renewable": false, - "lease_id": "" - "lease_duration": 0, - "warnings": null - } - ``` - -
    -
    - -#### DELETE -
    -
    Description
    -
    - Deletes an LDAP user. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/ldap/users/[username]`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - -### /auth/ldap/login/[username] -#### POST -
    -
    Description
    -
    - Login with the username and password. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/ldap/login/[username]`
    - -
    Parameters
    -
    -
      -
    • - password - required - Password for the user. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "lease_id": "", - "renewable": false, - "lease_duration": 0, - "data": null, - "auth": { - "client_token": "c4f280f6-fdb2-18eb-89d3-589e2e834cdb", - "policies": [ - "admins", - "default" - ], - "metadata": { - "username": "mitchellh" - }, - "lease_duration": 0, - "renewable": false - } - } - ``` - -
    -
    +The LDAP authentication backend has a full HTTP API. Please see the +[LDAP auth backend API](/api/auth/ldap/index.html) for more +details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/mfa.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/mfa.html.md index 41f4bfe..46617e1 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/auth/mfa.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/mfa.html.md @@ -12,7 +12,7 @@ Several authentication backends support multi-factor authentication (MFA). Once a backend, users are required to provide additional verification, like a one-time passcode, before being authenticated. -Currently, the "ldap" and "userpass" backends support MFA. +Currently, the "ldap", "radius" and "userpass" backends support MFA. ## Authentication diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/okta.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/okta.html.md index 6683910..539c351 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/auth/okta.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/okta.html.md @@ -87,9 +87,13 @@ Configuration is written to `auth/okta/config`. ### Connection parameters -* `organization` (string, required) - The Okta organization. This will be the first part of the url `https://XXX.okta.com` url. -* `token` (string, optional) - The Okta API token. This is required to query Okta for user group membership. If this is not supplied only locally configured groups will be enabled. This can be generated from http://developer.okta.com/docs/api/getting_started/getting_a_token.html +* `org_name` (string, required) - The Okta organization. This will be the first part of the url `https://XXX.okta.com` url. +* `api_token` (string, optional) - The Okta API token. This is required to query Okta for user group membership. 
If this is not supplied only locally configured groups will be enabled. This can be generated from http://developer.okta.com/docs/api/getting_started/getting_a_token.html * `base_url` (string, optional) - The Okta url. Examples: `oktapreview.com`, The default is `okta.com` +* `max_ttl` (string, optional) - Maximum duration after which authentication will be expired. + Either number of seconds or in a format parsable by Go's [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) +* `ttl` (string, optional) - Duration after which authentication will be expired. + Either number of seconds or in a format parsable by Go's [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) Use `vault path-help` for more details. @@ -102,7 +106,7 @@ Use `vault path-help` for more details. ``` $ vault write auth/okta/config \ - organization="XXXTest" + org_name="XXXTest" ... ``` @@ -114,8 +118,8 @@ $ vault write auth/okta/config \ ``` $ vault write auth/okta/config base_url="oktapreview.com" \ - organization="dev-123456" \ - token="00KzlTNCqDf0enpQKYSAYUt88KHqXax6dT11xEZz_g" + org_name="dev-123456" \ + api_token="00KzlTNCqDf0enpQKYSAYUt88KHqXax6dT11xEZz_g" ... ``` @@ -157,3 +161,9 @@ Groups can only be pulled from Okta if an API token is configured via `token` ## Note on policy mapping It should be noted that user -> policy mapping (via group membership) happens at token creation time. And changes in group membership in Okta will not affect tokens that have already been provisioned. To see these changes, old tokens should be revoked and the user should be asked to reauthenticate. + +## API + +The Okta authentication backend has a full HTTP API. Please see the +[Okta Auth API](/api/auth/okta/index.html) for more +details. 
\ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/radius.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/radius.html.md index 05a62c4..4cdc580 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/auth/radius.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/radius.html.md @@ -108,258 +108,7 @@ This is done through the `unregistered_user_policies` configuration parameter. ## API -### /auth/radius/config -#### POST - -
    -
    Description
    -
    - Configures the connection parameters and shared secret used to communicate with RADIUS -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/radius/config`
    - -
    Parameters
    -
    -
      -
    • - host - required - The RADIUS server to connect to. Examples: `radius.myorg.com`, `127.0.0.1` -
    • -
    • - port - optional - The UDP port where the RADIUS server is listening on. Default is 1812 -
    • -
    • - secret - required - The RADIUS shared secret -
    • -
    • - unregistered_user_policies - optional - A comma-separated list of policies to be granted to unregistered users -
    • -
    • - dial_timeout - optional - Number of seconds to wait for a backend connection before timing out. Default is 10 -
    • -
    • - read_timeout - optional - Number of seconds to wait for a backend response before timing out. Default is 10 -
    • -
    • - nas_port - optional - The NAS-Port attribute of the RADIUS request. Default is 10 -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -### /auth/radius/users/[username] -#### POST - -
    -
    Description
    -
    - Registers a new user and maps a set of policies to it. - This path honors the distinction between the `create` and `update` capabilities inside ACL policies. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/radius/users/`
    - -
    Parameters
    -
    -
      -
    • - username - required - Username for this user. -
    • -
    -
    -
    -
      -
    • - policies - optional - Comma-separated list of policies. - If set to empty string, only the `default` policy will be applicable to the user. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -#### GET -
    -
    Description
    -
    - Reads the properties of an existing username. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/radius/users/[username]`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6", - "lease_id": "", - "lease_duration": 0, - "renewable": false, - "data": { - "policies": "default,dev" - }, - "warnings": null -} -``` - -
    -
    - - -#### DELETE -
    -
    Description
    -
    - Deletes an existing username from the backend. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/radius/users/[username]`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - -### /auth/radius/login -### /auth/radius/login/[username] -#### POST -
    -
    Description
    -
    - Login with the username and password. -
    - -
    Method
    -
    POST
    - -
    URLS
    -
    `/auth/radius/login`
    -
    `/auth/radius/login/[username]`
    - -
    Parameters
    -
    -
      -
    • - username - required - Username for the authenticating user. -
    • -
    • - password - required - Password for the authenticating user. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "lease_id": "", - "renewable": false, - "lease_duration": 0, - "data": null, - "warnings": null, - "auth": { - "client_token": "64d2a8f2-2a2f-5688-102b-e6088b76e344", - "accessor": "18bb8f89-826a-56ee-c65b-1736dc5ea27d", - "policies": ["default"], - "metadata": { - "username": "vishal" - }, - "lease_duration": 7200, - "renewable": true - } - } - ``` - -
    -
    - -### /auth/radius/users -#### LIST -
    -
    Description
    -
    -List the users registered with the backend. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/radius/users` (LIST) `/auth/radius/users?list=true` (GET)
    - -
    Parameters
    -
    -None -
    - -
    Returns
    -
    - - ```javascript -[ - "devuser", - "produser" -] - ``` - -
    -
    - +The RADIUS authentication backend has a full HTTP API. Please see the +[RADIUS Auth API](/api/auth/radius/index.html) for more +details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/token.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/token.html.md index 713a396..9718ad9 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/auth/token.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/token.html.md @@ -26,823 +26,20 @@ to tokens. ## Authentication -#### Via the CLI +### Via the CLI ``` $ vault auth ... ``` -#### Via the API +### Via the API The token is set directly as a header for the HTTP API. The name of the header should be "X-Vault-Token" and the value should be the token. ## API -### /auth/token/accessors -#### LIST - -
    -
    Description
    -
    - Lists token accessors. This requires `sudo` capability, and access to it - should be tightly controlled as the accessors can be used to revoke very - large numbers of tokens and their associated leases at once. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/token/accessors` (LIST) or `/auth/token/accessors?list=true` (GET)
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript - { - "data": { - "keys": ["476ea048-ded5-4d07-eeea-938c6b4e43ec", "bb00c093-b7d3-b0e9-69cc-c4d85081165b"] - } - } - ``` - -
    -
    - -### /auth/token/create -### /auth/token/create-orphan -### /auth/token/create/[role_name] -#### POST - -
    -
    Description
    -
    - Creates a new token. Certain options are only available when called by a - root token. If used via the `/auth/token/create-orphan` endpoint, a root - token is not required to create an orphan token (otherwise set with the - `no_parent` option). If used with a role name in the path, the token will - be created against the specified role name; this may override options set - during this call. -
    - -
    Method
    -
    POST
    - -
    URLs
    -
    `/auth/token/create`
    -
    `/auth/token/create-orphan`
    -
    `/auth/token/create/`
    - -
    Parameters
    -
    -
      -
    • - id - optional - The ID of the client token. Can only be specified by a root token. - Otherwise, the token ID is a randomly generated UUID. -
    • -
    • - policies - optional - A list of policies for the token. This must be a subset of the - policies belonging to the token making the request, unless root. - If not specified, defaults to all the policies of the calling token. -
    • -
    • - meta - optional - A map of string to string valued metadata. This is passed through - to the audit backends. -
    • -
    • - no_parent - optional - If true and set by a root caller, the token will not have the - parent token of the caller. This creates a token with no parent. -
    • -
    • - no_default_policy - optional - If true the `default` policy will not be contained in this token's - policy set. -
    • -
    • - renewable - optional - Set to `false` to disable the ability of the token to be renewed past - its initial TTL. Specifying `true`, or omitting this option, will allow - the token to be renewable up to the system/mount maximum TTL. -
    • -
    • - lease - optional - DEPRECATED; use "ttl" instead. -
    • -
    • - ttl - optional - The TTL period of the token, provided as "1h", where hour is - the largest suffix. If not provided, the token is valid for the - [default lease TTL](/docs/configuration/index.html), or - indefinitely if the root policy is used. -
    • -
    • - explicit_max_ttl - optional - If set, the token will have an explicit max TTL set upon it. This - maximum token TTL *cannot* be changed later, and unlike with normal - tokens, updates to the system/mount max TTL value will have no effect - at renewal time -- the token will never be able to be renewed or used - past the value set at issue time. -
    • -
    • - display_name - optional - The display name of the token. Defaults to "token". -
    • -
    • - num_uses - optional - The maximum uses for the given token. This can be used to create - a one-time-token or limited use token. Defaults to 0, which has - no limit to the number of uses. -
    • -
    • - period - optional - If specified, the token will be periodic; it will have no maximum TTL - (unless an "explicit-max-ttl" is also set) but every renewal will use - the given period. Requires a root/sudo token to use. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "auth": { - "client_token": "ABCD", - "policies": ["web", "stage"], - "metadata": {"user": "armon"}, - "lease_duration": 3600, - "renewable": true, - } - } - ``` - -
    -
    - -### /auth/token/lookup[/token] -#### GET - -
    -
    Description
    -
    - Returns information about the client token provided in the request path. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/token/lookup/`
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript - { - "data": { - "id": "ClientToken", - "policies": ["web", "stage"], - "path": "auth/github/login", - "meta": {"user": "armon", "organization": "hashicorp"}, - "display_name": "github-armon", - "num_uses": 0, - } - } - ``` - -
    -
    - -#### POST - -
    -
    Description
    -
    - Returns information about the client token provided in the request body. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/token/lookup`
    - -
    Parameters
    -
    -
      -
    • - token - required - Token to lookup. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "data": { - "id": "ClientToken", - "policies": ["web", "stage"], - "path": "auth/github/login", - "meta": {"user": "armon", "organization": "hashicorp"}, - "display_name": "github-armon", - "num_uses": 0, - } - } - ``` - -
    -
    - -### /auth/token/lookup-accessor[/accessor] -#### POST - -
    -
    Description
    -
    - Fetches the properties of the token associated with the accessor, except the token ID. - This is meant for purposes where there is no access to the token ID but there is a need - to fetch the properties of a token. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/token/lookup-accessor`
    - -
    Parameters
    -
    -
      -
    • - accessor - required - Accessor of the token to lookup. This can be part of the URL or the body. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "lease_id": "", - "renewable": false, - "lease_duration": 0, - "data": { - "creation_time": 1457533232, - "creation_ttl": 2764800, - "display_name": "token", - "id": "", - "meta": null, - "num_uses": 0, - "orphan": false, - "path": "auth/token/create", - "policies": ["default", "web"], - "ttl": 2591976 - }, - "warnings": null, - "auth": null - } - ``` - -
    -
    - -### /auth/token/lookup-self -#### GET - -
    -
    Description
    -
    - Returns information about the current client token. -
    - -
    Method
    -
    GET
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript - { - "data": { - "accessor": "REDACTED", - "creation_time": 1484093665, - "creation_ttl": 3600, - "display_name": "github-armon", - "explicit_max_ttl": 0, - "id": "ClientToken", - "meta": {"user": "armon", "organization": "hashicorp"}, - "num_uses": 0, - "orphan": true, - "path": "auth/github/login", - "policies": ["web", "stage"], - "renewable": true, - "ttl": 3655 - } - } - ``` -
    -
    - -### /auth/token/renew[/token] -#### POST - -
    -
    Description
    -
    - Renews a lease associated with a token. This is used to prevent the - expiration of a token, and the automatic revocation of it. Token - renewal is possible only if there is a lease associated with it. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/token/renew`
    - -
    Parameters
    -
    -
      -
    • - token - required - Token to renew. This can be part of the URL or the body. -
    • -
    -
    -
    -
      -
    • - increment - optional - An optional requested lease increment can be provided. This - increment may be ignored. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "auth": { - "client_token": "ABCD", - "policies": ["web", "stage"], - "metadata": {"user": "armon"}, - "lease_duration": 3600, - "renewable": true, - } - } - ``` - -
    -
    - -### /auth/token/renew-self -#### POST - -
    -
    Description
    -
    - Renews a lease associated with the calling token. This is used to prevent - the expiration of a token, and the automatic revocation of it. Token - renewal is possible only if there is a lease associated with it. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/token/renew-self`
    - -
    Parameters
    -
    -
      -
    • - increment - optional - An optional requested lease increment can be provided. This - increment may be ignored. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "auth": { - "client_token": "ABCD", - "policies": ["web", "stage"], - "metadata": {"user": "armon"}, - "lease_duration": 3600, - "renewable": true, - } - } - ``` - -
    -
    - -### /auth/token/revoke -#### POST - -
    -
    Description
    -
    - Revokes a token and all child tokens. When the token is revoked, - all secrets generated with it are also revoked. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/token/revoke`
    - -
    Parameters
    -
    -
      -
    • - token - required - Token to revoke. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -### /auth/token/revoke-accessor -#### POST - -
    -
    Description
    -
    - Revoke the token associated with the accessor and all the child tokens. - This is meant for purposes where there is no access to token ID but - there is need to revoke a token and its children. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/token/revoke-accessor`
    - -
    Parameters
    -
    -
      -
    • - accessor - required - Accessor of the token. -
    • -
    -
    - -
    Returns
    -
    - A `204` response code. -
    -
    - -### /auth/token/revoke-orphan[/token] -#### POST - -
    -
    Description
    -
    - Revokes a token but not its child tokens. When the token is revoked, all - secrets generated with it are also revoked. All child tokens are orphaned, - but can be revoked sub-sequently using `/auth/token/revoke/`. This is a - root-protected endpoint. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/token/revoke-orphan`
    - -
    Parameters
    -
    -
      -
    • - token - required - Token to revoke. This can be part of the URL or the body. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -### /auth/token/revoke-self/ -#### POST - -
    -
    Description
    -
    - Revokes the token used to call it and all child tokens. - When the token is revoked, all dynamic secrets generated - with it are also revoked. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/token/revoke-self`
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    `204` response code. -
    -
    - -### /auth/token/roles/[role_name] - -#### DELETE - -
    -
    Description
    -
    - Deletes the named role. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/token/roles/`
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - A `204` response code. -
    -
    - -#### GET - -
    -
    Description
    -
    - Fetches the named role configuration. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/token/roles/`
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript -{ - "request_id": "075a19cd-4e56-a3ca-d956-7609819831ec", - "lease_id": "", - "lease_duration": 0, - "renewable": false, - "data": { - "allowed_policies": [ - "dev" - ], - "disallowed_policies": [], - "explicit_max_ttl": 0, - "name": "nomad", - "orphan": false, - "path_suffix": "", - "period": 0, - "renewable": true - }, - "warnings": null -} - ``` - -
    -
    - -#### LIST - -
    -
    Description
    -
    - Lists available roles. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/token/roles` (LIST) or `/auth/token/roles?list=true` (GET)
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    - - ```javascript - { - "data": { - "keys": ["role1", "role2"] - } - } - ``` - -
    -
    - -#### POST - -
    -
    Description
    -
    - Creates (or replaces) the named role. Roles enforce specific behavior when - creating tokens that allow token functionality that is otherwise not - available or would require `sudo`/root privileges to access. Role - parameters, when set, override any provided options to the `create` - endpoints. The role name is also included in the token path, allowing all - tokens created against a role to be revoked using the `sys/revoke-prefix` - endpoint. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/token/roles/`
    - -
    Parameters
    -
    -
      -
    • - allowed_policies - optional - If set, tokens can be created with any subset of the policies in this - list, rather than the normal semantics of tokens being a subset of the - calling token's policies. The parameter is a comma-delimited string of - policy names. If at creation time `no_default_policy` is not set and - `"default"` is not contained in `disallowed_policies`, the `"default"` - policy will be added to the created token automatically. -
    • -
    • - disallowed_policies - optional - If set, successful token creation via this role will require that no - policies in the given list are requested. The parameter is a - comma-delimited string of policy names. Adding `"default"` to this list - will prevent `"default"` from being added automatically to created - tokens. -
    • -
    • - orphan - optional - If `true`, tokens created against this policy will be orphan tokens - (they will have no parent). As such, they will not be automatically - revoked by the revocation of any other token. -
    • -
    • - period - optional - If set, tokens created against this role will not have a maximum - lifetime. Instead, they will have a fixed TTL that is refreshed with - each renewal. So long as they continue to be renewed, they will never - expire. The parameter is an integer duration of seconds. Tokens issued - track updates to the role value; the new period takes effect upon next - renew. This cannot be used in conjunction with `explicit_max_ttl`. -
    • -
    • - renewable - optional - Set to `false` to disable the ability of token created against this - role to be renewed past their initial TTL. Defaults to `true`, which - allows tokens to be renewed up to the system/mount maximum TTL. -
    • -
    • - path_suffix - optional - If set, tokens created against this role will have the given suffix as - part of their path in addition to the role name. This can be useful in - certain scenarios, such as keeping the same role name in the future but - revoking all tokens created against it before some point in time. The - suffix can be changed, allowing new callers to have the new suffix as - part of their path, and then tokens with the old suffix can be revoked - via `sys/revoke-prefix`. -
    • -
    • - explicit_max_ttl - optional - If set, tokens created with this role have an explicit max TTL set upon - them. This maximum token TTL *cannot* be changed later, and unlike with - normal tokens, updates to the role or the system/mount max TTL value - will have no effect at renewal time -- the token will never be able to - be renewed or used past the value set at issue time. This cannot be - used in conjunction with `period`. -
    • -
    -
    - -
    Returns
    -
    - A `204` return code. -
    -
    - -### /auth/token/tidy -#### POST - -
    -
    Description
    -
    - Performs some maintenance tasks to clean up invalid entries that may remain - in the token store. Generally, running this is not needed unless upgrade - notes or support personnel suggest it. This may perform a lot of I/O to the - storage backend so should be used sparingly. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/token/tidy`
    - -
    Parameters
    -
    - None -
    - -
    Returns
    -
    `204` response code. -
    -
    - - +The Token authentication backend has a full HTTP API. Please see the +[Token auth backend API](/api/auth/token/index.html) for more +details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/auth/userpass.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/auth/userpass.html.md index efa15a8..91f30c3 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/auth/userpass.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/auth/userpass.html.md @@ -17,6 +17,9 @@ The username/password combinations are configured directly to the auth backend using the `users/` path. This backend cannot read usernames and passwords from an external source. +The backend lowercases all submitted usernames, e.g. `Mary` and `mary` are the +same entry. + ## Authentication #### Via the CLI @@ -95,303 +98,7 @@ necessary. ## API -### /auth/userpass/users/[username] -#### POST - -
    -
    Description
    -
    - Create a new user or update an existing user. - This path honors the distinction between the `create` and `update` capabilities inside ACL policies. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/userpass/users/`
    - -
    Parameters
    -
    -
      -
    • - username - required - Username for this user. -
    • -
    -
    -
    -
      -
    • - password - required - Password for this user. -
    • -
    -
    -
    -
      -
    • - policies - optional - Comma-separated list of policies. - If set to empty string, only the `default` policy will be applicable to the user. -
    • -
    -
    -
    -
      -
    • - ttl - optional - The lease duration which decides login expiration. -
    • -
    -
    -
    -
      -
    • - max_ttl - optional - Maximum duration after which login should expire. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -#### GET -
    -
    Description
    -
    - Reads the properties of an existing username. -
    - -
    Method
    -
    GET
    - -
    URL
    -
    `/auth/userpass/users/[username]`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    - -```javascript -{ - "request_id": "812229d7-a82e-0b20-c35b-81ce8c1b9fa6", - "lease_id": "", - "lease_duration": 0, - "renewable": false, - "data": { - "max_ttl": 0, - "policies": "default,dev", - "ttl": 0 - }, - "warnings": null -} -``` - -
    -
    - - -#### DELETE -
    -
    Description
    -
    - Deletes an existing username from the backend. -
    - -
    Method
    -
    DELETE
    - -
    URL
    -
    `/auth/userpass/users/[username]`
    - -
    Parameters
    -
    - None. -
    - -
    Returns
    -
    `204` response code. -
    -
    - - - - -### /auth/userpass/users/[username]/password -#### POST -
    -
    Description
    -
    - Update the password for an existing user. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/userpass/users//password`
    - -
    Parameters
    -
    -
      -
    • - username - required - Username for this user. -
    • -
    -
    -
    -
      -
    • - password - required - Password for this user. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - -### /auth/userpass/users/[username]/policies -#### POST -
    -
    Description
    -
    - Update the policies associated with an existing user. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/userpass/users//policies`
    - -
    Parameters
    -
    -
      -
    • - username - required - Username for this user. -
    • -
    -
    -
    -
      -
    • - policies - optional - Comma-separated list of policies. - If this is field is not supplied, the policies will be unchanged. - If set to empty string, only the `default` policy will be applicable to the user. -
    • -
    -
    - -
    Returns
    -
    `204` response code. -
    -
    - - -### /auth/userpass/login/[username] -#### POST -
    -
    Description
    -
    - Login with the username and password. -
    - -
    Method
    -
    POST
    - -
    URL
    -
    `/auth/userpass/login/`
    - -
    Parameters
    -
    -
      -
    • - password - required - Password for this user. -
    • -
    -
    - -
    Returns
    -
    - - ```javascript - { - "lease_id": "", - "renewable": false, - "lease_duration": 0, - "data": null, - "warnings": null, - "auth": { - "client_token": "64d2a8f2-2a2f-5688-102b-e6088b76e344", - "accessor": "18bb8f89-826a-56ee-c65b-1736dc5ea27d", - "policies": ["default"], - "metadata": { - "username": "vishal" - }, - "lease_duration": 7200, - "renewable": true - } - } - ``` - -
    -
    - -### /auth/userpass/users -#### LIST -
    -
    Description
    -
    -List the users registered with the backend. -
    - -
    Method
    -
    LIST/GET
    - -
    URL
    -
    `/auth/userpass/users` (LIST) `/auth/userpass/users?list=true` (GET)
    - -
    Parameters
    -
    -None -
    - -
    Returns
    -
    - - ```javascript -[ - "devuser", - "produser" -] - ``` - -
    -
    - +The Username & Password authentication backend has a full HTTP API. Please see the +[Userpass auth backend API](/api/auth/userpass/index.html) for more +details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/commands/environment.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/commands/environment.html.md index cd155bd..e19fc53 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/commands/environment.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/commands/environment.html.md @@ -43,6 +43,10 @@ The following table describes them: VAULT_CLIENT_KEY Path to an unencrypted PEM-encoded private key matching the client certificate. + + VAULT_CLIENT_TIMEOUT + Timeout variable for the vault client. Default value is 60 seconds. + VAULT_CLUSTER_ADDR The address that should be used for other cluster members to connect to this node when in High Availability mode. @@ -63,4 +67,9 @@ The following table describes them: VAULT_TLS_SERVER_NAME If set, use the given name as the SNI host when connecting via TLS. + + VAULT_MFA + (Enterprise Only) MFA credentials in the format **mfa_method_name[:key[=value]]** (items in `[]` are optional). Note that when using the environment variable, only one credential can be supplied. If a MFA method expects multiple credential values, or if there are multiple MFA methods specified on a path, then the CLI flag `-mfa` should be used. + + diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/commands/help.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/commands/help.html.md index 48f596b..408bbcd 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/commands/help.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/commands/help.html.md @@ -45,7 +45,7 @@ the paths it supports. $ vault path-help secret ## DESCRIPTION -The generic backend reads and writes arbitrary secrets to the backend. 
+The key/value backend reads and writes arbitrary secrets to the backend. The secrets are encrypted/decrypted by Vault: they are never stored unencrypted in the backend and the backend never has an opportunity to see the unencrypted value. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/commands/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/commands/index.html.md index 08e1069..297f1e8 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/commands/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/commands/index.html.md @@ -24,3 +24,19 @@ with the `-h` argument. The help output is very comprehensive, so we defer you to that for documentation. We've included some guides to the left of common interactions with the CLI. + +## Autocompletion + +The `vault` command features opt-in subcommand autocompletion that you can +enable for your shell with `vault -autocomplete-install`. After doing so, +you can invoke a new shell and use the feature. + +For example, assume a tab is typed at the end of each prompt line: + +``` +$ vault au +audit-disable audit-enable audit-list auth auth-disable auth-enable + +$ vault s +seal server ssh status step-down +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/auth.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/auth.html.md index 0c17161..899c821 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/auth.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/auth.html.md @@ -8,30 +8,43 @@ description: |- # Authentication -Before performing any operation with Vault, the connecting client must be -_authenticated_. Authentication is the process of verifying a person or -machine is who they say they are and assigning an identity to them. This -identity is then used when making requests with Vault. 
+Authentication in Vault is the process by which user or machine supplied +information is verified against an internal or external system. Vault supports +multiple [authentication backends](/docs/auth/index.html) including GitHub, +LDAP, AppRole, and more. Each authentication backend has a specific use case. -Authentication in Vault is pluggable via authentication backends. This -allows you to authenticate with Vault using a method that works best for your -organization. For example, you can authenticate using GitHub, certs, etc. +Before a client can interact with Vault, it must _authenticate_ against an +authentication backend. Upon authentication, a token is generated. This token is +conceptually similar to a session ID on a website. The token may have attached +policy, which is mapped at authentication time. This process is described in +detail in the [policies concepts](/docs/concepts/policies.html) documentation. ## Authentication Backends -There are many authentication backends available for Vault. They -are enabled using `vault auth-enable`. After they're enabled, you can -learn more about them using `vault path-help auth/`. For example, -if you enable GitHub, you can use `vault path-help auth/github` to learn more -about how to configure it and login. +Vault supports a number of authentication backends. Some backends are targeted +toward users while others are targeted toward machines. Most authentication +backends must be enabled before use. To enable an authentication backend: -Multiple authentication backends can be enabled, but only one is required -to gain authentication. It is not currently possible to force a user through -multiple authentication backends to gain access. +```sh +$ vault write sys/auth/my-auth type=userpass +``` -This allows you to enable human-friendly as well as machine-friendly -backends at the same time. For example, for humans you might use the -"github" auth backend, and for machines you might use the "approle" backend. 
+This mounts the "userpass" authentication backend at the path "my-auth". This +authentication will be accessible at the path "my-auth". Often you will see +authentications at the same path as their name, but this is not a requirement. + +To learn more about this authentication, use the built-in `path-help` command: + +```sh +$ vault path-help auth/my-auth +# ... +``` + +Vault supports multiple authentication backends simultaneously, and you can even +mount the same type of authentication backend at different paths. Only one +authentication is required to gain access to Vault, and it is not currently +possible to force a user through multiple authentication backends to gain +access, although some backends do support MFA. ## Tokens @@ -51,7 +64,7 @@ revoking tokens, and renewing tokens. This is all covered on the ## Authenticating -#### Via the CLI +### Via the CLI To authenticate with the CLI, `vault auth` is used. This supports many of the built-in authentication methods. For example, with GitHub: @@ -73,7 +86,7 @@ will be shown. If you're using a method that isn't supported via the CLI, then the API must be used. -#### Via the API +### Via the API API authentication is generally used for machine authentication. Each auth backend implements its own login endpoint. Use the `vault path-help` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/ha.html.markdown b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/ha.html.markdown index e400d2f..5e36dcf 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/ha.html.markdown +++ b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/ha.html.markdown @@ -74,8 +74,8 @@ there is an error performing the forwarding. As such, a redirect address is always required for all HA setups. 
Some HA data store drivers can autodetect the redirect address, but it is often -necessary to configure it manually via setting a value in the `backend` -configuration block (or `ha_backend` if using split data/HA mode). The key for +necessary to configure it manually via setting a value in the `storage` +configuration block (or `ha_storage` if using split data/HA mode). The key for this value is `redirect_addr` and the value can also be specified by the `VAULT_REDIRECT_ADDR` environment variable, which takes precedence. @@ -129,7 +129,7 @@ it will start cluster listeners, and when it becomes standby it will stop them. Similar to the `redirect_addr`, `cluster_addr` is the value that each node, if active, should advertise to the standbys to use for server-to-server -communications, and lives in the `backend` (or `ha_backend`) block. On each +communications, and lives in the `storage` (or `ha_storage`) block. On each node, this should be set to a host name or IP address that a standby can use to reach one of that node's `cluster_address` values set in the `listener` blocks, including port. (Note that this will always be forced to `https` since only TLS @@ -138,10 +138,10 @@ connections are used between servers.) This value can also be specified by the `VAULT_CLUSTER_ADDR` environment variable, which takes precedence. -## Backend Support +## Storage Support -Currently there are several backends that support high availability mode, -including Consul, ZooKeeper and etcd. These may change over time, and the +Currently there are several storage backends that support high availability +mode, including Consul, ZooKeeper and etcd. These may change over time, and the [configuration page](/docs/configuration/index.html) should be referenced. 
The Consul backend is the recommended HA backend, as it is used in production diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/lease.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/lease.html.md index 5b3bd83..44aefc9 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/lease.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/lease.html.md @@ -8,77 +8,76 @@ description: |- # Lease, Renew, and Revoke -With every secret and authentication token, Vault creates a _lease_: -metadata containing information such as a time duration, renewability, -and more. Vault promises that the data will be valid for the given -duration, or Time To Live (TTL). Once the lease is expired, Vault -can automatically revoke the data, and the consumer of the secret can -no longer be certain that it is valid. +With every dynamic secret and authentication token, Vault creates a _lease_: +metadata containing information such as a time duration, renewability, and +more. Vault promises that the data will be valid for the given duration, or +Time To Live (TTL). Once the lease is expired, Vault can automatically revoke +the data, and the consumer of the secret can no longer be certain that it is +valid. The benefit should be clear: consumers of secrets need to check in with Vault routinely to either renew the lease (if allowed) or request a replacement secret. This makes the Vault audit logs more valuable and also makes key rolling a lot easier. -All secrets in Vault are required to have a lease. Even if the data is +All dynamic secrets in Vault are required to have a lease. Even if the data is meant to be valid for eternity, a lease is required to force the consumer to check in routinely. -In addition to renewals, a lease can be _revoked_. When a lease is revoked, -it invalidates that secret immediately and prevents any further renewals. 
-For -[dynamic secrets](#), -the secrets themselves are often immediately disabled. For example, with -the -[AWS secret backend](/docs/secrets/aws/index.html), the access keys will -be deleted from AWS the moment a secret is revoked. This renders the access -keys invalid from that point forward. +In addition to renewals, a lease can be _revoked_. When a lease is revoked, it +invalidates that secret immediately and prevents any further renewals. For +example, with the [AWS secret backend](/docs/secrets/aws/index.html), the +access keys will be deleted from AWS the moment a secret is revoked. This +renders the access keys invalid from that point forward. -Revocation can happen manually via the API, via the `vault revoke` cli -command, or automatically by Vault. When a lease is expired, Vault will automatically revoke that lease. +Revocation can happen manually via the API, via the `vault revoke` cli command, +or automatically by Vault. When a lease is expired, Vault will automatically +revoke that lease. + +**Note**: The [Key/Value Backend](/docs/secrets/kv/index.html) which stores +arbitrary secrets does not issue leases. ## Lease IDs -When reading a secret, such as via `vault read`, Vault always returns -a `lease_id`. This is the ID used with commands such as `vault renew` and -`vault revoke` to manage the lease of the secret. +When reading a dynamic secret, such as via `vault read`, Vault always returns a +`lease_id`. This is the ID used with commands such as `vault renew` and `vault +revoke` to manage the lease of the secret. ## Lease Durations and Renewal -Along with the lease ID, a _lease duration_ can be read. The lease duration -is a Time To Live value: the time in seconds for which the lease is valid. -A consumer of this secret must renew the lease within that time. +Along with the lease ID, a _lease duration_ can be read. The lease duration is +a Time To Live value: the time in seconds for which the lease is valid. 
A +consumer of this secret must renew the lease within that time. -When renewing the lease, the user can request a specific amount of time -from now to extend the lease. For example: `vault renew my-lease-id 3600` -would request to extend the lease of "my-lease-id" by 1 hour (3600 seconds). +When renewing the lease, the user can request a specific amount of time from +now to extend the lease. For example: `vault renew my-lease-id 3600` would +request to extend the lease of "my-lease-id" by 1 hour (3600 seconds). -The requested increment is completely advisory. The backend in charge -of the secret can choose to completely ignore it. For most secrets, the -backend does its best to respect the increment, but often limits it to -ensure renewals every so often. +The requested increment is completely advisory. The backend in charge of the +secret can choose to completely ignore it. For most secrets, the backend does +its best to respect the increment, but often limits it to ensure renewals every +so often. -As a result, the return value of renews should be carefully inspected -to determine what the new lease is. +As a result, the return value of renewals should be carefully inspected to +determine what the new lease is. -Note: Prior to version 0.3, Vault documentation and help text did not -distinguish sufficiently between a _lease_ and a _lease duration_. -Starting with version 0.3, Vault will start migrating to the term _ttl_ to -describe lease durations, at least for user-facing text. As _lease duration_ -is still a legitimate (but more verbose) description, there are currently -no plans to change the JSON key used in responses, in order to retain +**Note**: Prior to version 0.3, Vault documentation and help text did not +distinguish sufficiently between a _lease_ and a _lease duration_. Starting +with version 0.3, Vault will start migrating to the term _ttl_ to describe +lease durations, at least for user-facing text. 
As _lease duration_ is still a +legitimate (but more verbose) description, there are currently no plans to +change the JSON key used in responses, in order to retain backwards-compatibility. ## Prefix-based Revocation -In addition to revoking a single secret, operators with proper access -control can revoke multiple secrets based on their lease ID prefix. +In addition to revoking a single secret, operators with proper access control +can revoke multiple secrets based on their lease ID prefix. -Lease IDs are structured in a way that their prefix is always the path -where the secret was requested from. This lets you revoke trees of -secrets. For example, to revoke all AWS access keys, you can do -`vault revoke -prefix aws/`. +Lease IDs are structured in a way that their prefix is always the path where +the secret was requested from. This lets you revoke trees of secrets. For +example, to revoke all AWS access keys, you can do `vault revoke -prefix aws/`. -This is very useful if there is an intrusion within a specific system: -all secrets of a specific backend or a certain configured backend can -be revoked quickly and easily. +This is very useful if there is an intrusion within a specific system: all +secrets of a specific backend or a certain configured backend can be revoked +quickly and easily. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/policies.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/policies.html.md index d9f87e7..3c17151 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/policies.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/policies.html.md @@ -6,148 +6,343 @@ description: |- Policies are how authorization is done in Vault, allowing you to restrict which parts of Vault a user can access. --- -# Access Control Policies +# Policies -After [authenticating](/docs/concepts/auth.html) with Vault, the -next step is authorization. 
This is the process of determining what -a user is allowed to do. Authorization is unified in Vault in the form -of _policies_. +Everything in Vault is path based, and policies are no exception. Policies +provide a declarative way to grant or forbid access to certain paths and +operations in Vault. This section discusses policy workflows and syntaxes. -Policies are [HCL](https://github.com/hashicorp/hcl) or JSON documents -that describe what parts of Vault a user is allowed to access. An example -of a policy is shown below: +Policies are **deny by default**, so an empty policy grants no permission in the +system. -```javascript -path "sys/*" { - policy = "deny" -} +## Policy-Authorization Workflow -path "secret/*" { - policy = "write" -} +Before a human or machine can gain access, an administrator must configure Vault +with an [authentication backend](/docs/concepts/auth.html). Authentication is +the process by which human or machine-supplied information is verified against +an internal or external system. +Consider the following diagram, which illustrates the steps a security team +would take to configure Vault to authenticate using a corporate LDAP or +ActiveDirectory installation. Even though this example uses LDAP, the concept +applies to all authentication backends. + +[![Vault Auth Workflow](/assets/images/vault-policy-workflow.svg)](/assets/images/vault-policy-workflow.svg) + +1. The security team configures Vault to connect to an authentication backend. +This configuration varies by authentication backend. In the case of LDAP, Vault +needs to know the address of the LDAP server and whether to connect using TLS. +It is important to note that Vault does not store a copy of the LDAP database - +Vault will delegate the authentication to the authentication backend. + +1. The security team authors a policy (or uses an existing policy) which grants +access to paths in Vault. Policies are written in HCL in your editor of +preference and saved to disk. + +1. 
The policy's contents are uploaded and store in Vault and referenced by name. +You can think of the policy's name as a pointer or symlink to its set of rules. + +1. Most importantly, the security team maps data in the authentication backend to a +policy. For example, the security team might create mappings like: + + > Members of the OU group "dev" map to the Vault policy named "readonly-dev". + + or + + > Members of the OU group "ops" map to the Vault policies "admin" and "auditor". + +Now Vault has an internal mapping between a backend authentication system and +internal policy. When a user authenticates to Vault, the actual authentication +is delegated to the authentication backend. As a user, the flow looks like: + +[![Vault Auth Workflow](/assets/images/vault-auth-workflow.svg)](/assets/images/vault-auth-workflow.svg) + +1. A user attempts to authenticate to Vault using their LDAP credentials, +providing Vault with their LDAP username and password. + +1. Vault establishes a connection to LDAP and asks the LDAP server to verify the +given credentials. Assuming this is successful, the LDAP server returns the +information about the user, including the OU groups. + +1. Vault maps the result from the LDAP server to policies inside Vault using the +mapping configured by the security team in the previous section. Vault then +generates a token and attaches the matching policies. + +1. Vault returns the token to the user. This token has the correct policies +assigned, as dictated by the mapping configuration that was setup by the +security team in advance. + +The user then uses this Vault token for future operations. If the user performs +the authentication steps again, they will get a _new_ token. The token will have +the same permissions, but the actual token will be different. Authenticating a +second time does not invalidate the original token. 
+ +## Policy Syntax + +Policies are written in [HCL][hcl] or JSON and describe which paths in Vault a +user or machine is allowed to access. + +[hcl]: https://github.com/hashicorp/hcl + +Here is a very simple policy which grants read capabilities to the path +"secret/foo": + +```ruby path "secret/foo" { - policy = "read" - capabilities = ["create", "sudo"] + capabilities = ["read"] +} +``` + +When this policy is assigned to a token, the token can read from `"secret/foo"`. +However, the token could not update or delete `"secret/foo"`, since the +capabilities do not allow it. Because policies are **deny by default**, the +token would have no other access in Vault. + +Here is a more detailed policy, and it is documented inline: + +```ruby +# This section grants all access on "secret/*". Further restrictions can be +# applied to this broad policy, as shown below. +path "secret/*" { + capabilities = ["create", "read", "update", "delete", "list"] } +# Even though we allowed secret/*, this line explicitly denies +# secret/super-secret. This takes precedence. path "secret/super-secret" { capabilities = ["deny"] } -path "secret/bar" { +# Policies can also specify allowed and disallowed parameters. Here the key +# "secret/restricted" can only contain "foo" (any value) and "bar" (one of "zip" +# or "zap"). +path "secret/restricted" { capabilities = ["create"] allowed_parameters = { - "*" = [] - } - denied_parameters = { - "foo" = ["bar"] + "foo" = [] + "bar" = ["zip", "zap"] } } ``` -Policies use path based matching to apply rules. A policy may be an exact -match, or might be a glob pattern which uses a prefix. Vault operates in a -whitelisting mode, so if a path isn't explicitly allowed, Vault will reject -access to it. This works well due to Vault's architecture of being like a -filesystem: everything has a path associated with it, including the core -configuration mechanism under "sys". +Policies use path-based matching to test the set of capabilities against a +request. 
A policy `path` may specify an exact path to match, or it could specify
+a glob pattern which instructs Vault to use a prefix match:
 
-~> Policy paths are matched using the most specific defined policy. This may
-be an exact match or the longest-prefix match of a glob. This means if you
-define a policy for `"secret/foo*"`, the policy would also match `"secret/foobar"`.
-The glob character is only supported at the end of the path specification.
+```ruby
+# Permit reading only "secret/foo". An attached token cannot read "secret/food"
+# or "secret/foo/bar".
+path "secret/foo" {
+  capabilities = ["read"]
+}
 
-## Capabilities and Policies
+# Permit reading everything under "secret/bar". An attached token could read
+# "secret/bar/zip", "secret/bar/zip/zap", but not "secret/bars/zip".
+path "secret/bar/*" {
+  capabilities = ["read"]
+}
 
-Paths have an associated set of capabilities that provide fine-grained control
-over operations. The capabilities are:
+# Permit reading everything prefixed with "zip-". An attached token could read
+# "secret/zip-zap" or "secret/zip-zap/zong", but not "secret/zip/zap".
+path "secret/zip-*" {
+  capabilities = ["read"]
+}
+```
 
- * `create` - Create a value at a path. (At present, few parts of Vault
-   distinguish between `create` and `update`, so most operations require
-   `update`. Parts of Vault that provide such a distinction, such as
-   the `generic` backend, are noted in documentation.)
+Vault's architecture is similar to a filesystem. Every action in Vault has a
+corresponding path and capability - even Vault's internal core configuration
+endpoints live under the "sys/" path. Policies define access to these paths and
+capabilities, which controls a token's access to credentials in Vault.
 
- * `read` - Read the value at a path.
+~> Policy paths are matched using the **most specific path match**. This may be
+an exact match or the longest-prefix match of a glob. 
This means if you define a
+policy for `"secret/foo*"`, the policy would also match `"secret/foobar"`.
 
- * `update` - Change the value at a path. In most parts of Vault, this also
-   includes the ability to create the initial value at the path.
+!> The glob character is only supported as the **last character of the path**,
+and **is not a regular expression**!
 
- * `delete` - Delete the value at a path.
+### Capabilities
 
- * `list` - List key names at a path. Note that the keys returned by a
-   `list` operation are *not* filtered by policies. Do not encode sensitive
-   information in key names.
+Each path must define one or more capabilities which provide fine-grained
+control over permitted (or denied) operations. As shown in the examples above,
+capabilities are always specified as a list of strings, even if there is only
+one capability. The list of capabilities is:
 
- * `sudo` - Gain access to paths that are _root-protected_. This is _additive_
-   to other capabilities, so a path that requires `sudo` access will also
-   require `read`, `update`, etc. as appropriate.
+~> In the list below, the associated HTTP verbs are shown in parentheses next to
+the capability. When authoring policy, it is usually helpful to look at the HTTP
+API documentation for the paths and HTTP verbs and map them back onto
+capabilities. While the mapping is not strictly 1:1, they are often very
+similarly matched.
 
- * `deny` - No access allowed. This always takes precedence regardless of any
+  * `create` (`POST/PUT`) - Allows creating data at the given path. Very few
+    parts of Vault distinguish between `create` and `update`, so most operations
+    require both `create` and `update` capabilities. Parts of Vault that
+    provide such a distinction are noted in documentation.
+
+  * `read` (`GET`) - Allows reading the data at the given path.
+
+  * `update` (`POST/PUT`) - Allows changing the data at the given path. 
In most
+    parts of Vault, this implicitly includes the ability to create the initial
+    value at the path.
+
+  * `delete` (`DELETE`) - Allows deleting the data at the given path.
+
+  * `list` (`LIST`) - Allows listing values at the given path. Note that the
+    keys returned by a `list` operation are *not* filtered by policies. Do not
+    encode sensitive information in key names. Not all backends support listing.
+
+In addition to the standard set, there are some capabilities that do not map to
+HTTP verbs.
+
+  * `sudo` - Allows access to paths that are _root-protected_. Tokens are not
+    permitted to interact with these paths unless they have the `sudo`
+    capability (in addition to the other necessary capabilities for performing
+    an operation against that path, such as `read` or `delete`).
+
+    For example, modifying the audit log backends requires a token with `sudo`
+    privileges.
+
+  * `deny` - Disallows access. This always takes precedence regardless of any
    other defined capabilities, including `sudo`.
 
-The only non-obvious capability is `sudo`. Some routes within Vault and mounted
-backends are marked as _root-protected_ paths. Clients aren't allowed to access
-root paths unless they are a root user (have the special policy "root" attached
-to their token) or have access to that path with the `sudo` capability (in
-addition to the other necessary capabilities for performing an operation
-against that path, such as `read` or `delete`).
-
-For example, modifying the audit log backends is done via root paths.
-Only root or `sudo` privilege users are allowed to do this.
-
-Prior to Vault 0.5, the `policy` keyword was used per path rather than a set of
-`capabilities`. In Vault 0.5+ these are still supported as shorthand and to
-maintain backwards compatibility, but internally they map to a set of
-capabilities. 
These mappings are as follows: - - * `deny` - `["deny"]` - - * `sudo` - `["create", "read", "update", "delete", "list", "sudo"]` - - * `write` - `["create", "read", "update", "delete", "list"]` - - * `read` - `["read", "list"]` +~> Note that capabilities usually map to the HTTP verb, not the underlying +action taken. This can be a common source of confusion. Generating database +credentials _creates_ database credentials, but the HTTP request is a GET which +corresponds to a `read` capability. Thus, to grant access to generate database +credentials, the policy would grant `read` access on the appropriate path. ## Fine-Grained Control -There are a few optional fields that allow for fine-grained control over client -behavior on a given path. The capabilities associated with this path take -precedence over permissions on parameters. +In addition to the standard set of capabilities, Vault offers finer-grained +control over permissions at a given path. The capabilities associated with a +path take precedence over permissions on parameters. -### Allowed and Disallowed Parameters +### Allowed and Denied Parameters -These parameters allow the administrator to restrict the keys (and optionally -values) that a user is allowed to specify when calling a path. +In Vault, data is represented as `key=value` pairs. Vault policies can +optionally further restrict paths based on the keys and data at those keys when +evaluating the permissions for a path. The optional finer-grained control +options are: - * `allowed_parameters` - A map of keys to an array of values that acts as a - whitelist. Setting a key with an `[]` value will allow changes to - parameters with that name. Setting a key with a populated value array (e.g. - `["foo", "bar"]`, `[3600, 7200]` or `[true]` will allow that parameter to - only be set to one of the values in the array. 
If any keys exist in the - `allowed_parameters` object all keys not specified will be denied unless - there the key `"*"` is set (mapping to an empty array), which will allow - all other parameters to be modified; parameters with specific values will - still be restricted to those values. - * `denied_parameters` - A map of keys to an array of values that acts as a - blacklist, and any parameter set here takes precedence over - `allowed_parameters`. Setting to "*" will deny any parameter (so only calls - made without specifying any parameters will be allowed). Otherwise setting - a key with an `[]` value will deny any changes to parameters with that - name. Setting a key with a populated value array will deny any attempt to - set a parameter with that name and value. If keys exist in the - `denied_parameters` object all keys not specified will be allowed (unless - `allowed_parameters` is also set, in which case normal rules will apply). + * `allowed_parameters` - Whitelists a list of keys and values that are + permitted on the given path. -String values inside a populated value array support prefix/suffix globbing. -Globbing is enabled by prepending or appending a `*` to the value (e.g. -`["*foo", "bar*"]` would match `"...foo"` and `"bar..."`). + * Setting a parameter with a value of the empty list allows the parameter to + contain any value. -### Required Minimum/Maximum Response Wrapping TTLs + ```ruby + # This allows the user to create "secret/foo" with a parameter named + # "bar". It cannot contain any other parameters, but "bar" can contain + # any value. + path "secret/foo" { + capabilities = ["create"] + allowed_parameters = { + "bar" = [] + } + } + ``` + + * Setting a parameter with a value of a populated list allows the parameter + to contain only those values. + + ```ruby + # This allows the user to create "secret/foo" with a parameter named + # "bar". It cannot contain any other parameters, and "bar" can only + # contain the values "zip" or "zap". 
+ path "secret/foo" { + capabilities = ["create"] + allowed_parameters = { + "bar" = ["zip", "zap"] + } + } + ``` + + * If any keys are specified, all non-specified parameters will be denied + unless there the parameter `"*"` is set to an empty array, which will + allow all other parameters to be modified. Parameters with specific values + will still be restricted to those values. + + ```ruby + # This allows the user to create "secret/foo" with a parameter named + # "bar". The parameter "bar" can only contain the values "zip" or "zap", + # but any other parameters may be created with any value. + path "secret/foo" { + capabilities = ["create"] + allowed_parameters = { + "bar" = ["zip", "zap"] + "*" = [] + } + } + ``` + + * `denied_parameters` - Blacklists a list of parameter and values. Any values + specified here take precedence over `allowed_parameters`. + + * Setting a parameter with a value of the empty list denies any changes to + that parameter. + + ```ruby + # This allows the user to create "secret/foo" with any parameters not + # named "bar". + path "secret/foo" { + capabilities = ["create"] + denied_parameters = { + "bar" = [] + } + } + ``` + + * Setting a parameter with a value of a populated list denies any parameter + containing those values. + + ```ruby + # This allows the user to create "secret/foo" with a parameter named + # "bar". It can contain any other parameters, but "bar" cannot contain + # the values "zip" or "zap". + path "secret/foo" { + capabilities = ["create"] + denied_parameters = { + "bar" = ["zip", "zap"] + } + } + ``` + + * Setting to `"*"` will deny any parameter. + + ```ruby + # This allows the user to create "secret/foo", but it cannot have any + # parameters. + path "secret/foo" { + capabilities = ["create"] + denied_parameters = { + "*" = [] + } + } + ``` + + * If any parameters are specified, all non-specified parameters are allowed, + unless `allowed_parameters` is also set, in which case normal rules apply. 
+
+Parameter values also support prefix/suffix globbing. Globbing is enabled by
+prepending or appending a splat (`*`) to the value:
+
+```ruby
+# Allow any parameter as long as the value starts with "foo-*".
+path "secret/foo" {
+  capabilities = ["create"]
+  allowed_parameters = {
+    "*" = ["foo-*"]
+  }
+}
+```
+
+### Required Response Wrapping TTLs
 
 These parameters can be used to set minimums/maximums on TTLs set by clients
 when requesting that a response be
-[wrapped](/docs/concepts/response-wrapping.html), with a granularity of a second. These can either be specified as a number of seconds or a string with a `s`, `m`, or `h` suffix indicating seconds, minutes, and hours respectively.
+[wrapped](/docs/concepts/response-wrapping.html), with a granularity of a
+second. These can either be specified as a number of seconds or a string with a
+`s`, `m`, or `h` suffix indicating seconds, minutes, and hours respectively.
 
 In practice, setting a minimum TTL of one second effectively makes response
 wrapping mandatory for a particular path.
@@ -157,39 +352,217 @@ wrapping mandatory for a particular path.
 effectively makes response wrapping mandatory for a particular path. It can
 also be used to ensure that the TTL is not too low, leading to end targets
 being unable to unwrap before the token expires.
+
  * `max_wrapping_ttl` - The maximum allowed TTL that clients can specify for a
    wrapped response.
 
 If both are specified, the minimum value must be less than the maximum. In
-addition, if paths are merged from different stanzas, the lowest value
-specified for each is the value that will result, in line with the idea of
-keeping token lifetimes as short as possible.
+addition, if paths are merged from different stanzas, the lowest value specified
+for each is the value that will result, in line with the idea of keeping token
+lifetimes as short as possible. 
-## Root Policy +## Builtin Policies -The "root" policy is a special policy that can not be modified or removed. -Any user associated with the "root" policy becomes a root user. A root -user can do _anything_ within Vault. +Vault has two built-in policies: `default` and `root`. This section describes +the two builtin policies. -There always exists at least one root user (associated with the token -when initializing a new server). After this root user, it is recommended -to create more strictly controlled users. The original root token should -be protected accordingly. +### Default Policy + +The `default` policy is a builtin Vault policy that cannot be modified or +removed. By default, it is attached to all tokens, but may be explicitly +detached at creation time. The policy contains basic functionality such as the +ability for the token to lookup data about itself and to use its cubbyhole data. + +To view all permissions granted by the default policy on your Vault +installation, run: + +```sh +$ vault read sys/policy/default +``` + +To disable attachment of the default policy: + +```sh +$ vault token-create -no-default-policy +``` + +or via the API: + +```sh +$ curl \ + --request POST \ + --header "X-Vault-Token: ..." \ + --data '{"no_default_policy": "true"}' \ + https://vault.hashicorp.rocks/v1/auth/token/create +``` + +### Root Policy + +The `root` policy is a builtin Vault policy that can not be modified or removed. +Any user associated with this policy becomes a root user. A root user can do +_anything_ within Vault. As such, it is **highly recommended** that you revoke +any root tokens before running Vault in production. + +When a Vault server is first initialized, there always exists one root user. +This user is used to do the initial configuration and setup of Vault. After +configured, the initial root token should be revoked and more strictly +controlled users and authentication should be used. 
+
+To revoke a root token, run:
+
+```sh
+$ vault token-revoke "<token>"
+```
+
+or via the API:
+
+```sh
+$ curl \
+    --request POST \
+    --header "X-Vault-Token: ..." \
+    --data '{"token": "<token>"}' \
+    https://vault.hashicorp.rocks/v1/auth/token/revoke
+```
+
+For more information, please read:
+
+- [Production Hardening](/guides/production.html)
+- [Generating a Root Token](/guides/generate-root.html)
 
 ## Managing Policies
 
-Policy management can be done via the API or CLI. The CLI commands are
-`vault policies` and `vault policy-write`. Please see the help associated
-with these commands for more information. They are very easy to use.
+Policies are authored (written) in your editor of choice. They can be authored
+in HCL or JSON, and the syntax is described in detail above. Once saved,
+policies must be uploaded to Vault before they can be used.
+
+### Listing Policies
+
+To list all registered policies in Vault:
+
+```sh
+$ vault read sys/policy
+```
+
+or via the API:
+
+```sh
+$ curl \
+    --header "X-Vault-Token: ..." \
+    https://vault.hashicorp.rocks/v1/sys/policy
+```
+
+~> You may also see the CLI command `vault policies`. This is a convenience
+wrapper around reading the sys endpoint directly. It provides the same
+functionality but formats the output in a special manner.
+
+### Creating Policies
+
+Policies may be created (uploaded) via the CLI or via the API. To create a new
+policy in Vault:
+
+```sh
+$ vault write sys/policy/my-policy rules=@my-policy.hcl
+```
+
+-> The `@` tells Vault to read from a file on disk. In the example above, Vault
+will read the contents of `my-policy.hcl` in the current working directory into
+the value for that parameter.
+
+or via the API:
+
+```sh
+$ curl \
+    --request POST \
+    --header "X-Vault-Token: ..." \
+    --data 'path "..." {} \'
+    https://vault.hashicorp.rocks/v1/sys/policy/my-policy
+```
+
+In both examples, the name of the policy is "my-policy". You can think of this
+name as a pointer or symlink to the policy ACLs. 
Tokens are attached to policies by
+name, which are then mapped to the set of rules corresponding to that name.
+
+### Updating Policies
+
+Existing policies may be updated to change permissions via the CLI or via the
+API. To update an existing policy in Vault, follow the same steps as creating a
+policy, but use an existing policy name:
+
+```sh
+$ vault write sys/policy/my-existing-policy rules=@updated-policy.json
+```
+
+or via the API:
+
+```sh
+$ curl \
+    --request POST \
+    --header "X-Vault-Token: ..." \
+    --data 'path "..." {} \'
+    https://vault.hashicorp.rocks/v1/sys/policy/my-existing-policy
+```
+
+### Deleting Policies
+
+Existing policies may be deleted via the CLI or API. To delete a policy:
+
+```sh
+$ vault delete sys/policy/my-policy
+```
+
+or via the API:
+
+```sh
+$ curl \
+    --request DELETE \
+    --header "X-Vault-Token: ..." \
+    https://vault.hashicorp.rocks/v1/sys/policy/my-policy
+```
+
+This is an idempotent operation. Vault will not return an error when deleting a
+policy that does not exist.
 
 ## Associating Policies
 
-To associate a policy with a user, you must consult the documentation for
-the authentication backend you're using.
+Vault can automatically associate a set of policies to a token based on an
+authorization. This configuration varies significantly between authentication
+backends. For simplicity, this example will use Vault's built-in userpass
+authentication backend.
 
-For tokens, they are associated at creation time with `vault token-create`
-and the `-policy` flags. Child tokens can be associated with a subset of
-a parent's policies. Root users can assign any policies.
+A Vault administrator or someone from the security team would create the user in
+Vault with a list of associated policies:
+
+```sh
+$ vault write auth/userpass/users/sethvargo \
+    password="s3cr3t!" 
\
+    policies="dev-readonly,logs"
+```
+
+This creates an authentication mapping to the policy such that, when the user
+authenticates successfully to Vault, they will be given a token which has the
+list of policies attached.
+
+The user wishing to authenticate would run:
+
+```sh
+$ vault auth -method="userpass" username="sethvargo"
+Password (will be hidden): ...
+```
+
+If the provided information is correct, Vault will generate a token, assign the
+list of configured policies to the token, and return that token to the
+authenticated user.
+
+### Tokens
+
+Tokens are associated with their policies at creation time. For example:
+
+```sh
+$ vault token-create -policy=dev-readonly,logs
+```
+
+Child tokens can be associated with a subset of a parent's policies. Root users
+can assign any policies.
 
 There is no way to modify the policies associated with a token once the token
 has been issued. The token must be revoked and a new one acquired to receive a
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/response-wrapping.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/response-wrapping.html.md
index be1e118..373961a 100644
--- a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/response-wrapping.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/response-wrapping.html.md
@@ -8,6 +8,11 @@ description: |-
 
 # Response Wrapping
 
+_Note_: Some of this information relies on features of response-wrapping tokens
+introduced in Vault 0.8 and may not be available in earlier releases.
+
+## Overview
+
 In many Vault deployments, clients can access Vault directly and consume
 returned secrets. In other situations, it may make sense to or be desired to
 separate privileges such that one trusted entity is responsible for interacting
@@ -15,45 +20,155 @@ with most of the Vault API and passing secrets to the end consumer. 
However, the more relays a secret travels through, the more possibilities for accidental disclosure, especially if the secret is being transmitted in -plaintext. +plaintext. For instance, you may wish to get a TLS private key to a machine +that has been cold-booted, but since you do not want to store a decryption key +in persistent storage, you cannot encrypt this key in transit. -In Vault 0.3 the -[`cubbyhole`](/docs/secrets/cubbyhole/index.html) -backend was introduced, providing storage scoped to a single token. The -[Cubbyhole Principles blog -post](https://www.hashicorp.com/blog/vault-cubbyhole-principles.html) described -how this, along with the limited-use and time-to-live features of Vault tokens, -could be used to securely authenticate a Vault client in such a way that the -final Vault token was only readable by the end consumer, and malfeasance could -be detected. The major downside to this operation was the need to write -programs to perform this wrapping (and by extension, those programs need to be -trusted). +To help address this problem, Vault includes a feature called _response +wrapping_. When requested, Vault can take the response it would have sent to an +HTTP client and instead insert it into the +[`cubbyhole`](/docs/secrets/cubbyhole/index.html) of a single-use token, +returning that single-use token instead. Logically speaking, the response is +wrapped by the token, and retrieving it requires an unwrap operation against +this token. -Starting in 0.6, this concept is taken to its logical conclusion: almost every -response that Vault generates can be automatically wrapped inside a single-use, -limited-time-to-live token's cubbyhole. Details can be found in the -[`cubbyhole` backend -documentation](/docs/secrets/cubbyhole/index.html). +This provides a powerful mechanism for information sharing in many +environments. 
In the types of scenarios described above, often the best
+practical option is to provide _cover_ for the secret information, be able to
+_detect malfeasance_ (interception, tampering), and limit _lifetime_ of the
+secret's exposure. Response wrapping performs all three of these duties:
 
-This capability should be carefully considered when planning your security
-architecture. For instance, many Vault deployments use the
-[`pki`](/docs/secrets/pki/index.html) backend to
-generate TLS certificates and private keys for services. If you do not wish
-these services to have access to the generation API, a trusted third party
-could generate the certificates and private keys and pass the resulting
-wrapping tokens directly to the services in need. A simple API call will return
-the original PKI information; if the call fails, a security alert can be
-raised.
+  * It provides _cover_ by ensuring that the value being transmitted across the
+    wire is not the actual secret but a reference to such a secret, namely the
+    response-wrapping token. Information stored in logs or captured along the
+    way does not directly see the sensitive information.
+  * It provides _malfeasance detection_ by ensuring that only a single party can
+    ever unwrap the token and see what's inside. A client receiving a token that
+    cannot be unwrapped can trigger an immediate security incident. In addition,
+    a client can inspect a given token before unwrapping to ensure that its
+    origin is from the expected location in Vault.
+  * It _limits the lifetime_ of secret exposure because the response-wrapping
+    token has a lifetime that is separate from the wrapped secret (and often can
+    be much shorter), so if a client fails to come up and unwrap the token, the
+    token can expire very quickly. 
-To look at the above example another way, response wrapping also frees end
-services from needing to generate a CSR and pass it to Vault through the
-trusted third party simply to ensure that the private key corresponding to the
-eventual certificate remains private. The end service can be assured that only
-it will see the generated private key and that any malfeasance is detected.
-This can significantly reduce the complexity of any relaying third party.
+## Response-Wrapping Tokens
 
-One final note: if the wrapped response is an authentication response
-containing a Vault token, the token's accessor will be made available in the
-returned wrap information. This allows privileged callers to generate tokens
-for clients and revoke these tokens (and their created leases) at an
-appropriate time, while never being exposed to the actual generated token IDs.
+When a response is wrapped, the normal API response from Vault does not contain
+the original secret, but rather contains a set of information related to the
+response-wrapping token:
+
+  * TTL: The TTL of the response-wrapping token itself
+  * Token: The actual token value
+  * Creation Time: The time that the response-wrapping token was created
+  * Creation Path: The API path that was called in the original request
+  * Wrapped Accessor: If the wrapped response is an authentication response
+    containing a Vault token, this is the value of the wrapped token's accessor.
+    This is useful for orchestration systems (such as Nomad) to be able to control
+    the lifetime of secrets based on their knowledge of the lifetime of jobs,
+    without having to actually unwrap the response-wrapping token or gain
+    knowledge of the token ID inside.
+
+Vault currently does not provide signed response-wrapping tokens, as it
+provides little extra protection. 
If you are being pointed to the correct Vault +server, token validation is performed by interacting with the server itself; a +signed token does not remove the need to validate the token with the server, +since the token is not carrying data but merely an access mechanism and the +server will not release data without validating it. If you are being attacked +and pointed to the wrong Vault server, the same attacker could trivially give +you the wrong signing public key that corresponds to the wrong Vault server. +You could cache a previously valid key, but could also cache a previously valid +address (and in most cases the Vault address will not change or will be set via +a service discovery mechanism). As such, we rely on the fact that the token +itself is not carrying authoritative data and do not sign it. + +## Response-Wrapping Token Operations + +Via the `sys/wrapping` path, several operations can be run against wrapping +tokens: + + * Lookup (`sys/wrapping/lookup`): This allows fetching the response-wrapping + token's creation time, creation path, and TTL. This path is unauthenticated + and available to response-wrapping tokens themselves. In other words, a + response-wrapping token holder wishing to perform validation is always + allowed to look up the properties of the token. + * Unwrap (`sys/wrapping/unwrap`): Unwrap the token, returning the response + inside. The response that is returned will be the original wire-format + response; it can be used directly with API clients. + * Rewrap (`sys/wrapping/rewrap`): Allows migrating the wrapped data to a new + response-wrapping token. This can be useful for long-lived secrets. 
For + example, an organization may wish (or be required in a compliance scenario) + to have the `pki` backend's root CA key be returned in a long-lived + response-wrapping token to ensure that nobody has seen the key (easily + verified by performing lookups on the response-wrapping token) but available + for signing CRLs in case they ever accidentally change or lose the `pki` + mount. Often, compliance schemes require periodic rotation of secrets, so + this helps achieve that compliance goal without actually exposing what's + inside. + * Wrap (`sys/wrapping/wrap`): A helper endpoint that echoes back the data sent + to it in a response-wrapping token. Note that blocking access to this + endpoint does not remove the ability for arbitrary data to be wrapped, as it + can be done elsewhere in Vault. + +## Response-Wrapping Token Creation + +Response wrapping is per-request and is triggered by providing to Vault the +desired TTL for a response-wrapping token for that request. This is set by the +client using the `X-Vault-Wrap-TTL` header and can be either an integer number +of seconds or a string duration of seconds (`15s`), minutes (`20m`), or hours +(`25h`). When using the Vault CLI, you can set this via the `-wrap-ttl` +parameter. When using the Go API, wrapping is triggered by [setting a helper +function](https://godoc.org/github.com/hashicorp/vault/api#Client.SetWrappingLookupFunc) +that tells the API the conditions under which to request wrapping, by mapping +an operation and path to a desired TTL. + +If a client requests wrapping: + +1. The original HTTP response is serialized +2. A new single-use token is generated with the TTL supplied by the client +3. Internally, the original serialized response is stored in the single-use + token's cubbyhole +4. A new response is generated, with the token ID, TTL, and path stored in the + new response's wrap information object +5. 
The new response is returned to the caller + +Note that policies can control minimum/maximum wrapping TTLs; see the [policies +concepts page](https://www.vaultproject.io/docs/concepts/policies.html) for +more information. + +## Response-Wrapping Token Validation + +Proper validation of response-wrapping tokens is essential to ensure that any +malfeasance is detected. It's also pretty straightforward. + +Validation is best performed by the following steps: + +1. If a client has been expecting delivery of a response-wrapping token and + none arrives, this may be due to an attacker intercepting the token and then + preventing it from traveling further. This should cause an alert to trigger + an immediate investigation. +2. Perform a lookup on the response-wrapping token. This immediately tells you + if the token has already been unwrapped or is expired (or otherwise + revoked). If the lookup indicates that a token is invalid, it does not + necessarily mean that the data was intercepted (for instance, perhaps the + client took a long time to start up and the TTL expired) but should trigger + an alert for immediate investigation, likely with the assistance of Vault's + audit logs to see if the token really was unwrapped. +3. With the token information in hand, validate that the creation path matches + expectations. If you expect to find a TLS key/certificate inside, chances + are the path should be something like `pki/issue/...`. If the path is not + what you expect, it is possible that the data contained inside was read and + then put into a new response-wrapping token. (This is especially likely if + the path starts with `cubbyhole` or `sys/wrapping/wrap`.) Particular care + should be taken with `kv` mounts: exact matches on the path are best + there. For example, if you expect a secret to come from `secret/foo` and + the interceptor provides a token with `secret/bar` as the path, simply + checking for a prefix of `secret/` is not enough. +4. 
After prefix validation, unwrap the token. If the unwrap fails, the response + is similar to if the initial lookup fails: trigger an alert for immediate + investigation. + +Following those steps provides very strong assurance that the data contained +within the response-wrapping token has never been seen by anyone other than the +intended client and that any interception or tampering has resulted in a +security alert. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/tokens.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/tokens.html.md index f1d72d9..5234239 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/concepts/tokens.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/concepts/tokens.html.md @@ -54,7 +54,7 @@ of version 0.6.1, there are only three ways to create root tokens: expiration 2. By using another root token; a root token with an expiration cannot create a root token that never expires -3. By using `vault generate-root` ([example](../guides/generate-root.html)) +3. By using `vault generate-root` ([example](/guides/generate-root.html)) with the permission of a quorum of unseal key holders Root tokens are useful in development but should be extremely carefully guarded diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/config/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/config/index.html.md new file mode 100644 index 0000000..5b37f79 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/config/index.html.md @@ -0,0 +1,818 @@ +--- +layout: "docs" +page_title: "Server Configuration" +sidebar_current: "docs-config" +description: |- + Vault server configuration reference. +--- + +# Server Configuration + +Outside of development mode, Vault servers are configured using a file. +The format of this file is [HCL](https://github.com/hashicorp/hcl) or JSON. 
+An example configuration is shown below: + +```javascript +backend "consul" { + address = "127.0.0.1:8500" + path = "vault" +} + +listener "tcp" { + address = "127.0.0.1:8200" + tls_disable = 1 +} + +telemetry { + statsite_address = "127.0.0.1:8125" + disable_hostname = true +} +``` + +After the configuration is written, use the `-config` flag with `vault server` +to specify where the configuration is. + +Starting with 0.5.2, limited configuration options can be changed on-the-fly by +sending a SIGHUP to the server process. These are denoted below. + +## Reference + +* `backend` (required) - Configures the storage backend where Vault data + is stored. There are multiple options available for storage backends, + and they're documented below. + +* `ha_backend` (optional) - Configures the storage backend where Vault HA + coordination will take place. Must be an HA-supporting backend using the + configuration options as documented below. If not set, HA will be attempted + on the backend given in the `backend` parameter. + +* `cluster_name` (optional) - An identifier for your Vault cluster. If omitted, + Vault will generate a value for `cluster_name`. If connecting to Vault + Enterprise, this value will be used in the interface. + +* `listener` (required) - Configures how Vault is listening for API requests. + "tcp" and "atlas" are valid values. A full reference for the + inner syntax is below. + +* `cache_size` (optional) - If set, the size of the read cache used + by the physical storage subsystem will be set to this value. The + value is in number of entries so the total cache size is dependent + on the entries being stored. Defaults to 32k entries. + +* `disable_cache` (optional) - A boolean. If true, this will disable all caches + within Vault, including the read cache used by the physical storage + subsystem. This will very significantly impact performance. + +* `disable_mlock` (optional) - A boolean. 
If true, this will disable the + server from executing the `mlock` syscall to prevent memory from being + swapped to disk. This is not recommended in production (see below). + +* `telemetry` (optional) - Configures the telemetry reporting system + (see below). + +* `default_lease_ttl` (optional) - Configures the default lease duration + for tokens and secrets. This is a string value using a suffix, e.g. "768h". + Default value is 32 days. This value cannot be larger than `max_lease_ttl`. + +* `max_lease_ttl` (optional) - Configures the maximum possible + lease duration for tokens and secrets. This is a string value using a suffix, + e.g. "768h". Default value is 32 days. + +* `ui` (optional, Vault Enterprise only) - If set `true`, enables the built-in + web-based UI. Once enabled, the UI will be available to browsers at the + standard Vault address. + +In production it is a risk to run Vault on systems where `mlock` is +unavailable or the setting has been disabled via the `disable_mlock`. +Disabling `mlock` is not recommended unless the systems running Vault only +use encrypted swap or do not use swap at all. Vault only supports memory +locking on UNIX-like systems (Linux, FreeBSD, Darwin, etc). Non-UNIX like +systems (e.g. Windows, NaCL, Android) lack the primitives to keep a process's +entire memory address space from spilling to disk and is therefore automatically +disabled on unsupported platforms. + +On Linux, to give the Vault executable the ability to use the `mlock` syscall +without running the process as root, run: + +```shell +sudo setcap cap_ipc_lock=+ep $(readlink -f $(which vault)) +``` + +## Listener Reference + +For the `listener` section, the only required listener is "tcp". +Regardless of future plans, this is the recommended listener, +as it allows for HA mode. 
If you wish to use the Vault +Enterprise interface in HashiCorp Atlas, you may add an ["atlas" listener block](#connecting-to-vault-enterprise-in-hashicorp-atlas) +in addition to the "tcp" one. + +The supported options are: + + * `address` (optional) - The address to bind to for listening. This + defaults to "127.0.0.1:8200". + + * `cluster_address` (optional) - The address to bind to for cluster + server-to-server requests. This defaults to one port higher than the + value of `address`, so with the default value of `address`, this would be + "127.0.0.1:8201". + + * `tls_disable` (optional) - If true, then TLS will be disabled. + This will parse as boolean value, and can be set to "0", "no", + "false", "1", "yes", or "true". This is an opt-in; Vault assumes + by default that TLS will be used. + + * `tls_cert_file` (required unless disabled) - The path to the certificate + for TLS. To configure the listener to use a CA certificate, concatenate + the primary certificate and the CA certificate together. The primary + certificate should appear first in the combined file. This is reloaded + via SIGHUP. + + * `tls_key_file` (required unless disabled) - The path to the private key + for the certificate. This is reloaded via SIGHUP. + + * `tls_min_version` (optional) - **(Vault > 0.2)** If provided, specifies + the minimum supported version of TLS. Accepted values are "tls10", "tls11" + or "tls12". This defaults to "tls12". WARNING: TLS 1.1 and lower + are generally considered less secure; avoid using these if + possible. + +### Connecting to Vault Enterprise in HashiCorp Atlas + +Adding an "atlas" block will initiate a long-running connection to the +[SCADA](https://scada.hashicorp.com) service. The SCADA connection allows the +Vault Enterprise interface to securely communicate with and operate on your +Vault cluster. + +The "atlas" `listener` supports these options: + + * `endpoint` (optional) - The endpoint address used for Vault Enterprise interface + integration. 
Defaults to the public Vault Enterprise endpoints on Atlas. + + * `infrastructure` (required) - Used to provide the Atlas infrastructure name and + the SCADA connection. The format of this is `username/environment`. + + * `node_id` (required) - The identifier for an individual node—used in + the Vault Enterprise dashboard. + + * `token` (required) - A token from Atlas used to authenticate the SCADA session. Generate + one in [Atlas](https://atlas.hashicorp.com/settings/tokens). + +Additionally, the [`cluster_name`](#cluster_name) config option will be used to +identify your cluster members inside the infrastructure in the Vault Enterprise +interface. It is important for operators to use the same value for +`cluster_name` across cluster members because Vault overwrites this value +internally on instance instantiation. + +This allows the connection of multiple clusters to a single `infrastructure`. + +For more on Vault Enterprise, see the [help documentation](https://atlas.hashicorptest.com/help/vault/features). + + +## Telemetry Reference + +For the `telemetry` section, there is no resource name. All configuration +is within the object itself. + +* `statsite_address` (optional) - An address to a [Statsite](https://github.com/armon/statsite) + instance for metrics. This is highly recommended for production usage. + +* `statsd_address` (optional) - This is the same as `statsite_address` but + for StatsD. + +* `disable_hostname` (optional) - Whether or not to prepend runtime telemetry + with the machine's hostname. This is a global option. Defaults to false. + +* `circonus_api_token` + A valid [Circonus](http://circonus.com/) API Token used to create/manage checks. If provided, metric management is enabled. + +* `circonus_api_app` + A valid app name associated with the API token. By default, this is set to "consul". + +* `circonus_api_url` + The base URL to use for contacting the Circonus API. By default, this is set to "https://api.circonus.com/v2". 
+ +* `circonus_submission_interval` + The interval at which metrics are submitted to Circonus. By default, this is set to "10s" (ten seconds). + +* `circonus_submission_url` + The `check.config.submission_url` field, of a Check API object, from a previously created HTTPTRAP check. + +* `circonus_check_id` + The Check ID (not **check bundle**) from a previously created HTTPTRAP check. The numeric portion of the `check._cid` field in the Check API object. + +* `circonus_check_force_metric_activation` + Force activation of metrics which already exist and are not currently active. If check management is enabled, the default behavior is to add new metrics as they are encountered. If the metric already exists in the check, it will **not** be activated. This setting overrides that behavior. By default, this is set to "false". + +* `circonus_check_instance_id` + Serves to uniquely identify the metrics coming from this *instance*. It can be used to maintain metric continuity with transient or ephemeral instances as they move around within an infrastructure. By default, this is set to hostname:application name (e.g. "host123:vault"). + +* `circonus_check_search_tag` + A special tag which, when coupled with the instance id, helps to narrow down the search results when neither a Submission URL or Check ID is provided. By default, this is set to service:app (e.g. "service:vault"). + +* `circonus_check_display_name` + Specifies a name to give a check when it is created. This name is displayed in the Circonus UI Checks list. + +* `circonus_check_tags` + Comma separated list of additional tags to add to a check when it is created. + +* `circonus_broker_id` + The ID of a specific Circonus Broker to use when creating a new check. The numeric portion of `broker._cid` field in a Broker API object. If metric management is enabled and neither a Submission URL nor Check ID is provided, an attempt will be made to search for an existing check using Instance ID and Search Tag. 
If one is not found, a new HTTPTRAP check will be created. By default, this is not used and a random Enterprise Broker is selected, or, the default Circonus Public Broker. + +* `circonus_broker_select_tag` + A special tag which will be used to select a Circonus Broker when a Broker ID is not provided. The best use of this is as a hint for which broker should be used based on *where* this particular instance is running (e.g. a specific geo location or datacenter, dc:sfo). By default, this is not used. + +## Backend Reference + +For the `backend` section, the supported physical backends are shown below. +Vault requires that the backend itself will be responsible for backups, +durability, etc. + +__*Please note*__: The only physical backends actively maintained by HashiCorp +are `consul`, `inmem`, and `file`. The other backends are community-derived and +community-supported. We include them in the hope that they will be useful to +those users that wish to utilize them, but they receive minimal validation and +testing from HashiCorp, and HashiCorp staff may not be knowledgeable about the +data store being utilized. If you encounter problems with them, we will attempt +to help you, but may refer you to the backend author. + + * `consul` - Store data within [Consul](https://www.consul.io). This + backend supports HA. It is the most recommended backend for Vault and has + been shown to work at high scale under heavy load. + + * `etcd` - Store data within [etcd](https://coreos.com/etcd/). + This backend supports HA. This is a community-supported backend. + + * `zookeeper` - Store data within [Zookeeper](https://zookeeper.apache.org/). + This backend supports HA. This is a community-supported backend. + + * `dynamodb` - Store data in a [DynamoDB](https://aws.amazon.com/dynamodb/) table. + This backend optionally supports HA. This is a community-supported backend. + + * `s3` - Store data within an S3 bucket [S3](https://aws.amazon.com/s3/). 
+ This backend does not support HA. This is a community-supported backend. + + * `gcs` - Store data within a [Google Cloud Storage](https://cloud.google.com/storage/) bucket. + This backend does not support HA. This is a community-supported backend. + + * `azure` - Store data in an Azure Storage container [Azure](https://azure.microsoft.com/en-us/services/storage/). + This backend does not support HA. This is a community-supported backend. + + * `swift` - Store data within an OpenStack Swift container [Swift](http://docs.openstack.org/developer/swift/). + This backend does not support HA. This is a community-supported backend. + + * `mysql` - Store data within MySQL. This backend does not support HA. This + is a community-supported backend. + + * `postgresql` - Store data within PostgreSQL. This backend does not support HA. This + is a community-supported backend. + + * `cassandra` – Store data within Cassandra. This backend does not support HA. This + is a community-supported backend. + + * `inmem` - Store data in-memory. This is only really useful for + development and experimentation. Data is lost whenever Vault is + restarted. + + * `file` - Store data on the filesystem using a directory structure. + This backend does not support HA. + + +#### High Availability Options + +All HA backends support the following options. These are discussed in much more +detail in the [High Availability concepts +page](https://www.vaultproject.io/docs/concepts/ha.html). + + * `redirect_addr` (required) - This is the address to advertise to other + Vault servers in the cluster for client redirection. This can also be + set via the `VAULT_REDIRECT_ADDR` environment variable, which takes + precedence. Some HA backends may be able to autodetect this value, but if + not it is required to be manually specified. + + * `cluster_addr` (optional) - This is the address to advertise to other Vault + servers in the cluster for request forwarding. 
This can also be set via the + `VAULT_CLUSTER_ADDR` environment variable, which takes precedence. + + * `disable_clustering` (optional) - This controls whether clustering features + (currently, request forwarding) are enabled. Setting this on a node will + disable these features _when that node is the active node_. + +#### Backend Reference: Consul + +For Consul, the following options are supported: + + * `path` (optional) - The path within Consul where data will be stored. + Defaults to "vault/". + + * `address` (optional) - The address of the Consul agent to talk to. + Defaults to the local agent address, if available. + + * `scheme` (optional) - "http" or "https" for talking to Consul. + + * `check_timeout` (optional) - The check interval used to send health check + information to Consul. Defaults to "5s". + + * `disable_registration` (optional) - If true, then Vault will not register + itself with Consul. Defaults to "false". + + * `service` (optional) - The name of the service to register with Consul. + Defaults to "vault". + + * `service_tags` (optional) - Comma separated list of tags that are to be + applied to the service that gets registered with Consul. + + * `token` (optional) - An access token to use to write data to Consul. + + * `max_parallel` (optional) - The maximum number of concurrent requests to Consul. + Defaults to `"128"`. + + * `tls_skip_verify` (optional) - If non-empty, then TLS host verification + will be disabled for Consul communication. Defaults to false. + + * `tls_min_version` (optional) - Minimum TLS version to use. Accepted values + are 'tls10', 'tls11' or 'tls12'. Defaults to 'tls12'. + +The following settings should be set according to your [Consul encryption +settings](https://www.consul.io/docs/agent/encryption.html): + + * `tls_ca_file` (optional) - The path to the CA certificate used for Consul + communication. Defaults to system bundle if not specified. 
Set + accordingly to the + [ca_file](https://www.consul.io/docs/agent/options.html#ca_file) setting in + Consul. + + * `tls_cert_file` (optional) - The path to the certificate for Consul + communication. Set accordingly to the + [cert_file](https://www.consul.io/docs/agent/options.html#cert_file) + setting in Consul. + + * `tls_key_file` (optional) - The path to the private key for Consul + communication. Set accordingly to the + [key_file](https://www.consul.io/docs/agent/options.html#key_file) setting + in Consul. + +``` +// Sample Consul Backend configuration with local Consul Agent +backend "consul" { + // address MUST match Consul's `addresses.http` config value (or + // `addresses.https` depending on the scheme provided below). + address = "127.0.0.1:8500" + #address = "unix:///tmp/.consul.http.sock" + + // scheme defaults to "http" (suitable for loopback and UNIX sockets), but + // should be "https" when Consul exists on a remote node (a non-standard + // deployment). All decryption happens within Vault so this value does not + // change Vault's Threat Model. + scheme = "http" + + // token is a Consul ACL Token that has write privileges to the path + // specified below. Use of a Consul ACL Token is a best practice. + token = "[redacted]" // Vault's Consul ACL Token + + // path must be writable by the Consul ACL Token + path = "vault/" +} +``` + +Once properly configured, an unsealed Vault installation should be available +on the network at `active.vault.service.consul`. Unsealed Vault instances in +the standby state are available at `standby.vault.service.consul`. All +unsealed Vault instances are available as healthy in the +`vault.service.consul` pool. Sealed Vault instances will mark themselves as +critical to avoid showing up by default in Consul's service discovery. 
+ +``` +% dig active.vault.service.consul srv +; <<>> DiG 9.8.3-P1 <<>> active.vault.service.consul srv +; (1 server found) +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 11331 +;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1 +;; WARNING: recursion requested but not available + +;; QUESTION SECTION: +;active.vault.service.consul. IN SRV + +;; ANSWER SECTION: +active.vault.service.consul. 0 IN SRV 1 1 8200 vault1.node.dc1.consul. + +;; ADDITIONAL SECTION: +vault1.node.dc1.consul. 0 IN A 172.17.33.46 + +;; Query time: 0 msec +;; SERVER: 127.0.0.1#53(127.0.0.1) +;; WHEN: Sat Apr 23 17:33:14 2016 +;; MSG SIZE rcvd: 172 +% dig +short standby.vault.service.consul srv +1 1 8200 vault3.node.dc1.consul. +1 1 8200 vault2.node.dc1.consul. +% dig +short vault.service.consul srv +1 1 8200 vault3.node.dc1.consul. +1 1 8200 vault1.node.dc1.consul. +1 1 8200 vault2.node.dc1.consul. +% dig +short vault.service.consul a +172.17.33.46 +172.17.34.32 +172.17.35.29 +vault1% vault seal +% dig +short vault.service.consul srv +1 1 8200 vault3.node.dc1.consul. +1 1 8200 vault2.node.dc1.consul. +vault1% vault unseal +Key (will be hidden): +Sealed: false +Key Shares: 5 +Key Threshold: 3 +Unseal Progress: 0 +% dig +short vault.service.consul srv +1 1 8200 vault1.node.dc1.consul. +1 1 8200 vault3.node.dc1.consul. +1 1 8200 vault2.node.dc1.consul. +``` + +#### Backend Reference: etcd (Community-Supported) + +For etcd, the following options are supported: + + * `path` (optional) - The path within etcd where data will be stored. + Defaults to "vault/". + + * `address` (optional) - The address(es) of the etcd instance(s) to talk to. + Can be comma separated list (protocol://host:port) of many etcd instances. + Defaults to "http://localhost:2379" if not specified. May also be specified + via the ETCD_ADDR environment variable. + + * `sync` (optional) - Should we synchronize the list of available etcd + servers on startup? 
This is a **string** value to allow for auto-sync to + be implemented later. It can be set to "0", "no", "n", "false", "1", "yes", + "y", or "true". Defaults to on. Set to false if your etcd cluster is + behind a proxy server and syncing causes Vault to fail. + + * `ha_enabled` (optional) - Setting this to `"1"`, `"t"`, or `"true"` will + enable HA mode. _This is currently *known broken*._ This option can also be + provided via the environment variable `ETCD_HA_ENABLED`. If you are + upgrading from a version of Vault where HA support was enabled by default, + it is _very important_ that you set this parameter _before_ upgrading! + + * `username` (optional) - Username to use when authenticating with the etcd + server. May also be specified via the ETCD_USERNAME environment variable. + + * `password` (optional) - Password to use when authenticating with the etcd + server. May also be specified via the ETCD_PASSWORD environment variable. + + * `tls_ca_file` (optional) - The path to the CA certificate used for etcd + communication. Defaults to system bundle if not specified. + + * `tls_cert_file` (optional) - The path to the certificate for etcd + communication. + + * `tls_key_file` (optional) - The path to the private key for etcd + communication. + +#### Backend Reference: Zookeeper (Community-Supported) + +For Zookeeper, the following options are supported: + + * `path` (optional) - The path within Zookeeper where data will be stored. + Defaults to "vault/". + + * `address` (optional) - The address(es) of the Zookeeper instance(s) to talk + to. Can be comma separated list (host:port) of many Zookeeper instances. + Defaults to "localhost:2181" if not specified. + +The following optional settings can be used to configure zNode ACLs: + + * `auth_info` (optional) - Authentication string in Zookeeper AddAuth format + (`schema:auth`). 
As an example, `digest:UserName:Password` could be used to + authenticate as user `UserName` using password `Password` with the `digest` + mechanism. + + * `znode_owner` (optional) - If specified, Vault will always set all + permissions (CRWDA) to the ACL identified here via the Schema and User + parts of the Zookeeper ACL format. The expected format is + `schema:user-ACL-match`. Some examples: + * `digest:UserName:HIDfRvTv623G==` - Access for the user `UserName` with + the corresponding digest `HIDfRvTv623G==` + * `ip:127.0.0.1` - Access from localhost only + * `ip:70.95.0.0/16` - Any host on the 70.95.0.0 network (CIDRs are + supported starting from Zookeeper 3.5.0) + +If neither of these is set, the backend will not authenticate with Zookeeper +and will set the OPEN_ACL_UNSAFE ACL on all nodes. In this scenario, anyone +connected to Zookeeper could change Vault’s znodes and, potentially, take Vault +out of service. + +Some sample configurations: + +``` +backend "zookeeper" { + znode_owner = "digest:vaultUser:raxgVAfnDRljZDAcJFxznkZsExs=" + auth_info = "digest:vaultUser:abc" +} +``` + +The above configuration causes Vault to set an ACL on all of its zNodes +permitting access to vaultUser only. If the `digest` schema is used, please +protect this file as it contains the cleartext password. As per Zookeeper's ACL +model, the digest value (in znode_owner) must match the user (in znode_owner). + +``` +backend "zookeeper" { + znode_owner = "ip:127.0.0.1" +} +``` + +The above example allows access from localhost only - as this is the `ip` +schema, no auth_info is required since Zookeeper uses the address of the client +for the ACL check. + +#### Backend Reference: DynamoDB (Community-Supported) + +The DynamoDB backend optionally supports HA. Because Dynamo does not support session +lifetimes on its locks, a Vault node that has failed, rather than shut down in +an orderly fashion, will require manual cleanup rather than failing over +automatically. 
See the documentation of `recovery_mode` to better understand +this process. To enable HA, set the `ha_enabled` option. + +The DynamoDB backend has the following options: + + * `table` (optional) - The name of the DynamoDB table to store data in. The + default table name is `vault-dynamodb-backend`. This option can also be + provided via the environment variable `AWS_DYNAMODB_TABLE`. If the + specified table does not yet exist, it will be created during + initialization. + + * `read_capacity` (optional) - The read capacity to provision when creating + the DynamoDB table. This is the maximum number of reads consumed per second + on the table. The default value is 5. This option can also be provided via + the environment variable `AWS_DYNAMODB_READ_CAPACITY`. + + * `write_capacity` (optional) - The write capacity to provision when creating + the DynamoDB table. This is the maximum number of writes performed per + second on the table. The default value is 5. This option can also be + provided via the environment variable `AWS_DYNAMODB_WRITE_CAPACITY`. + + * `access_key` - (required) The AWS access key. It must be provided, but it + can also be sourced from the `AWS_ACCESS_KEY_ID` environment variable. + + * `secret_key` - (required) The AWS secret key. It must be provided, but it + can also be sourced from the `AWS_SECRET_ACCESS_KEY` environment variable. + + * `session_token` - (optional) The AWS session token. It can also be sourced + from the `AWS_SESSION_TOKEN` environment variable. + + * `endpoint` - (optional) An alternative (AWS compatible) DynamoDB endpoint + to use. It can also be sourced from the `AWS_DYNAMODB_ENDPOINT` environment + variable. + + * `region` (optional) - The AWS region. It can be sourced from the + `AWS_DEFAULT_REGION` environment variable and will default to `us-east-1` + if not specified. + + * `max_parallel` (optional) - The maximum number of concurrent requests to + DynamoDB. Defaults to `"128"`. 
+ + * `ha_enabled` (optional) - Setting this to `"1"`, `"t"`, or `"true"` will + enable HA mode. Please ensure you have read the documentation for the + `recovery_mode` option before enabling this. This option can also be + provided via the environment variable `DYNAMODB_HA_ENABLED`. If you are + upgrading from a version of Vault where HA support was enabled by default, + it is _very important_ that you set this parameter _before_ upgrading! + + * `recovery_mode` (optional) - When the Vault leader crashes or is killed + without being able to shut down properly, no other node can become the new + leader because the DynamoDB table still holds the old leader's lock record. + To recover from this situation, one can start a single Vault node with this + option set to `"1"`, `"t"`, or `"true"` and the node will remove the old + lock from DynamoDB. It is important that only one node is running in + recovery mode! After this node has become the leader, other nodes can be + started with regular configuration. This option can also be provided via + the environment variable `RECOVERY_MODE`. + +For more information about the read/write capacity of DynamoDB tables, see the +[official AWS DynamoDB +docs](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput). +If you are running your Vault server on an EC2 instance, you can also make use +of the EC2 instance profile service to provide the credentials Vault will use +to make DynamoDB API calls. Leaving the `access_key` and `secret_key` fields +empty will cause Vault to attempt to retrieve credentials from the metadata +service. + +#### Backend Reference: S3 (Community-Supported) + +For S3, the following options are supported: + + * `bucket` (required) - The name of the S3 bucket to use. It must be provided, but it can also be sourced from the `AWS_S3_BUCKET` environment variable. + + * `access_key` - (required) The AWS access key. 
It must be provided, but it can also be sourced from the `AWS_ACCESS_KEY_ID` environment variable. + + * `secret_key` - (required) The AWS secret key. It must be provided, but it can also be sourced from the `AWS_SECRET_ACCESS_KEY` environment variable. + + * `session_token` - (optional) The AWS session token. It can also be sourced from the `AWS_SESSION_TOKEN` environment variable. + + * `endpoint` - (optional) An alternative (AWS compatible) S3 endpoint to use. It can also be sourced from the `AWS_S3_ENDPOINT` environment variable. + + * `region` (optional) - The AWS region. It can be sourced from the `AWS_DEFAULT_REGION` environment variable and will default to `us-east-1` if not specified. + +If you are running your Vault server on an EC2 instance, you can also make use +of the EC2 instance profile service to provide the credentials Vault will use to +make S3 API calls. Leaving the `access_key` and `secret_key` fields empty +will cause Vault to attempt to retrieve credentials from the metadata service. +You are responsible for ensuring your instance is launched with the appropriate +profile enabled. Vault will handle renewing profile credentials as they rotate. + +#### Backend Reference: Google Cloud Storage (Community-Supported) + +For Google Cloud Storage, the following options are supported: + + * `bucket` (required) - The name of the Google Cloud Storage bucket to use. It must be provided, but it can also be sourced from the `GOOGLE_STORAGE_BUCKET` environment variable. + + * `credentials_file` - (required) The path to a GCP [service account](https://cloud.google.com/compute/docs/access/service-accounts) private key file in [JSON format](https://cloud.google.com/storage/docs/authentication#generating-a-private-key). It must be provided, but it can also be sourced from the `GOOGLE_APPLICATION_CREDENTIALS` environment variable. + + * `max_parallel` (optional) - The maximum number of concurrent requests to Google Cloud Storage. + Defaults to `"128"`. 
+
+#### Backend Reference: Azure (Community-Supported)
+
+ * `accountName` (required) - The Azure Storage account name
+
+ * `accountKey` (required) - The Azure Storage account key
+
+ * `container` (required) - The Azure Storage Blob container name
+
+ * `max_parallel` (optional) - The maximum number of concurrent requests to Azure. Defaults to `"128"`.
+
+The current implementation is limited to a maximum of 4 MBytes per blob/file.
+
+#### Backend Reference: Swift (Community-Supported)
+
+For Swift, the following options are valid; only v1.0 auth endpoints are supported:
+
+ * `container` (required) - The name of the Swift container to use. It must be provided, but it can also be sourced from the `OS_CONTAINER` environment variable.
+
+ * `username` - (required) The OpenStack account/username. It must be provided, but it can also be sourced from the `OS_USERNAME` environment variable.
+
+ * `password` - (required) The OpenStack password. It must be provided, but it can also be sourced from the `OS_PASSWORD` environment variable.
+
+ * `auth_url` - (required) The OpenStack auth endpoint to use. It can also be sourced from the `OS_AUTH_URL` environment variable.
+
+ * `tenant` (optional) - The name of the tenant to use. It can be sourced from the `OS_TENANT_NAME` environment variable and will default to the default tenant for the username if not specified.
+
+ * `max_parallel` (optional) - The maximum number of concurrent requests to Swift. Defaults to `"128"`.
+
+#### Backend Reference: MySQL (Community-Supported)
+
+The MySQL backend has the following options:
+
+ * `username` (required) - The MySQL username to connect with.
+
+ * `password` (required) - The MySQL password to connect with.
+
+ * `address` (optional) - The address of the MySQL host. Defaults to
+   "127.0.0.1:3306".
+
+ * `database` (optional) - The name of the database to use. Defaults to "vault".
+
+ * `table` (optional) - The name of the table to use. Defaults to "vault".
+ + * `tls_ca_file` (optional) - The path to the CA certificate to connect using TLS + +#### Backend Reference: PostgreSQL (Community-Supported) + +The PostgreSQL backend has the following options: + + * `connection_url` (required) - The connection string used to connect to PostgreSQL. + + Examples: + + * postgres://username:password@localhost:5432/database?sslmode=disable + + * postgres://username:password@localhost:5432/database?sslmode=verify-full + + A list of all supported parameters can be found in [the pq library documentation](https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters). + + * `table` (optional) - The name of the table to write vault data to. Defaults + to "vault_kv_store". + +Add the following table and index to a new or existing PostgreSQL database: + +```sql +CREATE TABLE vault_kv_store ( + parent_path TEXT COLLATE "C" NOT NULL, + path TEXT COLLATE "C", + key TEXT COLLATE "C", + value BYTEA, + CONSTRAINT pkey PRIMARY KEY (path, key) +); + +CREATE INDEX parent_path_idx ON vault_kv_store (parent_path); +``` + +If you're using a version of PostgreSQL prior to 9.5, create the following +function: + +```sql +CREATE FUNCTION vault_kv_put(_parent_path TEXT, _path TEXT, _key TEXT, _value BYTEA) RETURNS VOID AS +$$ +BEGIN + LOOP + -- first try to update the key + UPDATE vault_kv_store + SET (parent_path, path, key, value) = (_parent_path, _path, _key, _value) + WHERE _path = path AND key = _key; + IF found THEN + RETURN; + END IF; + -- not there, so try to insert the key + -- if someone else inserts the same key concurrently, + -- we could get a unique-key failure + BEGIN + INSERT INTO vault_kv_store (parent_path, path, key, value) + VALUES (_parent_path, _path, _key, _value); + RETURN; + EXCEPTION WHEN unique_violation THEN + -- Do nothing, and loop to try the UPDATE again. 
+ END; + END LOOP; +END; +$$ +LANGUAGE plpgsql; +``` + +More info can be found in the [PostgreSQL documentation](http://www.postgresql.org/docs/9.4/static/plpgsql-control-structures.html#PLPGSQL-UPSERT-EXAMPLE): + +#### Backend Reference: Cassandra (Community-Supported) + +The Cassandra backend has the following options: + + * `hosts` (optional) – Comma-separated list of Cassandra hosts to connect to. + Defaults to `"127.0.0.1"`. + + * `keyspace` (optional) – Cassandra keyspace to use. Defaults to `"vault"`. + + * `table` (optional) – Table within the `keyspace` in which to store data. + Defaults to `"entries"`. + + * `consistency` (optional) – Consistency level to use when reading/writing data + in Cassandra. If set, must be one of `"ANY"`, `"ONE"`, `"TWO"`, `"THREE"`, `"QUORUM"`, + `"ALL"`, `"LOCAL_QUORUM"`, `"EACH_QUORUM"`, or `"LOCAL_ONE"`. Defaults to `"LOCAL_QUORUM"`. + + * `protocol_version` (optional) - Cassandra protocol version to use. Defaults + to `2`. + + * `username` (optional) - Username to use when authenticating with the + Cassandra hosts. + + * `password` (optional) - Password to use when authenticating with the + Cassandra hosts. + + * `connection_timeout` (optional) - A timeout in seconds to wait until a + connection is established with the Cassandra hosts. + + * `tls` (optional) - Indicates the connection with the Cassandra hosts should + use TLS. + + * `pem_bundle_file` (optional) - Specifies a file containing a + certificate and private key; a certificate, private key, and issuing CA + certificate; or just a CA certificate. + + * `pem_json_file` (optional) - Specifies a JSON file containing a certificate + and private key; a certificate, private key, and issuing CA certificate; + or just a CA certificate. + + * `tls_skip_verify` (optional) - If set, then TLS host verification + will be disabled for Cassandra. Defaults to `0`. + + * `tls_min_version` (optional) - Minimum TLS version to use. Accepted values + are `tls10`, `tls11` or `tls12`. 
Defaults to `tls12`. + +You need to ensure the keyspace and table exist in Cassandra: + +```cql +CREATE KEYSPACE "vault" WITH REPLICATION = { + 'class' : 'SimpleStrategy', + 'replication_factor' : 1 +}; + +CREATE TABLE "vault"."entries" ( + bucket text, + key text, + value blob, + PRIMARY KEY (bucket, key) +) WITH CLUSTERING ORDER BY (key ASC); + +``` + +_Note:_ Keyspace replication options should be [customised](http://docs.datastax.com/en/cql/3.1/cql/cql_reference/create_keyspace_r.html#reference_ds_ask_vyj_xj__description) appropriately for your environment. + +#### Backend Reference: Inmem + +The in-memory backend has no configuration options. + +#### Backend Reference: File + +The file backend has the following options: + + * `path` (required) - The path on disk to a directory where the + data will be stored. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/index.html.md index 7c61eb7..d6f2620 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/index.html.md @@ -58,10 +58,9 @@ to specify where the configuration is. - `listener` ([Listener][listener]: \) – Configures how Vault is listening for API requests. -- `cache_size` `(string: "32k")` – Specifies the size of the read cache used by - the physical storage subsystem will be set to this value. The value is in - number of entries so the total cache size is dependent on the entries being - stored. +- `cache_size` `(string: "32000")` – Specifies the size of the read cache used + by the physical storage subsystem. The value is in number of entries, so the + total cache size depends on the size of stored entries. - `disable_cache` `(bool: false)` – Disables all caches within Vault, including the read cache used by the physical storage subsystem. 
This will very
@@ -86,22 +85,33 @@ to specify where the configuration is.
   sudo setcap cap_ipc_lock=+ep $(readlink -f $(which vault))
   ```

-- `telemetry` ([Telemetry][telemetry]: nil) – Specifies the telemetry
+- `plugin_directory` `(string: "")` – A directory from which plugins are
+  allowed to be loaded. Vault must have permission to read files in this
+  directory to successfully load plugins.
+
+- `telemetry` ([Telemetry][telemetry]: <telemetry>) – Specifies the telemetry
   reporting system.

-- `default_lease_ttl` `(string: "32d")` – Specifies the default lease duration
+- `default_lease_ttl` `(string: "768h")` – Specifies the default lease duration
   for tokens and secrets. This is specified using a label suffix like `"30s"`
   or `"1h"`. This value cannot be larger than `max_lease_ttl`.

-- `max_lease_ttl` `(string: "32d")` – Specifies the maximum possible lease
+- `max_lease_ttl` `(string: "768h")` – Specifies the maximum possible lease
   duration for tokens and secrets. This is specified using a label suffix like
   `"30s"` or `"1h"`.

+- `raw_storage_endpoint` `(bool: false)` – Enables the `sys/raw` endpoint which
+  allows the decryption/encryption of raw data into and out of the security
+  barrier. This is a highly privileged endpoint.
+
 - `ui` `(bool: false, Enterprise-only)` – Enables the built-in web UI, which is
   available on all listeners (address + port) at the `/ui` path. Browsers
   accessing the standard Vault API address will automatically redirect there.
   This can also be provided via the environment variable `VAULT_UI`.

+- `pid_file` `(string: "")` - Path to the file in which the Vault server's
+  Process ID (PID) should be stored.
+
 [storage-backend]: /docs/configuration/storage/index.html
 [listener]: /docs/configuration/listener/index.html
 [telemetry]: /docs/configuration/telemetry.html
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/listener/tcp.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/listener/tcp.html.md
index bab3a45..5c20a04 100644
--- a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/listener/tcp.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/listener/tcp.html.md
@@ -29,6 +29,19 @@ listener "tcp" {
   they need to hop through a TCP load balancer or some other scheme in order to
   talk.

+- `proxy_protocol_behavior` `(string: "")` – When specified, turns on the PROXY
+  protocol for the listener.
+  Accepted Values:
+  - *use_always* - The client's IP address will always be used.
+  - *allow_authorized* - If the source IP address is in the
+    `proxy_protocol_authorized_addrs` list, the client's IP address will be used.
+    If the source IP is not in the list, the source IP address will be used.
+  - *deny_unauthorized* - The traffic will be rejected if the source IP
+    address is not in the `proxy_protocol_authorized_addrs` list.
+
+- `proxy_protocol_authorized_addrs` `(string: <required-if-enabled>)` – Specifies
+  the list of allowed source IP addresses to be used with the PROXY protocol.
+
 - `tls_disable` `(string: "false")` – Specifies if TLS will be disabled. Vault
   assumes TLS by default, so you must explicitly disable TLS to opt-in to
   insecure communication.
@@ -58,6 +71,9 @@ listener "tcp" {
   authentication for this listener; the listener will require a presented
   client cert that successfully validates against system CAs.

+- `tls_client_ca_file` `(string: "")` – PEM-encoded Certificate Authority file
+  used for checking the authenticity of the client.
+ ## `tcp` Listener Examples ### Configuring TLS diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/cassandra.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/cassandra.html.md new file mode 100644 index 0000000..6770000 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/cassandra.html.md @@ -0,0 +1,94 @@ +--- +layout: "docs" +page_title: "Cassandra - Storage Backends - Configuration" +sidebar_current: "docs-configuration-storage-cassandra" +description: |- + The Cassandra storage backend is used to persist Vault's data in an Apache + Cassandra cluster. +--- + +# Cassandra Storage Backend + +The Cassandra storage backend is used to persist Vault's data in an [Apache +Cassandra][cassandra] cluster. + +- **No High Availability** – the Cassandra storage backend does not support high + availability. + +- **Community Supported** – the Cassandra storage backend is supported by the + community. While it has undergone review by HashiCorp employees, they may not + be as knowledgeable about the technology. If you encounter problems with it, + you may be referred to the original author. + +```hcl +storage "cassandra" { + hosts = "localhost" + consistency = "LOCAL_QUORUM" + protocol_version = 3 +} +``` + +The Cassandra storage backend does not automatically create the keyspace and +table. This sample configuration can be used as a guide, but you will want to +ensure the keyspace [replication options][replication-options] +are appropriate for your cluster: + +```cql +CREATE KEYSPACE "vault" WITH REPLICATION = { + 'class': 'SimpleStrategy', + 'replication_factor': 1 +}; + +CREATE TABLE "vault"."entries" ( + bucket text, + key text, + value blob, + PRIMARY KEY (bucket, key) +) WITH CLUSTERING ORDER BY (key ASC); +``` + +## `cassandra` Parameters + +* `hosts` `(string: "127.0.0.1")` – Comma-separated list of Cassandra hosts to + connect to. 
+ +* `keyspace` `(string: "vault")` Cassandra keyspace to use. + +* `table` `(string: "entries")` – Table within the `keyspace` in which to store + data. + +* `consistency` `(string: "LOCAL_QUORUM")` Consistency level to use when + reading/writing data. If set, must be one of `"ANY"`, `"ONE"`, `"TWO"`, + `"THREE"`, `"QUORUM"`, `"ALL"`, `"LOCAL_QUORUM"`, `"EACH_QUORUM"`, or + `"LOCAL_ONE"`. + +* `protocol_version` `(int: 2)` Cassandra protocol version to use. + +* `username` `(string: "")` – Username to use when authenticating with the + Cassandra hosts. + +* `password` `(string: "")` – Password to use when authenticating with the + Cassandra hosts. + +* `connection_timeout` `(int: 0)` - A timeout in seconds to wait until a + connection is established with the Cassandra hosts. + +* `tls` `(int: 0)` – If `1`, indicates the connection with the Cassandra hosts + should use TLS. + +* `pem_bundle_file` `(string: "")` - Specifies a file containing a + certificate and private key; a certificate, private key, and issuing CA + certificate; or just a CA certificate. + +* `pem_json_file` `(string: "")` - Specifies a JSON file containing a certificate + and private key; a certificate, private key, and issuing CA certificate; + or just a CA certificate. + +* `tls_skip_verify` `(int: 0)` - If `1`, then TLS host verification + will be disabled for Cassandra. Defaults to `0`. + +* `tls_min_version` `(string: "tls12")` - Minimum TLS version to use. Accepted + values are `tls10`, `tls11` or `tls12`. Defaults to `tls12`. 
+ +[cassandra]: http://cassandra.apache.org/ +[replication-options]: https://docs.datastax.com/en/cassandra/2.1/cassandra/architecture/architectureDataDistributeReplication_c.html diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/cockroachdb.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/cockroachdb.html.md new file mode 100644 index 0000000..28f5699 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/cockroachdb.html.md @@ -0,0 +1,66 @@ +--- +layout: "docs" +page_title: "CockroachDB - Storage Backends - Configuration" +sidebar_current: "docs-configuration-storage-cockroachdb" +description: |- + The CockroachDB storage backend is used to persist Vault's data in a CockroachDB + server or cluster. +--- + +# CockroachDB Storage Backend + +The CockroachDB storage backend is used to persist Vault's data in a +[CockroachDB][cockroachdb] server or cluster. + +- **No High Availability** – the CockroachDB storage backend does not support + high availability. + +- **Community Supported** – the CockroachDB storage backend is supported by the + community. While it has undergone development and review by HashiCorp + employees, they may not be as knowledgeable about the technology. + +```hcl +storage "cockroachdb" { + connection_url = "postgres://user123:secret123!@localhost:5432/vault" +} +``` + +**Note** - CockroachDB is compatible with the PostgreSQL database driver and +uses that driver to interact with the database. + +## `cockroachdb` Parameters + +- `connection_url` `(string: )` – Specifies the connection string to + use to authenticate and connect to CockroachDB. A full list of supported + parameters can be found in [the pq library documentation][pglib]. For example + connection string URLs, see the examples section below. + +- `table` `(string: "vault_kv_store")` – Specifies the name of the table in + which to write Vault data. 
This table must already exist (Vault will not
+  attempt to create it).
+
+- `max_parallel` `(string: "128")` – Specifies the maximum number of concurrent
+  requests to CockroachDB.
+
+## `cockroachdb` Examples
+
+This example shows connecting to a CockroachDB cluster using full SSL
+verification (recommended).
+
+```hcl
+storage "cockroachdb" {
+  connection_url = "postgres://user:pass@localhost:5432/database?sslmode=verify-full"
+}
+```
+
+To disable SSL verification (not recommended), replace `verify-full` with
+`disable`:
+
+```hcl
+storage "cockroachdb" {
+  connection_url = "postgres://user:pass@localhost:5432/database?sslmode=disable"
+}
+```
+
+[cockroachdb]: https://www.cockroachlabs.com/
+[pglib]: https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/consul.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/consul.html.md
index 821ea5e..10153a4 100644
--- a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/consul.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/consul.html.md
@@ -88,7 +88,7 @@ at Consul's service discovery layer.

 - `token` `(string: "")` – Specifies the [Consul ACL token][consul-acl] with
   permission to read and write from the `path` in Consul's key-value store.
-  This is **not** a Vault token.
+  This is **not** a Vault token. See the ACL section below for help.

 The following settings apply when communicating with Consul via an encrypted
 connection. You can read more about encrypting Consul connections on the
@@ -133,6 +133,43 @@ discussed in more detail in the [HA concepts page](/docs/concepts/ha.html).
   advertise to other Vault servers in the cluster for client redirection. This
   can also be provided via the environment variable `VAULT_REDIRECT_ADDR`.

+## ACLs
+
+If using ACLs in Consul, you'll need appropriate permissions.
For Consul 0.8, +the following will work for most use-cases, assuming that your service name is +`vault` and the prefix being used is `vault/`: + +```json +{ + "key": { + "vault/": { + "policy": "write" + } + }, + "node": { + "": { + "policy": "write" + } + }, + "service": { + "vault": { + "policy": "write" + } + }, + "agent": { + "": { + "policy": "write" + } + + }, + "session": { + "": { + "policy": "write" + } + } +} +``` + ## `consul` Examples ### Local Agent diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/couchdb.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/couchdb.html.md new file mode 100644 index 0000000..5df23e5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/couchdb.html.md @@ -0,0 +1,45 @@ +--- +layout: "docs" +page_title: "CouchDB - Storage Backends - Configuration" +sidebar_current: "docs-configuration-storage-couchdb" +description: |- + The CouchDB storage backend is used to persist Vault's data in a CouchDB + database. +--- + +# CouchDB Storage Backend + +The CouchDB storage backend is used to persist Vault's data in +[CouchDB][couchdb] table. + +- **No High Availability** – the CouchDB backend does not support high + availability. + +- **Community Supported** – the CouchDB storage backend is supported by the + community. While it has undergone review by HashiCorp employees, they may not + be as knowledgeable about the technology. If you encounter problems with them, + you may be referred to the original author. + +```hcl +storage "couchdb" { + endpoint = "https://my-couchdb-dns.tld:5984/my-database" + username = "admin" + password = "admin" +} +``` + +## `couchdb` Parameters + +- `endpoint` `(string: "")` – Specifies your CouchDB endpoint. This can also be + provided via the environment variable `COUCHDB_ENDPOINT`. + +- `username` `(string: "")` – Specifies the user to authenticate as. 
This can
+  also be provided via the environment variable `COUCHDB_USERNAME`.
+
+- `password` `(string: "")` – Specifies the password to use when authenticating
+  as the user. This can
+  also be provided via the environment variable `COUCHDB_PASSWORD`.
+
+- `max_parallel` `(string: "128")` – Specifies the maximum number of concurrent
+  requests to CouchDB.
+
+[couchdb]: http://couchdb.apache.org/
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/dynamodb.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/dynamodb.html.md
index e390ae8..888c834 100644
--- a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/dynamodb.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/dynamodb.html.md
@@ -116,7 +116,7 @@ This example show enabling high availability for the DynamoDB storage backend.

 ```hcl
 storage "dynamodb" {
   ha_enabled    = "true"
-  redirect_addr = "vault-leader.my-company.internal"
+  redirect_addr = "https://vault-leader.my-company.internal"
 }
 ```

diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/etcd.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/etcd.html.md
index 70895e3..9e03643 100644
--- a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/etcd.html.md
+++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/etcd.html.md
@@ -133,7 +133,7 @@ This example show enabling high availability for the Etcd storage backend.
```hcl storage "etcd" { ha_enabled = true - redirect_addr = "vault-leader.my-company.internal" + redirect_addr = "https://vault-leader.my-company.internal" } ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/google-cloud.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/google-cloud.html.md index d17ac9c..2e6a98b 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/google-cloud.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/google-cloud.html.md @@ -34,10 +34,10 @@ storage "gcs" { account must have permission to read, write, and delete from the bucket. This can also be provided via the environment variable `GOOGLE_STORAGE_BUCKET`. -- `credentials_file` `(string: )` – Specifies the path on disk to a +- `credentials_file` `(string: "")` – Specifies the path on disk to a Google Cloud Platform [service account][gcs-service-account] private key file - in [JSON format][gcs-private-key]. This can also be provided via the - environment variable `GOOGLE_APPLICATION_CREDENTIALS`. + in [JSON format][gcs-private-key]. The GCS client library will attempt to use + the [application default credentials][adc] if this is not specified. - `max_parallel` `(string: "128")` – Specifies the maximum number of concurrent requests. 
@@ -55,6 +55,7 @@ storage "gcs" { } ``` +[adc]: https://developers.google.com/identity/protocols/application-default-credentials [gcs]: https://cloud.google.com/storage/ [gcs-service-account]: https://cloud.google.com/compute/docs/access/service-accounts [gcs-private-key]: https://cloud.google.com/storage/docs/authentication#generating-a-private-key diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/mssql.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/mssql.html.md index 7f1fca8..195afdc 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/mssql.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/mssql.html.md @@ -39,7 +39,7 @@ storage "mssql" { - `username` `(string: "")` - enter the SQL Server Authentication user id or the Windows Authentication user id in the DOMAIN\User format. On Windows, if user id is empty or missing Single-Sign-On is used. - + - `password` `(string: "")` – specifies the MSSQL password to connect to the database. @@ -48,7 +48,7 @@ storage "mssql" { - `table` `(string: "Vault")` – Specifies the name of the table. If the table does not exist, Vault will attempt to create it. - + - `schema` `(string: "dbo")` – Specifies the name of the schema. If the schema does not exist, Vault will attempt to create it. @@ -58,6 +58,9 @@ storage "mssql" { - `logLevel` `(int: 0)` – logging flags (default 0/no logging, 63 for full logging) . +- `max_parallel` `(string: "128")` – Specifies the maximum number of concurrent + requests to MSSQL. 
+ ## `mssql` Examples ### Custom Database, Table and Schema diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/mysql.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/mysql.html.md index a71f827..9e4ee20 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/mysql.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/mysql.html.md @@ -42,6 +42,9 @@ storage "mysql" { - `tls_ca_file` `(string: "")` – Specifies the path to the CA certificate to connect using TLS. +- `max_parallel` `(string: "128")` – Specifies the maximum number of concurrent + requests to MySQL. + Additionally, Vault requires the following authentication information. - `username` `(string: )` – Specifies the MySQL username to connect to diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/postgresql.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/postgresql.html.md index 238af3e..979bf16 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/postgresql.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/postgresql.html.md @@ -26,7 +26,7 @@ storage "postgresql" { } ``` -The PostgresSQL storage backend does not automatically create the table. Here is +The PostgreSQL storage backend does not automatically create the table. Here is some sample SQL to create the schema and indexes. ```sql @@ -82,11 +82,14 @@ LANGUAGE plpgsql; which to write Vault data. This table must already exist (Vault will not attempt to create it). +- `max_parallel` `(string: "128")` – Specifies the maximum number of concurrent + requests to PostgreSQL. 
+ ## `postgresql` Examples ### Custom SSL Verification -This example shows connecting to a PostgresSQL cluster using full SSL +This example shows connecting to a PostgreSQL cluster using full SSL verification (recommended). ```hcl diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/s3.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/s3.html.md index a020c66..7b167bd 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/s3.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/storage/s3.html.md @@ -35,10 +35,11 @@ storage "s3" { - `endpoint` `(string: "")` – Specifies an alternative, AWS compatible, S3 endpoint. This can also be provided via the environment variable - `AWS_DEFAULT_REGION`. + `AWS_S3_ENDPOINT`. - `region` `(string "us-east-1")` – Specifies the AWS region. This can also be - provided via the environment variable `AWS_DEFAULT_REGION`. + provided via the environment variable `AWS_REGION` or `AWS_DEFAULT_REGION`, + in that order of preference. The following settings are used for authenticating to AWS. If you are running your Vault server on an EC2 instance, you can also make use of the EC2 @@ -46,11 +47,13 @@ instance profile service to provide the credentials Vault will use to make S3 API calls. Leaving the `access_key` and `secret_key` fields empty will cause Vault to attempt to retrieve credentials from the AWS metadata service. -- `access_key` `(string: )` – Specifies the AWS access key. This can - also be provided via the environment variable `AWS_ACCESS_KEY_ID`. +- `access_key` – Specifies the AWS access key. This can also be provided via + the environment variable `AWS_ACCESS_KEY_ID`, AWS credential files, or by + IAM role. -- `secret_key` `(string: )` – Specifies the AWS secret key. This can - also be provided via the environment variable `AWS_SECRET_ACCESS_KEY`. +- `secret_key` – Specifies the AWS secret key. 
This can also be provided via + the environment variable `AWS_SECRET_ACCESS_KEY`, AWS credential files, or + by IAM role. - `session_token` `(string: "")` – Specifies the AWS session token. This can also be provided via the environment variable `AWS_SESSION_TOKEN`. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/telemetry.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/telemetry.html.md index df462f8..6999965 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/configuration/telemetry.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/configuration/telemetry.html.md @@ -60,8 +60,7 @@ telemetry { ### `circonus` -These `telemetry` parameters apply to -[Circonus](http://circonus.com/). +These `telemetry` parameters apply to [Circonus](http://circonus.com/). - `circonus_api_token` `(string: "")` - Specifies a valid Circonus API Token used to create/manage check. If provided, metric management is enabled. @@ -120,3 +119,19 @@ These `telemetry` parameters apply to best use of this is to as a hint for which broker should be used based on *where* this particular instance is running (e.g. a specific geo location or datacenter, dc:sfo). + +### `dogstatsd` + +These `telemetry` parameters apply to +[DogStatsD](http://docs.datadoghq.com/guides/dogstatsd/). + +- `dogstatsd_addr` `(string: "")` - This provides the address of a DogStatsD + instance. DogStatsD is a protocol-compatible flavor of statsd, with the added + ability to decorate metrics with tags and event information. If provided, + Vault will send various telemetry information to that instance for + aggregation. This can be used to capture runtime information. + + +- `dogstatsd_tags` `(string array: [])` - This provides a list of global tags + that will be added to all telemetry packets sent to DogStatsD. It is a list + of strings, where each string looks like "my_tag_name:my_tag_value". 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/hsm/behavior.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/hsm/behavior.html.md similarity index 100% rename from vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/hsm/behavior.html.md rename to vendor/github.com/hashicorp/vault/website/source/docs/enterprise/hsm/behavior.html.md diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/hsm/configuration.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/hsm/configuration.html.md similarity index 88% rename from vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/hsm/configuration.html.md rename to vendor/github.com/hashicorp/vault/website/source/docs/enterprise/hsm/configuration.html.md index f851d5a..63f692a 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/hsm/configuration.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/hsm/configuration.html.md @@ -21,6 +21,13 @@ HSM key backup strategy requires the key to be exportable, you should generate the key yourself. The list of creation attributes that Vault uses to generate the key are listed at the end of this document. +## Requirements + +The following software packages are required for Vault Enterprise HSM: + +* PKCS#11 compatible HSM intgration library +* `libtldl` library + ## HSM Block Directives Like the rest of Vault's configuration files, the `hsm` block is in @@ -43,7 +50,11 @@ strings. ### Required Directives * `lib`: The path to the PKCS#11 library shared object file. May also be - specified by the `VAULT_HSM_LIB` environment variable. + specified by the `VAULT_HSM_LIB` environment variable. 
**Note:** Depending + on your HSM, this may be either a binary or a dynamic library, and its use + may require other libraries depending on which system the Vault binary is + currently running on (e.g.: a Linux system may require other libraries to + interpret Windows .dll files). * `slot`: The slot number to use, specified as a string (e.g. `"0"`). May also be specified by the `VAULT_HSM_SLOT` environment variable. * `pin`: The PIN for login. May also be specified by the `VAULT_HSM_PIN` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/hsm/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/hsm/index.html.md similarity index 100% rename from vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/hsm/index.html.md rename to vendor/github.com/hashicorp/vault/website/source/docs/enterprise/hsm/index.html.md diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/hsm/security.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/hsm/security.html.md similarity index 100% rename from vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/hsm/security.html.md rename to vendor/github.com/hashicorp/vault/website/source/docs/enterprise/hsm/security.html.md diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/identity/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/identity/index.html.md new file mode 100644 index 0000000..c51719f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/identity/index.html.md @@ -0,0 +1,85 @@ +--- +layout: "docs" +page_title: "Vault Enterprise Identity" +sidebar_current: "docs-vault-enterprise-identity" +description: |- + Vault Enterprise has the foundations of the identity management system. +--- + +# Vault Enterprise Identity + +In version 0.8, Vault introduced the foundations of identity management system. 
+The goal of identity in Vault is to associate a notion of caller identity to +the tokens used in Vault. + +## Concepts + +### Entities and Personas + +Each user will have multiple accounts with various identity providers. Users +can now be mapped as `Entities` and their corresponding accounts with +authentication providers can be mapped as `Personas`. In essence, each entity +is made up of zero or more personas. + +### Entity Management + +Entities in Vault **do not** automatically pull identity information from +anywhere. It needs to be explicitly managed by operators. This way, it is +flexible in terms of administratively controlling the number of entities to be +pulled in and pulled out of Vault, and in some sense Vault will serve as a +_cache_ of identities and not as the _source_ of identities. + +### Entity Policies + +Vault policies can be assigned to entities which will grant _additional_ +permissions to the token on top of the existing policies on the token. If the +token presented on the API request contains an identifier for the entity and if +that entity has a set of policies on it, then the token will be capable of +performing actions allowed by the policies on the entity as well. + +This is a paradigm shift in terms of _when_ the policies of the token get +evaluated. Before identity, the policy names on the token were immutable (not +the contents of those policies). But with entity policies, along with the +immutable set of policy names on the token, the evaluation of policies +applicable to the token through its identity will happen at request time. This +also adds enormous flexibility to control the behavior of already issued +tokens. + +Its important to note that the policies on the entity are only a means to grant +_additional_ capabilities and not a replacement for the policies on the token, +and to know the full set of capabilities of the token with an associated entity +identifier, the policies on the token should be taken into account. 
+ +### Mount Bound Personas + +Vault supports multiple authentication backends and also allows enabling same +authentication backend on different mounts. The persona name of the user with +each identity provider will be unique within the provider. But Vault also needs +to uniquely distinguish between conflicting persona names across different +mounts of these identity providers. Hence the persona name, in combination with +the authentication backend mount's accessor serve as the unique identifier of a +persona. + +### Implicit Entities + +Operators can create entities for all the users of an auth mount +beforehand and assign policies to them, so that when users login, the desired +capabilities to the tokens via entities are already assigned. But if that's not +done, upon a successful user login from any of the authentication backends, +Vault will create a new entity and assign a persona against the login that was +successful. + +Note that, tokens created using the token authentication backend will not have +an associated identity information. Logging in using the authentication +backends is the only way to create tokens that have a valid entity identifiers. + +### Identity Auditing + +If the token used to make API calls have an associated entity identifier, it will +be audit logged as well. This leaves a trail of actions performed by specific +users. + +### API + +Vault identity can be managed entirely over the HTTP API. Please see [Identity +API](/api/secret/identity/index.html) for more details. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/index.html.md similarity index 69% rename from vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/index.html.md rename to vendor/github.com/hashicorp/vault/website/source/docs/enterprise/index.html.md index d694a17..d25ec92 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/index.html.md @@ -9,11 +9,8 @@ description: |- # Vault Enterprise -Vault Enterprise includes a number of features that may be useful in -specific workflows. These include: - -- [Replication](/docs/vault-enterprise/replication) -- [Secure Introduction Client](/docs/vault-enterprise/vsi) -- [HSM Key Wrapping and Auto-Unsealing](/docs/vault-enterprise/hsm) +Vault Enterprise includes a number of features that may be useful in specific +workflows. Please use the sidebar navigation on the left to choose a specific +topic. These features are part of [Vault Enterprise](https://www.hashicorp.com/vault.html?utm_source=oss&utm_medium=docs&utm_campaign=vault&_ga=1.201793489.1956619674.1489356624). diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/index.html.md new file mode 100644 index 0000000..20dfa2a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/index.html.md @@ -0,0 +1,80 @@ +--- +layout: "docs" +page_title: "Vault Enterprise MFA Support" +sidebar_current: "docs-vault-enterprise-mfa" +description: |- + Vault Enterprise has support for Multi-factor Authentication (MFA), using different authentication types. 
+ +--- + +# Vault Enterprise MFA Support + +Vault Enterprise has support for Multi-factor Authentication (MFA), using +different authentication types. MFA is built on top of the Identity system of +Vault. + +## MFA Types + +MFA in Vault can be of the following types. + +- `Time-based One-time Password (TOTP)` - If configured and enabled on a path, + this would require a TOTP passcode along with Vault token, to be presented + while invoking the API request. The passcode will be validated against the + TOTP key present in the identity of the caller in Vault. + +- `Okta` - If Okta push is configured and enabled on a path, then the enrolled + device of the user will get a push notification to approve or deny the access + to the API. The Okta username will be derived from the caller identity's + persona. + +- `Duo` - If Duo push is configured and enabled on a path, then the enrolled + device of the user will get a push notification to approve or deny the access + to the API. The Duo username will be derived from the caller identity's + persona. + +- `PingID` - If PingID push is configured and enabled on a path, then the + enrolled device of the user will get a push notification to approve or deny + the access to the API. The PingID username will be derived from the caller + identity's persona. + +## Configuring MFA Methods + +MFA methods are globally managed within the `System Backend` using the HTTP API. +Please see [MFA API](/api/system/mfa.html) for details on how to configure an MFA +method. + +## MFA Methods In Policies + +MFA requirements on paths are specified as `mfa_methods` along with other ACL +parameters. + +### Sample Policy + +``` +path "secret/foo" { + capabilities = ["read"] + mfa_methods = ["dev_team_duo", "sales_team_totp"] +} +``` + +The above policy grants `read` access to `secret/foo` only after *both* the MFA +methods `dev_team_duo` and `sales_team_totp` are validated. 
+ +## Supplying MFA Credentials + +MFA credentials are retrieved from the `X-Vault-MFA` HTTP header. The format of +the header is `mfa_method_name[:key[=value]]`. The items in the `[]` are +optional. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --header "X-Vault-MFA:my_totp:695452" \ + https://vault.rocks/v1/secret/foo +``` + +### API + +MFA can be managed entirely over the HTTP API. Please see [MFA API](/api/system/mfa.html) for more details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-duo.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-duo.html.md new file mode 100644 index 0000000..4cdebfd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-duo.html.md @@ -0,0 +1,141 @@ +--- +layout: "docs" +page_title: "Vault Enterprise Duo MFA" +sidebar_current: "docs-vault-enterprise-mfa-duo" +description: |- + Vault Enterprise supports Duo MFA type. +--- + +# Duo MFA + +This page demonstrates the Duo MFA on ACL'd paths of Vault. + +## Steps + +### Enable Auth Backend + +``` +vault auth-enable userpass +``` + +### Fetch Mount Accessor + +``` +vault auth -methods +``` + +``` +Path Type Accessor Default TTL Max TTL Replication Behavior Description +... +userpass/ userpass auth_userpass_54b8e339 system system replicated +``` + + +### Configure Duo MFA method + +``` +vault write sys/mfa/method/duo/my_duo mount_accessor=auth_userpass_54b8e339 integration_key=BIACEUEAXI20BNWTEYXT secret_key=HIGTHtrIigh2rPZQMbguugt8IUftWhMRCOBzbuyz api_hostname=api-2b5c39f5.duosecurity.com +``` + +### Create Policy + +Create a policy that gives access to secret through the MFA method created +above. + +#### Sample Payload + +```hcl +path "secret/foo" { + capabilities = ["read"] + mfa_methods = ["my_duo"] +} +``` + +``` +vault policy-write duo-policy payload.hcl +``` + +### Create User + +MFA works only for tokens that have identity information on them. 
Tokens +created by logging in using authentication backends will have the associated +identity information. Let's create a user in the `userpass` backend and +authenticate against it. + + +``` +vault write auth/userpass/users/testuser password=testpassword policies=duo-policy +``` + +### Create Login Token + +``` +vault write auth/userpass/login/testuser password=testpassword +``` + +``` +Key Value +--- ----- +token 70f97438-e174-c03c-40fe-6bcdc1028d6c +token_accessor a91d97f4-1c7d-6af3-e4bf-971f74f9fab9 +token_duration 768h0m0s +token_renewable true +token_policies [default duo-policy] +token_meta_username "testuser" +``` + +Note that the CLI is not authenticated with the newly created token yet, we did +not call `vault auth`, instead we used the login API to simply return a token. + +### Fetch Entity ID From Token + +Caller identity is represented by the `entity_id` property of the token. + +``` +vault token-lookup 70f97438-e174-c03c-40fe-6bcdc1028d6c +``` + +``` +Key Value +--- ----- +accessor a91d97f4-1c7d-6af3-e4bf-971f74f9fab9 +creation_time 1502245243 +creation_ttl 2764800 +display_name userpass-testuser +entity_id 307d6c16-6f5c-4ae7-46a9-2d153ffcbc63 +expire_time 2017-09-09T22:20:43.448543132-04:00 +explicit_max_ttl 0 +id 70f97438-e174-c03c-40fe-6bcdc1028d6c +issue_time 2017-08-08T22:20:43.448543003-04:00 +meta map[username:testuser] +num_uses 0 +orphan true +path auth/userpass/login/testuser +policies [default duo-policy] +renewable true +ttl 2764623 +``` + +### Login + +Authenticate the CLI to use the newly created token. + +``` +vault auth 70f97438-e174-c03c-40fe-6bcdc1028d6c +``` + +### Read Secret + +Reading the secret will trigger a Duo push. This will be a blocking call until +the push notification is either approved or declined. 
+ +``` +vault read secret/foo +``` + +``` +Key Value +--- ----- +refresh_interval 768h0m0s +data which can only be read after MFA validation +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-okta.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-okta.html.md new file mode 100644 index 0000000..ac69774 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-okta.html.md @@ -0,0 +1,141 @@ +--- +layout: "docs" +page_title: "Vault Enterprise Okta MFA" +sidebar_current: "docs-vault-enterprise-mfa-okta" +description: |- + Vault Enterprise supports Okta MFA type. +--- + +# Okta MFA + +This page demonstrates the Okta MFA on ACL'd paths of Vault. + +## Steps + +### Enable Auth Backend + +``` +vault auth-enable userpass +``` + +### Fetch Mount Accessor + +``` +vault auth -methods +``` + +``` +Path Type Accessor Default TTL Max TTL Replication Behavior Description +... +userpass/ userpass auth_userpass_54b8e339 system system replicated +``` + + +### Configure Okta MFA method + +``` +vault write sys/mfa/method/okta/my_okta mount_accessor=auth_userpass_54b8e339 org_name="dev-262775" api_token="0071u8PrReNkzmATGJAP2oDyIXwwveqx9vIOEyCZDC" +``` + +### Create Policy + +Create a policy that gives access to secret through the MFA method created +above. + +#### Sample Payload + +```hcl +path "secret/foo" { + capabilities = ["read"] + mfa_methods = ["my_okta"] +} +``` + +``` +vault policy-write okta-policy payload.hcl +``` + +### Create User + +MFA works only for tokens that have identity information on them. Tokens +created by logging in using authentication backends will have the associated +identity information. Let's create a user in the `userpass` backend and +authenticate against it. 
+ + +``` +vault write auth/userpass/users/testuser password=testpassword policies=okta-policy +``` + +### Create Login Token + +``` +vault write auth/userpass/login/testuser password=testpassword +``` + +``` +Key Value +--- ----- +token 70f97438-e174-c03c-40fe-6bcdc1028d6c +token_accessor a91d97f4-1c7d-6af3-e4bf-971f74f9fab9 +token_duration 768h0m0s +token_renewable true +token_policies [default okta-policy] +token_meta_username "testuser" +``` + +Note that the CLI is not authenticated with the newly created token yet, we did +not call `vault auth`, instead we used the login API to simply return a token. + +### Fetch Entity ID From Token + +Caller identity is represented by the `entity_id` property of the token. + +``` +vault token-lookup 70f97438-e174-c03c-40fe-6bcdc1028d6c +``` + +``` +Key Value +--- ----- +accessor a91d97f4-1c7d-6af3-e4bf-971f74f9fab9 +creation_time 1502245243 +creation_ttl 2764800 +display_name userpass-testuser +entity_id 307d6c16-6f5c-4ae7-46a9-2d153ffcbc63 +expire_time 2017-09-09T22:20:43.448543132-04:00 +explicit_max_ttl 0 +id 70f97438-e174-c03c-40fe-6bcdc1028d6c +issue_time 2017-08-08T22:20:43.448543003-04:00 +meta map[username:testuser] +num_uses 0 +orphan true +path auth/userpass/login/testuser +policies [default okta-policy] +renewable true +ttl 2764623 +``` + +### Login + +Authenticate the CLI to use the newly created token. + +``` +vault auth 70f97438-e174-c03c-40fe-6bcdc1028d6c +``` + +### Read Secret + +Reading the secret will trigger an Okta push. This will be a blocking call until +the push notification is either approved or declined. 
+ +``` +vault read secret/foo +``` + +``` +Key Value +--- ----- +refresh_interval 768h0m0s +data which can only be read after MFA validation +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-pingid.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-pingid.html.md new file mode 100644 index 0000000..854d301 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-pingid.html.md @@ -0,0 +1,141 @@ +--- +layout: "docs" +page_title: "Vault Enterprise PingID MFA" +sidebar_current: "docs-vault-enterprise-mfa-pingid" +description: |- + Vault Enterprise supports PingID MFA type. +--- + +# PingID MFA + +This page demonstrates PingID MFA on ACL'd paths of Vault. + +## Steps + +### Enable Auth Backend + +``` +vault auth-enable userpass +``` + +### Fetch Mount Accessor + +``` +vault auth -methods +``` + +``` +Path Type Accessor Default TTL Max TTL Replication Behavior Description +... +userpass/ userpass auth_userpass_54b8e339 system system replicated +``` + + +### Configure PingID MFA method + +``` +vault write sys/mfa/method/pingid/ping mount_accessor=auth_userpass_54b8e339 settings_file_base64="AABDwWaR..." +``` + +### Create Policy + +Create a policy that gives access to secret through the MFA method created +above. + +#### Sample Payload + +```hcl +path "secret/foo" { + capabilities = ["read"] + mfa_methods = ["ping"] +} +``` + +``` +vault policy-write ping-policy payload.hcl +``` + +### Create User + +MFA works only for tokens that have identity information on them. Tokens +created by logging in using authentication backends will have the associated +identity information. Let's create a user in the `userpass` backend and +authenticate against it. 
+ + +``` +vault write auth/userpass/users/testuser password=testpassword policies=ping-policy +``` + +### Create Login Token + +``` +vault write auth/userpass/login/testuser password=testpassword +``` + +``` +Key Value +--- ----- +token 70f97438-e174-c03c-40fe-6bcdc1028d6c +token_accessor a91d97f4-1c7d-6af3-e4bf-971f74f9fab9 +token_duration 768h0m0s +token_renewable true +token_policies [default ping-policy] +token_meta_username "testuser" +``` + +Note that the CLI is not authenticated with the newly created token yet, we did +not call `vault auth`, instead we used the login API to simply return a token. + +### Fetch Entity ID From Token + +Caller identity is represented by the `entity_id` property of the token. + +``` +vault token-lookup 70f97438-e174-c03c-40fe-6bcdc1028d6c +``` + +``` +Key Value +--- ----- +accessor a91d97f4-1c7d-6af3-e4bf-971f74f9fab9 +creation_time 1502245243 +creation_ttl 2764800 +display_name userpass-testuser +entity_id 307d6c16-6f5c-4ae7-46a9-2d153ffcbc63 +expire_time 2017-09-09T22:20:43.448543132-04:00 +explicit_max_ttl 0 +id 70f97438-e174-c03c-40fe-6bcdc1028d6c +issue_time 2017-08-08T22:20:43.448543003-04:00 +meta map[username:testuser] +num_uses 0 +orphan true +path auth/userpass/login/testuser +policies [default ping-policy] +renewable true +ttl 2764623 +``` + +### Login + +Authenticate the CLI to use the newly created token. + +``` +vault auth 70f97438-e174-c03c-40fe-6bcdc1028d6c +``` + +### Read Secret + +Reading the secret will trigger a PingID push. This will be a blocking call until +the push notification is either approved or declined. 
+ +``` +vault read secret/foo +``` + +``` +Key Value +--- ----- +refresh_interval 768h0m0s +data which can only be read after MFA validation +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-totp.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-totp.html.md new file mode 100644 index 0000000..9963a9a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/mfa/mfa-totp.html.md @@ -0,0 +1,154 @@ +--- +layout: "docs" +page_title: "Vault Enterprise TOTP MFA" +sidebar_current: "docs-vault-enterprise-mfa-totp" +description: |- + Vault Enterprise supports TOTP MFA type. +--- + +# TOTP MFA + +This page demonstrates the TOTP MFA on ACL'd paths of Vault. + +## Steps + +### Configure TOTP MFA method + +``` +vault write sys/mfa/method/totp/my_totp issuer=Vault period=30 key_size=30 algorithm=SHA256 digits=6 +``` + +### Create Secret + +Create a secret to be accessed after validating MFA. + +``` +vault write secret/foo data="which can only be read after MFA validation" +``` + +### Create Policy + +Create a policy that gives access to secret through the MFA method created +above. + +#### Sample Payload + +```hcl +path "secret/foo" { + capabilities = ["read"] + mfa_methods = ["my_totp"] +} +``` + +``` +vault policy-write totp-policy payload.hcl +``` + +### Enable Auth Backend + +MFA works only for tokens that have identity information on them. Tokens +created by logging in using authentication backends will have the associated +identity information. Let's create a user in the `userpass` backend and +authenticate against it. 
+ +``` +vault auth-enable userpass +``` + +### Create User + +``` +vault write auth/userpass/users/testuser password=testpassword policies=totp-policy +``` + +### Create Login Token + +``` +vault write auth/userpass/login/testuser password=testpassword +``` + +``` +Key Value +--- ----- +token 70f97438-e174-c03c-40fe-6bcdc1028d6c +token_accessor a91d97f4-1c7d-6af3-e4bf-971f74f9fab9 +token_duration 768h0m0s +token_renewable true +token_policies [default totp-policy] +token_meta_username "testuser" +``` + +Note that the CLI is not authenticated with the newly created token yet, we did +not call `vault auth`, instead we used the login API to simply return a token. + +### Fetch Entity ID From Token + +Caller identity is represented by the `entity_id` property of the token. + +``` +vault token-lookup 70f97438-e174-c03c-40fe-6bcdc1028d6c +``` + +``` +Key Value +--- ----- +accessor a91d97f4-1c7d-6af3-e4bf-971f74f9fab9 +creation_time 1502245243 +creation_ttl 2764800 +display_name userpass-testuser +entity_id 307d6c16-6f5c-4ae7-46a9-2d153ffcbc63 +expire_time 2017-09-09T22:20:43.448543132-04:00 +explicit_max_ttl 0 +id 70f97438-e174-c03c-40fe-6bcdc1028d6c +issue_time 2017-08-08T22:20:43.448543003-04:00 +meta map[username:testuser] +num_uses 0 +orphan true +path auth/userpass/login/testuser +policies [default totp-policy] +renewable true +ttl 2764623 +``` + +### Generate TOTP Method Secret on Entity + +Let's generate a TOTP key using the `my_totp` configuration and store it in the +entity of the user. A barcode and a URL for the secret key will be returned by +the API. This should be distributed to the intended user to be able to generate +TOTP passcode. 
+ +``` +vault write sys/mfa/method/totp/my_totp/admin-generate entity_id=307d6c16-6f5c-4ae7-46a9-2d153ffcbc63 +``` + +``` +Key Value +--- ----- +barcode iVBORw0KGgoAAAANSUhEUgAAAMgAAADIEAAAAADYoy0BAAAG50lEQVR4nOydwW4sOwhEX57y/7+cu3AWvkKgA/To1ozqrKJut9tJCYRxZeb75+c/I8T//3oB5m8siBgWRAwLIoYFEcOCiGFBxLAgYlgQMSyIGBZEDAsihgURw4KIYUHEsCBiWBAxLIgYFkSMbzrw64uOzE7pzwzn7j3bPT6+5R6f/RxHkvXEN8aR/G4Ndy44QsSwIGLglHXg4R+vxIRTj7nv3uPjz2dMHEnmzxJd9tvF+bt/kxpHiBgWRIxmyjqQSiarT2KyimnnHpmlwTrVZPNkCTNLO3UFmL0xPstxhIhhQcQYpSxOlsrqWiWmLz7/fb1OU/UM2cg6xe1xhIhhQcR4ccriySGOv5/qbuW6Xal6ntmzMxwhYlgQMUYpqxu2WX1CGuZ1uuCdKzJnPbLmqVTmCBHDgojRTFndZjI576t/zuaJq6qvb+asjwO6f5MaR4gYFkQMnLL2VUSdQDbv6iarewyfJ455xSbRESKGBRHji4YdOac71GeCpN39lG1gNg8/T+z20wiOEDEsiBiLKqubmriJlCST2lYa5yEJpLtm0pzvnjA6QsSwIGLgKut3eNN5fsP7Rd1OV7fHVf922Zp5F2uzYXSEiGFBxGimrN+HQNIgW6pu+JPElT3Lx8SR3Q0m/ztEHCFiWBAxFieG9UYsu3Lg1Qv3w8eZs/l5qqxNrfVG0r2sj8CCiPFQlTWbIVK7p2aeK1Iv8Tb7xqpBcISIYUHEWPeyajtofZ374bm5ND5bj9xs67rbYYIjRAwLIsajJodZbyrerVPBrGdVj490Dxrsfv9QLIgYD/3Dzt3zOXf5pmxmP7ivkxqvTmu8K5VdeSqJOULEsCBirN3vs39gqf8FpttCv6/wjR45i8yu1PB0HXGEiGFBxBh9KunMTllvr2YeJ94Gr9dWnyeS3l135RmOEDEsiBjrE8OujZOnpu4pXn2XN9K5wbWeh/wWEUeIGBZEjPUnOZCqI0sv3TNH4q3KKkBSKc3OMWe2igxHiBgWRIzFZ79vNoPdvlCc+b7LDQzkLeS0kZ8husp6cyyIGCNf1qF7Bkca45FuH4nXSGQGYiIla+Y4QsSwIGI89A07+x5ONwl00+M9snu+Sda832weHCFiWBAxFp/9noVnfYVXX+Sckb8ljuym0/qNsxPPiCNEDAsixqj9nnmi4sj7bubFImeF0apan0gSm2iE15C8rnOV9eZYEDFGG8NZeHKHVbcy4YmUNM+5PaNOZfZlfQQWRIxHv66i63rKnuUpqL5O1lmn33p8XR+6/f4RWBAxFr2srOOUQaqUbkeI+8FqsiRDElp211XWR2BBxHjI5FCfGG7MD9n82dq4a31miyWzbQyljhAxLIgYzQ+fOczO4zYGhpmJoruNJW/hqW+GI0QMCyLGo5/kwM8N46Yya1zXdgKSuGbngN2uVNdYm+EIEcOCiNH8JAe+NTtkqak+lSO2UrJaMid5C084+4rLESKGBRFj1H7P6ocsLZANXV071U1+sp0kllRSKc16WRxHiBgWRIxFL4t0nLJ5eODXz97w2owkE76dnP0dMhwhYlgQMZq9rCw89xs6cuaYVXR1pVePzNZA7Kb1G7Pfq8YRIoYFEePRr149kCqI2wn2bNIpr6BIBUhwhIhhQcQYfZByvE6c6rwimq2EW1K7Jopun6q7Cb1xhIhhQcRYbAwjmzM+bjrNrtdbs2wG7mOfOdN8YvjmWBAxHvq6ivtnshHb1DmROrl1O1Fxzu74WfV4cISIYUHEWPSybrJkNXOJZ/Pw7V69qnqGunlet9NnaerGESKGBRF
jZHK4Icf6z9oh9p4ovgmNT83GuMp6WyyIGItvi44dpJkfqdug5r4sbjHla4hbzmzM/UaOI0QMCyLGo19XEcfcdE/f+LYxe6p7tsjrKO7L6uIIEcOCiLH+UrBZt6qe+YZYI8j4OCa+nRhW47NPmUgPjhAxLIgYo/Y7mvgFziiSEMgWjxhB+TrJetzLelssiBhrK2kkWgtm1koe5qSyiuPju/j17L38boYjRAwLIsZD32N4yLxPmUuKnx5uzBL3mKyCmjXkiR+siyNEDAsixvrDZw5ZeNYbt5n/ql5JPb5eZ92VyhJyNg9PyDeOEDEsiBiLr6sg1AFbVzsZ5Aigm1R5lcWNpl3zxsERIoYFEePFKYtUX1niyu7e12eWA1LpRXina5asDo4QMSyIGAsr6WzM7FmyYdxvV8kxAW/1u/3+EVgQMR796tV6JN8uzSyg3A5Rr4HP0DVCEBwhYlgQMV7myzIzHCFiWBAxLIgYFkQMCyKGBRHDgohhQcSwIGJYEDEsiBgWRAwLIoYFEcOCiGFBxLAgYlgQMSyIGBZEjD8BAAD//xDzM7XcohEsAAAAAElFTkSuQmCC +url otpauth://totp/Vault:307d6c16-6f5c-4ae7-46a9-2d153ffcbc63?algorithm=SHA256&digits=6&issuer=Vault&period=30&secret=AQESPQUPHWYIXV7FGOMBYT3A2N4LQKEIRNKTSRCWTKVEW66L +``` + +Note that Vault's [TOTP secret backend](/docs/secrets/totp/index.html) can be leveraged to create TOTP passcodes. + +### Login + +Authenticate the CLI to use the newly created token. + +``` +vault auth 70f97438-e174-c03c-40fe-6bcdc1028d6c +``` + +### Read Secret + +Read the secret by supplying the TOTP passcode. 
+ +``` +vault read -mfa my_totp:146378 secret/foo +``` + +``` +Key Value +--- ----- +refresh_interval 768h0m0s +data which can only be read after MFA validation +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/replication/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/replication/index.html.md similarity index 51% rename from vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/replication/index.html.md rename to vendor/github.com/hashicorp/vault/website/source/docs/enterprise/replication/index.html.md index c88f819..e96a262 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/replication/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/replication/index.html.md @@ -3,7 +3,7 @@ layout: "docs" page_title: "Vault Enterprise Replication" sidebar_current: "docs-vault-enterprise-replication" description: |- - Vault Enterprise has support for Replication, allowing critical data to be replicated across clusters to support horizontally scaling workloads. + Vault Enterprise has support for Replication, allowing critical data to be replicated across clusters to support horizontally scaling and disaster recovery workloads. --- @@ -17,59 +17,62 @@ policy management. This functionality is expected to be highly available and to scale as the number of clients and their functional needs increase; at the same time, operators would like to ensure that a common set of policies are enforced globally, and a consistent set of secrets and keys are exposed to -applications that need to interoperate. Vault replication addresses both of -these needs. +applications that need to interoperate. -Prior to Vault 0.7, Vault nodes could only be paired within clusters using a -common HA storage backend, where a single active node is backed up by a set of -standby nodes to ensure high availability. 
With replication, Vault replicates -large sets of its data to other nodes (or other HA clusters) to allow -horizontal scalability between clusters across geographically distributed data -centers. +Vault replication addresses both of these needs in providing consistency, +scalability, and highly-available disaster recovery. ## Architecture -Multiple Vault clusters communicate in a one-to-many near real-time flow. +The core unit of Vault replication is a **cluster**, which is comprised of a +collection of Vault nodes (an active and its corresponding HA nodes). Multiple Vault +clusters communicate in a one-to-many near real-time flow. -The primary cluster acts as the system or record and asynchronously replicates -most Vault data to a series of remote clusters, known as secondary clusters or -secondaries. - -The secondaries keep track of their own tokens and leases but share the -underlying configuration, policies, and supporting secrets (K/V values, -encryption keys for `transit`, etc). If a user action would modify underlying -shared state, the secondary forwards the request to the primary to be handled; -this is transparent to the client. In practice, most high-volume workloads -(reads in the `generic` backend, encryption/decryption operations in `transit`, -etc.) can be satisfied by the local secondary, allowing Vault to scale -relatively horizontally with the number of secondaries rather than vertically -as in the past. +Replication operates on a leader/follower model, wherein a leader cluster (known as a +**primary**) is linked to a series of follower **secondary** clusters. The primary +cluster acts as the system of record and asynchronously replicates most Vault data. All communication between primaries and secondaries is end-to-end encrypted -with mutually-authenticated TLS session, setup via replication tokens which are +with mutually-authenticated TLS sessions, setup via replication tokens which are exchanged during bootstrapping. -## What Is Replicated? 
+What data is replicated between the primary and secondary depends on the type of +replication that is configured between the primary and secondary. These types +of relationships are either **disaster recovery** or **performance** +relationships. -The data replicated in Vault 0.7 includes: +## Performance Replication and Disaster Recovery (DR) Replication - * Secrets - * Policies - * Configuration details for secret backends - * Configuration details for authentication backends - * Configuration details for audit backends +*Performance Replication*: +In performance replication, secondaries keep track of their own tokens and leases +but share the underlying configuration, policies, and supporting secrets (K/V values, +encryption keys for `transit`, etc). -Note that secret, authentication, and audit backends can be marked "local" -which prevents them from being affected by replication. +If a user action would modify underlying shared state, the secondary forwards the request +to the primary to be handled; this is transparent to the client. In practice, most +high-volume workloads (reads in the `kv` backend, encryption/decryption operations +in `transit`, etc.) can be satisfied by the local secondary, allowing Vault to scale +relatively horizontally with the number of secondaries rather than vertically as +in the past. -Access tokens for secrets are not a part of the replication process, as tokens -are local to a cluster that has generated them. Similarly, dynamic secrets -(database credentials, etc.) are issued by and their leases are tracked by each -cluster. +*Disaster Recovery (DR) Replication*: +In disaster recovery (or DR) replication, secondaries share the same underlying configuration, +policy, and supporting secrets (K/V values, encryption keys for `transit`, etc) infrastructure +as the primary. 
They also share the same token and lease infrastructure as the primary, as +they are designed to allow for continuous operations with applications connecting to the +original primary on the election of the DR secondary. -By not replicating leases and tokens, we avoid having each client call be -forwarded to the primary to check token validity, which enables scaling -horizontally. +DR is designed to be a mechanism to protect against catastrophic failure of entire clusters. +They do not forward service read or write requests until they are elected and become a new primary. + +| Capability | Disaster Recovery | Performance | +|-------------------------------------------------------------------------------------------------------------------------- |------------------- |-------------------------------------------------------------------------- | +| Mirrors the secrets infrastructure of a primary cluster | Yes | Yes | +| Mirrors the configuration of a primary cluster’s backends (i.e.: auth backends, storage backends, secret backends, etc.) | Yes | Yes | +| Contains a local replica of secrets on the secondary and allows the secondary to forward writes | No | Yes | +| Mirrors the token auth infrastructure for applications or users interacting with the primary cluster | Yes | No. Upon promotion, applications must re-auth tokens with a new primary. | + +For more information on the capabilities of performance and disaster recovery replication, see the Vault Replication [API Documentation](/api/system/replication.html). ## Internals @@ -131,7 +134,7 @@ its encrypted barrier. ## Setup and Best Practices -A [setup guide](/docs/guides/replication.html) is +A [setup guide](/guides/replication.html) is available to help you get started; this guide also contains best practices around operationalizing the replication feature. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/ui/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/enterprise/ui/index.html.md similarity index 100% rename from vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/ui/index.html.md rename to vendor/github.com/hashicorp/vault/website/source/docs/enterprise/ui/index.html.md diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/guides/index.html.md deleted file mode 100644 index 043460e..0000000 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/index.html.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -layout: "docs" -page_title: "Guides" -sidebar_current: "docs-guides" -description: |- - This section provides various guides for common actions. Due to the nature of Vault, some of these procedures can be complex, so our goal is to provide guidance to do them safely. ---- - -# Vault Guides - -This section provides various guides for common actions. Due to the nature -of Vault, some of these procedures can be complex, so our goal is to provide -guidance to do them safely. - -The following guides are available: - -* [Root Token Generation](/docs/guides/generate-root.html) - This guide covers - how to generate new root tokens using unseal keys. - -* [Replication Setup and Guidance](/docs/guides/replication.html) - This - guide covers how to set up and manage Vault Replication, a part of Vault - Enterprise. - -* [Upgrading](/docs/guides/upgrading/index.html) - This guide provides general - upgrade instructions for Vault with version-specific upgrade notes. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/internals/architecture.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/internals/architecture.html.md index 32f05de..341c8c2 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/internals/architecture.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/internals/architecture.html.md @@ -34,7 +34,7 @@ clarify what is being discussed: in. Much like a bank vault, the barrier must be "unsealed" before anything inside can be accessed. * **Secret Backend** - A secret backend is responsible for managing secrets. Simple secret backends - like the "generic" backend simply return the same secret when queried. Some backends support + like the "kv" backend simply return the same secret when queried. Some backends support using policies to dynamically generate a secret each time they are queried. This allows for unique secrets to be used which allows Vault to do fine-grained revocation and policy updates. As an example, a MySQL backend could be configured with a "web" policy. When the "web" secret @@ -45,10 +45,10 @@ clarify what is being discussed: and response from Vault goes through the configured audit backends. This provides a simple way to integrate Vault with multiple audit logging destinations of different types. -* **Credential Backend** - A credential backend is used to authenticate users or applications which +* **Auth Backend** - An auth backend is used to authenticate users or applications which are connecting to Vault. Once authenticated, the backend returns the list of applicable policies which should be applied. Vault takes an authenticated user and returns a client token that can - be used for future requests. As an example, the `user-password` backend uses a username and password + be used for future requests. As an example, the `userpass` backend uses a username and password to authenticate the user. 
Alternatively, the `github` backend allows users to authenticate via GitHub. @@ -95,7 +95,7 @@ as [Shamir's secret sharing algorithm](https://en.wikipedia.org/wiki/Shamir's_Se to split the master key into 5 shares, any 3 of which are required to reconstruct the master key. -![Keys](/assets/images/keys.png) +[![Vault Shamir Secret Sharing Algorithm](/assets/images/vault-shamir-secret-sharing.svg)](/assets/images/vault-shamir-secret-sharing.svg) The number of shares and the minimum threshold required can both be specified. Shamir's technique can be disabled, and the master key used directly for unsealing. Once Vault diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/internals/plugins.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/internals/plugins.html.md new file mode 100644 index 0000000..6f0d8ad --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/internals/plugins.html.md @@ -0,0 +1,127 @@ +--- +layout: "docs" +page_title: "Plugin System" +sidebar_current: "docs-internals-plugins" +description: |- + Learn about Vault's plugin system. +--- + +# Plugin System +Certain Vault backends utilize plugins to extend their functionality outside of +what is available in the core vault code. Often times these backends will +provide both builtin plugins and a mechanism for executing external plugins. +Builtin plugins are shipped with vault, often for commonly used implementations, +and require no additional operator intervention to run. Builtin plugins are +just like any other backend code inside vault. External plugins, on the other +hand, are not shipped with the vault binary and must be registered to vault by +a privileged vault user. This section of the documentation will describe the +architecture and security of external plugins. + +# Plugin Architecture +Vault's plugins are completely separate, standalone applications that Vault +executes and communicates with over RPC. 
This means the plugin process does not +share the same memory space as Vault and therefore can only access the +interfaces and arguments given to it. This also means a crash in a plugin cannot +crash the entirety of Vault. + +## Plugin Communication +Vault creates a mutually authenticated TLS connection for communication with the +plugin's RPC server. While invoking the plugin process, Vault passes a [wrapping +token](https://www.vaultproject.io/docs/concepts/response-wrapping.html) to the +plugin process' environment. This token is single use and has a short TTL. Once +unwrapped, it provides the plugin with a uniquely generated TLS certificate and +private key for it to use to talk to the original vault process. + +~> Note: Reading the original connection's TLS connection state is not supported +in plugins. + +## Plugin Registration +An important consideration of Vault's plugin system is to ensure the plugin +invoked by vault is authentic and maintains integrity. There are two components +that a Vault operator needs to configure before external plugins can be run, the +plugin directory and the plugin catalog entry. + +### Plugin Directory +The plugin directory is a configuration option of Vault, and can be specified in +the [configuration file](https://www.vaultproject.io/docs/configuration/index.html). +This setting specifies a directory in which all plugin binaries must live. A plugin +cannot be added to vault unless it exists in the plugin directory. There is no +default for this configuration option, and if it is not set plugins cannot be +added to vault. + +~> Warning: A vault operator should take care to lock down the permissions on +this directory to ensure a plugin cannot be modified by an unauthorized user +between the time of the SHA check and the time of plugin execution. + +### Plugin Catalog +The plugin catalog is Vault's list of approved plugins. The catalog is stored in +Vault's barrier and can only be updated by a vault user with sudo permissions. 
+Upon adding a new plugin, the plugin name, SHA256 sum of the executable, and the +command that should be used to run the plugin must be provided. The catalog will +make sure the executable referenced in the command exists in the plugin +directory. When added to the catalog the plugin is not automatically executed, +it instead becomes visible to backends and can be executed by them. For more +information on the plugin catalog please see the [Plugin Catalog API +docs](/api/system/plugins-catalog.html). + +An example plugin submission looks like: + +``` +$ vault write sys/plugins/catalog/myplugin-database-plugin \ + sha_256=<expected SHA256 Hex value of the plugin binary> \ + command="myplugin" +Success! Data written to: sys/plugins/catalog/myplugin-database-plugin +``` + +### Plugin Execution +When a backend wants to run a plugin, it first looks up the plugin, by name, in +the catalog. It then checks the executable's SHA256 sum against the one +configured in the plugin catalog. Finally, Vault runs the command configured in +the catalog, sending along the JWT formatted response wrapping token and mlock +settings (like Vault, plugins support the use of mlock when available). + +# Plugin Development + +~> Advanced topic! Plugin development is a highly advanced topic in Vault, and +is not required knowledge for day-to-day usage. If you don't plan on writing any +plugins, we recommend not reading this section of the documentation. + +Because Vault communicates to plugins over an RPC interface, you can build and +distribute a plugin for Vault without having to rebuild Vault itself. This makes +it easy for you to build a Vault plugin for your organization's internal use, +for a proprietary API that you don't want to open source, or to prototype +something before contributing it back to the main project. + +In theory, because the plugin interface is HTTP, you could even develop a plugin +using a completely different programming language! 
(Disclaimer, you would also +have to re-implement the plugin API which is not a trivial amount of work.) + +Developing a plugin is simple. The only knowledge necessary to write +a plugin is basic command-line skills and basic knowledge of the +[Go programming language](http://golang.org). + +Your plugin implementation needs to satisfy the interface for the plugin +type you want to build. You can find these definitions in the docs for the +backend running the plugin. + +```go +package main + +import ( + "os" + + "github.com/hashicorp/vault/helper/pluginutil" + "github.com/hashicorp/vault/plugins" +) + +func main() { + apiClientMeta := &pluginutil.APIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args) + + plugins.Serve(New().(MyPlugin), apiClientMeta.GetTLSConfig()) +} +``` + +And that's basically it! You would just need to change MyPlugin to your actual +plugin. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/internals/rotation.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/internals/rotation.html.md index 90238a0..b1afe79 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/internals/rotation.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/internals/rotation.html.md @@ -19,7 +19,7 @@ to split the master key into 5 shares, any 3 of which are required to reconstruc key. The master key is used to protect the encryption key, which is ultimately used to protect data written to the storage backend. -![Keys](/assets/images/keys.png) +[![Vault Shamir Secret Sharing Algorithm](/assets/images/vault-shamir-secret-sharing.svg)](/assets/images/vault-shamir-secret-sharing.svg) To support key rotation, we need to support changing the unseal keys, master key, and the backend encryption key. We split this into two separate operations, `rekey` and `rotate`. @@ -55,4 +55,3 @@ provides the `N+1` encryption key protected by the `N` key. 
This upgrade key is for a few minutes enabling standby instances to do a periodic check for upgrades. This allows standby instances to update their keys and stay in-sync with the active Vault without requiring operators to perform another unseal. - diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/internals/telemetry.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/internals/telemetry.html.md index a2949eb..e141694 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/internals/telemetry.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/internals/telemetry.html.md @@ -46,3 +46,139 @@ Below is sample output of a telemetry dump: [2015-04-20 12:24:30 -0700 PDT][S] 'vault.core.handle_request': Count: 2 Min: 0.097 Mean: 0.228 Max: 0.359 Stddev: 0.186 Sum: 0.457 [2015-04-20 12:24:30 -0700 PDT][S] 'vault.expire.register': Count: 1 Sum: 0.18 ``` + +You'll note that log entries are prefixed with the metric type as follows: + +- `[C]` is a counter +- `[G]` is a gauge +- `[S]` is a summary + +## Key Metrics + +The following tables described the different Vault metrics. The metrics interval can be assumed to be 10 seconds when retrieving metrics using the above described signals. + +### Internal Metrics + +These metrics represent operational aspects of the running Vault instance. 
+ +| Metric | Description | Unit | Type | +| ---------------- | ----------------------------------| ---- | ---- | +|`vault.audit.log_request`| This measures the number of audit log requests | Number of requests | Summary | +|`vault.audit.log_response`| This measures the number of audit log responses | Number of responses | Summary | +|`vault.barrier.delete`| This measures the number of delete operations at the barrier | Number of operations | Summary | +|`vault.barrier.get`| This measures the number of get operations at the barrier | Number of operations | Summary | +|`vault.barrier.put`| This measures the number of put operations at the barrier | Number of operations | Summary | +|`vault.barrier.list`| This measures the number of list operations at the barrier | Number of operations | Counter | +|`vault.core.check_token`| This measures the number of token checks | Number of checks | Summary | +|`vault.core.fetch_acl_and_token`| This measures the number of ACL and corresponding token entry fetches | Number of fetches | Summary | +|`vault.core.handle_request`| This measures the number of requests | Number of requests | Summary | +|`vault.core.handle_login_request`| This measures the number of login requests | Number of requests | Summary | +|`vault.core.leadership_setup_failed`| This measures the number of cluster leadership setup failures | Number of failures | Summary | +|`vault.core.leadership_lost`| This measures the number of cluster leadership losses | Number of losses | Summary | +|`vault.core.post_unseal` | This measures the number of post-unseal operations | Number of operations | Gauge | +|`vault.core.pre_seal`| This measures the number of pre-seal operations | Number of operations | Gauge | +|`vault.core.seal-with-request`| This measures the number of requested seal operations | Number of operations | Gauge | +|`vault.core.seal`| This measures the number of seal operations | Number of operations | Gauge | +|`vault.core.seal-internal`| This measures the 
number of internal seal operations | Number of operations | Gauge | +|`vault.core.step_down`| This measures the number of cluster leadership step downs | Number of stepdowns | Summary | +|`vault.core.unseal`| This measures the number of unseal operations | Number of operations | Summary | +|`vault.runtime.alloc_bytes` | This measures the number of bytes allocated by the Vault process. This may burst from time to time but should return to a steady state value.| Number of bytes | Gauge | +|`vault.runtime.free_count`| This measures the number of `free` operations | Number of operations | Gauge | +|`vault.runtime.heap_objects`| This measures the number of objects on the heap and is a good general memory pressure indicator | Number of heap objects | Gauge | +|`vault.runtime.malloc_count`| This measures the number of `malloc` operations | Number of operations | Gauge | +|`vault.runtime.num_goroutines`| This measures the number of goroutines and serves as a general load indicator | Number of goroutines| Gauge | +|`vault.runtime.sys_bytes`| This measures the number of bytes allocated to Vault and includes what is being used by the heap and what has been reclaimed but not given back| Number of bytes | Gauge | +|`vault.runtime.total_gc_pause_ns` | This measures the total garbage collector pause time since the Vault instance was last started | Nanosecond | Summary | +| `vault.runtime.total_gc_runs` | Total number of garbage collection runs since the Vault instance was last started | Number of operations | Gauge | + +### Policy and Token Metrics + +These metrics relate to policies and tokens. 
+ +| Metric | Description | Unit | Type | +| ---------------- | ----------------------------------| ---- | ---- | +`vault.expire.fetch-lease-times`| This measures the number of lease time fetch operations | Number of operations | Gauge | +`vault.expire.fetch-lease-times-by-token`| This measures the number of operations which compute lease times by token | Number of operations | Gauge | +`vault.expire.num_leases`| This measures the number of expired leases | Number of expired leases | Gauge | +`vault.expire.revoke`| This measures the number of revoke operations | Number of operations | Counter | +`vault.expire.revoke-force`| This measures the number of forced revoke operations | Number of operations | Counter | +`vault.expire.revoke-prefix`| This measures the number of operations used to revoke all secrets with a given prefix | Number of operations | Counter | +`vault.expire.revoke-by-token`| This measures the number of operations used to revoke all secrets issued with a given token | Number of operations | Counter | +`vault.expire.renew`| This measures the number of renew operations | Number of operations | Counter | +`vault.expire.renew-token`| This measures the number of renew token operations to renew a token which does not need to invoke a logical backend | Number of operations | Gauge | +`vault.expire.register`| This measures the number of register operations which take a request and response with an associated lease and register a lease entry with lease ID | Number of operations | Gauge | +`vault.expire.register-auth`| This measures the number of register auth operations which create lease entries without lease ID | Number of operations | Gauge | +`vault.policy.get_policy`| This measures the number of policy get operations | Number of operations | Counter | +`vault.policy.list_policies`| This measures the number of policy list operations | Number of operations | Counter | +`vault.policy.delete_policy`| This measures the number of policy delete operations | 
Number of operations | Counter | +`vault.policy.set_policy`| This measures the number of policy set operations | Number of operations | Gauge | +`vault.token.create`| This measures the number of token create operations | Number of operations | Gauge | +`vault.token.createAccessor`| This measures the number of Token ID identifier operations | Number of operations | Gauge | +`vault.token.lookup`| This measures the number of token lookups | Number of lookups | Counter | +`vault.token.revoke`| This measures the number of token revocation operations | Number of operations | Gauge | +`vault.token.revoke-tree`| This measures the number of revoke tree operations | Number of operations | Gauge | +`vault.token.store`| This measures the number of operations to store an updated token entry without writing to the secondary index | Number of operations | Gauge | + +### Authentication Backend Metrics + +These metrics relate to supported authentication backends. + +| Metric | Description | Unit | Type | +| ---------------- | ----------------------------------| ---- | ---- | +| `vault.rollback.attempt.auth-token-` | This measures the number of rollback operations attempted for authentication tokens backend | Number of operations | Summary | +| `vault.rollback.attempt.auth-ldap-` | This measures the number of rollback operations attempted for the LDAP authentication backend | Number of operations | Summary | +| `vault.rollback.attempt.cubbyhole-` | This measures the number of rollback operations attempted for the cubbyhole authentication backend | Number of operations | Summary | +| `vault.rollback.attempt.secret-` | This measures the number of rollback operations attempted for the kv secret backend | Number of operations | Summary | +| `vault.rollback.attempt.sys-` | This measures the number of rollback operations attempted for the sys backend | Number of operations | Summary | +| `vault.route.rollback.auth-ldap-` | This measures the number of rollback operations for the LDAP 
authentication backend | Number of operations | Summary | +| `vault.route.rollback.auth-token-` | This measures the number of rollback operations for the authentication tokens backend | Number of operations | Summary | +| `vault.route.rollback.cubbyhole-` | This measures the number of rollback operations for the cubbyhole authentication backend | Number of operations | Summary | +| `vault.route.rollback.secret-` | This measures the number of rollback operations for the kv secret backend | Number of operations | Summary | +| `vault.route.rollback.sys-` | This measures the number of rollback operations for the sys backend | Number of operations | Summary | + +### Storage Backend Metrics + +These metrics relate to supported storage backends. + +| Metric | Description | Unit | Type | +| ---------------- | ----------------------------------| ---- | ---- | +|`vault.azure.put` | This measures the number of put operations against the Azure storage backend | Number of operations | Gauge | +|`vault.azure.get` | This measures the number of get operations against the Azure storage backend | Number of operations | Gauge | +|`vault.azure.delete` | This measures the number of delete operations against the Azure storage backend | Number of operations | Gauge | +|`vault.azure.list` | This measures the number of list operations against the Azure storage backend | Number of operations | Gauge | +|`vault.consul.put` | This measures the number of put operations against the Consul storage backend | Number of operations | Gauge | +|`vault.consul.get` | This measures the number of get operations against the Consul storage backend | Number of operations | Gauge | +|`vault.consul.delete` | This measures the number of delete operations against the Consul storage backend | Number of operations | Gauge | +|`vault.consul.list` | This measures the number of list operations against the Consul storage backend | Number of operations | Gauge | +|`vault.dynamodb.put` | This measures the number of put 
operations against the DynamoDB storage backend | Number of operations | Gauge | +|`vault.dynamodb.get` | This measures the number of get operations against the DynamoDB storage backend | Number of operations | Gauge | +|`vault.dynamodb.delete` | This measures the number of delete operations against the DynamoDB storage backend | Number of operations | Gauge | +|`vault.dynamodb.list` | This measures the number of list operations against the DynamoDB storage backend | Number of operations | Gauge | +|`vault.etcd.put` | This measures the number of put operations against the etcd storage backend | Number of operations | Gauge | +|`vault.etcd.get` | This measures the number of get operations against the etcd storage backend | Number of operations | Gauge | +|`vault.etcd.delete` | This measures the number of delete operations against the etcd storage backend | Number of operations | Gauge | +|`vault.etcd.list` | This measures the number of list operations against the etcd storage backend | Number of operations | Gauge | +|`vault.gcs.put` | This measures the number of put operations against the Google Cloud Storage backend | Number of operations | Gauge | +|`vault.gcs.get` | This measures the number of get operations against the Google Cloud Storage backend | Number of operations | Gauge | +|`vault.gcs.delete` | This measures the number of delete operations against the Google Cloud Storage backend | Number of operations | Gauge | +|`vault.gcs.list` | This measures the number of list operations against the Google Cloud Storage backend | Number of operations | Gauge | +|`vault.mysql.put` | This measures the number of put operations against the MySQL backend | Number of operations | Gauge | +|`vault.mysql.get` | This measures the number of get operations against the MySQL backend | Number of operations | Gauge | +|`vault.mysql.delete` | This measures the number of delete operations against the MySQL backend | Number of operations | Gauge | +|`vault.mysql.list` | This 
measures the number of list operations against the MySQL backend | Number of operations | Gauge | +|`vault.postgres.put` | This measures the number of put operations against the PostgreSQL backend | Number of operations | Gauge | +|`vault.postgres.get` | This measures the number of get operations against the PostgreSQL backend | Number of operations | Gauge | +|`vault.postgres.delete` | This measures the number of delete operations against the PostgreSQL backend | Number of operations | Gauge | +|`vault.postgres.list` | This measures the number of list operations against the PostgreSQL backend | Number of operations | Gauge | +|`vault.s3.put` | This measures the number of put operations against the Amazon S3 backend | Number of operations | Gauge | +|`vault.s3.get` | This measures the number of get operations against the Amazon S3 backend | Number of operations | Gauge | +|`vault.s3.delete` | This measures the number of delete operations against the Amazon S3 backend | Number of operations | Gauge | +|`vault.s3.list` | This measures the number of list operations against the Amazon S3 backend | Number of operations | Gauge | +|`vault.swift.put` | This measures the number of put operations against the OpenStack Swift backend | Number of operations | Gauge | +|`vault.swift.get` | This measures the number of get operations against the OpenStack Swift backend | Number of operations | Gauge | +|`vault.swift.delete` | This measures the number of delete operations against the OpenStack Swift backend | Number of operations | Gauge | +|`vault.swift.list` | This measures the number of list operations against the OpenStack Swift backend | Number of operations | Gauge | +|`vault.zookeeper.put` | This measures the number of put operations against the ZooKeeper backend | Number of operations | Gauge | +|`vault.zookeeper.get` | This measures the number of get operations against the ZooKeeper backend | Number of operations | Gauge | +|`vault.zookeeper.delete` | This measures the 
number of delete operations against the ZooKeeper backend | Number of operations | Gauge | +|`vault.zookeeper.list` | This measures the number of list operations against the ZooKeeper backend | Number of operations | Gauge | diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/plugin/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/plugin/index.html.md new file mode 100644 index 0000000..096eb9e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/plugin/index.html.md @@ -0,0 +1,42 @@ +--- +layout: "docs" +page_title: "Custom Plugin Backends" +sidebar_current: "docs-plugin" +description: |- + Plugin backends are mountable backends that are implemented using Vault's plugin system. +--- + +# Custom Plugin Backends + +Plugin backends are the components in Vault that can be implemented separately from Vault's +builtin backends. These backends can be either authentication or secret backends. + +Detailed information regarding the plugin system can be found in the +[internals documentation](https://www.vaultproject.io/docs/internals/plugins.html). + +# Mounting/unmounting Plugin Backends + +Before a plugin backend can be mounted, it needs to be registered via the +[plugin catalog](https://www.vaultproject.io/docs/internals/plugins.html#plugin-catalog). After +the plugin is registered, it can be mounted by specifying the registered plugin name: + +``` +$ vault mount -path=my-secrets -plugin-name=passthrough-plugin plugin +Successfully mounted plugin 'passthrough-plugin' at 'my-secrets'! +``` + +Listing mounts will display backends that are mounted as plugins, along with the +name of plugin backend that is mounted: + +``` +$ vault mounts +Path Type Accessor Plugin Default TTL Max TTL Force No Cache Replication Behavior Description +my-secrets/ plugin plugin_deb84140 passthrough-plugin system system false replicated +...
+``` + +Unmounting a plugin backend is identical to unmounting internal backends: + +``` +$ vault unmount my-secrets +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/aws/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/aws/index.html.md index a062bce..972d203 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/aws/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/aws/index.html.md @@ -21,14 +21,14 @@ on every path, use `vault path-help` after mounting the backend. ## Quick Start The first step to using the aws backend is to mount it. -Unlike the `generic` backend, the `aws` backend is not mounted by default. +Unlike the `kv` backend, the `aws` backend is not mounted by default. ```text $ vault mount aws Successfully mounted 'aws' at 'aws'! ``` -Next, we must configure the root credentials that are used to manage IAM credentials: +Next, we must configure the credentials that Vault uses to manage the IAM credentials generated by this secret backend: ```text $ vault write aws/config/root \ @@ -37,16 +37,26 @@ $ vault write aws/config/root \ region=us-east-1 ``` +*Note that `root` does not mean it needs to be your AWS account's root credentials, and it +probably should not be. It is also unnecessary in many cases as Vault will use normal AWS credential mechanisms (instance profile, env vars, etc.) when possible. If you need to use static credentials, create an IAM user with permissions to manage IAM and STS. +See below for the specific actions required.* + The following parameters are required: - `access_key` - the AWS access key that has permission to manage IAM credentials. - `secret_key` - the AWS secret key that has permission to manage IAM credentials. -- `region` the AWS region for API calls. -Note: the client uses the official AWS SDK and will use environment variable or IAM -role-provided credentials if available.
+The following parameter is optional: + +- `region` the AWS region for API calls. If not provided, the `AWS_REGION` and + `AWS_DEFAULT_REGION` env vars will be used, in that order. If there is still + no region, `us-east-1` will be used as a fallback. + +Note: the client uses the official AWS SDK and will use the specified +credentials, environment credentials, shared file credentials, or IAM role/ECS +task credentials in that order. The next step is to configure a role. A role is a logical name that maps to a policy used to generated those credentials. @@ -128,7 +138,8 @@ The [Quick Start](#quick-start) describes how to setup the `aws/creds` endpoint. ## Root Credentials for Dynamic IAM users -The `aws/config/root` credentials need permission to manage dynamic IAM users. +The `aws/config/root` credentials need permission to manage dynamic IAM users. +This does not mean it needs to be your AWS account's root credentials, and we would not suggest using them. Here is an example IAM policy that would grant these permissions: ```javascript @@ -180,9 +191,6 @@ as soon as they are generated. Vault also supports an STS credentials instead of creating a new IAM user. -The `aws/sts` endpoint will always fetch credentials with a 1hr ttl. -Unlike the `aws/creds` endpoint, the ttl is enforced by STS. - Vault supports two of the [STS APIs](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html), [STS federation tokens](http://docs.aws.amazon.com/STS/latest/APIReference/API_GetFederationToken.html) and [STS AssumeRole](http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html). @@ -241,11 +249,11 @@ but STS would attach an implicit deny on `sts` that overrides the allow.) 
} ``` -To generate a new set of STS federation token credentials, we simply read from +To generate a new set of STS federation token credentials, we simply write to the role using the aws/sts endpoint: ```text -$vault read aws/sts/deploy +$vault write aws/sts/deploy -ttl=60m Key Value lease_id aws/sts/deploy/31d771a6-fb39-f46b-fdc5-945109106422 lease_duration 3600 @@ -310,11 +318,11 @@ $ vault write aws/roles/deploy \ arn=arn:aws:iam::ACCOUNT-ID-WITHOUT-HYPHENS:role/RoleNameToAssume ``` -To generate a new set of STS assumed role credentials, we again read from +To generate a new set of STS assumed role credentials, we again write to the role using the aws/sts endpoint: ```text -$vault read aws/sts/deploy +$vault write aws/sts/deploy -ttl=60m Key Value lease_id aws/sts/deploy/31d771a6-fb39-f46b-fdc5-945109106422 lease_duration 3600 diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/cassandra/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/cassandra/index.html.md index 1d64678..0269f64 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/cassandra/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/cassandra/index.html.md @@ -10,6 +10,11 @@ description: |- Name: `cassandra` +~> **Deprecation Note:** This backend is deprecated in favor of the +combined databases backend added in v0.7.1. See the documentation for +the new implementation of this backend at +[Cassandra Database Plugin](/docs/secrets/databases/cassandra.html). + The Cassandra secret backend for Vault generates database credentials dynamically based on configured roles. This means that services that need to access a database no longer need to hardcode credentials: they can request @@ -26,7 +31,7 @@ on every path, use `vault path-help` after mounting the backend. ## Quick Start The first step to using the Cassandra backend is to mount it. 
-Unlike the `generic` backend, the `cassandra` backend is not mounted by default. +Unlike the `kv` backend, the `cassandra` backend is not mounted by default. ```text $ vault mount cassandra diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/consul/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/consul/index.html.md index 50f7cbf..d12ef8c 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/consul/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/consul/index.html.md @@ -20,7 +20,7 @@ on every path, use `vault path-help` after mounting the backend. ## Quick Start The first step to using the consul backend is to mount it. -Unlike the `generic` backend, the `consul` backend is not mounted by default. +Unlike the `kv` backend, the `consul` backend is not mounted by default. ``` $ vault mount consul @@ -89,7 +89,7 @@ Here we can see that Vault has generated a new Consul ACL token for us. We can test this token out, and verify that it is read-only: ``` -$ curl 127.0.0.1:8500/v1/kv/foo?token=973a31ea-1ec4-c2de-0f63-623f477c25100 +$ curl 127.0.0.1:8500/v1/kv/foo?token=973a31ea-1ec4-c2de-0f63-623f477c2510 [{"CreateIndex":12,"ModifyIndex":53,"LockIndex":4,"Key":"foo","Flags":3304740253564472344,"Value":"YmF6"}] $ curl -X PUT -d 'test' 127.0.0.1:8500/v1/kv/foo?token=973a31ea-1ec4-c2de-0f63-623f477c2510 diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/cubbyhole/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/cubbyhole/index.html.md index 464a64f..d991125 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/cubbyhole/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/cubbyhole/index.html.md @@ -14,55 +14,19 @@ The `cubbyhole` secret backend is used to store arbitrary secrets within the configured physical storage for Vault. 
It is mounted at the `cubbyhole/` prefix by default and cannot be mounted elsewhere or removed. -This backend differs from the `generic` backend in that the `generic` backend's +This backend differs from the `kv` backend in that the `kv` backend's values are accessible to any token with read privileges on that path. In `cubbyhole`, paths are scoped per token; no token can access another token's cubbyhole, whether to read, write, list, or for any other operation. When the token expires, its cubbyhole is destroyed. -Also unlike the `generic` backend, because the cubbyhole's lifetime is linked +Also unlike the `kv` backend, because the cubbyhole's lifetime is linked to that of an authentication token, there is no concept of a TTL or refresh interval for values contained in the token's cubbyhole. Writing to a key in the `cubbyhole` backend will replace the old value; the sub-fields are not merged together. -## Response Wrapping - -Starting in Vault 0.6, almost any response (except those from `sys/` endpoints) -from Vault can be wrapped (see the [Response -Wrapping](/docs/concepts/response-wrapping.html) -concept page for details). - -The TTL for the token is set by the client using the `X-Vault-Wrap-TTL` header -and can be either an integer number of seconds or a string duration of seconds -(`15s`), minutes (`20m`), or hours (`25h`). When using the Vault CLI, you can -set this via the `-wrap-ttl` parameter. Response wrapping is per-request; it is -the presence of a value in this header that activates wrapping of the response. - -If a client requests wrapping: - -1. The original response is serialized to JSON -2. A new single-use token is generated with a TTL as supplied by the client -3. Internally, the original response JSON is stored in the single-use token's - cubbyhole. -4. A new response is generated, with the token ID and the token TTL stored in - the new response's `wrap_info` dict -5. 
The new response is returned to the caller - -To get the original value, if using the API, perform a write on -`sys/wrapping/unwrap`, passing in the wrapping token ID. The original value -will be returned. - -If using the CLI, passing the wrapping token's ID to the `vault unwrap` command -will return the original value; `-format` and `-field` can be set like with -`vault read`. - -If the original response is an authentication response containing a token, the -token's accessor will be made available to the caller. This allows a privileged -caller to generate tokens for clients and be able to manage the tokens' -lifecycle while not being exposed to the actual client token IDs. - ## Quick Start The `cubbyhole` backend allows for writing keys with arbitrary values. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/custom.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/custom.html.md deleted file mode 100644 index 419d4c6..0000000 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/custom.html.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -layout: "docs" -page_title: "Custom Secret Backend" -sidebar_current: "docs-secrets-custom" -description: |- - Create custom secret backends for Vault. ---- - -# Custom Secret Backends - -Vault doesn't currently support the creation of custom secret backends. -The primary reason is because we want to ensure the core of Vault is -secure before attempting any sort of plug-in system. We're interested -in supporting custom secret backends, but don't yet have a clear strategy -or timeline to do. - -In the mean time, you can use the -[generic backend](/docs/secrets/generic/index.html) to support custom -data with custom leases. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/cassandra.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/cassandra.html.md new file mode 100644 index 0000000..8fa888b --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/cassandra.html.md @@ -0,0 +1,61 @@ +--- +layout: "docs" +page_title: "Cassandra Database Plugin - Database Secret Backend" +sidebar_current: "docs-secrets-databases-cassandra" +description: |- + The Cassandra plugin for Vault's Database backend generates database credentials to access Cassandra. +--- + +# Cassandra Database Plugin + +Name: `cassandra-database-plugin` + +The Cassandra Database Plugin is one of the supported plugins for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the Cassandra database. + +See the [Database Backend](/docs/secrets/databases/index.html) docs for more +information about setting up the Database Backend. + +## Quick Start + +After the Database Backend is mounted you can configure a cassandra connection +by specifying this plugin as the `"plugin_name"` argument. Here is an example +cassandra configuration: + +``` +$ vault write database/config/cassandra \ + plugin_name=cassandra-database-plugin \ + allowed_roles="readonly" \ + hosts=localhost \ + username=cassandra \ + password=cassandra + +The following warnings were returned from the Vault server: +* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any. +``` + +Once the cassandra connection is configured we can add a role: + +``` +$ vault write database/roles/readonly \ + db_name=cassandra \ + creation_statements="CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER; \ + GRANT SELECT ON ALL KEYSPACES TO {{username}};" \ + default_ttl="1h" \ + max_ttl="24h" + + +Success! 
Data written to: database/roles/readonly +``` + +This role can be used to retrieve a new set of credentials by querying the +"database/creds/readonly" endpoint. + +## API + +The full list of configurable options can be seen in the [Cassandra database +plugin API](/api/secret/databases/cassandra.html) page. + +For more information on the Database secret backend's HTTP API please see the [Database secret +backend API](/api/secret/databases/index.html) page. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/custom.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/custom.html.md new file mode 100644 index 0000000..8a66436 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/custom.html.md @@ -0,0 +1,117 @@ +--- +layout: "docs" +page_title: "Custom Database Plugins - Database Secret Backend" +sidebar_current: "docs-secrets-databases-custom" +description: |- + Creating custom database plugins for Vault's Database backend to generate credentials for a database. +--- + +# Custom Database Plugins + +The Database backend allows new functionality to be added through a plugin +interface without needing to modify vault's core code. This allows you to write +your own code to generate credentials in any database you wish. It also allows +databases that require dynamically linked libraries to be used as plugins while +keeping Vault itself statically linked. + +~> **Advanced topic!** Plugin development is a highly advanced +topic in Vault, and is not required knowledge for day-to-day usage. +If you don't plan on writing any plugins, we recommend not reading +this section of the documentation. + +Please read the [Plugins internals](/docs/internals/plugins.html) docs for more +information about the plugin system before getting started building your +Database plugin. + +## Plugin Interface + +All plugins for the Database backend must implement the same simple interface.
+ +```go +type Database interface { + Type() (string, error) + CreateUser(statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) + RenewUser(statements Statements, username string, expiration time.Time) error + RevokeUser(statements Statements, username string) error + + Initialize(config map[string]interface{}, verifyConnection bool) error + Close() error +} +``` + +You'll notice the first parameter to a number of those functions is a +`Statements` struct. This struct is used to pass the Role's configured +statements to the plugin on function call. The struct is defined as: + +```go +type Statements struct { + CreationStatements string + RevocationStatements string + RollbackStatements string + RenewStatements string +} +``` + +It is up to your plugin to replace the `{{name}}`, `{{password}}`, and +`{{expiration}}` in these statements with the proper values. + +The `Initialize` function is passed a map of keys to values; this data is what the +user specified as the configuration for the plugin. Your plugin should use this +data to make connections to the database. It is also passed a boolean value +specifying whether or not your plugin should return an error if it is unable to +connect to the database. + +## Serving your plugin + +Once your plugin is built you should pass it to vault's `plugins` package by +calling the `Serve` method: + +```go +package main + +import ( + "github.com/hashicorp/vault/plugins" +) + +func main() { + plugins.Serve(new(MyPlugin), nil) +} +``` + +Replacing `MyPlugin` with the actual implementation of your plugin. + +The second parameter to `Serve` takes in an optional vault `api.TLSConfig` for +configuring the plugin to communicate with vault for the initial unwrap call. +This is useful if your vault setup requires client certificate checks. This +config won't be used once the plugin unwraps its own TLS cert and key.
+ +## Running your plugin + +The above main package, once built, will supply you with a binary of your +plugin. We also recommend if you are planning on distributing your plugin to +build with [gox](https://github.com/mitchellh/gox) for cross platform builds. + +To use your plugin with the Database backend you need to place the binary in the +plugin directory as specified in the [plugin internals](/docs/internals/plugins.html) docs. + +You should now be able to register your plugin into the vault catalog. To do +this your token will need sudo permissions. + +``` +$ vault write sys/plugins/catalog/myplugin-database-plugin \ + sha_256= \ + command="myplugin" +Success! Data written to: sys/plugins/catalog/myplugin-database-plugin +``` + +Now you should be able to configure your plugin like any other: + +``` +$ vault write database/config/myplugin \ + plugin_name=myplugin-database-plugin \ + allowed_roles="readonly" \ + myplugins_connection_details=.... + +The following warnings were returned from the Vault server: +* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any. +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/hanadb.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/hanadb.html.md new file mode 100644 index 0000000..295bb35 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/hanadb.html.md @@ -0,0 +1,59 @@ +--- +layout: "docs" +page_title: "HANA Database Plugin - Database Secret Backend" +sidebar_current: "docs-secrets-databases-HANA" +description: |- + The HANA plugin for Vault's Database backend generates database credentials to access SAP HANA Database. +--- + +# HANA Database Plugin + +Name: `hana-database-plugin` + +The HANA Database Plugin is one of the supported plugins for the Database +backend. 
This plugin generates database credentials dynamically based on +configured roles for the HANA database. + +See the [Database Backend](/docs/secrets/databases/index.html) docs for more +information about setting up the Database Backend. + +## Quick Start + +After the Database Backend is mounted you can configure a HANA connection +by specifying this plugin as the `"plugin_name"` argument. Here is an example +configuration: + +``` +$ vault write database/config/hana \ + plugin_name=hana-database-plugin \ + connection_url="hdb://username:password@localhost:1433" \ + allowed_roles="readonly" + +The following warnings were returned from the Vault server: +* Read access to this endpoint should be controlled via ACLs as it will +return the connection details as is, including passwords, if any. +``` + +Once the HANA connection is configured we can add a role: + +``` +$ vault write database/roles/readonly \ + db_name=hana \ + creation_statements="CREATE USER {{name}} PASSWORD {{password}} VALID UNTIL '{{expiration}}';\ + CALL GRANT_ACTIVATED_ROLE ( 'sap.hana.admin.roles::Monitoring', '{{name}}' );" \ + default_ttl="12h" \ + max_ttl="24h" + +Success! Data written to: database/roles/readonly +``` + +This role can now be used to retrieve a new set of credentials by querying the +"database/creds/readonly" endpoint. + +## API + +The full list of configurable options can be seen in the [HANA database +plugin API](/api/secret/databases/hanadb.html) page. + +For more information on the Database secret backend's HTTP API please see the [Database secret +backend API](/api/secret/databases/index.html) page. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/index.html.md new file mode 100644 index 0000000..28c4a91 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/index.html.md @@ -0,0 +1,102 @@ +--- +layout: "docs" +page_title: "Database Secret Backend" +sidebar_current: "docs-secrets-databases" +description: |- + Top page for database secret backend information +--- + +# Databases + +Name: `Database` + +The Database secret backend for Vault generates database credentials dynamically +based on configured roles. It works with a number of different databases through +a plugin interface. There are a number of builtin database types and an exposed +framework for running custom database types for extendability. This means that +services that need to access a database no longer need to hardcode credentials: +they can request them from Vault, and use Vault's leasing mechanism to more +easily roll keys. + +Additionally, it introduces a new ability: with every service accessing the +database with unique credentials, it makes auditing much easier when +questionable data access is discovered: you can track it down to the specific +instance of a service based on the SQL username. + +Vault makes use of its own internal revocation system to ensure that users +become invalid within a reasonable time of the lease expiring. + +This page will show a quick start for this backend. For detailed documentation +on every path, use vault path-help after mounting the backend. + +## Quick Start + +The first step in using the Database backend is mounting it. + +```text +$ vault mount database +Successfully mounted 'database' at 'database'! +``` + +Next, we must configure this backend to connect to a database. 
In this example +we will connect to a MySQL database, but the configuration details needed for +other plugin types can be found in their docs pages. This backend can configure +multiple database connections, therefore a name for the connection must be +provided; we'll call this one simply "mysql". + +``` +$ vault write database/config/mysql \ + plugin_name=mysql-database-plugin \ + connection_url="root:mysql@tcp(127.0.0.1:3306)/" \ + allowed_roles="readonly" + +The following warnings were returned from the Vault server: +* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any. +``` + +The next step is to configure a role. A role is a logical name that maps to a +policy used to generate those credentials. A role needs to be configured with +the database name we created above, and the default/max TTLs. For example, let's +create a "readonly" role: + +``` +$ vault write database/roles/readonly \ + db_name=mysql \ + creation_statements="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';" \ + default_ttl="1h" \ + max_ttl="24h" +Success! Data written to: database/roles/readonly +``` +By writing to the roles/readonly path we are defining the readonly role. This +role will be created by evaluating the given creation statements. By default, +the {{name}} and {{password}} fields will be populated by the plugin with +dynamically generated values. In other plugins the {{expiration}} field could +also be supported. This SQL statement is creating the named user, and then +granting it SELECT or read-only privileges to tables in the database. More +complex GRANT queries can be used to customize the privileges of the role. +Custom revocation statements could be passed too, but this plugin has a default +statement we can use.
+ +To generate a new set of credentials, we simply read from that role: + +``` +$ vault read database/creds/readonly +Key Value +--- ----- +lease_id database/creds/readonly/2f6a614c-4aa2-7b19-24b9-ad944a8d4de6 +lease_duration 1h0m0s +lease_renewable true +password 8cab931c-d62e-a73d-60d3-5ee85139cd66 +username v-root-e2978cd0- +``` + +## Custom Plugins + +This backend allows custom database types to be run through the exposed plugin +interface. Please see the [Custom database +plugin](/docs/secrets/databases/custom.html) for more information. + +## API + +The Database secret backend has a full HTTP API. Please see the [Database secret +backend API](/api/secret/databases/index.html) for more details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/mongodb.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/mongodb.html.md new file mode 100644 index 0000000..004883d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/mongodb.html.md @@ -0,0 +1,57 @@ +--- +layout: "docs" +page_title: "MongoDB Database Plugin - Database Secret Backend" +sidebar_current: "docs-secrets-databases-mongodb" +description: |- + The MongoDB plugin for Vault's Database backend generates database credentials to access MongoDB. +--- + +# MongoDB Database Plugin + +Name: `mongodb-database-plugin` + +The MongoDB Database Plugin is one of the supported plugins for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the MongoDB database. + +See the [Database Backend](/docs/secrets/databases/index.html) docs for more +information about setting up the Database Backend. + +## Quick Start + +After the Database Backend is mounted you can configure a MongoDB connection +by specifying this plugin as the `"plugin_name"` argument. 
Here is an example +MongoDB configuration: + +``` +$ vault write database/config/mongodb \ + plugin_name=mongodb-database-plugin \ + allowed_roles="readonly" \ + connection_url="mongodb://admin:Password!@mongodb.acme.com:27017/admin?ssl=true" + +The following warnings were returned from the Vault server: +* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any. +``` + +Once the MongoDB connection is configured we can add a role: + +``` +$ vault write database/roles/readonly \ + db_name=mongodb \ + creation_statements='{ "db": "admin", "roles": [{ "role": "readWrite" }, {"role": "read", "db": "foo"}] }' \ + default_ttl="1h" \ + max_ttl="24h" + +Success! Data written to: database/roles/readonly +``` + +This role can be used to retrieve a new set of credentials by querying the +"database/creds/readonly" endpoint. + +## API + +The full list of configurable options can be seen in the [MongoDB database +plugin API](/api/secret/databases/mongodb.html) page. + +For more information on the Database secret backend's HTTP API please see the [Database secret +backend API](/api/secret/databases/index.html) page. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/mssql.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/mssql.html.md new file mode 100644 index 0000000..2b996e1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/mssql.html.md @@ -0,0 +1,59 @@ +--- +layout: "docs" +page_title: "MSSQL Database Plugin - Database Secret Backend" +sidebar_current: "docs-secrets-databases-mssql" +description: |- + The MSSQL plugin for Vault's Database backend generates database credentials to access Microsoft SQL Server. +--- + +# MSSQL Database Plugin + +Name: `mssql-database-plugin` + +The MSSQL Database Plugin is one of the supported plugins for the Database +backend. 
This plugin generates database credentials dynamically based on +configured roles for the MSSQL database. + +See the [Database Backend](/docs/secrets/databases/index.html) docs for more +information about setting up the Database Backend. + +## Quick Start + +After the Database Backend is mounted you can configure a MSSQL connection +by specifying this plugin as the `"plugin_name"` argument. Here is an example +configuration: + +``` +$ vault write database/config/mssql \ + plugin_name=mssql-database-plugin \ + connection_url='sqlserver://sa:yourStrong(!)Password@localhost:1433' \ + allowed_roles="readonly" + +The following warnings were returned from the Vault server: +* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any. +``` + +Once the MSSQL connection is configured we can add a role: + +``` +$ vault write database/roles/readonly \ + db_name=mssql \ + creation_statements="CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';\ + CREATE USER [{{name}}] FOR LOGIN [{{name}}];\ + GRANT SELECT ON SCHEMA::dbo TO [{{name}}];" \ + default_ttl="1h" \ + max_ttl="24h" + +Success! Data written to: database/roles/readonly +``` + +This role can now be used to retrieve a new set of credentials by querying the +"database/creds/readonly" endpoint. + +## API + +The full list of configurable options can be seen in the [MSSQL database +plugin API](/api/secret/databases/mssql.html) page. + +For more information on the Database secret backend's HTTP API please see the [Database secret +backend API](/api/secret/databases/index.html) page. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/mysql-maria.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/mysql-maria.html.md new file mode 100644 index 0000000..3b730fe --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/mysql-maria.html.md @@ -0,0 +1,97 @@ +--- +layout: "docs" +page_title: "MySQL/MariaDB Database Plugin - Database Secret Backend" +sidebar_current: "docs-secrets-databases-mysql-maria" +description: |- + The MySQL/MariaDB plugin for Vault's Database backend generates database credentials to access MySQL and MariaDB servers. +--- + +# MySQL/MariaDB Database Plugin + +Name: `mysql-database-plugin`, `mysql-aurora-database-plugin`, `mysql-rds-database-plugin`, +`mysql-legacy-database-plugin` + +The MySQL Database Plugin is one of the supported plugins for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the MySQL database. + +This plugin has a few different instances built into vault, each instance is for +a slightly different MySQL driver. The only difference between these plugins is +the length of usernames generated by the plugin as different versions of mysql +accept different lengths. The available plugins are: + + - mysql-database-plugin + - mysql-aurora-database-plugin + - mysql-rds-database-plugin + - mysql-legacy-database-plugin + +See the [Database Backend](/docs/secrets/databases/index.html) docs for more +information about setting up the Database Backend. + +## Quick Start + +After the Database Backend is mounted you can configure a MySQL connection +by specifying this plugin as the `"plugin_name"` argument. 
Here is an example +configuration: + +``` +$ vault write database/config/mysql \ + plugin_name=mysql-database-plugin \ + connection_url="root:mysql@tcp(127.0.0.1:3306)/" \ + allowed_roles="readonly" + +The following warnings were returned from the Vault server: +* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any. +``` + +Once the MySQL connection is configured we can add a role: + +``` +$ vault write database/roles/readonly \ + db_name=mysql \ + creation_statements="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';" \ + default_ttl="1h" \ + max_ttl="24h" + +Success! Data written to: database/roles/readonly +``` + +This role can now be used to retrieve a new set of credentials by querying the +"database/creds/readonly" endpoint. + +## API + +The full list of configurable options can be seen in the [MySQL database +plugin API](/api/secret/databases/mysql-maria.html) page. + +For more information on the Database secret backend's HTTP API please see the [Database secret +backend API](/api/secret/databases/index.html) page. + +## Examples + +### Using wildcards in grant statements + +MySQL supports using wildcards in grant statements. These are sometimes needed +by applications which expect access to a large number of databases inside MySQL. +This can be realized by using a wildcard in the grant statement. For example if +you want the user created by Vault to have access to all databases starting with +`fooapp_` you could use the following creation statement: + +``` +CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; GRANT SELECT ON `fooapp\_%`.* TO '{{name}}'@'%'; +``` + +MySQL expects the part in which the wildcards are to be placed inside backticks. 
+If you want to add this creation statement to Vault via the Vault CLI you cannot +simply paste the above statement on the CLI because the shell will interpret the +text between the backticks as something that must be executed. The easiest way to +get around this is to encode the creation statement as Base64 and feed this to Vault. +For example: + +``` +$ vault write database/roles/readonly \ + db_name=mysql \ + creation_statements="Q1JFQVRFIFVTRVIgJ3t7bmFtZX19J0AnJScgSURFTlRJRklFRCBCWSAne3twYXNzd29yZH19JzsgR1JBTlQgU0VMRUNUIE9OIGBmb29hcHBcXyVgLiogVE8gJ3t7bmFtZX19J0AnJSc7" \ + default_ttl="1h" \ + max_ttl="24h" +``` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/oracle.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/oracle.html.md new file mode 100644 index 0000000..7a333fc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/oracle.html.md @@ -0,0 +1,73 @@ +--- +layout: "docs" +page_title: "Oracle Database Plugin" +sidebar_current: "docs-secrets-databases-oracle" +description: |- + The Oracle Database plugin for Vault's Database backend generates database credentials to access Oracle Database servers. +--- + +# Oracle Database Plugin + +Name: `oracle-database-plugin` + +The Oracle Database Plugin is an external plugin for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the Oracle database. + +The Oracle Database Plugin does not live in the core Vault code tree and can be found +at its own git repository here: [hashicorp/vault-plugin-database-oracle](https://github.com/hashicorp/vault-plugin-database-oracle) + +See the [Database Backend](/docs/secrets/databases/index.html) docs for more +information about setting up the Database Backend. + +## Quick Start + +After the Database Backend is mounted you can run the plugin and configure a +connection to the Oracle Database. 
+ +First the plugin must be built and registered to Vault's plugin catalog. To +build the plugin see the plugin's code repository. Once the plugin is built and +the binary is placed in Vault's plugin directory the catalog should be updated: + +``` +$ vault write sys/plugins/catalog/oracle-database-plugin \ + sha_256= \ + command=oracle-database-plugin +``` + +Once the plugin exists in the plugin catalog the Database backend can configure +a connection for the Oracle Database: + +``` +$ vault write database/config/oracle \ + plugin_name=oracle-database-plugin \ + connection_url="system/Oracle@localhost:1521/OraDoc.localhost" \ + allowed_roles="readonly" + +The following warnings were returned from the Vault server: +* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any. +``` + +Once the Oracle connection is configured we can add a role: + +``` +$ vault write database/roles/readonly \ + db_name=oracle \ + creation_statements="CREATE USER {{name}} IDENTIFIED BY {{password}}; GRANT CONNECT TO {{name}}; GRANT CREATE SESSION TO {{name}};" \ + default_ttl="1h" \ + max_ttl="24h" + +Success! Data written to: database/roles/readonly +``` + +This role can now be used to retrieve a new set of credentials by querying the +"database/creds/readonly" endpoint. + +## API + +The full list of configurable options can be seen in the [Oracle database +plugin API](/api/secret/databases/oracle.html) page. + +For more information on the Database secret backend's HTTP API please see the [Database secret +backend API](/api/secret/databases/index.html) page. 
+ diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/postgresql.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/postgresql.html.md new file mode 100644 index 0000000..b05b45c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/databases/postgresql.html.md @@ -0,0 +1,59 @@ +--- +layout: "docs" +page_title: "PostgreSQL Database Plugin - Database Secret Backend" +sidebar_current: "docs-secrets-databases-postgresql" +description: |- + The PostgreSQL plugin for Vault's Database backend generates database credentials to access PostgreSQL. +--- + +# PostgreSQL Database Plugin + +Name: `postgresql-database-plugin` + +The PostgreSQL Database Plugin is one of the supported plugins for the Database +backend. This plugin generates database credentials dynamically based on +configured roles for the PostgreSQL database. + +See the [Database Backend](/docs/secrets/databases/index.html) docs for more +information about setting up the Database Backend. + +## Quick Start + +After the Database Backend is mounted you can configure a PostgreSQL connection +by specifying this plugin as the `"plugin_name"` argument. Here is an example +configuration: + +``` +$ vault write database/config/postgresql \ + plugin_name=postgresql-database-plugin \ + allowed_roles="readonly" \ + connection_url="postgresql://root:root@localhost:5432/" + +The following warnings were returned from the Vault server: +* Read access to this endpoint should be controlled via ACLs as it will return the connection details as is, including passwords, if any. +``` + +Once the PostgreSQL connection is configured we can add a role. 
The PostgreSQL +plugin replaces `{{expiration}}` in statements with a formatted timestamp: + +``` +$ vault write database/roles/readonly \ + db_name=postgresql \ + creation_statements="CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; \ + GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"{{name}}\";" \ + default_ttl="1h" \ + max_ttl="24h" + +Success! Data written to: database/roles/readonly +``` + +This role can be used to retrieve a new set of credentials by querying the +"database/creds/readonly" endpoint. + +## API + +The full list of configurable options can be seen in the [PostgreSQL database +plugin API](/api/secret/databases/postgresql.html) page. + +For more information on the Database secret backend's HTTP API please see the [Database secret +backend API](/api/secret/databases/index.html) page. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/identity/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/identity/index.html.md new file mode 100644 index 0000000..540b6a1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/identity/index.html.md @@ -0,0 +1,41 @@ +--- +layout: "docs" +page_title: "Identity Secret Backend" +sidebar_current: "docs-secrets-identity" +description: |- + The Identity secret backend for Vault manages client identities. +--- + +# Identity Secret Backend + +Name: `identity` + +The Identity secret backend is the identity management solution for Vault. It +internally maintains the clients who are recognized by Vault. Each client is +internally termed as an `Entity`. An entity can have multiple `Personas`. For +example, a single user who has accounts in both Github and LDAP, can be mapped +to a single entity in Vault that has 2 personas, one of type Github and one of +type LDAP. 
When a client authenticates via any of the credential backends +(except the Token backend), Vault creates a new entity and attaches a new +persona to it, if an entity doesn't already exist. The entity identifier will +be tied to the authenticated token. When such tokens are put to use, their +entity identifiers are audit logged, marking a trail of actions performed by +specific users. + +Identity store allows operators to **manage** the entities in Vault. Entities +can be created and personas can be tied to entities, via the ACL'd API. There +can be policies set on the entities which add capabilities to the tokens that +are tied to entity identifiers. The capabilities granted to tokens via the +entities are **an addition** to the existing capabilities of the token and +**not** a replacement. Note that the additional capabilities of the token that +get inherited from entities are computed at request time. This provides +flexibility in controlling the access of tokens that are already issued. + +This backend will be mounted by default. This backend cannot be unmounted or +remounted. + +## API + +The Identity secret backend has a full HTTP API. Please see the +[Identity secret backend API](/api/secret/identity/index.html) for more +details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/index.html.md index 7a1d32d..ea8bb1c 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/index.html.md @@ -11,7 +11,7 @@ description: |- Secret backends are the components in Vault which store and generate secrets. -Some secret backends, such as "generic", simply store and read +Some secret backends, such as "kv", simply store and read secrets verbatim. Other secret backends, such as "aws", create _dynamic secrets_: secrets that are made on demand. @@ -21,7 +21,7 @@ in Vault. 
They behave very similarly to a virtual filesystem: any read/write/delete is sent to the secret backend, and the secret backend can choose to react to that operation however it sees fit. -For example, the "generic" backend passes through any operation back +For example, the "kv" backend passes through any operation back to the configured storage backend for Vault. A "read" turns into a "read" of the storage backend at the same path, a "write" turns into a write, etc. This is a lot like a normal filesystem. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/generic/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/kv/index.html.md similarity index 72% rename from vendor/github.com/hashicorp/vault/website/source/docs/secrets/generic/index.html.md rename to vendor/github.com/hashicorp/vault/website/source/docs/secrets/kv/index.html.md index e3861c8..8a4994b 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/generic/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/kv/index.html.md @@ -1,22 +1,22 @@ --- layout: "docs" -page_title: "Generic Secret Backend" -sidebar_current: "docs-secrets-generic" +page_title: "Key/Value Secret Backend" +sidebar_current: "docs-secrets-kv" description: |- - The generic secret backend can store arbitrary secrets. + The key/value secret backend can store arbitrary secrets. --- -# Generic Secret Backend +# Key/Value Secret Backend -Name: `generic` +Name: `kv` -The generic secret backend is used to store arbitrary secrets within +The key/value secret backend is used to store arbitrary secrets within the configured physical storage for Vault. If you followed along with -the getting started guide, you interacted with a generic secret backend +the getting started guide, you interacted with a key/value secret backend via the `secret/` prefix that Vault mounts by default. 
You can mount as many of these backends at different mount points as you like. -Writing to a key in the `generic` backend will replace the old value; +Writing to a key in the `kv` backend will replace the old value; sub-fields are not merged together. This backend honors the distinction between the `create` and `update` @@ -28,7 +28,7 @@ secret's path. ## Quick Start -The generic backend allows for writing keys with arbitrary values. When data is +The kv backend allows for writing keys with arbitrary values. When data is returned, the `lease_duration` field (in the API JSON) or `refresh_interval` field (on the CLI) gives a hint as to how often a reader should look for a new value. This comes from the value of the `default_lease_ttl` set on the mount, @@ -42,7 +42,9 @@ normal `lease_duration`. However, the given value will also still be returned exactly as specified, so you are free to use that key in any way that you like if it fits your input data. -As an example, we can write a new key "foo" to the generic backend mounted at +The backend _never_ removes data on its own; the `ttl` key is merely advisory. + +As an example, we can write a new key "foo" to the kv backend mounted at "secret/" by default: ``` @@ -70,6 +72,6 @@ seconds (one hour) as specified. ## API -The Generic secret backend has a full HTTP API. Please see the -[Generic secret backend API](/api/secret/generic/index.html) for more +The Key/Value secret backend has a full HTTP API. Please see the +[Key/Value secret backend API](/api/secret/kv/index.html) for more details. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mongodb/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mongodb/index.html.md index 678dc35..6542781 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mongodb/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mongodb/index.html.md @@ -10,6 +10,11 @@ description: |- Name: `mongodb` +~> **Deprecation Note:** This backend is deprecated in favor of the +combined databases backend added in v0.7.1. See the documentation for +the new implementation of this backend at +[MongoDB Database Plugin](/docs/secrets/databases/mongodb.html). + The `mongodb` secret backend for Vault generates MongoDB database credentials dynamically based on configured roles. This means that services that need to access a MongoDB database no longer need to hard-code credentials: they @@ -30,7 +35,7 @@ on every path, use `vault path-help` after mounting the backend. ## Quick Start The first step to using the mongodb backend is to mount it. -Unlike the `generic` backend, the `mongodb` backend is not mounted by default. +Unlike the `kv` backend, the `mongodb` backend is not mounted by default. ``` $ vault mount mongodb diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mssql/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mssql/index.html.md index 41b7bd4..443ba6f 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mssql/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mssql/index.html.md @@ -10,6 +10,11 @@ description: |- Name: `mssql` +~> **Deprecation Note:** This backend is deprecated in favor of the +combined databases backend added in v0.7.1. See the documentation for +the new implementation of this backend at +[MSSQL Database Plugin](/docs/secrets/databases/mssql.html). 
+ The MSSQL secret backend for Vault generates database credentials dynamically based on configured roles. This means that services that need to access a database no longer need to hardcode credentials: they can request @@ -29,7 +34,7 @@ on every path, use `vault path-help` after mounting the backend. ## Quick Start The first step to using the mssql backend is to mount it. -Unlike the `generic` backend, the `mssql` backend is not mounted by default. +Unlike the `kv` backend, the `mssql` backend is not mounted by default. ``` $ vault mount mssql diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mysql/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mysql/index.html.md index 0ac9077..cf970c9 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mysql/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/mysql/index.html.md @@ -10,6 +10,11 @@ description: |- Name: `mysql` +~> **Deprecation Note:** This backend is deprecated in favor of the +combined databases backend added in v0.7.1. See the documentation for +the new implementation of this backend at +[MySQL/MariaDB Database Plugin](/docs/secrets/databases/mysql-maria.html). + The MySQL secret backend for Vault generates database credentials dynamically based on configured roles. This means that services that need to access a database no longer need to hardcode credentials: they can request @@ -29,7 +34,7 @@ on every path, use `vault path-help` after mounting the backend. ## Quick Start The first step to using the mysql backend is to mount it. -Unlike the `generic` backend, the `mysql` backend is not mounted by default. +Unlike the `kv` backend, the `mysql` backend is not mounted by default. 
``` $ vault mount mysql diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/pki/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/pki/index.html.md index 62e0ef1..1ac4904 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/pki/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/pki/index.html.md @@ -127,7 +127,7 @@ endpoint. #### Mount the backend -The first step to using the PKI backend is to mount it. Unlike the `generic` +The first step to using the PKI backend is to mount it. Unlike the `kv` backend, the `pki` backend is not mounted by default. ```text diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/postgresql/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/postgresql/index.html.md index 20829a5..4c4a363 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/postgresql/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/postgresql/index.html.md @@ -10,6 +10,11 @@ description: |- Name: `postgresql` +~> **Deprecation Note:** This backend is deprecated in favor of the +combined databases backend added in v0.7.1. See the documentation for +the new implementation of this backend at +[PostgreSQL Database Plugin](/docs/secrets/databases/postgresql.html). + The PostgreSQL secret backend for Vault generates database credentials dynamically based on configured roles. This means that services that need to access a database no longer need to hardcode credentials: they can request @@ -30,7 +35,7 @@ on every path, use `vault path-help` after mounting the backend. ## Quick Start The first step to using the PostgreSQL backend is to mount it. -Unlike the `generic` backend, the `postgresql` backend is not mounted by default. +Unlike the `kv` backend, the `postgresql` backend is not mounted by default. 
```text $ vault mount postgresql diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/rabbitmq/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/rabbitmq/index.html.md index d5b9c4c..56b99b8 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/rabbitmq/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/rabbitmq/index.html.md @@ -31,7 +31,7 @@ on every path, use `vault path-help` after mounting the backend. ## Quick Start The first step to using the RabbitMQ backend is to mount it. Unlike the -`generic` backend, the `rabbitmq` backend is not mounted by default. +`kv` backend, the `rabbitmq` backend is not mounted by default. ```text $ vault mount rabbitmq diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/dynamic-ssh-keys.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/dynamic-ssh-keys.html.md new file mode 100644 index 0000000..a77c1df --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/dynamic-ssh-keys.html.md @@ -0,0 +1,194 @@ +--- +layout: "docs" +page_title: "Dynamic SSH Keys - SSH Secret Backend" +sidebar_current: "docs-secrets-ssh-dynamic-ssh-keys" +description: |- + When using this type, the administrator registers a secret key with + appropriate sudo privileges on the remote machines. For every authorized + credential request, Vault creates a new SSH key pair and appends the + newly-generated public key to the authorized_keys file for the configured + username on the remote host. Vault uses a configurable install script to + achieve this. +--- + +# Dynamic SSH Keys + +~> **Deprecated**: There are several serious drawbacks and security implications +inherent in this type. Because of these drawbacks, please use the SSH CA or OTP +types whenever possible. 
+ +When using this type, the administrator registers a secret key with appropriate +`sudo` privileges on the remote machines; for every authorized credential +request, Vault creates a new SSH key pair and appends the newly-generated public +key to the `authorized_keys` file for the configured username on the remote +host. Vault uses a configurable install script to achieve this. + +The backend does not prompt for `sudo` passwords; the `NOPASSWD` option for +sudoers should be enabled at all remote hosts for the Vault administrative +user. + +The private key returned to the user will be leased and can be renewed if +desired. Once the key is given to the user, Vault will not know when it gets +used or how many times it gets used. Therefore, Vault **WILL NOT** and cannot +audit the SSH session establishments. + +When the credential lease expires, Vault removes the secret key from the remote +machine. + +This page will show a quick start for this backend. For detailed documentation +on every path, use `vault path-help` after mounting the backend. + +### Drawbacks + +The dynamic key type has several serious drawbacks: + +1. _Audit logs are unreliable_: Vault can only log when users request + credentials, not when they use the given keys. If user A and user B both + request access to a machine, and are given a lease valid for five minutes, + it is impossible to know whether two accesses to that user account on the + remote machine were A, A; A, B; B, A; or B, B. +2. _Generating dynamic keys consumes entropy_: Unless equipped with a hardware + entropy generating device, a machine can quickly run out of entropy when + generating SSH keys. This will cause further requests for various Vault + operations to stall until more entropy is available, which could take a + significant amount of time, after which the next request for a new SSH key + will use the generated entropy and cause stalling again. +3. 
This type makes connections to client hosts; when this happens the host key + is *not* verified. + +### sudo + +In order to adjust the `authorized_keys` file for the desired user, Vault +connects via SSH to the remote machine as a separate user, and uses `sudo` to +gain the privileges required. An example `sudoers` file is shown below. + +File: `/etc/sudoers` + +```hcl +# This is a sample sudoers statement; you should modify it +# as appropriate to satisfy your security needs. +vaultadmin ALL=(ALL)NOPASSWD: ALL +``` + +### Configuration + +Next, infrastructure configuration must be registered with Vault via roles. +First, however, the shared secret key must be specified. + +### Mount the backend + +```text +$ vault mount ssh +Successfully mounted 'ssh' at 'ssh'! +``` + +#### Registering the shared secret key + +Register a key with a name; this key must have administrative capabilities on +the remote hosts. + +```text +$ vault write ssh/keys/dev_key \ + key=@dev_shared_key.pem +``` + +#### Create a Role + +Next, create a role. All of the machines contained within this CIDR block list +should be accessible using the registered shared secret key. + +```text +$ vault write ssh/roles/dynamic_key_role \ + key_type=dynamic \ + key=dev_key \ + admin_user=username \ + default_user=username \ + cidr_list=x.x.x.x/y +Success! Data written to: ssh/roles/dynamic_key_role +``` + +`cidr_list` is a comma separated list of CIDR blocks for which a role can +generate credentials. If this is empty, the role can only generate credentials +if it belongs to the set of zero-address roles. + +Zero-address roles, configured via `/ssh/config/zeroaddress` endpoint, takes +comma separated list of role names that can generate credentials for any IP +address. + +Use the `install_script` option to provide an install script if the remote +hosts do not resemble a typical Linux machine. The default script is compiled +into the Vault binary, but it is straight forward to specify an alternate. 
The +script takes three arguments which are explained in the comments. + +To see the default, see +[linux_install_script.go](https://github.com/hashicorp/vault/blob/master/builtin/logical/ssh/linux_install_script.go) + +### Create a credential + +Create a dynamic key for an IP of the remote host that is covered by +`dynamic_key_role`'s CIDR list. + +```text +$ vault write ssh/creds/dynamic_key_role ip=x.x.x.x +Key Value +lease_id ssh/creds/dynamic_key_role/8c4d2042-23bc-d6a8-42c2-6ff01cb83cf8 +lease_duration 600 +lease_renewable true +ip x.x.x.x +key -----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA5V/Y95qfGaUXRPkKNK9jgDHXPD2n5Ein+QTNnLSGrHtJUH7+ +pgs/5Hc4//124P9qHNmjIYQVyvcLreFgSrQCq4K8193hmypBYtsvCgvpc+jEwaGA +zK0QV7uc1z8KL7FuRAxpHJwB6+nubOzzqM03xsViHRhaWhYVHw2Vl4oputSHE7R9 +ugaTRg67wge4Nyi5RRL0RQcmW15/Vop8B6HpBSmZQy3enjg+32KbOWCMMTAPuF9/ +DgxSgZQaFMjGN4RjDreZI8Vv5zIiFJzZ3KVOWy8piI0PblLnDpU4Q0QSQ9A+Vr7b +JS22Lbet1Zbapl/n947/r1wGObLCc5Lilu//1QIDAQABAoIBAHWLfdO9sETjHp6h +BULkkpgScpuTeSN6vGHXvUrOFKn1cCfJPNR4tWBuXI6LJM2+9nEccwXs+4IMwjZ0 +ZfVCdI/SKtZxBXmP2PxBGMUMP7G/mn0kN64sDlD3ezOvQZgZVEmZFpCrvixYsG+v +qlpZ+HhrlJEWds7tvBsyyfNjwWjVIpm08zBmteFj4zu7OEcmGXEHDoxDXxyVP2BG +eLU/fM5JA2UEjfCQ1MIZ3rBtPePdz4LRpb+ajklqrUj1OHoiDrXa8EAf0/wDP9re +c1iH4bn7ZjYK0+IhZ+Pmw6gUftzZNWSC2kOLnZLdN/K7hgh0l0r0K/1eeXt43upB +WALNuiECgYEA8PM2Ob3XXKALF86PUewne4fCz9iixr/cIpvrEGrh9lyQRO8X5Jxb +ug38jEql4a574C6TSXfzxURza4P6lnfa0LvymmW0bhxZ5nev9kcAVnLKvpOUArTR +32k9bKXd6zp8Q9ZyVNwHRxcVs4YgwfJlcx8geC4o6YRiIjvcBQ9RVHkCgYEA87OK +lZDFBeEY/HVOxAQNXS5fgTd4U4DbwEJLv7SPk02v9oDkGHkpgMs4PcsIpCzsTpJ0 +oXMfLSxZ1lmZiuUvAupKj/7RjJ0XyjSMfm1Zs81epWj+boVfM4amZNHVLIWgddmM +XzXEZKByvi1gs7qFcjQz2DEbZltWO6dX14O4Fz0CgYEAlWSWyHJWZ02r0xT1c7vS +NxtTxH7zXftzR9oYgtNiStfVc4gy7kGr9c3aOjnGZAlFMRhvpevDrxnj3lO0OTsS +5rzBjM1mc6cMboLjDPW01eTSpBroeE0Ym0arGQQ2djSK+5yowsixknhTsj2FbfsW +v6wa+6jTIQY9ujAXGOQIbzECgYAYuXlw7SwgCZNYYappFqQodQD5giAyEJu66L74 +px/96N7WWoNJvFkqmPOOyV+KEIi0/ATbMGvUUHCY36RFRDU9zXldHJQz+Ogl+qja 
+VsvIAyj8DSfrHJrpBlsxVVyUVMZPzo+ARVs0flbF1qK9+Ul6qbMs1uaZvuCD0tmF +ovZ1XQKBgQDB0s7SDmAMgVjG8UBZgUru9vsDrxERT2BloptnnAjSiarLF5M+qeZO +7L4NLyVP39Z83eerEonzDAHHbvhPyi6n2YmnYhGjeP+lPZIVqGF9cpZD3q48YHZc +3ePn2/oLZrXKWOMyMwp2Uj+0SArCW+xMnoNp50sYNVR/JK3BPIdkag== +-----END RSA PRIVATE KEY----- +key_type dynamic +port 22 +username username +``` + +### Establish an SSH session + +Save the key to a file (e.g. `dyn_key.pem`) and then use it to establish an SSH +session. + +```text +$ ssh -i dyn_key.pem username@ +username@:~$ +``` + +### Automate it! + +Creation of new key, saving to a file, and using it to establish an SSH session +can all be done with a single Vault CLI command. + +```text +$ vault ssh -role dynamic_key_role username@ +username@:~$ +``` + +## API + +The SSH secret backend has a full HTTP API. Please see the +[SSH secret backend API](/api/secret/ssh/index.html) for more +details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/index.html.md index 5311669..124193c 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/index.html.md @@ -3,487 +3,30 @@ layout: "docs" page_title: "SSH Secret Backend" sidebar_current: "docs-secrets-ssh" description: |- - The SSH secret backend for Vault generates signed SSH certificates, dynamic SSH keys or One-Time-Passwords. + The Vault SSH secret backend provides secure authentication and authorization + for access to machines via the SSH protocol. There are multiple modes to the + Vault SSH backend including signed SSH certificates, dynamic SSH keys, and + one-time passwords. --- # SSH Secret Backend Name: `ssh` -Vault SSH backend tries to solve the problem of managing access to machine -infrastructure by providing different ways to issue SSH credentials. 
+The Vault SSH secret backend provides secure authentication and authorization +for access to machines via the SSH protocol. The Vault SSH backend helps manage +access to machine infrastructure, providing several ways to issue SSH +credentials. -The backend issues in 3 types of credentials: CA signed keys, Dynamic keys and -OTP keys. Read and carefully understand all the types before choosing the one -which best suits your needs. In relation to the dynamic key and OTP key type, -the CA key signing is the simplest and most powerful in terms of setup -complexity and in terms of being platform agnostic. +The Vault SSH secret backend supports the following modes. Each mode is +individually documented on its own page. -This page will show a quick start for this backend. For detailed documentation -on every path, use `vault path-help` after mounting the backend. +- [Signed SSH Certificates](/docs/secrets/ssh/signed-ssh-certificates.html) +- [One-time SSH Passwords](/docs/secrets/ssh/one-time-ssh-passwords.html) +- [Dynamic SSH Keys](/docs/secrets/ssh/dynamic-ssh-keys.html) DEPRECATED ----------------------------------------------------- -## I. CA Key Type +All guides assume a basic familiarity with the SSH protocol. -When using this type, an SSH CA signing key is generated or configured at the -backend's mount. This key will be used to sign other SSH keys. The private half -of the signing key always stays within Vault and the public half is exposed via -the API. Each mount of this backend represents a unique signing key pair. It is -recommended that the host keys and client keys are signed using different -mounts of this backend. - -### Mount a backend's instance for signing host keys - -```text -vault mount -path ssh-host-signer ssh -Successfully mounted 'ssh' at 'ssh-host-signer'! -``` - -### Mount a backend's instance for signing client keys - -```text -vault mount -path ssh-client-signer ssh -Successfully mounted 'ssh' at 'ssh-client-signer'! 
-``` - -### Configure the host CA certificate - -```text -vault write -f ssh-host-signer/config/ca -Key Value ---- ----- -public_key ssh-rsa -AAAAB3NzaC1yc2EAAAADAQABAAACAQDB7lbQ4ub8/7AbfnF4zntCxPGijXzdo50MrUz0I1Vz/n98pBPyrUEcNqFeCr6Qs9kRL8J8Bu48W/DtSxAiScIIlzyezulH+wxdZnCN3sxDUhFbTUosWstSt6P5BZbJsMTpfHfuYbOexewX9ljw636fUrRtU3/0Emq2FkOcKEv9v7z+Qwdaom+8R6CvRA0Kit0yyh6DXD+IjPnOmhYFoNH6ZhPU2xJ+7M16vLpCmH6OO1krHlDK+ZUvVKD2ZjfmbegoYEV/9FETSzVnQ/pOrP0SdwpmGhei7VO7hQwa6zOWIah66TYYdaIopHFha60rZPybliZFAi/CcUO7NYldq7Q2ty68RKp+Im2ROPbXLKYc2N+PThWwbxA1yQ7shvgSy/JEbAcYQuDLGn6tA6n5sXpvFcJe+vlf75w+laXIPUXTVHr2g1O5Irzwunpg03Pa+Kab8Hp8uq61XfTfNtCwHYr/K5YO27rHcpMA/gBKkOFmBbcI3VlUbi266HiyFANz0JMObzXS5hEGm90NevMD2nEFvZHYHx4hXRtoox7Bh4CCj2fUPaThnuTvv1hLBgryWxwDXL7OwHGx6wd/dpXo/1fG1DRUotfVcJP7k7FG9MUAB6tOR2RCuXEaTF6bv3tCpbsdzzzyhs/aIa6IeYRV4HUo3nW5g/IRT+6F/MLd63Cq5w== -``` - -The returned host CA public key should be added to `known_hosts` file in all -client machines with a `@cert-authority ` prefix, where `` can -be any value (single host, wildcard domain, wildcard) SSH accepts to define -hosts (e.g. `@cert-authority *.example.com ssh-rsa AAAA...`). This public key -can also be retrieved using `vault read ssh-host-signer/config/ca`. 
- -### Configure the client CA certificate - -```text -vault write -f ssh-client-signer/config/ca -Key Value ---- ----- -public_key ssh-rsa -AAAAB3NzaC1yc2EAAAADAQABAAACAQDoYiTm0QrK+NGkv8H/Sq1Q9l2PQwxY1ROSPpRqOoFAgCM7vtEZSGchm8j3/Da6Vj84sUg+09N+dwMZ6z3HG57dBWyfHGCbEt+fIJtRcCV7KkQ4xktBdapq5PychBg0bYIP0+JWXe7yOAR6s+DWJgUUHwmYiY0GlrzoztvtYcU/pvj4zWY/EWcCAYRiZThqJOJ9rDzhEzmVHVYelZ8dlehKZsUOQ3dY5fCHgWJ5/IKSfGUI1w1h/3ENAToBKInbW2J/Wy0mbsfUJhha84zgqSeHd4gZ9YqaSpzYffVvDz6Ud+KtPWt4ejJDyvJVFAVnVIvVUCkuXVefBlC10Ww6l52DXex/rdxQW+46ZNV5zlFOUi4VWFPT6bOlIEj1hgHG0cfFKARQXcVlp03xTi5DRJp3jlBf/QLeUy4kZEFUdQl+gkVLwySrOh+1kHwE15JQWpNHxfxWzYnNnjokT2RCW2ANO4Y8vfMPCVM5OKSVNISHm3UurNkJNTAomK/ElnXQgP23UcVgbF3mNRBPx9U3/+9HHkXcqDefSI7XBC7npIBFdetCxFt3nW/cX4l1zfq6ZExCVde+/40V1U+TOoLZVLIZZzEuA40J4H99oK7QN5/MtCGaSr7kgAx+7ymfSY37rXB6wOVi5HTBlhyh+VFGU+Wmy+q5sxfr6+ZLGwoGpvNh0Q== -``` - -The returned client CA public key should be saved in a file that is added to -the `TrustedUserCAKeys` list in the `sshd_config` file on the host machine: - -```text -cat /etc/ssh/sshd_config -... -... -TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem -``` - -The client CA public key can also be retrieved later using `vault read -ssh-client-signer/config/ca`. - -### Allow host certificate to have longer TTLs - -```text -vault mount-tune -max-lease-ttl=87600h ssh-host-signer -Successfully tuned mount 'ssh-host-signer'! -``` - -### Create a role to sign host keys - -You'll want to add in some allowed domains and either the `allow_subdomains` -flag, the `allow_bare_domains` flag, or both. - -```text -vault write ssh-host-signer/roles/hostrole ttl=87600h allow_host_certificates=true key_type=ca allowed_domains="localdomain,example.com" allow_subdomains=true -Success! Data written to: ssh-host-signer/roles/hostrole -``` - -### Create a role to sign client keys - -Because of the way that some SSH certificate features are implemented, some -common options to this call must be passed in as a map. 
Vault's CLI does not -currently have a native way to pass in map values, so the easiest way to do -this is to create a JSON file and use it as the input. In the following example -this is used to add the `permit-pty` extension to the certificate. - -```json -{ - "allow_user_certificates": true, - "allowed_users": "*", - "default_extensions": [ - { - "permit-pty": "" - } - ], - "key_type": "ca", - "default_user": "icecream", - "ttl": "30m0s" -} -``` - -```text -vault write ssh-client-signer/roles/clientrole @clientrole.json -Success! Data written to: ssh-client-signer/roles/clientrole -``` - -### Sign the host key - -You can generate a new key, or you can sign one of the host keys that was -autogenerated when SSH was first started up, e.g. -`/etc/ssh/ssh_host_rsa_key.pub`. - -```text -cat /etc/ssh/ssh_host_rsa_key.pub | vault write ssh-host-signer/sign/hostrole public_key=- cert_type=host -Key Value ---- ----- -serial_number 3746eb17371540d9 -signed_key ssh-rsa-cert-v01@openssh.com -AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAg1d4hqGnDjPvBFTGGDksRvaxkIrJ/cc0P4wVyk5A+NtUAAAADAQABAAABAQDKis3fESmPFS9cp7QYJRAsqvrM9w6GwHmrBp2DOUmTJ1szNDm0sbJGkmvMvmC0fbb4DkXbr8YPKk0srX8jRDCXLRtrZJs1jgDN/JVyJGR1pYwOItpeYSkoU42cjgRqEvdms30TvIEzsDhkhyOATTooi95J46GP6tczl5nPp2Zz7zVj8/yXechcM6GCs0x8epcK9UJfhpNvYrC3F7tnxbbLFkdM7AV0bTu1wND2rKTDeACbk3Xi5j9Ti4oQ0ma7aNOFrCO8gfiB4mBbAx4Y+j+FSDNuVWpQqkGBwqRp+E2hgGy4Ao+3zE89SwtnlziIgBwyecT7JTQ+X54Pn7ZBtK+BN0brFzcVQNkAAAACAAAATHZhdWx0LXRva2VuLTA0Mzk0M2MyZjFlYWNmNzBhOWQyNDhiZWE5Yzg0N2UzZDM5Yzc2ZTAyMmY5YzU3MzJkOTAyNDE1NzM2NzU4MWEAAAAAAAAAAFjPB8UAAAAAa5sK4wAAAAAAAAAAAAAAAAAAAhcAAAAHc3NoLXJzYQAAAAMBAAEAAAIBAMHuVtDi5vz/sBt+cXjOe0LE8aKNfN2jnQytTPQjVXP+f3ykE/KtQRw2oV4KvpCz2REvwnwG7jxb8O1LECJJwgiXPJ7O6Uf7DF1mcI3ezENSEVtNSixay1K3o/kFlsmwxOl8d+5hs57F7Bf2WPDrfp9StG1Tf/QSarYWQ5woS/2/vP5DB1qib7xHoK9EDQqK3TLKHoNcP4iM+c6aFgWg0fpmE9TbEn7szXq8ukKYfo47WSseUMr5lS9UoPZmN+Zt6ChgRX/0URNLNWdD+k6s/RJ3CmYaF6LtU7uFDBrrM5YhqHrpNhh1oiikcWFrrStk/JuWJkUCL8JxQ7s1iV2rtDa3LrxEqn4ibZE
49tcsphzY349OFbBvEDXJDuyG+BLL8kRsBxhC4Msafq0Dqfmxem8Vwl76+V/vnD6Vpcg9RdNUevaDU7kivPC6emDTc9r4ppvweny6rrVd9N820LAdiv8rlg7busdykwD+AEqQ4WYFtwjdWVRuLbroeLIUA3PQkw5vNdLmEQab3Q168wPacQW9kdgfHiFdG2ijHsGHgIKPZ9Q9pOGe5O+/WEsGCvJbHANcvs7AcbHrB392lej/V8bUNFSi19Vwk/uTsUb0xQAHq05HZEK5cRpMXpu/e0Klux3PPPKGz9ohroh5hFXgdSjedbmD8hFP7oX8wt3rcKrnAAACDwAAAAdzc2gtcnNhAAACAHj5fKMW0KvWiVhZ0LQQUPLpBlgL3qeHic99x61mFGQdkgawhh5UjxsW/r3asPy3XI92QYHu6me8g3iTBqXTmM9u+CwCTnVkZD+pweRLqbC+w5FqfSi8qugOZWzQwa6dNkIMDOIx9CZD6Q1Mve6Bwpt4ziPdQNvZgjpAeYSyMgjpea6JBVP9SmLCv8efPnoTmPvSbMR2DQWXQz0+gi/dBzomc9UPOjSq6az10TIFcIxInNhZBlBo5Smk5403lZjLWxX/KvVVT/T19F/+2z5fPjMubYuZIvB0LbXSQmvcbFIaVX2MdOXkx1d4Iy4whmCqFHr/37WJz2FgaHsbI/R/EcC5maqLeyZzAq925g92QiNQ2bXqY2jeondkqPF3ZOVmKDC1hy1PjaVXuIhp6Wq5GEvXHjBNr8vk/WS0enaZvKRuY3h+cHqukQ3RhVIQ8kRq+wHdqytg4c2ijY7Qn9IAKUQb13cpWpH4VFRTAoVR3O5i4OwQ8BCZSQ3YgW4GK9lN29wKUc1rAb2d8gmIq5/lObs0FKpOXDgkF7jC2ilRodJkbLGRcPi2MEWLsSlXjC5p5iwwf9u02EmXzeTWL/R4HhH8j6Efdc9qobPymhFdbrNDhYnu4/TzqJtyIjuWdsMitfAxnJBYAN3xxPpL8lTvhw8gg7eXtbrmisPy69TdsXBf -``` - -Set the signed certificate as `HostCertificate` in `sshd_config` on the host -machine. In order to make things work more automagically (e.g. if you don't -want to specifically pick the host key/cert) it's a good idea to call this -`-cert.pub`, e.g. `/etc/ssh/ssh_host_rsa_key-cert.pub`. It's also not a -bad idea to specifically pick the `HostKey` that will be used: - -```text -cat /etc/ssh/sshd_config -... -... -TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem -HostKey /etc/ssh/ssh_host_rsa_key -HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub -``` - -It's also a good idea to make sure that the permissions on the -`HostCertificate` file are `0640` (on most systems). - - -### Sign the client key - -This is any key you want to use, e.g. 
`~/.ssh/id_rsa.pub`: - -```text -cd ~/.ssh - -cat id_rsa.pub | vault write ssh-client-signer/sign/clientrole public_key=- -Key Value ---- ----- -serial_number c73f26d2340276aa -signed_key ssh-rsa-cert-v01@openssh.com -AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgOe8Iw9mRWhAuDcOFT9hZwMhQK9VduKswcU8XBMA+NgcAAAADAQABAAABAQCyr86Eozv+j8GuBy1fw5NHfak6dvXoQ6/YycbGG/X3o9/d6Xqu2Xd5dVijUvtyym1SgeORwtOXgtwGOFbjWMWBPDupeuRoJx5ww3JRv8jBkaA7JZXhaiSIy3rHwmJNmaLgD/vvqPJEp/E1YnmURbTK2RojnQt9uRI5QMgzg5M/J6ndcv8tbzThSE5NrfIM2AzHt8Ti32I7DhZfuo0goEPbZ7wNUO1u/94itJQ7G/WrUBiTYu5HErMXPrFxhX26IpMTx4d2PkJSzLsImpSGTSuo65tvpSzZgktytCkKi/wRZvNtsABGAm5uKpo+U9t/p0rWDIXyS4RF8knjx6I7QJdhxz8m0jQCdqoAAAABAAAATHZhdWx0LXRva2VuLTE2YmE5ODZkZmM3NTcwZDdhMzA5NDhjMDNiMDA5MTQ2M2JiNmM4OTQ3OTY4ZjcyNzY2MTAzMWJjYzg0ZDhiY2QAAAAAAAAAAFjPB+QAAAAAWM8JLgAAAAAAAAAAAAAAAAAAAhcAAAAHc3NoLXJzYQAAAAMBAAEAAAIBAOhiJObRCsr40aS/wf9KrVD2XY9DDFjVE5I+lGo6gUCAIzu+0RlIZyGbyPf8NrpWPzixSD7T0353AxnrPccbnt0FbJ8cYJsS358gm1FwJXsqRDjGS0F1qmrk/JyEGDRtgg/T4lZd7vI4BHqz4NYmBRQfCZiJjQaWvOjO2+1hxT+m+PjNZj8RZwIBhGJlOGok4n2sPOETOZUdVh6Vnx2V6EpmxQ5Dd1jl8IeBYnn8gpJ8ZQjXDWH/cQ0BOgEoidtbYn9bLSZux9QmGFrzjOCpJ4d3iBn1ippKnNh99W8PPpR34q09a3h6MkPK8lUUBWdUi9VQKS5dV58GULXRbDqXnYNd7H+t3FBb7jpk1XnOUU5SLhVYU9Pps6UgSPWGAcbRx8UoBFBdxWWnTfFOLkNEmneOUF/9At5TLiRkQVR1CX6CRUvDJKs6H7WQfATXklBak0fF/FbNic2eOiRPZEJbYA07hjy98w8JUzk4pJU0hIebdS6s2Qk1MCiYr8SWddCA/bdRxWBsXeY1EE/H1Tf/70ceRdyoN59IjtcELuekgEV160LEW3edb9xfiXXN+rpkTEJV177/jRXVT5M6gtlUshlnMS4DjQngf32grtA3n8y0IZpKvuSADH7vKZ9JjfutcHrA5WLkdMGWHKH5UUZT5abL6rmzF+vr5ksbCgam82HRAAACDwAAAAdzc2gtcnNhAAACACTZP18SBmhBw/FNXgJPZ9heJJbeZHhADfqtVSfIRO5zJHxTQko/uWh1IrpdPu+cRRjHmYJvxvzwvPdTej15gTkbe/2I5lHd7owVxpn98W5dDNGpTNwg+XUMNW19YGno97L4QeacYdI1P4OeklPotGboLEiNzPECZqAjt9g/THEkIR2Xy4knGby/UTXVYboVKwZKdeJhTyG2yhxvMAfFpbLJQW2PLpMHBryCLIiN/CYOZO9t0g2oIGWDoq8oW9rG01G3z2+KjtsJAgPsXRR5e77e/UriZtaSFwJ+LdKlLSi9p2W3gNxC/Vj82LflrdcNDRR9FtRRop7MeTdeWIAy+91GayM/teVNxPuDB0N5Qfuv4IIws3aPlnXcQb17rqg28Nx6v1alzcgiePF8jDlDuVKMraHwKsaG6OT6rXPixb
PJiPko4bqPFMBoQ0S2mbZDCNnXZJs1T+IdrV49aAa+WZHzSIgnNRpgooeOfKyEKeZDs4vnrinGhaMGD9Vz8vaut3drEE5BkEaOysNrrPQRWwM1XOeeg13rGHmUc0VAJCH14R16HVZgD12Ef42Bcx7K5Eo7h1uZ8ghf1eOiZHkGFafU96fVF3m4lKotJU3PZrIElOUvMMn0YXtevDDge1ToEammABnn5lFUMsb+3jfyYQypYUkiKN17ANyOLg1fJQMd -``` - -Save the signed key in a file (make sure you set the permissions to only be -writeable by the owner); if you're saving it along with your public/private key -(e.g. `~/.ssh/id_rsa(.pub)`), saving it with `-cert.pub` (e.g. -`~/.ssh/id_rsa-cert.pub`) allows SSH to automatically discover it. Otherwise, -you can tell the SSH client where to find it (and the associated private key) -by using `-i` such as `ssh -i ~/.ssh/id_rsa -i my-signed-cert.pub localhost`. - -If you want to see the configured extensions, principals, etc., you can use the -`-L` flag to `ssh-keygen`, e.g. `ssh-keygen -Lf ~/.ssh/id_rsa-cert.pub`. - -### SSH into the host machine - -```text -ssh -i signed-client-cert.pub username@ -username@:~$ -``` - -### Troubleshooting - -If you are not able to successfully make a client connection, looking at SSH server logs is likely to be your best bet. Some known potential issues: - -* If on an SELinux-enforcing system, you may need to adjust related types so - that the SSH daemon is able to read it, for instance by adjusting the signed - host certificate to be an `sshd_key_t` type. -* If you can an error on the client indicating `"no separate private key for - certificate"` you may be hitting a bug introduced into OpenSSH version 7.2 - and fixed in 7.5. See [OpenSSH bug - 2617](https://bugzilla.mindrot.org/show_bug.cgi?id=2617) for details. If you - are able to find a workaround without patching OpenSSH, please submit a PR to - update this documentation! - ----------------------------------------------------- -## II. 
One-Time-Password (OTP) Type - -This backend type allows a Vault server to issue an OTP every time a client -wants to SSH into a remote host, using a helper command on the remote host to -perform verification. - -An authenticated client requests credentials from the Vault server and, if -authorized, is issued an OTP. When the client establishes an SSH connection to -the desired remote host, the OTP used during SSH authentication is received by -the Vault helper, which then validates the OTP with the Vault server. The Vault -server then deletes this OTP, ensuring that it is only used once. - -Since the Vault server is contacted during SSH connection establishment, every -login attempt and the correlating Vault lease information is logged to the -audit backend. - -See [Vault-SSH-Helper](https://github.com/hashicorp/vault-ssh-helper) for -details on the helper. - -### Drawbacks - -The main concern with the OTP backend type is the remote host's connection to -Vault; if compromised, an attacker could spoof the Vault server returning a -successful request. This risk can be mitigated by using TLS for the connection -to Vault and checking certificate validity; future enhancements to this backend -may allow for extra security on top of what TLS provides. - -### Mount the backend - -```text -$ vault mount ssh -Successfully mounted 'ssh' at 'ssh'! -``` - -### Create a Role - -Create a role with the `key_type` parameter set to `otp`. All of the machines -represented by the role's CIDR list should have helper properly installed and -configured. - -```text -$ vault write ssh/roles/otp_key_role \ - key_type=otp \ - default_user=username \ - cidr_list=x.x.x.x/y,m.m.m.m/n -Success! Data written to: ssh/roles/otp_key_role -``` - -### Create a Credential - -Create an OTP credential for an IP of the remote host that belongs to -`otp_key_role`. 
- -```text -$ vault write ssh/creds/otp_key_role ip=x.x.x.x -Key Value -lease_id ssh/creds/otp_key_role/73bbf513-9606-4bec-816c-5a2f009765a5 -lease_duration 600 -lease_renewable false -port 22 -username username -ip x.x.x.x -key 2f7e25a2-24c9-4b7b-0d35-27d5e5203a5c -key_type otp -``` - -### Establish an SSH session - -```text -$ ssh username@localhost -Password: -username@ip:~$ -``` - -### Automate it! - -A single CLI command can be used to create a new OTP and invoke SSH with the -correct parameters to connect to the host. - -```text -$ vault ssh -role otp_key_role username@x.x.x.x -OTP for the session is `b4d47e1b-4879-5f4e-ce5c-7988d7986f37` -[Note: Install `sshpass` to automate typing in OTP] -Password: -``` - -The OTP will be entered automatically using `sshpass` if it is installed. - -```text -$ vault ssh -role otp_key_role -strict-host-key-checking=no username@x.x.x.x -username@:~$ -``` - -Note: `sshpass` cannot handle host key checking. Host key checking can be -disabled by setting `-strict-host-key-checking=no`. - ----------------------------------------------------- -## III. Dynamic Key Type (Deprecated) - -**Note**: There are several serious drawbacks (detailed below), including some -with security implications, inherent in this method. Because of these -drawbacks, the Vault team recommends use of the CA or OTP types whenever -possible. Care should be taken with respect to the above issues with any -deployments using the dynamic key type. - -When using this type, the administrator registers a secret key with appropriate -`sudo` privileges on the remote machines; for every authorized credential -request, Vault creates a new SSH key pair and appends the newly-generated -public key to the `authorized_keys` file for the configured username on the -remote host. Vault uses a configurable install script to achieve this. 
- -The backend does not prompt for `sudo` passwords; the `NOPASSWD` option for -sudoers should be enabled at all remote hosts for the Vault administrative -user. - -The private key returned to the user will be leased and can be renewed if -desired. Once the key is given to the user, Vault will not know when it gets -used or how many time it gets used. Therefore, Vault **WILL NOT** and cannot -audit the SSH session establishments. - -When the credential lease expires, Vault removes the secret key from the remote -machine. - -### Drawbacks - -The dynamic key type has several serious drawbacks: - -1. _Audit logs are unreliable_: Vault can only log when users request - credentials, not when they use the given keys. If user A and user B both - request access to a machine, and are given a lease valid for five minutes, - it is impossible to know whether two accesses to that user account on the - remote machine were A, A; A, B; B, A; or B, B. -2. _Generating dynamic keys consumes entropy_: Unless equipped with a hardware - entropy generating device, a machine can quickly run out of entropy when - generating SSH keys. This will cause further requests for various Vault - operations to stall until more entropy is available, which could take a - significant amount of time, after which the next request for a new SSH key - will use the generated entropy and cause stalling again. -3. This type makes connections to client hosts; when this happens the host key - is *not* verified. - -### sudo - -In order to adjust the `authorized_keys` file for the desired user, Vault -connects via SSH to the remote machine as a separate user, and uses `sudo` to -gain the privileges required. An example `sudoers` file is shown below. - -File: `/etc/sudoers` - -```hcl -# This is a sample sudoers statement; you should modify it -# as appropriate to satisfy your security needs. 
-vaultadmin ALL=(ALL)NOPASSWD: ALL -``` - -### Configuration - -Next, infrastructure configuration must be registered with Vault via roles. -First, however, the shared secret key must be specified. - -### Mount the backend - -```text -$ vault mount ssh -Successfully mounted 'ssh' at 'ssh'! -``` - -#### Registering the shared secret key - -Register a key with a name; this key must have administrative capabilities on -the remote hosts. - -```text -$ vault write ssh/keys/dev_key \ - key=@dev_shared_key.pem -``` - -#### Create a Role - -Next, create a role. All of the machines contained within this CIDR block list -should be accessible using the registered shared secret key. - -```text -$ vault write ssh/roles/dynamic_key_role \ - key_type=dynamic \ - key=dev_key \ - admin_user=username \ - default_user=username \ - cidr_list=x.x.x.x/y -Success! Data written to: ssh/roles/dynamic_key_role -``` - -`cidr_list` is a comma separated list of CIDR blocks for which a role can -generate credentials. If this is empty, the role can only generate credentials -if it belongs to the set of zero-address roles. - -Zero-address roles, configured via `/ssh/config/zeroaddress` endpoint, takes -comma separated list of role names that can generate credentials for any IP -address. - -Use the `install_script` option to provide an install script if the remote -hosts do not resemble a typical Linux machine. The default script is compiled -into the Vault binary, but it is straight forward to specify an alternate. The -script takes three arguments which are explained in the comments. - -To see the default, see -[linux_install_script.go](https://github.com/hashicorp/vault/blob/master/builtin/logical/ssh/linux_install_script.go) - -### Create a credential - -Create a dynamic key for an IP of the remote host that is covered by -`dynamic_key_role`'s CIDR list. 
- -```text -$ vault write ssh/creds/dynamic_key_role ip=x.x.x.x -Key Value -lease_id ssh/creds/dynamic_key_role/8c4d2042-23bc-d6a8-42c2-6ff01cb83cf8 -lease_duration 600 -lease_renewable true -ip x.x.x.x -key -----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA5V/Y95qfGaUXRPkKNK9jgDHXPD2n5Ein+QTNnLSGrHtJUH7+ -pgs/5Hc4//124P9qHNmjIYQVyvcLreFgSrQCq4K8193hmypBYtsvCgvpc+jEwaGA -zK0QV7uc1z8KL7FuRAxpHJwB6+nubOzzqM03xsViHRhaWhYVHw2Vl4oputSHE7R9 -ugaTRg67wge4Nyi5RRL0RQcmW15/Vop8B6HpBSmZQy3enjg+32KbOWCMMTAPuF9/ -DgxSgZQaFMjGN4RjDreZI8Vv5zIiFJzZ3KVOWy8piI0PblLnDpU4Q0QSQ9A+Vr7b -JS22Lbet1Zbapl/n947/r1wGObLCc5Lilu//1QIDAQABAoIBAHWLfdO9sETjHp6h -BULkkpgScpuTeSN6vGHXvUrOFKn1cCfJPNR4tWBuXI6LJM2+9nEccwXs+4IMwjZ0 -ZfVCdI/SKtZxBXmP2PxBGMUMP7G/mn0kN64sDlD3ezOvQZgZVEmZFpCrvixYsG+v -qlpZ+HhrlJEWds7tvBsyyfNjwWjVIpm08zBmteFj4zu7OEcmGXEHDoxDXxyVP2BG -eLU/fM5JA2UEjfCQ1MIZ3rBtPePdz4LRpb+ajklqrUj1OHoiDrXa8EAf0/wDP9re -c1iH4bn7ZjYK0+IhZ+Pmw6gUftzZNWSC2kOLnZLdN/K7hgh0l0r0K/1eeXt43upB -WALNuiECgYEA8PM2Ob3XXKALF86PUewne4fCz9iixr/cIpvrEGrh9lyQRO8X5Jxb -ug38jEql4a574C6TSXfzxURza4P6lnfa0LvymmW0bhxZ5nev9kcAVnLKvpOUArTR -32k9bKXd6zp8Q9ZyVNwHRxcVs4YgwfJlcx8geC4o6YRiIjvcBQ9RVHkCgYEA87OK -lZDFBeEY/HVOxAQNXS5fgTd4U4DbwEJLv7SPk02v9oDkGHkpgMs4PcsIpCzsTpJ0 -oXMfLSxZ1lmZiuUvAupKj/7RjJ0XyjSMfm1Zs81epWj+boVfM4amZNHVLIWgddmM -XzXEZKByvi1gs7qFcjQz2DEbZltWO6dX14O4Fz0CgYEAlWSWyHJWZ02r0xT1c7vS -NxtTxH7zXftzR9oYgtNiStfVc4gy7kGr9c3aOjnGZAlFMRhvpevDrxnj3lO0OTsS -5rzBjM1mc6cMboLjDPW01eTSpBroeE0Ym0arGQQ2djSK+5yowsixknhTsj2FbfsW -v6wa+6jTIQY9ujAXGOQIbzECgYAYuXlw7SwgCZNYYappFqQodQD5giAyEJu66L74 -px/96N7WWoNJvFkqmPOOyV+KEIi0/ATbMGvUUHCY36RFRDU9zXldHJQz+Ogl+qja -VsvIAyj8DSfrHJrpBlsxVVyUVMZPzo+ARVs0flbF1qK9+Ul6qbMs1uaZvuCD0tmF -ovZ1XQKBgQDB0s7SDmAMgVjG8UBZgUru9vsDrxERT2BloptnnAjSiarLF5M+qeZO -7L4NLyVP39Z83eerEonzDAHHbvhPyi6n2YmnYhGjeP+lPZIVqGF9cpZD3q48YHZc -3ePn2/oLZrXKWOMyMwp2Uj+0SArCW+xMnoNp50sYNVR/JK3BPIdkag== ------END RSA PRIVATE KEY----- -key_type dynamic -port 22 -username username -``` - -### Establish an SSH session - 
-Save the key to a file (e.g. `dyn_key.pem`) and then use it to establish an SSH -session. - -```text -$ ssh -i dyn_key.pem username@ -username@:~$ -``` - -### Automate it! - -Creation of new key, saving to a file, and using it to establish an SSH session -can all be done with a single Vault CLI command. - -```text -$ vault ssh -role dynamic_key_role username@ -username@:~$ -``` - ----------------------------------------------------- ## API The SSH secret backend has a full HTTP API. Please see the diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/one-time-ssh-passwords.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/one-time-ssh-passwords.html.md new file mode 100644 index 0000000..4d7d1a8 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/one-time-ssh-passwords.html.md @@ -0,0 +1,114 @@ +--- +layout: "docs" +page_title: "One-Time SSH Passwords (OTP) - SSH Secret Backend" +sidebar_current: "docs-secrets-ssh-one-time-ssh-passwords" +description: |- + The One-Time SSH Password (OTP) SSH secret backend type allows a Vault server + to issue a One-Time Password every time a client wants to SSH into a remote + host using a helper command on the remote host to perform verification. +--- + +# One-Time SSH Passwords + +The One-Time SSH Password (OTP) SSH secret backend type allows a Vault server to +issue a One-Time Password every time a client wants to SSH into a remote host +using a helper command on the remote host to perform verification. + +An authenticated client requests credentials from the Vault server and, if +authorized, is issued an OTP. When the client establishes an SSH connection to +the desired remote host, the OTP used during SSH authentication is received by +the Vault helper, which then validates the OTP with the Vault server. The Vault +server then deletes this OTP, ensuring that it is only used once. 
+ +Since the Vault server is contacted during SSH connection establishment, every +login attempt and the correlating Vault lease information is logged to the audit +backend. + +See [Vault-SSH-Helper](https://github.com/hashicorp/vault-ssh-helper) for +details on the helper. + +This page will show a quick start for this backend. For detailed documentation +on every path, use `vault path-help` after mounting the backend. + +### Drawbacks + +The main concern with the OTP backend type is the remote host's connection to +Vault; if compromised, an attacker could spoof the Vault server returning a +successful request. This risk can be mitigated by using TLS for the connection +to Vault and checking certificate validity; future enhancements to this backend +may allow for extra security on top of what TLS provides. + +### Mount the backend + +```text +$ vault mount ssh +Successfully mounted 'ssh' at 'ssh'! +``` + +### Create a Role + +Create a role with the `key_type` parameter set to `otp`. All of the machines +represented by the role's CIDR list should have helper properly installed and +configured. + +```text +$ vault write ssh/roles/otp_key_role \ + key_type=otp \ + default_user=username \ + cidr_list=x.x.x.x/y,m.m.m.m/n +Success! Data written to: ssh/roles/otp_key_role +``` + +### Create a Credential + +Create an OTP credential for an IP of the remote host that belongs to +`otp_key_role`. + +```text +$ vault write ssh/creds/otp_key_role ip=x.x.x.x +Key Value +lease_id ssh/creds/otp_key_role/73bbf513-9606-4bec-816c-5a2f009765a5 +lease_duration 600 +lease_renewable false +port 22 +username username +ip x.x.x.x +key 2f7e25a2-24c9-4b7b-0d35-27d5e5203a5c +key_type otp +``` + +### Establish an SSH session + +```text +$ ssh username@localhost +Password: +username@ip:~$ +``` + +### Automate it! + +A single CLI command can be used to create a new OTP and invoke SSH with the +correct parameters to connect to the host. 
+ +```text +$ vault ssh -role otp_key_role username@x.x.x.x +OTP for the session is `b4d47e1b-4879-5f4e-ce5c-7988d7986f37` +[Note: Install `sshpass` to automate typing in OTP] +Password: +``` + +The OTP will be entered automatically using `sshpass` if it is installed. + +```text +$ vault ssh -role otp_key_role -strict-host-key-checking=no username@x.x.x.x +username@:~$ +``` + +Note: `sshpass` cannot handle host key checking. Host key checking can be +disabled by setting `-strict-host-key-checking=no`. + +## API + +The SSH secret backend has a full HTTP API. Please see the +[SSH secret backend API](/api/secret/ssh/index.html) for more +details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/signed-ssh-certificates.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/signed-ssh-certificates.html.md new file mode 100644 index 0000000..b377ddd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/ssh/signed-ssh-certificates.html.md @@ -0,0 +1,485 @@ +--- +layout: "docs" +page_title: "Signed SSH Certificates - SSH Secret Backend" +sidebar_current: "docs-secrets-ssh-signed-ssh-certificates" +description: |- + The signed SSH certificates is the simplest and most powerful in terms of + setup complexity and in terms of being platform agnostic. When using this + type, an SSH CA signing key is generated or configured at the backend's mount. + This key will be used to sign other SSH keys. +--- + +# Signed SSH Certificates + +The signed SSH certificates is the simplest and most powerful in terms of setup +complexity and in terms of being platform agnostic. By leveraging Vault's +powerful CA capabilities and functionality built into OpenSSH, clients can SSH +into target hosts using their own local SSH keys. + +In this section, the term "**client**" refers to the person or machine +performing the SSH operation. The "**host**" refers to the target machine. 
If +this is confusing, substitute "client" with "user". + +This page will show a quick start for this backend. For detailed documentation +on every path, use `vault path-help` after mounting the backend. + +## Client Key Signing + +Before a client can request their SSH key be signed, the Vault SSH backend must +be configured. Usually a Vault administrator or security team performs these +steps. It is also possible to automate these actions using a configuration +management tool like Chef, Puppet, Ansible, or Salt. + +### Signing Key & Role Configuration + +The following steps are performed in advance by a Vault administrator, security +team, or configuration management tooling. + +1. Mount the backend. Like all secret backends in Vault, the SSH secret backend +must be mounted before use. + + ```text + $ vault mount -path=ssh-client-signer ssh + Successfully mounted 'ssh' at 'ssh-client-signer'! + ``` + + This mounts the SSH backend at the path "ssh-client-signer". It is possible + to mount the same secret backend multiple times using different `-path` + arguments. The name "ssh-client-signer" is not special - it can be any name, + but this documentation will assume "ssh-client-signer". + +1. Configure Vault with a CA for signing client keys using the `/config/ca` +endpoint. If you do not have an internal CA, Vault can generate a keypair for +you. + + ```text + $ vault write ssh-client-signer/config/ca generate_signing_key=true + Key Value + --- ----- + public_key ssh-rsa AAAAB3NzaC1yc2EA... + ``` + + If you already have a keypair, specify the public and private key parts as + part of the payload: + + ```text + $ vault write ssh-client-signer/config/ca \ + private_key="..." \ + public_key="..." + ``` + + Regardless of whether it is generated or uploaded, the client signer public + key is accessible via the API at the `/public_key` endpoint. + +1. Add the public key to all target host's SSH configuration. 
This process can +be manual or automated using a configuration management tool. The public key is +accessible via the API and does not require authentication. + + ```text + $ curl -o /etc/ssh/trusted-user-ca-keys.pem https://vault.rocks/v1/ssh-client-signer/public_key + ``` + + ```text + $ vault read -field=public_key ssh-client-signer/config/ca > /etc/ssh/trusted-user-ca-keys.pem + ``` + + Add the path where the public key contents are stored to the SSH + configuration file as the `TrustedUserCAKeys` option. + + ```text + # /etc/ssh/sshd_config + # ... + TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem + ``` + + Restart the SSH service to pick up the changes. + +1. Create a named Vault role for signing client keys. + + Because of the way some SSH certificate features are implemented, options + are passed as a map. The following example adds the `permit-pty` extension + to the certificate. + + ```text + $ vault write ssh-client-signer/roles/my-role -<<"EOH" + { + "allow_user_certificates": true, + "allowed_users": "*", + "default_extensions": [ + { + "permit-pty": "" + } + ], + "key_type": "ca", + "default_user": "ubuntu", + "ttl": "30m0s" + } + EOH + ``` + +### Client SSH Authentication + +The following steps are performed by the client (user) that wants to +authenticate to machines managed by Vault. These commands are usually run from +the client's local workstation. + +1. Locate or generate the SSH public key. Usually this is `~/.ssh/id_rsa.pub`. +If you do not have an SSH keypair, generate one: + + ```text + $ ssh-keygen -t rsa -C "user@example.com" + ``` + +1. Ask Vault to sign your **public key**. This file usually ends in `.pub` and +the contents begin with `ssh-rsa ...`. + + ```text + $ vault write ssh-client-signer/sign/my-role \ + public_key=@$HOME/.ssh/id_rsa.pub + + Key Value + --- ----- + serial_number c73f26d2340276aa + signed_key ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1... + ``` + + The result will include the serial and the signed key. 
This signed key is + another public key. + + To customize the signing options, use a JSON payload: + + ```text + $ vault write ssh-client-signer/sign/my-role -<<"EOH" + { + "public_key": "ssh-rsa AAA...", + "valid_principals": "my-user", + "key_id": "custom-prefix", + "extension": { + "permit-pty": "" + } + } + EOH + ``` + +1. Save the resulting signed, public key to disk. Limit permissions as needed. + + ```text + $ vault write -field=signed_key ssh-client-signer/sign/my-role \ + public_key=@$HOME/.ssh/id_rsa.pub > signed-cert.pub + ``` + + If you are saving the certificate directly beside your SSH keypair, suffix + the name with `-cert.pub` (`~/.ssh/id_rsa-cert.pub`). With this naming + scheme, OpenSSH will automatically use it during authentication. + +1. (Optional) View enabled extensions, principals, and metadata of the signed +key. + + ```text + $ ssh-keygen -Lf ~/.ssh/signed-cert.pub + ``` + +1. SSH into the host machine using the signed key. You must supply both the +signed public key from Vault **and** the corresponding private key as +authentication to the SSH call. + + ```text + $ ssh -i signed-cert.pub -i ~/.ssh/id_rsa username@10.0.23.5 + ``` + +## Host Key Signing + +For an added layer of security, we recommend enabling host key signing. This is +used in conjunction with client key signing to provide an additional integrity +layer. When enabled, the SSH agent will verify the target host is valid and +trusted before attempting to SSH. This will reduce the probability of a user +accidentally SSHing into an unmanaged or malicious machine. + +### Signing Key Configuration + +1. Mount the backend. For the most security, mount at a different path from the +client signer. + + ```text + $ vault mount -path=ssh-host-signer ssh + Successfully mounted 'ssh' at 'ssh-host-signer'! + ``` + +1. Configure Vault with a CA for signing host keys using the `/config/ca` +endpoint. If you do not have an internal CA, Vault can generate a keypair for +you. 
+ + ```text + $ vault write ssh-host-signer/config/ca generate_signing_key=true + Key Value + --- ----- + public_key ssh-rsa AAAAB3NzaC1yc2EA... + ``` + + If you already have a keypair, specify the public and private key parts as + part of the payload: + + ```text + $ vault write ssh-host-signer/config/ca \ + private_key="..." \ + public_key="..." + ``` + + Regardless of whether it is generated or uploaded, the host signer public + key is accessible via the API at the `/public_key` endpoint. + +1. Extend host key certificate TTLs. + + ```text + $ vault mount-tune -max-lease-ttl=87600h ssh-host-signer + ``` + +1. Create a role for signing host keys. Be sure to fill in the list of allowed +domains, set `allow_bare_domains`, or both. + + ```text + $ vault write ssh-host-signer/roles/hostrole \ + key_type=ca \ + ttl=87600h \ + allow_host_certificates=true \ + allowed_domains="localdomain,example.com" \ + allow_subdomains=true + ``` + +1. Sign the host's SSH public key. + + ```text + $ vault write ssh-host-signer/sign/hostrole \ + cert_type=host \ + public_key=@/etc/ssh/ssh_host_rsa_key.pub + Key Value + --- ----- + serial_number 3746eb17371540d9 + signed_key ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1y... + ``` + +1. Set the resulting signed certificate as `HostCertificate` in the SSH +configuration on the host machine. + + ```text + $ vault write -field=signed_key ssh-host-signer/sign/hostrole \ + cert_type=host \ + public_key=@/etc/ssh/ssh_host_rsa_key.pub > /etc/ssh/ssh_host_rsa_key-cert.pub + ``` + + Set permissions on the certificate to be `0640`: + + ```text + $ chmod 0640 /etc/ssh/ssh_host_rsa_key-cert.pub + ``` + + Add host key and host certificate to the SSH configuration file. + + ```text + # /etc/ssh/sshd_config + # ... + + # For client keys + TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem + + # For host keys + HostKey /etc/ssh/ssh_host_rsa_key + HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub + ``` + + Restart the SSH service to pick up the changes. 
+ +### Client-Side Host Verification + +1. Retrieve the host signing CA public key to validate the host signature of +target machines. + + ```text + $ curl https://vault.rocks/v1/ssh-host-signer/public_key + ``` + + ```text + $ vault read -field=public_key ssh-host-signer/config/ca + ``` + +1. Add the resulting public key to the `known_hosts` file with authority. + + ```text + # ~/.ssh/known_hosts + @cert-authority *.example.com ssh-rsa AAAAB3NzaC1yc2EAAA... + ``` + +1. SSH into target machines as usual. + +## Troubleshooting + +When initially configuring this type of key signing, enable `VERBOSE` SSH +logging to help annotate any errors in the log. + +```text +# /etc/ssh/sshd_config +# ... +LogLevel VERBOSE +``` + +Restart SSH after making these changes. + +By default, SSH logs to `/var/log/auth.log`, but so do many other things. To +extract just the SSH logs, use the following: + +```sh +$ tail -f /var/log/auth.log | grep --line-buffered "sshd" +``` + +If you are unable to make a connection to the host, the SSH server logs may +provide guidance and insights. + +### Name is not a listed principal + +If the `auth.log` displays the following messages: + +```text +# /var/log/auth.log +key_cert_check_authority: invalid certificate +Certificate invalid: name is not a listed principal +``` + +The certificate does not permit the username as a listed principal for +authenticating to the system. This is most likely due to an OpenSSH bug (see +[known issues](#known-issues) for more information). This bug does not respect +the `allowed_users` option value of "\*". Here are ways to work around this +issue: + +1. Set `default_user` in the role. If you are always authenticating as the same +user, set the `default_user` in the role to the username you are SSHing into the +target machine: + + ```text + $ vault write ssh/roles/my-role -<<"EOH" + { + "default_user": "YOUR_USER", + // ... + } + EOH + ``` + +1. Set `valid_principals` during signing. 
In situations where multiple users may +be authenticating to SSH via Vault, set the list of valid principals during key +signing to include the current username: + + ```text + $ vault write ssh-client-signer/sign/my-role -<<"EOH" + { + "valid_principals": "my-user" + // ... + } + EOH + ``` + + +### No Prompt After Login + +If you do not see a prompt after authenticating to the host machine, the signed +certificate may not have the `permit-pty` extension. There are two ways to add +this extension to the signed certificate. + +- As part of the role creation + + ```text + $ vault write ssh-client-signer/roles/my-role -<<"EOH" + { + "default_extensions": [ + { + "permit-pty": "" + } + ] + // ... + } + EOH + ``` + +- As part of the signing operation itself: + + ```text + $ vault write ssh-client-signer/sign/my-role -<<"EOH" + { + "extension": { + "permit-pty": "" + } + // ... + } + EOH + ``` + +### No Port Forwarding + +If port forwarding from the guest to the host is not working, the signed +certificate may not have the `permit-port-forwarding` extension. Add the +extension as part of the role creation or signing process to enable port +forwarding. See [no prompt after login](#no-prompt-after-login) for examples. + +```json +{ + "default_extensions": [ + { + "permit-port-forwarding": "" + } + ] +} +``` + +### No X11 Forwarding + +If X11 forwarding from the guest to the host is not working, the signed +certificate may not have the `permit-X11-forwarding` extension. Add the +extension as part of the role creation or signing process to enable X11 +forwarding. See [no prompt after login](#no-prompt-after-login) for examples. + +```json +{ + "default_extensions": [ + { + "permit-X11-forwarding": "" + } + ] +} +``` + +### No Agent Forwarding + +If agent forwarding from the guest to the host is not working, the signed +certificate may not have the `permit-agent-forwarding` extension. 
Add the +extension as part of the role creation or signing process to enable agent +forwarding. See [no prompt after login](#no-prompt-after-login) for examples. + +```json +{ + "default_extensions": [ + { + "permit-agent-forwarding": "" + } + ] +} +``` + +### Known Issues + +- On SELinux-enforcing systems, you may need to adjust related types so that the + SSH daemon is able to read it. For example, adjust the signed host certificate + to be an `sshd_key_t` type. + +- On some versions of SSH, you may get the following error: + + ```text + no separate private key for certificate + ``` + + This is a bug introduced in OpenSSH version 7.2 and fixed in 7.5. See + [OpenSSH bug 2617](https://bugzilla.mindrot.org/show_bug.cgi?id=2617) for + details. + +## API + +The SSH secret backend has a full HTTP API. Please see the +[SSH secret backend API](/api/secret/ssh/index.html) for more +details. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/totp/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/totp/index.html.md new file mode 100644 index 0000000..6ae9ca1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/totp/index.html.md @@ -0,0 +1,83 @@ +--- +layout: "docs" +page_title: "TOTP Secret Backend" +sidebar_current: "docs-secrets-totp" +description: |- + The TOTP secret backend for Vault generates time-based one-time use passwords. +--- + +# TOTP Secret Backend + +Name: `totp` + +The TOTP secret backend for Vault will allow Vault users to store their multi-factor +authentication keys in Vault and use the API to retrieve time-based one-time use passwords +on demand. The backend can also be used to generate a new key and validate passwords generated by that key. + +This page will show a quick start for this backend. For detailed documentation +on every path, use `vault path-help` after mounting the backend. + +## Quick Start + +The first step to using the TOTP backend is to mount it. 
+Unlike the `kv` backend, the `totp` backend is not mounted by default. + +```text +$ vault mount totp +Successfully mounted 'totp' at 'totp'! +``` + +The next step is to configure a key. For example, let's create +a "test" key by passing in a TOTP key url: + +```text +$ vault write totp/keys/test \ + url="otpauth://totp/Vault:test@gmail.com?secret=Y64VEVMBTSXCYIWRSHRNDZW62MPGVU2G&issuer=Vault" +Success! Data written to: totp/keys/test +``` + +By writing to the `keys/test` path we are defining the `test` key. + +To generate a new set of credentials, we simply read from that key using the `code` path: + +```text +$ vault read totp/code/test +Key Value +code 135031 +``` +Vault is now configured to create time-based one-time use passwords! + +By reading from the `code/test` path, Vault has generated a new +time-based one-time use password using the `test` key configuration. + +Using ACLs, it is possible to restrict using the TOTP backend such +that trusted operators can manage the key definitions, and both +users and applications are restricted in the credentials they are +allowed to read. + +The TOTP backend can also be used to generate new keys and validate passwords generated using those keys. + +In order to generate a new key, set the generate flag to true and pass in an issuer and account name. + +```text +$ vault write totp/keys/test \ + generate=true issuer=Vault account_name=test@gmail.com +``` +A base64 encoded barcode and url will be returned upon generating a new key. These can be given to client applications that +can generate passwords. You can validate those passwords by writing to the `code/test` path. + +```text +$ vault write totp/code/test \ + code=127388 +Key Value +valid true +``` + +If you get stuck at any time, simply run `vault path-help totp` or with a +subpath for interactive help output. + +## API + +The TOTP secret backend has a full HTTP API. Please see the +[TOTP secret backend API](/api/secret/totp/index.html) for more +details. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/transit/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/transit/index.html.md index 4b5da3f..e84dd57 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/secrets/transit/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/docs/secrets/transit/index.html.md @@ -63,7 +63,7 @@ on every path, use `vault path-help` after mounting the backend. ## Quick Start -The first step to using the transit backend is to mount it. Unlike the `generic` +The first step to using the transit backend is to mount it. Unlike the `kv` backend, the `transit` backend is not mounted by default. ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/vsi/configuration.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/vsi/configuration.html.md deleted file mode 100644 index c113fe2..0000000 --- a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/vsi/configuration.html.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -layout: "docs" -page_title: "Vault Secure Introduction Client Configuration" -sidebar_current: "docs-vault-enterprise-vsi-configuration" -description: |- - Configuration details for Vault Secure Introduction Client. - ---- - -# Vault Secure Introduction Client Configuration - -The Vault Secure Introduction client has a flexible configuration system that -allows combining settings from CLI flags, environment variables, and a -configuration file, in that order of preference. - -Generally speaking, values with a source at a higher order of preference -override those from a source with a lower order of preference. The main -difference is in specification of servers, which is detailed further in this -document. - -The available directives from the configuration file, as well as their -environment variable and CLI flag counterparts, follow. 
- -## The Configuration File - -The configuration file is in [HCL](https://github.com/hashicorp/hcl) format and -contains top-level directives as well as directive blocks. An example: - -```javascript -environment "aws" { -} - -vault { - address = "https://vault.service.consul:8200" - mount_path = "auth/aws" -} - -serve "file" { - path = "/ramdisk/vault-token" -} -``` - -The configuration file can be specified either with the `config` CLI flag -or by simply providing the path to the configuration file as the command's -argument. - -All directives take string arguments unless explicitly specified otherwise. - -## Top-Level Directives - - * `environment`: A block with information about he environment under which the - client is running. If not specified, the client will attempt to - automatically discover its environment. The type may also be specified by - the `VAULT_SI_ENVIRONMENT` environment variable and the `environment` CLI - flag. Additional configuration key/value pairs can be passed in via the - `envconfig` CLI flag, which can be specified multiple times. - * `nonce_path`: If the client should save and load its nonce, the path where - the nonce should be stored. May also be specified by the - `VAULT_SI_NONCE_PATH` environment variable and the `nonce-path` CLI flag. - _This is a security-sensitive directive._ - * `vault`: A block with Vault server information, detailed below. - * `serve`: One or more blocks containing serving information, detailed below. - -## Environment Block Directives - -In the configuration file, the type is specified as the block's key, with -optional additional string values specified inside: - -```hcl -environment "aws" { - role = "prod" -} -``` - -The behavior with respect to these additional string values is -environment-specific. Currently, all environments simply round-trip any given -values to the Vault login endpoint. 
In the example above using the AWS -environment, the final set of values given to the login endpoint would be a -`pkcs7` key coming from the environment, as well as a `role` key with value -`prod` coming from the extra environment configuration. - -## Vault Block Directives - - * `address`: The address of the vault server, including scheme. May also be - specified by the `VAULT_ADDR` environment variable and the `address` CLI - flag. - * `mount_path`: The mount path of the authentication backend in Vault. If not - set, defaults to a value specific to the running environment (e.g. for AWS, - it will default to `auth/aws`. May also be specified by the - `VAULT_SI_MOUNT_PATH` environment variable and the `mount-path` CLI flag. - * `tls_skip_verify`: A boolean indicating whether to skip verification of the - certificate provided by the Vault server. May also be specified by the - `VAULT_SKIP_VERIFY` environment variable or the `tls-skip-verify` CLI flag. - _This is a security-sensitive directive._ - * `ca_cert`: A file containing a PEM-encoded X.509 CA certificate to use in - the validation chain of the Vault server's TLS certificate. May also be - specified by the `VAULT_CACERT` environment variable or the `ca-cert` CLI - flag. - * `ca_path`: A directory containing PEM-encoded X.509 CA certificates to use - in the validation chain of the Vault server's TLS certificate. May also be - specified by the `VAULT_CAPATH` environment variable or the `ca-path` CLI - flag. - -## Serve Block Directives - -In the configuration file, serve blocks can be one of two types: - - * Named serve blocks specify a name (`serve "myserver" {...`). The type of server - must be specified by the `type` directive within the block. - * Anonymous serve blocks, rather than specify a name, specify the type of - server (`serve "file" {...`). 
- -On the CLI, serve blocks are specified in one of two formats: - - * `:=`: specifies a key/value configuration directive for an - anonymous server with the given type. - * `::=`: specifies a key/value configuration directive - for a named server with the given type. - -The merging rules for CLI and configuration are as follows: - - * Each anonymous serve block in the configuration file stands alone, using - only the directives contained in the block. - * Each type of anonymous server specified in CLI flags CLI stands alone, with - the key/value configuration directives merged per-type. As a result, there - can only be one anonymous server per type specified in CLI flags. These are - not merged with any anonymous server specified in the configuration file. - * Key/value configuration directives for named serve blocks are merged between - the CLI and configuration file. - -### File Type Serve Block Directives - - * `path`: The path to a file on disk where the token should be written. To - avoid any possible issues during writing, the token will first be written to - a temporary file in the same directory, then atomically renamed to the given - path. The token will always be written with permissions `0640`; directory - permissions for this location should ensure access only to appropriate - readers. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/vsi/index.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/vsi/index.html.md deleted file mode 100644 index 2910282..0000000 --- a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/vsi/index.html.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -layout: "docs" -page_title: "About the Vault Secure Introduction Client" -sidebar_current: "docs-vault-enterprise-vsi" -description: |- - Vault Secure Introduction Client provides a turnkey way to securely introduce Vault tokens and features to applications running in various environments. 
- ---- - -# About the Vault Secure Introduction Client - -The Vault Secure Introduction Client is a feature of [Vault -Enterprise](https://www.hashicorp.com/vault.html) that provides a turnkey way -to securely introduce Vault tokens and features to applications running in -various environments. Currently, AWS EC2 instances are supported in conjunction -with Vault's AWS authentication backend. The client stays running until -terminated and will monitor the lifetime of retrieved Vault tokens, renewing -and reauthenticating as necessary. - -Configuration is simple and can generally be performed purely using CLI flags. -Please see the [Configuration](/docs/vault-enterprise/vsi/configuration.html) page for details -on client configuration. - -The [Security](/docs/vault-enterprise/vsi/security.html) page contains information and -suggestions to help deploy the client in a secure fashion. It assumes -familiarity with the AWS Authentication Backend. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/vsi/security.html.md b/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/vsi/security.html.md deleted file mode 100644 index 497d28f..0000000 --- a/vendor/github.com/hashicorp/vault/website/source/docs/vault-enterprise/vsi/security.html.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -layout: "docs" -page_title: "Vault Secure Introduction Client Security" -sidebar_current: "docs-vault-enterprise-vsi-security" -description: |- - Guidelines for secure usage of Vault Secure Introduction Client. ---- - -# Vault Secure Introduction Client Security - -This page discusses the various security-sensitive aspects of the Vault Secure -Introduction Client along with advice for secure usage. - -## General Advice - -### Use A Dedicated User - -We recommend that the VSI client be run by a dedicated user that does not run -other services. 
This reduces the chance that an attack on another service can -lead to accessing sensitive information (for instance, if the ability of the -client to store its nonce is used). This also ensures that tokens written with -the `file` serving type are not accessible to processes run by other users. - -### Write Tokens To Ephemeral Storage With A Dedicated Group - -When using the `file` serving type to write tokens to paths on the filesystem, -the directories containing those paths should only be accessible to a group or -groups of services that require access to the Vault token. The tokens are -always written with permissions `0640`. - -In addition, this should be a location that does not persist across reboots, -such as a ramdisk. After a reboot, the client will fetch a new token, so there -is no need to store the old one. - -### Firewall Instance Metadata - -If possible (based on operating system and/or distribution), use features of -the OS firewall to restrict access to the instance metadata (specifically the -signed `pkcs7` document) from the `http://169.254.169.254` endpoint to only the -users/groups that need it. - -## Nonces - -The AWS Authentication Backend operates on a Trust On First Use (TOFU) -principle, using nonces generated by a backend client to identify repeat -authentication requests by the same client. - -A nice benefit of this approach is that if a bad actor is able to acquire -machine instance metadata and authenticate before the VSI client, the errors -from the VSI client logs indicating a client nonce mismatch can be used to -trigger an alarm. - -The drawback is that reboot survivability is impacted. However, combinations of -options on the AWS Authentication Backend and the VSI client provide flexible -methods for managing this problem, allowing the security policy of nearly any -organization to be accommodated. - -Following are the various strategies of nonce management. 
- -### Immutable Instances - -If your EC2 instances are running in Auto Scaling Groups (ASGs), one strategy -is to enable the `disallow_reauthentication` option on a configured AMI in the -AWS Authentication Backend (or an associated Role Tag). This allows only a -single token to be granted to any particular instance ID (unless cleared via -the `whitelist/identity` endpoint in the backend), regardless of nonce. As a -result, rather than reboot an instance running in an ASG, the instance can -simply be terminated; when the ASG brings up a new instance, the instance ID -will be different and the new instance will be allowed to authenticate. - -### Manual/Automated Whitelist Management - -This approach relies on either manual or automated intervention, perhaps keyed -by reboot notifications or notifications from parsing the VSI client's error -log. In this approach, knowledge of the reboot of an instance provides -assurance to an operator that they can clear the instance and its nonce from -the backend's whitelist via the `whitelist/identity` endpoint, allowing the -client to use its new generated nonce to authenticate. - -### Instance Migration - -If your EC2 instances do not rely on ephemeral storage across reboots, one -approach is to stop/start the instance rather than reboot it, in conjunction -with enabling the `allow_instance_migration` option on a configured AMI in the -AWS Authentication Backend (or an associated Role Tag). - -When an instance is stopped and started, this causes a new placement of the -instances in the AWS infrastructure; this results in an updated value of -`pendingTime` in the instance metadata document. When the -`allow_instance_migration` option is turned on, a client is allowed to -authenticate for the same instance ID with a new nonce if the value of -`pendingTime` is later than the previously seen value. 
- -### Nonce Storage - -A final option for managing reboot survivability is to use the client's option -to store its nonce on the instance's filesystem and read this nonce the next -time it starts up. - -Although this option provides the best automated reboot survivability -guarantees, it does require storing the nonce in persistent storage. If using -this option, filesystem permissions should be used to ensure that only the user -running the client has access to the directory where the nonce will be stored -(the nonce will always be stored with permissions `0600`). - -This is a very security-sensitive option; so long as the nonce and instance -remain valid, disclosure of the nonce on a machine can allow any user or -service with access to the instance metadata to authenticate as the machine and -gain access to all Vault policies associated with the machine. For this reason, -you should also ensure that if you are having the client store its nonce, you -do not duplicate this nonce across instances (for instance, by baking it into -an AMI), as this would allow any user or service that learns this nonce with -access to any machine's instance metadata to authenticate as that instance. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/generate-root.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/generate-root.html.md similarity index 90% rename from vendor/github.com/hashicorp/vault/website/source/docs/guides/generate-root.html.md rename to vendor/github.com/hashicorp/vault/website/source/guides/generate-root.html.md index 6c7cbd3..311524d 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/generate-root.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/guides/generate-root.html.md @@ -1,7 +1,7 @@ --- -layout: "docs" -page_title: "Generate Root Tokens Using Unseal Keys" -sidebar_current: "docs-guides-generate-root" +layout: "guides" +page_title: "Generate Root Tokens using Unseal Keys - Guides" +sidebar_current: "guides-generate-root" description: |- Generate a new root token using a threshold of unseal keys. --- diff --git a/vendor/github.com/hashicorp/vault/website/source/guides/index.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/index.html.md new file mode 100644 index 0000000..e3edae4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/guides/index.html.md @@ -0,0 +1,17 @@ +--- +layout: "guides" +page_title: "Guides" +sidebar_current: "guides" +description: |- + This section provides various guides for common actions. Due to the nature of Vault, some of these procedures can be complex, so our goal is to provide guidance to do them safely. +--- + +# Vault Guides + +Welcome to the Vault guides! If you are just getting started with Vault, please +start with the [Vault introduction][intro] instead and then continue on to the +guides. The guides provide examples for common Vault workflows and actions for +both users and operators of Vault. Due to the nature of Vault, some of these +procedures can be complex, so our goal is to provide guidance to do them safely. 
+ +[intro]: /intro/index.html diff --git a/vendor/github.com/hashicorp/vault/website/source/guides/plugin-backends.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/plugin-backends.html.md new file mode 100644 index 0000000..9cbb086 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/guides/plugin-backends.html.md @@ -0,0 +1,103 @@ +--- +layout: "guides" +page_title: "Plugin Backends - Guides" +sidebar_current: "guides-plugin-backends" +description: |- + Learn how to build, register, and mount a custom plugin backend. +--- + +# Introduction + +Plugin backends utilize the [plugin system][plugin-system] to enable +third-party secret and auth backends to be mounted. + +It is worth noting that even though [database backends][database-backend] +operate under the same underlying plugin mechanism, they are slightly different +in design than plugin backends demonstrated in this guide. The database backend +manages multiple plugins under the same backend mount point, whereas plugin +backends are generic backends that function as either secret or auth backends. + +This guide provides steps to build, register, and mount non-database external +plugin backends. + +## Setting up Vault + +Set `plugin_directory` to the desired path in the Vault configuration file. +The path should exist and have proper lockdown on access permissions. + +``` +$ cat vault-config.hcl +... +plugin_directory="/etc/vault/vault_plugins" +... +``` + +## Build the Plugin Backend + +Build the custom backend binary, and move it to the `plugin_directory` path. +In this guide, we will use `mock-plugin` that comes from Vault's +`logical/plugin/mock` package. + +``` +$ ls . +main.go + +$ ls .. +backend.go backend_test.go mock-plugin/ path_internal.go path_kv.go + +$ go build -o mock-plugin main.go + +$ mv mock-plugin /etc/vault/vault_plugins +``` + +## Register the Plugin Into the Plugin Catalog + +Start the Vault server. 
Find out the sha256 sum of the compiled plugin binary, +and use that to register the plugin into Vault's plugin catalog. + +``` +$ shasum -a 256 /etc/vault/vault_plugins/mock-plugin +2c071aafa1b30897e60b79643e77592cb9d1e8f803025d44a7f9bbfa4779d615 /etc/vault/vault_plugins/mock-plugin + +$ vault write sys/plugins/catalog/mock-plugin sha_256=2c071aafa1b30897e60b79643e77592cb9d1e8f803025d44a7f9bbfa4779d615 command=mock-plugin +Success! Data written to: sys/plugins/catalog/mock-plugin +``` + +## Mount the Plugin + +``` +$ vault mount -path=mock -plugin-name=mock-plugin plugin +Successfully mounted plugin 'mock-plugin' at 'mock'! + +$ vault mounts +Path Type Accessor Plugin Default TTL Max TTL Force No Cache Replication Behavior Description +cubbyhole/ cubbyhole cubbyhole_80ef4e30 n/a n/a n/a false local per-token private secret storage +mock/ plugin plugin_10fc2cce mock-plugin system system false replicated +secret/ kv kv_ef2a14ec n/a system system false replicated key/value secret storage +sys/ system system_e3a4cccd n/a n/a n/a false replicated system endpoints used for control, policy and debugging +``` + +## Perform operations on the mount + +``` +$ vault write mock/kv/foo value=bar +Key Value +--- ----- +value bar +``` + +## Unmount the plugin + +``` +$ vault unmount mock +Successfully unmounted 'mock' if it was mounted + +$ vault mounts +Path Type Accessor Plugin Default TTL Max TTL Force No Cache Replication Behavior Description +cubbyhole/ cubbyhole cubbyhole_80ef4e30 n/a n/a n/a false local per-token private secret storage +secret/ kv kv_ef2a14ec n/a system system false replicated key/value secret storage +sys/ system system_e3a4cccd n/a n/a n/a false replicated system endpoints used for control, policy and debugging +``` + +[plugin-system]: /docs/internals/plugins.html +[database-backend]: /docs/secrets/databases/index.html \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/website/source/guides/production.html.md 
b/vendor/github.com/hashicorp/vault/website/source/guides/production.html.md new file mode 100644 index 0000000..559dff1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/guides/production.html.md @@ -0,0 +1,110 @@ +--- +layout: "guides" +page_title: "Production Hardening - Guides" +sidebar_current: "guides-production-hardening" +description: |- + This guide provides guidance on best practices for a production hardened deployment of HashiCorp Vault. +--- + +# Production Hardening + +This guide provides guidance on best practices for a production hardened +deployment of Vault. The recommendations are based on the [security +model](/docs/internals/security.html) and focus on defense in depth. + +~> **Apply When Possible!** This guide is meant to provide guidance for an +_ideal_ deployment of Vault, not to document requirements. It is entirely +possible to use Vault without applying any of the following recommendations. +These are best practice recommendations that should be applied when possible +and practical. + +# Recommendations + +* **End-to-End TLS**. Vault should always be used with TLS in production. If + intermediate load balancers or reverse proxies are used to front Vault, they + should _not_ terminate TLS. This way traffic is always encrypted in transit + to Vault and minimizes risks introduced by intermediate layers. + +* **Single Tenancy**. Vault should be the only main process running on a + machine. This reduces the risk that another process running on the same + machine is compromised and can interact with Vault. Similarly, running on + bare metal should be preferred to a VM, and a VM preferred to a container. + This reduces the surface area introduced by additional layers of abstraction + and other tenants of the hardware. Both VM and container based deployments + work, but should be avoided when possible to minimize risk. + +* **Firewall traffic**. 
Vault listens on well known ports, use a local firewall + to restrict all incoming and outgoing traffic to Vault and essential system + services like NTP. This includes restricting incoming traffic to permitted + subnets and outgoing traffic to services Vault needs to connect to, such as + databases. + +* **Disable SSH / Remote Desktop**. When running a Vault as a single tenant + application, users should never access the machine directly. Instead, they + should access Vault through its API over the network. Use a centralized + logging and telemetry solution for debugging. Be sure to restrict access to + logs as need to know. + +* **Disable Swap**. Vault encrypts data in transit and at rest, however it must + still have sensitive data in memory to function. Risk of exposure should be + minimized by disabling swap to prevent the operating system from paging + sensitive data to disk. Vault attempts to ["memory lock" to physical memory + automatically](/docs/configuration/index.html#disable_mlock), but disabling + swap adds another layer of defense. + +* **Don't Run as Root**. Vault is designed to run as an unprivileged user, and + there is no reason to run Vault with root or Administrator privileges, which + can expose the Vault process memory and allow access to Vault encryption + keys. Running Vault as a regular user reduces its privilege. Configuration + files for Vault should have permissions set to restrict access to only the + Vault user. + +* **Turn Off Core Dumps**. A user or administrator that can force a core dump + and has access to the resulting file can potentially access Vault encryption + keys. Preventing core dumps is a platform-specific process; on Linux setting + the resource limit `RLIMIT_CORE` to `0` disables core dumps. This can be + performed by process managers and is also exposed by various shells; in Bash + `ulimit -c 0` will accomplish this. + +* **Immutable Upgrades**. 
Vault relies on an external storage backend for + persistence, and this decoupling allows the servers running Vault to be + managed immutably. When upgrading to new versions, new servers with the + upgraded version of Vault are brought online. They are attached to the same + shared storage backend and unsealed. Then the old servers are destroyed. This + reduces the need for remote access and upgrade orchestration which may + introduce security gaps. + +* **Avoid Root Tokens**. Vault provides a root token when it is first + initialized. This token should be used to setup the system initially, + particularly setting up authentication backends so that users may + authenticate. We recommend treating Vault [configuration as + code](https://www.hashicorp.com/blog/codifying-vault-policies-and-configuration/), + and using version control to manage policies. Once setup, the root token + should be revoked to eliminate the risk of exposure. Root tokens can be + [generated when needed](/guides/generate-root.html), and should be + revoked as soon as possible. + +* **Enable Auditing**. Vault supports several auditing backends. Enabling + auditing provides a history of all operations performed by Vault and provides + a forensics trail in the case of misuse or compromise. Audit logs [securely + hash](/docs/audit/index.html) any sensitive data, but access should still be + restricted to prevent any unintended disclosures. + +* **Upgrade Frequently**. Vault is actively developed, and updating frequently + is important to incorporate security fixes and any changes in default + settings such as key lengths or cipher suites. Subscribe to the [Vault + mailing list](https://groups.google.com/forum/#!forum/vault-tool) and [GitHub + CHANGELOG](https://github.com/hashicorp/vault/blob/master/CHANGELOG.md) for + updates. + +* **Configure SELinux / AppArmor**. Using additional mechanisms like SELinux + and AppArmor can help provide additional layers of security when using Vault. 
+ While Vault can run on many operating systems, we recommend Linux due to the + various security primitives mentioned here. + +* **Restrict Storage Access**. Vault encrypts all data at rest, regardless of + which storage backend is used. Although the data is encrypted, an [attacker + with arbitrary control](/docs/internals/security.html) can cause data + corruption or loss by modifying or deleting keys. Access to the storage + backend should be restricted to only Vault to avoid unauthorized access or + operations. diff --git a/vendor/github.com/hashicorp/vault/website/source/guides/rekeying-and-rotating.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/rekeying-and-rotating.html.md new file mode 100644 index 0000000..dcc0334 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/guides/rekeying-and-rotating.html.md @@ -0,0 +1,140 @@ +--- +layout: "guides" +page_title: "Rekeying & Rotating Vault - Guides" +sidebar_current: "guides-rekeying-and-rotating" +description: |- + Vault supports generating new unseal keys as well as rotating the underlying + encryption keys. This guide covers rekeying and rotating Vault's encryption + keys. +--- + +# Rekeying & Rotating Vault + +~> **Advanced Topic** This guide presents an advanced topic that is not required +for a basic understanding of Vault. Knowledge of this topic is not required for +daily Vault use. + +## Background + +In order to prevent any one person from having complete access to the system, +Vault employs [Shamir's Secret Sharing Algorithm][shamir]. Under this process, +a secret is divided into a subset of parts such that a subset of those parts are +needed to reconstruct the original secret. Vault makes heavy use of this +algorithm as part of the [unsealing process](/docs/concepts/seal.html). + +When a Vault server is first initialized, Vault generates a master key and +immediately splits this master key into a series of key shares following +Shamir's Secret Sharing Algorithm. 
Vault never stores the master key, therefore, +the only way to retrieve the master key is to have a quorum of unseal keys +re-generate it. + +The master key is used to decrypt the underlying encryption key. Vault uses the +encryption key to encrypt data at rest in a storage backend like the filesystem +or Consul. + +Typically each of these key shares is distributed to trusted parties in the +organization. These parties must come together to "unseal" the Vault by entering +their key share. + +[![Vault Shamir Secret Sharing Algorithm](/assets/images/vault-shamir-secret-sharing.svg)](/assets/images/vault-shamir-secret-sharing.svg) + +[shamir]: https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing + +In some cases, you may want to re-generate the master key and key shares. Here +are a few examples: + +- Someone joins or leaves the organization +- Security wants to change the number of shares or threshold of shares +- Compliance mandates the master key be rotated at a regular interval + +In addition to rekeying the master key, there may be an independent desire to +rotate the underlying encryption key Vault uses to encrypt data at rest. + +[![Vault Rekey vs Rotate](/assets/images/vault-rekey-vs-rotate.svg)](/assets/images/vault-rekey-vs-rotate.svg) + +In Vault, _rekeying_ and _rotating_ are two separate operations. The process for +generating a new master key and applying Shamir's algorithm is called +"rekeying". The process for generating a new encryption key for Vault to encrypt +data at rest is called "rotating". + +Both rekeying the Vault and rotating Vault's underlying encryption key are fully +online operations. Vault will continue to service requests uninterrupted during +either of these processes. + +## Rekeying Vault + +Rekeying the Vault requires a quorum of unseal keys. Before continuing, you +should ensure all unseal key holders are available to assist with the rekeying. + +First, initialize a rekeying operation. 
The flags represent the **newly +desired** number of keys and threshold: + +```text +$ vault rekey -init -key-shares=3 -key-threshold=2 +``` + +This will generate a nonce value and start the rekeying process. All other +unseal keys must also provide this nonce value. This nonce value is not a +secret, so it is safe to distribute over insecure channels like chat, email, or +carrier pigeon. + +```text +Nonce: 22657753-9cca-189a-65b8-cb743d104ffc +Started: true +Key Shares: 3 +Key Threshold: 2 +Rekey Progress: 0 +Required Keys: 1 +``` + +Each unseal key holder runs the following command and enters their unseal key: + +```text +$ vault rekey -nonce= +Rekey operation nonce: 22657753-9cca-189a-65b8-cb743d104ffc +Key (will be hidden): +``` + +When the final unseal key holder enters their key, Vault will output the new +unseal keys: + +```text +Key 1: EDj4NZK6z5Y9rpr+TtihTulfdHvFzXtBYQk36dmBczuQ +Key 2: sCkM1i5BGGNDFk5GsqtVolWRPyd5mWn2eZG0gUySiCF7 +Key 3: e5DUvDIH0cPU8Q+hh1KNVkkMc9lliliPVe9u3Fzbzv38 + +Operation nonce: 22657753-9cca-189a-65b8-cb743d104ffc + +Vault rekeyed with 3 keys and a key threshold of 2. Please +securely distribute the above keys. When the vault is re-sealed, +restarted, or stopped, you must provide at least 2 of these keys +to unseal it again. + +Vault does not store the master key. Without at least 2 keys, +your vault will remain permanently sealed. +``` + +Like the initialization process, Vault supports PGP encrypting the resulting +unseal keys and creating backup encryption keys for disaster recovery. + +## Rotating the Encryption Key + +Unlike rekeying the Vault, rotating Vault's encryption key does not require a +quorum of unseal keys. Anyone with the proper permissions in Vault can perform +the encryption key rotation. + +To trigger a key rotation, execute the command: + +```text +$ vault rotate +``` + +This will output the key version and installation time: + +```text +Key Term: 2 +Installation Time: ... 
+``` + +This will add a new key to the keyring. All new values written to the storage +backend will be encrypted with this new key. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/replication.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/replication.html.md similarity index 94% rename from vendor/github.com/hashicorp/vault/website/source/docs/guides/replication.html.md rename to vendor/github.com/hashicorp/vault/website/source/guides/replication.html.md index 3458f7b..8e093bf 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/replication.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/guides/replication.html.md @@ -1,12 +1,12 @@ --- -layout: "docs" -page_title: "Setting up Vault Enterprise Replication" -sidebar_current: "docs-guides-replication" +layout: "guides" +page_title: "Setting up Vault Enterprise Replication - Guides" +sidebar_current: "guides-replication" description: |- Learn how to set up and manage Vault Enterprise Replication. --- -# Replication Setup and Guidance +# Replication Setup & Guidance If you're unfamiliar with Vault Replication concepts, please first look at the [general information page](/docs/vault-enterprise/replication/index.html). More @@ -24,7 +24,7 @@ the [Vault Replication API documentation](/api/system/replication.html) To activate the primary, run: - $ vault write -f sys/replication/primary/enable + $ vault write -f sys/replication/performance/primary/enable There is currently one optional argument: `primary_cluster_addr`. This can be @@ -37,7 +37,7 @@ members of a single cluster and primary/secondary clusters. 
To fetch a secondary bootstrap token, run: - $ vault write sys/replication/primary/secondary-token id= + $ vault write sys/replication/performance/primary/secondary-token id= The value for `id` is opaque to Vault and can be any identifying value you want; @@ -50,7 +50,7 @@ except that the token will be a JWT instead of UUID-formatted random bytes. To activate a secondary using the fetched token, run: - $ vault write sys/replication/secondary/enable token= + $ vault write sys/replication/performance/secondary/enable token= You must provide the full token value. Be very careful when running this @@ -85,10 +85,10 @@ consistent `~/.vault-token` file or `VAULT_TOKEN` environment variable when working with both clusters. On a production system, after a secondary is activated, the enabled -authentication backends should be used to get tokens with appropriate policies +authentication backends should be used to get tokens with appropriate policies, as policies and auth backend configuration are replicated. -The generate-root command can be also be used to generate a root token local to +The generate-root command can also be used to generate a root token local to the secondary cluster. 
## Managing Vault Replication diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/index.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/index.html.md similarity index 80% rename from vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/index.html.md rename to vendor/github.com/hashicorp/vault/website/source/guides/upgrading/index.html.md index 6d2d968..24af3db 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/index.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/index.html.md @@ -1,7 +1,7 @@ --- -layout: "docs" +layout: "guides" page_title: "Upgrading Vault - Guides" -sidebar_current: "docs-guides-upgrading" +sidebar_current: "guides-upgrading" description: |- These are general upgrade instructions for Vault for both non-HA and HA setups. Please ensure that you also read the version-specific upgrade notes. @@ -10,8 +10,13 @@ description: |- # Upgrading Vault These are general upgrade instructions for Vault for both non-HA and HA setups. -Please ensure that you also read the version-specific upgrade notes which can be -found in the sidebar. +_Please ensure that you also read any version-specific upgrade notes which can be +found in the sidebar._ + +**Always** back up your data before upgrading! Vault does not make +backwards-compatibility guarantees for its data store. If you need to roll back +to a previous version of Vault, it is always a good idea to roll back your data +store as well. ## Non-HA Installations @@ -32,6 +37,10 @@ particular setup since HA setups can differ on whether a load balancer is in use, what addresses clients are being given to connect to Vault (standby + leader, leader-only, or discovered via service discovery), etc. +Whatever method you use, you should ensure that you never fail over from a +newer version of Vault to an older version. Our suggested procedure is designed +to prevent this. 
+ Please note that Vault does not support true zero-downtime upgrades, but with proper upgrade procedure the downtime should be very short (a few hundred milliseconds to a second depending on how the speed of access to the storage @@ -58,7 +67,8 @@ active duty. To do this: 3. Start the node 4. Unseal the node (it will now be a standby) -Internal upgrade tasks will happen after one of the upgraded standby nodes takes over active duty. +Internal upgrade tasks will happen after one of the upgraded standby nodes +takes over active duty. Be sure to also read and follow any instructions in the version-specific upgrade notes. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.5.0.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.5.0.html.md similarity index 99% rename from vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.5.0.html.md rename to vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.5.0.html.md index 6adf579..c4c8a49 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.5.0.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.5.0.html.md @@ -1,7 +1,7 @@ --- -layout: "docs" +layout: "guides" page_title: "Upgrading to Vault 0.5.0 - Guides" -sidebar_current: "docs-guides-upgrading-to-0.5.0" +sidebar_current: "guides-upgrading-to-0.5.0" description: |- This page contains the full list of breaking changes for Vault 0.5, including actions you must take to facilitate a smooth upgrade path. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.5.1.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.5.1.html.md similarity index 97% rename from vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.5.1.html.md rename to vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.5.1.html.md index af7d0db..143def7 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.5.1.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.5.1.html.md @@ -1,7 +1,7 @@ --- -layout: "docs" +layout: "guides" page_title: "Upgrading to Vault 0.5.1 - Guides" -sidebar_current: "docs-guides-upgrading-to-0.5.1" +sidebar_current: "guides-upgrading-to-0.5.1" description: |- This page contains the list of breaking changes for Vault 0.5.1. Please read it carefully. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.0.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.0.html.md similarity index 97% rename from vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.0.html.md rename to vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.0.html.md index e939efa..0b4fcef 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.0.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.0.html.md @@ -1,7 +1,7 @@ --- -layout: "docs" +layout: "guides" page_title: "Upgrading to Vault 0.6.0 - Guides" -sidebar_current: "docs-guides-upgrading-to-0.6.0" +sidebar_current: "guides-upgrading-to-0.6.0" description: |- This page contains the list of breaking changes for Vault 0.6. Please read it carefully. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.1.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.1.html.md similarity index 97% rename from vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.1.html.md rename to vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.1.html.md index 643f5ef..a8ed967 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.1.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.1.html.md @@ -1,7 +1,7 @@ --- -layout: "docs" +layout: "guides" page_title: "Upgrading to Vault 0.6.1 - Guides" -sidebar_current: "docs-guides-upgrading-to-0.6.1" +sidebar_current: "guides-upgrading-to-0.6.1" description: |- This page contains the list of breaking changes for Vault 0.6.1. Please read it carefully. @@ -16,7 +16,7 @@ carefully. Once an active node is running 0.6.1, only standby nodes running 0.6.1+ will be able to form an HA cluster. If following our [general upgrade -instructions](/docs/guides/upgrading/index.html) this will +instructions](/guides/upgrading/index.html) this will not be an issue. 
## Health Endpoint Status Code Changes diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.2.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.2.html.md similarity index 98% rename from vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.2.html.md rename to vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.2.html.md index 6e5ff2d..511ec1d 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.2.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.2.html.md @@ -1,7 +1,7 @@ --- -layout: "docs" +layout: "guides" page_title: "Upgrading to Vault 0.6.2 - Guides" -sidebar_current: "docs-guides-upgrading-to-0.6.2" +sidebar_current: "guides-upgrading-to-0.6.2" description: |- This page contains the list of deprecations and important or breaking changes for Vault 0.6.2. Please read it carefully. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.3.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.3.html.md similarity index 95% rename from vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.3.html.md rename to vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.3.html.md index ca5a477..bc32654 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.3.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.3.html.md @@ -1,7 +1,7 @@ --- -layout: "docs" +layout: "guides" page_title: "Upgrading to Vault 0.6.3 - Guides" -sidebar_current: "docs-guides-upgrading-to-0.6.3" +sidebar_current: "guides-upgrading-to-0.6.3" description: |- This page contains the list of deprecations and important or breaking changes for Vault 0.6.3. Please read it carefully. diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.4.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.4.html.md similarity index 98% rename from vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.4.html.md rename to vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.4.html.md index c93dbba..1daaf58 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.6.4.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.6.4.html.md @@ -1,7 +1,7 @@ --- -layout: "docs" +layout: "guides" page_title: "Upgrading to Vault 0.6.4 - Guides" -sidebar_current: "docs-guides-upgrading-to-0.6.4" +sidebar_current: "guides-upgrading-to-0.6.4" description: |- This page contains the list of deprecations and important or breaking changes for Vault 0.6.4. Please read it carefully. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.7.0.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.7.0.html.md similarity index 96% rename from vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.7.0.html.md rename to vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.7.0.html.md index 505df62..cffa2ba 100644 --- a/vendor/github.com/hashicorp/vault/website/source/docs/guides/upgrading/upgrade-to-0.7.0.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.7.0.html.md @@ -1,7 +1,7 @@ --- -layout: "docs" +layout: "guides" page_title: "Upgrading to Vault 0.7.0 - Guides" -sidebar_current: "docs-guides-upgrading-to-0.7.0" +sidebar_current: "guides-upgrading-to-0.7.0" description: |- This page contains the list of deprecations and important or breaking changes for Vault 0.7.0. Please read it carefully. diff --git a/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.8.0.html.md b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.8.0.html.md new file mode 100644 index 0000000..155bbfd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/guides/upgrading/upgrade-to-0.8.0.html.md @@ -0,0 +1,53 @@ +--- +layout: "guides" +page_title: "Upgrading to Vault 0.8.0 - Guides" +sidebar_current: "guides-upgrading-to-0.8.0" +description: |- + This page contains the list of deprecations and important or breaking changes + for Vault 0.8.0. Please read it carefully. +--- + +# Overview + +This page contains the list of deprecations and important or breaking changes +for Vault 0.8.0 compared to the most recent release. Please read it carefully. + +## Enterprise Upgrade Procedure + +If you are upgrading from Vault Enterprise, you should take one of the +following upgrade paths. 
Please note that reindexing stops the ability of the +node to process requests while the indexing is happening. As a result, you may +want to plan the upgrade for a time when either no writes to the primary are +expected and secondaries can handle traffic (if using replication), or when +there is a maintenance window. + +There are two reindex processes that need to happen during the upgrade, for two +different indexes. One will happen automatically when the cluster is upgraded +to 0.8.0. _This happens even if you are not currently using replication._ The +other can be done in one of a few ways, as follows. + +### If Not Using Replication + +If not using replication, no further action needs to be taken. + +### If Using Replication + +#### Option 1: Reindex the Primary, then Upgrade Secondaries + +The first option is to issue a write to [`sys/replication/reindex`][reindex] on the +primary (it is not necessary on the secondaries). When the reindex on the +primary is finished, upgrade the secondaries, then upgrade the primary. + +#### Option 2: Upgrade All Nodes Simultaneously + +The second option is to upgrade all nodes to 0.8.0 at the same time. This +removes the need to perform an explicit reindex but may equate to more down +time since secondaries will not be able to service requests while the primary +is performing an explicit reindex. + +## `sys/revoke-force` Requires `sudo` Capability + +This path was meant to require `sudo` capability but was not implemented this +way. It now requires `sudo` capability to run. + +[reindex]: https://www.vaultproject.io/api/system/replication.html#reindex-replication diff --git a/vendor/github.com/hashicorp/vault/website/source/index.html.erb b/vendor/github.com/hashicorp/vault/website/source/index.html.erb index ba50137..26b0bf0 100644 --- a/vendor/github.com/hashicorp/vault/website/source/index.html.erb +++ b/vendor/github.com/hashicorp/vault/website/source/index.html.erb @@ -107,24 +107,18 @@ description: |-
    -
    -

    Vault v0.6.5 Released

    -

    - We are pleased to announce the release of Vault v0.6.5. This release includes several new features, improvements, and bug fixes. -

    - -
    -

    Vault on the Changelog Podcast

    -

    - Listen to the recording with Adam Stacoviak on the popular Changelog podcast to talk about modern secrets management. -

    - -
    + <% end %>
    diff --git a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/authentication.html.md b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/authentication.html.md index a57d941..ece6a9d 100644 --- a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/authentication.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/authentication.html.md @@ -1,6 +1,6 @@ --- layout: "intro" -page_title: "Authentication" +page_title: "Authentication - Getting Started" sidebar_current: "gettingstarted-auth" description: |- Authentication to Vault gives a user access to use Vault. Vault can authenticate using multiple methods. @@ -15,7 +15,7 @@ us in as the root user. In practice, you'll almost always have to manually authe On this page, we'll talk specifically about _authentication_. On the next page, we talk about -[_authorization_](/intro/getting-started/acl.html). Authentication is the +[_authorization_](/intro/getting-started/policies.html). Authentication is the mechanism of assigning an identity to a Vault user. The access control and permissions associated with an identity are authorization, and will not be covered on this page. @@ -49,7 +49,7 @@ token_policies [root] ``` By default, this will create a child token of your current token that -inherits all the same access control policies. The "child" concept here +inherits all the same policies. The "child" concept here is important: tokens always have a parent, and when that parent token is revoked, children can also be revoked all in one operation. This makes it easy when removing access for a user, to remove access for all sub-tokens @@ -165,4 +165,4 @@ The multiple authentication backends Vault provides let you choose the most appropriate authentication mechanism for your organization. In this next section, we'll learn about -[authorization and access control policies](/intro/getting-started/acl.html). 
+[authorization and policies](/intro/getting-started/policies.html). diff --git a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/deploy.html.md b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/deploy.html.md index af58144..93438fb 100644 --- a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/deploy.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/deploy.html.md @@ -1,6 +1,6 @@ --- layout: "intro" -page_title: "Deploy Vault" +page_title: "Deploy Vault - Getting Started" sidebar_current: "gettingstarted-deploy" description: |- Learn how to deploy Vault into production, how to initialize it, configure it, etc. @@ -23,7 +23,7 @@ As a reminder, JSON files are also fully HCL-compatible; HCL is a superset of JS The configuration file for Vault is relatively simple. An example is shown below: ```javascript -backend "consul" { +storage "consul" { address = "127.0.0.1:8500" path = "vault" } @@ -36,15 +36,15 @@ listener "tcp" { Within the configuration file, there are two primary configurations: - * `backend` - This is the physical backend that Vault uses for - storage. Up to this point the dev server has used "inmem" (in memory), - but in the example above we're using [Consul](https://www.consul.io), - a much more production-ready backend. + * `storage` - This is the physical backend that Vault uses for storage. Up to + this point the dev server has used "inmem" (in memory), but in the example + above we're using [Consul](https://www.consul.io), a much more + production-ready backend. - * `listener` - One or more listeners determine how Vault listens for - API requests. In the example above we're listening on localhost port - 8200 without TLS. In your environment set `VAULT_ADDR=http://127.0.0.1:8200` - so the Vault client will connect without TLS. + * `listener` - One or more listeners determine how Vault listens for API + requests. 
In the example above we're listening on localhost port 8200 + without TLS. In your environment set `VAULT_ADDR=http://127.0.0.1:8200` so + the Vault client will connect without TLS. For now, copy and paste the configuration above to a file called `example.hcl`. It will configure Vault to expect an instance of Consul @@ -69,7 +69,7 @@ $ vault server -config=example.hcl ==> Vault server configuration: Log Level: info - Backend: consul + Storage: consul Listener 1: tcp (addr: "127.0.0.1:8200", tls: "disabled") ==> Vault server started! Log data will stream in below: diff --git a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/dev-server.html.md b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/dev-server.html.md index 5c0c95c..18968ca 100644 --- a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/dev-server.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/dev-server.html.md @@ -1,6 +1,6 @@ --- layout: "intro" -page_title: "Starting the Server" +page_title: "Starting the Server - Getting Started" sidebar_current: "gettingstarted-devserver" description: |- After installing Vault, the next step is to start the server. diff --git a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/dynamic-secrets.html.md b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/dynamic-secrets.html.md index 2aa1099..b723382 100644 --- a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/dynamic-secrets.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/dynamic-secrets.html.md @@ -1,6 +1,6 @@ --- layout: "intro" -page_title: "Dynamic Secrets" +page_title: "Dynamic Secrets - Getting Started" sidebar_current: "gettingstarted-dynamicsecrets" description: |- On this page we introduce dynamic secrets by showing you how to create AWS access keys with Vault. 
diff --git a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/first-secret.html.md b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/first-secret.html.md index 7e6a90b..8a5de44 100644 --- a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/first-secret.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/first-secret.html.md @@ -1,6 +1,6 @@ --- layout: "intro" -page_title: "Your First Secret" +page_title: "Your First Secret - Getting Started" sidebar_current: "gettingstarted-firstsecret" description: |- With the Vault server running, let's read and write our first secret. diff --git a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/help.html.md b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/help.html.md index dd9a792..a8a046f 100644 --- a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/help.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/help.html.md @@ -1,6 +1,6 @@ --- layout: "intro" -page_title: "Built-in Help" +page_title: "Built-in Help - Getting Started" sidebar_current: "gettingstarted-help" description: |- Vault has a built-in help system to learn about the available paths in Vault and how to use them. @@ -9,7 +9,7 @@ description: |- # Built-in Help You've now worked with `vault write` and `vault read` for multiple -paths: the generic secret backend with `secret/` and dynamic AWS +paths: the kv secret backend with `secret/` and dynamic AWS credentials with the AWS backend provider at `aws/`. In both cases, the structure and usage of each backend differed, for example the AWS backend has special paths like `aws/config`. @@ -109,7 +109,7 @@ There is also a description of what that path does. Go ahead and explore more paths! Mount other backends, traverse their help systems and learn about what they do. For example, learn about the -generic `secret/` path. 
+kv `secret/` path. ## Next diff --git a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/install.html.md b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/install.html.md index c337854..5a6ce34 100644 --- a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/install.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/install.html.md @@ -1,6 +1,6 @@ --- layout: "intro" -page_title: "Install Vault" +page_title: "Install Vault - Getting Started" sidebar_current: "gettingstarted-install" description: |- The first step to using Vault is to get it installed. @@ -48,6 +48,7 @@ Common commands: revoke Revoke a secret. server Start a Vault server status Outputs status of whether Vault is sealed and if HA mode is enabled + unwrap Unwrap a wrapped secret write Write secrets or configuration into Vault All other commands: @@ -57,8 +58,11 @@ All other commands: auth Prints information about how to authenticate with Vault auth-disable Disable an auth provider auth-enable Enable a new auth provider + capabilities Fetch the capabilities of a token on a given path + generate-root Generates a new root token init Initialize a new Vault server key-status Provides information about the active encryption key + list List data or secrets in Vault mount Mount a logical backend mount-tune Tune mount configuration parameters mounts Lists mounted backends in Vault @@ -70,7 +74,9 @@ All other commands: rotate Rotates the backend encryption key used to persist data seal Seals the vault server ssh Initiate a SSH session + step-down Force the Vault node to give up active duty token-create Create a new auth token + token-lookup Display information about the specified token token-renew Renew an auth token if there is an associated lease token-revoke Revoke one or more auth tokens unmount Unmount a secret backend @@ -88,4 +94,3 @@ Otherwise, Vault is installed and ready to go! 
Now Vault is installed we can start our first Vault server! [Let's do that now](/intro/getting-started/dev-server.html). - diff --git a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/next-steps.html.md b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/next-steps.html.md index 3e6b0e0..d4bc6b2 100644 --- a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/next-steps.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/next-steps.html.md @@ -1,6 +1,6 @@ --- layout: "intro" -page_title: "Next Steps" +page_title: "Next Steps - Getting Started" sidebar_current: "gettingstarted-nextsteps" description: |- After completing the getting started guide, learn about what to do next with Vault. diff --git a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/acl.html.md b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/policies.html.md similarity index 91% rename from vendor/github.com/hashicorp/vault/website/source/intro/getting-started/acl.html.md rename to vendor/github.com/hashicorp/vault/website/source/intro/getting-started/policies.html.md index 359b024..73ba88d 100644 --- a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/acl.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/policies.html.md @@ -1,19 +1,19 @@ --- layout: "intro" -page_title: "Access Control Policies" -sidebar_current: "gettingstarted-acl" +page_title: "Policies - Getting Started" +sidebar_current: "gettingstarted-policies" description: |- - Access control policies in Vault control what a user can access. + Policies in Vault control what a user can access. --- -# Access Control Policies (ACLs) +# Policies -Access control policies in Vault control what a user can access. In +Policies in Vault control what a user can access. In the last section, we learned about _authentication_. 
This section is about _authorization_. For authentication Vault has multiple options or backends that -can be enabled and used. For authorization and access control policies Vault always +can be enabled and used. For authorization and policies Vault always uses the same format. All authentication backends must map identities back to the core policies that are configured with Vault. @@ -31,15 +31,15 @@ policy is shown below: ```javascript path "secret/*" { - policy = "write" + capabilities = ["create"] } path "secret/foo" { - policy = "read" + capabilities = ["read"] } path "auth/token/lookup-self" { - policy = "read" + capabilities = ["read"] } ``` diff --git a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/secret-backends.html.md b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/secret-backends.html.md index 4e8d90e..9ff188d 100644 --- a/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/secret-backends.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/intro/getting-started/secret-backends.html.md @@ -1,6 +1,6 @@ --- layout: "intro" -page_title: "Secret Backends" +page_title: "Secret Backends - Getting Started" sidebar_current: "gettingstarted-secretbackends" description: |- Secret backends are what create, read, update, and delete secrets. @@ -10,11 +10,11 @@ description: |- Previously, we saw how to read and write arbitrary secrets to Vault. To do this, we used the `secret/` prefix. This prefix specifies which -backend to use. By default, Vault mounts a backend called _generic_ to -`secret/`. The generic backend reads and writes raw data to the backend +backend to use. By default, Vault mounts a backend called _kv_ to +`secret/`. The kv backend reads and writes raw data to the backend storage. 
-Vault supports other backends in addition to the _generic_ backend, and this feature +Vault supports other backends in addition to the _kv_ backend, and this feature in particular is what makes Vault unique. For example, the _aws_ backend generates AWS access keys dynamically, on demand. Another example -- this type of backend does not yet exist -- is a backend that @@ -23,7 +23,7 @@ reads and writes data directly to an As Vault matures, more and more backends will be added. To represent backends, Vault behaves much like a filesystem: backends -are mounted at specific paths. For example, the _generic_ backend is +are mounted at specific paths. For example, the _kv_ backend is mounted at the `secret/` prefix. On this page, we'll learn about the mount system and the operations @@ -32,40 +32,40 @@ for the next page, where we'll create dynamic secrets. ## Mount a Backend -To start, let's mount another _generic_ backend. Just like a normal +To start, let's mount another _kv_ backend. Just like a normal filesystem, Vault can mount a backend multiple times at different -mount points. This is useful if you want different access control policies +mount points. This is useful if you want different policies (covered later) or configurations for different paths. To mount the backend: ``` -$ vault mount generic -Successfully mounted 'generic' at 'generic'! +$ vault mount kv +Successfully mounted 'kv' at 'kv'! ``` By default, the mount point will be the same name as the backend. This is because 99% of the time, you don't want to customize this mount point. -In this example, we mounted the _generic_ backend at `generic/`. +In this example, we mounted the _kv_ backend at `kv/`. 
You can inspect mounts using `vault mounts`: ``` $ vault mounts Path Type Description -generic/ generic -secret/ generic generic secret storage +kv/ kv +secret/ kv key/value secret storage sys/ system system endpoints used for control, policy and debugging ``` -You can see the `generic/` path we just mounted, as well as the built-in +You can see the `kv/` path we just mounted, as well as the built-in secret path. You can also see the `sys/` path. We won't cover this in this guide, but this mount point is used to interact with the Vault core system. Spend some time reading and writing secrets to the new mount point to convince yourself it works. As a bonus, write to the `secret/` endpoint -and observe that those values are unavailable via the `generic/` path: they share the +and observe that those values are unavailable via the `kv/` path: they share the same backend, but do not share any data. In addition to this, backends (of the same type or otherwise) _cannot_ access the data of other backends; they can only access data within their mount point. @@ -78,8 +78,8 @@ and its data is deleted. If either of these operations fail, the backend remains mounted. ``` -$ vault unmount generic/ -Successfully unmounted 'generic/' if it was mounted +$ vault unmount kv/ +Successfully unmounted 'kv/' if it was mounted ``` In addition to unmounting, you can remount a backend. Remounting a @@ -95,7 +95,7 @@ Now that you've mounted and unmounted a backend, you might wonder: Vault behaves a lot like a [virtual filesystem](https://en.wikipedia.org/wiki/Virtual_file_system). The read/write/delete operations are forwarded to the backend, and the backend can choose to react to these operations however it wishes. -For example, the _generic_ backend simply passes this through to the +For example, the _kv_ backend simply passes this through to the storage backend (after encrypting data first). 
However, the _aws_ backend (which you'll see soon), will read/write IAM diff --git a/vendor/github.com/hashicorp/vault/website/source/intro/vs/hsm.html.md b/vendor/github.com/hashicorp/vault/website/source/intro/vs/hsm.html.md index d203690..34476e5 100644 --- a/vendor/github.com/hashicorp/vault/website/source/intro/vs/hsm.html.md +++ b/vendor/github.com/hashicorp/vault/website/source/intro/vs/hsm.html.md @@ -14,8 +14,9 @@ device that is meant to secure various secrets using protections against access and tampering at both the software and hardware layers. The primary issue with HSMs is that they are expensive and not very cloud -friendly. Amazon provides CloudHSM, but the minimum price point to even begin -using CloudHSM is in the thousands of US dollars. +friendly. An exception to the latter is Amazon's CloudHSM service, which is +friendly for AWS users but still costs more than $14k per year per instance, +and not as useful for heterogenous cloud architectures. Once an HSM is up and running, configuring it is generally very tedious, and the API to request secrets is also difficult to use. Example: CloudHSM requires diff --git a/vendor/github.com/hashicorp/vault/website/source/layouts/_sidebar.erb b/vendor/github.com/hashicorp/vault/website/source/layouts/_sidebar.erb index 41a3f31..9ae894d 100644 --- a/vendor/github.com/hashicorp/vault/website/source/layouts/_sidebar.erb +++ b/vendor/github.com/hashicorp/vault/website/source/layouts/_sidebar.erb @@ -8,6 +8,7 @@
  • @@ -66,7 +70,7 @@ > - Access Control Policies + Policies > @@ -96,9 +100,15 @@ > Azure + > + CockroachDB + > Consul + > + CouchDB + > DynamoDB @@ -120,6 +130,9 @@ > PostgreSQL + > + Cassandra + > S3 @@ -153,47 +166,6 @@ - > - Guides - - -
    > @@ -203,10 +175,6 @@ AWS - > - Cassandra - - > Consul @@ -215,44 +183,95 @@ Cubbyhole - > - Generic + > + Databases + - > - MongoDB + > + Key/Value - > - MSSQL - - - > - MySQL + > + Identity > PKI (Certificates) - > - PostgreSQL - - > RabbitMQ > SSH + + + + > + TOTP > Transit - > - Custom +
    + + > + Cassandra DEPRECATED + + + > + MongoDB DEPRECATED + + + > + MSSQL DEPRECATED + + + > + MySQL DEPRECATED + + + > + PostgreSQL DEPRECATED @@ -260,10 +279,6 @@ > Auth Backends @@ -323,41 +353,54 @@ + > + Plugin Backends + +
    > - Vault Enterprise + Vault Enterprise diff --git a/vendor/github.com/hashicorp/vault/website/source/layouts/guides.erb b/vendor/github.com/hashicorp/vault/website/source/layouts/guides.erb new file mode 100644 index 0000000..e95bb84 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/website/source/layouts/guides.erb @@ -0,0 +1,62 @@ +<% wrap_layout :inner do %> + <% content_for :sidebar do %> + + <% end %> + + <%= yield %> +<% end %> diff --git a/vendor/github.com/hashicorp/vault/website/source/layouts/intro.erb b/vendor/github.com/hashicorp/vault/website/source/layouts/intro.erb index 4855b78..4646ea4 100644 --- a/vendor/github.com/hashicorp/vault/website/source/layouts/intro.erb +++ b/vendor/github.com/hashicorp/vault/website/source/layouts/intro.erb @@ -73,8 +73,8 @@ Authentication - > - ACLs + > + Policies > diff --git a/vendor/github.com/hashicorp/vault/website/source/layouts/layout.erb b/vendor/github.com/hashicorp/vault/website/source/layouts/layout.erb index f286c1b..110b8ce 100644 --- a/vendor/github.com/hashicorp/vault/website/source/layouts/layout.erb +++ b/vendor/github.com/hashicorp/vault/website/source/layouts/layout.erb @@ -74,8 +74,8 @@